Bayesian Data Analysis Code

Chapter 1 has no code

Chapter 2 Code

p1=dbinom(5,10,1/2)*.1
p2=dbinom(5,10,1/4)*.9
(post=p1/(p1+p2))
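For comparison, a minimal Python sketch of the same two-model posterior calculation (my own illustration assuming scipy is available, not from the book's listings):

from scipy.stats import binom
p1 = binom.pmf(5, 10, 1/2) * 0.1   # likelihood times prior for theta = 1/2
p2 = binom.pmf(5, 10, 1/4) * 0.9   # likelihood times prior for theta = 1/4
print(p1 / (p1 + p2))              # posterior probability of theta = 1/2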

Chapter 3 Code

R code


x=1:100 # assign the integer vector 1,2,...,100 to x
(x=1:100)  # same as above, but also print the result
sample(x,20)  # draw a sample of 20 values from 1,2,...,100 without replacement
set.seed(0);sample(1:10,3) # set the random seed before sampling
 # draw a sample of 10000 values from 1,2,...,200000 without replacement:
z=sample(1:200000,10000)
z[1:10] # the brackets hold the indices of vector z
y=c(1,3,7,3,4,2)
z[y] # the elements of z indexed by y
(z=sample(x,100,rep=T)) # draw a sample of 100 values from x with replacement
(z1=unique(z))
length(z1) # number of distinct elements in z
xz=setdiff(x,z)  # elements of x not in z -- set difference
sort(union(xz,z)) # sort the union of xz and z in increasing order
setequal(union(xz,z),x)  # is the union of xz and z the same set as x?
intersect(1:10,7:50)  # intersection of two sets
sample(1:100,20,prob=1:100) # unequal-probability sampling from 1:100,
 # each number's selection probability is proportional to its value
pi*10^2  # use ?"*", ?"^" etc. to get help on basic operators; pi is the circle constant
"*"(pi,"^"(10,2))  # same as above, rather clumsy -- nobody writes it this way
pi*(1:10)^-2.3 # exponents apply elementwise to a vector
x = pi * 10^2 ; print(x) 
(x=pi *10^2)  # assign and print in one step
pi^(1:5)  # the exponent can also be a vector
print(x, digits= 12) # print x to 12 significant digits
x=pi*10^2
class(x)  # the class of x
typeof(x)  # the type of x
class(cars) # cars is a data set built into R
typeof(cars)  # the type of cars
names(cars) # the variable names of cars
summary(cars)  # summary of cars
head(cars) # the first few rows of cars, same as cars[1:6,]
tail(cars)  # the last few rows of cars
str(cars) # another kind of summary
row.names(cars)  # row names
attributes(cars) # some information about cars
class(dist~speed) # a formula: the response is left of "~", predictors on the right
plot(dist~speed,cars) # scatterplot of the two variables
plot(cars$speed,cars$dist)  # same as above
ncol(cars);nrow(cars)  # numbers of columns and rows of cars
dim(cars)  # dimensions of cars
lm(dist ~ speed, data = cars) # OLS regression of dist on speed
cars$qspeed =cut(cars$speed, breaks=quantile(cars$speed),
include.lowest = TRUE)  # add a categorical variable qspeed, split at the quartiles
names(cars)  # the data set cars now has one more variable
cars[3] # values of the third variable, similar to cars[,3]
table(cars[3]) # tabulate
is.factor(cars$qspeed)
plot(dist ~ qspeed, data = cars) # draws boxplots
 # fit a linear model (simple least squares regression):
(a=lm(dist ~ qspeed, data = cars))
summary(a) # regression results (including some tests)
x <- round(runif(20,0,20), digits=2) # round to 2 decimal places
summary(x) # summary
min(x);max(x) # extremes, similar to range(x)
median(x)  # median
mean(x)    # mean
var(x)     # variance
sd(x)      # standard deviation, the square root of the variance
sqrt(var(x)) # square root
rank(x)    # ranks
order(x) # indices of x in increasing order
order(x,decreasing = T) # indices of x in decreasing order
x[order(x)]  # same as sort(x)
sort(x)      # same as above: x in increasing order
sort(x,decreasing=T) # or sort(x,dec=T): x in decreasing order
sum(x);length(x) # sum and number of elements of a vector
round(x)  # round to integers, same as round(x,0); round(x,5) keeps 5 decimal places
fivenum(x)   # five-number summary, quantile
quantile(x)  # quantiles (a different convention; several definitions exist)
quantile(x, c(0,.33,.66,1))
mad(x)  # median absolute deviation
cummax(x) # cumulative maxima
cummin(x) # cumulative minima
cumprod(x) # cumulative products
cor(x,sin(x/20))  # linear correlation coefficient
x=rnorm(200) # assign 200 random normal numbers to x
hist(x, col = "light blue") # histogram
rug(x)  # add marks under the histogram at the actual data points
stem(x) # stem-and-leaf plot
x <- rnorm(500)
y <- x + rnorm(500)  # construct a linear relationship
plot(y~ x)  # scatterplot
a=lm(y~x)  # fit the regression
abline(a,col="red") # or abline(lm(y~x),col="red"): add the fitted line to the scatterplot
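A rough Python sketch of the same simulated regression and fitted line (assumes numpy and matplotlib; np.polyfit stands in here for R's lm):

import numpy as np
import matplotlib.pyplot as plt
x = np.random.randn(500)
y = x + np.random.randn(500)
b1, b0 = np.polyfit(x, y, 1)       # slope and intercept of the least-squares line
plt.scatter(x, y)
xs = np.sort(x)
plt.plot(xs, b0 + b1 * xs, 'r-')   # add the fitted line, as abline() does
plt.show()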
print("Hello World!")
paste("x 的最小值= ", min(x))  #打印
demo(graphics) #演示画图(点击Enter来切换)
(2+4i)^-3.5+(2i+4.5)*(-1.7-2.3i)/((2.6-7i)*(-4+5.1i)) #复数运算
#下面构造一个10维复向量, 实部和虚部均为10个标准正态样本点:
(z <-complex(real=rnorm(10), imaginary =rnorm(10)))
complex(re=rnorm(3),im=rnorm(3))#3维复向量
Re(z) #实部
Im(z) #虚部
Mod(z) #模
Arg(z) #辐角
choose(3,2)  #组合
factorial(6)#排列6!

# define a function
test=function(x,den,...){
  y=den(x,...)
  return(y)
}
test(12,dnorm,10,1)
plot(seq(0,5,.1),test(seq(0,5,.1),dgamma,5,5),type='l')
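The same pattern of passing a density function plus extra parameters works in Python via *args; a sketch assuming scipy (the helper test here is hypothetical, mirroring the R function above):

from scipy.stats import norm

def test(x, den, *args):
    return den(x, *args)           # forward the extra arguments to the density

print(test(12, norm.pdf, 10, 1))   # normal density with mean 10 and sd 1, like dnorm(12,10,1)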


# find the extremum of a function
f=function(x) x^2+2*x+1 # define a quadratic function
optimize(f,c(-2,2)) # find the extremum over the interval (-2,2)
curve(f, from = -3,to=2) # plot the function f defined above over (-3,2)

# roots of the polynomial whose coefficients, from the constant to the 5th-degree term, are 1, 2, 2, 4, -9, 8:
polyroot(c(1,2,2,4,-9,8))
a=factor(letters[1:10]);a # letters: the lowercase alphabet; LETTERS: uppercase
a[3]="w" # not allowed! gives a warning
a=as.character(a) # convert first
a[3]="w" # now it works
a;factor(a) # two different types
# the levels of a categorical variable:
levels(factor(a))
sex=sample(0:1,10,r=T)
sex=factor(sex);levels(sex)
# change the levels of the factor:
levels(sex)=c("Male","Female");levels(sex)
# fix the order of the levels:
sex=ordered(sex,c("Female","Male"));sex
levels(sex)
x=scan() # keyboard input: type or paste; for multi-line input press Enter after an empty line
1.5 2.6 3.7 2.1 8.9 12 -1.2 -4

x=c(1.5,2.6,3.7,2.1,8.9,12,-1.2,-4) # equivalent to the code above
w=read.table(file.choose(),header=T) # choose a file with variable names from a dialog
setwd("f:/mydata") # set the working directory
(x=rnorm(20)) # assign 20 standard normal values to x
# (note: R has random number, distribution, density and quantile functions for the common distributions)
write(x,"test.txt") # write the data to a file (the path must exist)
y=scan("test.txt");y # scan the numeric data from the file into y
y=iris;y[1:5,];str(y)  # iris is a data set built into R
write.table(y,"test.txt",row.names=F) # write the data to a text file
w=read.table("test.txt",header=T) # read data that has variable names
str(w)  # summary
write.csv(y,"test.csv") # write the data to a csv file
v=read.csv("test.csv") # read the csv data file
str(v)  # summary
data=read.table("clipboard") # read data from the clipboard
(z=seq(-1,10,length=100)) # a sequence of 100 equally spaced numbers from -1 to 10
z=seq(-1,10,len=100) # equivalent to the line above
(z=seq(10,-1,-0.1))  # a sequence from 10 to -1 in steps of -0.1
(x=rep(1:3,3))  # repeat 1:3 three times
(x=rep(3:5,1:3)) # look for yourself -- what does this one do?
x=rep(c(1,10),c(4,5))
w=c(1,3,x,z);w[3] # combine data (including vectors) into one vector
x=rep(0,10);z=1:3;x+z  # vector addition (if the lengths differ, R warns and recycles)
x*z    # vector multiplication
rev(x) # reverse the order
z=c("no cat","has ","nine","tails") # character vector
z[1]=="no cat" # the double equals sign is logical comparison
z=1:5
z[7]=8;z # what happens? note: NA means a missing value (not available)
z=NULL
z[c(1,3,5)]=1:3;
z
rnorm(10)[c(2,5)]
z[-c(1,3)] # drop elements 1 and 3
z=sample(1:100,10);z
which(z==max(z)) # index of the maximum
x=sample(1:100,12);x # sampling
all(x>0);all(x!=0);any(x>0);(1:10)[x>0] # uses of logical operators
diff(x) # differences
diff(x,lag=2) # differences at lag 2
x=matrix(1:20,4,5);x # construct a matrix
x=matrix(1:20,4,5,byrow=T);x # construct a matrix, filled by row
t(x) # matrix transpose
x=matrix(sample(1:100,20),4,5)
2*x
x+5
y=matrix(sample(1:100,20),5,4)
x+t(y) # adding matrices
(z=x%*%y) # matrix multiplication
z1=solve(z) # solve(a,b) solves the equation ax=b; solve(z) gives the inverse of z
z1%*%z # should be the identity matrix, but floating point arithmetic never gives exact zeros
round(z1%*%z,14)  # round
b=solve(z,1:4); b # solve a system of linear equations
nrow(x);ncol(x);dim(x) # numbers of rows and columns
x=matrix(rnorm(24),4,6)
x[c(2,1),] # rows 2 and 1
x[,c(1,3)] # columns 1 and 3
x[2,1] # the [2,1] element
x[x[,1]>0,1] # the positive elements of column 1
sum(x[,1]>0) # number of positive elements in column 1
sum(x[,1]<=0) # number of non-positive elements in column 1
x[,-c(1,3)] # x without columns 1 and 3
diag(x)  # the diagonal elements of x
diag(1:5) # diagonal matrix with 1:5 on the diagonal and zeros elsewhere
diag(5) # 5-dimensional identity matrix
x[-2,-c(1,3)] # x without row 2 and columns 1 and 3
x[x[,1]>0&x[,3]<=1,1] # elements of column 1 where column 1 > 0 and column 3 <= 1
x[x[,2]>0|x[,1]<.51,1] # elements of column 1 where column 1 < .51 or column 2 > 0
x[!x[,2]<.51,1] # elements of column 1 where column 2 >= .51
apply(x,1,mean) # means over rows (the first dimension)
apply(x,2,sum) # sums over columns (the second dimension)
x=matrix(rnorm(24),4,6)
x[lower.tri(x)]=0;x # get an upper triangular matrix;
# to get a lower triangular matrix, use x[upper.tri(x)]=0
x=array(runif(24),c(4,3,2));x
# the line above builds a 4 by 3 by 2 three-dimensional array from 24 uniform random samples
is.matrix(x)
dim(x) # get the dimensions (4,3,2)
is.matrix(x[1,,]) # a slice of a three-dimensional array is a matrix
x=array(1:24,c(4,3,2))
x[c(1,3),,]
x=array(1:24,c(4,3,2))
apply(x,1,mean)   # means over one of the dimensions
apply(x,1:2,sum)  # sums over part of the dimensions
apply(x,c(1,3),prod) # products over part of the dimensions
x=matrix(1:20,5,4) # 5 by 4 matrix
sweep(x,1,1:5,"*") # multiply each row by the corresponding element of 1:5
sweep(x,2,1:4,"+") # add each element of 1:4 to the corresponding column
x*1:5
 # standardize x below: subtract the column mean from each element and divide by the column sd
(x=matrix(sample(1:100,24),6,4));(x1=scale(x))
(x2=scale(x,scale=F)) # inspect the result and summarize what happened
(x3=scale(x,center=F))  # inspect the result and summarize what happened
round(apply(x1,2,mean),14)  # inspect the result and summarize what happened
apply(x1,2,sd) # inspect the result and summarize what happened
round(apply(x2,2,mean),14);apply(x2,2,sd) # inspect the result and summarize what happened
round(apply(x3,2,mean),14);apply(x3,2,sd) # inspect the result and summarize what happened
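For comparison, a NumPy sketch of the same column standardization (my own illustration, not from the book's listings):

import numpy as np
x = np.random.randint(1, 101, size=(6, 4)).astype(float)
x1 = (x - x.mean(axis=0)) / x.std(axis=0, ddof=1)  # like scale(x): column means 0, column sds 1
print(np.round(x1.mean(axis=0), 14), x1.std(axis=0, ddof=1))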
airquality  # a data set built into R with missing values (NA)
complete.cases(airquality) # does each row have complete values?
which(complete.cases(airquality)==F) # row numbers of the rows with missing values
sum(complete.cases(airquality)) # number of complete observations
na.omit(airquality) # the data with the missing-value rows removed
# appending, and combining data by column or row: append, cbind, rbind
x=1:10;x[12]=3
(x1=append(x,77,after=5))
cbind(1:5,rnorm(5))
rbind(1:5,rnorm(5))
cbind(1:3,4:6);rbind(1:3,4:6) # removing duplicated rows of a matrix:
(x=rbind(1:5,runif(5),runif(5),1:5,7:11))
x[!duplicated(x),]
unique(x)
# a list can be a collection of any objects (including lists themselves)
z=list(1:3,Tom=c(1:2,a=list("R",letters[1:5]),w="hi!"))
z[[1]];z[[2]]
z$T
z$T$a2
z$T[[3]]
z$T$w
for (i in z){
  print(i)
  for (j in i)
    print(j)
}

y=list(1:5,rnorm(10))
lapply(y, function(x) sum(x^2)) # apply the function to each list element; the output is a list
sapply(y, function(x) sum(x^2)) # same, but the output is simplified to a vector or matrix
x=scan() # 30 customers choosing among 5 brands
3 3 3 4 1 4 2 1 3 2 5 3 1 2 5 2 3 4 2 2 5 3 1 4 2 2 4 3 5 2

barplot(x)  # not the plot the question asks for
table(x) # tabulate
barplot(table(x)) # the correct plot
barplot(table(x)/length(x)) # plot of proportions (same shape as the previous one)
table(x)/length(x)
library(MASS) # load the MASS package
quine  # a data set that comes with MASS
attach(quine) # put the data's variable names into the search path
# the following statements produce various tables from the data
table(Age)
table(Sex, Age); tab=xtabs(~ Sex + Age, quine); unclass(tab)
tapply(Days, Age, mean)
tapply(Days, list(Sex, Age), mean)
detach(quine) # undo attach
# the function below finds the primes up to n from the definition (simple to code but inefficient)
ss=function(n=100){z=2;
  for (i in 2:n)if(any(i%%2:(i-1)==0)==F)z=c(z,i);return(z) }
fix(ss) # edit any function, or write a new one
ss() # the primes up to 100
t1=Sys.time() # record a time point
ss(10000) # the primes up to 10000
Sys.time()-t1 # how much time did it take?
system.time(ss(10000)) # time the evaluation of ss(10000)
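For comparison, a Python sketch of the same trial-division prime search (a hypothetical helper mirroring the R function above, equally simple and equally inefficient):

def ss(n=100):
    return [i for i in range(2, n + 1)
            if all(i % j != 0 for j in range(2, i))]  # keep i if no smaller j divides it

print(ss())   # primes up to 100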
# a function may omit return; the last evaluated value is then returned
# to return several values it is best to use a list
x=seq(-3,3,len=20);y=dnorm(x) # generate the data
w= data.frame(x,y) # combine x and y into the data frame w
par(mfcol=c(2,2)) # make room for four plots
plot(y ~ x, w,main="Normal density")
plot(y ~ x,w,type="l", main="Normal density")
plot(y ~ x,w,type="o", main="Normal density")
plot(y ~ x,w,type="b",main="Normal density")
par(mfcol=c(1,1)) # undo par(mfcol=c(2,2))
plot(1,1,xlim=c(1,7.5),ylim=c(0,5),type="n") # draw an empty frame
# append points after the plot command (use lines() to append lines):
points(1:7,rep(4.5,7),cex=seq(1,4,l=7),col=1:7, pch=0:6)
text(1:7,rep(3.5,7),labels=paste(0:6,letters[1:7]),
cex=seq(1,4,l=7),col=1:7) # add text at the given positions
points(1:7,rep(2,7), pch=(0:6)+7) # plot symbols 7 to 13
text((1:7)+0.25, rep(2,7), paste((0:6)+7)) # add the symbol numbers
points(1:7,rep(1,7), pch=(0:6)+14) # plot symbols 14 to 20
text((1:7)+0.25, rep(1,7), paste((0:6)+14)) # add the symbol numbers
# see "?par" for symbol shapes, sizes, colors and other plotting options
function (x, ...) 
UseMethod("mean")
> methods(mean)
[1] mean,ANY-method          mean,Matrix-method      
[3] mean,sparseMatrix-method mean,sparseVector-method
[5] mean.Date                mean.default            
[7] mean.difftime            mean.POSIXct            
[9] mean.POSIXlt            
see '?methods' for accessing help and source code
A single object matching ‘mean.default’ was found
It was found in the following places
package:base
registered S3 method for mean from namespace base
namespace:base
with value

function (x, trim = 0, na.rm = FALSE, ...) 
{
  if (!is.numeric(x) && !is.complex(x) && !is.logical(x)) {
  warning("argument is not numeric or logical: returning NA")
  return(NA_real_)
  }
  if (na.rm) 
    x <- x[!is.na(x)]
  if (!is.numeric(trim) || length(trim) != 1L) 
    stop("'trim' must be numeric of length one")
  n <- length(x)
  if (trim > 0 && n) {
    if (is.complex(x)) 
      stop("trimmed means are not defined for complex data")
    if (anyNA(x)) 
      return(NA_real_)
    if (trim >= 0.5) 
      return(stats::median(x, na.rm = FALSE))
    lo <- floor(n * trim) + 1
    hi <- n + 1 - lo
    x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi]
  }
  .Internal(mean(x))
}
<bytecode: 0x1198b8e00>
<environment: namespace:base>

Python code

3*' Python is easy!'
import os
print(os.getcwd()) # show the current working directory
os.chdir('D:/Python work') # change the working directory on Windows
os.chdir('/users/Python work') # change the working directory on macOS
import os
from os.path import join
for (dirname, dirs, files) in os.walk('/users/work/'):
    for filename in files:
        if filename.endswith('.csv') :
            thefile = os.path.join(dirname,filename)
            print(thefile,os.path.getsize(thefile))
y=[[1,2],[1,2,3],['ss','swa','stick']]
y[2],y[2][:2],y[1][1:]
x='A poet can survive everything but a misprint.'
x[:10]+x[10:20]+x[20:30]+x[30:40]+x[40:]
x=[[1,2],[3,5,7],'Oscar Wilde']
y=['save','the world']
x.append(y);print(x)
x.extend(y);print(x)
x.pop();print(x)
x.pop(2);print(x)
print(2**0.5,2.0**(1/2),2**(1/2.))
print( 4/3,4./3 )
x=[0,1,4,23]
x.remove(4);print(x)
del x[0];print(x, type(x))
x =(0,12,345,67,8,9,'we','they')
print(type(x),x[-4:-1])
x=range(2,11,2)
print('x={}, list(x)={}'.format(x,list(x)))
print('type of x is {}'.format(type(x)))
data = {'age': 34, 'Children' : [1,2], 1: 'apple','zip': 'NA'}
print(type(data))
print('age=',data['age'])
data['age'] = '99'
data['name'] = 'abc'
print(data)
x=set(['we','you','he','I','they']);y=set(['I','we','us'])
x.add('all');print(x,type(x),len(x))
set.add(x,'none');print(x)
print('set.difference(x,y)=', set.difference(x,y))
print('set.union(x,y)=',set.union(x,y))
print('set.intersection(x,y)=',set.intersection(x,y))
x.remove('none')
print('x=',x,'\n','y=', y)
x=1;y=x;print(x,y,id(x),id(y))
x=2.0;print(x,y,id(x),id(y))
x = [1, 2, 3];y = x;y[0] = 10
print(x,y,id(x),id(y))
x = [1, 2, 3];y = x[:]
print(x,y,id(x)==id(y),id(x[0])==id(y[0]))
print(id(x[1])==id(y[1]),id(x[2])==id(y[2]))
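The slice copy x[:] above is shallow: the inner lists are still shared. A short sketch with the standard copy module shows how to get a fully independent copy:

import copy
x = [[1, 2], [3, 4]]
y = copy.deepcopy(x)   # copies the inner lists as well
y[0][0] = 99
print(x[0][0])         # still 1; with y = x[:] this would have become 99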
def f(x): return x**2-x
g=lambda x: max(x**2,x**3)
print(list(map(lambda x: x**2+1-abs(x), [1.2,5.7,23.6,6])))
print(f(10),g(-3.4))
print(list(range(-10,10,2)),'\n', list(filter(lambda x: x>0,range(-10,10,2))))
from random import *
def RandomHappy():
    if randint(1,100)>50:
        x='happy'
    else:
        x='unhappy'
    if randint(1,100)>50:
        y='happy'
    else:
        y='unhappy'    
    if x=='happy' and y=='happy':
        print('You both are happy')
    elif x!=y:
        print('One of you is happy')
    else:
        print('Both are unhappy')

RandomHappy() # call the function
# Example 1
for line in open("UN.txt"):
    for word in line.split():
        if word.endswith('er'):
            print(word)

# Example 2
with open('UN.txt') as f:
    lines=f.readlines()
lines[1:20]

# Example 3
x='Just a word' 
for i in  x:
    print(i)

# Example 4
for i in  x.split():
    print(i,len(i))

# Example 5
for i in [-1,4,2,27,-34]:
    if i>0 and i<15:
        print(i,i**2+i/.5)
    elif i<0 and abs(i)>5:
        print(abs(i))
    else:
        print(4.5**i)
x = range(5)
y = []
for i in range(len(x)):
    if i % 2 == 0:           # keep even indices (float(i/2)==i/2 is always true under Python 3 division)
        y.append(x[i]**2)
print('y', y)
z=[x[i]**2 for i in range(len(x)) if i % 2 == 0]
print('z',z)
import numpy as np 
x = np.random.randn(25,5)
np.savetxt('tabs.txt',x) # save as a tab-delimited file
np.savetxt('commas.csv',x,delimiter=',') # save as a comma-delimited file (e.g. csv)
u = np.loadtxt('commas.csv',delimiter=',') # read the comma-delimited file
v = np.loadtxt('tabs.txt') # read the tab-delimited file
import numpy as np
y = np.array([[[1,4,7],[2,5,8]],[[3,6,9],[10,100,1000]]])
print(y)
print(np.shape(y))
print(type(y),y.dtype)
print(y[1,0,0],y[0,1,:])
import numpy as np
u = [0, 1, 2];v=[5,2,7]
u=np.array(u);v=np.array(v)
print(u.shape,v.shape)
print(u+v,u/v,np.dot(u,v))
u = [0.0, 1, 2];v=[5,2,7]
u=np.array(u);v=np.array(v)
print(u+v,u/v)
print(v/3, v/3.,v/float(3),(v-2.5)**2)
x=np.arange(3,5,.5)
y=np.arange(4)
print(x,y,x+y,x*y) # vector arithmetic
print(x[:,np.newaxis].dot(y[np.newaxis,:]))
print(np.shape(x),np.shape(y))
print(np.shape(x[:,np.newaxis]),np.shape(y[np.newaxis,:]))
print(np.dot(x.reshape(4,1),y.reshape(1,4)))
x.shape=4,1;y.shape=1,4
print(x.dot(y))
print(np.dot(x,y))
print(np.dot(x.T,y.T), x.T.dot(y.T)) # x.T is the transpose of x
print(x.reshape(2,2).dot(np.reshape(y,(2,2))))
x=[[2,3],[7,5]]
z = np.asmatrix(x)
print(z, type(z))
print(z.transpose() * z )
print(z.T*z== z.T.dot(z),z.transpose()*z==z.T*z)
print(np.ndim(z),z.shape)
x = np.array([[1.0,2.0],[3.0,4.0]])
y = np.array([[5.0,6.0],[7.0,8.0]])
z = np.concatenate((x,y),axis = 0)
z1 = np.concatenate((x,y),axis = 1)
print(z,"\n" ,z1,"\n",z.transpose()*z1)
z = np.vstack((x,y)) # Same as z = concatenate((x,y),axis = 0)
z1 = np.hstack((x,y))
print(z,"\n",z1)
print(np.ones((2,2,3)),np.zeros((2,2,3)),np.empty((2,2,3)))
x=np.random.randn(20).reshape(2,2,5);print(x)
x=np.random.randn(20).reshape(4,5)
x[0,:]=np.pi
print(x)
x[0:2,0:2]=0
print(x)
x[:,4]=np.arange(4)
print(x)
x[1:3,2:4]=np.array([[1,2],[3,4]])
print(x)
print(np.c_[0:10:2],np.c_[0:10:2].shape)
print(np.c_[1:5:4j],np.c_[1:5:4j].shape)
print(np.r_[1:5:4j],np.r_[1:5:4j].shape)
print(np.ogrid[0:3,0:2:.5],'\n',np.mgrid[0:3,0:2:.5])
print(np.ogrid[0:3:3j,0:2:5j],'\n',np.mgrid[0:3:3j,0:2:5j])
x = np.reshape(np.arange(25.0),(5,5))
print('x=\n',x)
print('np.ix_(np.arange(2,4),[0,1,2])=\n',np.ix_(np.arange(2,4),[0,1,2]))
print('ix_([2,3],[0,1,2])=\n',np.ix_([2,3],[0,1,2]))
print('x[np.ix_(np.arange(2,4),[0,1,2])]=\n',
x[np.ix_(np.arange(2,4),[0,1,2])]) # Rows 2 & 3, cols 0, 1 and 2
print('x[ix_([3,0],[1,4,2])]=\n', x[np.ix_([3,0],[1,4,2])])
print('x[2:4,:3]=\n',x[2:4,:3])# Same, standard slice
print('x[ix_([0,3],[0,1,4])]=\n',x[np.ix_([0,3],[0,1,4])])
x = np.random.randn(3)
print('np.round(x,2)={},np.round(x, 4)={}'.format(np.round(x,2),np.round(x, 4)))
print('np.around(np.pi,4)=', np.around(np.pi,4))
print('np.around(x,3)=', np.around(x,3))

print('x.round(3)={},np.floor(x)={}'.format(x.round(3),np.floor(x)))
print('np.ceil(x)={}, np.sum(x)={},'.format(np.ceil(x), np.sum(x)))
print('np.cumsum(x)={},np.prod(x)={}'.format(np.cumsum(x),np.prod(x)))
print('np.cumprod(x)={},np.diff(x)={}'.format(np.cumprod(x),np.diff(x)))

x= np.random.randn(3,4)
print('x={},np.diff(x)={}'.format( x,np.diff(x)))
print('np.diff(x,axis=0)=',np.diff(x,axis=0))
print('np.diff(x,axis=1)=',np.diff(x,axis=1))
print('np.diff(x,2,1)=', np.diff(x,2,1))
print('np.sign(x)={}, np.exp(x)={}'.format(np.sign(x),np.exp(x)))
print('np.log(np.abs(x))={},x.max()={}'.format(np.log(np.abs(x)),x.max()))
print(',x.max(1)={},,np.argmin(x,0)={}'.format(x.max(1),np.argmin(x,0)))
print('np.max(x,0)={},np.argmax(x,0)={}'.format(np.max(x,0),np.argmax(x,0)))
print('x.argmin(0)={},x[x.argmax(1)]={}'.format(x.argmin(0),x[:,x.argmax(1)]))
x = np.repeat(np.random.randn(3),(2))
print(x)
print(np.unique(x))
y,ind = (np.unique(x, True))
print('y={},ind={},x[ind]={},x.flat[ind]={}'.format(y,ind,x[ind],x.flat[ind]))

x = np.arange(10.0)
y = np.arange(5.0,15.0)
print('np.in1d(x,y)=', np.in1d(x,y))
print('np.intersect1d(x,y)=', np.intersect1d(x,y))
print('np.union1d(x,y)=', np.union1d(x,y))
print('np.setdiff1d(x,y)=' , np.setdiff1d(x,y))
print('np.setxor1d(x,y)=',np.setxor1d(x,y))
x=np.random.randn(4,2)
print(x,'\n','\n',np.sort(x,1),'\n',np.sort(x,axis=None))
print('np.sort(x,0)',np.sort(x,0))
print('x.sort(0)',x.sort(axis=0) ) # note: ndarray.sort works in place and returns None
x=np.random.randn(3)
x[0]=np.nan # assign a missing value
print('x{}\nsum(x)={}\nnp.nansum(x)={}'.format(x,sum(x),np.nansum(x)))
print('np.nansum(x)/np.nanmax(x)=', np.nansum(x)/np.nanmax(x))
x = np.reshape(np.arange(24),(4,6))
y = np.array(np.vsplit(x,2))
z = np.array(np.hsplit(x,3))
print('x={}\ny={}\nz={}'.format(x,y,z))
print(x.shape,y.shape,z.shape)
print(np.delete(x,1,axis=0)) # delete the row of x with index 1
print(np.delete(x,[2,3],axis=1)) # delete the columns of x with indices 2 and 3
print(x.flat[:], x.flat[:4]) # flatten x into a vector
x = np.array([[10,2,7],[3,5,4],[45,76,100],[30,2,0]])#same as R
y=np.diag(x) # diagonal elements
print('x={}\ny={}'.format(x,y))
print('np.diag(y)=\n',np.diag(y)) # build a diagonal matrix from the vector
print('np.triu(x)=\n' ,np.triu(x)) # upper triangular part of x
print('np.tril(x)=\n',np.tril(x)) # lower triangular part of x
print(np.random.randn(2,3)) # random standard normal 2x3 matrix
# random normal matrix with given matrices of means and standard deviations:
print(np.random.normal([[1,0,3],[3,2,1]],[[1,1,2],[2,1,1]]))
print(np.random.normal((2,3),(3,1))) # 2 normal numbers with means 2,3 and sds 3,1
print(np.random.uniform(2,3)) # a uniform U[2,3] random number
np.random.seed(1010) # set the random seed
print(np.random.random(10)) # 10 random numbers in (0,1)
print(np.random.randint(20,100)) # a random integer between 20 and 100
print(np.random.randint(20,100,10)) # 10 random integers between 20 and 100
print(np.random.choice(np.arange(-10,10,3))) # pick one element of the sequence at random
x=np.arange(10);np.random.shuffle(x);print(x)
import numpy as np
x=np.random.randn(3,4)
print(x)
u,s,v= np.linalg.svd(x) # singular value decomposition
Z=np.array([[1,-2j],[2j,5]])
print('Cholesky:', np.linalg.cholesky(Z)) # Cholesky decomposition
print('x={}\nu={}\ndiag(s)={}\nv={}'.format(x,u,np.diag(s),v))
print(np.linalg.cond(x)) # condition number
x=np.random.randn(3,3)
print(np.linalg.slogdet(x)) # log of the determinant (and its sign: 1. positive, -1. negative)
print(np.linalg.det(x)) # determinant
y=np.random.randn(3)
print(np.linalg.solve(x,y)) # solve a system of linear equations
X = np.random.randn(100,2)
y = np.random.randn(100)
beta, SSR, rank, sv= np.linalg.lstsq(X,y,rcond=None) # least squares
print('beta={}\nSSR={}\nrank={}\nsv={}'.format(beta, SSR, rank, sv))
# eigenvalue problem for the square matrix cov(x):
va,ve=np.linalg.eig(np.cov(x))
print('eigen value={}\neigen vectors={}'.format(va,ve))
x = np.array([[1,.5],[.5,1]])
print('x inverse=', np.linalg.inv(x)) # matrix inverse
x = np.asmatrix(x)
print('x inverse=', np.asmatrix(x)**(-1)) # note the restriction on using **(-1): matrix objects only
z = np.kron(np.eye(3),np.ones((2,2))) # Kronecker product of an identity matrix and an all-ones matrix
print('z={},z.shape={}'.format(z,z.shape))
print('trace(Z)={}, rank(Z)={}'.format(np.trace(z),np.linalg.matrix_rank(z)))
import datetime as dt
yr, mo, dd = 2016, 8, 30
print('dt.date(yr, mo, dd)=',dt.date(yr, mo, dd))
hr, mm, ss, ms= 10, 32, 10, 11
print('dt.time(hr, mm, ss, ms)=',dt.time(hr, mm, ss, ms))
print(dt.datetime(yr, mo, dd, hr, mm, ss, ms))
d1 = dt.datetime(yr, mo, dd, hr, mm, ss, ms)
d2 = dt.datetime(yr + 1, mo, dd, hr, mm, ss, ms)
print('d2-d1', d2-d1 )
print(np.datetime64('2016'))
print(np.datetime64('2016-08'))
print(np.datetime64('2016-08-30'))
print(np.datetime64('2016-08-30T12:00')) # Time
print(np.datetime64('2016-08-30T12:00:01')) # Seconds
print(np.datetime64('2016-08-30T12:00:01.123456789')) # Nanoseconds
print(np.datetime64('2016-08-30T00','h'))
print(np.datetime64('2016-08-30T00','s'))
print(np.datetime64('2016-08-30T00','ms'))
print(np.datetime64('2016-08-30','W')) # note: uppercase W for weeks
dates = np.array(['2016-09-01','2017-09-02'],dtype='datetime64')
print(dates)
print(dates[0])
import pandas as pd
np.random.seed(1010)
w=pd.DataFrame(np.random.randn(10,5),columns=['X1','X2','X3','X4','Y'])
v=pd.DataFrame(np.random.randn(20,4),columns=['X1','X2','X3','Y'])
w.to_csv('Test.csv',index=False)
writer=pd.ExcelWriter('Test1.xlsx')
v.to_excel(writer,'sheet1',index=False)
w.to_excel(writer,'sheet2')
W=pd.read_csv('Test.csv') 
V=pd.read_excel('Test1.xlsx','sheet2')
U=pd.read_table('Test.csv',sep=',')
print('V.head()=\n',V.head()) # first 5 rows
print('U.head(2)=\n',U.head(2)) # first two rows
print('U.tail(3)=\n',U.tail(3)) # last three rows
print('U.size={}\nU.columns={}'.format(U.size, U.columns))
U.describe() # simple summary statistics
diamonds=pd.read_csv("diamonds.csv")
print(diamonds.head())
print(diamonds.describe())
print('diamonds.columns=',diamonds.columns)
print('sample size=', len(diamonds)) # sample size
cut=diamonds.groupby("cut") # group by the levels of the variable cut
print('cut.median()=\n',cut.median())
print('Cross table=\n',pd.crosstab(diamonds.cut, diamonds.color))
# entering the next line makes plots appear inline between the outputs (not as standalone figures)
#%matplotlib inline 
import matplotlib.pyplot as plt 
y = np.random.randn(100)
plt.plot(y)
plt.plot(y,'g--')
plt.title('Random number')
plt.xlabel('Index')
plt.ylabel('y')
plt.show()
import scipy.stats as stats
fig = plt.figure(figsize=(15,10))
ax = fig.add_subplot(2, 3, 1)#2x3图形阵
y = 50*np.exp(.0004 + np.cumsum(.01*np.random.randn(100)))
plt.plot(y)
plt.xlabel(r'time ($\tau$)')  # raw string, so that \t is not read as a tab
plt.ylabel('Price',fontsize=16)
plt.title(r'Random walk: $d\ln p_t = \mu dt + \sigma dW_t$',fontsize=16)

y = np.random.rand(5)
x = np.arange(5)
ax = fig.add_subplot(2, 3, 5)
colors = ['#FF0000','#FFFF00','#00FF00','#00FFFF','#0000FF']
plt.barh(x, y, height = 0.5, color = colors, \
edgecolor = '#000000', linewidth = 5)
ax.set_title('Bar plot')

y = np.random.rand(5)
y = y / sum(y)
y[y < .05] = .05
ax = fig.add_subplot(2, 3, 3)
plt.pie(y)
ax.set_title('Pie plot')

z = np.random.randn(100, 2)
z[:, 1] = 0.5 * z[:, 0] + np.sqrt(0.5) * z[:, 1]
x = z[:, 0]
y = z[:, 1]
ax = fig.add_subplot(2, 3, 4)
plt.scatter(x, y)
ax.set_title('Scatter plot')

ax = fig.add_subplot(2, 3, 2)
x = np.random.randn(100)
ax.hist(x, bins=30, label='Empirical')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
pdfx = np.linspace(xlim[0], xlim[1], 200)
pdfy = stats.norm.pdf(pdfx)
pdfy = pdfy / pdfy.max() * ylim[1]
plt.plot(pdfx, pdfy,'r-',label='PDF')
ax.set_ylim((ylim[0], 1.2 * ylim[1]))
plt.legend()
plt.title('Histogram')

ax = fig.add_subplot(2, 3, 6)
x = np.cumsum(np.random.randn(100,4), axis = 0)
plt.plot(x[:,0],'b-',label = 'Series 1')
plt.plot(x[:,1],'g-.',label = 'Series 2')
plt.plot(x[:,2],'r:',label = 'Series 3')
plt.plot(x[:,3],'h--',label = 'Series 4')
plt.legend()
plt.title('Random lines')
plt.show()

Chapter 4 Code

for (s in 0:8) curve(dbeta(x,s+1,8-s+1),0,1,add=s!=0,xlab='',ylab='')
library(hdrcde)
x <- rbeta(100000,2,27)
hdr.den(x)
library(LearnBayes) # triplot() comes from the LearnBayes package
triplot(c(2,18),c(0,9),where='top')
import numpy as np
import pymc3 as pm  # pm.stats.hpd below computes the HPD intervals
from scipy.stats import beta
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
a, b = 2, 27
rb = beta.rvs(a,b,size=10000)
fig, ax = plt.subplots(figsize=(15,5))
x=np.linspace(0,0.4,100)
rv = beta(a, b)
plt.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
plt.ylim(ymin=0)
alpha=[0.01,0.05,0.5]
col=['y','b','r']
for i in range(3):
    x1,x2=pm.stats.hpd(rb,alpha[i])
    ix=np.linspace(x1,x2)
    iy = rv.pdf(ix)
    verts = [(x1, 0)] + list(zip(ix, iy)) + [(x2, 0)]
    poly = Polygon(verts, facecolor=col[i], edgecolor='0.5')
    ax.add_patch(poly)
plt.ylabel('Density')
plt.title('50%, 95%, 99% Highest Posterior Density Regions of Beta(2,27)')    
plt.show()
data(rat, package = "BayesGOF") 
log_l=function(alpha,beta,df=rat){
  L = 0
  for (i in 1:nrow(df))
    L= L+ lgamma(alpha+beta) - lgamma(alpha) - 
    lgamma(beta) + lgamma(alpha+df$y[i]) +
    lgamma(beta+df$n[i]-df$y[i]) - lgamma(alpha+beta+df$n[i])
  return(L)
}
log_p=function(A,B) {-5/2*log(A+B)}
tr2beta=function(x,y){exp(y)/(exp(x)+1)}
tr2alpha=function(x,y){exp(x)*tr2beta(x,y)}
library(pracma)
mg=meshgrid(seq(-2.3,-1.3,0.01),seq(1,5,0.01))
X=t(mg[[1]])
Z=t(mg[[2]])
df=data.frame(X=as.vector(X),Z=as.vector(Z))
df$alpha = tr2alpha(df$X,df$Z)
df$beta = tr2beta(df$X,df$Z)
df$log_post = log_p(df$alpha,df$beta) + log_l(df$alpha,df$beta, rat)
df$log_jacob = log(df$alpha) + log(df$beta)
df$transformed = df$log_post+df$log_jacob
df$exp_trans = exp(df$transformed - max(df$transformed))
df$normed_exp_trans = df$exp_trans/sum(df$exp_trans)
library(ggplot2)
p1=ggplot(df, aes(X, Z, z = exp_trans))+
  geom_contour()+
  xlab(expression(log(alpha/beta)))+ylab(expression(log(alpha+beta)))+
  annotate('text', x = -1.8+.12, y = 2.74+.42, 
    label = expression(paste(log(alpha/beta)==-1.8,'; ',
    log(alpha+beta)==2.74)), color="black", size=4 , angle=45)

I=sample(1:nrow(df),1000,prob=df$exp_trans,rep=T)

p2=ggplot(df[I,], aes(X,Z))+
  geom_point()+
  xlab(expression(log(alpha/beta)))+ylab(expression(log(alpha+beta)))
library(gridExtra)
grid.arrange(p1, p2, ncol=2)
df[order(df$exp_trans,decreasing = T),1:2][1,]
ea=sum(df$alpha*df$normed_exp_trans) # 2.362943
eb=sum(df$beta*df$normed_exp_trans) # 14.26711
(ez=log(ea+eb)) #2.811212
(ex=log(ea/eb)) #-1.798049
K=rep(1:nrow(rat),ceiling(nrow(df)/nrow(rat)))[1:nrow(df)]
set.seed(1010)
S=sample(1:nrow(df),50000,prob=df$exp_trans,rep=T)
sl=list()
for (i in 1:70){
  KS=(K[S]==i)
  sl[[i]]=rbeta(sum(KS),df$alpha[S[KS]]+
  rat$y[i],df$beta[S[KS]]+rat$n[i]-rat$y[i])
}
layout(matrix(c(1,3,2,3),nrow=2,byrow=T))
hist(sl[[1]],prob = T,main=expression(paste('Histogram of ',theta[1] )),xlab='')
lines(density(sl[[1]]))
hist(sl[[70]],prob = T,main=expression(paste('Histogram of ',theta[70] )),xlab='')
lines(density(sl[[70]]))

plot(0,0,type='n',xlim=c(0,.4),ylim=range(sl[[70]])+c(-.1,0),
  ylab=expression(paste('95% posterior interval of ',theta[i])),
  xlab=expression(y[i]/n[i]),cex=.1)
for (i in 1:nrow(rat)){
  points(rep(rat$y[i]/rat$n[i],length(sl[[i]])),sl[[i]],cex=.1)
  q=quantile(sl[[i]],c(.25,.5,.925))
  arrows(x0=rat$y[i]/rat$n[i],y0=q[1],y1=q[3],col=2,lwd=2,
    length = 0.03,angle = 90,code = 3)
  points(rat$y[i]/rat$n[i],q[2],cex=1,col=4,pch=16)
}
abline(0,1,lty=2)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.special import gammaln
rat=pd.read_csv("rat.csv")
y=np.array(rat['y']);n=np.array(rat['n'])
def log_l(alpha,beta,y,n):
    L = 0
    for Y,N in zip(y,n):
        L+= gammaln(alpha+beta) - gammaln(alpha) - gammaln(beta) +\
        gammaln(alpha+Y) +gammaln(beta+N-Y) - gammaln(alpha+beta+N)
    return L

def log_p(A,B): return -5/2*np.log(A+B)

def tr2beta(x,y): return np.exp(y)/(np.exp(x)+1)

def tr2alpha(x,y): return np.exp(x)*tr2beta(x,y) # calls tr2beta defined above
X,Z = np.meshgrid(np.arange(-2.3,-1.3,0.01),np.arange(1,5,0.01))
param_space = np.c_[X.ravel(), Z.ravel()]
df= pd.DataFrame(param_space, columns=['X','Z'])
df['alpha']= tr2alpha(df.X,df.Z)
df['beta'] = tr2beta(df.X,df.Z)
df['log_post'] = log_p(df.alpha,df.beta) + log_l(df.alpha,df.beta, y,n)
df['log_jacob'] = np.log(df.alpha) + np.log(df.beta)
df['transformed'] = df.log_post+df.log_jacob
df['exp_trans'] = np.exp(df.transformed - df.transformed.max())
df['normed_exp_trans'] = df.exp_trans/df.exp_trans.sum()
z = df.set_index(['X','Z']).exp_trans.unstack().values.T
fig=plt.figure(figsize = (14,7))
plt.subplot(1,2,1)
plt.contourf(X,Z, z)
plt.xlim(-2.1, -1.55)  
plt.ylim(2.0, 3.6)  
plt.xlabel(r'$\log(\alpha/\beta)$', fontsize = 16)
plt.ylabel(r'$\log(\alpha+\beta)$', fontsize = 16)
I=np.random.choice(np.arange(len(df)), size=1000, replace=True,\
  p=df['normed_exp_trans'])
plt.subplot(1,2,2)
plt.plot(df['X'][I],df['Z'][I],'bo')
plt.xlabel(r'$\log(\alpha/\beta)$', fontsize = 16)
plt.ylabel(r'$\log(\alpha+\beta)$', fontsize = 16)
plt.show()
import math

K=[]
for i in np.arange(math.ceil(len(df)/len(rat))):
    K.extend(np.arange(len(rat)))
K=K[:len(df)]
K=np.array(K)

np.random.seed(1010)
S=np.random.choice(np.arange(len(df)), size=50000, p=df['normed_exp_trans'])

sl=[]
for i in range(len(rat)):
    KS=(K[S]==i)
    sl.append(np.random.beta(df['alpha'][S[KS]]+y[i],df['beta'][S[KS]]+n[i]-y[i],\
        size=np.sum(KS)))
fig = plt.figure(figsize=(14,7))

ax1 = plt.subplot(221)
plt.hist(sl[0])
plt.title(r'Histogram of $\theta_{1}$')
ax2 = plt.subplot(223)
plt.hist(sl[69])
plt.title(r'Histogram of $\theta_{70}$')
ax3 = plt.subplot(122)
seg=[]
for i in range(len(y)):
    q=np.quantile(sl[i],[.25,.5,.925])
    ax3.plot(np.repeat(y[i]/n[i],len(sl[i])),sl[i],'b.')
    seg.extend([(y[i]/n[i],y[i]/n[i]),(q[0],q[2])])
ax3.plot(*seg,linewidth=4.0)
for i in range(len(y)):
    q=np.quantile(sl[i],[.25,.5,.925])
    ax3.plot(np.repeat(y[i]/n[i],3),q,'ko')
plt.xlabel(r'$y_i/n_i$', fontsize = 16)
plt.ylabel(r'95% posterior interval of $\theta_i$', fontsize = 16)

plt.tight_layout();plt.show()

Chapter 5 Code

library(latex2exp)
n=c(1,5,10,50);y=c(7,40, 60, 370)
for(i in 4:1)
  curve(dgamma(x,1+y[i],1+n[i]),0,10,add=i!=4,ylab="",xlab="",col=i,lty=i)
legend(0,1,legend=TeX(sprintf("$\\sum^{%d}_{i=1} y_i=%d$", n,y)),col=1:4,lty=1:4) 
y=c(6,7,5,4,5,14,10,10,7,8);
sumy=sum(y)
n=c(0,10);sy=c(0,sumy)
for(i in 1:2)
curve(dgamma(x,1+sy[i],1+n[i]),0,10,add=i!=1,ylab="",xlab="",col=i,lty=i) 
legend(4,1,legend=c("Prior: Beta(1,1)","Posterior: Beta(77,11) "),col=1:2,lty=1:2) 
set.seed(1010)
theta <- rgamma(9999,sumy+1,10+1)
y.pred=vector()
for (i in 1:9999){
  y.pred[i] = rpois(1,theta[i])
}

par(mfrow=c(1,3))
hist(y,col=4,xlim=c(0,20),breaks=10,prob=T)
hist(theta,col=4,xlim=c(0,20),prob=T,ylim=c(0,.6))
lines(density(theta))
hist(y.pred,col=4,xlim=c(0,20),prob=T)
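For comparison, a Python sketch of the same gamma-Poisson posterior predictive draw (assuming numpy; Gamma with shape 77 and rate 11 is the posterior computed above):

import numpy as np
rng = np.random.default_rng(1010)
theta = rng.gamma(77, 1/11, size=9999)   # numpy's gamma takes shape and scale = 1/rate
y_pred = rng.poisson(theta)              # one predictive draw per posterior draw of theta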
import numpy as np
import pymc3 as pm  # pm.stats.hpd below computes the HPD intervals
from scipy.stats import gamma
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon

rb = gamma.rvs(77,scale=1/11,size=10000) # Gamma(shape 77, rate 11); scipy's second positional argument is loc, not the rate
fig, ax = plt.subplots(figsize=(15,5))
x=np.linspace(3,12,100) # the posterior mean is 77/11 = 7, so plot around it
rv = gamma(77, scale=1/11)
plt.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
plt.ylim(ymin=0)
alpha=[0.01,0.05,0.5]
col=['y','b','r']
for i in range(3):
    x1,x2=pm.stats.hpd(rb,alpha[i])
    ix=np.linspace(x1,x2)
    iy = rv.pdf(ix)
    verts = [(x1, 0)] + list(zip(ix, iy)) + [(x2, 0)]
    poly = Polygon(verts, facecolor=col[i], edgecolor='0.5')
    ax.add_patch(poly)
plt.ylabel('Density')
plt.title('50%, 95%, 99% Highest Posterior Density Regions of Gamma(77,11)')    
plt.show()

Chapter 6 Code

w=read.csv("THM.csv",na.strings = "N/A")
x=w[,2];mx=mean(x);n=length(x)
t=(sd(x))^(-2);m0=60;t0=1 
mu=(t0*m0+t*sum(x))/(t0+n*t) # posterior mean
tau=t0+n*t # posterior precision
library(hdrcde)
layout(t(1))
y <- rnorm(100000,mu,1/sqrt(tau)) # rnorm takes a standard deviation, so convert the precision
hdr.den(y)
set.seed(1010)
theta <- rnorm(9999,mu,1/sqrt(tau)) # sd = 1/sqrt(posterior precision)
y.pred=vector()
for (i in 1:9999){
y.pred[i] = rnorm(1,theta[i],sd(x))
}

par(mfrow=c(1,3))
hist(x,col=4,breaks=20,prob=T)
hist(theta,col=4,breaks=20,prob=T)
lines(density(theta))
hist(y.pred,col=4,breaks=20,prob=T)
import numpy as np
import pymc3 as pm  # pm.stats.hpd below computes the HPD intervals
from scipy.stats import norm
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
rn = norm.rvs(loc=55.17602,scale=0.9371718,size=10000)
fig, ax = plt.subplots(figsize=(15,5))
x=np.linspace(50,60,100)
rv = norm(loc=55.17602,scale=0.9371718)
plt.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
plt.ylim(ymin=0)
alpha=[0.01,0.05,0.5]
col=['y','b','r']
for i in range(3):
    x1,x2=pm.stats.hpd(rn,alpha[i])
    ix=np.linspace(x1,x2)
    iy = rv.pdf(ix)
    verts = [(x1, 0)] + list(zip(ix, iy)) + [(x2, 0)]
    poly = Polygon(verts, facecolor=col[i], edgecolor='0.5')
    ax.add_patch(poly)
plt.ylabel('Density')
plt.title('50%, 95%, 99% Highest Posterior Density Regions of Norm(55.2,0.94)')    
plt.show()
w=read.csv("THM.csv",na.strings = "N/A") 
x=w[,2];par = c(60, 0.05, 1, 1);par2=c(20.42652,32.05,17, 3619.50508)
#install.packages("nclbayes", repos="http://R-Forge.R-project.org")
library(nclbayes)
mu=seq(0,100,len=1000)
tau=seq(0,4,len=1000)
mu2=seq(13,27,len=1000)
tau2=seq(0.002,.008,len=1000)
layout(t(1:2))
NGacontour(mu,tau,par[1],par[2],par[3],par[4])
NGacontour(mu2,tau2,par2[1],par2[2],par2[3],par2[4])
par(mar=c(4,4,1,1))
rnormgam=function(n, mu, lambda, alpha, beta,seed=1010) {
  set.seed(seed)
  tau=rgamma(n, alpha, beta)
  x=rnorm(n, mu, sqrt(1/(lambda*tau)))
  data.frame(tau = tau, x = x)
}
c=rnormgam(1000,par[1],par[2],par[3],par[4])
c2=rnormgam(1000,par2[1],par2[2],par2[3],par2[4])
plot(tau~x,c,pch=16,col=4,main="Sampling from prior",ylab = expression(tau))
plot(tau~x,c2,pch=16,col=4,main="Sampling from posterior",ylab = expression(tau))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import gamma
from scipy.stats import norm
def parp(par,x):
    v=[]
    v.append((par[0]*par[1]+np.sum(x))/(par[1]+len(x)))
    v.append(par[1]+len(x))
    v.append(par[2]+len(x)/2)
    v.append(par[3]+0.5*(np.sum((x-np.mean(x))**2)+len(x)*par[1]/\
    (par[1]+len(x))*(np.mean(x)-par[0])**2))
    return(v)
def rnormgam(n, mu, lam, alpha, beta,seed=1010):
    np.random.seed(seed)
    tau=gamma.rvs(a=alpha, scale=1/beta,size=n)
    x=norm.rvs(loc=mu, scale=np.sqrt(1/(lam*tau)),size=n)
    df= pd.DataFrame(np.c_[tau.ravel(), x.ravel()], columns=['tau','x'])
    return(df)
x=pd.read_csv("THM.csv")['Sample_result'] 
par = [60, 0.05, 1, 1]
par2=parp(par,x)
c=rnormgam(1000,par[0],par[1],par[2],par[3])
c2=rnormgam(1000,par2[0],par2[1],par2[2],par2[3])
plt.figure(figsize=(10,3))
plt.subplot(1,2, 1) 
plt.scatter(c['x'],c['tau'])
plt.title('Sample from prior')

plt.subplot(1,2,2)
plt.ylim(0.0015, 0.01)  
plt.scatter(c2['x'],c2['tau'])
plt.title('Sample from posterior')
plt.show()

Chapter 7 Code

P=matrix(c(.5,0,.5,0,.8,.2,.6,.2,.2),3,3,byrow=T)
p0=c(.3,.1,.6) # any other probability vector would work too
p1=p0
for (i in 1:55)
p1=p1%*%P  
p1
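For comparison, a Python sketch of the same power iteration toward the stationary distribution (assumes numpy):

import numpy as np
P = np.array([[.5, 0, .5], [0, .8, .2], [.6, .2, .2]])
p = np.array([.3, .1, .6])
for _ in range(55):
    p = p @ P          # one step of the chain's marginal distribution
print(p)               # approximately the stationary distribution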
M=20000;k=floor(M/2)
X = NULL
x = 1
set.seed(1010)
for (i in 1:M) {
  u = rnorm(1,x,4) # random-walk proposal centered at the current state, so the symmetric ratio below is valid
  alpha = dgamma(u,5,5)/dgamma(x,5,5)
  if (runif(1) < min(alpha, 1)) x = u
  X[i] = x}
layout(t(1:2))
hist(X[-(1:k)],20,prob=TRUE,xlim = c(0,8),xlab='X',ylab='',main="")
curve(dgamma(x,5,5),from=0,to=8,add=TRUE,col=2,lwd=3)
legend('topright',c('true density'),col=2,lty=1)
plot(1:k,X[1:k],type='l',col=2,lty=2,ylab='X',xlab="index",xlim=c(1,M),ylim=range(X))
lines((k+1):M,X[(k+1):M])
legend('top',c('after burn-in','burn-in'),lty=1:2,col=1:2,cex=.6)
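A Python sketch of the same random-walk Metropolis sampler for the Gamma(5, rate 5) target (assumes numpy and scipy; not from the book's own listings):

import numpy as np
from scipy.stats import gamma
rng = np.random.default_rng(1010)
M = 20000
X = np.empty(M)
x = 1.0
for i in range(M):
    u = rng.normal(x, 4)                                             # symmetric random-walk proposal
    alpha = gamma.pdf(u, 5, scale=1/5) / gamma.pdf(x, 5, scale=1/5)  # target density ratio
    if rng.uniform() < min(alpha, 1):
        x = u
    X[i] = x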
M=20000;k=floor(M/2)
set.seed(1010)
X=vector()
x = 1 # re-initialize the chain
for (i in 1:M) {
  ch<-rchisq(1,3)
  alpha <- (dgamma(ch,5,5)/dgamma(x,5,5))*(dchisq(x,3)/dchisq(ch,3)) # independence sampler: the Hastings ratio divides by the proposal density at the candidate
  if (runif(1) < min(alpha, 1)) x = ch
  X[i] = x}
par(mfrow=c(1,2))
hist(X[-(1:k)],15,prob=TRUE,xlim = c(0,8),xlab='X',ylab='',ylim=c(0,1),main="")
curve(dgamma(x,5,5),from=0,to=8,add=TRUE,col=2,lwd=3)
legend('topright',c('true density'),col=2,lty=1)
plot(1:k,X[1:k],type='l',col=2,lty=2,ylab='X',xlab="index",xlim=c(1,M),ylim=range(X))
lines((k+1):M,X[(k+1):M])
legend('top',c('after burn-in','burn-in'),lty=1:2,col=1:2,cex=.6)
n = 70; xbar = 8; s2  = 4
M=99999; k = 5000  
mu = vector() -> tau # create two empty vectors, mu and tau
tau[1] = 1  
set.seed(1010)
for(i in 2:M) {   
  mu[i] = rnorm(n = 1, mean = xbar, sd = sqrt(1/(n*tau[i-1])))    
  tau[i] = rgamma(n = 1, shape = n/2, scale = 2/((n-1)*s2+n*(mu[i]-xbar)^2))
}
par(mfrow=c(1,2))
hist(mu[-(1:k)])
hist(tau[-(1:k)])
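For comparison, a Python sketch of the same two-block Gibbs sampler (assumes numpy; numpy's gamma uses the same shape/scale parameterization as the R call above):

import numpy as np
n, xbar, s2, M = 70, 8, 4, 99999
rng = np.random.default_rng(1010)
mu = np.empty(M); tau = np.empty(M)
mu[0] = xbar; tau[0] = 1
for i in range(1, M):
    mu[i] = rng.normal(xbar, np.sqrt(1 / (n * tau[i - 1])))
    tau[i] = rng.gamma(n / 2, 2 / ((n - 1) * s2 + n * (mu[i] - xbar) ** 2))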
Hmc=function(f, delta, q0, L, M){
  q=vector()
  U=vector()
  q[1]=q0
  U[1]=f(q0)
  for (i in 2:M){
    p=rnorm(1)
    K = sum(p^2)/2

    pp=p
    pq=q[i-1]
    pp = pp - 0.5 * delta * grad(f, pq) # initial half-step for the momentum
    for (j in 1:(L - 1)) {
      pq = pq + delta * pp
      if (j != L - 1) pp = pp - delta * grad(f, pq) # full momentum steps between position updates
    }
    pp = pp - 0.5 * delta * grad(f, pq) # final half-step
    pp=-pp # negate the momentum so the proposal is symmetric

    pU = f(pq)
    pK = sum(pp^2)/2
    if (runif(1) < exp(K - pK + U[i - 1] - pU)) {
      q[i] = pq
      U[i] = pU
    }
    else {
      q[i] = q[i - 1]
      U[i] = U[i - 1]
    }
  } 
  return(list(chain = q, U = U))
}

library(numDeriv)
set.seed(101010)
f1= function(x){(6/2)*log(x)+4/(2*x)} # potential energy: minus the log of the inverse-gamma kernel x^(-6/2)*exp(-4/(2*x))
res=Hmc(f1,delta=.3,q0=19,L=16,M=5000)
hist(res[[1]])

Chapter 8 Code

install.packages("rstan",repos="https://cloud.r-project.org/", dependencies=TRUE)
pkgbuild::has_build_tools(debug = TRUE)
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
  M<-file.path(dotR,ifelse(.Platform$OS.type=="windows","Makevars.win","Makevars"))
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native",
  if( grepl("^darwin", R.version$os)) 
    "CXX14FLAGS += -arch x86_64 -ftemplate-depth-256" 
  else 
    if (.Platform$OS.type == "windows") 
      "CXX11FLAGS=-O3 -march=native -mtune=native" 
    else
      "CXX14FLAGS += -fPIC",
  file = M, sep = "\n", append = TRUE)
dat=scan("toy.txt")
toy_data=list(Y=dat,N=length(dat))
toy="data{
  int N;
  vector [N] Y;
}
parameters{
  real <lower=0> lambda;
}
model{
  lambda ~ gamma (1,1);
  Y ~ exponential(lambda);
}
generated quantities{
  real pred;
  pred = exponential_rng(lambda);
}
"
library(rstan)
fit=stan(model_code = toy,data=toy_data,
                    warmup=1000,iter=2000,chain=2)
chain=as.matrix(fit)
acf(chain[,"lambda"],500)
traceplot(fit)
hist(toy_data$Y,prob=T)
lines(density(chain[,'pred']))
library(hdrcde)
hdr.den(chain[,'pred'])
library(gridExtra)
# fit2, fit3, fit4 are assumed to be refits of the same model with normal, Cauchy and uniform priors on lambda
grid.arrange(traceplot(fit,pars="lambda"),
traceplot(fit2,pars="lambda"),
traceplot(fit3,pars="lambda"),
traceplot(fit4,pars="lambda"),nrow=1)
hist(toy_data$Y,probability = T)
lines(density(as.matrix(fit)[,"pred"]),lwd=2)
lines(density(as.matrix(fit2)[,"pred"]),lwd=2,col=2,lty=2)
lines(density(as.matrix(fit3)[,"pred"]),lwd=2,col=3,lty=3)
lines(density(as.matrix(fit4)[,"pred"]),lwd=2,col=4,lty=4)
legend(5.2,.75,c("Gamma prior","Normal prior","Cauchy prior","Uniform prior"),
  lwd=2,lty=1:4,col=1:4)
# shell commands (run in a terminal, not inside Python):
conda install -c conda-forge pymc3
pip install pymc3
%matplotlib inline
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt

v= np.loadtxt('toy.txt').reshape(500) 
with pm.Model() as toy:
    lambda_ = pm.Gamma('lambda_', 1, 1)
    y = pm.Exponential('y', lambda_, observed=v)
    fit_gamma = pm.sample(2000, cores=2,tune=1000)
with pm.Model() as toy:
    lambda_ = pm.Gamma('lambda_', 1, 1)
with toy:
    y = pm.Exponential('y', lambda_, observed=v)
with toy:
    fit_gamma = pm.sample(2000, cores=2,tune=1000)
print(pm.summary(fit_gamma).round(4))
pm.traceplot(fit_gamma);
plt.figure(figsize=(10,2))
pm.forestplot(fit_gamma,rhat=False)
plt.figure(figsize=(10,2))
pm.energyplot(fit_gamma)
pm.plot_posterior(fit_gamma, lw=0, alpha=0.5,figsize=(10,4));
# fit_norm, fit_cauchy, fit_unif are assumed to be fits of the same model with normal, Cauchy and uniform priors on lambda
pm.traceplot(fit_gamma);
pm.traceplot(fit_norm);
pm.traceplot(fit_cauchy);
pm.traceplot(fit_unif);
SNC_model="
data {
  int<lower=0> J;
  real y[J];
  real<lower=0> sigma[J];
}

parameters {
  real mu;
  real<lower=0> tau;
  real theta_tilde[J];
}

transformed parameters {
  real theta[J];
  for (j in 1:J)
    theta[j] = mu + tau * theta_tilde[j];
}

model {
  mu ~ normal(0, 5);
  tau ~ cauchy(0, 5);
  theta_tilde ~ normal(0, 1);
  y ~ normal(theta, sigma);
}"
schools.data <- list(
  J = 8,
  y = c(28.39, 7.94, -2.75 , 6.82, -0.64, 0.63, 18.01, 12.16),
sigma = c(14.9, 10.2, 16.3, 11.0, 9.4, 11.4, 10.4, 17.6)
)
library(rstan)
fit <- stan(
  model_code=SNC_model,   # Stan program
  data = schools.data,    # data (named list)
  chains = 2,             # use 2 Markov chains
  warmup = 1000,          # warmup iterations per chain
  iter = 2000,            # total iterations per chain
  refresh = 1000          # report progress every 1000 iterations
  )
print(fit, pars = c("mu","tau"))
plot(fit,plotfun="trace", pars = c("mu","tau","theta"), inc_warmup=TRUE,nrow=2)
#traceplot(fit, pars = c("mu", "tau","theta"), inc_warmup = TRUE, nrow = 2) # equivalent
pairs(fit, pars = c("theta", "theta_tilde"), log = TRUE, las = 1,include = F)
samples <- extract(fit, permuted = TRUE) 
layout(t(1:3))
library(hdrcde)
hdr.den(samples$tau)
hdr.den(samples$mu)
hdr.den(samples$lp__)
sampler_params <- get_sampler_params(fit, inc_warmup = FALSE)
sampler_params_chain1 <- sampler_params[[1]]
colnames(sampler_params_chain1)
library(shinystan)
launch_shinystan(fit)
%matplotlib inline
import pymc3 as pm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
from collections import defaultdict
J = 8
y = np.array([28.39, 7.94, -2.75 , 6.82, -0.64, 0.63, 18.01, 12.16])
sigma = np.array([14.9, 10.2, 16.3, 11.0, 9.4, 11.4, 10.4, 17.6])
with pm.Model() as NC:
    mu = pm.Normal('mu', mu=0, sd=5)
    tau = pm.HalfCauchy('tau', beta=5)
    theta_tilde = pm.Normal('theta_t', mu=0, sd=1, shape=J)
    theta = pm.Deterministic('theta', mu + tau * theta_tilde)
    obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
with NC:
    fit = pm.sample(5000, chains=2, tune=1000, 
            random_seed=[20190818, 20191010], target_accept=.90)
pm.summary(fit).round(2)
plt.figure(figsize=(15,5))
pm.forestplot(fit,rhat=False)
pm.plot_posterior(fit, lw=0, varnames=["mu","tau","theta_t"],
  alpha=0.5,figsize=(10,6));
pm.traceplot(fit,varnames=["mu","tau","theta_t"])
logtau = np.log(fit['tau'])
mlogtau = [np.mean(logtau[:i]) for i in np.arange(1, len(logtau))]
plt.figure(figsize=(15, 4))
plt.axhline(0.7657852, lw=2.5, color='gray')
plt.plot(mlogtau, lw=2.5)
plt.ylim(0, 2)
plt.xlabel('Iteration')
plt.ylabel('MCMC mean of log(tau)')
plt.title('MCMC estimation of log(tau)')
divergent = fit['diverging']
print('Number of Divergent %d' % divergent.nonzero()[0].size)
divperc = divergent.nonzero()[0].size / len(fit) * 100
print('Percentage of Divergent %.1f' % divperc)
theta = fit.get_values(varname='theta', combine=True)[:, 0]
logtau = fit.get_values(varname='tau_log__', combine=True)
_, ax = plt.subplots(1, 1, figsize=(10, 3))
ax.plot(theta, logtau, 'o', color='C3', alpha=.5)
divergent = fit['diverging']
ax.plot(theta[divergent], logtau[divergent], 'o', color='C2')
ax.set_xlabel('theta[0]')
ax.set_ylabel('log(tau)')
ax.set_title('scatter plot between log(tau) and theta[0]');
from pandas.plotting import scatter_matrix
scatter_matrix(pm.trace_to_dataframe(fit)[["tau","mu","theta_t__0","theta__0"]],\
       figsize=(10,4))
library(rstanarm)
wells$dist100 <- wells$dist / 100
fit_wells <- stan_glm(
  switch ~ dist100 + arsenic, 
  data = wells, 
  family = binomial(link = "logit"), 
  prior = student_t(df = 7),
  prior_intercept = student_t(df = 7),
  QR = TRUE,
  chains = 2, iter = 2000 
)
plot(fit_wells)
library(gridExtra)
grid.arrange(plot(fit_wells,"trace"),
plot(fit_wells, "dens_overlay"),nrow=2)
stancode <- rstan::get_stancode(fit_wells$stanfit) # extract the Stan code underlying the rstanarm fit
cat(stancode)
library(brms)
w=read.csv("fish.csv")
fit_w <- brm(count ~ persons + child + camper, data = w,
                 family = zero_inflated_poisson("log"))
get_prior(count ~ persons + child + camper, data = w,
                 family = zero_inflated_poisson("log"))
prior <- c(prior_string("normal(0,10)", class = "b"),
           prior(normal(1,2), class = b, coef = treat),
           prior_(~cauchy(0,2), class = ~sd, 
                  group = ~subject, coef = ~Intercept))
make_stancode(count ~ persons + child + camper, data = w,
                 family = zero_inflated_poisson("log"))
import numpy as np
from bayespy.nodes import GaussianARD, Gamma
from bayespy.inference import VB
mu_0=0;tau_0=1e-6;alpha=1e-6;beta=1e-6; # set the hyperparameters
n=[20,100,500,1000] # sample sizes
MU=[];TAU=[]
for N in n:
    np.random.seed(1010)
    data = np.random.normal(5, 10, size=(N,))
    mu = GaussianARD(mu_0, tau_0)
    tau = Gamma(alpha, beta)
    y = GaussianARD(mu, tau, plates=(N,)) # plates=(N,) because these are the N observations
    y.observe(data) # feed in the observed data
    Q = VB(mu, tau, y)
    Q.update(repeat=N)
    MU.append(mu)
    TAU.append(tau)
lty=['-',':','--','-.']
lab=['N=20','N=100','N=500','N=1000']

import bayespy.plot as bpplt
bpplt.pyplot.figure(figsize=(12, 4))
bpplt.pyplot.subplot(121)
for i in range(len(MU)):
    bpplt.pdf(MU[i], np.linspace(0, 10, num=100), color='b',
      linestyle=lty[i], name=r'\mu',label=lab[i])
bpplt.pyplot.legend()
bpplt.pyplot.subplot(122)
for i in range(len(TAU)):
    bpplt.pdf(TAU[i], np.linspace(1e-10, 0.02, num=100), color='r',
      linestyle=lty[i], name=r'\tau',label=lab[i])
bpplt.pyplot.legend()
bpplt.pyplot.tight_layout()
bpplt.pyplot.show()

Chapter 9 Code

set.seed(1010)
N <- 100
theta <- 0.6
y <- rbinom(N, 1, theta)
coin.data <- list(N=N, y=y)
Coin="
  data {
  int<lower=0> N;
  int<lower=0,upper=1> y[N];
}
parameters {
  real<lower=0, upper=1> theta;
}
model {
  theta ~ beta(1, 1);
for (n in 1:N)
  y[n] ~ bernoulli(theta);
}
"
library(rstan)
fit_c <- stan(model_code=Coin, data = coin.data, chains = 2, warmup = 1000, 
  iter = 2000, refresh = 1000 )
ce=extract(fit_c)
library(lattice)
require(gridExtra)
densityplot(ce$theta)
layout(t(1:2))
library(hdrcde)
hdr.den(ce$theta)
hdr.den(ce$lp__)
import pymc3 as pm
import scipy.stats as stats
import pandas as pd
import numpy as np
n = 100
h = 61
niter = 1000
with pm.Model() as CT:
    p = pm.Beta('p', alpha=1, beta=1)
    y = pm.Binomial('y', n=n, p=p, observed=h)
    fit_CT = pm.sample(2000, chains=2, tune=1000,
               random_seed=[20190818, 20191010], target_accept=.90)
plt.figure(figsize=(10,4))
plt.hist(fit_CT['p'], 15, histtype='step', density=True, label='post'); # 'normed' was removed in newer matplotlib
x = np.linspace(0, 1, 100)
plt.plot(x, stats.beta.pdf(x, 1, 1), label='prior');
plt.legend(loc='best');
N <- 100
y <- rnorm(N, 10, 2)
NMS_data=list(N=N,y=y)
NMS="
data {
  int<lower=0> N;
  real y[N];
}
parameters {
  real<lower=0> mu;
  real<lower=0> sigma;
}
model {
  mu ~ uniform(0, 100);
  sigma ~ uniform(0, 100);
  for (n in 1:N)
    y[n] ~ normal(mu,sigma);
}
"
fit_NMS <- stan(
  model_code=NMS,  data = NMS_data, chains = 2,  
  warmup = 1000, iter = 2000, refresh = 1000 
  )
nmse=extract(fit_NMS)
layout(t(1:2))
library(hdrcde)
hdr.den(nmse$mu)
hdr.den(nmse$sigma)
N = 100
_mu = np.array([10])
_sigma = np.array([2])
y = np.random.normal(_mu, _sigma, N)
with pm.Model() as NMS:
    mu = pm.Uniform('mu', lower=0, upper=100, shape=_mu.shape)
    sigma = pm.Uniform('sigma', lower=0, upper=10, shape=_sigma.shape)
    y_obs = pm.Normal('Y_obs', mu=mu, sd=sigma, observed=y)
    fit_NMS = pm.sample(2000, chains=2, tune=1000,
               random_seed=[20190818, 20191010], target_accept=.90)
pm.plot_posterior(fit_NMS, lw=0, varnames=["mu","sigma"],alpha=0.5,figsize=(10,3));
N = 30
x = seq(0, 1, length=N)
y = 6 -2 *x + rnorm(N)
lms_data=list(y=y,x=x,N=N)
lms="data {
  int<lower=0> N;
  vector[N] x;
  vector[N] y;
}
parameters {
  real a;
  real b;
  real<lower=0> tau;
}
transformed parameters {
     real sigma=1/tau;
   }

model {
     a~normal(0,100);
     b~normal(0,100);
     tau~gamma(0.1,0.1);
  y ~ normal(a + b * x, sigma);
}"
fit_NMS <- stan(
  model_code=lms,  data = lms_data, chains = 2,  
  warmup = 1000, iter = 2000, refresh = 1000 
  )
pairs(fit_NMS,pars="sigma",include = F)
traceplot(fit_NMS, inc_warmup = TRUE, nrow = 2)
n = 30
_a = 6
_b = -2
x = np.linspace(0, 1, n)
y = _a + _b*x + np.random.randn(n)
with pm.Model() as LM:
    a = pm.Normal('a', mu=0, sd=100)
    b = pm.Normal('b', mu=0, sd=100)
    tau = pm.Gamma('tau', 0.1, 0.1)
    y_est = a + b*x # linear predictor (a is the intercept, b the slope, matching y = _a + _b*x)
    likelihood = pm.Normal('y', mu=y_est, tau=tau, observed=y)
    fit_LM = pm.sample(2000, chains=2, tune=1000,
               random_seed=[20190818, 20191010], target_accept=.90)
print(pm.summary(fit_LM).round(2))
pm.traceplot(fit_LM);
w=read.csv("HTWT.csv")
w=read.csv("HTWT.csv")
w_data=list(N=nrow(w), X=cbind(1,w[,-1]), K=3,
  y=w$male,  beta_loc = rep(0, 3), beta_scale = rep(100, 3))
logit="
data {
// input data
  int N;
  int y[N];
  int K;
  matrix[N, K] X;
  // prior parameter values
  vector[K] beta_loc;
  vector[K] beta_scale;
}
parameters {
  vector[K] beta;
}
transformed parameters {
  // linear predictor
  vector[N] eta;
  eta = X * beta;
}
model {
  beta ~ normal(beta_loc, beta_scale);
  //  y ~ bernoulli(inv_logit(eta)); equivalent to the next line, but slower
  y ~ bernoulli_logit(eta);
}
generated quantities {
  // log-likelihood of each observation
  vector[N] log_lik;
  // probabilities
  vector[N] mu;
  for (i in 1:N) {
    mu[i] = inv_logit(eta[i]);
    log_lik[i] = bernoulli_logit_lpmf(y[i] | eta[i]);
  }
}
"
library(rstan)
fit_log=stan(
  model_code = logit, data = w_data, chains = 2, warmup = 1000,
  iter = 2000, cores = 2, refresh = 1000
)
pairs(fit_log,pars="beta",include = T)
traceplot(fit_log, pars="beta", inc_warmup = TRUE, nrow = 1)
w=pd.read_csv('htwt.csv')
with pm.Model() as my_model:
    my_priors = {"Intercept": pm.Normal.dist(mu=0, tau=1e-2),
               "height": pm.Normal.dist(0,100),
               "weight": pm.Normal.dist(0,100)
              }
    pm.glm.GLM.from_formula('male ~ height + weight',w,
                            family=pm.glm.families.Binomial(), priors=my_priors)
    trace_my_model = pm.sample(2000, chains=2, tune=1000, init='adapt_diag')
print(pm.summary(trace_my_model).round(2))
import seaborn
plt.figure(figsize=(9,7))
seaborn.jointplot(trace_my_model['height'], trace_my_model['weight'],
    kind="hex", color="#4CB391")
plt.xlabel("beta_height")
plt.ylabel("beta_weight");
pm.traceplot(trace_my_model);
# The Stan model as a string.
model_b2 <- "
data {
  // Number of data points
  int N;
  // Number of successes
  int y[N];
  // Number of trials
  int n[N];
}
parameters {      
  real logodds; // can't use 'logit' because it is a function name
}
transformed parameters { // functions of parameters that we would like to estimate
  real theta; 
  real odds;
  theta = 1/(1 + exp(-logodds));
  odds = theta/(1-theta);
}

model {  
  theta ~ beta(1, 1);
  y ~ binomial(n,theta);
}

generated quantities {
}
"
#y = sample(0:1,100,rep=T,prob = c(.2,.8))
#data_list <- list(y = y, n = length(y))

rat=read.csv("rat.csv")
data_list2 <- list(N=nrow(rat), y = rat$y, n = rat$n)
# Compiling and producing posterior samples from the model.
stan_samples <- stan(model_code = model_b2, data = data_list2)
library(lattice)
require(gridExtra)
dev.off()
p1=plot(stan_samples)
p2=stan_trace(stan_samples)
grid.arrange(p1,p2, ncol=2)
ef=extract(stan_samples)
g1=densityplot(ef$theta)
g2=densityplot(ef$odds)
g3=densityplot(ef$logodds)
grid.arrange(g1,g2, g3,ncol=3)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm

w=pd.read_csv("rat.csv")
y=w['y'];n=w['n']

with pm.Model() as binom_model:
    theta=pm.Beta('theta',alpha=1,beta=1)
    odds=pm.Deterministic('odds',theta/(1-theta))
    logodds=pm.Deterministic('logodds',pm.math.log(odds))
    obs = pm.Binomial('obs', p=theta, n=n,observed=y)
with binom_model:
    trace=pm.sample(10000, cores=4,tune=1000) # 'njobs' is the older name for 'cores'
pm.traceplot(trace);plt.show()
library(rstan)
model_p <- "
data {
  int n;
  int y[n];
}
parameters {      
  real theta;
}
model {  
  theta ~ gamma(1, 1);
  y ~ poisson(theta);
}
"
y = rep(0,9)
data_list <- list(y = y, n = length(y))
stan_samples <- stan(model_code = model_p, data = data_list)
stan_samples
library(lattice)
require(gridExtra)
p1=plot(stan_samples)
p2=stan_trace(stan_samples)
grid.arrange(p1,p2, ncol=2)
ef=extract(stan_samples)
densityplot(ef$theta)
library(hdrcde)
hdr.den(ef$theta)
y=np.repeat(0,9)
with pm.Model() as Poisson_model:
    theta=pm.Gamma('theta',alpha=1,beta=1)
    obs = pm.Poisson('obs', mu=theta, observed=y)
with Poisson_model:
    trace=pm.sample(10000, cores=4,tune=1000)
pm.traceplot(trace);plt.show()
stanmodelcode = "
data {                      // 数据块
  int<lower=1> N;           // 样本量
  vector [N] y;             // 目标变量
}
parameters {                // 参数块
  real mu;           
  real sigma;           
}
model {                     // 模型块
  // 先验分布
  mu~normal(0,1000);
  sigma ~ uniform(0, 40); 
  // 似然
  y[N] ~normal(mu,sigma);
}
"
w=read.csv("THM.csv")
N=nrow(w)
y=w[,2]
dat = list(N=N, y=y) 
library(rstan)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
### Run the model and examine results
fit = stan(model_code = stanmodelcode,
  data = dat,
  iter = 10000,
  control=list(max_treedepth=15,
  adapt_delta=0.95),
  chains = 2)  # run two Markov chains
library(lattice)
require(gridExtra)
ef=extract(fit)
g1=densityplot(ef$mu)
g2=densityplot(ef$sigma)
grid.arrange(g1,g2, ncol=2)
import numpy as np
import pandas as pd # needed for pd.read_csv below
import matplotlib.pyplot as plt
import pymc3 as pm
y=pd.read_csv("THM.csv")['Sample_result']
with pm.Model() as norm_model:
    mu0=pm.Normal('mu0',mu=0,sd=1000)
    sd0=pm.Uniform('sd0',lower=0, upper=40)
    obs = pm.Normal('obs', mu=mu0, sd=sd0, observed=y)
with norm_model:
    trace=pm.sample(10000, cores=2,tune=1000)

Chapter 10 Code

MM="
data { 
  int<lower=1> N;  // total number of observations 
  vector[N] Y;  // response variable 
  int<lower=1> K;  // number of population-level effects 
  matrix[N, K] X;  // population-level design matrix 
} 
transformed data {
  real mean_y=mean(Y);
  real sd_y=sd(Y);
}
parameters { 
  real m; 
  real lam;
  real lambda;
  vector[K] b;  // population-level effects 
  real Intercept;  // intercept 
} 
model { 
  vector[N] mu = Intercept + X * b;
  m~normal(0,10);
  lam~uniform(0,100);
  target += gamma_lpdf (lambda| 1, .1); 
  for(i in 1:K){
  target += student_t_lpdf(b[i] |1, m, lambda);
  };
  Intercept~normal(mean_y, sd_y);
  target += student_t_lpdf(Y |1.5, mu,lam);
}
"
library(rstan)
w=read.csv("Anscombe.csv")#读入数据
data=list(N=nrow(w),Y=w[,1],K=ncol(w)-1,X=w[,-1])#数据必须用list
fit1 <- stan(
  model_code = MM,  # Stan 模型程序
  data = data,    # 数据名字
  chains = 4,             # 使用的Markov链数目
  warmup = 1000,          # 每条链热身迭代次数
  iter = 20000,            # 每条链总迭代次数
  cores = 4,              # 核的数目
  refresh = 1000          # 每1000次迭代显示结果
  )
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from pymc3 import  *
import theano
import pandas as pd
from statsmodels.formula.api import glm as glm_sm
import statsmodels.api as sm
from pandas.plotting import scatter_matrix
w = pd.read_csv('Anscombe.csv')
with Model() as model_edu1:
    family = glm.families.StudentT(link=glm.families.Identity(),
               priors={'nu': 1.5,'lam': Uniform.dist(0, 100)})
    gmean = Normal('gmean', mu=0, sd=10)
    gprec = Gamma('gprec', alpha=1, beta=.1, testval=1.)
    slope = StudentT.dist(mu=gmean, lam=gprec, nu=1)
    intercept = Normal.dist(mu=w.education.mean(), sd=w.education.std())
    GLM.from_formula('education ~ income + young + urban', w,
        priors={'Intercept': intercept, 'Regressor': slope},family=family)
    trace_edu1 = sample(10000, cores=4)
plt.figure(figsize=(12,3))
forestplot(trace_edu1, varnames=['gmean', 'income','young','urban'],rhat=False)
scatter_matrix(trace_to_dataframe(trace_edu1)[['gmean','income','gprec']],\
    figsize=(12,3))
ML="
data {
// input data
  int N;
  int y[N];
  int K;
  matrix[N, K] X;
  // prior parameter values
  real alpha_loc;
  real alpha_scale;
  vector[K] beta_loc;
  vector[K] beta_scale;
}
parameters {
  real alpha;
  vector[K] beta;
}
transformed parameters {
  // linear predictor
  vector[N] eta;
  eta = alpha + X * beta;
}
model {
  alpha ~ normal(alpha_loc, alpha_scale);
  beta ~ normal(beta_loc, beta_scale);
  //  y ~ bernoulli(inv_logit(eta)); equivalent to the next line, but slower
  y ~ bernoulli_logit(eta);
}
generated quantities {
  // log-likelihood of each observation
  vector[N] log_lik;
  // probabilities
  vector[N] mu;
  for (i in 1:N) {
    mu[i] = inv_logit(eta[i]);
    log_lik[i] = bernoulli_logit_lpmf(y[i] | eta[i]);
  }
}
"
u=read.csv("Sports.csv")#Label~PRP+VBN
u_data=list(N=nrow(u), X=u[,c(22,34)], K=2,
  y=(u$Label=='subjective')*1, # convert the label to a 0/1 dummy
  alpha_loc = -1.5, alpha_scale= 2, beta_loc = rep(0, 2),
  beta_scale = rep(0.5, 2)
)
library(rstan)
fit2 <- stan(
  model_code = ML, data = u_data, chains = 4, warmup = 1000,
  iter = 2000, cores = 2, refresh = 1000
)
%matplotlib inline
import pandas as pd
import pymc3 as pm
from pymc3 import  *
import matplotlib.pyplot as plt
import numpy as np
import theano.tensor as t
from scipy.stats import mode
w=pd.read_csv('sports.csv')
priors = {"Intercept": pm.Normal.dist(-1.5, 2),
          "Regressor": pm.Normal.dist(0, .5)
          }

with pm.Model() as BC:
    pm.glm.GLM.from_formula('Label~PRP+VBN',
    w, family=pm.glm.families.Binomial(), priors = priors)
    trace_BC = pm.sample(2000, cores=2,tune=1000)
plt.figure(figsize=(12,3))
pm.plots.forestplot(trace_BC, varnames=['PRP', 'VBN'],rhat=False)
XX="
data {
  int N; 
  int J; 
  int K; 
  int id[N]; 
  matrix[N,K] X; 
  vector[N] y; 
}
parameters {
  vector[K] gamma;
  vector[K] tau; 
  
  vector[K] beta[J]; 
  real sigma; 
}
model {
  vector[N] mu; //linear predictor
  //priors
  gamma ~ normal(0,5); 
  tau ~ cauchy(0,5); 
  sigma ~ cauchy(0,5); 
  
  for(j in 1:J){
   beta[j] ~ normal(gamma,tau); 
  }
  
  for(n in 1:N){
    mu[n] = X[n] * beta[id[n]]; 
  }

  //likelihood
  y ~ normal(mu,sigma);
}
"
library(rstan)
w=read.csv("sleepstudy.csv")
ss=list(N=nrow(w),J=18,K=2,id=rep(1:18,each=10),
          X=cbind(1,w[,2]),y=w[,1])
m_s0<-stan(model_code = XX,data=ss,chains = 2)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
import theano
w = pd.read_csv('sleepstudy.csv')
sub_idx=np.repeat(np.arange(18),10)
n_sub=18
with pm.Model() as w2_model:
    mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
    sigma_a = pm.HalfCauchy('sigma_a', 5)
    mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
    sigma_b = pm.HalfCauchy('sigma_b', 5)
    a = pm.Normal('a', mu=mu_a, sd=sigma_a, shape=n_sub)
    b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=n_sub)
    eps = pm.HalfCauchy('eps', 5)

    Reaction_est = a[sub_idx] + b[sub_idx] * w.Days

    Reaction_like = pm.Normal('Reaction_like', mu=Reaction_est,\
     sd=eps, observed=w.Reaction)
with w2_model:
    w2_trace = pm.sample(draws=2000, n_init=1000)
plt.figure(figsize=(12,5))
pm.plots.forestplot(w2_trace,rhat=False)
w=read.csv("cbpp.csv")
w[,4]=factor(w[,4])
w1=data.frame(model.matrix(~.-1,w))
MLL="
data {
  int N; //the number of observations (N=nrow(w1)=56)
  int J; //the number of groups (J=length(unique(w1$herd))=15)
  int K; //number of columns in the model matrix (K=4, including the intercept)
  int id[N]; //vector of group indices  (w1$herd)
  matrix[N,K] X; //the model matrix (56,4: w1[,c(4:7)]) 
  int y[N]; //the response variable (w1[,2], i.e. w$incidence)
  int n[N]; // w1$size
}
parameters {
  real mu; //population-level regression coefficients
  real<lower=0> tau; //the (positive) standard deviation of the regression coefficients

  vector[K] beta[J]; //matrix of group-level regression coefficients
}
model {
  vector[N] eta; //linear predictor
  //priors
  mu ~ student_t(3, 0, 1); 
  tau ~ normal(0,1); 

//priors <- c(set_prior("normal(0,1)", class = "Intercept"),
//set_prior("normal(0,.5)", class = "b", coef = "", lb = 0))
  
  for(j in 1:J){
   beta[j] ~ normal(mu,tau); 
  }
  
  for (i in 1:N) {
    eta[i] = inv_logit(X[i] * beta[id[i]]);
  }
  //likelihood binomial_lpmf(ints n | ints N, reals theta)
  // y ~ binomial(n, eta);
  for (i in 1:N) {
  target += binomial_lpmf(y[i] | n[i], eta[i]);
  }
}
"
library(rstan)
df=list(N=nrow(w1),J=15,K=4,id=w1$herd,X=as.matrix(w1[,4:7]),y=w1[,2],n=w1$size)
mll0<-stan(model_code = MLL,data=df,chains = 2)
pairs(mll0, inc_warmup =TRUE,pars=c("mu","tau","beta[1,1]","beta[1,2]"))
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import preprocessing
import pymc3 as pm
import theano.tensor as T
w=pd.read_csv("cbpp.csv")
le = preprocessing.LabelEncoder()
herd_idx = le.fit_transform(w['herd'])
n_herds=len(set(herd_idx))
w['period']=w['period'].astype('category')
w1=pd.get_dummies(w,drop_first=False)
with pm.Model() as mll_model:
  
    # hyperpriors
    mu_b = pm.StudentT('mu_b', nu=3, mu=0., sd=1.0)
    sigma_b = pm.HalfNormal('sigma_b', sd=1.0)
 
    # four period intercepts
    b1 = pm.Normal('b1', mu=mu_b, sd=sigma_b, shape=n_herds)
    b2 = pm.Normal('b2', mu=mu_b, sd=sigma_b, shape=n_herds)
    b3 = pm.Normal('b3', mu=mu_b, sd=sigma_b, shape=n_herds)
    b4 = pm.Normal('b4', mu=mu_b, sd=sigma_b, shape=n_herds)
 
    yhat = pm.invlogit(b1[herd_idx] * w1.period_1.values+
                       b2[herd_idx] * w1.period_2.values+
                       b3[herd_idx] * w1.period_3.values+
                       b4[herd_idx] * w1.period_4.values)
 
    # binomial likelihood; note that w1.size would be the DataFrame's total
    # element count, so use the 'size' column for the binomial denominators
    y = pm.Binomial('y', n=w1['size'].values, p=yhat,
                    observed=w1.incidence.values)
with mll_model:
    mll_trace = pm.sample(2000)
plt.figure(figsize=(15,5))
pm.forestplot(mll_trace,rhat=False)

Chapter 11 Code

w=read.csv("aml.csv")
library(survival)
fit <- survfit(Surv(time, status) ~ x, data=w)
library(survminer)
ggsurvplot(fit, data = w, pval = TRUE)
library(survival) # flchain ships with the survival package
set.seed(9000);flchain[sample(1:7874,100),]
w=read.csv("flc9000.csv")
library(survival)
fit <- survfit(Surv(futime, death) ~ mgus, data=w)
library(survminer)
ggsurvplot(fit, data = w, pval = TRUE)
w=read.csv('mastectomy.csv')
library(survival)
fit <- survfit(Surv(time, event) ~ metastized, data=w)
library(survminer)
ggsurvplot(fit, data = w, pval = TRUE)
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import pymc3 as pm
from pymc3.distributions.timeseries import GaussianRandomWalk
import seaborn as sns
import pandas as pd
from theano import tensor as T
df = pd.read_csv('mastectomy.csv') # read the data
df.event = df.event*1 # convert the indicator to 0/1
df.metastized= (df.metastized == 'yes')*1 # dummy-code the factor
n=df.shape[0] # sample size
row_names=np.arange(n) # row indices

iv_len = 3 # length of each time interval
iv_cut = np.arange(0, df.time.max() + iv_len + 1, iv_len) # interval endpoints
N = iv_cut.size - 1 # number of intervals
iv_number = np.arange(N) # interval indices

last_period = np.floor((df.time - 0.01) / iv_len).astype(int)
# the line above gives the index of the interval containing each subject's last time
death = np.zeros((n, N)) # matrix: one row per subject, one column per interval
death[row_names, last_period] = df.event # event indicator (0/1) in the last interval

expos = np.greater_equal.outer(df.time, iv_cut[:-1]) * iv_len
# (n, N) matrix: a full 3 for each interval the subject enters, with the last
# interval replaced by the actual exposure time (between 0 and 3):
expos[row_names, last_period] = df.time - iv_cut[last_period]
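
To see what the death and expos matrices encode, here is a minimal, self-contained sketch with three hypothetical subjects (times 2, 5 and 7; event indicators 1, 0, 1) and the same interval length of 3:

t_toy = np.array([2., 5., 7.]); e_toy = np.array([1, 0, 1]) # hypothetical data
cut_toy = np.arange(0, t_toy.max() + 3 + 1, 3)      # interval endpoints 0, 3, 6, 9
last_toy = np.floor((t_toy - 0.01) / 3).astype(int) # last interval per subject
death_toy = np.zeros((3, cut_toy.size - 1))
death_toy[np.arange(3), last_toy] = e_toy           # event flag in the last interval
expos_toy = np.greater_equal.outer(t_toy, cut_toy[:-1]) * 3. # 3 per entered interval
expos_toy[np.arange(3), last_toy] = t_toy - cut_toy[last_toy] # partial last interval
print(death_toy) # rows: [1,0,0], [0,0,0], [0,0,1]
print(expos_toy) # rows: [2,0,0], [3,2,0], [3,3,1]
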
with pm.Model() as Cox_ph_model:
    lambda_ = pm.Gamma('lambda_', 0.01, 0.01, shape=N)
    beta = pm.Normal('beta', 0, sd=1000)
    eta_ = pm.Deterministic('eta_', T.outer(T.exp(beta * df.metastized), lambda_))
    mu = pm.Deterministic('mu', expos * eta_)
    obs = pm.Poisson('obs', mu, observed=death)
    Cox_PH_trace = pm.sample(1000, tune=1000, random_seed=1010)
pm.traceplot(Cox_PH_trace, varnames=['beta','lambda_']);
pm.summary(Cox_PH_trace, varnames=['beta','lambda_']).round(2) 
np.exp(Cox_PH_trace['beta'].mean())
pm.forestplot(Cox_PH_trace,rhat=False, varnames=['beta'])
pm.energyplot(Cox_PH_trace,figsize=(10,4))
w=read.csv("aml.csv")

A=1*(w$x=='Maintained')
X <- model.matrix(~ A)
y_all=as.vector(scale(log(w$time))) # standardize all log-times together,
# not censored and observed separately (matches the PyMC3 version below)
aml_data=list(
  y_m=y_all[w$status==0],X_m=X[w$status==0,],
  y_o=y_all[w$status==1],X_o=X[w$status==1,],
  N_m=sum(w$status==0),N_o=sum(w$status==1),
  P=2
)
AFT_model <- "
data {
  int P; // number of beta parameters
  
  // data for censored subjects
  int N_m;
  matrix[N_m,P] X_m;
  vector[N_m] y_m;
  
  // data for observed subjects
  int N_o;
  matrix[N_o,P] X_o;
  vector[N_o] y_o;
}

parameters {
  vector[P] beta; //coefficients                
  real<lower=0> alpha; // Gumbel scale (must be positive)      
}

transformed parameters{
  // model Weibull rate as function of covariates
  vector[N_m] eta_m;
  vector[N_o] eta_o;
  
  // location is the linear predictor on the standardized log-time scale
  // (matches the PyMC3 version below)
  eta_m = X_m*beta;
  eta_o = X_o*beta;
}

model {
  beta ~ normal(0, 5);
  alpha ~ normal(0,5);
  
  // evaluate likelihood for censored and uncensored subjects:
  // observed events contribute the log density, censored ones the log survival
  target += gumbel_lpdf(y_o | eta_o, alpha);
  target += gumbel_lccdf(y_m | eta_m, alpha);
}
"
library(rstan)
amlAFT <- stan(model_code = AFT_model,data = aml_data,
               chains=2,control = list(max_treedepth=12,adapt_delta=.99))
post_draws<-extract(amlAFT)
nm=names(post_draws)
layout(matrix(1:4,2,2,byrow=TRUE))
for(i in 1:4){
hist(post_draws[[i]],col=4,probability = T,
     xlab=nm[[i]], main=paste(nm[[i]],'Posterior Distribution'))
lines(density(post_draws[[i]]),col=2)
}
traceplot(amlAFT, pars=c("beta","alpha"),
          inc_warmup =TRUE,nrow=1)
%matplotlib inline

from matplotlib import pyplot as plt
from matplotlib.ticker import StrMethodFormatter
import numpy as np
import pymc3 as pm
import scipy as sp
import seaborn as sns
from statsmodels import datasets
from theano import shared, tensor as tt
import pandas as pd

plt.style.use('seaborn-darkgrid')
df=pd.read_csv('aml.csv')
n_patient, _ = df.shape
X = np.empty((n_patient, 2))
X[:, 0] = 1.
X[:, 1] = (df.x=='Maintained').values*1
X_ = shared(X)
y = np.log(df.time.values)
y_std = (y - y.mean()) / y.std()
cens = df.status.values == 0.
cens_ = shared(cens)
def gumbel_sf(y, mu, sigma):
    return 1. - tt.exp(-tt.exp(-(y - mu) / sigma))
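
As a quick sanity check (assuming scipy is available), the same formula written with numpy (gumbel_sf itself builds a theano expression) matches scipy's gumbel_r survival function at a few hypothetical points:

import numpy as np
from scipy import stats
yy = np.array([-1.0, 0.0, 2.5])  # hypothetical evaluation points
mu0, sig0 = 0.5, 1.2             # hypothetical location and scale
manual = 1. - np.exp(-np.exp(-(yy - mu0) / sig0))
print(np.allclose(manual, stats.gumbel_r.sf(yy, loc=mu0, scale=sig0)))  # True
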
with pm.Model() as amlAFT_model:
    beta = pm.Normal('beta', 0., 5, shape=2)
    eta = beta.dot(X_.T)
    alpha = pm.HalfNormal('alpha', 5.)
    y_obs = pm.Gumbel('y_obs', eta[~cens_], alpha, observed=y_std[~cens])
    y_cens = pm.Potential('y_cens', gumbel_sf(y_std[cens], eta[cens_], alpha))
    amlAFT_trace = pm.sample(draws=1000, tune=1000,target_accept=0.9)
pm.traceplot(amlAFT_trace, varnames=['alpha', 'beta'])
pm.summary(amlAFT_trace, varnames=['alpha', 'beta']).round(2)
pm.forestplot(amlAFT_trace,rhat=False, varnames=['alpha','beta'])
pm.energyplot(amlAFT_trace,figsize=(15,3));
pm.plot_posterior(amlAFT_trace, lw=0, alpha=0.5,figsize=(10,4));
AFTLL_model <- "
data {
  int P; // number of beta parameters
  
  // data for censored subjects
  int N_m;
  matrix[N_m,P] X_m;
  vector[N_m] y_m;
  
  // data for observed subjects
  int N_o;
  matrix[N_o,P] X_o;
  vector[N_o] y_o;
}

parameters {
  vector[P] beta; //coefficients                
  real<lower=0> alpha; // logistic scale (must be positive)      
}

transformed parameters{
  // function of covariates
  vector[N_m] eta_m;
  vector[N_o] eta_o;
  
  // location is the linear predictor (matches the PyMC3 version below)
  eta_m = X_m*beta;
  eta_o = X_o*beta;
}

model {
  beta ~ normal(0, 5);
  alpha ~ normal(0,5);
  
  // evaluate likelihood for censored and uncensored subjects:
  // observed events contribute the log density, censored ones the log survival
  target += logistic_lpdf(y_o | eta_o, alpha);
  target += logistic_lccdf(y_m | eta_m, alpha);
}
"
library(rstan)
amlLLAFT <- stan(model_code = AFTLL_model,data = aml_data,
               chains=2,control = list(max_treedepth=12,adapt_delta=.99))
post_draws<-extract(amlLLAFT)
nm=names(post_draws)
layout(matrix(1:4,2,2,byrow=TRUE))
for(i in 1:4){
hist(post_draws[[i]],col=4,probability = T,
     xlab=nm[[i]], main=paste(nm[[i]],'Posterior Distribution'))
lines(density(post_draws[[i]]),col=2)
}
traceplot(amlLLAFT, pars=c("beta","alpha"),
          inc_warmup =TRUE,nrow=1)
def logistic_sf(y, mu, alpha):
    return 1. - pm.math.sigmoid((y - mu) / alpha)
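
Similarly, logistic_sf is just 1 - sigmoid((y - mu)/alpha); a plain numpy version agrees with scipy's logistic survival function (hypothetical values):

import numpy as np
from scipy import stats
yy = np.array([-2.0, 0.3, 1.7]); mu0, a0 = 0.1, 0.8  # hypothetical values
manual = 1. - 1. / (1. + np.exp(-(yy - mu0) / a0))
print(np.allclose(manual, stats.logistic.sf(yy, loc=mu0, scale=a0)))  # True
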
with pm.Model() as amlLL_model:
    beta = pm.Normal('beta', 0., 5, shape=2)
    eta = beta.dot(X_.T)
    alpha = pm.HalfNormal('alpha', 5.)
    y_obs = pm.Logistic('y_obs', eta[~cens_], alpha, observed=y_std[~cens])
    y_cens = pm.Potential('y_cens', logistic_sf(y_std[cens], eta[cens_], alpha))
    amlLL_trace = pm.sample(draws=1000, tune=1000,target_accept=0.9)        
pm.traceplot(amlLL_trace, varnames=['alpha', 'beta'])
pm.summary(amlLL_trace, varnames=['alpha', 'beta']).round(2)
pm.forestplot(amlLL_trace,rhat=False, varnames=['alpha','beta'])
pm.energyplot(amlLL_trace,figsize=(15,3));
pm.plot_posterior(amlLL_trace, lw=0, alpha=0.5,figsize=(10,4));
w=read.csv("flc9000.csv")
A=1*(w$mgus==1)
X <- model.matrix(~ A)
flc_data=list(
  y_m=w$futime[w$death==0],X_m=X[w$death==0,],
  y_o=w$futime[w$death==1],X_o=X[w$death==1,],
  N_m=sum(w$death==0),N_o=sum(w$death==1),
  P=2
)
flc_model <- "
data {
  int P; // number of beta parameters
  
  // data for censored subjects
  int N_m;
  matrix[N_m,P] X_m;
  vector[N_m] y_m;
  
  // data for observed subjects
  int N_o;
  matrix[N_o,P] X_o;
  real y_o[N_o];
}

parameters {
  vector[P] beta; //Scale                
  real alpha; // Weibull Shape      
}

transformed parameters{
  // model Weibull rate as function of covariates
  vector[N_m] lambda_m;
  vector[N_o] lambda_o;
  
  // standard weibull AFT re-parameterization
  lambda_m = exp(-(X_m*beta)/alpha);
  lambda_o = exp(-(X_o*beta)/alpha);
}

model {
  beta ~ normal(0, 100);
  alpha ~ gamma(1, 0.001);
  
  // evaluate likelihood for censored and uncensored subjects
  target += weibull_lpdf(y_o | alpha, lambda_o);
  target += weibull_lccdf(y_m | alpha, lambda_m);
}

// generate posterior quantities of interest
generated quantities{
  real hazard_ratio;
  
  // generate hazard ratio
  hazard_ratio = exp(beta[2]/alpha ) ;
}
"
library(rstan)
flc_fit <- stan(model_code = flc_model,data = flc_data)
post_draws<-extract(flc_fit)
hist(post_draws$hazard_ratio,col=4,probability = T,
     xlab='Hazard Ratio', main='Hazard Ratio Posterior Distribution')
lines(density(post_draws$hazard_ratio),col=2)
traceplot(flc_fit, pars=c("alpha","beta"),
          inc_warmup =TRUE,nrow=1)
%matplotlib inline
import pymc3 as pm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels
import patsy
import theano.tensor as tt

plt.style.use('seaborn-darkgrid')
w=pd.read_csv('flc9000.csv')
y = w.futime.values # survival times
x = w.mgus.values   # the mgus covariate (the same X as in the Stan version)
censored = ~w['death'].values.astype(bool)
def weibull_lccdf(x, alpha, beta):
    ''' Log complementary cdf of Weibull distribution. '''
    return -(x / beta)**alpha
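
The closed form above can be checked against scipy's Weibull log survival function (hypothetical shape and scale):

import numpy as np
from scipy import stats
xx = np.array([0.5, 1.0, 4.0])  # hypothetical times
shape0, scale0 = 1.3, 2.0       # hypothetical alpha and beta
print(np.allclose(-(xx / scale0)**shape0,
                  stats.weibull_min.logsf(xx, shape0, scale=scale0)))  # True
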
with pm.Model() as flc_Weibull:
    b0 = pm.Normal('b0', mu=0, sd=100)
    b = pm.Normal('b', mu=0, sd=100)
    alpha = pm.Gamma('alpha', alpha=1, beta=0.001, testval=0.25)
    # regress the Weibull scale on the covariate x (not on the response y)
    betaO = pm.Deterministic('betaO', tt.exp(-(b0+b*x[~censored]) / alpha))
    betaM = pm.Deterministic('betaM', tt.exp(-(b0+b*x[censored]) / alpha))
    hazard_ratio = pm.Deterministic('hazard_ratio', tt.exp(b/alpha))
  
    y_obs = pm.Weibull('y_obs', alpha=alpha, beta=betaO, observed=y[~censored])
    y_cens = pm.Potential('y_cens', weibull_lccdf(y[censored], alpha, betaM,))
with flc_Weibull:
    flc_trace = pm.sample(draws=1000, tune=1000,
                        target_accept=0.9,
                        init='adapt_diag')
pm.traceplot(flc_trace, varnames=['alpha', 'b0','b','betaO','hazard_ratio'])
pm.summary(flc_trace, varnames=['alpha', 'b0','b','hazard_ratio']).round(2)
pm.forestplot(flc_trace,rhat=False, varnames=['b0', 'alpha'])
pm.forestplot(flc_trace,rhat=False, varnames=['b'])
pm.forestplot(flc_trace,rhat=False, varnames=['hazard_ratio'])

Chapter 12 Code

library(e1071);library(tidyverse)
w=read.csv('indian_diabetes.csv')
w$class=factor(w$class)
a=naiveBayes(class~.,w)%>% predict(w)
table(w$class,a);mean(w$class!=a)
Fold=function(Z=10,w,D,seed=7777){ 
  n=nrow(w);d=1:n; e=levels(w[,D]);
  N=length(e) # number of levels of the target variable
  set.seed(seed)
  dd=lapply(1:N, function(i){
    d0=d[w[,D]==e[i]];j=length(d0)
    ZT=rep(1:Z,ceiling(j/Z))[1:j]
    id=cbind(sample(ZT),d0);id})
  # each dd[[i]] above pairs random fold labels 1:Z with the row indices of class i
  mm=lapply(1:Z,
    function(i){u=NULL;for(j in 1:N)   
    u=c(u,dd[[j]][dd[[j]][,1]==i,2]);u})
  return(mm)
}
library(e1071);library(tidyverse)
Z=10;D=9
mm=Fold(Z,w,D)
Pr=data.frame(NB=w$class)
for(i in 1:Z){
     Pr$NB[mm[[i]]]=
       naiveBayes(class~.,w[-mm[[i]],])%>%
       predict(w[mm[[i]],])
}
table(w$class,Pr$NB);mean(w$class!=Pr$NB)
def Fold(u,Z,seed=8888):
    u=np.array(u).reshape(-1)
    id=np.arange(len(u))
    zid=[];ID=[];np.random.seed(seed)
    for i in np.unique(u):
        n=sum(u==i)
        ID.extend(id[u==i])
        k=(list(range(Z))*int(n/Z+1))
        np.random.shuffle(k)
        zid.extend(k[:n])
    zid=np.array(zid);ID=np.array(ID)
    zid=zid[np.argsort(ID)]
    return zid
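
A quick check on toy labels (hypothetical: 6 of class 0 and 4 of class 1) shows that Fold assigns folds within each class, keeping the class proportions balanced across folds:

import numpy as np
import pandas as pd
y_toy = np.array([0]*6 + [1]*4)     # hypothetical labels
zid_toy = Fold(y_toy, Z=2, seed=1)
print(pd.crosstab(y_toy, zid_toy))  # each class is split evenly over the 2 folds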

def CCV(clf,X, y,Zid):
    y_pred=np.array(y)
    for j in np.unique(Zid): #j has Z kinds of values
        clf.fit(X[Zid!=j],y[Zid!=j])
        y_pred[Zid==j]=clf.predict(X[Zid==j])
    error=np.mean(y!=y_pred)
    return(error,y_pred)
from sklearn.metrics import roc_curve, auc, confusion_matrix
import pandas as pd
import numpy as np
w=pd.read_csv("indian_diabetes.csv")
X=w.iloc[:,:-1];y=w["class"]
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
Zid=Fold(y,Z=10,seed=8888)
a,y_pred=CCV(clf,X=X,y=y,Zid=Zid)
print(a,"\n",confusion_matrix(y,y_pred))

Chapter 13 Code

import pyAgrum as gum
import pyAgrum.lib.notebook as gnb

bna=gum.BayesNet('Avian') # note: the next 3 lines can only be run once; running them again raises an error!
A, B, C, R, H, N = [ bna.add(name, 2) for name in "ABCRHN" ] 
for link in [(B,A),(C,H),(C,R),(A,H),(A,N),(A,R)]:
    bna.addArc(*link)

bna.cpt(C).fillWith([0.9,0.1])
bna.cpt(B).fillWith([0.999,0.001])
bna.cpt(A)[:]=[ [1,0],[0.99,0.01]]
bna.cpt(R)[:]=[ [[0.95,0.05],[0.5,0.5]],[[0.1,0.9],[0.02,0.98]]]
bna.cpt(H)[:]=[ [[0.94,0.06],[0.03,0.97]],[[0.4,0.6],[0.01,0.99]]]
bna.cpt(N)[:]=[ [0.7,0.3],[0.001,0.999]]
bna;bna.cpt(C);bna.cpt(B);bna.cpt(A);bna.cpt(R);bna.cpt(H);bna.cpt(N)
gnb.showInference(bna,evs={})
gnb.showInference(bna,evs={'C':1,'A':0})
gnb.showInference(bna,evs={'C':[0.3,0.9],'B':1},targets={'A','H'})
library(bnlearn)
library(Rgraphviz)
bl.av <- model2network('[C][B][A|B][R|C:A][H|C:A][N|A]')
graphviz.plot(bl.av)
ny <- c("no","yes")
C <- array(dimnames = list(C = ny), dim = 2, c(0.90,0.10))
B <- array(dimnames = list(B = ny), dim = 2, c(0.999,0.001))
A <- array(dimnames = list(A = ny, B = ny), dim = c(2, 2),
c(1,0,0.99,0.01))
R <- array(dimnames = list(R = ny,A=ny,C=ny), dim = c(2,2,2), 
           c(0.95,0.05,0.5,0.5,0.1,0.9,0.02,0.98))
H <- array(dimnames = list(H = ny,A=ny,C=ny), dim = c(2,2,2), 
           c(0.94,0.06,0.03,0.97,0.4,0.6,0.01,0.99))
N <- array(dimnames = list(N = ny, A = ny), dim = c(2, 2),
           c(0.7,0.3,0.001,0.999))

cpts <- list(C=C,B = B, A = A, R = R,H=H, N = N)
bn.av.fit = custom.fit(bl.av, cpts)
bn.fit.barchart(bn.av.fit$H)
bn.fit.barchart(bn.av.fit$N)
w=read.csv("driver.csv")
for(i in 1:ncol(w))w[,i]=factor(w[,i])
names(w)=c("Y","D","A","V","C","G") # abbreviated variable names for simplicity
library("bnlearn")
(w.hc.aic=hc(w, score = "aic")) 
(w.hc.bic=hc(w, score = "bic")) 
(w.hc.loglik=hc(w, score = "loglik"))
(w.hc.bde=hc(w, score = "bde"))   
(w.hc.mbde=hc(w, score = "mbde"))
(w.hc.k2=hc(w, score = "k2"))
library("Rgraphviz")
par(mfrow = c(2, 3))
graphviz.plot(w.hc.aic,main="Score-based hill-climbing: aic")
graphviz.plot(w.hc.bic,main="Score-based hill-climbing: bic")
graphviz.plot(w.hc.loglik, 
    main="Score-based hill-climbing: loglik")
graphviz.plot(w.hc.bde,main="Score-based hill-climbing: bde")
graphviz.plot(w.hc.mbde,main="Score-based hill-climbing: mbde")
graphviz.plot(w.hc.k2,main="Score-based hill-climbing: k2")
w.gs <- gs(w,debug=T) # returns a bn-class object
(w.iamb=iamb(w))
(w.fiamb=fast.iamb(w))
(w.iiamb=inter.iamb(w))
(w.mmpc=mmpc(w))
par(mfrow = c(2, 3))
graphviz.plot(w.gs,main="Constraint-based algorithms: gs")
graphviz.plot(w.iamb,main="Constraint-based algorithms: iamb")
graphviz.plot(w.fiamb,main="Constraint-based algorithms: fiamb")
graphviz.plot(w.iiamb,main="Constraint-based algorithms: iiamb")
graphviz.plot(w.mmpc,main="mmpc")
WL=data.frame(from=c(rep("Y",4),rep("D",3)),
    to=c("D",rep(c("A","C","V"),2)))
w.gs <- gs(w, whitelist=WL);graphviz.plot(w.gs)
from pomegranate import *
import pandas as pd
import numpy as np

w=pd.read_csv("DriverC.csv")
bn = BayesianNetwork.from_samples(np.array(w), algorithm='exact-dp',
                                  state_names=w.columns)
bn.bake()
bn.structure
print(bn.state_count(),bn.node_count(),bn.edge_count())
bn.predict([['Y0','D1',None,None,None,'G1'],
            ['Y1','D1',None,None,'C0','G0'],
            ['Y1','D0',None,None,'C0','G0'],
            ['Y1',None,'A1','V1','C0','G0'],
            [None,None,None,None,None,None]])
bn.predict_proba([['Y0','D1',None,None,None,'G1'],
                  ['Y1','D1',None,None,'C0','G0'],
                  ['Y1','D0',None,None,'C0','G0'],
                  ['Y1',None,'A1','V1','C0','G0'],
                  [None,None,None,None,None,None]])
bn.predict_proba([['Y1','D0',None,None,'C0','G0']])
library(bnlearn);U=read.csv("score.csv")
for(i in 1:ncol(U))U[,i]=as.numeric(U[,i])
U.gs0=gs(U);U.hc0=hc(U) # without whitelist or blacklist
U.w=data.frame(from=c("M","M","LITE","U","U","U","MATH"),
    to=c("LITE","HIST","HIST","MATH","CHEM","PHYS","PHYS"))
U.gs=gs(U,whitelist=U.w) # with whitelist
U.hc=hc(U,whitelist=U.w,blacklist=c("LITE","MATH")) # with whitelist and blacklist

par(mfrow = c(2, 2))
graphviz.plot(U.gs0,main="Original gs")
graphviz.plot(U.hc0,main="Original hc")
graphviz.plot(U.gs,main="Gs with whitelist")
graphviz.plot(U.hc,main="Hc with whitelist & blacklist")
w=read.csv("ksl.csv")
for(i in c(5:9))w[,i]=factor(w[,i])
library(deal) 
w.nw <- network(w) # create an empty network skeleton
w.prior <- jointprior(w.nw) # default imaginary sample size 64 (can be set by hand)
# inspect the default hyperparameter values (they can also be modified)
w.prior$jointalpha # 2^5=32 uniform discrete configurations: Dirichlet parameters
w.prior$jointnu     # 2^5 configurations: divisors of the normal covariance
w.prior$jointrho    # 2^5 configurations: inverse-Wishart degrees of freedom
w.prior$jointmu # 32 by 4: normal means (sample means) of the 4 continuous variables per configuration
w.prior$jointsigma  # 32 diagonal 4x4 sample-variance matrices: normal covariances
w.prior$jointphi  # 32 diagonal 4x4 sample-variance matrices: inverse-Wishart scales
mybanlist <- matrix(c(5,5,6,6,7,7,9,8,9,8,9,8,9,8),ncol=2)
banlist(w.nw) <- mybanlist
w.nw <- learn(w.nw,w,w.prior)$nw
# structure search:
w.search <- autosearch(w.nw,w,w.prior,trace=TRUE)
# perturb the best network and repeat the search twice:
w.heuristic <- heuristic(w.search$nw,w,w.prior,
    restart=2,degree=10,
    trace=TRUE,trylist=w.search$trylist)
thebest2 <- w.heuristic$nw

Chapter 14 Code

VITERBI(o, s, pi, O, A, B) # pseudocode; returns the most likely state sequence S
  #O is the observation sequence, o the observation space, s the state space,
  #pi the initial probabilities, A the transition matrix, B the emission matrix
    for i in 1:n # for each state
        T1[i,1]<- pi[i]*B[i,O[1]]
        T2[i,1]<- 0
    for j in 2:T  # for observations 2:T
        for i in 1:n # for each state
            T1[i,j]<- max_k(T1[k,j-1]*A[k,i]*B[i,O[j]])
            T2[i,j]<- argmax_k(T1[k,j-1]*A[k,i])
    z[T]<- argmax_k(T1[k,T])
    S[T]<-s[z[T]]
    for j in T:2
        z[j-1]<-T2[z[j],j]
        S[j-1]<-s[z[j-1]]
    return S
library(HMM)
w=read.csv("dishonestCasino.csv")
S = c("Fair", "Unfair")
Sb = 1:6
A = matrix(c(0.99, 0.01, 0.02, 0.98), c(length(S), 
        length(S)), byrow = TRUE)
B = matrix(c(rep(1/6, 6), c(rep(0.1, 5), 0.5)), 
        c(length(S), length(Sb)), byrow = TRUE)
Pi=c(.5,.5)
hmm = initHMM(S, Sb, startProbs=Pi,transProbs = A, emissionProbs = B)
f = forward(hmm, w$observation)
b = backward(hmm, w$observation) 
vit = viterbi(hmm, w$observation) # given the HMM, compute the most likely state path
Pi0=c(.6,.4);A0=matrix(c(.95,.05,.05,.95),byrow=TRUE,nrow=2);
B0=matrix(c(rep(1,6),rep(.4,5),.8),byrow=TRUE,nrow=2)
hmm0 = initHMM(S, Sb, startProbs=Pi0,transProbs = A0, emissionProbs = B0 )
bw=baumWelch(hmm0, w$observation, maxIterations=100, delta=1E-9, pseudoCount=0)
q=2000
i <- f[1, q]
j <- f[2, q]
p_O = (i + log(1 + exp(j - i))) # log P(O) via log-sum-exp of the last forward column
post = exp((f + b) - p_O) # posterior state probabilities
x = list(hmm = hmm, sim = w, vit = vit, post = post)
mn = "Fair and unfair die"
xlb = "Throw nr."
ylb = ""
plot(x$sim$observation, ylim = c(-9.5, 6), pch = 3, main = mn, 
xlab = xlb, ylab = ylb, bty = "n", yaxt = "n")
axis(2, at = 1:6)
text(0, -1.2, adj = 0, cex = 0.6, col = "black", "True: green = fair die")
for (i in 1:q) {
  if (x$sim$states[i] == "Fair") 
    rect(i, -1, i + 1, 0, col = "green", border = NA)
  else rect(i, -1, i + 1, 0, col = "red", border = NA)
}
text(0, -3.2, adj = 0, cex = 0.6, col = "black", "Most probable path")
for (i in 1:q) {
  if (x$vit[i] == "Fair") 
    rect(i, -3, i + 1, -2, col = "green", border = NA)
  else rect(i, -3, i + 1, -2, col = "red", border = NA)
}
text(0, -5.2, adj = 0, cex = 0.6, col = "black", "Difference")
differing = !(x$sim$states == x$vit)
for (i in 1:q) {
  if (differing[i]) 
    rect(i, -5, i + 1, -4, col = rgb(0.3, 0.3, 0.3), border = NA)
  else rect(i, -5, i + 1, -4, col = rgb(0.9, 0.9, 0.9), border = NA)
}
points(x$post[2, ] - 3, type = "l") # draw the posterior probability curve
text(0, -7.2, adj=0, cex=0.8, col="black", "Difference by posterior-probability")
for (i in 1:q) {
  if (post[1, i] > 0.5) {
    if (x$sim$states[i] == "Fair") 
      rect(i, -7, i + 1, -6, col = rgb(0.9, 0.9, 0.9), border = NA)
    else rect(i, -7, i + 1, -6, col = rgb(0.3, 0.3, 0.3), border = NA)
  }
  else {
    if (x$sim$states[i] == "Unfair") 
        rect(i, -7, i + 1, -6, col = rgb(0.9, 0.9, 0.9), border = NA)
    else rect(i, -7, i + 1, -6, col = rgb(0.3, 0.3, 0.3), border = NA)
  }
}
text(0, -9.2, adj = 0, cex = 0.6, col = "black", "Difference by 
    posterior-probability > .95")
for (i in 1:q) {
  if (post[2, i] > 0.95 || post[2, i] < 0.05) {
    if (differing[i]) 
      rect(i, -9, i + 1, -8, col = rgb(0.3, 0.3, 0.3),border = NA)
    else rect(i, -9, i + 1, -8, col = rgb(0.9, 0.9, 0.9),border = NA)
  }
  else {
    rect(i, -9, i + 1, -8, col = rgb(0.9, 0.9, 0.9),border = NA)
  }
}
import numpy as np
import pandas as pd
import math  # forward()/backward()/baumWelchRecursion below use math.inf
w=pd.read_csv("dishonestCasino.csv")

pi = [0.5, 0.5]    #p(Fair)=P(Unfair)=0.5
A = [[0.99, 0.01], # p(Fair->Fair)=0.99, p(Fair->Unfair)=0.01
     [0.02, 0.98]] # p(Unfair->Fair)=0.02, p(Unfair->Unfair)=0.98
B = [[1/6,1/6,1/6,1/6,1/6,1/6],
     [0.1,0.1,0.1,0.1,0.1,0.5]]
states = ['1', '2','3','4','5','6']
hidden_states = ['Fair', 'Unfair']
A=np.array(A)
B=np.array(B)
hmm={'A':A,'B':B,'pi':pi}
obs_map = {1:0, 2:1, 3:2, 4:3, 5:4, 6:5, }
obs=w['observation']-1
inv_obs_map = dict((v,k) for k, v in obs_map.items())
obs_seq = [inv_obs_map[v] for v in list(obs)]
state_space =pd.Series(pi, index=hidden_states, name='states')
a_df=pd.DataFrame(A,columns=hidden_states, index=hidden_states)
b_df=pd.DataFrame(B,columns=states, index=hidden_states)
print("Observations:\n",pd.DataFrame(np.column_stack([obs, obs_seq])[:5],
    columns=['Obs_code', 'Obs_seq']) )
print(state_space)
print("\n HMM matrix:\n", a_df)
print("\n Observable layer  matrix:\n",b_df)
# forward() and backward() are defined below; run those definitions before these calls
f=forward(hmm,obs)
b=backward(hmm,obs)
def forward(hmm,obs):
    no=len(obs)
    ns=hmm['A'].shape[0]
    f=np.full_like(np.zeros((ns,no)), np.nan)
    for i in range(ns):
        f[i, 0] = np.log(hmm['pi'][i] * hmm['B'][i,obs[0]])
    for k in range(1,no):
        for i in range(ns):
            logsum = -math.inf
            for j in range(ns):
                temp = f[j, k - 1] + np.log(hmm['A'][j,i])
                if temp > -math.inf:
                    logsum = temp + np.log(1 + np.exp(logsum - temp))
            f[i, k] = np.log(hmm['B'][i, obs[k]]) + logsum
    return f

def backward(hmm,obs):
    no=len(obs)
    ns=hmm['A'].shape[0]
    b=np.full_like(np.zeros((ns,no)), np.nan)    # (ns, no) matrix initialized with NaN
    for i in range(ns):
        b[i, no-1] = np.log(1)
    for k in range(no-2,-1,-1):
        for i in range(ns):
            logsum = -math.inf
            for j in range(ns):
                temp = b[j, k + 1] + np.log(hmm['A'][i,j] * hmm['B'][j, obs[k + 1]])
                if temp > -math.inf:
                    logsum = temp + np.log(1 + np.exp(logsum - temp))
            b[i, k] = logsum
    return b
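
The repeated update logsum = temp + np.log(1 + np.exp(logsum - temp)) in forward() and backward() is the log-sum-exp trick; numpy's logaddexp computes the same quantity, as this small check with hypothetical log values shows:

import math
import numpy as np
vals = np.array([-3.2, -1.1, -7.8])  # hypothetical log-probabilities
acc = -math.inf
for v in vals:
    acc = v + np.log(1 + np.exp(acc - v))  # same update as in forward()/backward()
print(np.allclose(acc, np.logaddexp.reduce(vals)))  # True
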
# viterbi() is defined just below; run the definition before this call
path, delta, phi = viterbi(pi, np.array(A), np.array(B), obs)
# define Viterbi algorithm for shortest path
# code adapted from Stephen Marsland's, 
#        Machine Learning: An Algorithmic Perspective, Vol. 2
# https://github.com/alexsosn/MarslandMLAlgo/blob/master/Ch16/HMM.py

def viterbi(pi, a, b, obs):
    
    nStates = np.shape(b)[0]
    T = np.shape(obs)[0]
    
    # init blank path (integer dtype: entries are used as indices below)
    path = np.zeros(T, dtype=int)
    # delta --> highest probability of any path that reaches state i
    delta = np.zeros((nStates, T))
    # phi --> argmax by time step for each state
    phi = np.zeros((nStates, T))
    
    # init delta and phi 
    delta[:, 0] = pi * b[:, obs[0]]
    phi[:, 0] = 0

    print('\nStart Walk Forward\n')    
    # the forward algorithm extension
    for t in range(1, T):
        for s in range(nStates):
            delta[s, t] = np.max(delta[:, t-1] * a[:, s]) * b[s, obs[t]] 
            phi[s, t] = np.argmax(delta[:, t-1] * a[:, s])
            print('s={s} and t={t}: phi[{s}, {t}] =\
             {phi}'.format(s=s, t=t, phi=phi[s, t]))
    
    # find optimal path
    print('-'*50)
    print('Start Backtrace\n')
    path[T-1] = np.argmax(delta[:, T-1])
    for t in range(T-2, -1, -1):
        path[t] = int(phi[path[t+1], t+1])
        print('path[{}] = {}'.format(t, path[t]))
        
    return path, delta, phi
pi0=[.6,.4]
A0 = [[.95,.05],
     [.05,.95]] 
B0 = [[1/6,1/6,1/6,1/6,1/6,1/6],
     [0.1428571, 0.1428571, 0.1428571, 0.1428571, 0.1428571, 0.2857143]]
A0=np.array(A0)
B0=np.array(B0)
hmm0 = {'A':A0,'B':B0,'pi':pi0}
bw=baumWelch(hmm0, obs)  # baumWelch() is defined below; run the definition first
print('A=','\n', bw['A'],'\n','B=','\n', bw['B'])
def baumWelch(hmm, obs, maxI=100, delta=1e-09,pseudoCount=0): 
    tempHmm = {'A':hmm['A'],'B':hmm['B'],'pi':hmm['pi']}
    diff = []
    for i in range(maxI):
        bw = baumWelchRecursion(tempHmm, obs)
        T = bw[0]
        E = bw[1]
        T= T + pseudoCount
        E= E + pseudoCount
        T = T/(T.sum(1))[:,None]
        E = E/(E.sum(1))[:,None]
        d=np.sqrt(np.sum((tempHmm['A']-T)**2))+np.sqrt(np.sum((tempHmm['B']-E)**2))
        diff.append(d)
        tempHmm['A'] = T
        tempHmm['B'] = E
        if d < delta:
            break
    return {'A':tempHmm['A'], 'B':tempHmm['B']}


def baumWelchRecursion(hmm,obs):
    no=len(obs)
    ns=hmm['A'].shape[0]
    T=np.zeros(hmm['A'].shape)
    E=np.zeros(hmm['B'].shape)
    f = forward(hmm,obs)
    b = backward(hmm,obs)
    p_obs = f[0,no-1]
    for i in range(1,ns):
        j = f[i,no-1]
        if j > - math.inf:
            p_obs = j + np.log(1+np.exp(p_obs-j))
    for x in range(ns):
        for y in range(ns):
            temp=f[x,0]+np.log(hmm['A'][x,y])+np.log(hmm['B'][y,obs[0+1]])+b[y,0+1]
            for i in range(1,no-1):
                j=f[x,i]+np.log(hmm['A'][x,y])+np.log(hmm['B'][y,obs[i+1]])+b[y,i+1]
                if j > - math.inf:
                    temp = j + np.log(1+np.exp(temp-j))
            temp = np.exp(temp - p_obs)
            T[x,y] = temp
    for x in range(ns):
        for s in np.unique(obs):
            temp = -math.inf
            for i in range(no):
                if s == obs[i]:
                    j = f[x,i] + b[x,i]
                    if j > - math.inf:
                        temp = j + np.log(1+np.exp(temp-j))
            temp = np.exp(temp - p_obs)
            E[x,s] = temp
    return T,E
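
Finally, a small smoke test on a short hypothetical observation sequence (this assumes the forward, backward and baumWelch definitions above have been run); the rows of the re-estimated transition matrix should sum to 1:

import numpy as np
hmm_toy = {'A': np.array([[0.9, 0.1], [0.2, 0.8]]),
           'B': np.array([[1/6]*6, [0.1]*5 + [0.5]]),
           'pi': [0.5, 0.5]}
obs_toy = np.array([0, 5, 5, 0, 1])   # hypothetical 0-based die rolls
bw_toy = baumWelch(hmm_toy, obs_toy, maxI=10)
print(bw_toy['A'].sum(axis=1))        # approximately [1., 1.]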