In [1]:
import numpy as np
import pandas as pd
from pandas import *
# Inner join: merge aligns rows by the shared 'key' column.
df1=DataFrame({'key':['b','b','a','c','a','a','b'],
'data1':range(7)})
df2=DataFrame({'key':['a','b','d'],
'data2':range(3)})
print (df1)
print (df2)
In [3]:
merge(df1,df2,on='key') # inner join on the common 'key' column
Out[3]:
In [4]:
df3=DataFrame({'lkey':['b','b','a','c','a','a','b'],
'data1':range(7)})
df4=DataFrame({'rkey':['a','b','d'],
'data2':range(3)})
print (df3)
print (df4)
merge(df3,df4,left_on='lkey',right_on='rkey') # the join keys are named differently on each side
Out[4]:
In [5]:
merge(df1,df2,how='outer') # outer join: union of the keys, missing entries become NaN
Out[5]:
In [6]:
df1=DataFrame({'key':['b','b','a','c','a','b'],
'data1':range(6)})
df2=DataFrame({'key':['a','b','a','b','d'],
'data2':range(5)})
print (df1)
print (df2)
merge(df1,df2,on='key',how='left') # Cartesian product of matching rows: 3 'b' rows on the left x 2 on the right -> 6 'b' rows
Out[6]:
In [7]:
left=DataFrame({'key1':['foo','foo','bar'],
'key2':['one','two','one'],
'lval':[1,2,3]})
right=DataFrame({'key1':['foo','foo','bar','bar'],
'key2':['one','one','one','two'],
'rval':[4,5,6,7]})
print(left)
print(right)
merge(left,right,on=['key1','key2'],how='outer') # merge on two key columns at once
Out[7]:
In [8]:
left1=DataFrame({'key':['a','b','a','a','b','c'],
'value':range(6)})
right1=DataFrame({'group_val':[3.5,7]},index=['a','b'])
print(left1)
print(right1)
merge(left1,right1,left_on='key',right_index=True) # use the right frame's index as its join key
Out[8]:
In [9]:
# Hierarchical-index merge: the right frame's two-level index matches two left columns.
lefth=DataFrame({'key1':['Ohio','Ohio','Ohio','Nevada','Nevada'],
'key2':[2000,2001,2002,2001,2002],
'data':np.arange(5)})
righth=DataFrame(np.arange(12).reshape((6,2)),
index=[['Nevada','Nevada','Ohio','Ohio','Ohio','Ohio'],
[2001,2000,2000,2000,2001,2002]],
columns=['event1','event2'])
print(lefth)
print(righth)
merge(lefth,righth,left_on=['key1','key2'],right_index=True) # default how='inner': intersection of the keys
Out[9]:
In [10]:
left2=DataFrame([[1,2],[3,4],[5,6]],index=['a','c','e'],
columns=['Ohio','Nevada'])
right2=DataFrame([[7,8],[9,10],[11,12],[13,14]],
index=['b','c','d','e'],columns=['Missouri','Alabama'])
print(left2)
print(right2)
merge(left2,right2,how='outer',left_index=True,right_index=True)
left2.join(right2,how='outer') # join is shorthand for an index-on-index merge
Out[10]:
In [11]:
# Concatenation with concat
s1=Series([0,1],index=['a','b'])
s2=Series([2,3,4],index=['c','d','e'])
s3=Series([5,6],index=['f','g'])
print(concat([s1,s2,s3]))
s4=concat([s1*5,s3])
print (s4)
print(concat([s1,s4],axis=1)) # glue along columns (outer join on the index)
concat([s1,s4],axis=1,join='inner')
Out[11]:
In [12]:
result=concat([s1,s1,s3],keys=['one','two','three']) # keys build a hierarchical (two-level) index
print(result)
result.unstack() # pivot the inner index level into columns
Out[12]:
In [13]:
print(concat([s1,s2,s3],axis=1,keys=['one','two','three']))
#concat([df1,df2],axis=1,keys=['level1','level2'],
# names=['upper','lower'])
df1=DataFrame(np.random.randn(3,4),columns=['a','b','c','d'])
df2=DataFrame(np.random.randn(2,3),columns=['b','d','a'])
print(df1)
print(df2)
In [14]:
concat([df1,df2],ignore_index=True) # discard the original row labels and renumber
Out[14]:
#stack旋转
data=DataFrame(np.arange(6).reshape((2,3)),index=pandas.Index(['Ohio',
'Colorado'],name='state'),columns=pandas.Index(['one','two','three'],name='number'))
print(data)
result=data.stack() #列转换为行
print(result)
type(result) #result为一个序列Series
result.unstack()
Out[15]:
In [16]:
print(result.unstack(0)) # unstack the outermost level, selected by position
print(result.unstack('state')) # or selected by name
In [17]:
import pandas as pd
s1=Series([0,1,2,3],index=['a','b','c','d'])
s2=Series([4,5,6],index=['c','d','e'])
data2=pd.concat([s1,s2],keys=['one','two'])
print(data2)
data2.unstack()
Out[17]:
In [18]:
data2.unstack().stack() # round-trip back to the hierarchical index (stack drops the NaNs)
Out[18]:
In [19]:
df=DataFrame({'left':result,'right':result+5},columns=pd.Index(['left','right'],name='side')) # hierarchical labels on both axes
print(df)
df.unstack('state')
Out[19]:
In [20]:
# Long-to-wide reshaping of the macro data set.
data = pd.read_csv('macrodata.csv')
periods = pd.PeriodIndex(year=data.year, quarter=data.quarter, name='date')
data = DataFrame(data.to_records(),
                 columns=pd.Index(['realgdp', 'infl', 'unemp'], name='item'),
                 index=periods.to_timestamp('D', 'end'))
# Long format: one row per (date, item) observation.
ldata = data.stack().reset_index().rename(columns={0: 'value'})
# DataFrame.pivot arguments are keyword-only since pandas 2.0;
# positional calls raise TypeError there.
wdata = ldata.pivot(index='date', columns='item', values='value')
print(ldata[:10])
pivoted=ldata.pivot(index='date', columns='item', values='value') # date as row index, item as columns, value as the fill
pivoted.head()
Out[20]:
In [24]:
# Add a second value column; pivoting without values= keeps both.
ldata['value2']=np.random.randn(len(ldata))
print(ldata[:10])
# Keyword-only since pandas 2.0; omitting values= yields hierarchical columns.
pivoted=ldata.pivot(index='date', columns='item')
print(pivoted[:5]) # the pivot table with a (value/value2, item) column hierarchy
print(pivoted['value'][:5])
In [29]:
# Removing duplicates
data=DataFrame({'k1':['one']*3+['two']*4,
'k2':[1,1,2,3,3,4,4]})
print(data)
print(data.duplicated()) # True for rows that repeat an earlier row
data.drop_duplicates() # drop the repeated rows
Out[29]:
In [31]:
data['v1']=range(7)
data.drop_duplicates(['k1']) # drop duplicates considering only column 'k1'
Out[31]:
In [33]:
data=DataFrame({'food':['bacon','pulled pork','bacon','Pastrami',
'corned beef','Bacon','pastrami','honey ham',
'nova lox'],
'ounces':[4,3,12,6,7.5,8,3,5,6]})
data
Out[33]:
In [40]:
meat_to_animal={'bacon':'pig','pulled pork':'pig','pastrami':'cow','corned beef':'cow','honey ham':'pig','nova lox':'salmon'}
meat_to_animal # dict used as a value-to-value mapping
Out[40]:
In [41]:
data['animal']=data['food'].map(str.lower).map(meat_to_animal) # lower-case first so 'Bacon' maps too
print (data)
data['food'].map(lambda x:meat_to_animal[x.lower()]) # the same mapping written as a lambda
Out[41]:
In [43]:
# Replacing values
data=Series([1,-999,2,-999,-1000,3])
data
data.replace(-999,np.nan) # -999 is a missing-data sentinel
Out[43]:
In [47]:
print(data.replace([-999,-1000],np.nan)) # replace several values at once
print(data.replace({-999:np.nan,-1000:0})) # or give each value its own replacement
In [49]:
data=DataFrame(np.arange(12).reshape((3,4)),
index=['Ohio','Colorado','New York'],
columns=['Ohio','two','three','four'])
print(data)
data.index.map(str.upper) # transform the axis labels
Out[49]:
In [50]:
data.rename(index=str.title,columns=str.upper) # rename returns a new frame; the original is untouched
Out[50]:
In [52]:
data.rename(index={'Ohio':'INDIANA'},
columns={'three':'peekaboo'}) # update only part of the axis labels
Out[52]:
In [53]:
# Discretization and binning
ages=[20,22,25,27,21,23,37,31,61,45,41,32]
bins=[18,25,35,60,100] # custom bin edges
cats=pd.cut(ages,bins)
cats
Out[53]:
In [60]:
# Categorical.labels was deprecated long ago and removed; .codes is the
# modern accessor for the integer bin index of each element.
print(cats.codes) # which of the four bins each age falls into
pd.value_counts(cats) # counts per bin
Out[60]:
In [61]:
group_names=['Youth','YoungAdult','MiddleAged','Senior'] # a name for each bin
pd.cut(ages,bins,labels=group_names)
Out[61]:
In [65]:
data=np.random.rand(20)
cc=pd.cut(data,4,precision=2) # 4 bins computed from the data's min/max
print(cc)
pd.value_counts(cc) # note: cut produces equal-width bins
Out[65]:
In [68]:
data=np.random.randn(1000)
cats=pd.qcut(data,4) # quartiles
print(cats)
pd.value_counts(cats) # qcut produces bins with roughly equal element counts
Out[68]:
In [69]:
# Detecting and filtering outliers
np.random.seed(12345)
data=DataFrame(np.random.randn(1000,4))
data.describe()
Out[69]:
In [72]:
col=data[3] # values in one column whose absolute value exceeds 3
print(col[np.abs(col)>3])
data[np.abs(col)>3]
Out[72]:
In [78]:
data[(np.abs(data)>3).any(axis=1)] # rows where any column exceeds 3 in absolute value (axis is keyword-only in pandas 2.x)
Out[78]:
In [82]:
data[np.abs(data)>3]=np.sign(data)*3 # np.sign yields -1 or 1, so outliers are capped at +/-3
data.describe() # values are now limited to the range [-3, 3]
Out[82]:
In [2]:
# Permutation and random sampling
import pandas as pd
df=DataFrame(np.arange(5*4).reshape(5,4))
sampler=np.random.permutation(5)
sampler
Out[2]:
In [3]:
print(df)
print(df.take(sampler)) # reorder the rows by the permutation
In [5]:
df.take(np.random.permutation(len(df))[:3]) # one way to draw a random subset without replacement!
Out[5]:
In [6]:
# Sampling with replacement (a resampling-style draw)
bag=np.array([5,7,-1,6,4])
sampler=np.random.randint(0,len(bag),size=10)
print(sampler)
draws=bag.take(sampler)
draws
Out[6]:
In [11]:
# Dummy / indicator variables
df=DataFrame({'key':['b','b','a','c','a','b'],
'data1':np.random.randint(0,6,6)})
print(df)
pd.get_dummies(df['key']) # one indicator column per distinct key value
Out[11]:
In [12]:
dummies=pd.get_dummies(df['key'],prefix='key')
df_with_dummy=df[['data1']].join(dummies)
df_with_dummy
Out[12]:
In [14]:
mnames=['movie_id','title','genres']
movies=pd.read_table('movies.dat',sep='::',header=None,names=mnames) # NOTE(review): multi-char sep may need engine='python' on newer pandas — confirm
movies[:10]
Out[14]:
In [16]:
genre_iter=(set(x.split('|')) for x in movies.genres) # generator of per-movie genre sets
print(genre_iter)
genres=sorted(set.union(*genre_iter)) # union of all the genre sets, sorted
print(genres)
In [17]:
dummies=DataFrame(np.zeros((len(movies),len(genres))),columns=genres)
# .ix was removed from pandas; use label-based .loc for the assignment
# (the row index is a default RangeIndex, so i is both label and position).
for i,gen in enumerate(movies.genres): # enumerate gives index and element together
    dummies.loc[i,gen.split('|')]=1 # set this movie's genre columns to 1
movies_windic=movies.join(dummies.add_prefix('Genre_'))
movies_windic.iloc[0] # first row, selected by position (.ix removed)
Out[17]:
In [18]:
# Discretization combined with indicator variables
values=np.random.rand(10)
print(values)
bins=[0,0.2,0.4,0.6,0.8,1]
pd.cut(values,bins)
Out[18]:
In [19]:
pd.get_dummies(pd.cut(values,bins)) # one-hot encoding of the bin membership
Out[19]:
In [31]:
# String object methods
val='a,b, guido'
print(val.split(','))
pieces=[x.strip() for x in val.split(',')] # strip removes surrounding whitespace (spaces, newlines, ...)
pieces
Out[31]:
In [35]:
first,second,third=pieces
print(first+'::'+second+'::'+third)
print('::'.join(pieces)) # join is the idiomatic way to concatenate with a delimiter
val.count(',') # count occurrences of the substring
Out[35]:
In [36]:
val.replace(',','::')
Out[36]:
In [40]:
## The re regular-expression module
import re
text="foo bar\t baz \tqux"
print(re.split(r'\s+',text)) # raw string: '\s' is an invalid escape in a plain literal (SyntaxWarning, future error)
regex=re.compile(r'\s+') # compile once so the pattern can be reused
print(regex.split(text))
regex.findall(text) # the whitespace runs that acted as delimiters
Out[40]:
In [41]:
text="""Dave dave@google.com
Steve steve@gmail.com
Rob rob@gmail.com
Ryan ryan@yahoo.com
"""
pattern=r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}' # TLD of two to four letters
regex=re.compile(pattern,flags=re.IGNORECASE)
regex.findall(text) # every email address in the text
Out[41]:
In [43]:
m=regex.search(text) # first match only; returns a match object
print(m)
text[m.start():m.end()]
Out[43]:
In [50]:
print(regex.sub('REDACTED',text)) # replace every match
pattern=r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})' # capture groups: user, domain, suffix
regex=re.compile(pattern,flags=re.IGNORECASE)
m=regex.match('wesm@bright.net')
m
m.groups() # tuple of the captured groups
Out[50]:
In [51]:
regex.findall(text) # with groups, findall returns a list of tuples
Out[51]:
In [54]:
data={'Dave':'dave@google.com','Steve':'steve@gmail.com',
'Rob':'rob@gmail.com','Wes':np.nan}
data=Series(data)
print(data)
data.isnull()
Out[54]:
In [56]:
pattern # the pattern splits an address into three parts
data.str.findall(pattern,flags=re.IGNORECASE) # vectorized: skips NaN instead of raising
Out[56]:
In [59]:
# Series.str.match now returns a boolean Series (it returned the regex groups
# in very old pandas), so matches.str.get(1) would fail; rebuild the grouped
# form with findall: each element becomes the first match's
# (user, domain, suffix) tuple, NaN where there is no match.
matches=data.str.findall(pattern,flags=re.IGNORECASE).str[0]
print(matches)
print(matches.str.get(1)) # the domain component
matches.str[0] # the user component
Out[59]: