Machine Learning: Data Science Packages - Day 3

1 pandas Indexing



import pandas as pd
import numpy as np
Indexing
In [2]:

s = pd.Series(np.random.rand(5), index = list('abcde'))
s
Out[2]:
a    0.143653
b    0.648518
c    0.782733
d    0.739794
e    0.005048
dtype: float64
In [3]:

s.index
Out[3]:
Index(['a', 'b', 'c', 'd', 'e'], dtype='object')
In [4]:

s.index.name = 'alpha'  # give the index a name
s
Out[4]:
alpha
a    0.143653
b    0.648518
c    0.782733
d    0.739794
e    0.005048
dtype: float64
In [5]:

s = pd.DataFrame(np.random.randn(4, 3), columns = ['one', 'two', 'three'])
s
Out[5]:
one	two	three
0	0.650481	0.729876	0.679825
1	-0.985725	-0.048903	-2.196983
2	1.143242	0.624922	-0.818083
3	-0.379713	1.778475	0.898793
In [6]:

s.index
Out[6]:
RangeIndex(start=0, stop=4, step=1)
In [7]:

s.columns
Out[7]:
Index(['one', 'two', 'three'], dtype='object')
In [8]:

s.index.name = 'row'
s.columns.name = 'col'
s
Out[8]:
col	one	two	three
row			
0	0.650481	0.729876	0.679825
1	-0.985725	-0.048903	-2.196983
2	1.143242	0.624922	-0.818083
3	-0.379713	1.778475	0.898793
Duplicate index labels
In [9]:

s = pd.Series(np.arange(6), index = list('abcbda'))
s
Out[9]:
a    0
b    1
c    2
b    3
d    4
a    5
dtype: int32
In [10]:

s['a']  # a duplicated label returns a Series
Out[10]:
a    0
a    5
dtype: int32
In [11]:

s['c']  # a unique label returns just a scalar
Out[11]:
2
In [12]:

s.index.is_unique  # check whether the index labels are unique
Out[12]:
False
In [13]:

s.index.unique()  # return only the unique index labels
Out[13]:
Index(['a', 'b', 'c', 'd'], dtype='object')
In [14]:

s.groupby(s.index).sum()  # sum the values that share a label
Out[14]:
a    5
b    4
c    2
d    4
dtype: int32
In [15]:

s.groupby(s.index).first()  # keep only the first value for each label
Out[15]:
a    0
b    1
c    2
d    4
dtype: int32
In [16]:

s.groupby(s.index).mean()  # average the values that share a label
Out[16]:
a    2.5
b    2.0
c    2.0
d    4.0
dtype: float64
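
As an aside, the three reductions above can be computed in one pass by grouping on the index level and passing agg a list of functions; a minimal sketch over the same Series, not part of the original notebook:

s.groupby(level = 0).agg(['sum', 'first', 'mean'])  # one row per unique label, one column per reduction
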
MultiIndex (hierarchical indexing)
In [17]:

a = [['a', 'a', 'a', 'b', 'b', 'c', 'c'], [1, 2, 3, 1, 2, 2, 3]]
t = list(zip(*a))
t
Out[17]:
[('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), ('c', 2), ('c', 3)]
In [19]:

index = pd.MultiIndex.from_tuples(t, names = ['level1', 'level2'])
index
Out[19]:
MultiIndex([('a', 1),
            ('a', 2),
            ('a', 3),
            ('b', 1),
            ('b', 2),
            ('c', 2),
            ('c', 3)],
           names=['level1', 'level2'])
In [20]:

s = pd.Series(np.random.rand(7), index = index)
s
Out[20]:
level1  level2
a       1         0.251561
        2         0.746525
        3         0.762872
b       1         0.154093
        2         0.980960
c       2         0.296425
        3         0.178885
dtype: float64
In [21]:

s['b']  # returns a Series indexed by level2
Out[21]:
level2
1    0.154093
2    0.980960
dtype: float64
In [22]:

s['b':'c']
Out[22]:
level1  level2
b       1         0.154093
        2         0.980960
c       2         0.296425
        3         0.178885
dtype: float64
In [23]:

s[['a', 'c']]
Out[23]:
level1  level2
a       1         0.251561
        2         0.746525
        3         0.762872
c       2         0.296425
        3         0.178885
dtype: float64
In [24]:

s[:, 2]  # any label on the first level, label 2 on the second level
Out[24]:
level1
a    0.746525
b    0.980960
c    0.296425
dtype: float64
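
The same cross-section can also be selected by level name with xs, which reads more explicitly than the positional slice; a small sketch, assuming the level names defined above:

s.xs(2, level = 'level2')  # every entry whose level2 label equals 2
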
MultiIndex on a DataFrame
In [27]:

df = pd.DataFrame(np.random.randint(1, 10, (4, 3)), index = [['a', 'a', 'b', 'b'], [1, 2, 1, 2]], 
                  columns = [['one', 'one', 'two'], ['blue', 'red', 'blue']])
df.index.names = ['row_1', 'row_2']
df.columns.names = ['col_1', 'col_2']
df
Out[27]:
col_1         one      two
col_2        blue red  blue
row_1 row_2
a     1         2   1     4
      2         8   9     3
b     1         8   2     5
      2         7   8     8
In [28]:

df.loc['a']
Out[28]:
col_1	one	two
col_2	blue	red	blue
row_2			
1	2	1	4
2	8	9	3
In [29]:

df.loc['a', 1]  # returns a Series
Out[29]:
col_1  col_2
one    blue     2
       red      1
two    blue     4
Name: (a, 1), dtype: int32
In [30]:

df.loc['a'].loc[1]
Out[30]:
col_1  col_2
one    blue     2
       red      1
two    blue     4
Name: 1, dtype: int32
Swapping index levels
In [31]:

df2 = df.swaplevel('row_1', 'row_2')
df2
Out[31]:
col_1	one	two
col_2	blue	red	blue
row_2	row_1			
1	a	2	1	4
2	a	8	9	3
1	b	8	2	5
2	b	7	8	8
In [38]:

df2.sort_index(axis = 0)  # sort the rows by the (now swapped) row index
Out[38]:
col_1         one      two
col_2        blue red  blue
row_2 row_1
1     a         2   1     4
      b         8   2     5
2     a         8   9     3
      b         7   8     8
In [39]:

df2.sort_index(axis = 1)  # sort the columns by the column index; the rows are left untouched
Out[39]:
col_1	one	two
col_2	blue	red	blue
row_2	row_1			
1	a	2	1	4
2	a	8	9	3
1	b	8	2	5
2	b	7	8	8
In [40]:

df
Out[40]:
col_1         one      two
col_2        blue red  blue
row_1 row_2
a     1         2   1     4
      2         8   9     3
b     1         8   2     5
      2         7   8     8
In [41]:

df.sum(level = 0)  # sum over the first index level
Out[41]:
col_1	one	two
col_2	blue	red	blue
row_1			
a	10	10	7
b	15	10	13
In [42]:

df.sum(level = 1)  # sum over the second index level
Out[42]:
col_1	one	two
col_2	blue	red	blue
row_2			
1	10	3	9
2	15	17	11
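
Note that sum(level = ...) is deprecated in recent pandas releases; the equivalent spelling goes through groupby on the index level. A sketch using the level names defined above:

df.groupby(level = 'row_1').sum()  # same result as df.sum(level = 0)
df.groupby(level = 'row_2').sum()  # same result as df.sum(level = 1)
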
Turning columns into the index
In [43]:

df = pd.DataFrame({'a':np.arange(7),
                   'b':np.arange(7, 0, -1),
                   'c':['one', 'one', 'one', 'two', 'two', 'two', 'two'],
                   'd':[0, 1, 2, 0, 1, 2, 3]})
df
Out[43]:
a	b	c	d
0	0	7	one	0
1	1	6	one	1
2	2	5	one	2
3	3	4	two	0
4	4	3	two	1
5	5	2	two	2
6	6	1	two	3
In [45]:

df.set_index('c')  # use column c as the index
Out[45]:
a	b	d
c			
one	0	7	0
one	1	6	1
one	2	5	2
two	3	4	0
two	4	3	1
two	5	2	2
two	6	1	3
In [46]:

df.set_index(['c', 'd'])  # use c and d as a two-level index
Out[46]:
       a  b
c   d
one 0  0  7
    1  1  6
    2  2  5
two 0  3  4
    1  4  3
    2  5  2
    3  6  1
In [47]:

df2 = df.set_index(['c', 'd'])
df2
Out[47]:
       a  b
c   d
one 0  0  7
    1  1  6
    2  2  5
two 0  3  4
    1  4  3
    2  5  2
    3  6  1
In [48]:

df2.reset_index()  # turn the multi-level index back into ordinary columns
Out[48]:
c	d	a	b
0	one	0	0	7
1	one	1	1	6
2	one	2	2	5
3	two	0	3	4
4	two	1	4	3
5	two	2	5	2
6	two	3	6	1
In [49]:

df2.reset_index().sort_index(axis = 'columns')  # sort by the column labels
Out[49]:
a	b	c	d
0	0	7	one	0
1	1	6	one	1
2	2	5	one	2
3	3	4	two	0
4	4	3	two	1
5	5	2	two	2
6	6	1	two	3

2 Group-by Computation


import pandas as pd
import numpy as np
In [2]:

df = pd.DataFrame({'key1':['a', 'a', 'b', 'b', 'a'],
                   'key2':['one', 'two', 'one', 'two', 'one'],
                   'data1':np.random.randint(1, 10, 5),
                   'data2':np.random.randint(1, 10, 5)})
df
Out[2]:
key1	key2	data1	data2
0	a	one	5	9
1	a	two	5	6
2	b	one	5	4
3	b	two	3	6
4	a	one	6	2
In [3]:

df['data1']
Out[3]:
0    5
1    5
2    5
3    3
4    6
Name: data1, dtype: int32
In [4]:

df['data1'].groupby(df['key1'])
Out[4]:
<pandas.core.groupby.generic.SeriesGroupBy object at 0x0000004BC4731630>
In [5]:

df['data1'].groupby(df['key1']).mean()
Out[5]:
key1
a    5.333333
b    4.000000
Name: data1, dtype: float64
In [6]:

df
Out[6]:
key1	key2	data1	data2
0	a	one	5	9
1	a	two	5	6
2	b	one	5	4
3	b	two	3	6
4	a	one	6	2
In [7]:

# the grouping key can also be supplied by hand
key = [1, 2, 1, 1, 2]
df['data1'].groupby(key).mean()  # the key passed in is a plain list
Out[7]:
1    4.333333
2    5.500000
Name: data1, dtype: float64
Grouping by several keys
In [8]:

# several keys can be passed, giving a hierarchically indexed result
df['data1'].groupby([df['key1'], df['key2']]).sum()
Out[8]:
key1  key2
a     one     11
      two      5
b     one      5
      two      3
Name: data1, dtype: int32
In [9]:

df['data1'].groupby([df['key1'], df['key2']]).size()  # number of rows in each group
Out[9]:
key1  key2
a     one     2
      two     1
b     one     1
      two     1
Name: data1, dtype: int64
In [10]:

df.groupby('key1').sum()
# key2 does not appear in the result: its values are not numeric,
# so the column is dropped when summing
Out[10]:
data1	data2
key1		
a	16	17
b	8	10
In [11]:

df
Out[11]:
key1	key2	data1	data2
0	a	one	5	9
1	a	two	5	6
2	b	one	5	4
3	b	two	3	6
4	a	one	6	2
In [12]:

df.groupby('key1').sum()['data1']
Out[12]:
key1
a    16
b     8
Name: data1, dtype: int32
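
Selecting the column before aggregating avoids summing the other columns only to discard them; a small sketch that yields the same Series as above:

df.groupby('key1')['data1'].sum()
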
Grouping that yields a MultiIndex
In [13]:

mean = df.groupby(['key1', 'key2']).sum()
mean
Out[13]:
           data1  data2
key1 key2
a    one      11     11
     two       5      6
b    one       5      4
     two       3      6
In [14]:

type(mean)
Out[14]:
pandas.core.frame.DataFrame
In [15]:

mean.unstack()  # pivot the inner index level into columns
Out[15]:
data1	data2
key2	one	two	one	two
key1				
a	11	5	11	6
b	5	3	4	6
In [16]:

# a GroupBy object supports the iterator protocol
for name, group in df.groupby('key1'):
    print(name)
    print(group)
a
  key1 key2  data1  data2
0    a  one      5      9
1    a  two      5      6
4    a  one      6      2
b
  key1 key2  data1  data2
2    b  one      5      4
3    b  two      3      6
In [17]:

df
Out[17]:
key1	key2	data1	data2
0	a	one	5	9
1	a	two	5	6
2	b	one	5	4
3	b	two	3	6
4	a	one	6	2
In [18]:

# it can also be turned into a list of (name, group) pairs
list(df.groupby('key1'))
Out[18]:
[('a',   key1 key2  data1  data2
  0    a  one      5      9
  1    a  two      5      6
  4    a  one      6      2), ('b',   key1 key2  data1  data2
  2    b  one      5      4
  3    b  two      3      6)]
In [19]:

type(list(df.groupby('key1'))[0][0])
Out[19]:
str
In [20]:

# or into a dict mapping group name to group DataFrame
dict(list(df.groupby('key1')))
Out[20]:
{'a':   key1 key2  data1  data2
 0    a  one      5      9
 1    a  two      5      6
 4    a  one      6      2, 'b':   key1 key2  data1  data2
 2    b  one      5      4
 3    b  two      3      6}
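
A single group can also be pulled out directly, without materializing the whole dict; a brief sketch:

df.groupby('key1').get_group('a')
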
In [21]:

df.dtypes  # the data type of every column
Out[21]:
key1     object
key2     object
data1     int32
data2     int32
dtype: object
In [22]:

type(df.dtypes)
Out[22]:
pandas.core.series.Series
In [23]:

df
Out[23]:
key1	key2	data1	data2
0	a	one	5	9
1	a	two	5	6
2	b	one	5	4
3	b	two	3	6
4	a	one	6	2
In [24]:

df.groupby(df.dtypes, axis = 1).sum()
Out[24]:
int32	object
0	14	aone
1	11	atwo
2	9	bone
3	9	btwo
4	8	aone
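
Grouping along axis = 1 is deprecated in recent pandas (2.x); if only the numeric part is wanted, selecting by dtype first gives the same row sums. A sketch, not from the original notebook:

df.select_dtypes('number').sum(axis = 1)  # matches the int32 column above
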
Grouping with a dict
In [25]:

df = pd.DataFrame(np.random.randint(1, 10, (5, 5)), columns = list('abcde'), 
                  index = ['alice', 'bob', 'jack', 'marry', 'brand'])
df.iloc[1, 1:3] = np.NaN
df
Out[25]:
a	b	c	d	e
alice	4	1.0	2.0	7	9
bob	9	NaN	NaN	4	5
jack	4	5.0	3.0	3	5
marry	5	8.0	3.0	4	2
brand	3	4.0	5.0	2	9
In [26]:

mapping = {'a':'red', 'b':'red', 'c':'blue', 'd':'orange', 'e':'blue'}
grouped = df.groupby(mapping, axis = 1)
grouped.sum()
# the result shows that NaN is treated as 0 when summing
Out[26]:
blue	orange	red
alice	11.0	7.0	5.0
bob	5.0	4.0	9.0
jack	8.0	3.0	9.0
marry	5.0	4.0	13.0
brand	14.0	2.0	7.0
In [27]:

grouped.size()
Out[27]:
blue      2
orange    1
red       2
dtype: int64
In [28]:

grouped.count()
Out[28]:
blue	orange	red
alice	2	1	2
bob	1	1	1
jack	2	1	2
marry	2	1	2
brand	2	1	2
Grouping with a function
In [51]:

def _group_key(idx):
    print(idx)
    return idx

df.groupby(_group_key)
alice
bob
jack
marry
brand
Out[51]:
<pandas.core.groupby.generic.DataFrameGroupBy object at 0x0000004BC47B8550>
In [54]:

type(df.index)
Out[54]:
pandas.core.indexes.base.Index
In [44]:

df
Out[44]:
a	b	c	d	e
alice	4	1.0	2.0	7	9
bob	9	NaN	NaN	4	5
jack	4	5.0	3.0	3	5
marry	5	8.0	3.0	4	2
brand	3	4.0	5.0	2	9
In [45]:

def _group_key(idx):
    print(idx)
    return idx

df.groupby(_group_key).size()
alice
bob
jack
marry
brand
Out[45]:
alice    1
bob      1
brand    1
jack     1
marry    1
dtype: int64
In [48]:

def _group_key(idx):
    print(idx)
    return len(idx)

df.groupby(_group_key).size  # note: no parentheses, so this returns the bound method itself
alice
bob
jack
marry
brand
Out[48]:
<bound method GroupBy.size of <pandas.core.groupby.generic.DataFrameGroupBy object at 0x0000004BC4800C88>>
In [49]:

df.groupby(len).size()
Out[49]:
3    1
4    1
5    3
dtype: int64
In [50]:

df
Out[50]:
a	b	c	d	e
alice	4	1.0	2.0	7	9
bob	9	NaN	NaN	4	5
jack	4	5.0	3.0	3	5
marry	5	8.0	3.0	4	2
brand	3	4.0	5.0	2	9
In [52]:

df.groupby(len).sum()
Out[52]:
a	b	c	d	e
3	9	0.0	0.0	4	5
4	4	5.0	3.0	3	5
5	12	13.0	10.0	13	20
Grouping by a level of a MultiIndex
In [34]:

# columns = pd.MultiIndex.from_arrays([['China', 'USA', 'China', 'USA', 'China'], ['a', 'a', 'b', 'c', 'b']],
#                                    names = ['country', 'index'])
# df = pd.DataFrame(np.random.randint(0, 10, (5, 5)), columns = columns)
# df
In [35]:

# group by country
# df.groupby(level = 'country', axis = 1).sum() # grouping works on rows by default, so axis=1 is needed
In [36]:

1
# 按照index 

3 Aggregation


Built-in aggregation functions
In [1]:

import pandas as pd
import numpy as np
In [6]:

df = pd.DataFrame({'key1':['a', 'a', 'b', 'b', 'a'],
                   'key2':['one', 'two', 'one', 'two', 'one'],
                   'data1':np.random.randint(1, 10, 5),
                   'data2':np.random.randint(1, 10, 5)})
df
Out[6]:
key1	key2	data1	data2
0	a	one	9	5
1	a	two	2	1
2	b	one	4	6
3	b	two	5	5
4	a	one	2	3
In [7]:

df.groupby('key1').sum()
Out[7]:
data1	data2
key1		
a	13	9
b	9	11
In [8]:

df.groupby('key1').mean()
Out[8]:
data1	data2
key1		
a	4.333333	3.0
b	4.500000	5.5
In [9]:

df.groupby('key1').min()
Out[9]:
key2	data1	data2
key1			
a	one	2	1
b	one	4	5
In [10]:

df.groupby('key1').max()
Out[10]:
key2	data1	data2
key1			
a	two	9	5
b	two	5	6
In [11]:

df.groupby('key1').describe()
Out[11]:
data1	data2
count	mean	std	min	25%	50%	75%	max	count	mean	std	min	25%	50%	75%	max
key1																
a	3.0	4.333333	4.041452	2.0	2.00	2.0	5.50	9.0	3.0	3.0	2.000000	1.0	2.00	3.0	4.00	5.0
b	2.0	4.500000	0.707107	4.0	4.25	4.5	4.75	5.0	2.0	5.5	0.707107	5.0	5.25	5.5	5.75	6.0
Custom aggregation functions
In [12]:

grouped = df.groupby('key1')
grouped
Out[12]:
<pandas.core.groupby.generic.DataFrameGroupBy object at 0x000000A6A6987F60>
In [15]:

df
Out[15]:
key1	key2	data1	data2
0	a	one	9	5
1	a	two	2	1
2	b	one	4	6
3	b	two	5	5
4	a	one	2	3
In [17]:

def peak_range(s):
    print(type(s))
    return s.max() - s.min()

grouped.agg(peak_range)
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
Out[17]:
data1	data2
key1		
a	7	4
b	1	1
In [18]:

# apply several aggregation functions at once
grouped.agg(['std', 'mean', 'sum', peak_range])
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
Out[18]:
data1	data2
std	mean	sum	peak_range	std	mean	sum	peak_range
key1								
a	4.041452	4.333333	13	7	2.000000	3.0	9	4
b	0.707107	4.500000	9	1	0.707107	5.5	11	1
In [19]:

# give the custom aggregation a nicer column name
grouped.agg(['std', 'mean', 'sum', ('range', peak_range)])
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
Out[19]:
data1	data2
std	mean	sum	range	std	mean	sum	range
key1								
a	4.041452	4.333333	13	7	2.000000	3.0	9	4
b	0.707107	4.500000	9	1	0.707107	5.5	11	1
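
pandas 0.25+ also supports "named aggregation", where keyword arguments name the output columns directly and avoid the nested column index; a sketch with the same functions:

grouped.agg(data1_mean = ('data1', 'mean'),
            data1_range = ('data1', peak_range),
            data2_sum = ('data2', 'sum'))
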
Applying different aggregations to different columns
In [22]:

d = {'data1':['mean', ('range', peak_range)], 'data2':'sum'}
grouped.agg(d)
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
Out[22]:
data1	data2
mean	range	sum
key1			
a	4.333333	7	9
b	4.500000	1	11
In [23]:

# keep key1 as an ordinary column instead of the index
grouped.agg(d).reset_index()
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
Out[23]:
key1	data1	data2
mean	range	sum
0	a	4.333333	7	9
1	b	4.500000	1	11
In [24]:

df.groupby('key1', as_index = False).agg(d)  # keep key1 as an ordinary column instead of the index
<class 'pandas.core.series.Series'>
<class 'pandas.core.series.Series'>
Out[24]:
key1	data1	data2
mean	range	sum
0	a	4.333333	7	9
1	b	4.500000	1	11
More advanced group-wise operations
In [25]:

df = pd.DataFrame({'key1':['a', 'a', 'b', 'b', 'a'],
                   'key2':['one', 'two', 'one', 'two', 'one'],
                   'data1':np.random.randint(1, 10, 5),
                   'data2':np.random.randint(1, 10, 5)})
df
Out[25]:
key1	key2	data1	data2
0	a	one	7	6
1	a	two	3	6
2	b	one	1	2
3	b	two	1	5
4	a	one	9	2
In [28]:

k1_mean = df.groupby('key1').mean()
k1_mean
Out[28]:
data1	data2
key1		
a	6.333333	4.666667
b	1.000000	3.500000
In [29]:

# rename the columns of k1_mean
k1_mean = df.groupby('key1').mean().add_prefix('mean_')
k1_mean
Out[29]:
mean_data1	mean_data2
key1		
a	6.333333	4.666667
b	1.000000	3.500000
In [30]:

# merge k1_mean back into df
pd.merge(df, k1_mean, left_on = 'key1', right_index = True)
Out[30]:
key1	key2	data1	data2	mean_data1	mean_data2
0	a	one	7	6	6.333333	4.666667
1	a	two	3	6	6.333333	4.666667
4	a	one	9	2	6.333333	4.666667
2	b	one	1	2	1.000000	3.500000
3	b	two	1	5	1.000000	3.500000
In [32]:

# an alternative: transform broadcasts the group means back to the original shape
k1_mean = df.groupby('key1').transform(np.mean).add_prefix('mean_')
k1_mean
Out[32]:
mean_data1	mean_data2
0	6.333333	4.666667
1	6.333333	4.666667
2	1.000000	3.500000
3	1.000000	3.500000
4	6.333333	4.666667
In [33]:

df[k1_mean.columns] = k1_mean
df
Out[33]:
key1	key2	data1	data2	mean_data1	mean_data2
0	a	one	7	6	6.333333	4.666667
1	a	two	3	6	6.333333	4.666667
2	b	one	1	2	1.000000	3.500000
3	b	two	1	5	1.000000	3.500000
4	a	one	9	2	6.333333	4.666667
Calling transform with a custom function
In [34]:

df = pd.DataFrame(np.random.randint(1, 10, (5, 5)), index = ['alice', 'bob', 'marry', 'jack', 'bran'],
                 columns = list('abcde'))
df
Out[34]:
a	b	c	d	e
alice	1	3	6	1	1
bob	1	3	8	5	5
marry	9	6	8	8	2
jack	5	4	7	2	9
bran	7	2	7	2	8
In [37]:

def demean(s):
    return s - s.mean()

key = ['one', 'one', 'two', 'one', 'two']
demeaned = df.groupby(key).transform(demean)
demeaned
Out[37]:
a	b	c	d	e
alice	-1.333333	-0.333333	-1.0	-1.666667	-4.0
bob	-1.333333	-0.333333	1.0	2.333333	0.0
marry	1.000000	2.000000	0.5	3.000000	-3.0
jack	2.666667	0.666667	0.0	-0.666667	4.0
bran	-1.000000	-2.000000	-0.5	-3.000000	3.0
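
A quick sanity check, sketched here rather than taken from the notebook: after demeaning, each group's mean should be zero (up to floating-point error) in every column.

demeaned.groupby(key).mean()
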
The apply() method
In [39]:

df = pd.DataFrame({'key1':list('aabbababaa'),
                  'key2':['one', 'two', 'one', 'two', 'one', 'one', 'two', 'one', 'one', 'two'],
                  'data1':np.random.randint(1, 10, 10),
                  'data2':np.random.randint(1, 10, 10)})
df
Out[39]:
key1	key2	data1	data2
0	a	one	9	8
1	a	two	6	9
2	b	one	1	4
3	b	two	6	2
4	a	one	4	2
5	b	one	9	3
6	a	two	5	6
7	b	one	4	9
8	a	one	1	2
9	a	two	8	9
In [42]:

# return the top n rows, sorted by data1 in descending order
def top(g, n = 2, columns = 'data1'):
    return g.sort_values(by = columns, ascending = False)[:n]

top(df)
Out[42]:
key1	key2	data1	data2
0	a	one	9	8
5	b	one	9	3
In [43]:

1
top(df, n = 3)
Out[43]:
key1	key2	data1	data2
0	a	one	9	8
5	b	one	9	3
9	a	two	8	9
In [44]:

1
df.groupby('key1').apply(top)
Out[44]:
       key1 key2  data1  data2
key1
a    0    a  one      9      8
     9    a  two      8      9
b    5    b  one      9      3
     3    b  two      6      2
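
If the extra key1 level in the result's index is unwanted, group_keys = False keeps the original row labels; a short sketch:

df.groupby('key1', group_keys = False).apply(top)
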
In [47]:

states = ['e1', 'e2', 'e3', 'e4', 'w1', 'w2', 'w3', 'w4']
group_key = ['east'] * 4 + ['west'] * 4
data = pd.Series(np.random.randn(8), index = states)
data[['e1', 'e3', 'w4']] = np.NaN  # mark several entries as missing at once
data
Out[47]:
e1         NaN
e2   -0.218916
e3         NaN
e4   -0.939582
w1    0.713938
w2    0.572105
w3    0.330894
w4         NaN
dtype: float64
In [48]:

# goal: fill the NaNs in the east group with the east group's mean, and likewise for west
# first compute each group's mean
data.groupby(group_key).mean()
Out[48]:
east   -0.579249
west    0.538979
dtype: float64
In [50]:

data.groupby(group_key).apply(lambda g : g.fillna(g.mean()))
Out[50]:
e1   -0.579249
e2   -0.218916
e3   -0.579249
e4   -0.939582
w1    0.713938
w2    0.572105
w3    0.330894
w4    0.538979
dtype: float64
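
The same fill can be written with transform, which broadcasts each group's mean back to the original index before fillna; a sketch:

data.fillna(data.groupby(group_key).transform('mean'))
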

4 Dates and Times


datetime in Python
In [1]:

import pandas as pd
import numpy as np
In [2]:

from datetime import datetime
from datetime import timedelta
In [3]:

now = datetime.now()  # the current date and time
now
Out[3]:
datetime.datetime(2020, 4, 10, 9, 23, 31, 801144)
In [4]:

now.year
Out[4]:
2020
In [5]:

type(now.year)
Out[5]:
int
In [6]:

now.year, now.month, now.day
Out[6]:
(2020, 4, 10)
timedelta
In [7]:

date1 = datetime(2020, 4, 20)
date2 = datetime(2020, 4, 16)
delta = date1 - date2
delta
Out[7]:
datetime.timedelta(days=4)
In [8]:

delta.days
Out[8]:
4
In [9]:

delta.total_seconds()
Out[9]:
345600.0
In [10]:

date2 + delta
Out[10]:
datetime.datetime(2020, 4, 20, 0, 0)
In [11]:

date2 + timedelta(4.5)  # add four and a half days to date2
Out[11]:
datetime.datetime(2020, 4, 20, 12, 0)
Converting between strings and datetime
In [12]:

# datetime to string
date = datetime(2020, 4, 9, 17, 17, 17)
date
Out[12]:
datetime.datetime(2020, 4, 9, 17, 17, 17)
In [13]:

str(date)
Out[13]:
'2020-04-09 17:17:17'
In [14]:

# format a datetime as a string
date.strftime('%Y/%m/%d %H:%M:%S')  # date parts joined with /, time parts with :
Out[14]:
'2020/04/09 17:17:17'
In [15]:

date.strftime('%y/%m/%d %H:%M:%S')  # %Y is the four-digit year, %y the two-digit year
Out[15]:
'20/04/09 17:17:17'
In [16]:

# string to datetime; strptime is a classmethod, so call it on the class
datetime.strptime('2020-4-9 17:24:24', '%Y-%m-%d %H:%M:%S')
Out[16]:
datetime.datetime(2020, 4, 9, 17, 24, 24)
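
pandas also has its own parser; pd.to_datetime accepts single strings, lists of strings and many common formats directly. A brief sketch:

pd.to_datetime('2020-04-09 17:24:24')  # returns a Timestamp
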
Time series in pandas
In [17]:

dates = [datetime(2020, 4, 1), datetime(2020, 4, 2), datetime(2020, 4, 3), datetime(2020, 4, 4)]
s = pd.Series(np.random.randn(4), index = dates)
s
Out[17]:
2020-04-01   -0.843503
2020-04-02   -0.135930
2020-04-03   -0.290967
2020-04-04   -1.474397
dtype: float64
In [18]:

type(s.index)
Out[18]:
pandas.core.indexes.datetimes.DatetimeIndex
In [19]:

type(s.index[0])
Out[19]:
pandas._libs.tslibs.timestamps.Timestamp
Generating time series with date_range
In [20]:

pd.date_range('20200401', '20200406')
Out[20]:
DatetimeIndex(['2020-04-01', '2020-04-02', '2020-04-03', '2020-04-04',
               '2020-04-05', '2020-04-06'],
              dtype='datetime64[ns]', freq='D')
In [21]:

pd.date_range('20200401', periods = 10)  # generate 10 dates
Out[21]:
DatetimeIndex(['2020-04-01', '2020-04-02', '2020-04-03', '2020-04-04',
               '2020-04-05', '2020-04-06', '2020-04-07', '2020-04-08',
               '2020-04-09', '2020-04-10'],
              dtype='datetime64[ns]', freq='D')
In [22]:

pd.date_range('20200401 17:42:42', periods = 10)
Out[22]:
DatetimeIndex(['2020-04-01 17:42:42', '2020-04-02 17:42:42',
               '2020-04-03 17:42:42', '2020-04-04 17:42:42',
               '2020-04-05 17:42:42', '2020-04-06 17:42:42',
               '2020-04-07 17:42:42', '2020-04-08 17:42:42',
               '2020-04-09 17:42:42', '2020-04-10 17:42:42'],
              dtype='datetime64[ns]', freq='D')
In [23]:

# if the time-of-day part is not wanted, normalize every timestamp to midnight
pd.date_range('20200401 17:42:42', periods = 10, normalize = True)
Out[23]:
DatetimeIndex(['2020-04-01', '2020-04-02', '2020-04-03', '2020-04-04',
               '2020-04-05', '2020-04-06', '2020-04-07', '2020-04-08',
               '2020-04-09', '2020-04-10'],
              dtype='datetime64[ns]', freq='D')
In [24]:

pd.date_range('20200401', periods = 10, freq = 'M')  # month-end frequency
Out[24]:
DatetimeIndex(['2020-04-30', '2020-05-31', '2020-06-30', '2020-07-31',
               '2020-08-31', '2020-09-30', '2020-10-31', '2020-11-30',
               '2020-12-31', '2021-01-31'],
              dtype='datetime64[ns]', freq='M')
In [25]:

pd.date_range('20200401', periods = 10, freq = '4H')  # every 4 hours
Out[25]:
DatetimeIndex(['2020-04-01 00:00:00', '2020-04-01 04:00:00',
               '2020-04-01 08:00:00', '2020-04-01 12:00:00',
               '2020-04-01 16:00:00', '2020-04-01 20:00:00',
               '2020-04-02 00:00:00', '2020-04-02 04:00:00',
               '2020-04-02 08:00:00', '2020-04-02 12:00:00'],
              dtype='datetime64[ns]', freq='4H')
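
A few other frequently used aliases, sketched here for reference (these calls are not in the original notebook): 'B' for business days, 'W-SUN' for weeks ending on Sunday, 'Q' for quarter ends.

pd.date_range('20200401', periods = 5, freq = 'B')
pd.date_range('20200401', periods = 5, freq = 'W-SUN')
pd.date_range('20200401', periods = 5, freq = 'Q')
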
Periods (spans of time)
In [26]:

p = pd.Period(2020)
p
# in the result, 'A' means annual frequency and 'DEC' means the year ends in December
Out[26]:
Period('2020', 'A-DEC')
In [27]:

p + 2
Out[27]:
Period('2022', 'A-DEC')
In [28]:

p = pd.Period(2020, freq = 'M')  # monthly frequency
p
Out[28]:
Period('2020-01', 'M')
In [30]:

p + 2
Out[30]:
Period('2020-03', 'M')
Period sequences with period_range
In [33]:

pd.period_range('2020-04', periods = 10, freq = 'M')
Out[33]:
PeriodIndex(['2020-04', '2020-05', '2020-06', '2020-07', '2020-08', '2020-09',
             '2020-10', '2020-11', '2020-12', '2021-01'],
            dtype='period[M]', freq='M')
In [34]:

# a quarterly sequence
pd.period_range('2020Q1', periods = 10, freq = 'Q')
Out[34]:
PeriodIndex(['2020Q1', '2020Q2', '2020Q3', '2020Q4', '2021Q1', '2021Q2',
             '2021Q3', '2021Q4', '2022Q1', '2022Q2'],
            dtype='period[Q-DEC]', freq='Q-DEC')
Converting between period frequencies
In [35]:

s = pd.Period('2020')  # annual frequency
s
Out[35]:
Period('2020', 'A-DEC')
In [36]:

s.asfreq('M')  # convert to monthly frequency
Out[36]:
Period('2020-12', 'M')
In [37]:

s.asfreq('M', how = 'start')
Out[37]:
Period('2020-01', 'M')
In [38]:

p = pd.Period('2020-04', freq = 'M')
p
Out[38]:
Period('2020-04', 'M')
In [39]:

p.asfreq('A-DEC')  # annual frequency, with the year ending in December
Out[39]:
Period('2020', 'A-DEC')
In [40]:

p.asfreq('A-MAR')  # annual frequency ending in March, so 2020-04 belongs to (fiscal) 2021
Out[40]:
Period('2021', 'A-MAR')
In [41]:

p = pd.Period('2020Q4', freq = 'Q-JAN')  # quarterly frequency, with the year ending in January
p
Out[41]:
Period('2020Q4', 'Q-JAN')
In [42]:

p.asfreq('M', how = 'start'), p.asfreq('M', how = 'end')
Out[42]:
(Period('2019-11', 'M'), Period('2020-01', 'M'))
In [52]:

# 4:20 pm on the second-to-last business day of the quarter
(p.asfreq('B', how = 'end') - 1).asfreq('T', how = 'start') + 16 * 60 + 20
Out[52]:
Period('2020-01-30 16:20', 'T')
In [49]:

p.asfreq('B')  # the last business day of the quarter
Out[49]:
Period('2020-01-31', 'B')

5 Time Resampling


Converting between timestamps and periods
In [2]:

import pandas as pd
import numpy as np
In [3]:

s = pd.Series(np.random.randn(5), index = pd.date_range('20200401', periods = 5, freq = 'M'))
s
Out[3]:
2020-04-30    0.250342
2020-05-31    0.393492
2020-06-30   -0.833173
2020-07-31    1.561602
2020-08-31   -2.354146
Freq: M, dtype: float64
In [4]:

# convert s to a period-indexed series
s.to_period()
Out[4]:
2020-04    0.250342
2020-05    0.393492
2020-06   -0.833173
2020-07    1.561602
2020-08   -2.354146
Freq: M, dtype: float64
In [6]:

ts = pd.Series(np.random.randn(5), index = pd.date_range('20161229', periods = 5, freq = 'D'))
ts
Out[6]:
2016-12-29    0.328473
2016-12-30    0.460058
2016-12-31   -0.091054
2017-01-01    0.098486
2017-01-02   -0.642376
Freq: D, dtype: float64
In [33]:

type(ts)
Out[33]:
pandas.core.series.Series
In [7]:

ts.to_period()
Out[7]:
2016-12-29    0.328473
2016-12-30    0.460058
2016-12-31   -0.091054
2017-01-01    0.098486
2017-01-02   -0.642376
Freq: D, dtype: float64
In [9]:

pts = ts.to_period(freq = 'M')
pts
Out[9]:
2016-12    0.328473
2016-12    0.460058
2016-12   -0.091054
2017-01    0.098486
2017-01   -0.642376
Freq: M, dtype: float64
In [12]:

pts.index
Out[12]:
PeriodIndex(['2016-12', '2016-12', '2016-12', '2017-01', '2017-01'], dtype='period[M]', freq='M')
In [13]:

# group by month and sum
pts.groupby(level = 0).sum()
Out[13]:
2016-12    0.697478
2017-01   -0.543890
Freq: M, dtype: float64
In [16]:

# convert pts back to a timestamp-indexed series
pts.to_timestamp()
Out[16]:
2016-12-01    0.328473
2016-12-01    0.460058
2016-12-01   -0.091054
2017-01-01    0.098486
2017-01-01   -0.642376
dtype: float64
In [17]:

pts.to_timestamp(how = 'end')
Out[17]:
2016-12-31 23:59:59.999999999    0.328473
2016-12-31 23:59:59.999999999    0.460058
2016-12-31 23:59:59.999999999   -0.091054
2017-01-31 23:59:59.999999999    0.098486
2017-01-31 23:59:59.999999999   -0.642376
dtype: float64
Resampling
In [18]:

ts = pd.Series(np.random.randint(0, 50, 60), 
               index = pd.date_range('20200410 09:30:00', periods = 60, freq = 'T'))
ts
Out[18]:
2020-04-10 09:30:00    10
2020-04-10 09:31:00    29
2020-04-10 09:32:00    14
2020-04-10 09:33:00    45
2020-04-10 09:34:00    22
2020-04-10 09:35:00     3
2020-04-10 09:36:00    29
2020-04-10 09:37:00    19
2020-04-10 09:38:00    32
2020-04-10 09:39:00    44
2020-04-10 09:40:00     5
2020-04-10 09:41:00     1
2020-04-10 09:42:00    13
2020-04-10 09:43:00    45
2020-04-10 09:44:00    36
2020-04-10 09:45:00    23
2020-04-10 09:46:00     9
2020-04-10 09:47:00    20
2020-04-10 09:48:00    30
2020-04-10 09:49:00    41
2020-04-10 09:50:00    41
2020-04-10 09:51:00    14
2020-04-10 09:52:00    35
2020-04-10 09:53:00    48
2020-04-10 09:54:00    19
2020-04-10 09:55:00    14
2020-04-10 09:56:00    11
2020-04-10 09:57:00    33
2020-04-10 09:58:00    47
2020-04-10 09:59:00    36
2020-04-10 10:00:00    15
2020-04-10 10:01:00    33
2020-04-10 10:02:00    34
2020-04-10 10:03:00    35
2020-04-10 10:04:00    28
2020-04-10 10:05:00     4
2020-04-10 10:06:00     5
2020-04-10 10:07:00    33
2020-04-10 10:08:00    30
2020-04-10 10:09:00    11
2020-04-10 10:10:00    19
2020-04-10 10:11:00    15
2020-04-10 10:12:00    40
2020-04-10 10:13:00    17
2020-04-10 10:14:00     3
2020-04-10 10:15:00    28
2020-04-10 10:16:00    39
2020-04-10 10:17:00    39
2020-04-10 10:18:00    44
2020-04-10 10:19:00    47
2020-04-10 10:20:00    45
2020-04-10 10:21:00    27
2020-04-10 10:22:00    35
2020-04-10 10:23:00    38
2020-04-10 10:24:00    27
2020-04-10 10:25:00    22
2020-04-10 10:26:00    44
2020-04-10 10:27:00    24
2020-04-10 10:28:00    49
2020-04-10 10:29:00     8
Freq: T, dtype: int32
In [23]:

# resample into 5-minute bins and sum each bin
ts.resample('5min').sum()  # by default each bin is labelled with its start time
Out[23]:
2020-04-10 09:30:00    120
2020-04-10 09:35:00    127
2020-04-10 09:40:00    100
2020-04-10 09:45:00    123
2020-04-10 09:50:00    157
2020-04-10 09:55:00    141
2020-04-10 10:00:00    145
2020-04-10 10:05:00     83
2020-04-10 10:10:00     94
2020-04-10 10:15:00    197
2020-04-10 10:20:00    172
2020-04-10 10:25:00    147
Freq: 5T, dtype: int32
In [24]:

ts.resample('5min', label = 'right').sum()  # label each bin with its end time
Out[24]:
2020-04-10 09:35:00    120
2020-04-10 09:40:00    127
2020-04-10 09:45:00    100
2020-04-10 09:50:00    123
2020-04-10 09:55:00    157
2020-04-10 10:00:00    141
2020-04-10 10:05:00    145
2020-04-10 10:10:00     83
2020-04-10 10:15:00     94
2020-04-10 10:20:00    197
2020-04-10 10:25:00    172
2020-04-10 10:30:00    147
Freq: 5T, dtype: int32
In [26]:

ts.resample('5min').ohlc()  # o: open, h: high, l: low, c: close
Out[26]:
open	high	low	close
2020-04-10 09:30:00	10	45	10	22
2020-04-10 09:35:00	3	44	3	44
2020-04-10 09:40:00	5	45	1	36
2020-04-10 09:45:00	23	41	9	41
2020-04-10 09:50:00	41	48	14	19
2020-04-10 09:55:00	14	47	11	36
2020-04-10 10:00:00	15	35	15	28
2020-04-10 10:05:00	4	33	4	11
2020-04-10 10:10:00	19	40	3	3
2020-04-10 10:15:00	28	47	28	47
2020-04-10 10:20:00	45	45	27	27
2020-04-10 10:25:00	22	49	8	8
Resampling via groupby
In [27]:

ts = pd.Series(np.random.randint(0, 50, 100), index = pd.date_range('20200401', periods = 100, freq = 'D'))
ts
Out[27]:
2020-04-01    17
2020-04-02    37
2020-04-03    29
2020-04-04    19
2020-04-05    24
              ..
2020-07-05    19
2020-07-06    48
2020-07-07    20
2020-07-08    37
2020-07-09     6
Freq: D, Length: 100, dtype: int32
Resampling by month
In [28]:

# method 1: group by the month number
ts.groupby(lambda x : x.month).sum()
Out[28]:
4    725
5    695
6    640
7    215
dtype: int32
In [30]:

# method 2: group by the monthly period
ts.groupby(ts.index.to_period('M')).sum()
Out[30]:
2020-04    725
2020-05    695
2020-06    640
2020-07    215
Freq: M, dtype: int32
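
An equivalent spelling, sketched here, resamples directly on the DatetimeIndex:

ts.resample('M').sum()  # same monthly totals, labelled by month end
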
In [32]:

type(ts.index)
Out[32]:
pandas.core.indexes.datetimes.DatetimeIndex
In [35]:

# W-FRI: weekly frequency, with each week ending on Friday
df = pd.DataFrame(np.random.randint(0, 50, 2), index = pd.date_range('20160422', periods = 2, freq = 'W-FRI'))
df
Out[35]:
0
2016-04-22	43
2016-04-29	7
In [37]:

# df.resample('D', fill_method = 'ffill')
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-37-1600fd4048a6> in <module>
----> 1 df.resample('D', fill_method = 'ffill')

TypeError: resample() got an unexpected keyword argument 'fill_method'
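
The traceback shows that the fill_method keyword has been removed from resample; in current pandas the fill step is chained separately. A sketch of the intended upsampling:

df.resample('D').ffill()  # upsample to daily and forward-fill from the weekly values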

Resampling period-indexed data
In [39]:

df = pd.DataFrame(np.random.randint(2, 30, (24, 4)), 
                  index = pd.period_range('202001', '202112', freq = 'M'), columns = list('ABCD'))
df
Out[39]:
A	B	C	D
2020-01	27	7	12	10
2020-02	22	8	22	3
2020-03	29	8	14	6
2020-04	21	13	14	10
2020-05	3	19	19	8
2020-06	19	16	16	27
2020-07	15	4	27	6
2020-08	2	21	14	28
2020-09	28	17	15	28
2020-10	24	5	23	14
2020-11	19	29	17	9
2020-12	5	6	14	10
2021-01	5	14	17	18
2021-02	9	19	29	19
2021-03	3	17	7	16
2021-04	10	16	14	4
2021-05	29	27	26	16
2021-06	14	26	19	28
2021-07	11	21	8	15
2021-08	4	6	7	24
2021-09	17	12	11	13
2021-10	23	22	13	16
2021-11	17	3	21	15
2021-12	10	8	13	21
In [41]:

df.resample('A-DEC').mean()  # annual mean, with years ending in December
Out[41]:
A	B	C	D
2020	17.833333	12.750000	17.250000	13.250000
2021	12.666667	15.916667	15.416667	17.083333
In [42]:

df.resample('A-MAR').mean()  # annual mean, with (fiscal) years ending in March
Out[42]:
A	B	C	D
2020	26.00	7.666667	16.000000	6.333333
2021	12.75	15.000000	17.666667	16.083333
2022	15.00	15.666667	14.666667	16.888889