In [44]:
# Imports and display configuration for the pandas data-loading examples.
from __future__ import division
from numpy.random import randn
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
np.random.seed(12345)  # fixed seed for reproducible random examples
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)
In [4]:
# Read a comma-separated file into a DataFrame.
df = pd.read_csv('ex1.csv')
df
Out[4]:
In [5]:
# read_table with an explicit comma separator is equivalent to read_csv here.
pd.read_table('ex1.csv', sep=',')
Out[5]:
In [6]:
# File without a header row: pandas assigns default integer column names.
pd.read_csv('ex2.csv', header=None)
Out[6]:
In [7]:
# Supply the column names explicitly.
pd.read_csv('ex2.csv', names=['a', 'b', 'c', 'd', 'message'])
Out[7]:
In [8]:
names = ['a', 'b', 'c', 'd', 'message']
pd.read_csv('ex2.csv', names=names, index_col='message') # use the 'message' column as the DataFrame index
Out[8]:
In [9]:
# Build a hierarchical (MultiIndex) row index from two columns.
parsed = pd.read_csv('csv_mindex.csv', index_col=['key1', 'key2'])
parsed
Out[9]:
In [10]:
# Show the raw lines of the whitespace-delimited file before parsing it.
list(open('ex3.txt'))
Out[10]:
In [11]:
# Fields separated by a variable amount of whitespace: use a regex separator.
# Raw string r'\s+': '\s' is an invalid escape sequence in a plain string
# literal (DeprecationWarning on Python 3.6+, SyntaxWarning on 3.12+).
result = pd.read_table('ex3.txt', sep=r'\s+')
result
Out[11]:
In [12]:
pd.read_csv('ex4.csv', skiprows=[0, 2, 3]) # skiprows: skip the listed row numbers
Out[12]:
In [14]:
result = pd.read_csv('ex5.csv')
result
Out[14]:
In [15]:
pd.isnull(result) # boolean mask: True where a value is missing
Out[15]:
In [33]:
result = pd.read_csv('ex5.csv',na_values=['NULL']) # na_values: treat the given strings as NaN on read
Out[33]:
In [34]:
sentinels = {'message': ['foo'], 'something': ['two']}
pd.read_csv('ex5.csv', na_values=sentinels) # na_values can also be a dict of per-column sentinel lists
Out[34]:
In [35]:
result = pd.read_csv('ex6.csv')
result
Out[35]:
In [36]:
pd.read_csv('ex6.csv', nrows=5) # read only the first 5 rows
Out[36]:
In [37]:
# chunksize returns a TextFileReader iterator instead of a DataFrame.
chunker = pd.read_csv('ex6.csv', chunksize=1000)
chunker
Out[37]:
In [39]:
# Aggregate value counts of 'key' across 1000-row chunks of the file.
chunker = pd.read_csv('ex6.csv', chunksize=1000)
# Explicit dtype: Series([]) with no dtype warns (and defaults to object) in
# modern pandas; the running sum needs a float accumulator anyway.
tot = Series([], dtype='float64')
for piece in chunker:
    # fill_value=0 so keys not yet seen in `tot` start from zero.
    # (Restores the loop-body indentation lost in the notebook export.)
    tot = tot.add(piece['key'].value_counts(), fill_value=0)
tot = tot.sort_values(ascending=False)
In [40]:
tot[:10]  # the 10 most frequent keys
Out[40]:
In [41]:
data = pd.read_csv('ex5.csv')
data
Out[41]:
In [42]:
data.to_csv('out.csv')
In [45]:
data.to_csv(sys.stdout, sep='|')# use '|' as the field delimiter
In [46]:
data.to_csv(sys.stdout, na_rep='NULL')# write missing values as the string NULL
In [47]:
data.to_csv(sys.stdout, index=False, header=False) # suppress both the row index and the column header
In [48]:
data.to_csv(sys.stdout, index=False, columns=['a', 'b', 'c'])# write only a subset of columns, in this order
In [49]:
dates = pd.date_range('1/1/2000', periods=7)
ts = Series(np.arange(7), index=dates)
# NOTE(review): modern pandas writes a header row by default for Series.to_csv;
# pass header=False to reproduce the headerless file the next cell expects —
# confirm against the pandas version in use.
ts.to_csv('tseries.csv')
In [50]:
Series.from_csv('tseries.csv', parse_dates=True)
Out[50]:
In [51]:
import csv
# NOTE(review): the file handle stays open until the kernel exits; a `with`
# block would be cleaner, but the reader is consumed by the next cell.
f = open('ex7.csv')
reader = csv.reader(f)
In [52]:
# Iterate the csv reader: each row comes back as a list of strings.
# (Restores the loop-body indentation lost in the notebook export.)
for line in reader:
    print(line)
In [55]:
# Parse the whole file into a list of rows in one expression.
lines = list(csv.reader(open('ex7.csv')))
lines
Out[55]:
In [57]:
# Split the parsed rows into the header row and the data rows.
header, values = lines[0], lines[1:]
header
Out[57]:
In [58]:
values
Out[58]:
In [63]:
zip(header, zip(*values))
Out[63]:
In [65]:
# Column-oriented mapping: header name -> tuple of that column's values.
data_dict = dict(zip(header, zip(*values)))
data_dict
Out[65]:
In [66]:
class my_dialect(csv.Dialect):
    """Custom CSV dialect: semicolon-delimited, minimal quoting, LF line endings."""
    delimiter = ';'
    quotechar = '"'
    quoting = csv.QUOTE_MINIMAL
    lineterminator = '\n'
In [67]:
# Write four rows with the custom dialect; `with` closes the file on exit.
# (Restores the with-block body indentation lost in the notebook export.)
with open('mydata.csv', 'w') as f:
    writer = csv.writer(f, dialect=my_dialect)
    writer.writerow(('one', 'two', 'three'))
    writer.writerow(('1', '2', '3'))
    writer.writerow(('4', '5', '6'))
    writer.writerow(('7', '8', '9'))
In [69]:
# A JSON document held as a Python string (note JSON's null, not Python's None).
obj = """
{"name": "Wes",
"places_lived": ["United States", "Spain", "Germany"],
"pet": null,
"siblings": [{"name": "Scott", "age": 25, "pet": "Zuko"},
{"name": "Katie", "age": 33, "pet": "Cisco"}]
}
"""
In [70]:
import json
# Parse the JSON string into nested Python dicts/lists.
result = json.loads(obj)
result
Out[70]:
In [71]:
asjson = json.dumps(result)  # serialize the Python object back to a JSON string
In [72]:
# DataFrame from the list of sibling dicts, keeping only the two named fields.
siblings = DataFrame(result['siblings'], columns=['name', 'age'])
siblings
Out[72]:
In [73]:
from lxml.html import parse
# urllib2 exists only on Python 2; urllib.request.urlopen is the Python 3 equivalent.
from urllib.request import urlopen
# NOTE(review): this Yahoo Finance URL is from the book and may no longer serve
# the same HTML — confirm before relying on the scraping cells below.
parsed = parse(urlopen('http://finance.yahoo.com/q/op?s=AAPL+Options'))
doc = parsed.getroot()
In [74]:
# All <a> anchor elements in the parsed document.
links = doc.findall('.//a')
links[15:20]
Out[74]:
In [75]:
lnk = links[28]
lnk
lnk.get('href')
# NOTE(review): only the last expression of a cell is displayed — the bare `lnk`
# and `lnk.get('href')` above produce no visible output here.
lnk.text_content()
Out[75]:
In [88]:
frame = pd.read_csv('ex1.csv')
frame
Out[88]:
In [89]:
frame.to_pickle('frame_pickle')  # binary serialization via pickle (only unpickle files you trust)
In [90]:
pd.read_pickle('frame_pickle')
Out[90]:
In [91]:
# HDF5 store: dict-like container for pandas objects (requires PyTables).
store = pd.HDFStore('mydata.h5')
store['obj1'] = frame
store['obj1_col'] = frame['a']
store
Out[91]:
In [92]:
store['obj1']
Out[92]:
In [93]:
store.close()
os.remove('mydata.h5')  # remove the temporary HDF5 file
In [94]:
import requests
# NOTE(review): the repo moved to pandas-dev/pandas; confirm the old pydata path
# still redirects before relying on this endpoint.
url = 'https://api.github.com/repos/pydata/pandas/milestones/28/labels'
resp = requests.get(url)
# Decode the JSON payload now: the following cells slice `data` and build a
# DataFrame from it. Without this line they would silently reuse the ex5.csv
# frame bound to `data` earlier in the notebook (hidden-state bug).
data = resp.json()
resp
Out[94]:
In [95]:
# NOTE(review): ensure `data` holds the decoded JSON from `resp` (resp.json());
# otherwise this slices the ex5.csv DataFrame loaded earlier in the notebook.
data[:5]
Out[95]:
In [96]:
issue_labels = DataFrame(data)
issue_labels
Out[96]:
In [97]:
import sqlite3
# DDL for a small demo table with two text, one real, and one integer column.
query = """
CREATE TABLE test
(a VARCHAR(20), b VARCHAR(20),
c REAL, d INTEGER
);"""
con = sqlite3.connect(':memory:')
con.execute(query) # create the table
con.commit() # commit the statement
In [98]:
data = [('Atlanta', 'Georgia', 1.25, 6),
('Tallahassee', 'Florida', 2.6, 3),
('Sacramento', 'California', 1.7, 5)]
stmt = "INSERT INTO test VALUES(?, ?, ?, ?)"
con.executemany(stmt, data) # insert all rows (parameterized placeholders)
con.commit()
In [99]:
cursor = con.execute('select * from test') # run a query
rows = cursor.fetchall()
rows
Out[99]:
In [100]:
cursor.description # column metadata; the first element of each tuple is the column name
Out[100]:
In [101]:
DataFrame(rows, columns=zip(*cursor.description)[0]) # 增加列名
Out[101]:
In [102]:
import pandas.io.sql as sql
# NOTE(review): pd.read_sql is the public entry point in modern pandas;
# pandas.io.sql still exposes read_sql but is an internal module — consider
# switching to pd.read_sql.
sql.read_sql('select * from test', con) # one-step way to get a tidy DataFrame (via pandas.io.sql)
Out[102]: