pandas notes

# Loading the data
import pandas as pd
# Read the CSV file; if it is not in the current directory, give an absolute path
food_info = pd.read_csv("food_info.csv")
# A DataFrame is essentially a matrix-like structure
print(type(food_info))
# Here, the object dtype really means string data
print(food_info.dtypes)
# Built-in help documentation
print(help(pd.read_csv))
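# A small aside (my own sketch, not from the original notes): read_csv accepts
# several commonly used options besides the file path.
food_info = pd.read_csv(
    "food_info.csv",   # or an absolute path if the file lives elsewhere
    sep=",",           # field separator, comma by default
    encoding="utf-8",  # file encoding
)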
# To reuse something defined earlier (an imported package, the food_info variable, etc.), the earlier cells must have been executed in the current session; output left over from a previous session does not count
# .head() displays the data, showing the first five rows by default; passing 3 shows the first three rows
food_info.head(3)
# A convenient thing about notebooks is that intermediate results stay visible along the way, so there is no need to rerun everything from the start as in IDEA
# Show the last few rows
food_info.tail(3)
# Look at the column labels (i.e. the column names)
print(food_info.columns)
# Show how many rows and columns the data has
print(food_info.shape)
# Get the first record (the result shows the first row)
print(food_info.loc[0])
# Label-based slicing: rows 3 through 6, inclusive
food_info.loc[3:6]
# Get rows 2, 5 and 10
two_five_ten = [2,5,10]
food_info.loc[two_five_ten]
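# Side note (my own addition): .loc is label-based and its slices include both
# endpoints, while .iloc is purely positional and excludes the end point,
# like ordinary Python slicing.
print(food_info.iloc[3:6])  # positional rows 3, 4, 5
print(food_info.loc[3:6])   # rows whose index labels are 3, 4, 5 and 6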
# Select the data one column at a time, located by column name
ndb_col = food_info["NDB_No"]
print(ndb_col)
# Select two columns at once
columns = ["Zinc_(mg)", "Copper_(mg)"]
zinc_copper = food_info[columns]
print(zinc_copper)
# Find the columns whose unit is grams (names ending in "(g)")
# Turn the column names into a list
col_names = food_info.columns.tolist()
print(col_names)
gram_columns = []
for c in col_names:
    if c.endswith("(g)"):
        gram_columns.append(c)
gram_df = food_info[gram_columns]
print(gram_df.head(3))
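# The same selection can be written more compactly; a sketch of two
# equivalent one-liners (the regex assumes gram columns end in "(g)"):
gram_df = food_info[[c for c in food_info.columns if c.endswith("(g)")]]
gram_df = food_info.filter(regex=r"\(g\)$")  # same result via a regex on the column names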
# Arithmetic operations
# Convert values reported in mg into g; an operation on a column is applied to every row
print(food_info["Iron_(mg)"])
div_1000 = food_info["Iron_(mg)"] / 1000
print(div_1000)
# Combine two columns: if their lengths match, the operation is applied element-wise
water_energy = food_info["Water_(g)"] * food_info["Energ_Kcal"]
print(water_energy)
# Create a new column
iron_grams = food_info["Iron_(mg)"] / 1000  
print(food_info.shape)
food_info["Iron_(g)"] = iron_grams
print(food_info.shape)
# Find the maximum of a column
max_calories = food_info["Energ_Kcal"].max()
print(max_calories)
# Divide the whole column by its maximum value
normalized_calories = food_info["Energ_Kcal"] / max_calories
normalized_fat = food_info["Lipid_Tot_(g)"] / food_info["Lipid_Tot_(g)"].max()
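# A sketch of keeping the normalized values as new columns; the column names
# below are my own and are not part of the original dataset.
food_info["Normalized_Kcal"] = normalized_calories
food_info["Normalized_Fat"] = normalized_fat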
# Re-read the data so the sorting examples start from a fresh copy
import pandas as pd
food_info = pd.read_csv("food_info.csv")
# Sort by a chosen column; ascending by default; NaN marks a missing value
food_info.sort_values("Sodium_(mg)",inplace=True)
print(food_info["Sodium_(mg)"])
# To sort in descending order instead
food_info.sort_values("Sodium_(mg)",inplace=True,ascending=False)
print(food_info["Sodium_(mg)"])
import numpy as np
import pandas as pd
titanic_survival = pd.read_csv("titanic_train.csv")
titanic_survival.head()

# Look at the Age column
age = titanic_survival["Age"]
# Label-based slice: rows with labels 0 through 10
print(age.loc[0:10])
# The data contains NaN missing values, which need to be handled
age_is_null = pd.isnull(age)
print(age_is_null)
# Use the boolean mask to select only the missing entries
age_null_true = age[age_is_null]
print(age_null_true)
# See how many records have a missing age
age_null_count=len(age_null_true)
print(age_null_count)
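# The same count in one line: summing a boolean Series counts each True as 1.
print(pd.isnull(age).sum())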

# If the missing values are not dealt with, calculations like the one below go wrong
# Naive attempt at the mean age
mean_age = sum(titanic_survival["Age"]) / len(titanic_survival["Age"])
print(mean_age)
# The correct manual approach: keep only the rows without missing values
good_ages = titanic_survival["Age"][age_is_null == False]
correct_mean_age = sum(good_ages) / len(good_ages)
print(correct_mean_age)
# pandas has a ready-made mean() function for this
correct_mean_age = titanic_survival["Age"].mean()
print(correct_mean_age)
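# mean() ignores NaN because skipna defaults to True; forcing skipna=False
# reproduces the NaN result of the naive calculation above.
print(titanic_survival["Age"].mean(skipna=False))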
# Compute the average fare for each passenger class
passenger_classes = [1,2,3]
fares_by_class = {}
for this_class in passenger_classes:
    # Get the rows for this passenger class
    pclass_rows = titanic_survival[titanic_survival["Pclass"] == this_class]
    # Locate the Fare column
    pclass_fares = pclass_rows["Fare"]
    # Take the mean of that column
    fare_for_class = pclass_fares.mean()
    # Store the average fare in the dictionary
    fares_by_class[this_class] = fare_for_class
print(fares_by_class)
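# The same per-class average with groupby, the usual idiom for
# "split by one column, aggregate another":
print(titanic_survival.groupby("Pclass")["Fare"].mean())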
# pandas has a built-in function for this: pivot_table summarizes one quantity against others
# index is the column to group by (Pclass here), values is the quantity to relate it to, and aggfunc specifies how the two are aggregated
passenger_survival = titanic_survival.pivot_table(index = "Pclass",values="Fare",aggfunc=np.mean)
print(passenger_survival)
# Relationship between passenger class and age; when aggfunc is not given, the default is the mean
passenger_age = titanic_survival.pivot_table(index="Pclass",values="Age")
print(passenger_age)
# Relate one quantity to two others at once
# Total fare and total number of survivors for each of the embarkation ports C, Q and S
port_stats = titanic_survival.pivot_table(index="Embarked", values=["Fare","Survived"], aggfunc=np.sum)
print(port_stats)
# Drop every column that contains missing values (axis=1)
drop_na_columns = titanic_survival.dropna(axis=1)
print(drop_na_columns)
# 如果"Age","Sex"列的某一数据有缺失值,那么就把对应行舍弃掉
new_titanic_survival = titanic_survival.dropna(axis=0,subset=["Age","Sex"])
print(new_titanic_survival)
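# Instead of dropping rows, missing ages can also be filled in, for example
# with the column mean (a common, if crude, choice); a sketch:
age_filled = titanic_survival["Age"].fillna(titanic_survival["Age"].mean())
print(pd.isnull(age_filled).sum())  # now 0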
# Look up a single value: the Age of the row with label 83
row_index_83_age = titanic_survival.loc[83,"Age"]
print(row_index_83_age)
row_index_766_pclass = titanic_survival.loc[766,"Pclass"]
print(row_index_766_pclass)
# Jupyter truncates wide output; add the line below so that all columns are shown
pd.set_option('display.max_columns', None)
# Sort by Age in descending order and take the first ten rows. A trailing backslash in the output just means a row is too wide for one line and continues below
new_titanic_survival = titanic_survival.sort_values("Age",ascending=False)
print(new_titanic_survival[0:10])
# The numbers on the left are the original row labels; to renumber the index from 0 use reset_index, where drop=True discards the old index instead of keeping it as a column
titanic_reindexed = new_titanic_survival.reset_index(drop=True)
print("----------")
print(titanic_reindexed.loc[0:10])
# If no pandas function does what you need, you can define your own
# Return the 100th row (label 99) of each column
def hundredth_row(column):
    hundredth_row = column.loc[99]
    return hundredth_row
# apply runs the function on every column
hundredth_row = titanic_survival.apply(hundredth_row)
print(hundredth_row)
# Count the missing values in each column
def null_count(column):
    column_null =  pd.isnull(column)
    null = column[column_null]
    return len(null)
column_null_count = titanic_survival.apply(null_count)
print(column_null_count)
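# pandas can do this directly: isnull() on the whole DataFrame followed by a
# column-wise sum gives the same per-column counts.
print(pd.isnull(titanic_survival).sum())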
# Transform values, e.g. map passenger classes 1, 2, 3 to First, Second, Third
def which_class(row):
    pclass = row["Pclass"]
    if pd.isnull(pclass):
        return "Unknown"
    elif pclass == 1:
        return "First Class"
    elif pclass == 2:
        return "Second Class"
    elif pclass == 3:
        return "Third Class"
classes = titanic_survival.apply(which_class,axis=1)
print(classes)
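# The same mapping without apply, using a dictionary and Series.map; note that
# values missing from the dictionary (including NaN) become NaN rather than
# "Unknown" with this approach.
class_names = {1: "First Class", 2: "Second Class", 3: "Third Class"}
print(titanic_survival["Pclass"].map(class_names))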
# Discretize a continuous value
def is_minor(row):
    if row["Age"] < 18:
        return True
    else:
        return False

minors = titanic_survival.apply(is_minor,axis=1)
print(minors)

def generate_age_label(row):
    age = row["Age"]
    if pd.isnull(age):
        return "unknown"
    elif age < 18:
        return "minor"
    else:
        return "adult"
age_labels = titanic_survival.apply(generate_age_label, axis=1)
print(age_labels)
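# pd.cut is another way to discretize a continuous column; a sketch. The bin
# edges are right-inclusive by default, so an age of exactly 18 lands in
# "minor" here, slightly different from the age < 18 test above, and missing
# ages come out as NaN instead of "unknown".
age_bins = pd.cut(titanic_survival["Age"], bins=[0, 18, 120],
                  labels=["minor", "adult"])
print(age_bins.value_counts())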
# What is the average survival rate for adults versus minors?
titanic_survival["age_labels"] = age_labels
age_group_survival = titanic_survival.pivot_table(index="age_labels", values="Survived")
print(age_group_survival)
# So far everything has been a DataFrame, which is matrix-like; a single row or column of a DataFrame is a Series
import pandas as pd
fandango = pd.read_csv('fandango_score_comparison.csv')
series_film = fandango['FILM']
print(type(series_film))
# Select values by slicing
print(series_film[0:5])
series_rt = fandango['RottenTomatoes']
print (series_rt[0:5])
# A DataFrame is built from Series objects, and a Series wraps an ndarray
film_names = series_film.values
print(type(film_names))
# To construct a Series directly, import it
from pandas import Series
# In this Series, each film name will map to that outlet's score
rt_scores = series_rt.values
print(rt_scores)
# Use the film names as the index
series_custom = Series(rt_scores,index=film_names)
series_custom[['Minions (2015)', 'Leviathan (2014)']]
fiveten = series_custom[5:10]
print(fiveten)
# Sorting a Series
original_index = series_custom.index.tolist()
print(original_index)
sorted_index = sorted(original_index)
sorted_by_index = series_custom.reindex(sorted_index)
print(sorted_by_index)
sc2 = series_custom.sort_index()
sc3 = series_custom.sort_values()
print(sc2[0:10])
print(sc3[0:10])
# Add two Series element-wise (values are aligned by index)
import numpy as np
print(np.add(series_custom, series_custom))
np.sin(series_custom)
np.max(series_custom)
# Boolean filtering: keep only the values that satisfy a condition
series_custom > 50
series_greater_than_50 = series_custom[series_custom > 50]

criteria_one = series_custom > 50
criteria_two = series_custom < 75
both_criteria = series_custom[criteria_one & criteria_two]
print(both_criteria)
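# Series.between covers the common "within a range" case; by default both
# endpoints are included, which differs slightly from the strict > 50 and
# < 75 comparison above (a sketch):
print(series_custom[series_custom.between(50, 75)])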
# Average the Rotten Tomatoes critic and user scores
rt_critics = Series(fandango['RottenTomatoes'].values, index=fandango['FILM'])
rt_users = Series(fandango['RottenTomatoes_User'].values, index=fandango['FILM'])
rt_mean = (rt_critics + rt_users)/2
print(rt_mean)
# Use a specific column as the index
fandango_films = fandango.set_index('FILM', drop=False)
print(fandango_films.index)
# Slicing with string labels works too
fandango_films["Avengers: Age of Ultron (2015)":"Hot Tub Time Machine 2 (2015)"]
fandango_films.loc["Avengers: Age of Ultron (2015)":"Hot Tub Time Machine 2 (2015)"]
fandango_films.loc['Kumiko, The Treasure Hunter (2015)']
# Previously rows were selected by integer index; with a string index they can be selected directly by name
movies = ['Kumiko, The Treasure Hunter (2015)', 'Do You Believe? (2015)', 'Ant-Man (2015)']
fandango_films.loc[movies]
# Working with data types
import numpy as np

# returns the data types as a Series
types = fandango_films.dtypes
# print(types)
# filter data types to just floats, index attributes returns just column names
float_columns = types[types.values == 'float64'].index
# use bracket notation to filter columns to just float columns
float_df = fandango_films[float_columns]
# print(float_df)
# `x` is a Series object representing a column
deviations = float_df.apply(lambda x: np.std(x))

print(deviations)
# Compute the standard deviation
rt_mt_user = float_df[['RT_user_norm', 'Metacritic_user_nom']]
rt_mt_user.apply(lambda x: np.std(x), axis=1)
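# Note that pandas' own std() uses ddof=1 (the sample standard deviation),
# while np.std defaults to ddof=0, so the two do not agree exactly; a sketch
# of matching the np.std results above without apply:
print(float_df.std(ddof=0))
print(rt_mt_user.std(axis=1, ddof=0))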