压缩内存的轮子

导入包

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats

import gc
from collections import Counter
import copy

import warnings
warnings.filterwarnings("ignore")

#读取数据集

#test_data = pd.read_csv('./data_format1/test_format1.csv')
#train_data = pd.read_csv('./data_format1/train_format1.csv')

#user_info = pd.read_csv('./data_format1/user_info_format1.csv')
#user_log = pd.read_csv('./data_format1/user_log_format1.csv')

数据内存压缩(对源码这部分有一定的修改,鉴于内存原因)

def read_csv(file_name, num_rows):
    """Load a GBK-encoded CSV, keeping at most ``num_rows`` rows.

    ``num_rows=None`` loads the whole file; a small integer is handy for
    smoke-testing the pipeline on a sample.
    """
    frame = pd.read_csv(file_name, nrows=num_rows, encoding='gbk')
    return frame


# reduce memory — downcast numeric columns to shrink a DataFrame's footprint
def reduce_mem_usage(df, verbose=True):
    """Downcast numeric columns of ``df`` to the smallest dtype that holds
    their observed value range, and return ``df`` (modified in place).

    Ints become int8/int16/int32 when they fit; floats become
    float16/float32.  Note float16 keeps only ~3 significant digits, so a
    small precision loss is traded for memory.  Non-numeric columns are
    left untouched.

    Parameters
    ----------
    df : pd.DataFrame
        Frame to compress.
    verbose : bool
        If True, print memory usage after optimization and the percentage
        saved.  (Fix: the original accepted this flag but ignored it.)
    """
    start_mem = df.memory_usage().sum() / 1024 ** 2
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']

    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Fix: use >=/<= so values exactly at a dtype boundary
                # (e.g. 127 for int8) still qualify; the original strict
                # comparisons could even upcast an int8 boundary column.
                if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
            else:
                if c_min >= np.finfo(np.float16).min and c_max <= np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min >= np.finfo(np.float32).min and c_max <= np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)

    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df

# None -> load every row; set e.g. num_rows = 2000 for a quick smoke test.
num_rows = None

train_file = '天猫数据/train_format1.csv'
test_file = '天猫数据/test_format1.csv'

user_info_file = '天猫数据/user_info_format1.csv'
user_log_file = '天猫数据/user_log_format1.csv'

# Load each table and immediately downcast its numeric columns.
train_data, test_data, user_info, user_log = (
    reduce_mem_usage(read_csv(path, num_rows))
    for path in (train_file, test_file, user_info_file, user_log_file)
)

构造特征

# Start building features: stack train+test, then attach user demographics.

# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent (same row order, same index policy).
all_data = pd.concat([train_data, test_data])
print(all_data)
# Left join keeps every (user, merchant) pair even if the profile is missing.
all_data = all_data.merge(user_info, on=['user_id'], how='left')
del train_data, test_data, user_info
gc.collect()
print(all_data)

"""
按时间排序
"""
user_log = user_log.sort_values(['user_id', 'time_stamp'])

"""
合并数据  之 item_id:商品ID cat_id:商品类目id seller_id:店铺ID brand_id:品牌ID time_tamp:行为发生时间 action_type:行为类型
"""
list_join_func = lambda x: " ".join([str(i) for i in x])


agg_dict = {
            'item_id' : list_join_func,
            'cat_id' : list_join_func,
            'seller_id' : list_join_func,
            'brand_id' : list_join_func,
            'time_stamp' : list_join_func,
            'action_type' : list_join_func
        }

rename_dict = {
            'item_id' : 'item_path',
            'cat_id' : 'cat_path',
            'seller_id' : 'seller_path',
            'brand_id' : 'brand_path',
            'time_stamp' : 'time_stamp_path',
            'action_type' : 'action_type_path'
        }
# Aggregate the log from the user's point of view, then join onto all_data.
def merge_list(df_ID, join_columns, df_data, agg_dict, rename_dict):
    """Aggregate ``df_data`` per ``join_columns`` and left-join the result
    onto ``df_ID``; returns the merged frame."""
    grouped = df_data.groupby(join_columns).agg(agg_dict)
    grouped = grouped.reset_index()
    grouped = grouped.rename(columns=rename_dict)
    return df_ID.merge(grouped, on=join_columns, how="left")

# Collapse each user's full behavior log into path-string columns and
# left-join them onto the train/test frame.
all_data = merge_list(all_data, 'user_id', user_log, agg_dict, rename_dict)

"""
删除不需要的数据
"""
# The raw log is no longer needed once the path columns exist; free it.
del user_log
gc.collect()
print(all_data.head(20))
print(all_data['item_path'].head(10))


# 定义统计量为构建特征做准备
def cnt_(x):
    """Number of space-separated tokens in path string ``x``; -1 when ``x``
    is not a string (e.g. NaN from the left join for users without logs)."""
    try:
        return len(x.split(' '))
    except AttributeError:  # fix: bare except: swallowed everything
        return -1

def nunique_(x):
    """Number of distinct space-separated tokens in ``x``; -1 when ``x``
    is not a string (e.g. NaN from the left join)."""
    try:
        return len(set(x.split(' ')))
    except AttributeError:  # fix: bare except: swallowed everything
        return -1

def max_(x):
    """Max of the integer tokens in ``x``; -1 when ``x`` is not a string or
    contains a non-integer token."""
    try:
        return np.max([int(i) for i in x.split(' ')])
    except (AttributeError, ValueError):  # fix: bare except: swallowed everything
        return -1


def min_(x):
    """Min of the integer tokens in ``x``; -1 when ``x`` is not a string or
    contains a non-integer token."""
    try:
        return np.min([int(i) for i in x.split(' ')])
    except (AttributeError, ValueError):  # fix: bare except: swallowed everything
        return -1
# Spread of the numeric tokens.  NOTE(review): despite the original "方差"
# (variance) comment, np.std is the standard deviation, not the variance.
def std_(x):
    """Population standard deviation of the float tokens in ``x``; -1 when
    ``x`` is not a string or contains a non-numeric token."""
    try:
        return np.std([float(i) for i in x.split(' ')])
    except (AttributeError, ValueError):  # fix: bare except: swallowed everything
        return -1
# The n-th most frequent token (the "top-N" value itself).
def most_n(x, n):
    """The n-th (1-based) most common token in ``x``; -1 when ``x`` is not
    a string or has fewer than ``n`` distinct tokens."""
    try:
        return Counter(x.split(' ')).most_common(n)[n-1][0]
    except (AttributeError, IndexError):  # fix: bare except: swallowed everything
        return -1
# The occurrence count of the n-th most frequent token.
def most_n_cnt(x, n):
    """Occurrence count of the n-th (1-based) most common token in ``x``;
    -1 when ``x`` is not a string or has fewer than ``n`` distinct tokens."""
    try:
        return Counter(x.split(' ')).most_common(n)[n-1][1]
    except (AttributeError, IndexError):  # fix: bare except: swallowed everything
        return -1


###
def user_cnt(df_data, single_col, name):
    # New feature column `name`: token count of the path string in `single_col`.
    df_data[name] = df_data[single_col].map(cnt_)
    return df_data


def user_nunique(df_data, single_col, name):
    # New feature column `name`: distinct-token count of `single_col`.
    df_data[name] = df_data[single_col].map(nunique_)
    return df_data


def user_max(df_data, single_col, name):
    # New feature column `name`: max integer token of `single_col`.
    df_data[name] = df_data[single_col].map(max_)
    return df_data


def user_min(df_data, single_col, name):
    # New feature column `name`: min integer token of `single_col`.
    df_data[name] = df_data[single_col].map(min_)
    return df_data


def user_std(df_data, single_col, name):
    # New feature column `name`: std-dev of the numeric tokens in `single_col`.
    df_data[name] = df_data[single_col].map(std_)
    return df_data


def user_most_n(df_data, single_col, name, n=1):
    # New feature column `name`: the n-th most frequent token in `single_col`.
    df_data[name] = df_data[single_col].map(lambda path: most_n(path, n))
    return df_data


def user_most_n_cnt(df_data, single_col, name, n=1):
    # New feature column `name`: count of the n-th most frequent token.
    df_data[name] = df_data[single_col].map(lambda path: most_n_cnt(path, n))
    return df_data



"""
 
   提取基本统计特征

"""

all_data_test = all_data.head(2000)
#all_data_test = all_data
# 统计用户 点击、浏览、加购、购买行为
# 统计每个用户逛店的总次数
all_data_test = user_cnt(all_data_test,  'seller_path', 'user_cnt')
# 统计每个用户逛店不同店铺种数
all_data_test = user_nunique(all_data_test,  'seller_path', 'seller_nunique')
# 统计每个用户逛不同品类种数
all_data_test = user_nunique(all_data_test,  'cat_path', 'cat_nunique')
# 统计每个用户逛不同品牌种数
all_data_test = user_nunique(all_data_test,  'brand_path', 'brand_nunique')
# 统计每个用户逛不同商品种数
all_data_test = user_nunique(all_data_test,  'item_path', 'item_nunique')
# 统计每个用户总活跃天数  time_stamp_path格式为mmdd 即为月日格式(几月几号)
all_data_test = user_nunique(all_data_test,  'time_stamp_path', 'time_stamp_nunique')
# 统计每个用户不用行为种数
all_data_test = user_nunique(all_data_test,  'action_type_path', 'action_type_nunique')


# 最晚md时间
all_data_test = user_max(all_data_test,  'time_stamp_path', 'time_stamp_max')
# 最早md时间
all_data_test = user_min(all_data_test,  'time_stamp_path', 'time_stamp_min')
# 活跃天数方差:即波动情况
all_data_test = user_std(all_data_test,  'time_stamp_path', 'time_stamp_std')
# 最早和最晚相差天数
all_data_test['time_stamp_range'] = all_data_test['time_stamp_max'] - all_data_test['time_stamp_min']


# 用户最喜欢的店铺
all_data_test = user_most_n(all_data_test, 'seller_path', 'seller_most_1', n=1)
# 最喜欢的商品类目
all_data_test = user_most_n(all_data_test, 'cat_path', 'cat_most_1', n=1)
# 最喜欢的商品品牌
all_data_test = user_most_n(all_data_test, 'brand_path', 'brand_most_1', n=1)
# 最常见的行为动作
all_data_test = user_most_n(all_data_test, 'action_type_path', 'action_type_1', n=1)
# .....

# 用户最喜欢的店铺 行为(不区分种类)次数
all_data_test = user_most_n_cnt(all_data_test, 'seller_path', 'seller_most_1_cnt', n=1)
# 最喜欢的类目 行为(不区分种类)次数
all_data_test = user_most_n_cnt(all_data_test, 'cat_path', 'cat_most_1_cnt', n=1)
# 最喜欢的品牌 行为(不区分种类)次数
all_data_test = user_most_n_cnt(all_data_test, 'brand_path', 'brand_most_1_cnt', n=1)
# 最常见的行为动作 行为次数
all_data_test = user_most_n_cnt(all_data_test, 'action_type_path', 'action_type_1_cnt', n=1)
# .....



# 点击、加购、购买、收藏 分开统计
"""
统计基本特征函数  
-- 知识点二
-- 根据不同行为的业务函数
-- 提取不同特征
"""


def col_cnt_(df_data, columns_list, action_type):
    """Count the row's path entries whose action matches ``action_type``.

    ``df_data`` is one row (path-string columns), ``columns_list`` the path
    columns to combine, ``action_type`` the action code as a string
    (``None`` counts every entry).  Returns -1 on malformed input (NaN
    paths, missing columns, mismatched path lengths).

    Fix: the original appended an empty string for every NON-matching
    entry as well, so it always returned the full path length no matter
    which action_type was requested.
    """
    try:
        data_dict = {}

        col_list = copy.deepcopy(columns_list)
        if action_type is not None:
            col_list += ['action_type_path']

        for col in col_list:
            data_dict[col] = df_data[col].split(' ')

        # All path columns of a row have equal length; use the first one.
        path_len = len(data_dict[col_list[0]])

        data_out = []
        for i_ in range(path_len):
            if action_type is not None and data_dict['action_type_path'][i_] != action_type:
                continue
            data_out.append('_'.join(data_dict[col_][i_] for col_ in columns_list))

        return len(data_out)
    except (AttributeError, KeyError, IndexError, TypeError):
        return -1


def col_nuique_(df_data, columns_list, action_type):
    """Count DISTINCT combined path entries matching ``action_type``.

    Same contract as ``col_cnt_`` but returns the number of unique
    column-value combinations.  ``None`` means all actions; -1 on
    malformed input.  (Name typo kept: callers use ``col_nuique_``.)

    Fix: the original added an empty-string combo for every non-matching
    entry, inflating the distinct count and breaking the None case.
    """
    try:
        data_dict = {}

        col_list = copy.deepcopy(columns_list)
        if action_type is not None:
            col_list += ['action_type_path']

        for col in col_list:
            data_dict[col] = df_data[col].split(' ')

        # All path columns of a row have equal length; use the first one.
        path_len = len(data_dict[col_list[0]])

        data_out = []
        for i_ in range(path_len):
            if action_type is not None and data_dict['action_type_path'][i_] != action_type:
                continue
            data_out.append('_'.join(data_dict[col_][i_] for col_ in columns_list))

        return len(set(data_out))
    except (AttributeError, KeyError, IndexError, TypeError):
        return -1


def user_col_cnt(df_data, columns_list, action_type, name):
    # Row-wise count of combined-path entries for the given action type.
    def _row_cnt(row):
        return col_cnt_(row, columns_list, action_type)
    df_data[name] = df_data.apply(_row_cnt, axis=1)
    return df_data


def user_col_nunique(df_data, columns_list, action_type, name):
    # Row-wise distinct count of combined-path entries for the action type.
    def _row_nunique(row):
        return col_nuique_(row, columns_list, action_type)
    df_data[name] = df_data.apply(_row_nunique, axis=1)
    return df_data


# Per-action shop counts: 0=click, 1=add-to-cart, 2=purchase, 3=favourite.
for act in ('0', '1', '2', '3'):
    all_data_test = user_col_cnt(all_data_test, ['seller_path'], act, 'user_cnt_' + act)

# Number of distinct shops the user clicked.
all_data_test = user_col_nunique(all_data_test,  ['seller_path'], '0', 'seller_nunique_0')
# ....


# Combined (shop, item) click features.
# NOTE: these reuse the same column names and overwrite the two features above.
all_data_test = user_col_cnt(all_data_test,  ['seller_path', 'item_path'], '0', 'user_cnt_0')
all_data_test = user_col_nunique(all_data_test,  ['seller_path', 'item_path'], '0', 'seller_nunique_0')
# ....

print(all_data_test.columns)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值