# Task: predict the relevance of search results on Home Depot.
# Competition: https://www.kaggle.com/c/home-depot-product-search-relevance
# Reference implementation: https://github.com/yjfiejd/Product_search_relevance_NLP-/blob/master/Product_search_relevance(jupyter%20notebook).ipynb
# Implementation with explanatory comments follows.
# -*-coding: UTF-8 -*-
# @Time:2019/8/2720:19
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from nltk.stem.snowball import SnowballStemmer
# Snowball stemmer used to reduce English words to their stems.
stemmer = SnowballStemmer('english')
# Read the competition data with pandas; the Kaggle CSVs are Latin-1 encoded.
df_train = pd.read_csv('train.csv', encoding="ISO-8859-1")
df_test = pd.read_csv('test.csv', encoding="ISO-8859-1")
# Product-description table (merged below on product_uid).
df_pro_desc = pd.read_csv('product_descriptions.csv')
# Remember the number of training rows so train and test can be split
# apart again after the combined preprocessing below.
num_train = df_train.shape[0]
def str_stemmer(s):
    """Lower-case *s*, stem each whitespace-separated word, and re-join
    the stems with single spaces.

    NOTE(review): the original body was not indented (a SyntaxError as
    pasted); the logic itself is unchanged.
    """
    return " ".join(stemmer.stem(word) for word in s.lower().split())
def str_common_word(str1, str2):
    """Count how many whitespace-separated words of *str1* occur in *str2*.

    Matching is by substring (equivalent to the original
    ``str2.find(word) >= 0``), so e.g. 'wood' also matches 'plywood';
    each query word contributes at most 1 to the count.

    NOTE(review): the original body was not indented (a SyntaxError as
    pasted); the logic itself is unchanged.
    """
    # bool sums as int, so this equals the original sum of 0/1 flags.
    return sum(word in str2 for word in str1.split())
# --- Text preprocessing and feature extraction -------------------------
# Stack train and test so all text cleaning happens once on one frame.
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
# Attach the product description to every row: a left join keeps all
# rows of df_all, aligned on product_uid.
df_all = pd.merge(df_all, df_pro_desc, how='left', on='product_uid')
# Stem every text column so the query and the product text share one
# normalized vocabulary.
for text_col in ('search_term', 'product_title', 'product_description'):
    df_all[text_col] = df_all[text_col].map(str_stemmer)
# Feature 1: number of words in the search query.
df_all['len_of_query'] = df_all['search_term'].map(lambda x: len(x.split())).astype(np.int64)
# Pack the three text fields into one tab-separated string so a single
# map() can see query, title and description together.
df_all['product_info'] = df_all['search_term'] + "\t" + df_all['product_title'] + "\t" + df_all['product_description']
# Features 2 and 3: how many query words appear in the title and in the
# description respectively.
df_all['word_in_title'] = df_all['product_info'].map(lambda x: str_common_word(x.split('\t')[0], x.split('\t')[1]))
df_all['word_in_description'] = df_all['product_info'].map(lambda x: str_common_word(x.split('\t')[0], x.split('\t')[2]))
# Drop the raw text columns: the model consumes only numeric features.
df_all = df_all.drop(['search_term', 'product_title', 'product_description', 'product_info'], axis=1)
# --- Train/test split, model fitting and submission --------------------
# Undo the earlier concat: the first num_train rows are the training set.
df_train = df_all.iloc[:num_train]
df_test = df_all.iloc[num_train:]
# Keep the test-set ids for the submission file.
id_test = df_test['id']
# Target vector and numeric feature matrices; 'id' and 'relevance' are
# not features ('relevance' is NaN on the test rows from the concat).
y_train = df_train['relevance'].values
X_train = df_train.drop(['id', 'relevance'], axis=1).values
X_test = df_test.drop(['id', 'relevance'], axis=1).values
# Bagging over small random forests: 45 forests, each fit on a random
# 10% sample of the training data.
base_forest = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
model = BaggingRegressor(base_forest, n_estimators=45, max_samples=0.1, random_state=25)
model.fit(X_train, y_train)
# Predict relevance for the test set and write the Kaggle submission.
predictions = model.predict(X_test)
pd.DataFrame({"id": id_test, "relevance": predictions}).to_csv('submission.csv', index=False)