Using MatchZoo for a Real-Estate Q&A Matching Competition

Preface

I took part in the Beike (贝壳找房) real-estate question-answer matching competition (link: https://www.datafountain.cn/competitions/474) and used the MatchZoo library to tackle it.
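
Everything below uses the Keras-based MatchZoo 2.x API (mz.pack, mz.DataGenerator, model.backend), so it is worth confirming the installed version before running the code; this small check assumes the package exposes __version__ in the usual way:

import matchzoo as mz
print(mz.__version__)  # the code in this post targets the 2.x (Keras) series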

Competition workflow

Import third-party libraries

# Note: several of these imports (StratifiedKFold, tqdm, codecs, Callback, ...) are
# leftovers from the original notebook and are not used in the code below.
import matchzoo as mz
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow.keras as tfk   # originally aliased to K, which the keras.backend import below clobbered
from matchzoo.preprocessors import BasicPreprocessor
from sklearn.model_selection import train_test_split, StratifiedKFold
import datetime
from tqdm import tqdm
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.optimizers import Adam
from random import choice
# from keras_bert import load_trained_model_from_checkpoint, Tokenizer
import re, os
import codecs
from keras.callbacks import Callback

Data preprocessing

1. Read the datasets

# Read and merge the raw data
train_left = pd.read_csv('./train/train.query.tsv', sep='\t', header=None)
train_left.columns = ['id', 'q1']
train_right = pd.read_csv('./train/train.reply.tsv', sep='\t', header=None)
train_right.columns = ['id', 'id_sub', 'q2', 'label']
df_train = train_left.merge(train_right, how='left')
df_train['q2'] = df_train['q2'].fillna('好的')  # fill missing replies with a placeholder ("OK")
test_left = pd.read_csv('./test/test.query.tsv', sep='\t', header=None, encoding='gbk')
test_left.columns = ['id', 'q1']
test_right = pd.read_csv('./test/test.reply.tsv', sep='\t', header=None, encoding='gbk')
test_right.columns = ['id', 'id_sub', 'q2']
df_test = test_left.merge(test_right, how='left')

2. Inspect the training and test sets

Training set

Test set
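
A quick way to sanity-check the two merged frames in code before converting them:

# Shapes, a few example rows, and the label distribution of the training pairs
print(df_train.shape, df_test.shape)
print(df_train.head())
print(df_train['label'].value_counts())
print(df_test.head())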

3. Convert the data into MatchZoo's format

# Build the training/validation frame
sent1_ = df_train.q1.values
sent2_ = df_train.q2.values
label_ = df_train.label.values

all_data = pd.DataFrame()
all_data['id_left'] = range(len(df_train))
all_data['text_left'] = sent1_
all_data['id_right'] = range(len(df_train))
all_data['text_right'] = sent2_
all_data['label'] = label_

# Build the test frame (no labels)
_sent1 = df_test.q1.values
_sent2 = df_test.q2.values
tmp_data = pd.DataFrame()
tmp_data['id_left'] = range(len(df_test))
tmp_data['text_left'] = _sent1
tmp_data['id_right'] = range(len(df_test))
tmp_data['text_right'] = _sent2

4. Pack into MatchZoo DataPacks and build the data pipeline

def load_data(df_data):
    """Wrap a DataFrame with id_left/text_left/id_right/text_right(/label) columns into a MatchZoo DataPack."""
    return mz.pack(df_data)

train_data = load_data(all_data)
test_data = load_data(tmp_data)
# Use the most basic preprocessor, BasicPreprocessor
# pad/truncate both sides of each text pair to a fixed length of 15 tokens
preprocessor = BasicPreprocessor(fixed_length_left=15, fixed_length_right=15)
# train_split is not defined elsewhere in the post; an 80/20 train/dev split is assumed here
train_split = int(len(train_data) * 0.8)
train = train_data[:train_split]
dev = train_data[train_split:]
# fit_transform essentially maps tokens to ids, so the Chinese text needs no separate word segmentation
train_pack_processed = preprocessor.fit_transform(train)
dev_pack_processed = preprocessor.transform(dev)
test_pack_processed = preprocessor.transform(test_data)
# training-data generator (note: the model blocks below actually train on the unpacked arrays)
train_data_generator = mz.DataGenerator(train_pack_processed,
                                        batch_size=32,
                                        shuffle=True)

test_x, test_y = test_pack_processed.unpack()
dev_x, dev_y = dev_pack_processed.unpack()
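
The generator built above is never actually consumed below: every model block trains on the fully unpacked arrays. As a hedged sketch (assuming, per the MatchZoo 2.x tutorials, that the fitted preprocessor stores its vocabulary details in context and that models expose fit_generator), inspecting the pipeline and the batched alternative would look like this:

# Inspect what the preprocessor learned; the exact context keys are an assumption
print(preprocessor.context.get('vocab_size'), preprocessor.context.get('input_shapes'))

# Batched alternative to model.fit(x, y), once a model has been built and compiled:
# model.fit_generator(train_data_generator, epochs=10, verbose=1)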

5. Helper code for creating output directories

def mkdir(path):
    import os

    # strip surrounding whitespace and any trailing backslash
    path = path.strip()
    path = path.rstrip("\\")

    # check whether the path already exists
    isExists = os.path.exists(path)

    if not isExists:
        # create the directory (and parents) if it does not exist yet
        os.makedirs(path)
        print(path + ' created')
        return True
    else:
        # do nothing if it already exists
        print(path + ' already exists')
        return False
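
Incidentally, the standard library can do the same thing in one call; the helper above is kept because the model blocks below use it, but an equivalent shortcut (the function name here is illustrative) would be:

import os

def mkdir_simple(path):
    # same behaviour as mkdir() above: create the directory tree if it is missing
    os.makedirs(path.strip().rstrip("\\"), exist_ok=True)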

Model training

DenseBaseline

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)
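
The Ranking task above is used with its defaults; MatchZoo also lets you attach evaluation metrics to the task before handing it to a model. A small optional sketch, based on the MatchZoo 2.x tutorials (the metric classes named here are assumed to be available in the installed version):

# Optional: attach ranking metrics to the task before building a model
task = mz.tasks.Ranking()
task.metrics = [
    mz.metrics.MeanAveragePrecision(),
    mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
]
print(task)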

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model = mz.models.DenseBaseline()
model.params['task'] = task
model.params['mlp_num_units'] = 3
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result','DenseBaseline')
logdir1 = os.path.join('outputs', 'model', 'DenseBaseline')

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+'DenseBaseline'+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + 'DenseBaseline' + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + 'DenseBaseline' + '/' + stamp)

DenseBaseline's run output (the outputs of the later models are omitted for brevity)
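
The model blocks in this post only write the raw prediction scores to a CSV. To produce an actual competition submission, the scores still need to be joined back to the query/reply ids and binarised; a minimal sketch, assuming the submission wants id, id_sub and a 0/1 label (as in the reply files) and that a 0.5 cut-off is reasonable for these scores:

# Illustrative only: turn the raw scores into a submission file.
# The 0.5 threshold and the tab-separated, headerless format are assumptions.
submission = df_test[['id', 'id_sub']].copy()
submission['label'] = (pred.ravel() > 0.5).astype(int)
submission.to_csv('./outputs/result/DenseBaseline/' + stamp + '-submission.tsv',
                  sep='\t', index=False, header=False)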

DRMMTKS

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model =mz.models.DRMMTKS()
model.params['embedding_input_dim'] = 10000
model.params['embedding_output_dim'] = 100
model.params['top_k'] = 20
model.params['mlp_num_layers'] = 1
model.params['mlp_num_units'] = 5
model.params['mlp_num_fan_out'] = 1
model.params['mlp_activation_func'] = 'tanh'
model.guess_and_fill_missing_params(verbose=0)
# model.build()
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result','DRMMTKS')
logdir1 = os.path.join('outputs', 'model', 'DRMMTKS')

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+'DRMMTKS'+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + 'DRMMTKS' + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + 'DRMMTKS' + '/' + stamp)

KNRM

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model =mz.models.KNRM()
model.params['embedding_input_dim'] =  10000
model.params['embedding_output_dim'] =  10
model.params['embedding_trainable'] = True
model.params['kernel_num'] = 11
model.params['sigma'] = 0.1
model.params['exact_sigma'] = 0.001
model.guess_and_fill_missing_params(verbose=0)
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result','KNRM')
logdir1 = os.path.join('outputs', 'model', 'KNRM')

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+'KNRM'+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + 'KNRM' + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + 'KNRM' + '/' + stamp)

MVLSTM

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model =mz.models.MVLSTM()
model.params['lstm_units'] = 32
model.params['top_k'] = 50
model.params['mlp_num_layers'] = 2
model.params['mlp_num_units'] = 20
model.params['mlp_num_fan_out'] = 10
model.params['mlp_activation_func'] = 'relu'
model.params['dropout_rate'] = 0.5
model.guess_and_fill_missing_params(verbose=0)

model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='MVLSTM'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

HBMP

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model = mz.contrib.models.HBMP()
model.guess_and_fill_missing_params(verbose=0)
model.params['embedding_input_dim'] = 200
model.params['embedding_output_dim'] = 100
model.params['embedding_trainable'] = True
model.params['alpha'] = 0.1
model.params['mlp_num_layers'] = 3
model.params['mlp_num_units'] = [10, 10]
model.params['lstm_num_units'] = 5
model.params['dropout_rate'] = 0.1
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='HBMP'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

ArcI

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model =mz.models.ArcI()
model.params['num_blocks'] = 1
model.params['left_filters'] = [32]
model.params['right_filters'] = [32]
model.params['left_kernel_sizes'] = [3]
model.params['right_kernel_sizes'] = [3]
model.params['left_pool_sizes'] = [2]
model.params['right_pool_sizes'] = [4]
model.params['conv_activation_func'] = 'relu'
model.params['mlp_num_layers'] = 1
model.params['mlp_num_units'] = 64
model.params['mlp_num_fan_out'] = 32
model.params['mlp_activation_func'] = 'relu'
model.params['dropout_rate'] = 0.5
model.guess_and_fill_missing_params(verbose=0)
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='ArcI'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

ConvKNRM

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model = mz.models.ConvKNRM()
model.params['embedding_input_dim'] = 10000
model.params['embedding_output_dim'] = 300
model.params['embedding_trainable'] = True
model.params['filters'] = 128
model.params['conv_activation_func'] = 'tanh'
model.params['max_ngram'] = 3
model.params['use_crossmatch'] = True
model.params['kernel_num'] = 11
model.params['sigma'] = 0.1
model.params['exact_sigma'] = 0.001
model.guess_and_fill_missing_params(verbose=0)
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='ConvKNRM'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

DUET

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model = mz.models.DUET()
model.params['embedding_input_dim'] = 1000
model.params['embedding_output_dim'] = 300
model.params['lm_filters'] = 32
model.params['lm_hidden_sizes'] = [64, 32]
model.params['dropout_rate'] = 0.5
model.params['dm_filters'] = 32
model.params['dm_kernel_size'] = 3
model.params['dm_d_mpool'] = 4
model.params['dm_hidden_sizes'] = [64, 32]
model.guess_and_fill_missing_params(verbose=0)
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='DUET'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

ESIM

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model = mz.contrib.models.ESIM()
model.params['task'] = task
model.params['input_shapes'] = [(15, ), (15, )]
model.params['lstm_dim'] = 300
model.params['mlp_num_units'] = 300
model.params['embedding_input_dim'] =  5000
model.params['embedding_output_dim'] =  10
model.params['embedding_trainable'] = False
model.params['mlp_num_layers'] = 0
model.params['mlp_num_fan_out'] = 300
model.params['mlp_activation_func'] = 'tanh'
model.params['mask_value'] = 0
model.params['dropout_rate'] = 0.5
model.params['optimizer'] = Adam(lr=4e-4)  # keras.optimizers.Adam, imported above
model.guess_and_fill_missing_params()
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='ESIM'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

MatchLSTM

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model =mz.contrib.models.MatchLSTM()
model.guess_and_fill_missing_params(verbose=0)
model.params['embedding_input_dim'] = 10000
model.params['embedding_output_dim'] = 100
model.params['embedding_trainable'] = True
model.params['fc_num_units'] = 200
model.params['lstm_num_units'] = 256
model.params['dropout_rate'] = 0.5
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='MatchLSTM'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

MatchSRNN

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model =mz.contrib.models.MatchSRNN()
model.params['channels'] = 4
model.params['units'] = 10
model.params['dropout_rate'] = 0.0
model.params['direction'] = 'lt'
model.guess_and_fill_missing_params(verbose=0)
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='MatchSRNN'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)

BiMPM

### Define the task; MatchZoo offers two kinds: Ranking and Classification
task = mz.tasks.Ranking()
print('='*20)
print(task)

### Create the model and set its parameters (mz.models.list_available() lists the available models)
model = mz.contrib.models.BiMPM()
model.guess_and_fill_missing_params(verbose=0)
model.params.update(preprocessor.context)
model.params.completed()
model.build()
model.compile()
model.backend.summary()

### Train, evaluate, predict
x, y = train_pack_processed.unpack()
test_x, test_y = test_pack_processed.unpack()
model.fit(x, y, batch_size=32, epochs=10)
print(model.evaluate(dev_x,dev_y))
output_csv=pd.DataFrame()
pred=model.predict(test_x)
output_csv["pred"]=list(pred)

modeltype='BiMPM'
stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join('outputs', 'result',modeltype)
logdir1 = os.path.join('outputs', 'model',modeltype)

mkdir(logdir)
output_csv.to_csv('./outputs/result/'+modeltype+'/'+stamp+'-pred.csv')

mkdir(logdir1)
### Save the model

model.save('./outputs/model/' + modeltype + '/' + stamp)
print('model saved')

# To reload the saved model later:
# loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)
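
The commented-out load line appears in every block above; spelled out for the last saved model (BiMPM here), reloading and rescoring looks like this. This is a sketch that assumes mz.load_model accepts the directory written by model.save:

# Reload one of the saved model directories and rescore the test set
loaded_model = mz.load_model('./outputs/model/' + modeltype + '/' + stamp)
reloaded_pred = loaded_model.predict(test_x)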

The GitHub repository is here:

https://github.com/yingdajun/matchzooForRealEstateQA
