
# -*- coding: utf-8 -*-

#%%----------------------------------------------- import ---------------------------------------

from collections import defaultdict
from gensim import corpora
from gensim.models.doc2vec import Doc2Vec
from gensim.models.doc2vec import TaggedLineDocument
from gensim.models.lsimodel import LsiModel
from gensim.models.tfidfmodel import TfidfModel
from gensim.models.word2vec import Word2Vec
import numpy as np
import pandas as pd
import jieba
import jieba.posseg
import jieba.analyse
import time
import re
import codecs
import xlrd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer

import seaborn as sns

from sklearn.cross_validation import train_test_split

import os

import copy

debugflag = False

def debug(stri):
    if debugflag:
        print stri

pwd = os.getcwd()

# base_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")

data_path = '../../data/'

# code_path = base_path + u'/code/'

feature_path = '../../feature/ljh/'

#%%------------------------------------------------- split word -------------------------------------------

def splitWord(query, stopwords):
    wordList = jieba.cut(query)
    num = 0
    result = ''
    for word in wordList:
        word = word.rstrip()
        word = word.rstrip('"')
        if word not in stopwords:
            if num == 0:
                result = word
                num = 1
            else:
                result = result + ' ' + word
    return result.encode('utf-8')
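# --- illustrative usage (added, not from the original repo) -------------------
# splitWord() returns the jieba tokens joined by single spaces with stopwords
# removed (utf-8 encoded); this is the format later stored in the 'doc' column.
# The sentence and the tiny stopword dict below are made-up examples, and the
# exact segmentation depends on the jieba version/dictionary in use.
if debugflag:
    _demo_stopwords = {u'在': 1, u'的': 1}
    print splitWord(u'被告人在某市盗窃现金三千元', _demo_stopwords)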

def splitword(query, stopwords):
    wordList = jieba.cut(query)
    num = 0
    result = ''
    for word in wordList:
        word = word.rstrip()
        word = word.rstrip('"')
        if word not in stopwords:
            if num == 0:
                result = word
                num = 1
            else:
                result = result + ' ' + word
    return result

def filterword(x):
    return x.replace(u'万万元', u'万元').replace(u'万多万元', u'万元').replace(u'万余万元', u'万元').replace(u'十万八万元', u'八万元').replace(u'一万两万元', u'一万元').replace(u'一万多两万元', u'两万元')

def filter():
    train = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    test = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')
    train['text'] = train['text'].map(filterword)
    test['text'] = test['text'].map(filterword)
    train.to_csv(data_path + 'train_content.csv', index=False, encoding='utf-8')
    test.to_csv(data_path + 'test_content.csv', index=False, encoding='utf-8')

def preprocess(mode='train'):
    stopwords = {}
    for line in codecs.open(data_path + 'stop.txt', 'r', 'utf-8'):
        stopwords[line.rstrip()] = 1
    if mode == 'train':
        data = pd.read_csv(data_path + 'train.txt', delimiter='\t', header=None,
                           names=('id', 'text', 'penalty', 'laws'), encoding='utf-8')
    if mode == 'test':
        data = pd.read_csv(data_path + 'test.txt', delimiter='\t', header=None, names=('id', 'text'),
                           encoding='utf-8')
    if mode == 'law':
        data = pd.read_csv(data_path + 'form-laws.txt', delimiter='\t', header=None, names=('id', 'text'),
                           encoding='utf-8')
        # only the law texts contain the 【...】 heading
        data['prefix_text'] = data['text'].map(lambda x: x[x.index(u'【') + 1:x.index(u'】')])
        data['prefix_text'] = data['prefix_text'].map(lambda x: splitWord(x, stopwords))
    data['doc'] = data['text'].map(lambda x: splitWord(x, stopwords))
    data['text'] = data['text'].map(filterword)
    data.to_csv(data_path + '%s_content.csv' % (mode), index=False, encoding='utf-8')

#%%---------------------------------- get num feature ---------------------------------------------------

def getweight(x):
    r1 = re.compile(u'[0-9]\d*.?\d*克')
    r2 = re.compile(u'[0-9]\d*.?\d*g')
    r3 = re.compile(u'[0-9]\d*.?\d*mg')
    r4 = re.compile(u'[0-9]\d*.?\d*千克')
    w1 = r1.findall(x)
    w2 = r2.findall(x)
    w3 = r3.findall(x)
    w4 = r4.findall(x)
    total = 0
    for w in w1:
        w = float(w.replace(u'克', ''))
        total = total + w
    for w in w2:
        w = float(w.replace('g', ''))
        total = total + w
    for w in w3:
        w = float(w.replace('mg', ''))
        total = total + w / 1000
    for w in w4:
        w = float(w.replace(u'千克', ''))
        total = total + w * 1000
    return total

def getsquare(x):
    r1 = re.compile(u'[0-9]\d*.?\d*立方米')
    mon1 = r1.findall(x)
    total = 0
    for mon in mon1:
        mon = float(mon.replace(u'立方米', ''))
        total = total + mon
    return total

def gettree(x):
    r1 = re.compile(u'[0-9]\d*.?\d*株')
    mon1 = r1.findall(x)
    total = 0
    for mon in mon1:
        mon = float(mon.replace(u'株', ''))
        total = total + mon
    return total

#%% ------------------------------- law one hot ---------------------------------------

def getmoney_prefix_file(arr):

r1 = re.compile(u'(.{7})\d[\d,]*(?:\.\d+)?[余多]?元')
r2 = re.compile(u'(.{7})(?:\d[\d,]*(?:\.\d+)?)[余多]?[十百千万][余多]?元')
r3 = re.compile(u'(.{7})[一二两三零四五六七八九万千百十][余多]?[一二两三零四五六七八九万千百十]+[余多]?元')

r1 = re.compile(u'(.{7})(?:\d+,?)?\d+(?:\.\d+)?元')

r2 = re.compile(u'(.{7})(?:\d+,?)?\d+(?:\.\d+)?余元')

r3 = re.compile(u'(.{7})(?:\d+,?)?\d+(?:\.\d+)?万元')

r4 = re.compile(u'(.{7})(?:\d+,?)?\d+(?:\.\d+)?余万元')

for  x in arr:
    mon1 = r1.findall(x)
    mon2 = r2.findall(x)
    mon3 = r3.findall(x)
    mon1="\n".join(mon1)
    mon2="\n".join(mon2)
    mon3="\n".join(mon3)
    lawfile=codecs.open(data_path+u'mon.txt','a','utf-8')
    lawfile.write(mon1)
    lawfile.write(mon2)
    lawfile.write(mon3)

def getmoneykeyword_file():
    stopwords = {}
    for line in codecs.open(data_path + 'stop.txt', 'r', 'utf-8'):
        stopwords[line.rstrip()] = 1
    keydict = {}
    for line in codecs.open(data_path + u'mon.txt', 'r', 'utf-8'):
        keylist = splitword(line.rstrip(), stopwords)
        keylist = keylist.split(' ')
        for x in keylist:
            if x in keydict:
                keydict[x] = keydict[x] + 1
            else:
                keydict[x] = 1

    lawkeydictsorted = sorted(keydict.iteritems(), key=lambda d: d[1], reverse=True)

    lawkeys = []

    r = re.compile('\d')

    for lawkey in lawkeydictsorted:
        if lawkey[1] > 1000 and len(lawkey[0]) > 1 and u'某' not in lawkey[0] and len(r.findall(lawkey[0])) == 0:
            lawkeys.append(lawkey[0])

    content = '\n'.join(lawkeys)
    lawfile = codecs.open(data_path + u'monkey.txt', 'w', 'utf-8')

    lawfile.write(content)

def getlaw1(arr, i=100):
r = re.compile(u'犯.{2,10}罪')
law_dict = {}
for x in arr:
lawlist = r.findall(x)
for law in lawlist:
law = law.replace(u'犯', '').replace(u'罪', '')
if law not in law_dict:
law_dict[law] = 1
else:
law_dict[law] = law_dict[law] + 1

law_list = sorted(law_dict.iteritems(), key=lambda d: d[1], reverse=True)

law_form_data = pd.read_csv(data_path + '/1-train/form-laws.txt', delimiter='\t', header=None, names=('id', 'text'),
                            encoding='utf-8')
law_from = law_form_data[101:448]['text'].map(lambda x: x[x.index(u'【') + 1:x.index(u'】') - 1]).values
law_from = ' '.join(law_from)
laws = []
for law in law_list[0:i]:
    if law[0] in law_from:
        laws.append(law[0])

content = "\n".join(laws)

lawfile = codecs.open(data_path + 'law_list.txt', 'w', 'utf-8')

lawfile.write(content)

def getlawkey(arr):
law_form_data = pd.read_csv(data_path + 'law_content.csv', encoding='utf-8')
# law_from = law_data[0:101]['text'].map(lambda x: x[x.index(u'【')+1:x.index(u'】')]).values

lawkeydict = {}
lawkeylist = []
law_from = law_form_data['prefix_text'][0:101].values

for law in law_from:
    if pd.isnull(law):
        continue
    templist = law.split(' ')
    for temp in templist:
        if temp not in lawkeylist:
            lawkeylist.append(temp)

for x in arr:
    for lawkey in lawkeylist:
        if lawkey in x:
            if lawkey in lawkeydict:
                lawkeydict[lawkey] = lawkeydict[lawkey] + 1
            else:
                lawkeydict[lawkey] = 0

lawkeydictsorted = sorted(lawkeydict.iteritems(), key=lambda d: d[1], reverse=True)

lawkeys = []

for lawkey in lawkeydictsorted:
    lawkeys.append(lawkey[0])

content = '\n'.join(lawkeys)
lawfile = codecs.open(data_path + 'law_key_list.txt', 'w', 'utf-8')

lawfile.write(content)

common_used_numerals = {u'零': 0, u'一': 1, u'二': 2, u'两': 2, u'三': 3, u'四': 4, u'五': 5,
                        u'六': 6, u'七': 7, u'八': 8, u'九': 9, u'十': 10, u'百': 100, u'千': 1000, u'万': 10000,
                        u'亿': 100000000}

def subFunc(uchars_chinese):
    try:
        total = 0
        r = 1  # current unit: ones, tens, hundreds, thousands...
        for i in range(len(uchars_chinese) - 1, -1, -1):
            val = common_used_numerals.get(uchars_chinese[i])
            if val >= 10 and i == 0:  # handles leading units such as 十三, 十四, 十...
                if val > r:
                    r = val
                    total = total + val
                else:
                    r = r * val
                    # total = total + r * x
            elif val >= 10:
                if val > r:
                    r = val
                else:
                    r = r * val
            else:
                total = total + r * val
        return total
    except:
        return 0.0

def chinese2digits(chinese):
    p2 = chinese
    p11, p12 = '', ''
    if u'万' in p2:
        p12, p2 = chinese.split(u'万')
    if u'亿' in p12:
        p11, p12 = p12.split(u'亿')
    ps = [p11, p12, p2]
    ms = [0.0, 0.0, 0.0]
    # print 'call1'
    for i, p in enumerate(ps):
        ms[i] = subFunc(p)
    # print 'return1'
    return ms[0] * 100000000 + ms[1] * 10000 + ms[2]
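# --- quick sanity check (added, illustrative only) ----------------------------
# chinese2digits() converts a Chinese numeral string into a number by splitting
# on 亿/万 and converting each chunk with subFunc().  A few hand-checked values,
# assuming the reconstruction above matches the original code:
if debugflag:
    assert chinese2digits(u'三万五千') == 35000
    assert chinese2digits(u'两千三百') == 2300
    assert chinese2digits(u'十万') == 100000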

r1 = re.compile(u'(\d[\d,]*(?:\.\d+)?)[余多]?元')
r2 = re.compile(u'((?:\d[\d,]*(?:\.\d+)?)[余多]?[十百千万])[余多]?元')
r3 = re.compile(u'([一二两三零四五六七八九万千百十][多余]?[一二两三零四五六七八九万千百十]+[余多]?)元')

r4 = re.compile(u'(?:一共|共计|合计|总价值|总计|累计|总共|总额|总金额)[^\d]{0,7}(\d[\d,]*(?:\.\d+)?)[余多]?元')
r5 = re.compile(u'(?:一共|共计|合计|总价值|总计|累计|总共|总额|总金额)[^\d]{0,7}((?:\d[\d,]*(?:\.\d+)?)[余多]?[十百千万])[余多]?元')
r6 = re.compile(u'(?:一共|共计|合计|总价值|总计|累计|总共|总额|总金额)[^\d]{0,7}([一二两三零四五六七八九万千百十][余多]?[一二两三零四五六七八九万千百十]+[余多]?)元')

r7 = re.compile(u'(?:海洛因|毒资|冰毒|毒品|手机|轿车|摩托车|香烟|电脑|电动车|动车|车辆|项链|电瓶|香烟|包内|钱包|抽屉|身上|家中|工资|被盗|赃款|获利|盗走|窃得|涉案|销赃|赌资|诈骗|盗得|骗得|渔利|赃物|窃取|戒指|自行车|抢走|赌博|手表|电缆线|骗走)[^\d]{0,7}(\d[\d,]*(?:\.\d+)?)[余多]?元')
r8 = re.compile(u'(?:海洛因|毒资|冰毒|毒品|手机|轿车|摩托车|香烟|电脑|电动车|动车|车辆|项链|电瓶|香烟|包内|钱包|抽屉|身上|家中|工资|被盗|赃款|获利|盗走|窃得|涉案|销赃|赌资|诈骗|盗得|骗得|渔利|赃物|窃取|戒指|自行车|抢走|赌博|手表|电缆线|骗走)[^\d]{0,7}((?:\d[\d,]*(?:\.\d+)?)[余多]?[十百千万])[余多]?元')
r9 = re.compile(u'(?:海洛因|毒资|冰毒|毒品|手机|轿车|摩托车|香烟|电脑|电动车|动车|车辆|项链|电瓶|香烟|包内|钱包|抽屉|身上|家中|工资|被盗|赃款|获利|盗走|窃得|涉案|销赃|赌资|诈骗|盗得|骗得|渔利|赃物|窃取|戒指|自行车|抢走|赌博|手表|电缆线|骗走)[^\d]{0,7}([一二两三零四五六七八九万千百十][余多]?[一二两三零四五六七八九万千百十]+[余多]?)元')

r10 = re.compile(u'(?:支付|利息|本金|所得|存款|透支|资金|贷款|账户|经济损失|转账|汇款|银行|抽头|投资|取款|借款|退赔|索要|还款|送给|赔偿|借给|损失|购买|公司|收取|归还|退还|缴纳|消费|出资|额度|税款|欠款|税额|合同|价税|偿还)[^\d]{0,7}(\d[\d,]*(?:\.\d+)?)[余多]?元')
r11 = re.compile(u'(?:支付|利息|本金|所得|存款|透支|资金|贷款|账户|经济损失|转账|汇款|银行|抽头|投资|取款|借款|退赔|索要|还款|送给|赔偿|借给|损失|购买|公司|收取|归还|退还|缴纳|消费|出资|额度|税款|欠款|税额|合同|价税|偿还)[^\d]{0,7}((?:\d[\d,]*(?:\.\d+)?)[余多]?[十百千万])[余多]?元')
r12 = re.compile(u'(?:支付|利息|本金|所得|存款|透支|资金|贷款|账户|经济损失|转账|汇款|银行|抽头|投资|取款|借款|退赔|索要|还款|送给|赔偿|借给|损失|购买|公司|收取|归还|退还|缴纳|消费|出资|额度|税款|欠款|税额|合同|价税|偿还)[^\d]{0,7}([一二两三零四五六七八九万千百十][余多]?[一二两三零四五六七八九万千百十]+[余多]?)元')

r13 = re.compile(u'(?:罚金|罚款|保证金)[^\d]{0,7}(\d[\d,]*(?:\.\d+)?)[余多]?元')
r14 = re.compile(u'(?:罚金|罚款|保证金)[^\d]{0,7}((?:\d[\d,]*(?:\.\d+)?)[余多]?[十百千万])[余多]?元')
r15 = re.compile(u'(?:罚金|罚款|保证金)[^\d]{0,7}([一二两三零四五六七八九万千百十][余多]?[一二两三零四五六七八九万千百十]+[余多]?)元')

regulardict = {'all': [r1, r2, r3], 'total': [r4, r5, r6], 'fanzui': [r7, r8, r9], 'jinron': [r10, r11, r12],
               'fakuan': [r13, r14, r15]}
keylist = ['all', 'total', 'fanzui', 'jinron', 'fakuan']

def getkeymoney(x):
mondict = {}
for res in regulardict:

    mon1 = regulardict[res][0].findall(x)
    mon2 = regulardict[res][1].findall(x)
    mon3 = regulardict[res][2].findall(x)
    monlist = []

    debug('mon1 start')
    debug(mon1)

    for mon in mon1:
        debug('||' + mon)
        temp = mon
        temp = float(temp.replace(u',', '').replace(u'余', '').replace(u'多', ''))
        debug(temp)
        monlist.append(temp)

    #
    #
    debug('mon2 start')
    debug(mon2)
    for mon in mon2:
        debug('||' + mon)
        unitpow = 0
        temp = mon
        if u'十' in mon:
            unitpow = 10
            temp = temp.replace(u'十', '')
        if u'百' in mon:
            unitpow = 100
            temp = temp.replace(u'百', '')
        if u'千' in mon:
            unitpow = 1000
            temp = temp.replace(u'千', '')
        if u'万' in mon:
            unitpow = 10000
            temp = temp.replace(u'万', '')
        temp = temp.replace(u',', '')
        temp = temp.replace(u'余', '')
        temp = temp.replace(u'多', '')

        temp = float(temp) * unitpow

        debug(temp)
        monlist.append(temp)

    debug('mon3 start')
    debug(mon3)
    for mon in mon3:
        debug('||' + mon)
        temp = mon
        temp = chinese2digits((temp.replace(u',', '').replace(u'余', '').replace(u'多', '')))
        debug(temp)
        monlist.append(temp)

    mondict[res] = monlist

return mondict
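# --- illustrative check (added, not from the original repo) --------------------
# With the money regexes as reconstructed above, getkeymoney() groups every
# amount it finds by context ('all', 'total', 'fanzui', 'jinron', 'fakuan') and
# normalises it to yuan.  A rough hand-worked example on a made-up sentence:
#   getkeymoney(u'盗窃现金3,000元,赃款共计5万元')
#   -> {'all': [3000.0, 50000.0], 'total': [50000.0],
#       'fanzui': [50000.0], 'jinron': [], 'fakuan': []}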

def getmon_dict(x, key, func):
    if key in x:
        if len(x[key]) == 0:
            return 0
        return func(x[key])
    else:
        return 0

def getmon_list(x, func):
    if len(x) > 0:
        return func(x)
    else:
        return 0

def getkeymon(data):
LogInfo('start')
data['mon_dict'] = data['text'].map(getkeymoney)

LogInfo('all start')
for key in keylist:
    data[key + '_sum'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.sum))
    data[key + '_mean'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.mean))
    data[key + '_max'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.max))
    data[key + '_min'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.min))
    data[key + '_std'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.std))
    data[key + '_ptp'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.ptp))
    data[key + '_median'] = data['mon_dict'].map(lambda x: getmon_dict(x, key, np.median))

LogInfo('all end')

data.drop(['mon_dict'], axis=1, inplace=True)

def getlaw_key_onehot(data):
    law_list = []
    for line in codecs.open(data_path + 'law_key_list.txt', 'r', 'utf-8'):
        law_list.append(line.rstrip())
    for i in range(0, len(law_list)):
        ri = re.compile(law_list[i].rstrip(''))
        data['lawkey_' + str(i)] = data['text'].map(lambda x: len(ri.findall(x)))

def gettfidf_random(data):
    tfidf = pd.read_csv(data_path + 'tf_all.csv', encoding='utf-8')
    part1 = []
    part2 = []
    part3 = []
    while len(part1) < 600:
        tmp = np.random.randint(0, 1000)
        if tmp not in part1:
            part1.append(tmp)
    while len(part2) < 300:
        tmp = np.random.randint(1000, 2000)
        if tmp not in part2:
            part2.append(tmp)
    while len(part3) < 100:
        tmp = np.random.randint(2000, 3000)
        if tmp not in part3:
            part3.append(tmp)
    part = part1 + part2 + part3
    word_list = tfidf['word'][part].values
    for i in range(0, len(word_list)):
        ri = re.compile(word_list[i].rstrip(''))
        data['tfidf_' + word_list[i]] = data['doc'].map(lambda x: len(ri.findall(x)))

def gettfidf_onehot(data, num):
    tfidf = pd.read_csv(data_path + 'tf_all.csv', encoding='utf-8')
    word_list = tfidf['word'][0:num + 1].values
    for i in range(0, len(word_list)):
        ri = re.compile(word_list[i].rstrip(''))
        data['tfidf_' + word_list[i]] = data['doc'].map(lambda x: len(ri.findall(x)))

def getlaw_onehot(data):
    law_list = []
    for line in codecs.open(data_path + 'law_list_new.txt', 'r', 'utf-8'):
        law_list.append(line.rstrip())
    data['law_sum'] = 0
    print law_list
    for i in range(0, len(law_list)):
        ri = re.compile(law_list[i])
        print i, law_list[i]
        data['law_' + str(i)] = data['text'].map(lambda x: len(ri.findall(x)))
        data['law_sum'] = data['law_sum'] + data['law_' + str(i)]

#%% ------------------------------- line num; yuan; district ----------------------------------------------

def getlinenum(x):
    return x.count(',') + x.count('.') + x.count(';') + x.count(u',') + x.count(u'。') + x.count(u';')

def getyuan(s):
    if s[0] == u'原':
        return 1
    if s[0] == u'罪':
        return 2
    return 0

def getdocprovince(x):
# x= codecs.utf_8_decode(x)[0]
x = x.split(' ')
i = 0
while i < 7 and len(x) > i:

    if x[i].find(u'省') != -1 or x[i].find(u'自治区') != -1:
        return x[i]
    i = i + 1
return " "

def getdoccity(x):
    # x = codecs.utf_8_decode(x)[0]
    x = x.split(' ')
    i = 0
    while i < 7 and len(x) > i:
        if x[i].find(u'市') != -1:
            return x[i]
        i = i + 1
    return " "

def getdocdistrict(x):
    # x = codecs.utf_8_decode(x)[0]
    x = x.split(' ')
    i = 0
    while i < 7 and len(x) > i:
        if x[i].find(u'县') != -1:
            return x[i]
        if x[i].find(u'区') != -1 and x[i].find(u'自治区') == -1:
            return x[i]
        i = i + 1

    return " "

def getcourtlevel(x):
if (not pd.isnull(x[2])) and x[2] != "" and x[2] != " ":
return 0

if (not pd.isnull(x[1])) and x[1] != "" and x[1] != " ":
    return 1

if (not pd.isnull(x[0])) and x[0] != "" and x[0] != " ":
    return 2

return 3
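# --- note (added for clarity) --------------------------------------------------
# getcourtlevel() receives x = [province, city, district] extracted above and
# encodes the most specific non-empty level: district present -> 0, only city
# -> 1, only province -> 2, nothing recognisable -> 3.  Rough illustration with
# made-up place names:
#   getcourtlevel([u'江苏省', u'南京市', u'玄武区'])  -> 0
#   getcourtlevel([u'江苏省', u'南京市', ' '])        -> 1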

#%%----------------------------------- get feature --------------------------------------------

def getcontent():
    preprocess('train')
    preprocess('test')
    preprocess('law')

map_code = pd.read_excel(data_path + 'map.xls')

def getprovince_code(x):
    tmpstr = ''

if (not pd.isnull(x[2])) and x[2] != "" and x[2] != " ":
    tmpstr = x[2]

if (not pd.isnull(x[1])) and x[1] != "" and x[1] != " ":
    tmpstr = x[1]

if (not pd.isnull(x[0])) and x[0] != "" and x[0] != " ":
    tmpstr = x[0]

if tmpstr == '':
    return 0

debug(tmpstr)
for tmp in map_code.values:
    if tmpstr in tmp[1]:
        debug(tmpstr)
        return str(tmp[0])[0:2]
return 0

def getprovince_onehot():
    train = pd.read_csv(data_path + 'train_base.csv', encoding='utf-8')
    test = pd.read_csv(data_path + 'test_base.csv', encoding='utf-8')
    tmp = np.append(train.province_code.values, test.province_code)

tmp = pd.get_dummies(tmp, prefix='province')

train_temp = tmp[0:120000].reset_index().drop('index', axis=1)
test_temp = tmp[120000:210000].reset_index().drop('index', axis=1)

train = pd.concat([train, train_temp], axis=1)
test = pd.concat([test, test_temp], axis=1)
#
#    train =train.drop('province_code',axis=1)
#    test=test.drop('province_code',axis=1)
#
train.to_csv(data_path + 'train_baseafter.csv', encoding='utf-8', index=None)

test.to_csv(data_path + 'test_baseafter.csv', encoding='utf-8', index=None)

def getbasefeature(mode='train'):
    if mode == 'train':
        data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    if mode == 'test':
        data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')

    data['doc_len'] = data['doc'].map(lambda x: len(x))
    data['linenum'] = data['text'].map(getlinenum)
    data['is_contain_yuan'] = data['text'].map(getyuan)

    data['province'] = data['doc'].map(getdocprovince)

    data['city'] = data['doc'].map(getdoccity)
    data['district'] = data['doc'].map(getdocdistrict)

    data['courtlevel'] = data[['province', 'city', 'district']].apply(getcourtlevel, axis=1)

    data['province'] = data['province'].map(
        lambda x: x.replace(u'自治区', '').replace(u'回族', '').replace(u'壮族', '').replace(u'维吾尔', ''))
    data['city'] = data['city'].map(
        lambda x: x.replace(u'自治区', '').replace(u'回族', '').replace(u'壮族', '').replace(u'维吾尔', ''))
    data['district'] = data['district'].map(
        lambda x: x.replace(u'自治区', '').replace(u'回族', '').replace(u'壮族', '').replace(u'维吾尔', ''))

    data['province_code'] = data[['province', 'city', 'district']].apply(getprovince_code, axis=1)

    data = data.drop(['doc', 'province', 'city', 'district', 'text'], axis=1)

    if mode == 'train':
        data = data.drop(['laws', 'penalty'], axis=1)

    data.to_csv(data_path + mode + '_base.csv', encoding='utf-8', index=None)

def getnumfeature(mode='train'):
    if mode == 'train':
        data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    if mode == 'test':
        data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')

    # data['money'] = data['text'].map(getmoney)

    data['weight'] = data['text'].map(getweight)
    data['tree'] = data['text'].map(gettree)
    data['square'] = data['text'].map(getsquare)

    data = data.drop(['doc', 'text'], axis=1)

    if mode == 'train':
        data = data.drop(['laws', 'penalty'], axis=1)

    data.to_csv(data_path + mode + '_num.csv', encoding='utf-8', index=None)

def getlaw_list_file(i=100):
    data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    text_arr = data.text.values
    data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')

    text_arr = np.append(text_arr, data.text.values)
    getlaw1(text_arr, i)

def getzerocount(x):
    i = 0
    for tmp in x:
        if tmp == 0:
            i = i + 1
    return i

def get_tfidf_file():
    train_data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    documents = []
    for i in range(1, 9):
        temp = train_data['doc'][train_data.penalty == i].values
        temp = " ".join(temp)
        documents.append(temp)

    vectorizer = CountVectorizer(encoding='utf-8')  # turns the documents into a term-frequency matrix; a[i][j] is the count of word j in penalty class i
    transformer = TfidfTransformer()  # computes the tf-idf weight of every word
    tfidf = transformer.fit_transform(
        vectorizer.fit_transform(documents))  # the inner fit_transform builds the count matrix, the outer one converts it to tf-idf
    word = vectorizer.get_feature_names()  # all words in the bag-of-words vocabulary
    weight = tfidf.toarray()  # dense tf-idf matrix; a[i][j] is the tf-idf weight of word j in class i

    temp = pd.DataFrame({'word': word})

    for i in range(len(weight)):  # one tf-idf weight column per penalty class
        temp['weight_' + str(i + 1)] = weight[i]
    temp['std'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        lambda x: np.std(x), axis=1)
    temp['ptp'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        lambda x: np.ptp(x), axis=1)
    temp['mean'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        lambda x: np.mean(x), axis=1)
    temp['max'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        lambda x: np.max(x), axis=1)
    temp['min'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        lambda x: np.min(x), axis=1)
    temp['median'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        lambda x: np.median(x), axis=1)

    temp['zero_count'] = temp[
        ['weight_1', 'weight_2', 'weight_3', 'weight_4', 'weight_5', 'weight_6', 'weight_7', 'weight_8']].apply(
        getzerocount, axis=1)
    temp = temp[temp['zero_count'] <= 4]

    temp = temp.sort_values(by=['std', 'max', 'median', 'ptp'], ascending=False)
    temp.to_csv(data_path + 'tf_all.csv', index=None, encoding='utf-8')

# A portion of the resulting word list was then filtered manually.

def getlaw_key_list_file():
    data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    text_arr = data.text.values
    data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')

    text_arr = np.append(text_arr, data.text.values)
    getlawkey(text_arr)

def gettdidfonehotfeature(mode='train', num=500):
    if mode == 'train':
        data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    if mode == 'test':
        data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')
    gettfidf_onehot(data, num)
    data = data.drop(['doc', 'text'], axis=1)

    if mode == 'train':
        data = data.drop(['laws', 'penalty'], axis=1)

    data.to_csv(data_path + mode + '_tfidf_' + str(num) + '.csv', encoding='utf-8', index=None)

def gettdidf_random_onehotfeature(mode='train'):
    if mode == 'train':
        data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    if mode == 'test':
        data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')
    gettfidf_random(data)
    data = data.drop(['doc', 'text'], axis=1)

    if mode == 'train':
        data = data.drop(['laws', 'penalty'], axis=1)

    data.to_csv(data_path + mode + '_tfidf.csv', encoding='utf-8', index=None)

def getkeymonfeature(mode='train'):
    if mode == 'train':
        data = pd.read_csv(data_path + 'train_content.csv', encoding='utf-8')
    if mode == 'test':
        data = pd.read_csv(data_path + 'test_content.csv', encoding='utf-8')
    getkeymon(data)

    data = data.drop(['doc', 'text'], axis=1)
    if mode == 'train':
        data = data.drop(['laws', 'penalty'], axis=1)
    data.to_csv(data_path + mode + '_keymontotal.csv', encoding='utf-8', index=None)

def LogInfo(stri):
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print str(now) + ' ' + str(stri)

def combineFeature():

train = pd.read_csv(data_path+'train_content.csv',encoding='utf-8')
test =pd.read_csv(data_path+'test_content.csv',encoding='utf-8')

train = train.drop(['doc','text','laws'],axis=1)
test = test.drop(['doc','text'],axis=1)


feature = ['num','baseafter','keymontotal','tfidf_2000']


for fea in feature:
    
        
        
    train_feature = pd.read_csv(data_path+'train_'+fea+'.csv',encoding='utf-8')
    test_feature = pd.read_csv(data_path+'test_'+fea+'.csv',encoding='utf-8')
    
    
    if 'base' in fea:
        
        train_feature =train_feature.drop('province_code',axis=1)
        test_feature=test_feature.drop('province_code',axis=1)
    


    train = pd.merge(train,train_feature,how='left',on='id')
    test = pd.merge(test,test_feature,how='left',on='id')
train.to_csv(feature_path+'train_feature.csv',encoding='utf-8',index=None)
test.to_csv(feature_path+'test_feature.csv',encoding='utf-8',index=None)

def getUseColumn(data):
    d = copy.deepcopy(data)
    k = d.var()
    # print k
    print (k[(k == np.nan) | (k == 0)].index.values)
    col1 = k[(k != np.nan) & (k != 0)].index.values
    return col1

def get_diff_num_feature(num):
train = pd.read_csv(feature_path + 'train_feature.csv', encoding='utf-8')
test = pd.read_csv(feature_path + 'test_feature.csv', encoding='utf-8')

LogInfo(train.shape)
LogInfo(test.shape)



cols  = train.columns

test_cols = test.columns

tfidf_cols =[ True if 'tfidf_' in col else False for col in cols]
other_cols =[ False if x else True for x in tfidf_cols]

test_tfidf_cols =[ True if 'tfidf_' in col else False for col in test_cols]
test_other_cols =[ False if x else True for x in test_tfidf_cols]

tfidf = cols[tfidf_cols]
other = cols[other_cols]

test_tfidf= test_cols[test_tfidf_cols]
test_other  = test_cols[test_other_cols]

    
fea_col =other.append(tfidf[0:num])
test_fea_col = test_other.append(test_tfidf[0:num])
train_feature = train[fea_col]
test_feature = test[test_fea_col]
train_feature.drop(['penalty'],axis=1,inplace=True)
train_feature.to_csv(feature_path+'train_feature_'+str(num)+'.csv',encoding='utf-8',index=None)
test_feature.to_csv(feature_path+'test_feature_'+str(num)+'.csv',encoding='utf-8',index=None)

rule = re.compile(u'第[一二三四五六七八两九十百千万]{1,10}条')

def getrule(x):
    rulelist = []
    rules = rule.findall(x)
    if len(rules) != 0:
        for tmp in rules:
            rulelist.append(tmp)
    return ','.join(rulelist)

def getrules(data):
    data['rules'] = data['text'].map(getrule)

def get_rule_data():

    train = pd.read_csv(data_path + 'train.txt', delimiter='\t', header=None, names=('id', 'text', 'penalty', 'laws'), encoding='utf-8')
    test = pd.read_csv(data_path + 'test.txt', delimiter='\t', header=None, names=('id', 'text'), encoding='utf-8')

    getrules(train)
    getrules(test)

    train.drop(['text', 'penalty', 'laws'], axis=1, inplace=True)
    test.drop(['text'], axis=1, inplace=True)
    train.to_csv(feature_path + 'train_rule.csv', index=None, encoding='utf-8')
    test.to_csv(feature_path + 'test_rule.csv', index=None, encoding='utf-8')

def run():
    get_rule_data()  # extract the cited law articles by rule

    modes = ['train', 'test']

    for mode in modes:
        preprocess(mode)  # preprocessing and word segmentation

    get_tfidf_file()  # per-class tf-idf word selection

    filter()  # text cleaning

    for mode in modes:
        getbasefeature(mode)    # basic statistics features
        getkeymonfeature(mode)  # money-amount features
        gettdidfonehotfeature(mode, 1000)  # keyword features
        getnumfeature(mode)     # numeric statistics features
    getprovince_onehot()  # one-hot encode the province
    combineFeature()  # merge all feature blocks

    get_diff_num_feature(1000)  # keep the top-1000 word features as the wide input for the deep models

# -*- coding: utf-8 -*-

from __future__ import division
import pandas as pd
import numpy as np
import gensim
from gensim.models.word2vec import Word2Vec
import jieba
import warnings
import codecs
from collections import defaultdict
#import xgboost as xgb

from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras import backend
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten, Merge,concatenate,Activation,Lambda
from keras.layers import Convolution1D, MaxPooling1D, Embedding,BatchNormalization,Dropout
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional
from keras.models import Model
from keras.layers.pooling import GlobalMaxPooling1D
from keras.callbacks import EarlyStopping,ModelCheckpoint,LearningRateScheduler
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers.advanced_activations import PReLU
from keras import optimizers
from keras import initializers
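# Note (added): this file targets the Keras 1.x-era API -- the `Merge` layer and
# the `init=` argument used below were removed/renamed (functional
# `concatenate` / `kernel_initializer=`) in later Keras 2 releases, so a
# correspondingly old Keras version is assumed when running this code.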

MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 100000
EMBEDDING_DIM = 300
VALIDATION_SPLIT = 0.1
LAW_COUNT = 452
INPUT_LEN = 1000
JACCARD_TH = 0.5
PROB_PATH = '../../result/ljh/laws/'
FEATURE_PATH = '../../feature/ljh/'

def getLawLabel(data):
    # data = df[2].values
    # n = df.shape[0]
    n = len(data)
    matrix = np.zeros((n, LAW_COUNT))
    for i, laws in enumerate(data):
        seq = laws.split(',')
        for l in seq:
            try:
                matrix[i, int(l) - 1] = 1
            except IndexError:
                print laws
    return matrix
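# --- illustrative check (added, not from the original repo) --------------------
# getLawLabel() turns the comma-separated law-article string of each sample into
# a multi-hot row of length LAW_COUNT (=452), e.g.:
#   getLawLabel(['1,3', '452'])  ->  rows where columns 0, 2 and 451 are set to 1.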

from keras import backend as K
def macro_f1(y_true, y_pred):
true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
predicted_positives = K.sum(K.one_hot(K.argmax(y_pred),8), axis = 0)
precision = true_positives / (predicted_positives + K.epsilon())

true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
possible_positives = K.sum(y_true, axis = 0)
recall = true_positives / (possible_positives + K.epsilon())

f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
return macro_f1

def Jaccard_Sim(y_true,y_pred):
y_pred = K.greater_equal(y_pred,JACCARD_TH)
y_pred = K.cast(y_pred, dtype='float32')
intersection = K.sum(y_true*y_pred,axis=1)
pred = K.sum(y_pred,axis=1)
true = K.sum(y_true,axis=1)
union = pred + true - intersection
jaccard = intersection / (union+ K.epsilon())
jaccard = K.mean(jaccard)
return jaccard
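# --- worked example (added for clarity) ----------------------------------------
# Jaccard_Sim thresholds the predicted probabilities at JACCARD_TH (0.5), then
# computes |pred AND true| / |pred OR true| per sample and averages over the
# batch.  E.g. for one sample with y_true = [1, 0, 1, 1] and
# y_pred = [0.9, 0.2, 0.6, 0.4]: thresholded pred = [1, 0, 1, 0],
# intersection = 2, union = 3, Jaccard = 2/3.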

#prelu = PReLU()

def cnnModel():
    embedding_layer = Embedding(MAX_NB_WORDS + 1, EMBEDDING_DIM,
                                embeddings_initializer=initializers.he_uniform(20),
                                # weights=[embedding_matrix],
                                input_length=INPUT_LEN,
                                trainable=True)

    # trainable=False))

    model1 = Sequential()
    model1.add(embedding_layer)
    model1.add(Convolution1D(128, 4, padding='same', init='he_normal'))
    model1.add(BatchNormalization())
    model1.add(Activation('relu'))
    model1.add(Convolution1D(128, 4, padding='same', activation='relu', init='he_normal'))
    model1.add(GlobalMaxPooling1D())

    model2 = Sequential()
    model2.add(embedding_layer)
    model2.add(Convolution1D(128, 3, padding='same', init='he_normal'))
    model2.add(BatchNormalization())
    model2.add(Activation('relu'))
    model2.add(Convolution1D(128, 3, padding='same', activation='relu', init='he_normal'))
    model2.add(GlobalMaxPooling1D())

    model3 = Sequential()
    model3.add(embedding_layer)
    model3.add(Convolution1D(128, 5, padding='same', init='he_normal'))
    model3.add(BatchNormalization())
    model3.add(Activation('relu'))
    model3.add(Convolution1D(128, 5, padding='same', activation='relu', init='he_normal'))
    model3.add(GlobalMaxPooling1D())

    model4 = Sequential()
    model4.add(embedding_layer)
    model4.add(Convolution1D(128, 7, padding='same', init='he_normal'))
    model4.add(BatchNormalization())
    model4.add(Activation('relu'))
    model4.add(Convolution1D(128, 7, padding='same', activation='relu', init='he_normal'))
    model4.add(GlobalMaxPooling1D())

    model = Sequential()
    model.add(Merge([model1, model2, model3, model4], mode='concat', concat_axis=1))
    # model.add(GRU(128, dropout=0.2, recurrent_dropout=0.1))
    model.add(Dropout(0.3))
    model.add(Dense(128, activation='relu', init='he_normal'))
    model.add(Dense(LAW_COUNT, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adamax',
                  metrics=[Jaccard_Sim])
    model.summary()
    return model

def widecnnModel():
    embedding_layer = Embedding(MAX_NB_WORDS + 1, EMBEDDING_DIM,
                                embeddings_initializer=initializers.he_uniform(20),
                                # weights=[embedding_matrix],
                                input_length=INPUT_LEN,
                                trainable=True)

    # trainable=False))

    model1 = Sequential()
    model1.add(embedding_layer)
    model1.add(Convolution1D(128, 4, padding='same', init='he_normal'))
    model1.add(BatchNormalization())
    model1.add(Activation('relu'))
    model1.add(Convolution1D(128, 4, padding='same', activation='relu', init='he_normal'))
    model1.add(GlobalMaxPooling1D())

    model2 = Sequential()
    model2.add(embedding_layer)
    model2.add(Convolution1D(128, 3, padding='same', init='he_normal'))
    model2.add(BatchNormalization())
    model2.add(Activation('relu'))
    model2.add(Convolution1D(128, 3, padding='same', activation='relu', init='he_normal'))
    model2.add(GlobalMaxPooling1D())

    model3 = Sequential()
    model3.add(embedding_layer)
    model3.add(Convolution1D(128, 5, padding='same', init='he_normal'))
    model3.add(BatchNormalization())
    model3.add(Activation('relu'))
    model3.add(Convolution1D(128, 5, padding='same', activation='relu', init='he_normal'))
    model3.add(GlobalMaxPooling1D())

    model4 = Sequential()
    model4.add(embedding_layer)
    model4.add(Convolution1D(128, 7, padding='same', init='he_normal'))
    model4.add(BatchNormalization())
    model4.add(Activation('relu'))
    model4.add(Convolution1D(128, 7, padding='same', activation='relu', init='he_normal'))
    model4.add(GlobalMaxPooling1D())

    # prelu = PReLU()
    wide = Sequential()
    wide.add(Dense(512, input_shape=(WIDE_LEN,), activation='tanh', init='he_normal'))
    wide.add(Dropout(0.3))
    wide.add(BatchNormalization())
    wide.add(Dense(32, activation='tanh', init='he_normal'))  # 128
    # wide.add(Dropout(0.3))
    wide.add(BatchNormalization())
    wide.add(Dense(8, activation='tanh', init='he_normal'))

    model = Sequential()
    model.add(Merge([model1, model2, model3, model4, wide], mode='concat', concat_axis=1))
    model.add(Dropout(0.3))
    model.add(Dense(128, activation='relu', init='he_normal'))
    model.add(Dense(LAW_COUNT, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adamax',
                  metrics=[Jaccard_Sim])
    model.summary()
    return model

def getData():
    train = pd.read_csv(FEATURE_PATH + 'train.tsv', header=None)
    test = pd.read_csv(FEATURE_PATH + 'test.tsv', header=None)
    wide_train = pd.read_csv(FEATURE_PATH + 'train_feature.csv')
    wide_test = pd.read_csv(FEATURE_PATH + 'test_feature.csv')
    print wide_train.shape
    wide_train.drop(['id', 'penalty'], axis=1, inplace=True)
    wide_test.drop(['id'], axis=1, inplace=True)
    x_train, x_test, y_train = train[range(3, 1003)].values, test[range(3, 1003)].values, train[2]
    y_train = getLawLabel(y_train)
    testIndex = test[[0]]
    testIndex.columns = ['ID']
    return x_train, x_test, y_train, wide_train.values, wide_test.values, testIndex

def getCNNlaws():
    def scheduler(epoch):
        epo = [2, 5, 7, 12, 20]
        # epo = [2,5,15,30,40]
        lrs = [.001, .0002, .00002, .000002, .0000001]
        for i, e in enumerate(epo):
            if epoch == e:  # compare against each scheduled epoch (original compared against the whole list)
                K.set_value(model.optimizer.lr, lrs[i])
        return K.get_value(model.optimizer.lr)

    X_train, X_test, Y_train, wide_train, wide_test, testIndex = getData()
    WIDE_LEN = wide_train.shape[1]
    TIMES = 8  # 10
    for SEED in range(TIMES):
        print SEED
        np.random.seed(SEED)
        # model = getModel()

        change_lr = LearningRateScheduler(scheduler)
        # model = TextCNN(num_words, EMBEDDING_DIM, INPUT_LEN,config)
        model = cnnModel()
        print 'training cnn model..'
        # model_check = ModelCheckpoint(filepath='models/seed.'+str(SEED)+'.{val_Jaccard_Sim:.4f}.weights.{epoch:02d}.hdf5', save_best_only=True, verbose=1)
        model.fit([X_train], Y_train,
                  # model.fit([train[range(2,1002)]],law_label,
                  batch_size=16,
                  epochs=6,
                  callbacks=[change_lr])
                  # callbacks=snapshot.get_callbacks(model_prefix=model_prefix),
                  # callbacks = [EarlyStopping(monitor='val_Jaccard_Sim',patience=5,mode='max'),change_lr],
                  # validation_data=([X_val,wide_val], laws_val))
        # mode = 'model NOT updated!!!'

        print 'saving model', SEED

        model.save('models/1202_cnn_' + str(SEED) + '.h5')

        print 'predicting test..'
        predict = model.predict(X_test)
        if SEED == 0:
            pred = predict
        else:
            pred += predict
        # del model

    print 'storing cnn prob...'
    df = pd.DataFrame(pred)
    for col in df.columns:
        df[col] /= TIMES
    df = testIndex.join(df)
    df.to_csv(PROB_PATH + 'jh_laws_cnn_blending_prob.csv', index=0, header=None, float_format='%.6f')

    print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'

def getWideCNNlaws():
    X_train, X_test, Y_train, wide_train, wide_test, testIndex = getData()
    global WIDE_LEN
    WIDE_LEN = wide_train.shape[1]

    def scheduler(epoch):
        # epo = [2,5,7,12,20]
        epo = [2, 5, 15, 30, 40]
        lrs = [.001, .0002, .00002, .000002, .0000001]
        for i, e in enumerate(epo):
            if epoch == e:  # compare against each scheduled epoch (original compared against the whole list)
                K.set_value(model.optimizer.lr, lrs[i])
        return K.get_value(model.optimizer.lr)

    TIMES = 8  # 10
    for SEED in range(TIMES):
        print SEED
        np.random.seed(SEED)
        # model = getModel()
        change_lr = LearningRateScheduler(scheduler)
        # model = TextCNN(num_words, EMBEDDING_DIM, INPUT_LEN,config)
        model = widecnnModel()
        # model_check = ModelCheckpoint(filepath='models/seed.'+str(SEED)+'.{val_Jaccard_Sim:.4f}.weights.{epoch:02d}.hdf5', save_best_only=True, verbose=1)
        # model.fit([X_train,wide_train],laws_train,
        model.fit([X_train, wide_train], Y_train,
                  batch_size=16,
                  epochs=8,
                  callbacks=[change_lr])
                  # callbacks=snapshot.get_callbacks(model_prefix=model_prefix),
                  # callbacks = [EarlyStopping(monitor='val_Jaccard_Sim',patience=5,mode='max'),change_lr],
                  # validation_data=([X_val,wide_val], laws_val))
        # mode = 'model NOT updated!!!'
        # print 'saving model',SEED
        # model.save('models/1209_wide&cnn_'+str(SEED)+'.h5')
        print 'predicting test...'
        predict = model.predict([X_test, wide_test])
        if SEED == 0:
            pred = predict
        else:
            pred += predict
        # del model

    df = pd.DataFrame(pred)
    for col in df.columns:
        df[col] /= TIMES
    df = testIndex.join(df)
    df.to_csv(PROB_PATH + 'jh_laws_wide&cnn_blending_prob.csv', index=0, header=None, float_format='%.6f')

# -*- coding: utf-8 -*-

from __future__ import division
import pandas as pd
import numpy as np
import gensim
from gensim.models.word2vec import Word2Vec
import jieba
import warnings
import codecs
from collections import defaultdict
#import xgboost as xgb

from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import keras
from keras import backend
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten, Merge,concatenate,Activation,Lambda
from keras.layers import Convolution1D, MaxPooling1D, Embedding,BatchNormalization,Dropout
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional
from keras.models import Model
from keras.layers.pooling import GlobalMaxPooling1D
from keras.callbacks import EarlyStopping,ModelCheckpoint,LearningRateScheduler
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers.advanced_activations import PReLU
from keras import optimizers
from keras import initializers

from keras import backend as K

MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 100000
EMBEDDING_DIM = 300
VALIDATION_SPLIT = 0.1
INPUT_LEN = 1000
PROB_PATH = '../../result/ljh/money/'
FEATURE_PATH = '../../feature/ljh/'

config = {
    'status': 'online',
    'mode': 'penalty',
    'word_embed': False,
    'check_jaccard': False,
}

def macro_f1(y_true, y_pred):
true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
predicted_positives = K.sum(K.one_hot(K.argmax(y_pred),8), axis = 0)
precision = true_positives / (predicted_positives + K.epsilon())

true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
possible_positives = K.sum(y_true, axis = 0)
recall = true_positives / (possible_positives + K.epsilon())

f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
return macro_f1

def Jaccard_Sim(y_true,y_pred):
y_pred = K.greater_equal(y_pred,JACCARD_TH)
y_pred = K.cast(y_pred, dtype='float32')
intersection = K.sum(y_true*y_pred,axis=1)
pred = K.sum(y_pred,axis=1)
true = K.sum(y_true,axis=1)
union = pred + true - intersection
jaccard = intersection / (union+ K.epsilon())
jaccard = K.mean(jaccard)
return jaccard

def textCNNmodel(config):

    embedding_layer = Embedding(MAX_NB_WORDS + 1, EMBEDDING_DIM,
                                embeddings_initializer=initializers.he_uniform(20),
                                input_length=INPUT_LEN,
                                trainable=True)

    model1 = Sequential()
    model1.add(embedding_layer)
    model1.add(Convolution1D(128, 2, padding='same'))
    model1.add(GlobalMaxPooling1D())

    model2 = Sequential()
    model2.add(embedding_layer)
    model2.add(Convolution1D(128, 3, padding='same'))
    model2.add(GlobalMaxPooling1D())

    model3 = Sequential()
    model3.add(embedding_layer)
    model3.add(Convolution1D(128, 5, padding='same'))
    model3.add(GlobalMaxPooling1D())

    model4 = Sequential()
    model4.add(embedding_layer)
    model4.add(Convolution1D(128, 7, padding='same'))
    model4.add(GlobalMaxPooling1D())

    model = Sequential()
    model.add(Merge([model1, model2, model3, model4], mode='concat', concat_axis=1))
    model.add(Dropout(0.3))
    model.add(Dense(128, activation='relu'))

    if config['mode'] == 'penalty':
        model.add(Dense(8, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adamax',
                      metrics=[macro_f1])
    else:
        model.add(Dense(452, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[Jaccard_Sim])
    print model.summary()
    return model

def k_maxpooling(conv, topk, dim):
    # note: relies on `tf` (tensorflow), which this file does not import itself
    def _top_k(x):
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=topk)
        return tf.reshape(k_max[0], (-1, dim * topk))
    k_max = Lambda(_top_k, output_shape=(dim * topk,))(conv)
    return k_max

def add_layer(L, outdim=32):
    c = BatchNormalization()(L)
    c = Dense(outdim)(c)
    c = PReLU()(c)
    L = Merge(mode='concat', concat_axis=-1)([L, c])
    return L

def dense_cnn_model():
my_embedding = Embedding(input_dim=MAX_NB_WORDS+1, output_dim=EMBEDDING_DIM, input_length=None) #128
#---------keyword 1 -------------------------
in1 = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
emb1 = my_embedding(in1)

cnn1 = Convolution1D(filters=256, kernel_size=7, kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1) # relu   

x1 = GlobalMaxPooling1D()(cnn1)


cnn3 = Convolution1D(filters=256, kernel_size=3,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x3 = GlobalMaxPooling1D()(cnn3)

cnn5 = Convolution1D(filters=256, kernel_size=5,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x5 = GlobalMaxPooling1D()(cnn5)
# cnn4 = Convolution1D(filters=256, kernel_size=2,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x1 = Merge(mode='concat', concat_axis=-1)([x1, x3, x5])

#block1
for i in range(4):
    x1 = add_layer(x1, 128) #128
x1 = BatchNormalization()(x1)
x1 = PReLU()(x1)
x1 = Dense(128)(x1)

#block2
for i in range(4):
    x1 = add_layer(x1, 128)
#x1 = BatchNormalization()(x1)
#x1 = Dense(128)(x1)

x = BatchNormalization()(x1)
x = Dense(256)(x) #128


x = PReLU()(x)
x = Dropout(0.35)(x)  #0.25
y = Dense(8, activation='softmax')(x)
#y = Dense(8, activation='sigmoid')(x)

#model = Model(inputs=[in1, in2], outputs=y)
model = Model(inputs=[in1], outputs=y)
rmsprop = keras.optimizers.Adadelta(lr=1.0, rho=0.9, epsilon=1e-06) #lr=1.0 rho=0.95
model.compile(optimizer=rmsprop,  loss='categorical_crossentropy',   metrics=[macro_f1])
print model.summary()
return model

def getData():
    train = pd.read_csv(FEATURE_PATH + 'train.tsv', header=None)
    test = pd.read_csv(FEATURE_PATH + 'test.tsv', header=None)
    testIndex = test[[0]]
    testIndex.columns = ['ID']

    x_train, x_test, y_train = train[range(3, 1003)].values, test[range(3, 1003)].values, train[1]
    y_labels = list(y_train.value_counts().index)
    le = LabelEncoder()
    le.fit(y_labels)
    num_labels = len(y_labels)
    y_train = to_categorical(y_train.map(lambda x: le.transform([x])[0]), num_labels)

    return x_train, x_test, y_train, testIndex

def getCNNpenalty():
    X_train, X_test, Y_train, testIndex = getData()
    print X_train.shape, X_test.shape

    TIMES = 8  # 10
    for SEED in range(TIMES):
        print SEED
        M = SEED
        # X_train, X_val, Y_train, Y_val = train_test_split(train[range(2,1002)], train[1], test_size=VALIDATION_SPLIT, random_state=SEED)
        # X_train, X_val, Y_train, Y_val = train_test_split(pd.DataFrame(x_train), y_train, test_size=VALIDATION_SPLIT, random_state=SEED)
        np.random.seed(SEED)
        # model = getModel()
        # model = TextCNN(num_words, EMBEDDING_DIM, INPUT_LEN,config)
        model = textCNNmodel(config)
        # model_check = ModelCheckpoint(filepath='models/seed.'+str(SEED)+'.{val_Jaccard_Sim:.4f}.weights.{epoch:02d}.hdf5', save_best_only=True, verbose=1)
        # model.fit([X_train],laws_train,
        # best epochs is 4
        print 'training cnn model...'
        model.fit([X_train], Y_train,
                  batch_size=16,
                  epochs=4,)
                  # callbacks = [change_lr],
                  # callbacks=snapshot.get_callbacks(model_prefix=model_prefix),
                  # callbacks = [EarlyStopping(monitor='val_Jaccard_Sim',patience=0,mode='max'),change_lr],
                  # validation_data=([X_val.values,wide_val], Y_val))
        # mode = 'model NOT updated!!!'
        # print 'saving model',SEED
        # model.save('models/1203_penalty_cnn_'+str(SEED)+'.h5')
        print 'predicting test...'
        predict = model.predict(X_test)
        if SEED == 0:
            pred = predict
        else:
            pred += predict
        del model

    print 'storing result...'

    res = np.argmax(pred, axis=1)

    storeResult(testID, res, '1203-penalty-cnn*10')

    df = pd.DataFrame(pred)
    sums = df.sum(axis=1)
    for col in df.columns:
        df[col] /= sums
    df = testIndex.join(df)
    df.to_csv(PROB_PATH + 'jh_penalty_cnn_blending_prob.csv', index=0, header=None, float_format='%.6f')
    print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'

def getDenseCNNpenalty():
    X_train, X_test, Y_train, testIndex = getData()
    print X_train.shape, X_test.shape
    TIMES = 8  # 10
    for SEED in range(TIMES):
        print SEED

        np.random.seed(SEED)
        # model = getModel()
        # change_lr = LearningRateScheduler(scheduler)
        # model = TextCNN(num_words, EMBEDDING_DIM, INPUT_LEN,config)
        model = dense_cnn_model()
        # model_check = ModelCheckpoint(filepath='models/seed.'+str(SEED)+'.{val_Jaccard_Sim:.4f}.weights.{epoch:02d}.hdf5', save_best_only=True, verbose=1)

        # model.fit([X_train],laws_train,
        # best epochs is 4
        print 'training dense cnn model...'
        model.fit([X_train], Y_train,
                  batch_size=16,
                  epochs=11,)
                  # callbacks = [change_lr],
                  # callbacks=snapshot.get_callbacks(model_prefix=model_prefix),
                  # callbacks = [EarlyStopping(monitor='val_Jaccard_Sim',patience=0,mode='max'),change_lr],
                  # validation_data=([X_val.values], Y_val))
        # mode = 'model NOT updated!!!'
        # print 'saving model',SEED
        # model.save('models/1210_penalty_dense_'+str(SEED)+'.h5')
        print 'predicting test...'
        predict = model.predict(X_test)
        if SEED == 0:
            pred = predict
        else:
            pred += predict
        del model

    print 'storing result...'

    res = np.argmax(pred, axis=1)

    storeResult(testID, res, '1210-penalty-dense*5')

    df = pd.DataFrame(pred)

    df.to_csv('result/1210-densenet-penalty-5-blending_prob.csv', index=0, header=None)

    sums = df.sum(axis=1)
    for col in df.columns:
        df[col] /= sums
    df = testIndex.join(df)
    df.to_csv(PROB_PATH + 'jh_penalty_dense_cnn_blending_prob.csv', index=0, header=None, float_format='%.6f')

# -*- coding: utf-8 -*-

import pandas as pd

import numpy as np
import re

# train_path = 'data/train.txt'
# test_path = 'data/test.txt'

train_path = '../../data/train.new.txt'
test_path = '../../data/test.new.txt'
data_path = '../../data/'
feature_path = '../../feature/ljh/'

cnt_thre = 4
max_ci = 1500

def get_word_id():
df = pd.read_csv(train_path, sep='\t', header=None, encoding='utf8')

X = df.values
dic = {}

for i in range(len(X)):
    if i%1000 == 0:
        print (i)
        
    x = X[i]
    se = set()
    xx = x[1].split(' ')
    #for z in x[1]:
    for z in xx:
        if z in se:
            continue
        se.add(z)
        
        if z not in dic:
            dic[z] = 0
        dic[z] += 1

T = []
for k, v in dic.items():
    if v > cnt_thre:
        T.append([k, v])
df = pd.DataFrame(T)
df.to_csv(feature_path+'zi.id.tsv', index=False, header=None, encoding='utf8')

def read_dic():
    df = pd.read_csv(feature_path + 'zi.id.tsv', header=None, encoding='utf8')
    dic = {}
    i = 1
    for x in df.values:
        dic[x[0]] = i
        i += 1
    return dic, i
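# --- note (added for clarity) ---------------------------------------------------
# zi.id.tsv holds one `token,count` row per token that appears in more than
# cnt_thre training documents (see get_word_id above).  read_dic() assigns ids
# 1..N to those tokens in file order and returns (dic, N+1); cnn_feature() maps
# unknown tokens to that last id (max_id), so the embedding vocabulary size is
# the number of kept tokens plus one.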

def cnn_feature(flag):
if flag == 'train':
    df = pd.read_csv(train_path, sep='\t', header=None, encoding='utf8')
else:
    df = pd.read_csv(test_path, sep='\t', header=None, encoding='utf8')

dic, max_id = read_dic()

T = []
X = df.values
for i in range(len(X)):
    if i%1000 == 0:
        print (i)

    # debug leftover (commented out): stop early after 2000 rows
    # if i == 2000:
    #     break

    x = X[i]
    zz = x[1].split(' ')
    t = [x[0], -1, -1]
    if flag == 'train':
        t = [x[0], x[2], x[3]]
   
    #if len(x[1]) > 10000:
    #if len(zz) > max_ci:
    #    zz = zz[:max_ci]
    if len(zz) > max_ci:
        zz = zz[-max_ci:]

    sn = []
    #for z in x[1]:
    for z in zz:
        if z in dic:
            sn.append(dic[z])
        else:
            sn.append(max_id)
    t = t + sn
    T.append(t)
df = pd.DataFrame(T)
#df = df.astype('int')
df.to_csv(feature_path+flag+'.tsv', index=False, header=None, encoding='utf8')

def get_alpha(number, flag=False):
number = float(number.replace(u',', '').replace(u',', ''))
if flag:
number *= 10000

list1 = [1000, 2000, 3000, 4000, 5000, 10000, 500000]

list2 = [‘A’, ‘B’, ‘C’, ‘D’, ‘E’, ‘F’, ‘G’, ‘H’]

list1 = [30, 100, 300, 1000, 2000, 3000, 4000, 5000, 7000, 10000, 20000, 50000, 100000, 500000]
list2 = ['QA', 'QB', 'QC', 'QD', 'QE', 'QF', 'QG', 'QH', 'QI', 'QJ', 'QK', 'QL', 'QM', 'QN', 'QO']

i = 0
while i<len(list1):
    if number<list1[i]:
        break
    
    i += 1
        
return list2[i]

def replace_money(string):
string = string.replace(u'余元', u'元').replace(u'万余元', u'万元').replace(u'余万元', u'万元')
r = re.compile(u'(\d+((,\d+)|(,\d+))*(\.\d+)?)元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number)
    string = string.replace(number+u'元', alpha+u'元')
    
r = re.compile(u'(\\d+((,\\d+)|(,\\d+))*(\.\\d+)?)万元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number, True)
    string = string.replace(number+u'万元', alpha+u'万元').replace(u'万元',u'元')
    
return string
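# --- illustrative check (added, not from the original repo) ---------------------
# replace_money() buckets every money amount into a coarse letter token
# (QA..QO, see get_alpha) so the downstream word model sees the magnitude rather
# than the exact figure.  With the thresholds above, for example:
#   replace_money(u'骗取现金2,500元')  ->  u'骗取现金QF元'
# (2,500 falls into the 2000-3000 bucket, i.e. the 6th letter 'QF').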

import jieba,codecs
def splitWord(query, stopwords):
    wordList = jieba.cut(query)
    num = 0
    result = ''
    for word in wordList:
        word = word.rstrip()
        word = word.rstrip('"')
        if word not in stopwords:
            if num == 0:
                result = word
                num = 1
            else:
                result = result + ' ' + word
    return result.encode('utf-8')

from tqdm import tqdm
def replace_train_test():
files = [data_path + 'test.txt', data_path + 'train.txt']
files1 = [data_path + 'test.new.txt', data_path + 'train.new.txt']

stopwords = {}
for line in codecs.open(data_path+'stop.txt', 'r', 'utf-8'):
    stopwords[line.rstrip()] = 1
              
for i in range(len(files)):
    print (files[i])
    df = pd.read_csv(files[i], sep='\t', header=None, encoding='utf8')

    X = df.values
    for j in tqdm(range(len(X))):
        if len(X[j][1]) > 0:
            X[j][1] = replace_money(X[j][1])
            X[j][1] = splitWord(X[j][1],stopwords).decode('utf8')
    df = pd.DataFrame(X, columns=df.columns)
    print (files1[i])
    df.to_csv(files1[i], sep='\t', index=False, header=False, encoding='utf8')

def run():
replace_train_test()

global cnt_thre
cnt_thre = 6
get_word_id()

cnn_feature('train')
cnn_feature('test')

# -*- coding: utf-8 -*-
from penalty import *
from laws import *
import processing
import feature

def process():
    # data preprocessing
    processing.run()

    # feature extraction
    print 'getting features..'
    feature.run()

    # penalty (fine amount) models
    getCNNpenalty()
    getDenseCNNpenalty()

    # law-article models
    getCNNlaws()
    getWideCNNlaws()

if __name__ == '__main__':
    process()

# -*- coding: utf-8 -*-

import numpy as np

import keras
from keras.layers import Dense, Dropout, BatchNormalization, Convolution1D, \
    GlobalMaxPooling1D, Embedding, Input, Merge, merge, Dot, dot, Reshape, Lambda, Masking, Activation, GlobalAveragePooling1D, PReLU
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.optimizers import RMSprop, SGD
from keras.regularizers import l2
from keras import backend as K
from keras.layers.merge import Concatenate
from keras.layers.core import Flatten

import random
import pandas as pd
import numpy as np
import math
import gc
import tensorflow as tf

#from Layers import ConvBlockLayer
#from utils import get_conv_shape
from keras.layers.pooling import MaxPooling1D
from sklearn.cross_validation import train_test_split
import os

train_path = '../../feature/lzp/fajin.train.ci2.txt'
test_path = '../../feature/lzp/fajin.test.ci2.txt'

result_path = '../../result/lzp/money/fajin.ci.json'
model_path = '../../model/lzp/money/'

np.random.seed(666)

#1:6:42162
max_char_id = 84621 #9:32748 #4:54105 #2:81171 #84621

max_seq_len = 1000
epoch = 9
ts = 0

def macro_f1(y_true, y_pred):
true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
predicted_positives = K.sum(K.one_hot(K.argmax(y_pred),8), axis = 0)
precision = true_positives / (predicted_positives + K.epsilon())

true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
possible_positives = K.sum(y_true, axis = 0)
recall = true_positives / (possible_positives + K.epsilon())

f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
return macro_f1

def k_maxpooling(conv, topk, dim):
def _top_k(x):
x = tf.transpose(x, [0, 2, 1])
k_max = tf.nn.top_k(x, k=topk)
return tf.reshape(k_max[0], (-1, dim * topk))
k_max = Lambda(_top_k, output_shape=(dim * topk,))(conv)
return k_max

def add_layer(L, outdim=32):
    c = BatchNormalization()(L)
    c = Dense(outdim)(c)
    c = PReLU()(c)
    L = Merge(mode='concat', concat_axis=-1)([L, c])
    return L

def build_model(train_x, train_y, test_x = []):
# --- shared embedding layer ---
my_embedding = Embedding(input_dim=max_char_id+3, output_dim=128, input_length=None) #128

#---------keyword 1 -------------------------
in1 = Input(shape=(max_seq_len,), dtype='int32')
emb1 = my_embedding(in1)


cnn1 = Convolution1D(filters=256, kernel_size=7, kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1) # relu   

#xa = Flatten()(cnn1)

cnn1 = emb1

num_filters = [256, 256, 256]

#---------------

# Each ConvBlock with one MaxPooling Layer

for i in range(len(num_filters)):
    # NOTE: ConvBlockLayer / get_conv_shape come from the commented-out
    # `Layers`/`utils` imports above and must be available for this branch to run
    cnn1 = ConvBlockLayer(get_conv_shape(cnn1), num_filters[i])(cnn1)
    cnn1 = MaxPooling1D(pool_size=3, strides=2, padding="same")(cnn1)

x1 = GlobalMaxPooling1D()(cnn1)
#x1 = k_maxpooling(cnn1, 15, 256)

#x1 = GlobalAveragePooling1D()(cnn1)



cnn3 = Convolution1D(filters=256, kernel_size=3,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x3 = GlobalMaxPooling1D()(cnn3)

cnn5 = Convolution1D(filters=256, kernel_size=5,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x5 = GlobalMaxPooling1D()(cnn5)

cnn4 = Convolution1D(filters=256, kernel_size=7, kernel_initializer='he_uniform', padding='valid', activation='relu')(emb1)

x4 = GlobalMaxPooling1D()(cnn4)

# x1 = Merge(mode='concat', concat_axis=-1)([x1, x2, x3, x4])  # leftover: x2 is not defined in this version

x1 = Merge(mode='concat', concat_axis=-1)([x1, x3, x5])

#block1
for i in range(4):
    x1 = add_layer(x1, 128) #128
x1 = BatchNormalization()(x1)
x1 = PReLU()(x1)
x1 = Dense(128)(x1)

#block2
for i in range(4):
    x1 = add_layer(x1, 128)
#x1 = BatchNormalization()(x1)
#x1 = Dense(128)(x1)

x = BatchNormalization()(x1)
x = Dense(256)(x) #128


x = PReLU()(x)
x = Dropout(0.35)(x)  #0.25
y = Dense(8, activation='softmax')(x)
#y = Dense(8, activation='sigmoid')(x)


#model = Model(inputs=[in1, in2], outputs=y)
model = Model(inputs=[in1], outputs=y)

#rmsprop = RMSprop(lr=0.005, rho=0.9, epsilon=1e-08, decay=0.0)
#rmsprop = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False) #0.430
#rmsprop = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
#rmsprop = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#rmsprop = keras.optimizers.Adamax(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)


rmsprop = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06) #lr=1.0 rho=0.95
model.compile(optimizer=rmsprop,  loss='categorical_crossentropy',   metrics=[macro_f1])


global seed
#model_name = 'model/dnn.seed.'+str(seed)+'.h5'
if ts > 0:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.weights.{epoch:02d}-{val_macro_f1:.4f}.hdf5', save_best_only=False, verbose=1) #False
else:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.weights.fajin.ci.{epoch:02d}.hdf5', save_best_only=False, verbose=1) #False

if len(test_x) == 0:
    flag = 0
    if flag == 1:
        model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, validation_split=0.2, callbacks=[model_check]) #16
    else:
        if ts > 0:
            train_X, test_X, train_y, test_y = train_test_split(train_x[0], train_y, test_size = ts, random_state = seed)
            model.fit([train_X], train_y, batch_size=32, epochs=epoch, shuffle=True, validation_data = ([test_X], test_y), callbacks=[model_check])
        else:
            model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, callbacks=[model_check])
            
    del model
    gc.collect()   
else:
    #model.fit(train_x, train_y, batch_size=16, epochs=epoch, shuffle=True) #16
    global model_name
    model.load_weights(model_name)
    
    preds = model.predict(test_x, batch_size=32, verbose=0)
    #print (preds)
    del model
    gc.collect()
    return preds

def load_feat(flag):
if flag == 'train':
    df = pd.read_csv(train_path, header=None, encoding='utf8')  # , nrows=1000
    # df = pd.read_csv('feature/train.tsv', header=None, encoding='utf8', nrows=1000)
else:
    df = pd.read_csv(test_path, header=None, encoding='utf8')  # , nrows=1000
    # df = pd.read_csv('feature/test.tsv', header=None, encoding='utf8', nrows=1000)

#df = df.fillna(max_char_id+2)

df = df.fillna(0)
df = df.astype('int')

X = []
Y = []
if flag == 'train':
    train_x = df[df.columns[2:max_seq_len+2]].values
    y = df[1].values
    train_y = []
    for yi in y:
        t = [0]*8
        t[yi-1] = 1
        train_y.append(t)
        
    X, Y = [np.array(train_x)], np.array(train_y)
    #return [np.array(train_x)], np.array(train_y)
else:
    test_x = df[df.columns[2:max_seq_len+2]].values
    test_id = df[0].values
    X, Y = [np.array(test_x)], test_id
    #return [test_x], test_id
    

return X, Y
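# Illustrative note (worked example, not from the original source): load_feat('train')
# turns the 1..8 penalty label in column 1 into an 8-way one-hot row, e.g. a penalty
# of 3 becomes [0, 0, 1, 0, 0, 0, 0, 0], while the first max_seq_len token-id columns
# become the single network input; the 'test' branch returns ids instead of labels.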

def save_res(ids, preds):
res_path = result_path
sw = open(res_path, 'w')
T = []
for i in range(len(ids)):
    T.append([ids[i]] + list(preds[i]))
    t = list(preds[i])
    t = t.index(max(t)) + 1
    #{"id": "32", "penalty": 1, "laws": [1, 2, 3, 4]}
    sw.write('{"id": "'+str(ids[i])+'", "penalty": '+str(t)+', "laws": []}' + '\n')
sw.close()
df = pd.DataFrame(T)
df.to_csv(res_path+'prob.tsv', index=False, header=False, float_format='%.8f')

model_name = ''
seed = 0
ts = 0.2

def run(mon):
global seed, model_name, ts

online = mon
ts = 0

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    
    #weis = []

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    path = model_path
    
    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            mepo = int(file[-7:-5]) # keep only the epoch-6/7/8 checkpoints
            if mepo in [6,7,8] and file.find('fajin.ci') >= 0:
                model_cnt += 1
                fullpath = os.path.join(dirpath,file)
                model_name = fullpath
                #model_name = models[i]
                print (model_name)
                np.random.seed(seed)
                predsi = build_model(train_x, train_y, test_x)
                if len(preds) == 0:
                    preds = predsi #*weis[i]
                else:
                    for i in range(len(preds)):
                        preds[i] += predsi[i] #*weis[i]
                #break #
                
    for i in range(len(preds)):
        preds[i] /= model_cnt
        #preds[i] /= len(models)
        
    print (preds)
    
    save_res(test_id, preds)

def main():
global seed, model_name, ts, mfeat
mfeat = 'feature/'
online = 1
ts = 0

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    
    #weis = []

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    path = 'model/10top3'
    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            model_cnt += 1
            fullpath = os.path.join(dirpath,file)
            model_name = fullpath
            #model_name = models[i]
            print (model_name)
            np.random.seed(seed)
            predsi = build_model(train_x, train_y, test_x)
            if len(preds) == 0:
                preds = predsi #*weis[i]
            else:
                for i in range(len(preds)):
                    preds[i] += predsi[i] #*weis[i]

    for i in range(len(preds)):
        preds[i] /= model_cnt
        #preds[i] /= len(models)
        
    print (preds)
    
    save_res(test_id, preds)

if __name__ == '__main__':
    main()

# -*- coding: utf-8 -*-

import numpy as np

import keras
from keras.layers import (Dense, Dropout, BatchNormalization, Convolution1D,
    GlobalMaxPooling1D, Embedding, Input, Merge, merge, Dot, dot, Reshape, Lambda,
    Masking, Activation, GlobalAveragePooling1D, PReLU)
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.optimizers import RMSprop, SGD
from keras.regularizers import l2
from keras import backend as K
from keras.layers.merge import Concatenate
from keras.layers.core import Flatten

import random
import pandas as pd
import numpy as np
import math
import gc
import tensorflow as tf

#from Layers import ConvBlockLayer
#from utils import get_conv_shape

from keras.layers.pooling import MaxPooling1D
from sklearn.cross_validation import train_test_split
import os

train_path = '…/…/feature/lzp/fajin.train.zi2.txt'
test_path = '…/…/feature/lzp/fajin.test.zi2.txt'

result_path = '…/…/result/lzp/money/fajin.zi.json'
model_path = '…/…/model/lzp/money/'

np.random.seed(666)

#1:6:42162
max_char_id = 5388 #9:32748 #4:54105 #2:81171 #84621

max_seq_len = 3000
epoch = 9
ts = 0

def macro_f1(y_true, y_pred):
true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
predicted_positives = K.sum(K.one_hot(K.argmax(y_pred),8), axis = 0)
precision = true_positives / (predicted_positives + K.epsilon())

true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
possible_positives = K.sum(y_true, axis = 0)
recall = true_positives / (possible_positives + K.epsilon())

f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
return macro_f1
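# Worked example (illustrative, not from the original source): with 4 samples whose
# true classes are [1, 1, 2, 2] and argmax predictions [1, 2, 2, 2],
#   class 1: precision 1/1, recall 1/2, f1 = 2*(1*0.5)/1.5      ~ 0.667
#   class 2: precision 2/3, recall 2/2, f1 = 2*((2/3)*1)/(5/3)  = 0.8
# and the support-weighted average is (2*0.667 + 2*0.8)/4 ~ 0.733, which is what this
# metric reports (a support-weighted macro F1 over the 8 penalty classes).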

def k_maxpooling(conv, topk, dim):
def _top_k(x):
    x = tf.transpose(x, [0, 2, 1])
    k_max = tf.nn.top_k(x, k=topk)
    return tf.reshape(k_max[0], (-1, dim * topk))
k_max = Lambda(_top_k, output_shape=(dim * topk,))(conv)
return k_max
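# Shape sketch (illustrative): given a conv output of shape (batch, T, dim), the
# Lambda keeps the topk largest activations per channel along the time axis and
# flattens them, e.g.
#   conv: (None, T, 256), topk=15  ->  k_max: (None, 256*15) = (None, 3840)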

def add_layer(L, outdim=32):
c = BatchNormalization()(L)
c = Dense(outdim)(c)
c = PReLU()(c)
L = Merge(mode='concat', concat_axis=-1)([L, c])
return L
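# Growth sketch (illustrative): add_layer concatenates a BN -> Dense(outdim) -> PReLU
# branch back onto its input, so the feature width grows by outdim per call, e.g. a
# 768-wide input becomes 768 + 4*128 = 1280 wide after four add_layer(x, 128) calls.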

def build_model(train_x, train_y, test_x = []):
#--- shared embedding
my_embedding = Embedding(input_dim=max_char_id+3, output_dim=128, input_length=None) #128

#---------keyword 1 -------------------------
in1 = Input(shape=(max_seq_len,), dtype='int32')
emb1 = my_embedding(in1)


cnn1 = Convolution1D(filters=256, kernel_size=7, kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1) # relu   

#xa = Flatten()(cnn1)

#---------------
# Optional very-deep-CNN branch. Left disabled here because the ConvBlockLayer /
# get_conv_shape imports at the top of the file are commented out; with it disabled,
# cnn1 stays the kernel-7 convolution defined above.
#cnn1 = emb1
#num_filters = [256, 256, 256]
# Each ConvBlock with one MaxPooling Layer
#for i in range(len(num_filters)):
#    cnn1 = ConvBlockLayer(get_conv_shape(cnn1), num_filters[i])(cnn1)
#    cnn1 = MaxPooling1D(pool_size=3, strides=2, padding="same")(cnn1)

x1 = GlobalMaxPooling1D()(cnn1)
#x1 = k_maxpooling(cnn1, 15, 256)

#x1 = GlobalAveragePooling1D()(cnn1)



cnn3 = Convolution1D(filters=256, kernel_size=3,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x3 = GlobalMaxPooling1D()(cnn3)

cnn5 = Convolution1D(filters=256, kernel_size=5,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x5 = GlobalMaxPooling1D()(cnn5)

cnn4 = Convolution1D(filters=256, kernel_size=7, kernel_initializer='he_uniform', padding='valid', activation='relu')(emb1)

x4 = GlobalMaxPooling1D()(cnn4)

#x1 = Merge(mode='concat', concat_axis=-1)([x1, x2, x3, x4])  # x2 is never defined; kept disabled

x1 = Merge(mode='concat', concat_axis=-1)([x1, x3, x5])

#block1
for i in range(4):
    x1 = add_layer(x1, 128) #128
x1 = BatchNormalization()(x1)
x1 = PReLU()(x1)
x1 = Dense(128)(x1)

#block2
for i in range(4):
    x1 = add_layer(x1, 128)
#x1 = BatchNormalization()(x1)
#x1 = Dense(128)(x1)

x = BatchNormalization()(x1)
x = Dense(256)(x) #128


x = PReLU()(x)
x = Dropout(0.35)(x)  #0.25
y = Dense(8, activation='softmax')(x)
#y = Dense(8, activation='sigmoid')(x)


#model = Model(inputs=[in1, in2], outputs=y)
model = Model(inputs=[in1], outputs=y)

#rmsprop = RMSprop(lr=0.005, rho=0.9, epsilon=1e-08, decay=0.0)
#rmsprop = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False) #0.430
#rmsprop = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
#rmsprop = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#rmsprop = keras.optimizers.Adamax(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)


rmsprop = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06) #lr=1.0 rho=0.95
model.compile(optimizer=rmsprop,  loss='categorical_crossentropy',   metrics=[macro_f1])


global seed
#model_name = 'model/dnn.seed.'+str(seed)+'.h5'

if ts > 0:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.weights.{epoch:02d}-{val_macro_f1:.4f}.hdf5', save_best_only=False, verbose=1) #False
else:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.weights.fajin.zi.{epoch:02d}.hdf5', save_best_only=False, verbose=1) #False
    

if len(test_x) == 0:
    flag = 0
    if flag == 1:
        model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, validation_split=0.2, callbacks=[model_check]) #16
    else:
        
        if ts > 0:
            train_X, test_X, train_y, test_y = train_test_split(train_x[0], train_y, test_size = ts, random_state = seed)
            model.fit([train_X], train_y, batch_size=32, epochs=epoch, shuffle=True, validation_data = ([test_X], test_y), callbacks=[model_check])
        else:
            model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, callbacks=[model_check])
     
    del model
    gc.collect()
else:
    #model.fit(train_x, train_y, batch_size=16, epochs=epoch, shuffle=True) #16
    global model_name
    model.load_weights(model_name)
    
    preds = model.predict(test_x, batch_size=32, verbose=0)
    #print (preds)
    
    del model
    gc.collect()

    return preds

def load_feat(flag):
if flag == 'train':
    df = pd.read_csv(train_path, header=None, encoding='utf8') #, nrows=1000
    #df = pd.read_csv('feature/train.tsv', header=None, encoding='utf8', nrows=1000) #, nrows=1000
else:
    df = pd.read_csv(test_path, header=None, encoding='utf8') #, nrows=1000
    #df = pd.read_csv('feature/test.tsv', header=None, encoding='utf8', nrows=1000) #, nrows=1000

#df = df.fillna(max_char_id+2)

df = df.fillna(0)
df = df.astype('int')

X = []
Y = []
if flag == 'train':
    train_x = df[df.columns[2:max_seq_len+2]].values
    y = df[1].values
    train_y = []
    for yi in y:
        t = [0]*8
        t[yi-1] = 1
        train_y.append(t)
        
    X, Y = [np.array(train_x)], np.array(train_y)
    #return [np.array(train_x)], np.array(train_y)
else:
    test_x = df[df.columns[2:max_seq_len+2]].values
    test_id = df[0].values
    X, Y = [np.array(test_x)], test_id
    #return [test_x], test_id
    

return X, Y

def save_res(ids, preds):
res_path = result_path
sw = open(res_path, 'w')
T = []
for i in range(len(ids)):
    T.append([ids[i]] + list(preds[i]))
    t = list(preds[i])
    t = t.index(max(t)) + 1
    #{"id": "32", "penalty": 1, "laws": [1, 2, 3, 4]}
    sw.write('{"id": "'+str(ids[i])+'", "penalty": '+str(t)+', "laws": []}' + '\n')
sw.close()
df = pd.DataFrame(T)
df.to_csv(res_path+'prob.tsv', index=False, header=False, float_format='%.8f')

model_name = ''
seed = 0
ts = 0.2

def run(mon):
global seed, model_name, ts

online = mon
ts = 0

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    #seeds = [27]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    #path = 'model/10top3'
    path = model_path
    
    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            mepo = int(file[-7:-5]) # keep only the epoch-6/7/8 checkpoints
            if mepo in [6,7,8] and file.find('fajin.zi') >= 0:   
                model_cnt += 1
                fullpath = os.path.join(dirpath,file)
                model_name = fullpath
                #model_name = models[i]
                print (model_name)
                np.random.seed(seed)
                predsi = build_model(train_x, train_y, test_x)
                if len(preds) == 0:
                    preds = predsi #*weis[i]
                else:
                    for i in range(len(preds)):
                        preds[i] += predsi[i] #*weis[i]
                        
                #break #
    for i in range(len(preds)):
        preds[i] /= model_cnt
        #preds[i] /= len(models)
        
    print (preds)
    
    save_res(test_id, preds)

def main():
global seed, model_name, ts

online = 1
ts = 0

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    #seeds = [27]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    path = 'model/10top3'
    
    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            
                model_cnt += 1
                fullpath = os.path.join(dirpath,file)
                model_name = fullpath
                #model_name = models[i]
                print (model_name)
                np.random.seed(seed)
                predsi = build_model(train_x, train_y, test_x)
                if len(preds) == 0:
                    preds = predsi #*weis[i]
                else:
                    for i in range(len(preds)):
                        preds[i] += predsi[i] #*weis[i]

    for i in range(len(preds)):
        preds[i] /= model_cnt
        #preds[i] /= len(models)
        
    print (preds)
    
    save_res(test_id, preds)

if __name__ == '__main__':
    main()

# -*- encoding:utf-8 -*-

import pandas as pd

import numpy as np
import re

input_train_path = '…/…/data/train.txt'
input_test_path = '…/…/data/test.txt'

train1_path = '…/…/feature/lzp/fajin.train.ci1.txt'
test1_path = '…/…/feature/lzp/fajin.test.ci1.txt'

stop_path = '…/…/data/stop.txt'

train2_path = '…/…/feature/lzp/fajin.train.ci2.txt'
test2_path = '…/…/feature/lzp/fajin.test.ci2.txt'

id_path = '…/…/feature/lzp/fajin.ci.id.tsv'

cnt_thre = 4

def get_word_id():
df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8')

X = df.values
dic = {}

for i in range(len(X)):
    if i%1000 == 0:
        print (i)
        
    x = X[i]
    se = set()
    xx = x[1].split(' ')
    #for z in x[1]:
    for z in xx:
        if z in se:
            continue
        se.add(z)
        
        if z not in dic:
            dic[z] = 0
        dic[z] += 1

T = []
for k, v in dic.items():
    if v > cnt_thre:
        T.append([k, v])
df = pd.DataFrame(T)
df.to_csv(id_path, index=False, header=None, encoding='utf8')

def read_dic():
df = pd.read_csv(id_path, header=None, encoding='utf8')
dic = {}
i = 1
for x in df.values:
    dic[x[0]] = i
    i += 1
return dic, i
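# Usage sketch (illustrative): get_word_id keeps only words seen in more than
# cnt_thre distinct training documents, and read_dic numbers the surviving words
# from 1, e.g. dic = {u'word_a': 1, u'word_b': 2, ...} (placeholder names).
# Words outside dic are later mapped to max_id in cnn_feature, and 0 stays free
# for padding (the model-side load_feat fills missing columns with 0).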

def cnn_feature(flag, nrows=None):
outpath = ''
if flag == 'train':
    df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8', nrows=nrows) # honour the nrows argument
    outpath = train2_path
else:
    df = pd.read_csv(test1_path, sep='\t', header=None, encoding='utf8', nrows=nrows)
    outpath = test2_path

dic, max_id = read_dic()

T = []
X = df.values
for i in range(len(X)):
    if i%1000 == 0:
        print (i)

    #if i == 2000:
    #    break

    x = X[i]
    zz = x[1].split(' ')
    t = [x[0], -1]
    if flag == 'train':
        t = [x[0], x[2]]
   
    #if len(x[1]) > 10000:
    if len(zz) > 1000:
        zz = zz[:1000]

    sn = []
    #for z in x[1]:
    for z in zz:
        if z in dic:
            sn.append(dic[z])
        else:
            sn.append(max_id)
    t = t + sn
    T.append(t)
df = pd.DataFrame(T)
#df = df.astype('int')
df.to_csv(outpath, index=False, header=None, encoding='utf8')

def get_alpha(number, flag=False):
number = float(number.replace(',', '').replace(',', ''))
if flag:
    number *= 10000

list1 = [1000, 2000, 3000, 4000, 5000, 10000, 500000]

list2 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']

list1 = [30, 100, 300, 1000, 2000, 3000, 4000, 5000, 7000, 10000, 20000, 50000, 100000, 500000]
list2 = ['QA', 'QB', 'QC', 'QD', 'QE', 'QF', 'QG', 'QH', 'QI', 'QJ', 'QK', 'QL', 'QM', 'QN', 'QO']

i = 0
while i<len(list1):
    if number<list1[i]:
        break
    
    i += 1
        
return list2[i]
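# Worked examples (illustrative), using the second list1/list2 pair above:
#   get_alpha('25')        -> 'QA'   (below 30 yuan)
#   get_alpha('4,999')     -> 'QH'   (below 5000 yuan)
#   get_alpha('510,000')   -> 'QO'   (at or above 500000 yuan)
#   get_alpha('52', True)  -> 'QO'   (52 wan = 520000 yuan)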

def replace_money(string):
string = string.encode('utf-8')
string = string.replace('余元', '元').replace('万余元', '万元').replace('余万元', '万元')
r = re.compile('(\d+((,\d+)|(,\d+))*(\.\d+)?)元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number)
    string = string.replace(number, alpha)
    
r = re.compile('(\\d+((,\\d+)|(,\\d+))*(\.\\d+)?)万元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number, True)
    string = string.replace(number, alpha).replace('万元','元')
    
return string
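# Worked example (illustrative): '被告人透支4,999余元用于赌博' first becomes
# '...4,999元...' (the 余 is dropped), the regex then finds '4,999', get_alpha maps
# it to 'QH', and the amount is rewritten as 'QH元'; likewise '52万余元' becomes
# 'QO元' through the 万元 pass, since 52 wan is 520000 yuan.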

import jieba, codecs
def splitWord(query, stopwords):
wordList = jieba.cut(query)
num = 0
result = ''
for word in wordList:
    word = word.rstrip()
    word = word.rstrip('"')
    if word not in stopwords:
        if num == 0:
            result = word
            num = 1
        else:
            result = result + ' ' + word
return result.encode('utf-8')

from tqdm import tqdm
def replace_train_test(nrows=None):
files = [input_test_path, input_train_path]
files1 = [test1_path, train1_path]

stopwords = {}
for line in codecs.open(stop_path, 'r', 'utf-8'):
    stopwords[line.rstrip()] = 1
              
for i in range(len(files)):
    print (files[i])
    df = pd.read_csv(files[i], sep='\t', header=None, encoding='utf8', nrows=nrows)

    X = df.values
    for j in tqdm(range(len(X))):
        if len(X[j][1]) > 0:
            X[j][1] = replace_money(X[j][1])
            X[j][1] = splitWord(X[j][1],stopwords).decode('utf8')
    df = pd.DataFrame(X, columns=df.columns)
    print (files1[i])
    df.to_csv(files1[i], sep='\t', index=False, header=False, encoding='utf8')

def run(nrows=None):
replace_train_test(nrows) # replace money amounts with bucket tokens, then segment words

global cnt_thre
cnt_thre = 6
get_word_id() # assign an id to each word

cnn_feature('train', nrows) # build training-set features
cnn_feature('test', nrows) # build test-set features

def main():
replace_train_test() # replace money amounts with bucket tokens, then segment words

global cnt_thre
cnt_thre = 6
get_word_id() # assign an id to each word

cnn_feature('train') # build training-set features
cnn_feature('test') # build test-set features


print ('tt')

if __name__ == '__main__':
    main()

# -*- encoding:utf-8 -*-

import pandas as pd

import numpy as np
import re

input_train_path = '…/…/data/train.txt'
input_test_path = '…/…/data/test.txt'

train1_path = '…/…/feature/lzp/fajin.train.zi1.txt'
test1_path = '…/…/feature/lzp/fajin.test.zi1.txt'

stop_path = '…/…/data/stop.txt'

train2_path = '…/…/feature/lzp/fajin.train.zi2.txt'
test2_path = '…/…/feature/lzp/fajin.test.zi2.txt'

id_path = '…/…/feature/lzp/fajin.zi.id.tsv'

max_seq_len = 3000

def get_word_id():
df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8')

X = df.values
dic = {}

for i in range(len(X)):
    if i%1000 == 0:
        print (i)
        
    x = X[i]
    se = set()
    
    if len(x[1]) > max_seq_len:
        x[1] = x[1][:max_seq_len]

    for z in x[1]:
        if z in se:
            continue
        se.add(z)
        
        if z not in dic:
            dic[z] = 0
        dic[z] += 1

T = []
for k, v in dic.items():
    if v > 2:
        T.append([k, v])
df = pd.DataFrame(T)
df.to_csv(id_path, index=False, header=None, encoding='utf8')

def read_dic():
df = pd.read_csv(id_path, header=None, encoding='utf8')
dic = {}
i = 1
for x in df.values:
    dic[x[0]] = i
    i += 1
return dic, i

def cnn_feature(flag, nrows=None):
outpath = ''
if flag == 'train':
    df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8', nrows=nrows) # honour the nrows argument
    outpath = train2_path
else:
    df = pd.read_csv(test1_path, sep='\t', header=None, encoding='utf8', nrows=nrows)
    outpath = test2_path

dic, max_id = read_dic()

T = []
X = df.values
for i in range(len(X)):
    if i%1000 == 0:
        print (i)

    #if i == 2000:
    #    break

    x = X[i]

    t = [x[0], -1]
    if flag == 'train':
        t = [x[0], x[2]]

    #if len(x[1]) > 10000:
    #    x[1] = x[1][-10000:]

    if len(x[1]) > max_seq_len:
        x[1] = x[1][:max_seq_len]

    sn = []
    for z in x[1]:
        if z in dic:
            sn.append(dic[z])
        else:
            sn.append(max_id)
    t = t + sn
    T.append(t)
df = pd.DataFrame(T)
#df = df.astype('int')
df.to_csv(outpath, index=False, header=None, encoding='utf8')

def get_alpha(number, flag=False):
number = float(number.replace(',', '').replace(',', ''))
if flag:
    number *= 10000

list1 = [1000, 2000, 3000, 4000, 5000, 10000, 500000]

list2 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']

list1 = [30, 100, 300, 1000, 2000, 3000, 4000, 5000, 7000, 10000, 20000, 50000, 100000, 500000]
list2 = ['QA', 'QB', 'QC', 'QD', 'QE', 'QF', 'QG', 'QH', 'QI', 'QJ', 'QK', 'QL', 'QM', 'QN', 'QO']

i = 0
while i<len(list1):
    if number<list1[i]:
        break
    
    i += 1
        
return list2[i]

def replace_money(string):
string = string.encode('utf-8')
string = string.replace('余元', '元').replace('万余元', '万元').replace('余万元', '万元')
r = re.compile('(\d+((,\d+)|(,\d+))*(\.\d+)?)元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number)
    string = string.replace(number, alpha)
    
r = re.compile('(\\d+((,\\d+)|(,\\d+))*(\.\\d+)?)万元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number, True)
    string = string.replace(number, alpha).replace('万元','元')
    
return string

import codecs
from tqdm import tqdm
def replace_train_test(nrows=None):
files = [input_test_path, input_train_path]
files1 = [test1_path, train1_path]

stopwords = {}
for line in codecs.open(stop_path, 'r', 'utf-8'):
    stopwords[line.rstrip()] = 1
              
for i in range(len(files)):
    print (files[i])
    df = pd.read_csv(files[i], sep='\t', header=None, encoding='utf8', nrows=nrows)

    X = df.values
    for j in tqdm(range(len(X))):
        if len(X[j][1]) > 0:
            X[j][1] = replace_money(X[j][1])
    df = pd.DataFrame(X, columns=df.columns)
    print (files1[i])
    df.to_csv(files1[i], sep='\t', index=False, header=False, encoding='utf8')

def run(nrows=None):
replace_train_test(nrows) #step1

get_word_id() #step2

cnn_feature('train', nrows) #step3
cnn_feature('test', nrows) #step4

def main():
replace_train_test() #step1

get_word_id() #step2

cnn_feature('train') #step3
cnn_feature('test') #step4


#print (get_onehot(10000000))
#string = '公诉50元机关梅510,000余元州市梅52万余元江区人4,999余元民检察院'
#string = replace_money(string)
#print(string)

if __name__ == '__main__':
    #run(100)

    main()

# -*- coding: utf-8 -*-

import numpy as np

import keras
from keras.layers import (Dense, Dropout, BatchNormalization, Convolution1D,
    GlobalMaxPooling1D, Embedding, Input, Merge, merge, Dot, dot, Reshape, Lambda,
    Masking, Activation, GlobalAveragePooling1D, PReLU, TimeDistributed)
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.optimizers import RMSprop, SGD
from keras.regularizers import l2
from keras import backend as K
from keras.layers.merge import Concatenate
from keras.layers.core import Flatten

import random
import pandas as pd
import numpy as np
import math
import gc
import tensorflow as tf
import os

#from Layers import ConvBlockLayer
#from utils import get_conv_shape

from keras.layers.pooling import MaxPooling1D
from sklearn.cross_validation import train_test_split

train_path = '…/…/feature/lzp/fawen.train.ci2.txt'
test_path = '…/…/feature/lzp/fawen.test.ci2.txt'

result_path = '…/…/result/lzp/laws/fawen.ci.tsv'
model_path = '…/…/model/lzp/laws/'

np.random.seed(666)

#1:6:42162
max_char_id = 84621 #9:32748 #4:54105 #2:81171 #84621

max_seq_len = 1500
epoch = 6
ts = 0
jac_thre = 0.5

def macro_f1(y_true, y_pred):
true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
predicted_positives = K.sum(K.one_hot(K.argmax(y_pred),8), axis = 0)
precision = true_positives / (predicted_positives + K.epsilon())

true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
possible_positives = K.sum(y_true, axis = 0)
recall = true_positives / (possible_positives + K.epsilon())

f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
return macro_f1

#def Jaccard_Sim(y_true, y_pred):
#    y_pred = K.greater_equal(y_pred, jac_thre)
#    y_pred = K.cast(y_pred, dtype='float32')
#    intersection = K.sum(y_true*y_pred, axis=0)
#    pred = K.sum(y_pred, axis=0)
#    true = K.sum(y_true, axis=0)
#    union = pred + true - intersection
#    jaccard = intersection / (union + K.epsilon())
#    jaccard = K.mean(jaccard)
#    return jaccard

#def Jaccard_Sim(y_true, y_pred):
#    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
#    intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
#    sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
#    jac = (intersection + K.epsilon()) / (sum_ - intersection + K.epsilon())
#    return K.mean(jac)

def Jaccard_Sim(y_true,y_pred):
y_pred = K.greater_equal(y_pred, 0.5)
y_pred = K.cast(y_pred,dtype='float32')
intersection = K.sum(y_true*y_pred,axis=1)
pred = K.sum(y_pred,axis=1)
true = K.sum(y_true,axis=1)
union = pred + true - intersection
jaccard = intersection / (union+ K.epsilon())
jaccard = K.mean(jaccard)
return jaccard
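# Worked example (illustrative): for one sample with true law set {1, 3} encoded as
# y_true = [1, 0, 1, 0, ...] and sigmoid outputs [0.9, 0.6, 0.2, 0.1, ...],
# thresholding at 0.5 predicts {1, 2}; intersection = 1 and union = 3, so the
# per-sample Jaccard is 1/3, and the metric averages this over the batch.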

def k_maxpooling(conv, topk, dim):
def _top_k(x):
    x = tf.transpose(x, [0, 2, 1])
    k_max = tf.nn.top_k(x, k=topk)
    return tf.reshape(k_max[0], (-1, dim * topk))
k_max = Lambda(_top_k, output_shape=(dim * topk,))(conv)
return k_max

def add_layer(L, outdim=32):
c = BatchNormalization()(L)
c = Dense(outdim)(c)
c = PReLU()(c)
L = Merge(mode='concat', concat_axis=-1)([L, c])
return L

def jaccard_distance_loss(y_true, y_pred, smooth=100):

y_pred = K.greater_equal(y_pred, 0.5)

y_pred = K.cast(y_pred,dtype='float32')

intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
jaccard = (intersection + smooth) / (sum_ - intersection + smooth)

y_pred = K.greater_equal(y_pred, 0.5)

y_pred = K.cast(y_pred,dtype='float32')

intersection = K.sum(y_true*y_pred,axis=1)

pred = K.sum(y_pred,axis=1)

true = K.sum(y_true,axis=1)

union = pred + true - intersection

jaccard = intersection / (union+ K.epsilon())

jaccard = K.mean(jaccard)

return (1 - jaccard) * smooth*100

def build_model(train_x, train_y, test_x = []):
#--- shared embedding
my_embedding = Embedding(input_dim=max_char_id+3, output_dim=128, input_length=None) #128

#---------keyword 1 -------------------------
in1 = Input(shape=(max_seq_len,), dtype='int32')
emb1 = my_embedding(in1)


sgsize = 4
cnn1 = Convolution1D(filters=256, kernel_size=7, kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1) # relu
t1 = TimeDistributed(Dense(sgsize, activation='sigmoid'))(cnn1)
t1 = Flatten()(t1)
t1 = Dense(256)(t1)
t1 = Dropout(0.5)(t1)

#tb = TimeDistributed(Dense(4, activation='sigmoid'))(cnn1)
#tb = Flatten()(tb)
#tb = Dense(256)(tb)
#t1 = Merge(mode='mul', concat_axis=-1)([ta, tb])  # ta is never defined; kept disabled

#xa = Flatten()(cnn1)

#---------------
# Optional very-deep-CNN branch. Left disabled here because the ConvBlockLayer /
# get_conv_shape imports at the top of the file are commented out; with it disabled,
# cnn1 stays the kernel-7 convolution defined above.
#cnn1 = emb1
#num_filters = [256, 256, 256]
# Each ConvBlock with one MaxPooling Layer
#for i in range(len(num_filters)):
#    cnn1 = ConvBlockLayer(get_conv_shape(cnn1), num_filters[i])(cnn1)
#    cnn1 = MaxPooling1D(pool_size=3, strides=2, padding="same")(cnn1)

x1 = GlobalMaxPooling1D()(cnn1)
#x1 = k_maxpooling(cnn1, 15, 256)

#x1 = GlobalAveragePooling1D()(cnn1)


cnn3 = Convolution1D(filters=256, kernel_size=3,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x3 = GlobalMaxPooling1D()(cnn3)
t3 = TimeDistributed(Dense(sgsize, activation='sigmoid'))(cnn3)
t3 = Flatten()(t3)
t3 = Dense(256)(t3)
t3 = Dropout(0.5)(t3)

cnn5 = Convolution1D(filters=256, kernel_size=5,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x5 = GlobalMaxPooling1D()(cnn5)
t5 = TimeDistributed(Dense(sgsize, activation='sigmoid'))(cnn5)
t5 = Flatten()(t5)
t5 = Dense(256)(t5)
t5 = Dropout(0.5)(t5)

cnn4 = Convolution1D(filters=256, kernel_size=7, kernel_initializer='he_uniform', padding='valid', activation='relu')(emb1)

x4 = GlobalMaxPooling1D()(cnn4)

#x1 = Merge(mode='concat', concat_axis=-1)([x1, x2, x3, x4])  # x2 is never defined; kept disabled

x1 = Merge(mode='concat', concat_axis=-1)([x1, x3, x5])

#block1

for i in range(4):
    x1 = add_layer(x1, 128) #128

x1 = BatchNormalization()(x1)

x1 = PReLU()(x1)

x1 = Dense(128)(x1)

#block2
for i in range(4):
    x1 = add_layer(x1, 128)
#x1 = BatchNormalization()(x1)
#x1 = Dense(256)(x1)

   
x = BatchNormalization()(x1)
x = Dense(256)(x) #128


#0.7987
#t1s = Merge(mode='mul', concat_axis=-1)([x, t1])
#t3s = Merge(mode='mul', concat_axis=-1)([x, t3])
#t5s = Merge(mode='mul', concat_axis=-1)([x, t5])
#tall = Merge(mode='mul', concat_axis=-1)([t1, t3, t5])
x = Merge(mode='concat', concat_axis=-1)([x, t1, t3, t5])



x = PReLU()(x)
x = Dropout(0.35)(x)  #0.25
#y = Dense(8, activation='softmax')(x)
y = Dense(452, activation='sigmoid')(x)


#model = Model(inputs=[in1, in2], outputs=y)
model = Model(inputs=[in1], outputs=y)

#print (model.summary())
#rmsprop = RMSprop(lr=0.005, rho=0.9, epsilon=1e-08, decay=0.0)
#rmsprop = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False) #0.430
#rmsprop = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
rmsprop = keras.optimizers.Adam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#rmsprop = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)


#rmsprop = keras.optimizers.Adadelta(lr=0.1, rho=0.95, epsilon=1e-06) #lr=1.0 rho=0.95
#model.compile(optimizer=rmsprop,  loss='categorical_crossentropy',   metrics=[Jaccard_Sim]) #binary_crossentropy
model.compile(optimizer=rmsprop,  loss='binary_crossentropy',   metrics=[Jaccard_Sim]) #use1
#model.compile(optimizer=rmsprop,  loss=jaccard_distance_loss,   metrics=[Jaccard_Sim])


global seed
#model_name = 'model/dnn.seed.'+str(seed)+'.h5'
sbo = False
if online == 1:
    sbo = True
    
if ts > 0:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.{val_Jaccard_Sim:.4f}.weights.{epoch:02d}.hdf5', save_best_only=sbo, verbose=1) #False
else:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.weights.fawen.ci.{epoch:02d}.hdf5', save_best_only=sbo, verbose=1) #False

if len(test_x) == 0:
    flag = 0
    
    if flag == 1:
        model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, validation_split=0.1, callbacks=[model_check]) #16
    else:
        if ts > 0:
            train_X, test_X, train_y, test_y = train_test_split(train_x[0], train_y, test_size = ts, random_state = seed)
            #model.load_weights('model/seed.99.0.7989.weights.04.hdf5')
            model.fit([train_X], train_y, batch_size=32, epochs=epoch, shuffle=True, validation_data = ([test_X], test_y), callbacks=[model_check])
            
            preds = model.predict([test_X], batch_size=32, verbose=0)

            sco = []
            for i in range(len(preds)):
                a = []
                b = []
                for j in range(len(test_y[0])):
                    if test_y[i][j] > 0:
                        a.append(j)
                for j in range(len(preds[0])):
                    if preds[i][j] > jac_thre:
                    #preds[i][j] = int(round(preds[i][j]))
                    #if preds[i][j] == 1:
                        b.append(j)
                a = set(a)
                b = set(b)
                sco.append(len(a&b)/len(a|b))
                #print (a, b)
            print ('Test Jac:', np.array(sco).mean())

        else:
            model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, callbacks=[model_check])
    
    del model
    gc.collect()           
else:
    #model.fit(train_x, train_y, batch_size=16, epochs=epoch, shuffle=True) #16
    global model_name
    model.load_weights(model_name)
    
    preds = model.predict(test_x, batch_size=32, verbose=0)
    #print (preds)
    
    del model
    gc.collect()
    return preds

def load_feat(flag):
if flag == 'train':
    df = pd.read_csv(train_path, header=None, encoding='utf8') #, nrows=1000 , nrows=10000
    #df = pd.read_csv('feature/train.tsv', header=None, encoding='utf8', nrows=1000) #, nrows=1000
else:
    df = pd.read_csv(test_path, header=None, encoding='utf8') #, nrows=1000
    #df = pd.read_csv('feature/test.tsv', header=None, encoding='utf8', nrows=1000) #, nrows=1000

#df = df.fillna(max_char_id+2)

df = df.fillna(0)
#df = df.astype('int')

X = []
Y = []
if flag == 'train':
    train_x = df[df.columns[2:max_seq_len+2]].values
    y = df[1].values
    train_y = []
    for yi in y:
        t = [0]*452
        
        sp = yi.split(',')
        for pi in sp:
            t[int(pi)-1] = 1
        train_y.append(t)
        
    X, Y = [np.array(train_x)], np.array(train_y)
    #return [np.array(train_x)], np.array(train_y)
else:
    test_x = df[df.columns[2:max_seq_len+2]].values
    test_id = df[0].values
    X, Y = [np.array(test_x)], test_id
    #return [test_x], test_id
    

return X, Y
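# Illustrative note: for the laws task the label column holds a comma-separated list
# of article indices, so a row labelled '1,32' becomes a 452-dim multi-hot vector
# with positions 0 and 31 set to 1; the 'test' branch returns ids instead of labels.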

def save_res(ids, preds):
res_path = result_path
prob_path = res_path + '.prob.tsv'
X = []
T = []
for i in range(len(ids)):
    x = [ids[i]]
    t = []

    T.append(x + list(preds[i]))
    
    for j in range(len(preds[i])):
        if preds[i][j] > 0.5:
            t.append(str(j+1))
    x.append(','.join(t))
    X.append(x)
    
df = pd.DataFrame(X)
df.to_csv(res_path, index=False, header=False)

df = pd.DataFrame(T)
df.to_csv(prob_path, index=False, header=False, float_format = '%.6f')
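# Output sketch (illustrative): every probability above 0.5 is emitted as a 1-based
# law index, so preds[i] = [0.9, 0.1, 0.7, ...] is written as the row  <id>,"1,3"
# in result_path, while the raw probabilities go to result_path + '.prob.tsv'.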

model_name = ''
online = 0
seed = 0
ts = 0.2

def run(mon):
global seed, model_name, ts, online

online = mon
ts = 0

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    #seeds = [4399]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    #path = 'model/10top3'
    path = model_path
    
    #cnts = []

    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            mepo = int(file[-7:-5]) # keep only the epoch-3/4/5 checkpoints
            if mepo in [3,4,5] and file.find('fawen.ci') >= 0:   
                
                model_cnt += 1
                fullpath = os.path.join(dirpath,file)
                model_name = fullpath
                #model_name = models[i]
                print (model_name)
                np.random.seed(seed)
                predsi = build_model(train_x, train_y, test_x)
                if len(preds) == 0:
                    preds = predsi #*weis[i]
                else:
                    for i in range(len(preds)):
                        preds[i] += predsi[i] #*weis[i]
                    
            #break

            
    for i in range(len(preds)):
        preds[i] /= model_cnt
        
    print (preds)
    
    save_res(test_id, preds)

def main():
global seed, model_name, ts, mfeat, online
mfeat = 'feature/'
online = 1
ts = 0.1

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    #seeds = [4399]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    path = 'model/10top3'
    #path = 'model/test'
    
    #cnts = []

    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            model_cnt += 1
            fullpath = os.path.join(dirpath,file)
            model_name = fullpath
            #model_name = models[i]
            print (model_name)
            np.random.seed(seed)
            predsi = build_model(train_x, train_y, test_x)
            if len(preds) == 0:
                preds = predsi #*weis[i]
            else:
                for i in range(len(preds)):
                    preds[i] += predsi[i] #*weis[i]
                    
            #break

            
    for i in range(len(preds)):
        preds[i] /= model_cnt
        
    print (preds)
    
    save_res(test_id, preds)

if __name__ == '__main__':
    main()

# -*- coding: utf-8 -*-

import numpy as np

import keras
from keras.layers import (Dense, Dropout, BatchNormalization, Convolution1D,
    GlobalMaxPooling1D, Embedding, Input, Merge, merge, Dot, dot, Reshape, Lambda,
    Masking, Activation, GlobalAveragePooling1D, PReLU, TimeDistributed)
from keras.models import Sequential, Model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.optimizers import RMSprop, SGD
from keras.regularizers import l2
from keras import backend as K
from keras.layers.merge import Concatenate
from keras.layers.core import Flatten

import random
import pandas as pd
import numpy as np
import math
import gc
import tensorflow as tf
import os

#from Layers import ConvBlockLayer
#from utils import get_conv_shape

from keras.layers.pooling import MaxPooling1D
from sklearn.cross_validation import train_test_split

train_path = '…/…/feature/lzp/fawen.train.zi2.txt'
test_path = '…/…/feature/lzp/fawen.test.zi2.txt'

result_path = '…/…/result/lzp/laws/fawen.zi.tsv'
model_path = '…/…/model/lzp/laws/'

np.random.seed(666)

#1:6:42162
max_char_id = 5388 #9:32748 #4:54105 #2:81171 #84621

max_seq_len = 3000
epoch = 6
ts = 0
jac_thre = 0.5

def macro_f1(y_true, y_pred):
true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
predicted_positives = K.sum(K.one_hot(K.argmax(y_pred),8), axis = 0)
precision = true_positives / (predicted_positives + K.epsilon())

true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred),8), axis = 0)
possible_positives = K.sum(y_true, axis = 0)
recall = true_positives / (possible_positives + K.epsilon())

f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
return macro_f1

def Jaccard_Sim(y_true,y_pred):
y_pred = K.greater_equal(y_pred, 0.5)
y_pred = K.cast(y_pred,dtype='float32')
intersection = K.sum(y_true*y_pred,axis=1)
pred = K.sum(y_pred,axis=1)
true = K.sum(y_true,axis=1)
union = pred + true - intersection
jaccard = intersection / (union+ K.epsilon())
jaccard = K.mean(jaccard)
return jaccard

def k_maxpooling(conv, topk, dim):
def _top_k(x):
    x = tf.transpose(x, [0, 2, 1])
    k_max = tf.nn.top_k(x, k=topk)
    return tf.reshape(k_max[0], (-1, dim * topk))
k_max = Lambda(_top_k, output_shape=(dim * topk,))(conv)
return k_max

def add_layer(L, outdim=32):
c = BatchNormalization()(L)
c = Dense(outdim)(c)
c = PReLU()(c)
L = Merge(mode='concat', concat_axis=-1)([L, c])
return L

def jaccard_distance_loss(y_true, y_pred, smooth=100):

y_pred = K.greater_equal(y_pred, 0.5)

y_pred = K.cast(y_pred,dtype='float32')

intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
jaccard = (intersection + smooth) / (sum_ - intersection + smooth)

y_pred = K.greater_equal(y_pred, 0.5)

y_pred = K.cast(y_pred,dtype='float32')

intersection = K.sum(y_true*y_pred,axis=1)

pred = K.sum(y_pred,axis=1)

true = K.sum(y_true,axis=1)

union = pred + true - intersection

jaccard = intersection / (union+ K.epsilon())

jaccard = K.mean(jaccard)

return (1 - jaccard) * smooth*100

def build_model(train_x, train_y, test_x = []):
#--- shared embedding
my_embedding = Embedding(input_dim=max_char_id+3, output_dim=128, input_length=None) #128

#---------keyword 1 -------------------------
in1 = Input(shape=(max_seq_len,), dtype='int32')
emb1 = my_embedding(in1)


sgsize = 4
cnn1 = Convolution1D(filters=256, kernel_size=7, kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1) # relu
t1 = TimeDistributed(Dense(sgsize, activation='sigmoid'))(cnn1)
t1 = Flatten()(t1)
t1 = Dense(256)(t1)
t1 = Dropout(0.5)(t1)

#tb = TimeDistributed(Dense(4, activation='sigmoid'))(cnn1)
#tb = Flatten()(tb)
#tb = Dense(256)(tb)
#t1 = Merge(mode='mul', concat_axis=-1)([ta, tb])  # ta is never defined; kept disabled

#xa = Flatten()(cnn1)

#---------------
# Optional very-deep-CNN branch. Left disabled here because the ConvBlockLayer /
# get_conv_shape imports at the top of the file are commented out; with it disabled,
# cnn1 stays the kernel-7 convolution defined above.
#cnn1 = emb1
#num_filters = [256, 256, 256]
# Each ConvBlock with one MaxPooling Layer
#for i in range(len(num_filters)):
#    cnn1 = ConvBlockLayer(get_conv_shape(cnn1), num_filters[i])(cnn1)
#    cnn1 = MaxPooling1D(pool_size=3, strides=2, padding="same")(cnn1)

x1 = GlobalMaxPooling1D()(cnn1)
#x1 = k_maxpooling(cnn1, 15, 256)

#x1 = GlobalAveragePooling1D()(cnn1)


cnn3 = Convolution1D(filters=256, kernel_size=3,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x3 = GlobalMaxPooling1D()(cnn3)
t3 = TimeDistributed(Dense(sgsize, activation='sigmoid'))(cnn3)
t3 = Flatten()(t3)
t3 = Dense(256)(t3)
t3 = Dropout(0.5)(t3)

cnn5 = Convolution1D(filters=256, kernel_size=5,kernel_initializer = 'he_uniform', padding='valid', activation='relu')(emb1)
x5 = GlobalMaxPooling1D()(cnn5)
t5 = TimeDistributed(Dense(sgsize, activation='sigmoid'))(cnn5)
t5 = Flatten()(t5)
t5 = Dense(256)(t5)
t5 = Dropout(0.5)(t5)

cnn4 = Convolution1D(filters=256, kernel_size=7, kernel_initializer='he_uniform', padding='valid', activation='relu')(emb1)

x4 = GlobalMaxPooling1D()(cnn4)

#x1 = Merge(mode='concat', concat_axis=-1)([x1, x2, x3, x4])  # x2 is never defined; kept disabled

x1 = Merge(mode='concat', concat_axis=-1)([x1, x3, x5])

#block1

for i in range(4):
    x1 = add_layer(x1, 128) #128

x1 = BatchNormalization()(x1)

x1 = PReLU()(x1)

x1 = Dense(128)(x1)

#block2
for i in range(4):
    x1 = add_layer(x1, 128)
#x1 = BatchNormalization()(x1)
#x1 = Dense(256)(x1)

   
x = BatchNormalization()(x1)
x = Dense(256)(x) #128


#0.7987
#t1s = Merge(mode='mul', concat_axis=-1)([x, t1])
#t3s = Merge(mode='mul', concat_axis=-1)([x, t3])
#t5s = Merge(mode='mul', concat_axis=-1)([x, t5])
#tall = Merge(mode='mul', concat_axis=-1)([t1, t3, t5])
x = Merge(mode='concat', concat_axis=-1)([x, t1, t3, t5])



x = PReLU()(x)
x = Dropout(0.35)(x)  #0.25
#y = Dense(8, activation='softmax')(x)
y = Dense(452, activation='sigmoid')(x)


#model = Model(inputs=[in1, in2], outputs=y)
model = Model(inputs=[in1], outputs=y)

#print (model.summary())
#rmsprop = RMSprop(lr=0.005, rho=0.9, epsilon=1e-08, decay=0.0)
#rmsprop = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False) #0.430
#rmsprop = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)
rmsprop = keras.optimizers.Adam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#rmsprop = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)


#rmsprop = keras.optimizers.Adadelta(lr=0.1, rho=0.95, epsilon=1e-06) #lr=1.0 rho=0.95
#model.compile(optimizer=rmsprop,  loss='categorical_crossentropy',   metrics=[Jaccard_Sim]) #binary_crossentropy
model.compile(optimizer=rmsprop,  loss='binary_crossentropy',   metrics=[Jaccard_Sim]) #use1
#model.compile(optimizer=rmsprop,  loss=jaccard_distance_loss,   metrics=[Jaccard_Sim])


global seed
#model_name = 'model/dnn.seed.'+str(seed)+'.h5'
sbo = False
if online == 1:
    sbo = True
    
    
if ts > 0:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.{val_Jaccard_Sim:.4f}.weights.{epoch:02d}.hdf5', save_best_only=sbo, verbose=1) #False
else:
    model_check = ModelCheckpoint(filepath=model_path + 'seed.'+str(seed)+'.weights.fawen.zi.{epoch:02d}.hdf5', save_best_only=sbo, verbose=1) #False


if len(test_x) == 0:
    flag = 0
    if flag == 1:
        model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, validation_split=0.2, callbacks=[model_check]) #16
    else:
        
        if ts > 0 :
            train_X, test_X, train_y, test_y = train_test_split(train_x[0], train_y, test_size = ts, random_state = seed)

            #model.load_weights('model/seed.99.0.7989.weights.04.hdf5')
            
            model.fit([train_X], train_y, batch_size=32, epochs=epoch, shuffle=True, validation_data = ([test_X], test_y), callbacks=[model_check])
            
#            preds = model.predict([test_X], batch_size=32, verbose=0)
#            sco = []
#            for i in range(len(preds)):
#                a = []
#                b = []
#                for j in range(len(test_y[0])):
#                    if test_y[i][j] > 0:
#                        a.append(j)
#                for j in range(len(preds[0])):
#                    if preds[i][j] > jac_thre:
#                    #preds[i][j] = int(round(preds[i][j]))
#                    #if preds[i][j] == 1:
#                        b.append(j)
#                a = set(a)
#                b = set(b)
#                sco.append(len(a&b)/len(a|b))
#                #print (a, b)
#            print ('Test Jac:', np.array(sco).mean())
        else:
            model.fit(train_x, train_y, batch_size=32, epochs=epoch, shuffle=True, callbacks=[model_check])
    del model
    gc.collect()                     
else:
    #model.fit(train_x, train_y, batch_size=16, epochs=epoch, shuffle=True) #16
    global model_name
    model.load_weights(model_name)
    
    preds = model.predict(test_x, batch_size=32, verbose=0)
    #print (preds)
    del model
    gc.collect()        
    return preds

def load_feat(flag):
if flag == 'train':
    df = pd.read_csv(train_path, header=None, encoding='utf8') #, nrows=1000 , nrows=10000
    #df = pd.read_csv('feature/train.tsv', header=None, encoding='utf8', nrows=1000) #, nrows=1000
else:
    df = pd.read_csv(test_path, header=None, encoding='utf8') #, nrows=1000
    #df = pd.read_csv('feature/test.tsv', header=None, encoding='utf8', nrows=1000) #, nrows=1000

#df = df.fillna(max_char_id+2)

df = df.fillna(0)
#df = df.astype('int')

X = []
Y = []
if flag == 'train':
    train_x = df[df.columns[2:max_seq_len+2]].values
    y = df[1].values
    train_y = []
    for yi in y:
        t = [0]*452
        
        sp = yi.split(',')
        for pi in sp:
            t[int(pi)-1] = 1
        train_y.append(t)
        
    X, Y = [np.array(train_x)], np.array(train_y)
    #return [np.array(train_x)], np.array(train_y)
else:
    test_x = df[df.columns[2:max_seq_len+2]].values
    test_id = df[0].values
    X, Y = [np.array(test_x)], test_id
    #return [test_x], test_id
    

return X, Y

def save_res(ids, preds):
res_path = result_path
prob_path = res_path + '.prob.tsv'
X = []
T = []
for i in range(len(ids)):
    x = [ids[i]]
    t = []

    T.append(x + list(preds[i]))
    
    for j in range(len(preds[i])):
        if preds[i][j] > 0.5:
            t.append(str(j+1))
    x.append(','.join(t))
    X.append(x)
    
df = pd.DataFrame(X)
df.to_csv(res_path, index=False, header=False)

df = pd.DataFrame(T)
df.to_csv(prob_path, index=False, header=False, float_format = '%.6f')

model_name = ''
online = 0
seed = 0
ts = 0.2

def run(mon):
global seed, model_name, ts, online

online = mon
ts = 0

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    #seeds = [4399]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
        #break
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    #path = 'model/10top3'
    path = model_path
    
    #cnts = []

    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            mepo = int(file[-7:-5]) # keep only the epoch-3/4/5 checkpoints
            if mepo in [3,4,5] and file.find('fawen.zi') >= 0:   
                
                model_cnt += 1
                fullpath = os.path.join(dirpath,file)
                model_name = fullpath
                #model_name = models[i]
                print (model_name)
                np.random.seed(seed)
                predsi = build_model(train_x, train_y, test_x)
                if len(preds) == 0:
                    preds = predsi #*weis[i]
                else:
                    for i in range(len(preds)):
                        preds[i] += predsi[i] #*weis[i]
                    
            #break

            
    for i in range(len(preds)):
        preds[i] /= model_cnt
        
    print (preds)
    
    save_res(test_id, preds)

def main():
global seed, model_name, ts, online

online = 1
ts = 0.1

print ('load data...')
#train_x, train_y = load_train()
train_x, train_y = load_feat('train')

#0, 8, 77, 128 1024
if online == 0:
    seeds = [1 ,4, 16, 64, 99, 128, 666, 999, 1225, 10101]
    #seeds = [4399]
    for seed in seeds:
        print ('use seed:', seed)
        np.random.seed(seed) #666
        print ('start training...')
        build_model(train_x, train_y)
else:
    #test_x, test_id = load_test()
    test_x, test_id = load_feat('test')
    

    print ('start training...')
    preds = []
    #for i in range(len(models)):
    path = 'model/10top3'
    #path = 'model/test'
    
    #cnts = []

    model_cnt = 0
    for dirpath,dirnames,filenames in os.walk(path):
        for file in filenames:
            model_cnt += 1
            fullpath = os.path.join(dirpath,file)
            model_name = fullpath
            #model_name = models[i]
            print (model_name)
            np.random.seed(seed)
            predsi = build_model(train_x, train_y, test_x)
            if len(preds) == 0:
                preds = predsi #*weis[i]
            else:
                for i in range(len(preds)):
                    preds[i] += predsi[i] #*weis[i]
                    
            #break

            
    for i in range(len(preds)):
        preds[i] /= model_cnt
        
    print (preds)
    
    save_res(test_id, preds)

if __name__ == '__main__':
    main()

# -*- encoding:utf-8 -*-

import pandas as pd

import numpy as np
import re

input_train_path = '…/…/data/train.txt'
input_test_path = '…/…/data/test.txt'

train1_path = '…/…/feature/lzp/fawen.train.ci1.txt'
test1_path = '…/…/feature/lzp/fawen.test.ci1.txt'

stop_path = '…/…/data/stop.txt'

train2_path = '…/…/feature/lzp/fawen.train.ci2.txt'
test2_path = '…/…/feature/lzp/fawen.test.ci2.txt'

id_path = '…/…/feature/lzp/fawen.ci.id.tsv'

cnt_thre = 4
max_ci = 1500

def get_word_id():
df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8')

X = df.values
dic = {}

for i in range(len(X)):
    if i%1000 == 0:
        print (i)
        
    x = X[i]
    se = set()
    xx = x[1].split(' ')
    #for z in x[1]:
    for z in xx:
        if z in se:
            continue
        se.add(z)
        
        if z not in dic:
            dic[z] = 0
        dic[z] += 1

T = []
for k, v in dic.items():
    if v > cnt_thre:
        T.append([k, v])
df = pd.DataFrame(T)
df.to_csv(id_path, index=False, header=None, encoding='utf8')

def read_dic():
df = pd.read_csv(id_path, header=None, encoding='utf8')
dic = {}
i = 1
for x in df.values:
    dic[x[0]] = i
    i += 1
return dic, i

def cnn_feature(flag, nrows=None):
outpath = ''
if flag == 'train':
    df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8', nrows=nrows) # honour the nrows argument
    outpath = train2_path
else:
    df = pd.read_csv(test1_path, sep='\t', header=None, encoding='utf8', nrows=nrows)
    outpath = test2_path

dic, max_id = read_dic()

T = []
X = df.values
for i in range(len(X)):
    if i%1000 == 0:
        print (i)

    #if i == 2000:
    #    break

    x = X[i]
    zz = x[1].split(' ')
    t = [x[0], -1]
    if flag == 'train':
        t = [x[0], x[3]]
   
    #if len(x[1]) > 10000:
    if len(zz) > max_ci:
        zz = zz[:max_ci]

    sn = []
    #for z in x[1]:
    for z in zz:
        if z in dic:
            sn.append(dic[z])
        else:
            sn.append(max_id)
    t = t + sn
    T.append(t)
df = pd.DataFrame(T)
#df = df.astype('int')
df.to_csv(outpath, index=False, header=None, encoding='utf8')

def get_alpha(number, flag=False):
number = float(number.replace(',', '').replace(',', ''))
if flag:
    number *= 10000

list1 = [1000, 2000, 3000, 4000, 5000, 10000, 500000]

list2 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']

list1 = [30, 100, 300, 1000, 2000, 3000, 4000, 5000, 7000, 10000, 20000, 50000, 100000, 500000]
list2 = ['QA', 'QB', 'QC', 'QD', 'QE', 'QF', 'QG', 'QH', 'QI', 'QJ', 'QK', 'QL', 'QM', 'QN', 'QO']

i = 0
while i<len(list1):
    if number<list1[i]:
        break
    
    i += 1
        
return list2[i]

def replace_money(string):
string = string.encode('utf-8')
string = string.replace('余元', '元').replace('万余元', '万元').replace('余万元', '万元')
r = re.compile('(\d+((,\d+)|(,\d+))*(\.\d+)?)元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number)
    string = string.replace(number+'元', alpha+'元')
    
r = re.compile('(\\d+((,\\d+)|(,\\d+))*(\.\\d+)?)万元')
numbers = r.findall(string)

for number in numbers:
    number = number[0]
    alpha = get_alpha(number, True)
    string = string.replace(number+'万元', alpha+'万元').replace('万元','元')
    
return string

import jieba, codecs
def splitWord(query, stopwords):
wordList = jieba.cut(query)
num = 0
result = ''
for word in wordList:
    word = word.rstrip()
    word = word.rstrip('"')
    if word not in stopwords:
        if num == 0:
            result = word
            num = 1
        else:
            result = result + ' ' + word
return result.encode('utf-8')

from tqdm import tqdm
def replace_train_test(nrows=None):
files = [input_test_path, input_train_path]
files1 = [test1_path, train1_path]

stopwords = {}
for line in codecs.open(stop_path, 'r', 'utf-8'):
    stopwords[line.rstrip()] = 1
              
for i in range(len(files)):
    print (files[i])
    df = pd.read_csv(files[i], sep='\t', header=None, encoding='utf8', nrows=nrows)

    X = df.values
    for j in tqdm(range(len(X))):
        if len(X[j][1]) > 0:
            X[j][1] = replace_money(X[j][1])
            X[j][1] = splitWord(X[j][1],stopwords).decode('utf8')
    df = pd.DataFrame(X, columns=df.columns)
    print (files1[i])
    df.to_csv(files1[i], sep='\t', index=False, header=False, encoding='utf8')

def run(nrows=None):
replace_train_test(nrows) #step1

get_word_id() #step2

cnn_feature('train', nrows) #step3
cnn_feature('test', nrows) #step4    

def main():
replace_train_test()

global cnt_thre
cnt_thre = 6
get_word_id()

cnn_feature('train')
cnn_feature('test')


print ('tt')

if __name__ == '__main__':
    main()

# -*- encoding:utf-8 -*-

import pandas as pd

import numpy as np
import re

input_train_path = '…/…/data/train.txt'
input_test_path = '…/…/data/test.txt'

train1_path = '…/…/feature/lzp/fawen.train.zi1.txt'
test1_path = '…/…/feature/lzp/fawen.test.zi1.txt'

stop_path = '…/…/data/stop.txt'

train2_path = '…/…/feature/lzp/fawen.train.zi2.txt'
test2_path = '…/…/feature/lzp/fawen.test.zi2.txt'

id_path = '…/…/feature/lzp/fawen.zi.id.tsv'

max_seq_len = 3000

def get_word_id():
df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8')

X = df.values
dic = {}

for i in range(len(X)):
    if i%1000 == 0:
        print (i)
        
    x = X[i]
    se = set()
    
    if len(x[1]) > max_seq_len:
        x[1] = x[1][:max_seq_len]

    for z in x[1]:
        if z in se:
            continue
        se.add(z)
        
        if z not in dic:
            dic[z] = 0
        dic[z] += 1

T = []
for k, v in dic.items():
    if v > 2:
        T.append([k, v])
df = pd.DataFrame(T)
df.to_csv(id_path, index=False, header=None, encoding='utf8')

def read_dic():
df = pd.read_csv(id_path, header=None, encoding='utf8')
dic = {}
i = 1
for x in df.values:
    dic[x[0]] = i
    i += 1
return dic, i

#def cnn_feature(flag):
#    if flag == 'train':
#        df = pd.read_csv(train_path, sep='\t', header=None, encoding='utf8')
#    else:
#        df = pd.read_csv(test_path, sep='\t', header=None, encoding='utf8')
#
#    dic, max_id = read_dic()
#
#    T = []
#    X = df.values
#    for i in range(len(X)):
#        if i%1000 == 0:
#            print (i)
#        if i == 2000:
#            break
#        x = X[i]
#        t = [x[0], -1]
#        if flag == 'train':
#            t = [x[0], x[2]]
#        if len(x[1]) > 10000:
#            x[1] = x[1][-10000:]
#        if len(x[1]) > max_seq_len:
#            x[1] = x[1][:max_seq_len]
#        sn = []
#        for z in x[1]:
#            if z in dic:
#                sn.append(dic[z])
#            else:
#                sn.append(max_id)
#        t = t + sn
#        T.append(t)
#    df = pd.DataFrame(T)
#    #df = df.astype('int')
#    df.to_csv('feature/'+flag+'.tsv', index=False, header=None, encoding='utf8')

def cnn_feature(flag, nrows=None):
outpath = ''
if flag == 'train':
    df = pd.read_csv(train1_path, sep='\t', header=None, encoding='utf8', nrows=nrows) # honour the nrows argument
    outpath = train2_path
else:
    df = pd.read_csv(test1_path, sep='\t', header=None, encoding='utf8', nrows=nrows)
    outpath = test2_path

dic, max_id = read_dic()

T = []
X = df.values
for i in range(len(X)):
    if i%1000 == 0:
        print (i)

if i == 2000:

break

    x = X[i]
    zz = x[1][:]
    t = [x[0], -1]
    if flag == 'train':
        t = [x[0], x[3]]
   
    #if len(x[1]) > 10000:
    if len(zz) > max_seq_len:
        zz = zz[:max_seq_len]

    sn = []
    #for z in x[1]:
    for z in zz:
        if z in dic:
            sn.append(dic[z])
        else:
            sn.append(max_id)
    t = t + sn
    T.append(t)
df = pd.DataFrame(T)
#df = df.astype('int')
df.to_csv(outpath, index=False, header=None, encoding='utf8')
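
# Illustrative note (assumption, not from the original comments): each row written to
# fawen.{train,test}.zi2.txt has the form [doc_id, label, id_1, id_2, ..., id_k], where
# label is taken from column 3 of the zi1 file (the laws field) for train and is -1 for
# test, and id_j are per-character ids with out-of-vocabulary characters mapped to
# max_id. Rows are capped at max_seq_len = 3000 characters.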

def get_alpha(number, flag=False):
    number = float(number.replace(',','').replace('，',''))
    if flag:
        number *= 10000

    list1 = [1000, 2000, 3000, 4000, 5000, 10000, 500000]
    list2 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']

    list1 = [30, 100, 300, 1000, 2000, 3000, 4000, 5000, 7000, 10000, 20000, 50000, 100000, 500000]
    list2 = ['QA', 'QB', 'QC', 'QD', 'QE', 'QF', 'QG', 'QH', 'QI', 'QJ', 'QK', 'QL', 'QM', 'QN', 'QO']

    i = 0
    while i < len(list1):
        if number < list1[i]:
            break

        i += 1

    return list2[i]

def replace_money(string):
    string = string.encode('utf-8')
    string = string.replace('余元', '元').replace('万余元', '万元').replace('余万元', '万元')
    r = re.compile('(\d+((,\d+)|(，\d+))*(\.\d+)?)元')
    numbers = r.findall(string)

    for number in numbers:
        number = number[0]
        alpha = get_alpha(number)
        string = string.replace(number, alpha)

    r = re.compile('(\\d+((,\\d+)|(，\\d+))*(\.\\d+)?)万元')
    numbers = r.findall(string)

    for number in numbers:
        number = number[0]
        alpha = get_alpha(number, True)
        string = string.replace(number, alpha).replace('万元','元')

    return string
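
# Rough illustration (hedged; the exact tokens depend on the bucket lists in get_alpha):
# replace_money collapses literal amounts into bucket tokens, e.g.
#   replace_money(u'骗取人民币50元和52万余元')
# should come out as something like '骗取人民币QB元和QO元', since 50 falls in the
# [30, 100) bucket ('QB') and 52万 = 520000 is above the last threshold ('QO').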

import codecs
from tqdm import tqdm
def replace_train_test(nrows=None):
    files = [input_test_path, input_train_path]
    files1 = [test1_path, train1_path]

    stopwords = {}
    for line in codecs.open(stop_path, 'r', 'utf-8'):
        stopwords[line.rstrip()] = 1

    for i in range(len(files)):
        print (files[i])
        df = pd.read_csv(files[i], sep='\t', header=None, encoding='utf8', nrows=nrows)

        X = df.values
        for j in tqdm(range(len(X))):
            if len(X[j][1]) > 0:
                X[j][1] = replace_money(X[j][1])
        df = pd.DataFrame(X, columns=df.columns)
        print (files1[i])
        df.to_csv(files1[i], sep='\t', index=False, header=False, encoding='utf8')

def run(nrows=None):
    replace_train_test(nrows)    #step1

    get_word_id()                #step2

    cnn_feature('train', nrows)  #step3
    cnn_feature('test', nrows)   #step4

def main():
    #replace_train_test()

    get_word_id()

    cnn_feature('train')
    cnn_feature('test')

    #dense_feat()
    #find_zuiming()
    #read_zui(30)

    zuiming_feat()

#print (get_onehot(10000000))
#string = '公诉50元机关梅510,000余元州市梅52万余元江区人4,999余元民检察院'
#string = replace_money(string)
#print(string)

#t = '公诉机关梅州市梅江区人民检察院。被告人钟某。辩护人吴亦辉,系广东法泰律师事务所律师。梅州市梅江区人民检察院以区检诉刑诉(2014)257号起诉书指控被告人钟某犯信用卡诈骗罪,于2014年10月30日向本院提起公诉。送达起诉书副本及开庭审理时,被告人钟某自愿认罪,同意本案适用简易程序审理。本院适用简易程序,依法组成合议庭,公开开庭审理了本案。梅州市梅江区人民检察院指派代理检察员李丽玲出庭支持公诉,被告人钟某及其辩护人吴亦辉到庭参加了诉讼。现已审理终结。被告人钟某对公诉机关指控的犯罪事实、罪名无异议,请求从轻处罚。辩护人提出公诉机关指控的信用卡诈骗数额中有利息和滞纳金,应予以剔除,并提出公诉机关起诉书指控的第2、3项的犯罪事实,系被告人在司法机关未掌握的情况下主动交代的同种罪行和同种较重罪行,可酌情从轻和应当从轻处罚的。1、2012年2月,被告人钟某在中国建设银行梅州分行申请办理了卡号为5324582022516053(2013年1月更换为5324582023332427)的建设银行龙卡信用卡后透支用于赌博,经发卡银行多次催收均未还款,截止2014年7月21日欠本金人民币29681.07元、欠利息和滞纳金共人民币11753.27元。2、2012年8月,被告人钟某在广发银行梅州分行申请办理了卡号为6225581180004264的广发银行白金信用卡后透支用于赌博,经发卡银行多次催收均未还款,截止2014年8月14日欠本金人民币62393.71元、利息和滞纳金共人民币23675.95元。3、2012年3月,被告人钟某在中国工商银行梅州分行申请办理了卡号为6222300225966624的中国工商银行牡丹贷记信用卡后透支用于赌博,经发卡银行多次催收均未还款,截止2014年8月18日欠本金人民币9909.57元、利息和滞纳金共人民币6761.55元。被告人钟某使用以上三张信用卡透支,欠银行本金人民币101984.35元、欠利息和滞纳金共人民币42190.77元。被告人钟某归案后,其家属向中国建设银行梅州分行退赔了人民币40000元。上述事实,被告人钟某在开庭审理过程中亦无异议,且有书证被害单位银行的报案材料、申请、交易记录、账户追讨情况表,被告人钟某被抓获经过证明,身份证明,被害人被害单位负责人陈某新、潘某球、黄某政的陈述,退赔款项的情况证明,被告人钟某的供述等证据证实,足以认定。'
#x = extract_amount_involved(t)
#print (x)

if __name__ == '__main__':
    main()

# -*- encoding:utf-8 -*-

import fajin_feat_zi, fajin_feat_ci
import fajin_dnn_zi, fajin_dnn_ci

import fawen_feat_zi, fawen_feat_ci
import fawen_dnn_zi, fawen_dnn_ci

def process():
    # nrows = 100  # test mode: read only a small number of rows for a quick code check
    nrows = None   # full run: use the complete data set

    # character-based penalty (fajin) model
    fajin_feat_zi.run(nrows)
    fajin_dnn_zi.run(0)  #training
    fajin_dnn_zi.run(1)  #predict

    # word-based penalty (fajin) model
    fajin_feat_ci.run(nrows)
    fajin_dnn_ci.run(0)  #training
    fajin_dnn_ci.run(1)  #predict

    # character-based law-article (fawen) model
    fawen_feat_zi.run(nrows)
    fawen_dnn_zi.run(0)
    fawen_dnn_zi.run(1)

    # word-based law-article (fawen) model
    fawen_feat_ci.run(nrows)
    fawen_dnn_ci.run(0)
    fawen_dnn_ci.run(1)

if __name__ == '__main__':
    process()

# -*- encoding:utf-8 -*-

from __future__ import division
from __future__ import absolute_import
import pandas as pd
import numpy as np
import gensim
from gensim.models.word2vec import Word2Vec
import jieba
import warnings
import codecs
import copy
import re
from tqdm import tqdm

def convertChineseDigitsToArabic(chinese_digits, encoding="utf-8"):
    chs_arabic_map = {u'零':0, u'一':1, u'二':2, u'三':3, u'四':4,
                      u'五':5, u'六':6, u'七':7, u'八':8, u'九':9,
                      u'十':10, u'百':100, u'千':10 ** 3, u'万':10 ** 4,
                      u'〇':0, u'壹':1, u'贰':2, u'叁':3, u'肆':4,
                      u'伍':5, u'陆':6, u'柒':7, u'捌':8, u'玖':9,
                      u'拾':10, u'佰':100, u'仟':10 ** 3, u'萬':10 ** 4,
                      u'亿':10 ** 8, u'億':10 ** 8, u'幺': 1,
                      u'0':0, u'1':1, u'2':2, u'3':3, u'4':4,
                      u'5':5, u'6':6, u'7':7, u'8':8, u'9':9}

    if isinstance(chinese_digits, str):
        chinese_digits = chinese_digits.decode(encoding)

    result  = 0
    tmp     = 0
    hnd_mln = 0
    for count in range(len(chinese_digits)):
        curr_char  = chinese_digits[count]
        curr_digit = chs_arabic_map.get(curr_char, None)
        # meet 「亿」 or 「億」
        if curr_digit == 10 ** 8:
            result  = result + tmp
            result  = result * curr_digit
            # get result before 「亿」 and store it into hnd_mln
            # reset `result`
            hnd_mln = hnd_mln * 10 ** 8 + result
            result  = 0
            tmp     = 0
        # meet 「万」 or 「萬」
        elif curr_digit == 10 ** 4:
            result = result + tmp
            result = result * curr_digit
            tmp    = 0
        # meet 「十」, 「百」, 「千」 or their traditional version
        elif curr_digit >= 10:
            tmp    = 1 if tmp == 0 else tmp
            result = result + curr_digit * tmp
            tmp    = 0
        # meet single digit
        elif curr_digit is not None:
            tmp = tmp * 10 + curr_digit
        else:
            return result
    result = result + tmp
    result = result + hnd_mln
    return result
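
# Quick sanity check (illustrative, not part of the original script):
#   convertChineseDigitsToArabic(u'三万五千')  -> 35000
#   convertChineseDigitsToArabic(u'一百零五')  -> 105
# Characters outside the map (e.g. u'两') stop the parse early, so colloquial numerals
# fall back to whatever prefix could be converted.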

def get_alpha(number, flag=False):
    if flag:
        number *= 10000

    list1 = [30, 100, 300, 1000, 2000, 3000, 4000, 5000, 7000, 10000, 20000, 50000, 100000, 500000]
    list2 = ['QA', 'QB', 'QC', 'QD', 'QE', 'QF', 'QG', 'QH', 'QI', 'QJ', 'QK', 'QL', 'QM', 'QN', 'QO', 'QP']

    i = 0
    while i < len(list1):
        if number < list1[i]:
            break

        i += 1
    return list2[i]

def get_date(date, flag=False):
    year = int(date.split('年')[0])
    month = int(date.split('年')[1].split('月')[0])

    list11 = [1970, 1980, 1990, 1993, 1999, 2003, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]
    list21 = ['YA', 'YB', 'YC', 'YD', 'YE', 'YF', 'YG', 'YH', 'YI', 'YJ', 'YK', 'YL', 'YM', 'YN', 'YO', 'YP', 'YQ', 'YR']

    list12 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    list22 = ['MA', 'MB', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MI', 'MJ', 'MK', 'ML', 'MM']

    i = 0
    while i < len(list11):
        if year < list11[i]:
            break

        i += 1

    j = 0
    while j < len(list12):
        if month < list12[j]:
            break

        j += 1
    return list21[i]+' '+list22[j]
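
# Illustrative check (an assumption about intended usage; get_date's call sites are not
# shown in this excerpt): a date string like '2014年3月' puts the year in the bucket
# just past 2013 and the month in the fourth month bucket, i.e.
#   get_date('2014年3月')  -> 'YN MD'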

def get_ke(ke, flag='KE'):
    ke = float(ke.replace(',','').replace('，',''))

    list11 = [1, 5, 10, 20, 50, 100, 200, 500, 1000]
    list21 = [flag+'A', flag+'B', flag+'C', flag+'D', flag+'E', flag+'F', flag+'G', flag+'H', flag+'I', flag+'J']

    i = 0
    while i < len(list11):
        if ke < list11[i]:
            break

        i += 1

    return list21[i]

def docPreprocess(doc):
    doc = doc.decode('utf-8')
    doc = doc.replace(u'余元', u'元').replace(u'万余元', u'万元').replace(u'余万元', u'万元')

    r1 = re.compile(u'(\\d+((,\\d+)|(，\\d+))*(\.\\d+)?)元')
    numbers = r1.findall(doc)
    for number in numbers:
        number = number[0]
        k = float(number.replace(u',',u'').replace(u'，',u''))
        alpha = get_alpha(k)
        doc = doc.replace(number+u'元', alpha)

    r2 = re.compile(u'(\\d+((,\\d+)|(，\\d+))*(\.\\d+)?)万元')
    numbers = r2.findall(doc)
    for number in numbers:
        number = number[0]
        k = float(number.replace(u',',u'').replace(u'，',u''))
        alpha = get_alpha(k, True)
        doc = doc.replace(number+u'万元', alpha)

    r = re.compile(u'[一二三四五六七八九]+[零十百千万亿一二三四五六七八九]*元')
    numbers = r.findall(doc)
    for number in numbers:
        k = convertChineseDigitsToArabic(number[:-1])
        alpha = get_alpha(k)
        doc = doc.replace(number, alpha)

    return doc
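
# Rough illustration (hedged; token values depend on get_alpha's buckets above): a
# fragment such as u'支付了5,000元和三万五千元' should come out roughly as
# u'支付了QI和QL', since 5,000 falls in the [5000, 7000) bucket and 三万五千 (35000)
# in the [20000, 50000) bucket; both Arabic and Chinese-numeral amounts are normalized.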

def splitWord(query, stopwords):
    query = docPreprocess(query)
    wordList = jieba.cut(query)
    num = 0
    result = ''
    for word in wordList:
        if word.rstrip() not in stopwords:
            word = word.replace(' ','')
            word = word.replace('"','')
            if num == 0:
                result = word
                num = 1
            else:
                result = result + ' ' + word
    return result.encode('utf-8')

def getDoc(data, stopwords):
    df = copy.deepcopy(data)
    doc = []
    for des in tqdm(df.description.values):
        res = splitWord(des, stopwords)
        doc.append(res)
    df['doc'] = doc

    return df

def run():
    stopwords = {}
    for line in codecs.open('../../data/stop.txt', 'r', 'utf-8'):
        stopwords[line.rstrip()] = 1

    train_raw = pd.read_table('../../data/train.txt', sep='\t', header=None)#, nrows = 200)
    test_raw = pd.read_table('../../data/test.txt', sep='\t', header=None)#, nrows = 200)

    train_raw.columns = ['ID','description','penalty','laws']
    test_raw.columns = ['ID','description']


    train = getDoc(train_raw, stopwords)
    test = getDoc(test_raw, stopwords)
    train.to_csv('../../feature/yyt/traindoc_money_law.csv', index = False)
    test.to_csv('../../feature/yyt/testdoc_money_law.csv', index = False)

# -*- encoding:utf-8 -*-

import pandas as pd
import numpy as np
import warnings
import codecs
import copy
from tqdm import tqdm
import textCNN_money
import textCNN_laws
import preprocess

if __name__ == '__main__':
    textCNN_laws.log('preprocess...')
    preprocess.run()
    textCNN_laws.log('get laws result...')
    textCNN_laws.run()
    textCNN_laws.log('get money result...')
    textCNN_money.run()

# -*- encoding:utf-8 -*-

from __future__ import division
from __future__ import absolute_import
import pandas as pd
import numpy as np
import gensim
from gensim.models.word2vec import Word2Vec
import jieba
import warnings
import codecs
import copy
from tqdm import tqdm
from collections import defaultdict
import random as rn
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten, Merge,concatenate,Reshape
from keras.layers import Convolution1D,Convolution2D, MaxPooling1D, Embedding,BatchNormalization,Dropout,MaxPooling2D
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional
from keras.models import Model
from keras.layers.pooling import GlobalMaxPooling1D,GlobalMaxPooling2D,GlobalAveragePooling1D
from keras.callbacks import EarlyStopping,LearningRateScheduler,ModelCheckpoint
from keras import regularizers
from keras.models import Sequential, Model
from keras import backend as K
from keras.optimizers import Adamax, RMSprop, Adam
from keras.layers import Activation,add
from sklearn.metrics import f1_score
import copy
import re
import h5py
import gc
import time
warnings.filterwarnings('ignore')

def getLawLabel(df):
    data = df['laws'].values
    n = df.shape[0]
    matrix = np.zeros((n, 452))
    for i, laws in enumerate(data):
        seq = laws.split(',')
        for l in seq:
            try:
                matrix[i, int(l)-1] = 1
            except IndexError:
                print laws
    return matrix
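
# Illustrative note (assumption about the label encoding): matrix is an n x 452 multi-hot
# array indexed by law-article id minus one, so a laws string of '3,25' would set columns
# 2 and 24 of that row to 1 and leave the rest at zero.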

def evalF1(p, l):
    preds = copy.deepcopy(p)
    label = copy.deepcopy(np.argmax(l, axis = 1))
    score = f1_score(label, preds, average='weighted')
    return score

def Jaccard_Sim(y_true, y_pred):
    y_pred = K.greater_equal(y_pred, 0.5)
    y_pred = K.cast(y_pred, dtype='float32')
    intersection = K.sum(y_true*y_pred, axis=1)
    pred = K.sum(y_pred, axis=1)
    true = K.sum(y_true, axis=1)
    union = pred + true - intersection
    jaccard = intersection / (union + K.epsilon())
    jaccard = K.mean(jaccard)
    return jaccard
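
# Worked example (illustrative, plain arithmetic rather than Keras): for one sample with
# y_true = [1, 0, 1, 0] and y_pred = [0.9, 0.6, 0.2, 0.1], thresholding at 0.5 gives
# [1, 1, 0, 0]; intersection = 1, union = 2 + 2 - 1 = 3, so the per-sample Jaccard
# similarity is about 0.33, and the metric reported is the batch mean of these values.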

def getData(config):
    log('Read data...')
    train = pd.read_csv('../../feature/yyt/traindoc_money_law.csv')
    test = pd.read_csv('../../feature/yyt/testdoc_money_law.csv')
    labels = getLawLabel(train)

    log('Tokenizer..')
    #'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
    MAX_NB_WORDS = config['MAX_NB_WORDS']
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(train['doc'].append(test.doc))
    vocab = tokenizer.word_index
    num_words = min(MAX_NB_WORDS, len(vocab))

    log('Split Data..')
    VALIDATION_SPLIT = config['VALIDATION_SPLIT']
    split_seed = config['SPLIT_SEED']
    X_train, X_val, y_train, y_val = train_test_split(train, labels, test_size=VALIDATION_SPLIT, random_state=split_seed)


    log('Get word2id..')
    X_train_word_ids = tokenizer.texts_to_sequences(X_train.doc)
    X_val_word_ids = tokenizer.texts_to_sequences(X_val.doc)

    X_online_train_word_ids = tokenizer.texts_to_sequences(train.doc)
    X_test_word_ids = tokenizer.texts_to_sequences(test.doc)

    log('Get Padding..')
    INPUT_LEN = config['INPUT_LEN']
    x_train = pad_sequences(X_train_word_ids, maxlen=INPUT_LEN, padding='post')
    x_val = pad_sequences(X_val_word_ids, maxlen=INPUT_LEN, padding='post')

    x_online_train = pad_sequences(X_online_train_word_ids, maxlen=INPUT_LEN, padding='post')
    x_test = pad_sequences(X_test_word_ids, maxlen=INPUT_LEN, padding='post')
    log('Return LabelEncoder..')

    ID = test[['ID']].reset_index(drop = True)
    return x_train, x_val, y_train, y_val, x_online_train, x_test, labels, ID

def TextCNN(config, model_view = True):
    inputs = Input(shape=(config['INPUT_LEN'],), dtype='int32', name = 'input')
    embedding_cnn = Embedding(config['MAX_NB_WORDS']+1, config['EMBEDDING_DIM'], input_length=config['INPUT_LEN'])(inputs)
    conv_out = []
    for filter in [1,2,3,4,5,6]:
        conv1 = Convolution1D(256, filter, padding='same')(embedding_cnn)
        conv1 = Activation('relu')(conv1)
        x_conv1 = GlobalMaxPooling1D()(conv1)
        conv_out.append(x_conv1)

    x = concatenate(conv_out)
    x = Dense(128)(x)
    x = Dropout(0.5)(x)
    x = Activation('relu')(x)
    x = Dense(452)(x)
    outputs = Activation('sigmoid', name='outputs')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='binary_crossentropy', optimizer='adamax', metrics=[Jaccard_Sim])
    if model_view:
        model.summary()
    return model
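
# Architecture note (a reading of the code above, not an original comment): this is a
# multi-filter TextCNN for multi-label law prediction; six parallel Conv1D branches with
# window sizes 1-6 run over the embedded sequence, each is globally max-pooled, the
# results are concatenated, passed through a 128-unit ReLU layer with dropout, and
# projected to 452 sigmoid outputs (one per law article) trained with binary cross-entropy.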

def get_lr(epoch):
    if epoch < 4:
        return 0.0005
    elif epoch < 8:
        return 0.0002
    elif epoch < 12:
        return 0.0001
    else:
        return 0.00008

def log(stri):
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print str(now)+' '+str(stri)

def run():
    config = {}
    config['MAX_NB_WORDS'] = 100000
    config['EMBEDDING_DIM'] = 256
    config['INPUT_LEN'] = 1100
    config['SPLIT_SEED'] = 30
    config['VALIDATION_SPLIT'] = 0.1
    log(config)
    log('Get DataSet...')
    local_train_x, local_test_x, local_train_y, local_test_y, online_train_x, online_test_x, online_train_y, ID = getData(config)
    gc.collect()
    lr = LearningRateScheduler(get_lr)
    np.random.seed(30)
    #--get Local Test
    log('Local Validate Model...')
    # model = TextCNN(config, model_view = True)
    # model.fit(local_train_x, local_train_y, batch_size=100, epochs=30,
    #           callbacks = [EarlyStopping(monitor='Jaccard_Sim', patience=2, mode='max'), lr],
    #           validation_data=(local_test_x, local_test_y))
    # del model
    #--get Online Result
    log('Online Train Model With Different Seed...')
    for i in range(10):
        log('Seed_%d'%i)
        lr = LearningRateScheduler(get_lr)
        np.random.seed(i)
        # model_check = ModelCheckpoint(filepath='../model/textCNN_money_all_seed.'+str(i)+'.hdf5', mode='min', monitor='val_loss', save_best_only=False, verbose=1)
        model = TextCNN(config, model_view = False)
        model.fit(online_train_x, online_train_y,
                  batch_size=100,
                  epochs=20,
                  callbacks = [lr])
        model.save('../../model/yyt/laws/textCNN_laws_all_seed.'+str(i)+'.hdf5')
        predict_online = model.predict(online_test_x, batch_size=100)
        pd.DataFrame(predict_online).to_csv('../../result/yyt/laws/textCNN_laws_all_%d'%(i), index = False, header = False, float_format = '%.8f')
        if i == 0:
            res = predict_online
        else:
            res += predict_online
        del model
        gc.collect()
    res = pd.DataFrame(res/10)
    res = pd.concat([ID, res], axis = 1)
    res.to_csv('../../result/yyt/laws/textCNN_laws_all_prob_blend.csv', index = False, header = False, float_format = '%.8f')

# -*- encoding:utf-8 -*-

from __future__ import division
from __future__ import absolute_import
import pandas as pd
import numpy as np
import gensim
from gensim.models.word2vec import Word2Vec
import jieba
import warnings
import codecs
import copy
from tqdm import tqdm
from collections import defaultdict
import random as rn
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten, Merge,concatenate,Reshape
from keras.layers import Convolution1D,Convolution2D, MaxPooling1D, Embedding,BatchNormalization,Dropout,MaxPooling2D
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional
from keras.models import Model
from keras.layers.pooling import GlobalMaxPooling1D,GlobalMaxPooling2D,GlobalAveragePooling1D
from keras.callbacks import EarlyStopping,LearningRateScheduler,ModelCheckpoint
from keras import regularizers
from keras.models import Sequential, Model
from keras import backend as K
from keras.optimizers import Adamax, RMSprop, Adam
from keras.layers import Activation,add
from sklearn.metrics import f1_score
import copy
import re
import h5py
import gc
import time
warnings.filterwarnings('ignore')

def evalF1(p, l):
    preds = copy.deepcopy(p)
    label = copy.deepcopy(np.argmax(l, axis = 1))
    score = f1_score(label, preds, average='weighted')
    return score

def macro_f1(y_true, y_pred):
    true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred), 8), axis = 0)
    predicted_positives = K.sum(K.one_hot(K.argmax(y_pred), 8), axis = 0)
    precision = true_positives / (predicted_positives + K.epsilon())

    true_positives = K.sum(y_true * K.one_hot(K.argmax(y_pred), 8), axis = 0)
    possible_positives = K.sum(y_true, axis = 0)
    recall = true_positives / (possible_positives + K.epsilon())

    f1 = 2*((precision*recall)/(precision+recall+ K.epsilon()))
    macro_f1 = K.sum((K.sum(y_true, axis = 0)*f1)/(K.sum(y_true)+ K.epsilon()))
    return macro_f1
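
# Reading of macro_f1 above (descriptive comment, not from the original): predictions are
# hard-assigned via argmax to one of the 8 penalty classes, per-class precision and recall
# are computed against the one-hot labels, and the per-class F1 scores are combined
# weighted by class support, so despite the name it behaves like a support-weighted F1
# (consistent with f1_score(average='weighted') used in evalF1).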

def storeResult(rid, predict, name):
    file = open('../../result/%s.re'%name, 'w')
    for i in range(len(rid)):
        file.write('{"id":"%s","penalty":%d,"laws":[0]}\n'%(str(rid[i]), int(predict[i]+1)))

def getData(config):
    log('Read data...')
    train = pd.read_csv('../../feature/yyt/traindoc_money_law.csv')
    test = pd.read_csv('../../feature/yyt/testdoc_money_law.csv')

    log('Tokenizer..')
    #'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
    MAX_NB_WORDS = config['MAX_NB_WORDS']
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(train['doc'].append(test.doc))
    vocab = tokenizer.word_index
    num_words = min(MAX_NB_WORDS, len(vocab))

    log('Split Data..')
    VALIDATION_SPLIT = config['VALIDATION_SPLIT']
    split_seed = config['SPLIT_SEED']
    X_train, X_val, y_train, y_val = train_test_split(train, train.penalty, test_size=VALIDATION_SPLIT, random_state=split_seed)

    y_labels = list(train.penalty.value_counts().index)
    le = LabelEncoder()
    le.fit(y_labels)
    num_labels = len(y_labels)

    log('Get LabelEncoder..')
    y_online_train = to_categorical(train.penalty.map(lambda x: le.transform([x])[0]), num_labels)
    y_train = to_categorical(y_train.map(lambda x: le.transform([x])[0]), num_labels)
    y_val = to_categorical(y_val.map(lambda x: le.transform([x])[0]), num_labels)

    log('Get word2id..')
    X_train_word_ids = tokenizer.texts_to_sequences(X_train.doc)
    X_val_word_ids = tokenizer.texts_to_sequences(X_val.doc)

    X_online_train_word_ids = tokenizer.texts_to_sequences(train.doc)
    X_test_word_ids = tokenizer.texts_to_sequences(test.doc)

    log('Get Padding..')
    INPUT_LEN = config['INPUT_LEN']
    x_train = pad_sequences(X_train_word_ids, maxlen=INPUT_LEN, padding='post')
    x_val = pad_sequences(X_val_word_ids, maxlen=INPUT_LEN, padding='post')

    x_online_train = pad_sequences(X_online_train_word_ids, maxlen=INPUT_LEN, padding='post')
    x_test = pad_sequences(X_test_word_ids, maxlen=INPUT_LEN, padding='post')
    log('Return LabelEncoder..')

    ID = test[['ID']].reset_index(drop = True)
    return x_train, x_val, y_train, y_val, x_online_train, x_test, y_online_train, ID

def TextCNN_SoftMax(config, model_view = True):
    inputs = Input(shape=(config['INPUT_LEN'],), dtype='int32', name = 'input')
    embedding_cnn = Embedding(config['MAX_NB_WORDS']+1, config['EMBEDDING_DIM'], input_length=config['INPUT_LEN'])(inputs)
    conv_out = []
    for filter in [1,2,3,4,5,6]:
        conv1 = Convolution1D(256, filter, padding='same')(embedding_cnn)
        conv1 = Activation('relu')(conv1)
        x_conv1 = GlobalMaxPooling1D()(conv1)
        conv_out.append(x_conv1)

    x = concatenate(conv_out)
    x = Dense(128)(x) # x = Dropout(0.25)(x)
    x = Activation('relu')(x)
    x = Dense(8)(x)
    outputs = Activation('softmax', name='outputs')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=[macro_f1])
    if model_view:
        model.summary()
    return model

def TextCNN_SigMoid(config, model_view = True):
    inputs = Input(shape=(config['INPUT_LEN'],), dtype='int32', name = 'input')
    embedding_cnn = Embedding(config['MAX_NB_WORDS']+1, config['EMBEDDING_DIM'], input_length=config['INPUT_LEN'])(inputs)
    conv_out = []
    for filter in [1,2,3,4,5,6]:
        conv1 = Convolution1D(256, filter, padding='same')(embedding_cnn)
        conv1 = Activation('relu')(conv1)
        x_conv1 = GlobalMaxPooling1D()(conv1)
        conv_out.append(x_conv1)

    x = concatenate(conv_out)
    x = Dense(128)(x) # x = Dropout(0.25)(x)
    x = Activation('relu')(x)
    x = Dense(8)(x)
    outputs = Activation('sigmoid', name='outputs')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=[macro_f1])
    if model_view:
        model.summary()
    return model

def get_lr(epoch):
    if epoch < 4:
        return 0.0005
    elif epoch < 8:
        return 0.0002
    elif epoch < 12:
        return 0.0001
    else:
        return 0.00008

def log(stri):
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print str(now)+' '+str(stri)

def run():
    config = {}
    config['MAX_NB_WORDS'] = 90000
    config['EMBEDDING_DIM'] = 256
    config['INPUT_LEN'] = 1100
    config['SPLIT_SEED'] = 30
    config['VALIDATION_SPLIT'] = 0.1
    log(config)
    log('Get DataSet...')
    local_train_x, local_test_x, local_train_y, local_test_y, online_train_x, online_test_x, online_train_y, ID = getData(config)
    gc.collect()
    lr = LearningRateScheduler(get_lr)
    np.random.seed(30)
    #--get Local Test
    log('Local Validate Model...')
    # model = TextCNN_SoftMax(config, model_view = True)
    # model.fit(local_train_x, local_train_y, batch_size=100, epochs=20,
    #           callbacks = [EarlyStopping(monitor='val_macro_f1', patience=1, mode='max'), lr],
    #           validation_data=(local_test_x, local_test_y))
    # del model

    log('Online Train Model(9/1 Data) SoftMax With Different Seed...')
    for i in range(10):
        log('Seed_%d'%i)
        config['SPLIT_SEED'] = i
        local_train_x, local_test_x, local_train_y, local_test_y, online_train_x, online_test_x, online_train_y, ID = getData(config)
        lr = LearningRateScheduler(get_lr)
        np.random.seed(i)
        model = TextCNN_SoftMax(config, model_view = False)
        model.fit(local_train_x, local_train_y,
                  batch_size=100,
                  epochs=8,
                  callbacks = [lr])
        model.save('../../model/yyt/money/textCNN(SoftMax)_money_9_seed.'+str(i)+'.hdf5')
        predict_online = model.predict(online_test_x, batch_size=100)
        pd.DataFrame(predict_online).to_csv('../../result/yyt/money/textCNN(SoftMax)_9_%d'%(i), index = False)
        if i == 0:
            res = predict_online
        else:
            res += predict_online
        del model
        gc.collect()
    res = pd.DataFrame(res/10)
    res = pd.concat([ID, res], axis = 1)
    res.to_csv('../../result/yyt/money/textCNN(SoftMax)_9_prob_blend.csv', index = False, header = False, float_format = '%.8f')


    log('Online Train Model(ALL Data) SigMoid With Different Seed...')
    for i in range(10):
        log('Seed_%d'%i)
        lr = LearningRateScheduler(get_lr)
        np.random.seed(i)
        model = TextCNN_SigMoid(config, model_view = False)
        model.fit(online_train_x, online_train_y,
                  batch_size=100,
                  epochs=8,
                  callbacks = [lr])
        model.save('../../model/yyt/money/textCNN(SigMoid)_money_all_seed.'+str(i)+'.hdf5')
        predict_online = model.predict(online_test_x, batch_size=100)
        pd.DataFrame(predict_online).to_csv('../../result/yyt/money/textCNN(SigMoid)_all_%d'%(i), index = False)
        if i == 0:
            res = predict_online
        else:
            res += predict_online
        del model
        gc.collect()
    res = pd.DataFrame(res/10)
    res = res.apply(lambda x: x/sum(x))
    res = pd.concat([ID, res], axis = 1)
    res.to_csv('../../result/yyt/money/textCNN(SigMoid)_all_prob_blend.csv', index = False, header = False, float_format = '%.8f')


    #--get Online Result
    config['MAX_NB_WORDS'] = 150000
    local_train_x, local_test_x, local_train_y, local_test_y, online_train_x, online_test_x, online_train_y, ID = getData(config)
    log('Online Train Model(ALL Data) SoftMax With Different Seed...')
    for i in range(10):
        log('Seed_%d'%i)
        lr = LearningRateScheduler(get_lr)
        np.random.seed(i)
        model = TextCNN_SoftMax(config, model_view = False)
        model.fit(online_train_x, online_train_y,
                  batch_size=100,
                  epochs=8,
                  callbacks = [lr])
        model.save('../../model/yyt/money/textCNN(SoftMax)_money_all_seed.'+str(i)+'.hdf5')
        predict_online = model.predict(online_test_x, batch_size=100)
        pd.DataFrame(predict_online).to_csv('../../result/yyt/money/textCNN(SoftMax)_all_%d'%(i), index = False)
        if i == 0:
            res = predict_online
        else:
            res += predict_online
        del model
        gc.collect()
    res = pd.DataFrame(res/10)
    res = pd.concat([ID, res], axis = 1)
    res.to_csv('../../result/yyt/money/textCNN(SoftMax)_all_prob_blend.csv', index = False, header = False, float_format = '%.8f')