Classification Statistics of Appeal Grounds in Court Judgments

I. Statistical Results

(figure omitted)
After cleaning the 12,927 judgment documents, roughly 10,858 remained usable, a validity rate of about 84%.

II. Implementation

(1) Extracting Appeal Grounds from Judgment Documents

1. Classification Criteria

Denial of guilt (不认罪), objection to the offence (罪名异议), sentence too heavy (量刑过重), sentencing objection (量刑异议), procedural defect (程序瑕疵), and other (其它).
These categories are built around three axes: the offence, the sentence, and the procedure. If the appellant argues that the first-instance findings of fact are unclear and the evidence insufficient, so that the offence is not made out, the appeal is labelled "denial of guilt". If the appellant argues that the conduct constitutes a different offence from the one found at first instance, it is an "objection to the offence". If the appellant merely claims the sentence is too heavy without giving any reasons, it is "sentence too heavy"; if reasons are given, for example voluntary surrender or being a first-time or occasional offender, it is a "sentencing objection". If the appeal alleges procedural errors at first instance, such as a request to exclude illegally obtained evidence, it is a "procedural defect". Anything that cannot be assigned to the categories above, whether because the stated grounds are themselves unclear or because the extraction program failed, goes under "other".
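For reference, the six labels can be summarised as a small mapping (a sketch; the English text is only a gloss added here, while the Chinese names are the labels the scripts below actually use):

LABELS = {
    "不认罪": "denial of guilt: facts unclear / insufficient evidence, offence not made out",
    "罪名异议": "objection to the offence: a different crime than the one found at first instance",
    "量刑过重": "sentence too heavy, no supporting reasons given",
    "量刑异议": "sentencing objection with reasons, e.g. surrender, first or occasional offender",
    "程序瑕疵": "procedural defect, e.g. exclusion of illegally obtained evidence",
    "其它": "other: grounds unclear or extraction failed",
}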

2. How the Appeal Grounds Are Extracted

Each judgment is first split into sentences on the Chinese full stop, and the opening and closing sentences are discarded, since they are mostly unrelated to the appeal grounds. Within the remaining text, appeal grounds are introduced by a fairly fixed set of marker phrases such as "关于上诉人", "对上述人", "上诉称:", "上诉理由", "上诉提出", "辩护人提出", "辩护人发表意见", "辩护意见", "为由,提出上诉", "辩护提出", "辩称", "提出" and "辩护认为", so extraction boils down to rule-based keyword matching on these phrases.

3. Implementation Code

Reading the .docx files

import docx2txt

def docx_read_simple(path):
    """Return the plain text of a .docx file, skipping Word's "~$" lock files."""
    if "~$" in path:
        pass
    else:
        text = docx2txt.process(path)
        return text

Splitting into sentences on the full stop

    def period_cutword(self, path):
        """Naive sentence splitting on the Chinese full stop."""
        content = []
        # read(): the .docx reading helper shown above
        if read(path) is None:
            pass
        else:
            text = read(path).replace("\n", "").replace("\u3000", "").replace(" ", "").strip()
            for i in text.split("。"):
                content.append(i)
            if len(content) <= 2:
                return content
            else:
                # drop the leading and trailing sentences, which rarely contain appeal grounds
                return content[4:-2]
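A quick usage sketch (the path is only illustrative; sy is the class this method belongs to, as referenced by the labelling code further down):

sentences = sy().period_cutword(r"E:\Firefox\docx\example.docx")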

Manual labelling

class Get_message():
def __init__(self,index):
    self.file = os.path.dirname(__file__)
    self.index = index

def get_name(self, file):
    names = []
    path, file = os.path.split(file)

    # first pass: HanLP person-name (nr) extraction
    res = HanLP.segment(file)
    for t in res:
        if str(t.nature) == "nr":
            names.append(str(t.word))
    # second pass: jieba with a user dictionary
    jieba.load_userdict(r"J:\PyCharm项目\项目\中国裁判文书网\selenium路线\文书分类标签\jieba_dic.txt")
    for x in psg.lcut(file.strip()):
        if x.flag == "nr":
            names.append(x.word)

    # third pass: jiagu
    words = jiagu.seg(file)  # word segmentation
    pos = jiagu.pos(words)  # POS tagging
    for index, word in enumerate(pos):
        if word == "nh":
            names.append(words[index])
    # name clean-up: if one extracted name is contained in another, drop it
    if len(names) <= 1:
        pass
    else:
        names = sorted(names)
        for index_, name in enumerate(names):
            clear_name = names.copy()
            clear_name.remove(clear_name[index_])
            for i in clear_name:
                if name in i:
                    return clear_name
                else:
                    continue
    return names



def get_text_from_wenshu(self, file,change=1):
    global words, names, content, tag
    words,txt = [],[]

    texts = sy().period_cutword(file)
    #print(texts)
    if texts is None:
        print("内容为:",texts)
    else:
        for name in list(set(self.get_name(file))):
            """"""
            if texts == [] or len(texts)<=10:
                txt.append(str(texts)+"文书无内容")
            for text in texts:
                if name in text or "上诉单位" in text or "哎拉" in text:
                    tags = ["关于上诉人", "对上述人" ,"上诉称:" , "上诉理由" , "上诉提出","对于上诉","辩护人提出",
                            "辩护人发表意见","辩护意见","提出上诉","辩护提出","辩称","提出","辩护认为"]
                    for tag in tags:
                        if tag in text:
                            txt.append(text)

        if self.get_name(file) == []:
            for text in texts:
                tags = ["关于上诉人", "对上述人", "上诉称:", "上诉理由", "上诉提出","辩护人提出","辩护人发表意见"
                        ,"辩护意见","为由,提出上诉","辩护提出","辩称","提出","辩护认为"]
                for tag in tags:
                    if tag in text:
                        txt.append(text)
        #print(self.get_name(file),txt)
        # de-duplicate and sort candidates, longest first
        txt = sorted(list(set(txt)),key=lambda x:len(x),reverse=True)
        del_ = ["经查","不开庭审理","综上"]
        clear_content = txt.copy()
        for i in txt:
            for i_ in del_:
                if i_ in i:
                    clear_content.remove(i)
        # targeted de-duplication: keep only the single best candidate
        if clear_content != []:
            if len(clear_content) >=2:
                content = clear_content[0]
            else:
                content = clear_content
        else:
            content = clear_content
        if int(change) == 2:
            content = "\n".join(txt).strip()
        # drop function words, locatives and similar POS tags
        res = HanLP.segment(str(content))
        for t in res:
            drop_pos_set = ['xu', 'xx', 'y', 'yg', 'wh', 'wky', 'wkz', 'wp', 'ws', 'wyy', 'wyz', 'wb', 'u',
                            'ud', 'ude1', 'ude2', 'ude3', 'udeng', 'udh', 'p', 'rr', 'f', 'nis', 'nic',
                            'nnd', 'ns']
            if not str(t.nature) in drop_pos_set:
                words.append(str(t.word))
        # remove stop words
        with open(r"J:\PyCharm项目\项目\中国裁判文书网\selenium路线\文书分类标签\stop_words.txt","r",encoding="utf-8")as f:
            stop = f.readlines()
        # removing items while iterating skips elements, so filter into a new list instead
        stop_words = [s.strip("\n") for s in stop]
        words = [a for a in words if not any(s in a for s in stop_words)]

        content = pprint.pformat("".join(words))
        number = input(f"{content},\n 1:不认罪;  2:罪名异议;  3:量刑过重;  4:量刑异议;  5:程序瑕疵;  6:输出文件;  7:其它 ;"
                       f"8:重来 \n 请输入标签:")
        number = int(number)
        if number == 1:
            tag = "不认罪"
        elif number == 2:
            tag = "罪名异议"
        elif number == 3:
            tag = "量刑过重"
        elif number == 4:
            tag = "量刑异议"
        elif number == 5:
            tag = "程序瑕疵"
        elif number == 6:
            print(file)
            sys.exit()
        elif number == 8:
            os.system("taskkill /F /IM wps.exe")
            change = 2
            self.get_text_from_wenshu(file,change)
        else:
            tag = "其它"
        path = os.path.dirname(__file__)+"\\"+tag
        if not os.path.exists(path):
            os.makedirs(path)


        """删除姓名"""
        res = HanLP.segment(str(content))
        for t in res:
            drop_pos_set = ['nr', 'm',"w",'q']
            if not str(t.nature) in drop_pos_set:
                words.append(str(t.word))
        write = "".join(words)

        # (optional) predict the appeal-ground category with the trained CNN
        #cnn().predict(write)
        # write the cleaned text into the folder of the chosen label
        with open(path+f"\\{os.path.split(file)[-1]}.txt", "a+", encoding="utf-8")as f:
            f.write(write)
        with open("记录.txt", "a+",encoding="utf-8")as f:
            f.write(file+"\n")
        sys.exit()





def docx_clear(self,file):
    global words, names, content, tag
    words, txt = [], []

    texts = sy().period_cutword(file)
    # print(texts)
    if texts is None:
        print("内容为:", texts)
    else:
        for name in list(set(self.get_name(file))):
            """"""
            if texts == [] or len(texts) <= 10:
                txt.append(str(texts) + "文书无内容")
            for text in texts:
                if name in text or "上诉单位" in text or "哎拉" in text:
                    tags = ["关于上诉人", "对上述人", "上诉称:", "上诉理由", "上诉提出", "对于上诉", "辩护人提出",
                            "辩护人发表意见", "辩护意见", "提出上诉", "辩护提出", "辩称", "提出", "辩护认为"]
                    for tag in tags:
                        if tag in text:
                            txt.append(text)

        if self.get_name(file) == []:
            for text in texts:
                tags = ["关于上诉人", "对上述人", "上诉称:", "上诉理由", "上诉提出", "辩护人提出", "辩护人发表意见"
                    , "辩护意见", "为由,提出上诉", "辩护提出", "辩称", "提出", "辩护认为"]
                for tag in tags:
                    if tag in text:
                        txt.append(text)
        # print(self.get_name(file),txt)
        # de-duplicate and sort candidates, longest first
        txt = sorted(list(set(txt)), key=lambda x: len(x), reverse=True)
        del_ = ["经查", "不开庭审理"]
        clear_content = txt.copy()
        for i in txt:
            for i_ in del_:
                if i_ in i:
                    try:
                        clear_content.remove(i)
                    except ValueError:
                        print(file)
        # targeted de-duplication: keep only the single best candidate
        if clear_content != []:
            if len(clear_content) >= 2:
                content = clear_content[0]
            else:
                content = clear_content
        else:
            content = clear_content
        # drop function words and locatives, plus names, numbers and punctuation
        res = HanLP.segment(str(content))
        for t in res:
            drop_pos_set = ['xu', 'xx', 'y', 'yg', 'wh', 'wky', 'wkz', 'wp', 'ws', 'wyy', 'wyz', 'wb', 'u',
                            'ud', 'ude1', 'ude2', 'ude3', 'udeng', 'udh', 'p', 'rr', 'f', 'nis', 'nic',
                            'nnd', 'ns',"nr","w","m","q"]
            if not str(t.nature) in drop_pos_set:
                words.append(str(t.word))
        # remove stop words
        with open(r"J:\PyCharm项目\项目\中国裁判文书网\selenium路线\文书分类标签\stop_words.txt", "r", encoding="utf-8")as f:
            stop = f.readlines()
        # removing items while iterating skips elements, so filter into a new list instead
        stop_words = [s.strip("\n") for s in stop]
        words = [a for a in words if not any(s in a for s in stop_words)]

        content = pprint.pformat("".join(words))
    return content


def main(self,files):
    for i in tqdm(glob.glob(os.path.join(files,"*.docx"))):
        with open("J:\记录.txt", "r+")as f:
            past = f.readlines()
        if i+"\n" in past:
            pass
        else:
            self.docx_clear(i.strip('\u2022'))





if __name__ == "__main__":
    index = str(random.randint(1, 50)) + random.choice('abcdefghijklmnopqrstuvwxyz!@#$%^&())')
    files = r"E:\Firefox\docx"
    Get_message(index).main(files)

Of these:

get_text_from_wenshu()
produces the manually labelled samples on which the CNN classifier is trained;

docx_clear()
cleans new documents and passes their text to the trained classification model.
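A minimal sketch of how the two pieces fit together at prediction time. The wiring is assumed, following the commented-out cnn().predict() call in the labelling code; cnn stands for the predict.py wrapper around the trained model:

import glob
import os

def predict_folder(files):
    model = cnn()                                  # predict.py wrapper around the trained CNN
    results = {}
    for path in glob.glob(os.path.join(files, "*.docx")):
        text = Get_message(0).docx_clear(path)     # extract and clean the appeal-ground text
        results[path] = model.predict(text)        # predicted category label
    return results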

4. After labelling

(screenshots of the labelled output omitted)
In the end each labelled sample boils down to a single appeal ground, with names, numbers, punctuation, stop words, auxiliaries, adverbs and other content of little use for classification removed.
Labelling is tedious work. To get as much accuracy as possible out of as little data as possible, stripping irrelevant content by hand is a reasonable approach, so only 201 judgment documents were labelled, and each txt file contains exactly one appeal ground.
A judgment often raises more than one ground of appeal, but for the sake of classification only the most fully argued one is kept; otherwise neither the machine nor a human annotator could classify it.

III. Text Classification

(1) Source of the Classification Code

The classifier is gaussic's text-classification-cnn-rnn: https://github.com/gaussic/text-classification-cnn-rnn.

Its original task:
10 categories, 6,500 samples per category.

The categories are:

体育, 财经, 房产, 家居, 教育, 科技, 时尚, 时政, 游戏, 娱乐

Its reported results are quite good (result screenshots omitted).

Accordingly, the hyperparameters were not changed in any major way.
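The one thing that does have to change when repurposing the repo is the label set. From memory of that code base (verify against the repo itself), it lives in data/cnews_loader.py, and num_classes in TCNNConfig (cnn_model.py) must match:

def read_category():
    """Map the appeal-ground labels to ids instead of the original news topics."""
    categories = ['不认罪', '罪名异议', '量刑过重', '量刑异议', '程序瑕疵', '其它']
    cat_to_id = dict(zip(categories, range(len(categories))))
    return categories, cat_to_id

# cnn_model.py: set TCNNConfig.num_classes = len(categories), i.e. 6 here.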

(2) Data Format and Modifications

The structure of the extracted data, which is also the input format for the classifier:

(screenshot of the data format omitted)
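The loader in that repo expects one sample per line in the form "label<TAB>content". A sketch of how the per-category folders of txt files produced by the labelling script could be flattened into that format (this glue code is an assumption, not part of the original project):

import glob
import os

def build_dataset(label_root, out_path):
    """Flatten label-named folders of txt files into 'label<TAB>content' lines."""
    with open(out_path, "w", encoding="utf-8") as out:
        for label in os.listdir(label_root):
            label_dir = os.path.join(label_root, label)
            if not os.path.isdir(label_dir):             # one folder per category
                continue
            for txt in glob.glob(os.path.join(label_dir, "*.txt")):
                with open(txt, encoding="utf-8") as f:
                    content = f.read().replace("\n", "").strip()
                if content:
                    out.write(f"{label}\t{content}\n")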

predict.py, the prediction script, was modified so that the prediction confidence (the top-class probability) is visible.

Original (this line was commented out):

#return self.categories[y_pred_cls[0]]

Changed to:

max_key = max([key for value in max_predict for key in value.keys()])  # can't look a key up by its value directly
for value in max_predict:
    for key in value.keys():
        if key == max_key:
            # print("最佳预测类别为:{},最佳预测率为{:.5%}".format(value[max_key], max_key))
            if max_key < 0.5:
                # log low-confidence predictions for later review
                with open("预测不准.txt", "a+", encoding="utf-8") as f:
                    f.write(value[max_key] + "\n")
            return value[max_key]

The commented-out line

print("最佳预测类别为:{},最佳预测率为{:.5%}".format(value[max_key], max_key))

can print each text's predicted class and its probability; the change above was made in order to collect the texts whose top prediction falls below 50%.
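For context, max_predict in the snippet above maps each class probability to its class name. That part of the modified predict.py is not shown in the post, but inside CnnModel.predict() it can be reconstructed roughly like this (an assumption based on the repo's predict.py, where self.session, self.model.logits, feed_dict and self.categories already exist):

y_prob = self.session.run(tf.nn.softmax(self.model.logits), feed_dict=feed_dict)[0]
max_predict = [{float(p): self.categories[i]} for i, p in enumerate(y_prob)]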

(3) Prediction Run

(screenshots of the prediction run omitted)

IV. Conclusion

Judging from the final results, the predicted class proportions are very close to those in the hand-labelled data.
Fewer than 1,000 documents had a top prediction confidence below 50% (out of roughly 12,000 in total, i.e. an error rate under 10%), which seems acceptable.
