Scraping CNKI (知网) with Python

This post walks through a Python crawler that collects paper metadata from CNKI for each year from 1998 to 2021: title, authors, affiliation, journal, publication date, download count, abstract, keywords, topic, and classification number. The crawler issues simulated POST requests, parses the returned HTML, and appends the extracted fields to a CSV file, using the requests library for HTTP and lxml plus regular expressions for parsing.


import requests
from lxml import etree
import re
import csv, time
from w3lib.html import remove_tags


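# write_data: open the output CSV in append mode, write the header row, and return the file name for later appends.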
def write_data(name):
    #timenow = time.strftime("%Y-%m-%d-%H%M%S", time.localtime())
    file = name
    with open(file, 'a+', encoding='utf-8-sig', newline='') as f:
        writer_f = csv.writer(f)
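        # Header columns: title, author, affiliation, journal, publication date, download count, abstract, keywords, topic, classification number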
        writer_f.writerow(['题目','作者','单位','所属期刊','发表时间','下载数','摘要','关键词','专题','分类号'])
    return file
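
# get_one: POST the advanced-search query for one year and return the total number of result pages scraped from the response.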
def get_one(year):
    data = {
        'IsSearch': 'true',
        #CO33_1,_2
        'QueryJson': '{"Platform":"","DBCode":"CJFQ","KuaKuCode":"","QNode":{"QGroup":[{"Key":"Subject","Title":"","Logic":4,"Items":[],"ChildItems":[]},{"Key":"ControlGroup","Title":"","Logic":1,"Items":[],"ChildItems":[{"Key":".extend-tit-checklist","Title":"","Logic":1,"Items":[{"Key":0,"Title":"SCI","Logic":2,"Name":"SI","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"EI","Logic":2,"Name":"EI","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"北大核心","Logic":2,"Name":"HX","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"CSSCI","Logic":2,"Name":"CSI","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"CSCD","Logic":2,"Name":"CSD","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""}],"ChildItems":[]}]},{"Key":"NaviParam","Title":"","Logic":1,"Items":[{"Key":"navi","Title":"","Logic":1,"Name":"专题子栏目代码","Operate":"=","Value":"C033_8?","ExtendType":13,"ExtendValue":"","Value2":"","BlurType":""}],"ChildItems":[]},{"Key":"MutiGroup","Title":"","Logic":1,"Items":[],"ChildItems":[{"Key":"3","Title":"","Logic":1,"Items":[{"Key":"'+year+'","Title":"'+year+'","Logic":2,"Name":"年","Operate":"","Value":"'+year+'","ExtendType":0,"ExtendValue":"","Value2":"","BlurType":""}],"ChildItems":[]}]}]}}',
        'PageName': 'AdvSearch',
        'DBCode': 'CJFQ',
        # 'KuaKuCodes': '',
        'CurPage': '1'
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.70',
        'Referer': 'https://kns.cnki.net/KNS8/AdvSearch?dbcode=CJFQ',
        #'Cookie' : 'Ecp_ClientId=2210720103601729746; Ecp_IpLoginFail=210720106.117.97.127; cnkiUserKey=3c237cd1-59ab-91cb-22ce-bbfc3ad33332; Ecp_ClientIp=106.117.97.127; RsPerPage=20; ASP.NET_SessionId=fkwgrvclzgkpqsagarxa5mdb; SID_kcms=124120; _pk_ref=["","",1626830042,"https://www.cnki.net/"]; _pk_ses=*; SID_kns_new=kns123112; SID_kns8=123122; CurrSortField=发表时间/(发表时间,'TIME')+desc; CurrSortFieldType=desc; _pk_id=2de25f01-dfc6-40ba-888f-beabd2c0efb8.1626748599.8.1626830093.1626830042.'
    }
    url = 'https://kns.cnki.net/KNS8/Brief/GetGridTableHtml'
    response = requests.post(url,headers=headers,data=data).text
    #print(response)
    pattern = re.compile(
        '<div class=\'pages\'> <span class="total">共(.*?)页</span>.*?</div>',
        re.S
    )
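    # The pager markup reports the total page count as "共N页".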
    item = re.findall(pattern,response)
    return item[0]

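# get_two: fetch result page i for the given year, pull the detail-page link out of each table row, then scrape title,
# authors, affiliation, abstract, keywords, topic and classification number, appending one CSV row per paper.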
def get_two(year,i,file):
    data = {
        'IsSearch': 'true',
        'QueryJson': '{"Platform":"","DBCode":"CJFQ","KuaKuCode":"","QNode":{"QGroup":[{"Key":"Subject","Title":"","Logic":4,"Items":[],"ChildItems":[]},{"Key":"ControlGroup","Title":"","Logic":1,"Items":[],"ChildItems":[{"Key":".extend-tit-checklist","Title":"","Logic":1,"Items":[{"Key":0,"Title":"SCI","Logic":2,"Name":"SI","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"EI","Logic":2,"Name":"EI","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"北大核心","Logic":2,"Name":"HX","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"CSSCI","Logic":2,"Name":"CSI","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""},{"Key":0,"Title":"CSCD","Logic":2,"Name":"CSD","Operate":"=","Value":"Y","ExtendType":14,"ExtendValue":"","Value2":"","BlurType":""}],"ChildItems":[]}]},{"Key":"NaviParam","Title":"","Logic":1,"Items":[{"Key":"navi","Title":"","Logic":1,"Name":"专题子栏目代码","Operate":"=","Value":"C033_8?","ExtendType":13,"ExtendValue":"","Value2":"","BlurType":""}],"ChildItems":[]},{"Key":"MutiGroup","Title":"","Logic":1,"Items":[],"ChildItems":[{"Key":"3","Title":"","Logic":1,"Items":[{"Key":"'+year+'","Title":"'+year+'","Logic":2,"Name":"年","Operate":"","Value":"'+year+'","ExtendType":0,"ExtendValue":"","Value2":"","BlurType":""}],"ChildItems":[]}]}]}}',
        'PageName': 'AdvSearch',
        'DBCode': 'CJFQ',
        #'KuaKuCodes': '',
        'CurPage': i
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.70',
        'Referer': 'https://kns.cnki.net/KNS8/AdvSearch?dbcode=CJFQ',
        # 'Cookie' : ''
    }
    url = 'https://kns.cnki.net/KNS8/Brief/GetGridTableHtml'

    response = requests.post(url, headers=headers, data=data).text
    #print(response)
    pattern = re.compile(
        '<tr.*?<td class="name">.*?<a class="fz14" href=.*?&DbCode=(.*?)&dbname=(.*?)&filename=(.*?)&urlid.*?>.*?</a>.*?</td>.*?<td class="source">(.*?)</td>.*?<td class="date">(.*?)</td>.*?<td class="download">(.*?)</td>.*?</tr>',
        re.S
    )
    items = re.findall(pattern,response)
    for item in items:
        key_words = ' '
        dbcode= item[0]
        dbname = item[1]
        filename = item[2]
        periodical = remove_tags(item[3]).strip()  # journal title
        date = str(item[4]).strip()  # publication date
        download = remove_tags(item[5]).strip()  # download count
        if len(download) == 0:
            download = 0
        #print(date,download)
        url_2 = 'https://kns.cnki.net/kcms/detail/detail.aspx?dbcode=' + dbcode + '&dbname=' + dbname + '&filename=' + filename
        print(url_2)
        # print(subject)
        response_2 = requests.get(url_2)
        text = response_2.content.decode('utf-8')
        pattern_2 = re.compile(
            '<div class="brief">.*?<div class="wx-tit">.*?<h1>(.*?)</h1>.*?<h3.*?>(.*?)</h3>.*?<h3.*?>(.*?)</h3>.*?</div>.*?</div>',
            re.S
        )
        items_2 = re.findall(pattern_2, text)
        for item_2 in items_2:
            subject = remove_tags(item_2[0])  # title
            # Strip the superscript affiliation digits and commas out of the author list.
            author = re.sub(r'[0-9,]', '', remove_tags(item_2[1].replace('</span>', '|')))
            print(author)
            company = remove_tags(item_2[2].replace('</span>', '|')).replace('&nbsp;', '').replace('1. ', '').replace('2. ', '|').replace('3. ', '|').replace('4. ', '|').replace('5. ', '|').replace('6. ', '|')  # affiliation(s)
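        # The abstract block is missing on some detail pages; fall back to a blank field.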
        pattern_3 = re.compile(
            '<div class="row"><span class="rowtit">.*?class="abstract-text">(.*?)</span>.*?</script>',
            re.S
        )
        items_3 = re.findall(pattern_3,text)
        if len(items_3)==0:
            abstract = ' '
        else:
            abstract = items_3[0].replace('&lt;正&gt;','')

        html = etree.HTML(text)
        kkk = html.xpath("//p[@class='keywords']")  # keywords
        ggg = html.xpath("//div[@class='row']/ul/li")
        if len(ggg) == 0:
            classification_Number = ' '
            special = ' '
        else:
            classification_Number = str(ggg[-1].xpath("./p/text()")).replace('\'', '').replace('[', '').replace(']', '').replace(';', ' ')  # classification number
            special = str(ggg[-2].xpath("./p/text()")).replace('\'', '').replace('[', '').replace(']', '').replace(';', ' ')  # topic
        if len(kkk) != 0:
            key_words = str(kkk[0].xpath("./a/text()"))
            key_words = key_words.replace(' ', '').replace('\'', '').replace(';', '').replace('[', '').replace(']', '').replace('\\', '').replace('rn', '').replace(',', ' ')
        with open(file, 'a+', encoding='utf-8-sig', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([subject, author, company, periodical, date, download, abstract, key_words, special, classification_Number])
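
# Main loop: for each year, look up the total page count, then crawl every result page, pausing briefly between years.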
name = '铁路运输管理工程.csv'  # output file: "Railway Transportation Management Engineering"
file = write_data(name)
for year in range(1998, 2022):  # 1998-2021 inclusive
    year = str(year)
    number = int(get_one(year))
    for i in range(1, number + 1):
        get_two(year, i, file)
    time.sleep(1)
print('Download complete')
# year = '2021' 8051
# file = write_data(year)
# number = get_one(year)
# number = int(number)
# for i in range(number):
#     get_two(i,file)
#     time.sleep(1)
# print('Download complete')


# Improvements over the earlier version:
#     1. Search is driven by year automatically.
#     2. After selecting a year, the total page count is read automatically and every page is crawled.
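
Note: the commented-out Cookie headers above hint that CNKI may start rejecting anonymous requests once you crawl at volume. Instead of hard-coding a cookie string, a requests.Session that visits the advanced-search page first will store the server-set cookies and re-send them automatically on every POST. The following is a minimal sketch of that idea (fetch_page is a hypothetical helper, not part of the script above), with a simple retry for empty responses:

import time
import requests

session = requests.Session()
session.headers.update({
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.70',
    'Referer': 'https://kns.cnki.net/KNS8/AdvSearch?dbcode=CJFQ',
})
# Visiting the search page once lets the session pick up the cookies
# that the commented-out Cookie headers were supplying by hand.
session.get('https://kns.cnki.net/KNS8/AdvSearch?dbcode=CJFQ')

def fetch_page(data, retries=3):
    # POST the query payload, retrying with an increasing pause
    # whenever the server returns an empty body.
    for attempt in range(retries):
        response = session.post('https://kns.cnki.net/KNS8/Brief/GetGridTableHtml', data=data)
        if response.text.strip():
            return response.text
        time.sleep(2 * (attempt + 1))
    raise RuntimeError('no data returned after %d attempts' % retries)

The same data dictionaries built in get_one and get_two could then be passed straight to fetch_page in place of the bare requests.post calls.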



