python爬虫得到谷歌学术搜索结果

python 爬虫实现

本文使用python3 实现从谷歌学术获得搜索结果

模拟浏览器发送请求

网络访问的模型使用请求应答的模型。客户端发送请求,服务器响应请求。

使用chrome浏览器获得请求方式

在f12开发者模式下,查看请求头,发现是使用get方法。复制为url得到请求内容
为了模拟浏览器,所以使用headers。
在headers中可以将cookies删除,测试不影响

在python中实现

使用urllib中的模块

数据分析

使用正则表达式
分析html文件。通过正则表达式匹配

代码块

import urllib.parse
import urllib.request

import re


# Scrape Google Scholar search results for a user-supplied keyword and
# print the link, authors, and abstract of every hit across several pages.
#
# The original script repeated the same fetch/parse code three times and
# appended a stray '234' to the keyword on the later pages; both defects
# are fixed here by factoring the logic into helpers.

# Request headers copied from a real browser session so Scholar serves
# the normal HTML page (cookies proved unnecessary).
HEADERS = {
    'Host': 'scholar.google.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Referer': 'https://scholar.google.com/schhp?hl=zh-CN',
    'Connection': 'keep-alive',
}

# Patterns compiled once instead of once per page / per result.
# Capture groups replace the original magic slice offsets ([9:-1], [18:-6],
# [19:-6]) that stripped the surrounding tags by hand.
_RESULT_RE = re.compile(r'<div class="gs_ri">.*?</div></div></div>')
_LINK_RE = re.compile(r'<a href="(.*?)"')
_AUTHOR_RE = re.compile(r'<div class="gs_a">(.*?)</div>')
_ABSTRACT_RE = re.compile(r'<div class="gs_rs">(.*?)</div>')
_ANCHOR_TAG_RE = re.compile(r'<a.*?>')


def _fetch_page(keyword, start=None):
    """Fetch one Google Scholar result page and return it as decoded HTML.

    keyword -- raw search term; URL-quoted here (the original interpolated
               it unquoted, so multi-word searches were broken)
    start   -- result offset for pagination, or None for the first page
    """
    start_part = '' if start is None else 'start=%d&' % start
    url = ('https://scholar.google.com/scholar?%shl=en&q=%s&btnG=&lr='
           % (start_part, urllib.parse.quote(keyword)))
    req = urllib.request.Request(url=url, headers=HEADERS)
    response = urllib.request.urlopen(req, timeout=120)
    print("connect succeed!")
    # No charset handling in the original either; assume UTF-8 (decode() default).
    return response.read().decode()


def _print_results(html):
    """Extract and print url / author / abstract for each result in *html*."""
    results = _RESULT_RE.findall(html)
    print("data get")
    print(len(results))
    for block in results:
        link = _LINK_RE.search(block)
        print("url:")
        print(link.group(1) if link else '')

        author = _AUTHOR_RE.search(block)
        print("author:")
        # Author names are wrapped in individual <a ...> tags; strip them.
        print(_ANCHOR_TAG_RE.sub('', author.group(1)) if author else '')

        abstract = _ABSTRACT_RE.search(block)
        if abstract:
            print("abstract:")
            # Drop the highlight/line-break markup Scholar inserts.
            text = abstract.group(1)
            print(text.replace("<b>", " ").replace("<br>", " ").replace("</b>", " "))
        else:
            print("no abstract")
        print('')


def main():
    """Prompt for a keyword and print results for the pages the original fetched."""
    keyword = input("keywords is?\n")
    print(keyword)
    # The original script fetched the first page plus offsets 20 and 30.
    for start in (None, 20, 30):
        _print_results(_fetch_page(keyword, start))


if __name__ == "__main__":
    main()


  • 5
    点赞
  • 52
    收藏
    觉得还不错? 一键收藏
  • 7
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 7
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值