Scraping the real target URLs and page titles behind Baidu search results with Python

# coding=utf8
import sys
import random
import urllib
import urllib2
import re
# Rotate among several user agents so Baidu is less likely to throttle or block the IP
user_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ (KHTML, like Gecko) Element Browser 5.0',
    'IBM WebExplorer /v0.94',
    'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)',
    'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
    'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
    'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']
def baidu_search(keyword, pn):
  # Fetch one page of Baidu results; pn is the result offset (10 hits per page).
  p = {'wd': keyword}
  url = ("http://www.baidu.com/s?" + urllib.urlencode(p) + "&pn={0}").format(pn)
  # Send a random user agent on the search request itself, which is what
  # the user_agents list above is for.
  req = urllib2.Request(url)
  req.add_header('User-agent', random.choice(user_agents))
  return urllib2.urlopen(req).read()
def getList(regex, text):
  # Return all matches of regex in text (re.findall already returns a list).
  return re.findall(regex, text)
def getMatch(regex, text):
  # Return the first match (a tuple when the regex has groups), or "" if none.
  res = re.findall(regex, text)
  if res:
    return res[0]
  return ""
def clearTag(text):
  # Strip HTML tags from text.
  p = re.compile(u'<[^>]+>')
  return p.sub("", text)
def geturl(keyword):
  # Walk the first two result pages.
  for page in range(2):
    pn = page * 10 + 1
    html = baidu_search(keyword, pn)
    content = unicode(html, 'utf-8', 'ignore')
    arrList = getList(u"<div class=\"result c-container \"[\s\S]*?>([\s\S]*?</a>)", content)
    for item in arrList:
      regex = u"<h3.*?class=\".*\".*?><a[\s\S]*?href = \"([\s\S]*?)\"[\s\S]*?>([\s\S]*?)</a>"
      link = getMatch(regex, item)
      if not link:
        continue
      url = link[0]
      # Strip any markup inside the anchor to get the plain title.
      title = clearTag(link[1]).encode('utf8')
      print title
      try:
        # Baidu result hrefs point at a redirect service; follow the
        # redirect with a random user agent to recover the real URL.
        domain = urllib2.Request(url)
        domain.add_header('User-agent', random.choice(user_agents))
        domain.add_header('Connection', 'keep-alive')
        response = urllib2.urlopen(domain)
        print response.geturl()
      except Exception:
        continue
if __name__ == '__main__':
  geturl(sys.argv[1])

Usage example: python xxx.py "关联分析"  (the quoted argument is the search keyword)
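
The script above targets Python 2 (urllib2, print statements). As a reference point for newer interpreters, here is a minimal, standard-library-only sketch of the same fetch-and-resolve flow on Python 3. The function names are mine, the parsing regexes are omitted, and Baidu's markup changes over time, so treat this as illustrative rather than a drop-in replacement.

# Python 3 sketch: fetch a Baidu result page and resolve a result link.
import random
import urllib.parse
import urllib.request

def fetch(url, user_agent):
    # Request a URL with the given User-Agent; return (final URL, body text).
    req = urllib.request.Request(url, headers={'User-Agent': user_agent})
    with urllib.request.urlopen(req) as res:
        return res.geturl(), res.read().decode('utf-8', 'ignore')

def baidu_search(keyword, pn, user_agents):
    # pn is the result offset, as in the Python 2 version above.
    qs = urllib.parse.urlencode({'wd': keyword, 'pn': pn})
    return fetch('http://www.baidu.com/s?' + qs, random.choice(user_agents))[1]

def resolve(link_url, user_agents):
    # Follow Baidu's redirect link to recover the real target URL.
    return fetch(link_url, random.choice(user_agents))[0]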

Example output (screenshot from the original post omitted): each result's title is printed on one line, followed by its resolved real URL.

Reference:

https://www.2cto.com/kf/201306/223584.html (the code is largely unchanged from this source; my modifications are in the part that extracts the tag contents)


Table content extraction:

https://pypi.org/project/scrapely/

http://item.jd.com/26793464002.html

https://www.jianshu.com/p/136a714d5382
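
For structured (e.g. table) content, the scrapely library linked above learns an extraction template from a single annotated example page instead of hand-written selectors. A minimal sketch following the train/scrape workflow from its documentation; the URLs and field values below are placeholders, not tested examples.

# scrapely sketch: train on one annotated page, then scrape similar pages.
from scrapely import Scraper

scraper = Scraper()

# Placeholder training page and the field values as they appear on it.
train_url = 'http://example.com/products/1'
scraper.train(train_url, {'name': 'Example product', 'price': '9.99'})

# Pages with the same layout can now be scraped without writing selectors.
print(scraper.scrape('http://example.com/products/2'))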
