# This crawler only works with the Sogou search engine.
# Enter a keyword directly and it will crawl Sogou's results.
def gaojipachong(kw=None):
    """Search Sogou for a keyword and print the <title> of each linked page.

    Args:
        kw: search keyword; when None (default, backward-compatible),
            the user is prompted interactively.

    Side effects: network requests and console output. Returns None.
    """
    import requests
    from bs4 import BeautifulSoup
    from urllib.request import urlopen

    headers = {
        # Fixed typo: 'Mzilla/5.0' -> 'Mozilla/5.0' so the UA is recognized.
        'user-agent': 'Mozilla/5.0',
    }
    url = 'http://www.sogou.com/web'
    if kw is None:
        kw = input('input the word:')
    param = {'query': kw}

    response = requests.get(url=url, params=param, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')  # parse results page

    # Visit every hyperlink on the results page and print its <title>.
    # Use fresh local names per iteration instead of clobbering the
    # outer `html`/`bs`/`hyperlink` variables as the original did.
    for link in soup.find_all('a'):
        href = link.get('href')
        if not href:
            # <a> tags without an href (anchors, JS links) cannot be fetched.
            continue
        try:
            page = urlopen(str(href))
            titles = BeautifulSoup(page, 'html.parser').find_all('title')
            if titles:
                print(titles[0], ':', href)
        except Exception:
            # Best-effort crawl: skip links that fail to resolve, load,
            # or parse (was a silent blank print; now skip quietly).
            continue
gaojipachong()