Scraping Baidu Baike entries and storing them in MySQL
The goal: enter through one Baidu Baike link, find all of the internal links on the current page (links that jump to other entries on the same site), pick one at random, follow it, and repeat the cycle.
On each page, only the h1 title and the short summary below it are scraped.
Preparation:
- The table needs three fields: id, the title, and the content (named summary in the code below)
- The database must be created with character set utf8, otherwise you will run into all sorts of encoding errors
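For reference, here is a minimal one-time setup sketch. It is an assumption, not from the original post: it reuses the root credentials from the crawler below, and the database/table names (baidu, pages, with a title and summary column) are chosen to match the insert in store().

# One-time setup sketch: create the database and table the crawler expects.
# Assumed names (not spelled out in the post): database baidu, table pages.
import pymysql

conn = pymysql.connect(host='127.0.0.1', port=3306,
                       user='root', passwd='password', charset='utf8')
cur = conn.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS baidu CHARACTER SET utf8")
cur.execute("""
    CREATE TABLE IF NOT EXISTS baidu.pages (
        id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
        title VARCHAR(255) NOT NULL,
        summary TEXT
    ) CHARACTER SET utf8
""")
conn.commit()
cur.close()
conn.close()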
Let's crawl!!
First, find all of the internal links on the current page by spotting the pattern: they all start with /item/, so a regular expression sweeps them up in one go. After that, BeautifulSoup makes it easy to pick out the summary section of each page, and the result just gets written to the database. There is no multithreading yet, though, so it is not very fast; a possible threaded variant is sketched after the script.
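As a quick sanity check of that pattern before the full script (the example paths here are made up):

import re

pattern = re.compile("^(/item/)((?!:).)*$")
print(bool(pattern.match("/item/Python")))                         # True: an ordinary entry link
print(bool(pattern.match("/item/a:b")))                            # False: the (?!:) lookahead rejects colons
print(bool(pattern.match("https://baike.baidu.com/item/Python")))  # False: absolute URLs don't start with /item/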
# -*- coding: utf-8 -*-
'''Zheng 's BUG'''
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.error import HTTPError
import random
import re
from urllib.parse import quote
import string
import pymysql

baseUrl = "https://baike.baidu.com"

def getLinks(link):
    try:
        link = quote(link, safe=string.printable)  # percent-encode the Chinese part of a mixed Chinese/ASCII URL
        page = urlopen(baseUrl + link).read().decode("utf-8")
    except HTTPError:
        return []  # an empty list ends the crawl loop cleanly
    bsObj = BeautifulSoup(page, "html.parser")
    # entry links all start with /item/; the (?!:) lookahead skips colon-namespaced pages
    return bsObj.findAll("a", href=re.compile("^(/item/)((?!:).)*$"))

def getSum(newPage):
    try:
        newPage = quote(newPage, safe=string.printable)
        newPage = urlopen(baseUrl + newPage).read().decode("utf-8")
    except HTTPError:
        return
    bsObj = BeautifulSoup(newPage, "html.parser")
    if bsObj.body is None or bsObj.body.h1 is None:
        return  # some pages carry no h1 title; skip them
    title = bsObj.body.h1.get_text()
    content = ""
    summary = bsObj.find("div", {"class": "lemma-summary"})
    if summary is not None:
        for cont in summary.findAll("div", {"class": "para"}):
            content += cont.get_text()
    store(title, content)

def store(title, content):
    # let pymysql quote and escape the values itself; wrapping %s in
    # hand-written quotes double-quotes the strings and breaks the insert
    cur.execute("insert into pages (title, summary) values (%s, %s)", (title, content))
    cur.connection.commit()

link = getLinks("/item/%E7%99%BE%E5%BA%A6%E7%99%BE%E7%A7%91/85895?fr=aladdin")
conn = pymysql.connect(host='127.0.0.1', port=3306,
                       user='root', passwd='password', db='baidu', charset="utf8")
cur = conn.cursor()
while len(link) > 0:
    newPage = random.choice(link).attrs['href']
    print(newPage)
    getSum(newPage)
    link = getLinks(newPage)
cur.close()
conn.close()
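Finally, on the missing multithreading: one possible way to speed things up (a sketch under assumptions, not part of the original script) is to hand each page's summary download to a thread pool while the main thread keeps walking the links and remains the only one touching the pymysql cursor, since a pymysql connection is not thread-safe. fetchSummary here is a hypothetical helper: getSum minus the store() call, returning (title, content) or None.

from concurrent.futures import ThreadPoolExecutor

def crawlThreaded(start, workers=4, pages=100):
    # The getLinks() chain stays sequential (each hop depends on the previous
    # page), so only the summary downloads overlap with it.
    futures = []
    with ThreadPoolExecutor(max_workers=workers) as pool:
        links = getLinks(start)
        for _ in range(pages):
            if not links:
                break
            href = random.choice(links).attrs['href']
            futures.append(pool.submit(fetchSummary, href))  # hypothetical: getSum without store()
            links = getLinks(href)
    for future in futures:
        result = future.result()
        if result is not None:
            store(*result)  # all MySQL writes stay on the main thread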