from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import random
# Seed the PRNG from the current time so each run walks a different random path.
# NOTE: passing a datetime object to random.seed() raises TypeError on
# Python 3.11+ (only None/int/float/str/bytes/bytearray are accepted);
# pass the POSIX timestamp (a float) instead.
random.seed(datetime.datetime.now().timestamp())
# URLs of articles already visited, so each article is printed at most once.
page = set()
def getLinks(articleUrl):
    """Fetch one baike.com article, print its title and first paragraph,
    and return the in-site article links found on the page.

    Args:
        articleUrl: path part of the URL (e.g. "/wiki/...") appended to the
            site root "http://www.baike.com".

    Returns:
        List of ``<a class="innerlink">`` tags (possibly empty), which the
        caller uses to pick the next article to crawl.
    """
    # Use the response as a context manager so the HTTP connection is always
    # closed (the original leaked it).
    with urlopen("http://www.baike.com" + articleUrl) as html:
        bsObj = BeautifulSoup(html, 'html.parser')
    try:
        # Article title.
        print(bsObj.h1.get_text())
        # First paragraph of the article body under the #anchor element.
        # AttributeError: page has no <h1>, or find(id="anchor") returned None.
        # IndexError: #anchor exists but contains no <p> (was uncaught before).
        print(bsObj.find(id="anchor").findAll('p')[0].get_text())
    except (AttributeError, IndexError):
        print("Something Wrong!Don't worry!")
    return bsObj.findAll("a", {'class': 'innerlink'})
# Random walk: start at the seed article and repeatedly hop to a random
# not-yet-visited in-site link, printing each article's title and first
# paragraph, until a page yields no unvisited links.
SITE_PREFIX = "http://www.baike.com"  # hrefs are absolute; strip this before re-fetching
links = getLinks("/wiki/%E5%B0%8F%E6%A0%97%E6%97%AC&prd=so_1_doc")
while links:
    # Keep only links that have an href and were not visited yet; the
    # original re-picked blindly and could spin forever once every link
    # on the page had been seen (and crashed on anchors without href).
    candidates = [tag.attrs['href'] for tag in links
                  if 'href' in tag.attrs and tag.attrs['href'] not in page]
    if not candidates:
        break
    newArticle = random.choice(candidates)
    page.add(newArticle)
    print("----------------------------------------\n" + newArticle)
    # Pass only the path portion (the magic "[20:]" in the original was
    # exactly len("http://www.baike.com")).
    links = getLinks(newArticle[len(SITE_PREFIX):])
# Randomly crawl Hudong Baike (baike.com) from the start page onward, printing
# each article's title and first paragraph. Note: the book's version omitted
# .get_text() on the paragraph, so its output kept the surrounding <p></p> tags.
#所以我提取的是小栗旬的数据。。。。谁叫我最近入了栗子的坑