import os
import re
import time
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
def getUrls(url):
    """Return the absolute chapter URLs scraped from a novel's index page.

    Fetches *url*, collects every anchor whose href contains ".html", and
    resolves each href against *url*.  The first matching link is skipped
    (on this site it is the "latest chapter" shortcut, not part of the
    ordered chapter list — this mirrors the original ``i != 0`` counter).

    :param url: index page URL, e.g. ``'http://www.ahzww.net/0/178/'``
    :return: list of absolute chapter URLs, in page order
    """
    req = urllib.request.Request(url)
    # Close the HTTP response deterministically instead of leaking it.
    with urllib.request.urlopen(req) as page:
        html = page.read()
    soup = BeautifulSoup(html, 'html.parser')
    # r'\.html' escapes the dot: the original '.html' pattern let '.' match
    # any character, so e.g. hrefs containing 'xhtml' would slip through.
    links = soup.find_all(href=re.compile(r'\.html'))
    # urljoin resolves the (root-relative) hrefs against the index URL —
    # identical output to the old hard-coded 'http://www.ahzww.net' prefix
    # for this site, but now correct for any other index URL as well.
    return [urllib.parse.urljoin(url, k['href']) for k in links[1:]]
def getContent(url):
    """Fetch one chapter page and return its title and body text.

    Sends a desktop-browser User-Agent header (the site may reject the
    default urllib UA).

    :param url: chapter page URL, e.g. ``'http://www.ahzww.net/0/178/355185.html'``
    :return: tuple ``(title_text, body_text)``
    :raises ValueError: if the page lacks the expected ``<h1>`` or
        ``<div id="content">`` markup
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 '
                      '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    }
    # Request(headers=...) replaces the build_opener/addheaders dance and
    # lets us close the response with a context manager.
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as resp:
        html = resp.read()
    soup = BeautifulSoup(html, 'html.parser')
    content = soup.find('div', id='content')
    title = soup.find('h1')
    # Fail with a clear message instead of an opaque AttributeError from
    # None.get_text() when the page layout changes.
    if title is None or content is None:
        raise ValueError('unexpected page layout at ' + url)
    return title.get_text(), content.get_text()
if __name__ == '__main__':
    # Download every chapter of the novel and append them to one text file.
    urls = getUrls('http://www.ahzww.net/0/178/')
    # 'with' guarantees the file is closed even if a fetch fails mid-loop;
    # explicit UTF-8 avoids UnicodeEncodeError on platforms whose default
    # encoding cannot represent the Chinese text.
    with open("不负娇宠.txt", "w", encoding="utf-8") as fp:
        for url in urls:
            print(url)
            title, content = getContent(url)
            fp.write(title + "\n")
            # The site indents paragraphs with ideographic spaces; turn
            # each such run into a line break.
            fp.write(content.replace('  ', '\n') + "\n")
            time.sleep(2)  # be polite: pause between requests
    print("Done")