思路
- url：通过指定的 url 下载页面的源代码（使用 requests 库）
- 拿到源代码后,去获取包含我们抓取数据部分的标记内容
- 遍历这个list,
- 把解析之后的信息写入本地文件
- 段子的标题
- 段子的内容
import requests
import io
from bs4 import BeautifulSoup
# Base URL of the joke-listing pages; a page number is appended per request.
# NOTE(review): both names are re-bound later (the class builds its own
# headers and `__main__` redefines `url`), so these act as defaults only.
url = 'http://duanziwang.com/category/duanzi/page/'
# BUG FIX: the header name is 'User-Agent' (hyphenated); 'User Agent'
# is not a real HTTP header and would be ignored by servers.
headers = {'User-Agent': 'Mozilla/4.0 (compatible MSIE 6.0 Windows NT)'}
class PC():
    """Scraper for duanziwang.com joke listing pages.

    Downloads one listing page, extracts each joke's link and title from
    the <h2> headers, then fetches every linked joke page and appends its
    title and body paragraphs to a local text file.
    """

    # Destination file for scraped jokes (appended, utf8).
    OUTPUT_PATH = "d:/python/pachong/joke.txt"

    def __init__(self, url, pageIndex):
        # Full listing-page URL, e.g. '.../page/3'.
        self.url = url + str(pageIndex)
        # BUG FIX: header name must be 'User-Agent' (with a hyphen);
        # 'User Agent' is not a valid HTTP header name.
        self.headers = {'User-Agent': 'Mozilla/4.0 (compatible MSIE 6.0 Windows NT)'}

    def get_one_page_html(self):
        """Download the listing page and return its HTML text."""
        # BUG FIX: the second positional argument of requests.get() is
        # `params`, not `headers` — the original never actually sent the
        # User-Agent header. Pass it by keyword.
        resp = requests.get(self.url, headers=self.headers)
        print(resp)
        return resp.text

    def get_all_h2(self):
        """Return all <h2> tags found on the listing page."""
        # The original wrapped this in a one-iteration `for i in range(1, 2)`
        # loop and returned on the first pass — dead code, removed.
        soup = BeautifulSoup(self.get_one_page_html(), 'lxml')
        return soup.find_all('h2')

    def get_content(self):
        """Fetch every joke linked from the listing page and append its
        title and paragraphs to OUTPUT_PATH."""
        links = []
        titles = []
        for h2 in self.get_all_h2():
            a = h2.find('a')
            # Robustness: skip <h2> entries without a titled link instead
            # of crashing on a layout change.
            if a is None or not a.get('title'):
                continue
            links.append(a.get('href'))
            titles.append(a.get('title'))
        # Open the output file once, not once per item / per paragraph.
        with io.open(self.OUTPUT_PATH, "a+", encoding="utf8") as f:
            for title, link in zip(titles, links):
                # title[:-4] drops a trailing 4-char suffix present in the
                # title attribute — preserved from the original behavior.
                f.write('biaoti' + title[:-4] + '\n')
                resp = requests.get(link, headers=self.headers)
                soup = BeautifulSoup(resp.text, 'lxml')
                content = soup.find('div', class_='content')
                if content is None:  # robustness: no content div → skip
                    continue
                for p in content.find_all('p'):
                    f.write(p.text + '\n')
if __name__ == '__main__':
    # Scrape listing pages 1 through 3, appending each page's jokes
    # to the output file.
    base = 'http://duanziwang.com/category/duanzi/page/'
    for page in range(1, 4):
        PC(base, page).get_content()
注意
- 兼容 python 2 环境（因此使用 io.open 并显式指定 utf8 编码写文件）