# Web scraper (爬虫)

import requests
import urllib.request
from bs4 import BeautifulSoup
import re
import sys
def saveFile(data, save_path='D://5550.txt'):
    """Append *data* to the text file at *save_path*.

    Args:
        data: text to append (scraped paragraph text).
        save_path: destination file; defaults to the original hard-coded
            Windows path so existing callers keep working.
    """
    # 'with' guarantees the handle is closed even if write() raises;
    # explicit UTF-8 avoids platform-default-encoding errors on the
    # Chinese text this scraper collects.
    with open(save_path, 'a', encoding='utf-8') as f_obj:
        f_obj.write(data)

def _fetch_soup(url, parser="html.parser"):
    """Fetch *url* and return its parsed BeautifulSoup tree.

    Decodes with the server's apparent encoding (pages are Chinese and
    may not declare charset correctly). Raises requests.RequestException
    on network errors / timeouts and HTTPError on non-2xx responses.
    """
    # timeout prevents the crawl from hanging forever on a dead server
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    return BeautifulSoup(r.text, parser)


def _save_centered_paragraphs(soup):
    """Print and persist every centered <p> inside div#content.

    Returns quietly when the page lacks the expected div#content layout
    (the original code hit an AttributeError here and swallowed it).
    """
    content = soup.find('div', id="content")
    if content is None:
        return
    for p in content.find_all('p', style="text-align: center"):
        print(p.string)
        saveFile(str(p.string) + '\n')


# Walk the article listing pages, scrape each linked article, then try
# its numbered continuation pages (foo_2.html .. foo_15.html).
for page in range(1, 100):
    url = ("http://www.zhujiage.com.cn/article/showlist.php"
           "?tid=24&TotalResult=95997&PageNo={}".format(page))
    try:
        soup = _fetch_soup(url)
    except requests.RequestException:
        # one bad listing page should not kill the whole crawl
        print("爬取失败")
        continue
    # Article links live in <li><div><a href="...20..."> structures;
    # hrefs containing "20" are the dated article URLs.
    for li in soup.find_all('li'):
        for div in li.find_all('div'):
            for a in div.find_all('a', href=re.compile('20')):
                url2 = a.get('href')
                try:
                    _save_centered_paragraphs(_fetch_soup(url2))
                except Exception:
                    # narrow scope vs. the original bare except: no longer
                    # swallows KeyboardInterrupt/SystemExit
                    print("爬取失败")
                # Continuation pages; the original used lxml here.
                for page2 in range(2, 16):
                    url3 = url2.replace('.html', '_{}.html').format(page2)
                    try:
                        _save_centered_paragraphs(_fetch_soup(url3, "lxml"))
                    except Exception:
                        print("爬取失败")





# (Trailing blog-page UI text from the copy-paste source removed —
#  "阅读更多 / 想对作者说点什么? / 博主推荐 / 换一批 / 没有更多推荐了,返回首页"
#  was CSDN page residue, not part of the program.)