Web crawler
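
The script below is a small scraper for zhujiage.com.cn. It walks 99 pages of the chapter listing, follows every chapter link it finds, pulls the centered paragraphs out of each chapter page's div id="content" block, and appends the text to a local file; continuation pages of long chapters (xxx_2.html through xxx_15.html) are fetched as well.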

import re

import requests
from bs4 import BeautifulSoup

SAVE_PATH = 'D://5550.txt'


def save_file(data):
    # Append one line of scraped text to the output file.
    with open(SAVE_PATH, 'a', encoding='utf-8') as f_obj:
        f_obj.write(data)


def fetch(url):
    # GET a page, fail loudly on HTTP errors, and return the decoded HTML.
    r = requests.get(url)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    return r.text


def scrape_content(soup):
    # The chapter text sits in centered <p> tags inside <div id="content">.
    content = soup.find('div', id='content')
    if content is None:
        return
    for p in content.find_all('p', style='text-align: center'):
        if p.string:
            print(p.string)
            save_file(str(p.string) + '\n')


for page in range(1, 100):
    # Walk the paginated chapter list.
    url = ('http://www.zhujiage.com.cn/article/showlist.php'
           '?tid=24&TotalResult=95997&PageNo={}'.format(page))
    soup = BeautifulSoup(fetch(url), 'html.parser')

    # Chapter links are <a> tags (href containing '20') nested in <li><div>.
    for li in soup.find_all('li'):
        for div in li.find_all('div'):
            for a in div.find_all('a', href=re.compile('20')):
                url2 = a.get('href')
                try:
                    scrape_content(BeautifulSoup(fetch(url2), 'html.parser'))
                except Exception:
                    print('scrape failed')
                    continue

                # Long chapters continue on xxx_2.html ... xxx_15.html.
                for page2 in range(2, 16):
                    url3 = url2.replace('.html', '_{}.html'.format(page2))
                    try:
                        scrape_content(BeautifulSoup(fetch(url3), 'html.parser'))
                    except Exception:
                        # Missing continuation pages 404; just move on.
                        print('scrape failed')




