Web Crawler
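A small crawler built on requests and BeautifulSoup: it walks the article list pages of www.zhujiage.com.cn, follows every article link on each page, pulls the centered paragraphs out of the div#content body (including the _2.html through _15.html continuation pages of long articles), and appends the extracted text to a local file.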

import requests
from bs4 import BeautifulSoup
import re

def saveFile(data):
    # Append one line of extracted text to the output file.
    save_path = 'D://5550.txt'
    f_obj = open(save_path, 'a', encoding='utf-8')  # utf-8 so Chinese text writes reliably
    f_obj.write(data)
    f_obj.close()

# Walk the article list pages (pages 1-99 of the section).
for page in range(1, 100):

    url = "http://www.zhujiage.com.cn/article/showlist.php?tid=24&TotalResult=95997&PageNo={}".format(page)
    r = requests.get(url)
    r.raise_for_status()
    r.encoding = r.apparent_encoding  # the site is not served as UTF-8; guess the real charset
    demo = r.text
    soup = BeautifulSoup(demo, "html.parser")
    # Each list item wraps its article links in a <div>; article URLs
    # contain the year, hence the '20' pattern on href.
    for link in soup.find_all('li'):
        try:
            for link2 in link.find_all('div'):
                try:
                    for link3 in link2.find_all('a', href=re.compile('20')):
                        url2 = link3.get('href')
                        r2 = requests.get(url2)
                        r2.raise_for_status()
                        r2.encoding = r2.apparent_encoding
                        demo2 = r2.text
                        soup2 = BeautifulSoup(demo2, "html.parser")
                        # The article body sits in div#content; the text we
                        # want is in its centered <p> tags.
                        for link4 in soup2.find('div', id="content").find_all('p', style="text-align: center"):
                            try:
                                print(link4.string)
                                saveFile(str(link4.string) + '\n')
                            except Exception:
                                print("Scrape failed")
                        # Long articles continue on pages named
                        # xxx_2.html ... xxx_15.html; fetch those too.
                        for page2 in range(2, 16):
                            try:
                                url3 = url2.replace('.html', '_{}.html').format(page2)
                                r3 = requests.get(url3)
                                r3.raise_for_status()
                                r3.encoding = r3.apparent_encoding
                                demo3 = r3.text
                                soup3 = BeautifulSoup(demo3, "lxml")
                                for link5 in soup3.find('div', id="content").find_all('p', style="text-align: center"):
                                    try:
                                        print(link5.string)
                                        saveFile(str(link5.string) + '\n')
                                    except Exception:
                                        print("Scrape failed")
                            except Exception:
                                print("Scrape failed")
                except Exception:
                    print("Scrape failed")
        except Exception:
            print("Scrape failed")




