[07] Scraping Jianshu, Cnblogs, and CSDN personal homepages to build an article index

Scraping a Jianshu personal homepage to build an index

The code in this article targets Python 2 (a Python 3 version follows each example); the comments are detailed, so the code can be read directly.
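The two versions of each script are otherwise identical; the only differences are the HTTP module (Python 2's urllib2 became urllib.request in Python 3), raw_input versus input, and the print syntax. If you want a single script that runs under both interpreters, a minimal import shim would look like this (my sketch, not part of the original code):

try:
    import urllib2 as http             # Python 2
except ImportError:
    import urllib.request as http     # Python 3
# Both modules expose Request and urlopen, so http.Request(...) and
# http.urlopen(...) work the same way under either interpreter.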

#-*- coding:utf-8 -*-

import urllib2
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print '█████████████◣ crawling started'
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib2.Request(url,headers=my_headers)
        content = urllib2.urlopen(request).read()
        return content

    # Extract titles and links, then save them as Markdown entries
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//div[@class="content"]/a[@class="title"]/text()')
        link = xml.xpath('//div[@class="content"]/a[@class="title"]/@href')
        print link
        # Pair each title with its link and append one Markdown link per line
        with open('JsIndex.txt','a+') as f:
            for data,href in zip(title,link):
                print data
                f.write('['+data.encode('utf-8')+']'+'('+'http://www.jianshu.com'+href+')'+ '\n')
        print '█████████████◣ crawl finished!'

# Program entry point
if __name__ == '__main__':
    page = int(raw_input('Enter the number of pages to crawl: '))
    for num in range(page):
        # Put your own homepage path here, e.g. u/c475403112ce
        url = 'http://www.jianshu.com/u/c475403112ce?order_by=shared_at&page=%s'%(num+1)
        # Create the crawler defined above
        js = CrawlJs()
        # Fetch the page HTML
        content = js.getArticle(url)
        # Extract and save the entries
        js.save(content)
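Each line written to JsIndex.txt is a Markdown link, [title](absolute url), so the finished file can be pasted straight into a Markdown document as a clickable index. One output line would look like this (illustrative, not real output):

[Some article title](http://www.jianshu.com/p/xxxxxxxxxxxx)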

Run output (screenshot)

The resulting Jianshu index (screenshot)

Python 3 code

#-*- coding:utf-8 -*-

import urllib.request
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print('█████████████◣ crawling started')
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib.request.Request(url,headers=my_headers)
        content = urllib.request.urlopen(request).read()
        return content

    # Extract titles and links, then save them as Markdown entries
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//div[@class="content"]/a[@class="title"]/text()')
        link = xml.xpath('//div[@class="content"]/a[@class="title"]/@href')
        print(link)
        # Pair each title with its link and append one Markdown link per line
        with open('JsIndex.txt','a+') as f:
            for data,href in zip(title,link):
                print(data)
                f.write('['+data+']'+'('+'http://www.jianshu.com'+href+')'+ '\n')
        print('█████████████◣ crawl finished!')

# Program entry point
if __name__ == '__main__':
    page = int(input('Enter the number of pages to crawl: '))
    for num in range(page):
        # Put your own homepage path here, e.g. u/c475403112ce
        url = 'http://www.jianshu.com/u/c475403112ce?order_by=shared_at&page=%s'%(num+1)
        js = CrawlJs()
        content = js.getArticle(url)
        js.save(content)
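The two XPath expressions do all the extraction work: the first collects the text of every a.title link inside a div.content, the second collects the matching href values in the same order. Here is a self-contained check of them against a hand-written fragment that mimics Jianshu's list markup (the fragment is an assumption for illustration, not a capture of the live page):

from lxml import etree

# Hand-written fragment mimicking Jianshu's article list (assumed structure)
html = '''
<ul class="note-list">
  <li><div class="content">
    <a class="title" href="/p/abc123">Sample post</a>
  </div></li>
</ul>
'''
doc = etree.HTML(html)
print(doc.xpath('//div[@class="content"]/a[@class="title"]/text()'))  # ['Sample post']
print(doc.xpath('//div[@class="content"]/a[@class="title"]/@href'))   # ['/p/abc123']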

Scraping a Cnblogs personal homepage to build an index

Python 2 code

#-*- coding:utf-8 -*-

import urllib2
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print '█████████████◣ crawling started'
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib2.Request(url,headers=my_headers)
        content = urllib2.urlopen(request).read()
        return content

    # Extract post titles and links, then save them
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//*[@class="postTitle"]/a/text()')
        link = xml.xpath('//*[@class="postTitle"]/a/@href')
        print (title,link)
        # Pair each title with its link; write "title  link" per line
        with open('bokeyuan.txt','a+') as f:
            for t,li in zip(title,link):
                print t+'  '+li
                f.write(t.encode('utf-8')+'  '+li+ '\n')
        print '█████████████◣ crawl finished!'

# Program entry point
if __name__ == '__main__':
    page = int(raw_input('Enter the number of pages to crawl: '))
    for num in range(page):
        # Put your own homepage URL here
        url = 'http://www.cnblogs.com/zhouxinfei/default.html?page=%s'%(num+1)
        js = CrawlJs()
        content = js.getArticle(url)
        js.save(content)

Python 3 code

#-*- coding:utf-8 -*-

import urllib.request
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print('█████████████◣ crawling started')
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib.request.Request(url,headers=my_headers)
        content = urllib.request.urlopen(request).read()
        return content

    # Extract post titles and links, then save them
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//*[@class="postTitle"]/a/text()')
        link = xml.xpath('//*[@class="postTitle"]/a/@href')
        print(title,link)
        # Pair each title with its link; write "title  link" per line
        with open('bokeyuan.txt','a+') as f:
            for t,li in zip(title,link):
                print(t+'  '+li)
                f.write(t+'  '+li+ '\n')
        print('█████████████◣ crawl finished!')

# Program entry point
if __name__ == '__main__':
    page = int(input('Enter the number of pages to crawl: '))
    for num in range(page):
        # Put your own homepage URL here
        url = 'http://www.cnblogs.com/zhouxinfei/default.html?page=%s'%(num+1)
        js = CrawlJs()
        content = js.getArticle(url)
        js.save(content)
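The script trusts whatever page count you type in; asking for more pages than exist simply processes empty pages. A small refinement, sketched below as my own addition rather than part of the original script, stops as soon as a page yields no post titles (it parses each page twice, once for the check and once inside save(), which is fine for a sketch):

from lxml import etree

js = CrawlJs()
num = 1
while True:
    content = js.getArticle('http://www.cnblogs.com/zhouxinfei/default.html?page=%s' % num)
    # An empty title list means we have run past the last page
    if not etree.HTML(content).xpath('//*[@class="postTitle"]/a/text()'):
        break
    js.save(content)
    num += 1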

Building a CSDN personal index

Python 3 code

#-*- coding:utf-8 -*-

import urllib.request
from lxml import etree

class CrawlJs():
    # Fetch the raw HTML of the given URL
    def getArticle(self,url):
        print('█████████████◣ crawling started')
        my_headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
        }
        request = urllib.request.Request(url,headers=my_headers)
        content = urllib.request.urlopen(request).read()
        return content

    # Extract article titles and links, then save them
    def save(self,content):
        xml = etree.HTML(content)
        title = xml.xpath('//div[@class="article-list"]/div/h4/a/text()[2]')
        link = xml.xpath('//div[@class="article-list"]/div/h4/a/@href')
        if not title:  # xpath returns a list; an empty one means no articles on this page
            return
        # Pair each title with its link; write "title  link" per line
        with open('csdn.txt','a+') as f:
            for t,li in zip(title,link):
                print(t.strip()+'  '+li)
                f.write(t.strip()+'  '+li+ '\n')
        print('█████████████◣ crawl finished!')

# Program entry point
if __name__ == '__main__':
    page = int(input('Enter the number of pages to crawl: '))
    for num in range(page):
        # Put your own homepage URL here
        url = 'https://blog.csdn.net/xc_zhou/article/list/%s'%(num+1)
        js = CrawlJs()
        content = js.getArticle(url)
        js.save(content)
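Note the text()[2] in the title XPath. On CSDN's article list, each <a> begins with a badge (for example an '原创'/original marker) in a child element, so the visible title is the second text node of the link and carries surrounding whitespace, which is why save() calls strip(). A self-contained illustration against an assumed fragment of that markup (not a capture of the live page):

from lxml import etree

# Assumed shape of one CSDN list entry: a badge precedes the title,
# making the title the *second* text node of the <a> element.
html = '''
<div class="article-list"><div><h4>
  <a href="https://blog.csdn.net/xc_zhou/article/details/1">
    <span class="article-type">原创</span>
    Sample article title
  </a>
</h4></div></div>
'''
doc = etree.HTML(html)
title = doc.xpath('//div[@class="article-list"]/div/h4/a/text()[2]')
print([t.strip() for t in title])  # ['Sample article title']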

Result (screenshot)
