原生爬虫爬取数据存入数据库的简单过程

本文章实现用Python3 借助pymysql模块爬取虎牙直播(huya.com)的一些直播信息 并存入数据库中的简单操作

第一步:爬取网站的直播信息并处理

第二步:存入本地 txt 文件

第三步:存入数据库

import  re
from urllib import request
import pymysql

class Spider():
    """Scrape streamer name / viewer-count pairs from Huya's 王者荣耀 page,
    rank them by viewer count, dump them to a local text file, and load
    that file into a MySQL table via pymysql.
    """
    url = 'https://www.huya.com/g/wzry'
    # Raw strings so the regex backslashes are not subject to Python
    # string-escape processing.
    root_pattern = r'<span class="txt">([\s\S]*?)</li>'
    name_pattern = r'<i class="nick" title="([\s\S]*?)">'
    number_pattern = r'<i class="js-num">([\s\S]*?)</i>'

    def _fetch_content(self):
        """Download the page and return it decoded as UTF-8 text."""
        # Context manager closes the HTTP response even if decoding fails.
        with request.urlopen(Spider.url) as r:
            return r.read().decode('utf-8')

    def refine(self, anchors):
        """Flatten each {'name': [...], 'number': [...]} list-valued dict
        into plain stripped strings; returns a lazy map object."""
        extract = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0],
        }
        return map(extract, anchors)

    def sort__seed(self, anchor):
        """Return the numeric viewer count for one anchor.

        A trailing '万' multiplies the value by 10000.
        Bug fix: the old pattern (digit-run with '*') split '1.5万' into
        ['1', '', '5', ...] and used only '1', losing the fractional part
        (10000 instead of 15000). Match the whole decimal number instead,
        and return 0.0 when no digits are present rather than crashing.
        """
        matches = re.findall(r'\d+(?:\.\d+)?', anchor['number'])
        number = float(matches[0]) if matches else 0.0
        if '万' in anchor['number']:
            number *= 10000
        return number

    def sort__rank(self, anchors):
        """Sort anchors by viewer count, highest first."""
        return sorted(anchors, key=self.sort__seed, reverse=True)

    def __analysis(self, htmls):
        """Extract raw {'name': [...], 'number': [...]} dicts from the HTML."""
        anchors = []
        for html in re.findall(Spider.root_pattern, htmls):
            anchors.append({
                'name': re.findall(Spider.name_pattern, html),
                'number': re.findall(Spider.number_pattern, html),
            })
        return anchors

    def __show(self, anchors):
        """Debug helper: print each refined anchor to stdout."""
        for anchor in anchors:
            print('name : ' + anchor['name'] + ' number : ' + anchor['number'])

    def __write(self, anchors):
        """Write ranked anchors to d:\\spider__write.txt, one per line,
        '^'-separated, preceded by a one-line header."""
        # Explicit UTF-8 so Chinese streamer names round-trip regardless of
        # the platform default; 'with' guarantees the file is closed.
        with open(r'd:\spider__write.txt', 'w', encoding='utf-8') as fr:
            fr.write('id+^+name+^+viewer__number\n')
            for anchor in anchors:
                fr.write(anchor['name'] + '^' + anchor['number'] + '\n')

    def read__db(self, anchors):
        """Load the written file into the MySQL table rank__list.

        Security fix: pass the values as query parameters instead of
        %-interpolating them into the SQL string (SQL injection and
        quote-breaking bugs). Also commit once at the end so the
        rollback in the except branch actually undoes partial inserts.
        """
        db = pymysql.connect(host='localhost', user='root',
                             password='123456', database='imooc')
        try:
            with db.cursor() as cursor, \
                    open(r'd:\spider__write.txt', 'r', encoding='utf-8') as fr:
                for line_no, line in enumerate(fr, start=1):
                    if line_no == 1:  # skip the header line
                        continue
                    fields = line.strip().split('^')
                    cursor.execute(
                        "insert into rank__list(name,viewer__number) values(%s,%s)",
                        [fields[0], fields[1]])
            db.commit()
        except Exception as e:
            print(e)
            db.rollback()
        finally:
            db.close()

    def go(self):
        """Run the full pipeline: fetch -> parse -> refine -> rank -> write -> db."""
        htmls = self._fetch_content()
        anchors = self.__analysis(htmls)
        anchors = list(self.refine(anchors))
        anchors = self.sort__rank(anchors)
        # self.__show(anchors)  # uncomment for console output
        self.__write(anchors)
        self.read__db(anchors)
if __name__ == '__main__':
    # Guard the entry point so importing this module does not immediately
    # hit the network and the database.
    spider = Spider()
    spider.go()

 

  • 1
    点赞
  • 16
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值