Crawling posts from Hupu's 步行街 (BXJ) board and storing them in MongoDB

import datetime

import requests
from bs4 import BeautifulSoup

from Mongom import MongoAPI


def getpage(link):
    # Fetch a page and return the parsed BeautifulSoup tree.
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
    r = requests.get(link, headers=headers)
    html = r.content.decode('utf-8')
    soup = BeautifulSoup(html, 'lxml')
    return soup


def get_data(post_list):
    datalist = []
    for each in post_list:
        title = each.find('div', class_='titlelink box').a.text.strip()  # post title
        title_link = each.find('div', class_='titlelink box').a['href']
        title_link = 'https://bbs.hupu.com' + title_link  # post URL
        author = each.find('div', 'author box').a.text.strip()  # author
        author_page = each.find('div', 'author box').a['href']  # author's profile page
        start_date = each.find('div', 'author box').contents[5].text.strip()  # post date
        reply_view = each.find('span', 'ansour box').text.strip()
        reply = reply_view.split('/')[0].strip()  # reply count
        view = reply_view.split('/')[1].strip()  # view count
        # Last-reply time comes in three shapes: 'HH:MM' for today,
        # 'MM-DD' for earlier this year, 'YYYY-MM-DD' for earlier years.
        reply_time = each.find('div', 'endreply box').a.text.strip()
        if ':' in reply_time:
            date_time = str(datetime.date.today()) + ' ' + reply_time
            date_time = datetime.datetime.strptime(date_time, '%Y-%m-%d %H:%M')
        elif reply_time.find('-') == 4:
            # A full date from an earlier year carries no time component
            # (anything with one was caught by the ':' branch above).
            date_time = datetime.datetime.strptime(reply_time, '%Y-%m-%d').date()
        else:
            # 'MM-DD' from the current year; use today's year rather than a
            # hardcoded one.
            date_time = datetime.datetime.strptime(
                str(datetime.date.today().year) + '-' + reply_time, '%Y-%m-%d').date()
        reply_user = each.find('div', 'endreply box').span.text.strip()  # last replier
        datalist.append([title, title_link, author, author_page, start_date,
                         reply, view, reply_user, date_time])
    return datalist


def main():
    link = 'https://bbs.hupu.com/bxj'
    soup = getpage(link)
    post_list = soup.find('ul', 'for-list').find_all('li')
    datalist = get_data(post_list)
    for eachone in datalist:
        print(eachone)
    hupu_post = MongoAPI("localhost", 27017, "hupu", "post")  # write to MongoDB
    for each in datalist:
        hupu_post.add(
            {"title": each[0],
             "post_link": each[1],
             "author": each[2],
             "author_page": each[3],
             "start_date": str(each[4]),
             "reply": each[5],
             "view": each[6],
             "last_reply": each[7],
             "last_reply_time": str(each[8]),
             }
        )

if __name__=='__main__':
    main()
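
The last-reply timestamp is the only field that needs real parsing logic. A quick sanity check of the three branches, pulled out into a standalone function (the sample strings are hypothetical; the exact text Hupu renders may differ):

import datetime

def parse_reply_time(reply_time):
    # Mirrors the three branches in get_data above.
    if ':' in reply_time:  # today: 'HH:MM'
        return datetime.datetime.strptime(
            str(datetime.date.today()) + ' ' + reply_time, '%Y-%m-%d %H:%M')
    elif reply_time.find('-') == 4:  # an earlier year: 'YYYY-MM-DD'
        return datetime.datetime.strptime(reply_time, '%Y-%m-%d').date()
    else:  # this year: 'MM-DD'
        return datetime.datetime.strptime(
            str(datetime.date.today().year) + '-' + reply_time, '%Y-%m-%d').date()

for sample in ['14:05', '05-21', '2018-12-31']:
    print(sample, '->', parse_reply_time(sample))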

The corresponding MongoDB helper class

from pymongo import MongoClient


class MongoAPI(object):
    # Thin wrapper around a single pymongo collection.
    def __init__(self, db_ip, db_port, db_name, table_name):
        self.db_ip = db_ip
        self.db_port = db_port
        self.db_name = db_name
        self.table_name = table_name
        self.conn = MongoClient(host=self.db_ip, port=self.db_port)
        self.db = self.conn[self.db_name]
        self.table = self.db[self.table_name]

    def get_one(self, query):
        return self.table.find_one(query, projection={"_id": False})

    def get_all(self, query):
        return self.table.find(query)

    def delete(self, query):
        return self.table.delete_many(query)

    def add(self, kv_dict):
        # insert() was deprecated in PyMongo 3 and removed in 4;
        # insert_one is the supported call.
        return self.table.insert_one(kv_dict)

    def check_exist(self, query):
        ret = self.table.find_one(query)
        return ret is not None

    def update(self, query, kv_dict):
        # Upsert: update the matching document, inserting it if absent.
        self.table.update_one(query, {'$set': kv_dict}, upsert=True)
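
A minimal usage sketch of the class (assumes a mongod running on localhost:27017; the document contents are made up):

db = MongoAPI("localhost", 27017, "hupu", "post")
db.update({"post_link": "https://bbs.hupu.com/demo.html"},
          {"title": "demo post", "reply": "0"})  # upsert keyed on post_link
print(db.check_exist({"title": "demo post"}))    # True
print(db.get_one({"title": "demo post"}))        # stored document, _id omitted
db.delete({"title": "demo post"})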

Crawling multiple pages

Note that only about 10 pages come back here; Hupu probably caps how far back a board's list pages go. The loop below therefore stops as soon as a page no longer contains the post list.

import datetime
import time

import requests
from bs4 import BeautifulSoup

from Mongom import MongoAPI


def getpage(link):
    # Fetch a page and return the parsed BeautifulSoup tree.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
    r = requests.get(link, headers=headers)
    html = r.content.decode('utf-8')
    soup = BeautifulSoup(html, 'lxml')
    return soup


def get_data(post_list):
    datalist = []
    for each in post_list:
        title = each.find('div', class_='titlelink box').a.text.strip()  # post title
        title_link = each.find('div', class_='titlelink box').a['href']
        title_link = 'https://bbs.hupu.com' + title_link  # post URL
        author = each.find('div', 'author box').a.text.strip()  # author
        author_page = each.find('div', 'author box').a['href']  # author's profile page
        start_date = each.find('div', 'author box').contents[5].text.strip()  # post date
        reply_view = each.find('span', 'ansour box').text.strip()
        reply = reply_view.split('/')[0].strip()  # reply count
        view = reply_view.split('/')[1].strip()  # view count
        # Last-reply time comes in three shapes: 'HH:MM' for today,
        # 'MM-DD' for earlier this year, 'YYYY-MM-DD' for earlier years.
        reply_time = each.find('div', 'endreply box').a.text.strip()
        if ':' in reply_time:
            date_time = str(datetime.date.today()) + ' ' + reply_time
            date_time = datetime.datetime.strptime(date_time, '%Y-%m-%d %H:%M')
        elif reply_time.find('-') == 4:
            # A full date from an earlier year carries no time component.
            date_time = datetime.datetime.strptime(reply_time, '%Y-%m-%d').date()
        else:
            # 'MM-DD' from the current year; use today's year rather than a
            # hardcoded one.
            date_time = datetime.datetime.strptime(
                str(datetime.date.today().year) + '-' + reply_time, '%Y-%m-%d').date()
        reply_user = each.find('div', 'endreply box').span.text.strip()  # last replier
        datalist.append([title, title_link, author, author_page, start_date,
                         reply, view, reply_user, date_time])
    return datalist


def main():
    hupu_post = MongoAPI("localhost", 27017, "hupu", "post")  # write to MongoDB
    for i in range(1, 100):
        link = 'https://bbs.hupu.com/bxj-' + str(i)
        soup = getpage(link)
        post = soup.find('ul', 'for-list')
        if post is None:
            # Past Hupu's page cap the list node disappears; stop cleanly
            # instead of crashing on a None lookup.
            print("Page %d has no post list; stopping." % i)
            break
        post_list = post.find_all('li')
        datalist = get_data(post_list)
        for each in datalist:
            # Upsert keyed on post_link so re-crawls refresh rather than duplicate.
            hupu_post.update(
                {"post_link": each[1]},
                {"title": each[0],
                 "post_link": each[1],
                 "author": each[2],
                 "author_page": each[3],
                 "start_date": str(each[4]),
                 "reply": each[5],
                 "view": each[6],
                 "last_reply": each[7],
                 "last_reply_time": str(each[8]),
                 }
            )
        print("Page %d done; sleeping 3 seconds" % i)
        time.sleep(3)



if __name__ == '__main__':
    main()
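
Once the crawl finishes, the data can be read back with plain pymongo. A short sketch (assumes the same local MongoDB; note that reply and view are stored as strings, so sorting them numerically would need a type conversion first):

from pymongo import MongoClient

client = MongoClient("localhost", 27017)
posts = client["hupu"]["post"]
print(posts.count_documents({}))  # how many posts were stored
for doc in posts.find({}, {"_id": False}).limit(3):
    print(doc["title"], doc["last_reply_time"])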