A Python crawler for scraping a series of news articles

The requirements for this assignment come from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE2/homework/2941

Since the crawler issues many requests, the request-and-parse step is wrapped in a small helper:

import re
import time

import requests
from bs4 import BeautifulSoup


def transform_dom_tree(url):
    '''
        Fetch the HTML at the given url and parse it into a DOM tree.
    '''
    response = requests.get(url)
    response.encoding = "utf-8"
    return BeautifulSoup(response.text, "html.parser")
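
For a quick sanity check, the helper can be pointed at the news index page (a minimal sketch, assuming the site is reachable; the URL is the one the crawler class below starts from):

# Minimal sanity check for the helper; the index URL is the one the
# crawler below starts from.
dom_tree = transform_dom_tree("http://news.gzcc.cn/html/xiaoyuanxinwen/")
print(dom_tree.title.text)                        # page <title>
print(len(dom_tree.select(".news-list li a")))    # news links on the page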


The content of an individual news article is encapsulated as follows:

class News(object):
    '''
        Data model for a GZCC campus news article.
    '''
    def __init__(self, url):
        self._url = url                                   # URL of the news page
        self._dom_tree = transform_dom_tree(url)
        self._show_infos = self._dom_tree.select(".show-info")[0].text.split()
        self._update_time = ""
        self._auditor = ""
        self._author = ""
        self._origin = ""

        # The ".show-info" line is a run of whitespace-separated
        # "label:value" fields; match each field by its label prefix.
        for index, args in enumerate(self._show_infos):
            if args.startswith("发布时间"):
                # The timestamp spans two tokens: the date and the time of day.
                self._update_time = args[5:] + " " + self._show_infos[index + 1]
            elif args.startswith("作者"):
                self._author = args[3:]
            elif args.startswith("审核"):
                self._auditor = args[3:]
            elif args.startswith("来源"):
                self._origin = args[3:]

    @property
    def title(self):
        '''
            :return: the news title
        '''
        return self._dom_tree.select(".show-title")[0].text

    @property
    def author(self):
        '''
            :return: the news author
        '''
        return self._author

    @property
    def auditor(self):
        '''
            :return: the news reviewer
        '''
        return self._auditor

    @property
    def origin(self):
        '''
            :return: the publishing unit
        '''
        return self._origin

    @property
    def update_time(self):
        '''
            :return: the article's last update time
        '''
        return self._update_time

    @update_time.setter
    def update_time(self, time):
        '''
            Set the last update time.
            :param time: the timestamp
        '''
        self._update_time = time

    @property
    def times(self):
        '''
            :return: the click count
        '''
        # The click count comes from a separate counter API, not the page itself.
        click_url = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(self.news_id)
        response = requests.get(click_url)
        click = re.findall(r'(\d+)', response.text)[-1]
        return click

    @property
    def news_id(self):
        '''
            :return: the news identifier, extracted from the article URL
        '''
        return re.match(r'http://news.gzcc.cn/html/.*/.*/(\d+).html', self._url).group(1)

    @property
    def summary(self):
        '''
            :return: the news summary
        '''
        return self._summary

    @summary.setter
    def summary(self, text):
        '''
            Set the news summary.
            :param text: the summary text
        '''
        self._summary = text

    def to_dict(self):
        '''
            Convert this instance into a dictionary.
            :return: the resulting dictionary
        '''
        result = {}                    # avoid shadowing the built-in dict
        result["news_url"] = self._url
        result["news_id"] = self.news_id
        result["news_title"] = self.title
        result["news_summary"] = self.summary
        result["news_update_time"] = self.update_time
        result["news_times"] = self.times
        result["news_author"] = self.author
        result["news_auditor"] = self.auditor
        result["news_origin"] = self.origin
        return result
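
A quick usage sketch of the model (the article URL is a hypothetical example following the site's URL pattern; note that to_dict() triggers an extra request for the click count):

# Hypothetical usage; the URL is a placeholder matching the pattern
# "http://news.gzcc.cn/html/<year>/<column_date>/<id>.html".
news = News("http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11111.html")
news.summary = "(normally filled in from the list page)"
print(news.title, news.update_time)
print(news.to_dict())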


The batch operations over the news pages are implemented as follows:

class GZCCNewsReptile(object):
    '''
        Crawler for GZCC campus news.
    '''
    def __init__(self):
        self._news_type = "dict"
        self._root_url = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
        self._url = self.page_url()
        self._dom_tree = transform_dom_tree(self._url)

    def page_url(self, page=1):
        '''
            Map a page number to the corresponding list-page URL.
            :param page: the page number to resolve
        '''
        # Page 1 is served as "index.html"; every later page as "<n>.html".
        if page == 1:
            self._now_page = "index"
        else:
            self._now_page = page
        return self._root_url + str(self._now_page) + ".html"

    @property
    def count(self):
        '''
            :return: the total number of campus news articles
        '''
        # The ".a1" element text ends with "条", so drop the last character.
        count = self._dom_tree.select(".a1")[0].text
        return int(count[0:-1])

    @property
    def page(self):
        '''
            :return: the total number of list pages
        '''
        # Ten articles per page; round up if the last page is partially filled.
        page = self.count / 10
        int_page = int(page)
        if page > int_page:
            return int_page + 1
        else:
            return int_page

    def get_news_from_news_page_size(self, start_page, end_page):
        '''
            Crawl a range of pages. Raises IndexError if start_page is
            below 1, if start_page is greater than end_page, or if
            end_page exceeds the total page count.
            :param start_page: first page of the range to crawl
            :param end_page:   end of the range (exclusive)
        '''
        if start_page < 1:
            raise IndexError("start_page is out of range")
        if start_page > end_page:
            raise IndexError("start_page is greater than end_page")
        if end_page > self.page:
            raise IndexError("end_page is out of range")
        news_list = []
        #  Crawl every page in the requested range.
        for index in range(start_page, end_page):
            #  news_page_list = self.get_news_from_page_url(self.page_url(index))
            news_page_list = self.get_page_news(index)  # better encapsulated than the line above
            news_list.extend(news_page_list)
        return news_list

    def get_page_news(self, page):
        '''
            Fetch the news on the given page. Raises IndexError if the
            page number falls outside the crawlable range.
            :param page: the page number
            :return: the list of news items on that page
        '''
        # Compare against the page count (self.page), not the article count.
        if page < 1 or page > self.page:
            raise IndexError("page is out of range")
        else:
            print("\rCurrently on page %d" % page, end="")
            return self.get_news_from_page_url(self.page_url(page))

    def get_news_from_page_url(self, url):
        '''
            Fetch every news item on the list page at the given URL.
            :param url: the list-page URL
            :return: the news_page_list for that page
        '''
        dom_tree = transform_dom_tree(url)
        news_ui = dom_tree.select(".news-list li a")
        news_page_list = []
        for index in range(0, 10):
            href = None
            try:
                a_tag = news_ui[index]
                href = a_tag.get("href")

                news = News(href)
                news.summary = a_tag.select(".news-list-description")[0].text
                # Dispatch on the configured output type (e.g. to_dict);
                # getattr is safer than the eval() used originally.
                news_page_list.append(getattr(news, "to_" + self.news_type)())
            except Exception:
                error_log = "Broken page: %s" % href
                print(error_log, end="")
                with open("./logger.txt", "a", encoding="utf-8") as file:
                    now_time = time.time()
                    file.write(error_log + " at: "
                               + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now_time)) + "\n")

        return news_page_list

    @property
    def news_type(self):
        '''
            Default output type for news items.
            :return: the configured type
        '''
        return self._news_type

    @news_type.setter
    def news_type(self, type="dict"):
        '''
            Select the output type; only "dict" is currently supported.
            :param type: the requested type
        '''
        if type == "dict":
            self._news_type = type
        else:
            raise Exception("unsupported news type")
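
A minimal usage sketch of the crawler (assuming the site is reachable; page numbers start at 1):

# Crawl a single list page and peek at the first item.
reptile = GZCCNewsReptile()
print(reptile.count, reptile.page)    # total articles, total pages
page_news = reptile.get_page_news(1)  # list of dicts for page 1
if page_news:
    print(page_news[0]["news_title"])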

The test code is as follows:

    import sqlite3  # only needed for the commented-out SQL branch
    import pandas

    #  Test that GZCCNewsReptile works
    start_page = 106
    # news_list = GZCCNewsReptile().get_news_from_news_page_size(start_page, start_page + 10)
    news_list = GZCCNewsReptile().get_news_from_news_page_size(1, 256)
    pandas_date = pandas.DataFrame(news_list)
    print(news_list)
    # csv
    pandas_date.to_csv("./pandas_date.csv", encoding="utf-8-sig")
    # sql
    # with sqlite3.connect("test.sqlite") as db:
    #     pandas_date.to_sql("test", db)
    #     date = pandas.read_sql_query('SELECT * FROM test', con=db)
    #     date[date["news_times"] > 380]
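
For reference, a working version of the commented-out SQL branch might look like the sketch below. It reuses pandas_date from the test code; the astype(int) cast reflects the assumption that news_times is scraped as a string:

# Hedged sketch of the SQL round trip; table and file names follow the
# comments above, and news_times (a string in the scraped data) is cast
# to int before filtering.
with sqlite3.connect("test.sqlite") as db:
    pandas_date.to_sql("test", db, if_exists="replace")
    frame = pandas.read_sql_query('SELECT * FROM test', con=db)
    frame["news_times"] = frame["news_times"].astype(int)
    print(frame[frame["news_times"] > 380])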


Reposted from: https://www.cnblogs.com/destinymingyun/p/10674744.html
