Scraping news with Python and saving it: notes on a script that crawls one site's news pages

The full script is below, with the key parts commented. It walks seven listing pages on xforange.com, follows each article link it finds, extracts the title, subtitle, date, source, and body, and writes everything to an .xls file with xlwt.

# coding=utf-8
from lxml import etree
import requests
import re
import time
import xlwt

# Headers for the article pages, copied from a browser request
headers = {
    'authority': 'www.xforange.com',
    'method': 'GET',
    'path': '/article/3304.html',
    'scheme': 'https',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'max-age=0',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}

# A lighter header set used for the listing pages
list_headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
}
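# A side note, not part of the original script: 'authority', 'method',
# 'path' and 'scheme' above are HTTP/2 pseudo-headers copied from the
# browser's DevTools; requests just sends them as ordinary header fields,
# which the server largely ignores. A trimmed dict along these lines is
# usually enough for a scrape like this one:
#
#     minimal_headers = {
#         'user-agent': headers['user-agent'],
#         'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     }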

url_1 = "https://www.xforange.com/news/index"
url_2 = ".html"

# Create a workbook with UTF-8 encoding
workbook = xlwt.Workbook(encoding='utf-8')
# Create a worksheet; cell_overwrite_ok lets a cell be rewritten
worksheet = workbook.add_sheet('My Worksheet', cell_overwrite_ok=True)

# Header row: title, subtitle, date, source, body
worksheet.write(0, 0, "标题")
worksheet.write(0, 1, "副标题")
worksheet.write(0, 2, "时间")
worksheet.write(0, 3, "来源")
worksheet.write(0, 4, "内容")

d = 1  # next Excel row to write

for i in range(1, 8):  # listing pages index1.html .. index7.html
    url = url_1 + str(i) + url_2
    print(url)  # the listing-page URL
    html = requests.get(url, headers=list_headers).text
    # Pull the four-digit article URLs out of the listing page
    # (an lxml-based alternative is sketched after the script)
    hrefs = re.findall(r'https://www\.xforange\.com/news/[0-9]{4}\.html', html)
    # Keep at most the first 10 articles per page
    for url in hrefs[:10]:
        time.sleep(1)  # be polite: pause between article requests

        # print(url)
        response = requests.get(url, headers=headers)
        html = etree.HTML(response.text)
        article = html.xpath('//div[contains(@class,"article_show")]')[0]
        title = article.xpath('.//div[@class="left"]/h1/text()')[0]
        # The first paragraph of the body serves as the subtitle; join the
        # text nodes so a plain string, not a list, goes into the Excel cell
        subtitle = ''.join(article.xpath('.//div[@class="content"]/p[1]/text()'))
        date = article.xpath(
            './/div[@class="info"]/span[1]/text()')[0].split(':')[1]
        source = article.xpath(
            './/div[@class="info"]/span[2]/text()')[0].split(':')[1]
        # content1 = ''.join([text for text in article.xpath('.//div[@class="content"]//text()') if '\t' not in text])
        a = response.text
        # Grab the article body as raw HTML (rich text); the pattern targets
        # the same content div as the xpath above, and re.S lets .*? match
        # across newlines
        content = re.search(
            r'<div class="content">.*?</div>', a, re.S).group(0)
        # print(content)
        # Write one row to Excel; the arguments are row, column, value
        print(subtitle)
        worksheet.write(d, 0, title)  # title
        worksheet.write(d, 1, subtitle)  # subtitle
        worksheet.write(d, 2, date)  # date
        worksheet.write(d, 3, "转自" + source)  # source ("reposted from" + source)
        worksheet.write(d, 4, content)  # body, kept as rich-text HTML
        d += 1

# Save the workbook
workbook.save('信丰脐橙果农网行业动态数据.xls')
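The listing pages are mined with a regular expression, which works here because the article URLs follow a fixed pattern. The same links can also be collected with lxml, which the script already imports. Below is a minimal sketch of that alternative; the helper name article_links, and the assumption that every article link appears as a plain <a href>, are mine rather than the original author's:

import re
from lxml import etree

def article_links(listing_html):
    # Return the article URLs found on one listing page, in document
    # order, with duplicates removed
    tree = etree.HTML(listing_html)
    pattern = re.compile(r'https://www\.xforange\.com/news/\d{4}\.html$')
    links = [h for h in tree.xpath('//a/@href') if pattern.match(h)]
    return list(dict.fromkeys(links))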
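xlwt produces the legacy .xls format, so a quick sanity check is to read the first few rows back. A minimal sketch, assuming the xlrd package is installed alongside xlwt:

import xlrd

book = xlrd.open_workbook('信丰脐橙果农网行业动态数据.xls')
sheet = book.sheet_by_index(0)
print(sheet.nrows, 'rows, including the header row')
for r in range(min(3, sheet.nrows)):
    print([sheet.cell_value(r, c) for c in range(sheet.ncols)])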
