import os   # only needed by the commented-out CSV save below
import csv  # only needed by the commented-out CSV save below

import requests
from lxml import etree
from tqdm import tqdm


class WangYi:
    def __init__(self):
        # Request headers mimicking the NetEase News WeChat mini-program client.
        self.headers = {
            "accept": "application/json, text/plain, */*",
            # "cookie": "P_OINFO=\"ogm9zs_mt438bf4bbebce2696b575da23e27ef97b1@wx.163.com|1638333043|0|newsapp_weapp|00&99|null#0|null|newsapp_weapp|ogm9zs_mt438bf4bbebce2696b575da23e27ef97b1@wx.163.com\"",
            "user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36 MicroMessenger/7.0.9.501 NetType/WIFI MiniProgramEnv/Windows WindowsWechat",
            # "user-d": "MTYzODMyNjIzMDIyNTAuNjgzNzc4NjU5NDIwMTA4NQ==",
            # "user-did": "MTYzODMyNjIzMDIyNTAuNjgzNzc4NjU5NDIwMTA4NQ==",
            "x-requested-with": "XMLHttpRequest",
            "content-type": "application/json",
            "referer": "https://servicewechat.com/wx6fb15d8da981e528/53/page-frame.html",
            "accept-encoding": "gzip, deflate, br"
        }
        # Query parameters for the feed list API; `offset` is advanced in data_get().
        self.params = {
            "fn": 1,
            "from": "T1348649079062",
            "offset": 10,
            "ts": 1638334391922,
            "size": 10,
            "req_platform": "xiaochengxu",
            # "devId": "MTYzODMyNjIzMDIyNTAuNjgzNzc4NjU5NDIwMTA4NQ=="
        }
        self.url = 'https://gw.m.163.com/nc/api/v1/feed/dynamic/normal-list'
    def data_get(self):
        # Walk the feed list; each page returns `size` (10) items, so the
        # offset advances in steps of 10.
        for num in tqdm(range(10, 10000, 10)):
            self.params['offset'] = num
            resp = requests.get(self.url, headers=self.headers, params=self.params).json()
            items = resp['data']['items']
            for i in tqdm(items):
                a = {}
                a['title'] = i['title']
                a['id'] = i['docid']
                # print(a['title'])
                # Keep a running log of every (title, docid) pair.
                with open('1.txt', 'a', encoding='utf-8') as f:
                    f.write(str(a) + '\n')
                self.detail(a)
            # return
    def detail(self, a):
        docid = a['id']
        url = 'https://c.m.163.com/nc/article/{0}/full.html'.format(docid)
        try:
            resp = requests.get(url, headers=self.headers).json()
        except (requests.RequestException, ValueError):
            # Skip articles whose detail page cannot be fetched or parsed.
            return
        title = resp[docid]['title']
        body = self.body_get(resp[docid]['body'])
        ptime = resp[docid]['ptime']
        source = '网易 ' + resp[docid]['category']
        # print(body)
        self.save(url, title, body, ptime, source)
    def body_get(self, body):
        # Strip the HTML markup from the article body, keeping only the text nodes.
        tree = etree.HTML(body)
        txt = ''.join(tree.xpath('//text()'))
        return txt
    def save(self, url, title, body, ptime, source):
        # Append one record (url, title, body, publish time, source) to 2.txt,
        # one field per line.
        with open('2.txt', 'a', encoding='utf-8') as f:
            f.write(url + '\n')
            f.write(title + '\n')
            f.write(body + '\n')
            f.write(ptime + '\n')
            f.write(source + '\n')
    # Example feed request:
    # https://gw.m.163.com/nc/api/v1/feed/dynamic/normal-list?fn=2&from=T1348649079062&offset=20&ts=1638340831232&size=10&req_platform=xiaochengxu&devId=MTYzODMyNjIzMDIyNTAuNjgzNzc4NjU5NDIwMTA4NQ==

    # Alternative save: write each record as one row of 网易新闻.csv instead of 2.txt.
    # Fields: url, title, body, publish time, source site.
    # def save(self, url, title, body, ptime, source):
    #     csv_path = './网易新闻.csv'
    #     if not os.path.exists(csv_path):
    #         with open(csv_path, 'a', encoding='utf-8-sig', newline='') as csvFile:
    #             writer = csv.writer(csvFile)
    #             writer.writerow(['url', 'title', 'body', 'ptime', 'source'])
    #     with open(csv_path, 'a', encoding='utf-8-sig', newline='') as csvFile:
    #         writer = csv.writer(csvFile)
    #         writer.writerow([url, title, body, ptime, source])


if __name__ == "__main__":
    w = WangYi()
    w.data_get()