cha = (datetime.now() - datetime(int(result_time[0]), int(result_time[1]),
int(result_time[2]))).days
print(cha)
if 30 < cha <= 32:
print(‘完成’)
break_flag.append(1)
break
continue
if cha > 32:
print(‘完成’)
break_flag.append(1)
break
row = {‘发表时间’: article_time[0], ‘标题’: article_title[0].strip(‘"’),
‘来源’: article_source[0],‘所有图片’:article_image,
‘文章内容’: article_content.strip()}
with open(‘/toutiao/’ + str(csv_name) + ‘文章.csv’, ‘a’, newline=‘’, encoding=‘gb18030’)as f:
f_csv = csv.DictWriter(f, headers1)
f_csv.writeheader()
f_csv.writerow(row)
print(‘正在爬取文章:’, article_title[0].strip(‘"’), article_time[0],
‘https://www.toutiao.com/i’ + i[‘group_id’])
time.sleep(1)
else:
pass
except Exception as e:
print(e, ‘https://www.toutiao.com/i’ + i[‘group_id’])
wenzhang(url=url, max_behot_time=max_behot_time, csv_name=csv_name, n=n)
else:
pass
except KeyError:
n += 1
print(‘第’ + str(n) + ‘次请求’, first_url)
time.sleep(1)
if n == max_qingqiu:
print(‘请求超过最大次数’)
break_flag.append(1)
else:
pass
except Exception as e:
print(e)
else:
pass
print(max_behot_time)
print(data)
# 文章详情页数据(已合并到文章数据)
def get_wenzhang_detail(url, csv_name=0):
headers1 = [‘发表时间’, ‘标题’, ‘来源’, ‘文章内容’]
res = requests.get(url, headers=headers_a, cookies=cookies)
time.sleep(1)
article_title = re.findall(“title: ‘(.*?)’”, res.text)
article_content = re.findall(“content: ‘(.*?)’”, res.text, re.S)
pattern = re.compile(r"[(a-zA-Z~-_!@#$%^+*&\/?|:.<>{}()';=)*|\d]")
article_content = re.sub(pattern, ‘’, article_content[0])
article_time = re.findall(“time: ‘(.*?)’”, res.text)
article_source = re.findall(“source: ‘(.*?)’”, res.text, re.S)
result_time = []
[result_time.append(i) for i in str(article_time[0]).split(’ ‘)[0].replace(’-‘, ‘,’).split(’,')]
print(result_time)
cha = (datetime.now() - datetime(int(result_time[0]), int(result_time[1]), int(result_time[2]))).days
print(cha)
if cha > 8:
return None
row = {‘发表时间’: article_time[0], ‘标题’: article_title[0].strip(‘"’), ‘来源’: article_source[0],
‘文章内容’: article_content.strip()}
with open(‘/toutiao/’ + str(csv_n