# -*- coding: utf-8 -*-
import requests
import re
import time
import csv
import random
# Fetch one page, picking a User-Agent at random to look less like a bot.
def get_one_page(url):
    try:
        agent_1 = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0'
        agent_2 = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'
        agent = random.choice([agent_1, agent_2])
        print(agent)
        headers = {'User-Agent': agent,
                   'cookie': '__mta=88887716.1626503019866.1631289120211.1631289120649.51; _lxsdk_cuid=17ab72c9d0bc8-0323f5d1a31945-6373264-144000-17ab72c9d0bc8; uuid_n_v=v1; uuid=9248FEE0124A11EC880A97B5D02488E59B54C4437E2F42A987E0244568678E2B; _csrf=2654218597f6b13bfbe15d6a1d2d58bfe62265052b83c302c426ce9484874361; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1631287203; _lxsdk=9248FEE0124A11EC880A97B5D02488E59B54C4437E2F42A987E0244568678E2B; __mta=88887716.1626503019866.1631287217070.1631287315139.15; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1631289121; _lxsdk_s=17bd04b5446-304-1b6-122%7C%7C83',
                   'Accept-Language': 'zh-CN,zh;q=0.9'}
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None  # non-200 status: signal failure instead of returning 0
    except requests.exceptions.ConnectionError:
        print('ConnectionError -- please wait 3 seconds')
        time.sleep(3)
    except requests.exceptions.ChunkedEncodingError:
        print('ChunkedEncodingError -- please wait 3 seconds')
        time.sleep(3)
    except Exception:
        print('Unfortunately -- an unknown error happened, please wait 3 seconds')
        time.sleep(3)
    return None  # every error path falls through to here
# Crawl one page of the board and write each parsed item to the CSV file.
def spider(offset):
    url = 'https://maoyan.com/board/' + offset
    print(url)
    html = get_one_page(url)
    if not html:  # fetch failed (network error or anti-scraping page): skip instead of crashing
        return
    for item in parse_one_page(html):
        write_to_file(item)
# Parse the ten <dd> movie entries on a page with a regular expression.
def parse_one_page(html):
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)<.*?<a.*?title="(.*?)"'
        r'.*?data-src="(.*?)".*?</a>.*?star">\s*(.*?)\n\s*</p>.*?'
        r'releasetime">(.*?)</p>.*?integer">(.*?)</i>.*?'
        r'fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'title': item[1],
            'image': item[2],
            # drop the leading "主演:" ("starring:") label, three characters
            'actor': item[3].strip()[3:] if len(item[3]) > 3 else '',
            # drop the leading "上映时间:" ("release time:") label, five characters
            'time': item[4].strip()[5:] if len(item[4]) > 5 else '',
            # the score is split into integer and fraction parts in the HTML
            'score': item[5].strip() + item[6].strip()
        }
# Write the header row of the CSV file.
def write_to_filestart(x):
    # an explicit encoding keeps Chinese titles intact regardless of the OS default
    with open('top10.csv', 'a+', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(x)
# Append one movie record to the CSV file.
def write_to_file(content):
    with open('top10.csv', 'a+', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        values = list(content.values())
        print(values)
        writer.writerow(values)
# Main entry point: write the header, then crawl the ten pages of the board.
if __name__ == '__main__':
    x = ['index', 'title', 'image', 'actor', 'time and place', 'score']
    write_to_filestart(x)
    for i in range(10):
        spider('4?offset=' + str(10 * i))
        time.sleep(2)  # pause between pages to reduce the chance of being blocked
Unfixed bugs
1. The crawl throws an error as soon as it reaches Top 10.
Refreshing the browser (F5) while the crawl is running has some chance of producing the result shown in the figure below.
Errors are most likely around Top 10 and Top 84, but refreshing the browser at the right moment lets the run skip past the failing page.
Corrections are welcome!
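
The failures around Top 10 and Top 84 fit the case where get_one_page hands back no usable HTML (for example when Maoyan serves a verification page instead of the board), leaving the parser nothing to match. Beyond the guard in spider, re-requesting a failed page a few times often recovers it. The sketch below is a minimal, hypothetical wrapper, not part of the original script; the 'board-index' check assumes the real board page always contains that class name, the same one the parsing regex above relies on.

# Hypothetical retry wrapper (an assumption, not the original code):
# fetch a page up to `attempts` times before giving up, so one blocked
# response does not silently skip a whole page of ten movies.
def get_page_with_retry(url, attempts=3, delay=3):
    for attempt in range(attempts):
        html = get_one_page(url)
        if html and 'board-index' in html:  # crude check for a real board page
            return html
        print('attempt %d failed, retrying in %d seconds' % (attempt + 1, delay))
        time.sleep(delay)
    return None

spider could then call get_page_with_retry(url) in place of get_one_page(url), keeping the rest of the script unchanged.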