A while back I wrote a simple scraper, and after some thought I figured I might as well post it here.
It grabs the info for the top 100 movies on a movie site's ranking board. Since so many people scrape this site, it has added anti-scraping checks; adding a Cookie to the request headers and switching the URL to https is enough to get through.
To add the cookie: open the ranking page in your browser, find the request for that page in the developer tools' Network panel, and copy the Cookie value from its request headers into the headers dict in the code below.
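Before running the full script, it's worth a quick check that the copied cookie actually gets you past the block. A minimal sketch (the User-Agent and Cookie here are placeholders for your own values; 'board-index' is a class name from the real ranking page, the same one the regex below keys on):

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 ...',   # paste your own UA here
    'Cookie': '...',                   # paste the cookie copied from the browser
}
resp = requests.get('https://maoyan.com/board/4', headers=headers)
print(resp.status_code)                # 200 means the request went through
print('board-index' in resp.text)      # True only on the real ranking page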
Once it runs, each movie ends up as one JSON line in result.txt.
On to the code:
import requests
from requests.exceptions import RequestException
import json
import re
import time


def get_one_page(url):
    # Fetch one page of the board; return its HTML, or None on any failure.
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36',
            'Cookie': '__mta=48659097.1613224859256.1613224859256.1613224859256.1; uuid_n_v=v1; uuid=E576D6A06E0311EBBFF0B5C7CD64C7244E2F4B86DD3149F4AF3A5279EF8E7FE1; _csrf=f29d1e06f5f9fc7bcfa915f363d41bdc7109ffb52f3fd7fecf573ff4ff2f164d; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1613224859; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1613224859; _lxsdk_cuid=1779bb1d60fc8-0b3a8d1319103c-303464-e1000-1779bb1d60fc8; _lxsdk=E576D6A06E0311EBBFF0B5C7CD64C7244E2F4B86DD3149F4AF3A5279EF8E7FE1; _lxsdk_s=1779bb1d612-217-330-1de%7C%7C3',  # your own cookie goes here
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_one_page(html):
    # Raw strings so \d isn't treated as an invalid string escape in Python 3.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # drop the '主演:' prefix
            'time': item[4].strip()[5:],   # drop the '上映时间:' prefix
            'score': item[5] + item[6]     # integer part + fractional part
        }


def write_to_file(content):
    # Append each movie as one JSON line; ensure_ascii=False keeps Chinese readable.
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def main(offset):
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:  # blocked or failed; skip this page instead of crashing
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)


if __name__ == '__main__':
    for i in range(10):  # 10 pages x 10 movies = top 100
        main(offset=i * 10)
        time.sleep(1)    # don't hammer the site between pages
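To see what that regex actually captures without hitting the site, you can feed parse_one_page a skeletal <dd> block modeled on the class names the pattern matches against (the field values here are made up for illustration):

sample = '''<dd>
    <i class="board-index board-index-1">1</i>
    <img data-src="https://p0.example.com/poster.jpg" alt=""/>
    <p class="name"><a href="/films/1203">某电影</a></p>
    <p class="star">主演:演员甲,演员乙</p>
    <p class="releasetime">上映时间:1993-01-01</p>
    <i class="integer">9.</i><i class="fraction">5</i>
</dd>'''

for item in parse_one_page(sample):
    print(item)
# {'index': '1', 'image': 'https://p0.example.com/poster.jpg', 'title': '某电影',
#  'actor': '演员甲,演员乙', 'time': '1993-01-01', 'score': '9.5'}

The [3:] and [5:] slices in parse_one_page are what strip the '主演:' and '上映时间:' prefixes off the raw captures.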
All in all, the hard part was really getting the environment set up; there's nothing particularly noteworthy in the code itself.
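For what it's worth, requests is the only third-party dependency here (re, json, and time all ship with Python), so as far as the code goes, pip install requests should be the whole setup.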