Web Scraping Tutorial for Beginners: Douban
- A scraper boils down to three steps: fetch the pages, parse the content, and save the results to a file.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
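Douban tends to reject requests that lack a browser-like User-Agent (you may get a 418 or 403 instead of the page). A quick sanity check using the headers dict above, before running the full scraper:

import requests

resp = requests.get('https://movie.douban.com/top250?start=0', headers=headers)
print(resp.status_code)   # expect 200; 403/418 usually means the UA was rejected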
# Fetch the pages from Douban and write the results to a local file
def main():
    # Base URL of the Douban Movie Top250 list
    url = 'https://movie.douban.com/top250?start='
    data_list = getdata(url)
    save_csv(data_list)
1. Fetch the pages
# 10 pages in total, 25 movies per page
def getdata(base_url):
    data_list = []
    for i in range(0, 10):
        url = base_url + str(i * 25)
        html = ask_url(url)
        if html is not None:   # skip pages that failed to download
            parse_html(html, data_list)
    return data_list
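One thing the loop above omits: ten back-to-back requests may trip Douban's rate limiting. A minimal variant of getdata with a polite pause between pages (the time.sleep call is my addition, not part of the original tutorial):

import time

def getdata(base_url):
    data_list = []
    for i in range(0, 10):
        url = base_url + str(i * 25)
        html = ask_url(url)
        if html is not None:           # skip pages that failed to download
            parse_html(html, data_list)
        time.sleep(1)                  # pause one second between page fetches
    return data_list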
2. Parse the pages
def parse_html(html, data_list):
    soup = BeautifulSoup(html, 'html.parser')
    # Each movie sits in a <div class="item">
    movies = soup.find_all('div', class_='item')
    for movie in movies:
        # Each CSV row is: name, link, score, quote, rating count
        name = movie.find('span', class_='title').text.strip()
        link = movie.find('a')['href']
        score = movie.find('span', class_='rating_num').text.strip()
        try:
            comment = movie.find('span', class_='inq').text.strip()
        except AttributeError:
            # a few movies have no one-line quote
            comment = ''
        # The rating count sits in the <span> right after <span property="v:best">;
        # hop over the whitespace text node and then the span itself
        comment_num = movie.find('div', class_='star').find('span', property='v:best').next_sibling.next_sibling.text.strip()
        comment_num = comment_num.replace('人评价', '')
        data_list.append([name, link, score, comment, comment_num])
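The double next_sibling hop above is fragile, since it depends on the exact whitespace between the spans. A sketch of a sturdier alternative, assuming the rating count is always the last <span> inside div.star (true for the current Top250 markup, but worth re-checking if the page changes):

def rating_count(movie):
    # All <span> tags inside the star block; the count ("xxx人评价") is the last one
    spans = movie.find('div', class_='star').find_all('span')
    return spans[-1].text.strip().replace('人评价', '')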
3. Save to a file
def save_csv(data_list):
    if os.path.exists('../resource/douban_top250.csv'):
        os.remove('../resource/douban_top250.csv')
    # Write the data to the local file
    with open('../resource/douban_top250.csv', 'w', encoding='utf-8') as f:
        f.write('名称,链接,评分,评论,评价人数\n')
        for item in data_list:
            f.write('{},{},{},{},{}\n'.format(item[0], item[1], item[2], item[3], item[4]))
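One caveat with the hand-rolled writer above: if a movie's one-line quote contains a comma, the row splits into extra columns. A sketch using the standard csv module instead, which quotes such fields automatically (same path and header as above):

import csv

def save_csv(data_list):
    # newline='' lets the csv module control line endings itself
    with open('../resource/douban_top250.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['名称', '链接', '评分', '评论', '评价人数'])
        writer.writerows(data_list)   # fields containing commas get quoted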
The complete source code:
# Scrape the Douban Movie Top250 and save the results to a local file
import requests
from bs4 import BeautifulSoup
import os

# Send requests with a browser-like User-Agent so Douban does not block us
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

def main():
    # Base URL of the Douban Movie Top250 list
    url = 'https://movie.douban.com/top250?start='
    data_list = getdata(url)
    save_csv(data_list)

def save_csv(data_list):
    if os.path.exists('../resource/douban_top250.csv'):
        os.remove('../resource/douban_top250.csv')
    # Write the data to the local file
    with open('../resource/douban_top250.csv', 'w', encoding='utf-8') as f:
        f.write('名称,链接,评分,评论,评价人数\n')
        for item in data_list:
            f.write('{},{},{},{},{}\n'.format(item[0], item[1], item[2], item[3], item[4]))

def getdata(base_url):
    data_list = []
    for i in range(0, 10):
        url = base_url + str(i * 25)
        html = ask_url(url)
        if html is not None:   # skip pages that failed to download
            parse_html(html, data_list)
    return data_list

def ask_url(url):
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.content.decode('utf-8')
    else:
        return None

def parse_html(html, data_list):
    soup = BeautifulSoup(html, 'html.parser')
    # Each movie sits in a <div class="item">
    movies = soup.find_all('div', class_='item')
    for movie in movies:
        # Each CSV row is: name, link, score, quote, rating count
        name = movie.find('span', class_='title').text.strip()
        link = movie.find('a')['href']
        score = movie.find('span', class_='rating_num').text.strip()
        try:
            comment = movie.find('span', class_='inq').text.strip()
        except AttributeError:
            # a few movies have no one-line quote
            comment = ''
        # The rating count sits in the <span> right after <span property="v:best">;
        # hop over the whitespace text node and then the span itself
        comment_num = movie.find('div', class_='star').find('span', property='v:best').next_sibling.next_sibling.text.strip()
        comment_num = comment_num.replace('人评价', '')
        data_list.append([name, link, score, comment, comment_num])

if __name__ == "__main__":
    main()
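After a run, a quick read-back can confirm the file is complete (a minimal sketch, not part of the original script):

import csv

with open('../resource/douban_top250.csv', encoding='utf-8') as f:
    rows = list(csv.reader(f))
print(len(rows) - 1)   # expect 250 data rows after the header
print(rows[1])         # first movie: [name, link, score, quote, rating count]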