# 第一个爬虫程序 (first web-scraping program)
import requests
from bs4 import BeautifulSoup
import pandas as pd
##网页源码的获取
## Fetch every page of the Douban Top 250 movie list, extract the title,
## alternate title, and rating of each entry, and save them to a CSV file.
# Browser-like User-Agent so Douban does not reject the request as a bot.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36 Edg/101.0.1210.32'
}
cn_titles = []      # primary (Chinese) title, from the first '.title' node
other_titles = []   # alternate/foreign title, from the '.other' node
ratings = []        # rating score string, from the '.rating_num' node
# The Top 250 list spans exactly 10 pages of 25 entries each
# (start = 0, 25, ..., 225). The original range(0, 11) requested an
# 11th, empty page (start=250).
for page in range(10):
    url = f"https://movie.douban.com/top250?start={page * 25}&filter="
    # timeout prevents the script from hanging forever on a stalled connection
    res = requests.get(url, headers=headers, timeout=10)
    # fail fast on HTTP errors instead of silently parsing an error page
    res.raise_for_status()
    ## Parse the page content.
    soup = BeautifulSoup(res.text, 'lxml')
    for item in soup.select(".grid_view li"):
        cn_titles.append(item.select('.title')[0].text)
        other_titles.append(item.select('.other')[0].text)
        ratings.append(item.select('.rating_num')[0].text)
## Write the collected data to a CSV file.
data = {
    "国内名:": cn_titles,
    "国外名:": other_titles,
    "评分": ratings,
}
df = pd.DataFrame(data)
# utf-8-sig writes a BOM so Excel displays the Chinese text correctly.
df.to_csv('e:/test.csv', index=None, encoding="UTF_8_sig")