import requests
from bs4 import BeautifulSoup
import csv
# Scrape the Baidu Baike page for 易烊千玺 (Jackson Yee) and export his film
# appearances — title plus two detail fields (from the first two <dd> tags of
# each entry) — to 03_1_qianxi.csv.
url = "https://baike.baidu.com/item/易烊千玺"
header = {
    # A browser User-Agent so Baidu serves the normal page instead of blocking the bot.
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36"
}
resp = requests.get(url, headers=header)
resp.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
resp.encoding = "UTF-8"
page = BeautifulSoup(resp.text, "lxml")

# Narrow down to the "featured films" carousel container.
# NOTE(review): this element id looks page/session-generated — it will likely
# differ on another fetch; confirm, or select by class alone if it breaks.
div1 = page.find("div", class_="viewport", id="marqueeViewport_2886060508")

# `with` guarantees the CSV file is closed even if a parse step raises mid-loop.
with open("03_1_qianxi.csv", "w", encoding="UTF-8", newline="") as f:
    csv_writer = csv.writer(f)
    for div in div1.find_all("div", class_="info"):
        bs = div.find_all("b", class_="title")
        dds = div.find_all("dd")
        # The title text carries a trailing "[...]" annotation; keep only the
        # part before the bracket.
        title = str(bs[0].text).split("[")[0]
        csv_writer.writerow([title, dds[0].text, dds[1].text])
# Source note (CSDN blog footer accidentally pasted into this file):
# "python3 learning path -- 9.2.1) - fetching page info",
# first published 2022-05-19 08:39:41; keywords auto-generated by CSDN.