看了实验楼的项目,发现五八同城爬取还是有点难度,所以转战链家。
实验代码如下
from bs4 import BeautifulSoup
from urllib.request import urlopen
import csv
# Lianjia (Guangzhou) rental-listing URL template; {page} is the 1-based page number.
url = 'https://gz.lianjia.com/zufang/rs{page}/'
page = 0
# Open in text mode ('w'), NOT binary ('wb'): csv.writer writes str, and 'wb'
# raises: TypeError: a bytes-like object is required, not 'str'.
# newline='' is required by the csv module so row terminators are not doubled
# (blank lines between rows on Windows), and an explicit utf-8 encoding keeps
# the Chinese listing text intact.
csv_file = open('fang.csv', 'w', newline='', encoding='utf-8')
csv_write = csv.writer(csv_file, delimiter=',')
while True:
page += 1
print('正在下载网页:',url.format(page=page))
respone = urlopen(url.format(page=page))
html = BeautifulSoup(respone)
house_list = html.find('div',class_='list-wrap').find_all('div',class_="info-panel")
#采用先取大后取小的原则
#这里提取记得爬取有用的,在这我是提取‘info-panel’这的
for house in house_list:
house_title = house.find('a',target=