This example crawls every rental listing on every page of a rental-housing site, and shows two ways of collecting the records for the CSV output: as dictionaries and as lists.
Things to keep in mind while coding:
- Requests occasionally fail; when that happens, simply refresh the page (re-send the request).
- If the CSS path passed to soup.select() is very long, drop part of the leading portion; overly long paths tend to cause errors (I am not sure exactly why). See the first sketch after this list.
- soup.select() returns a list, even when only one element matches (also shown in the first sketch after this list).
- A CSV saved as plain UTF-8 shows garbled characters when opened in Excel; saving with 'utf_8_sig' (UTF-8 with a BOM) fixes it, as in the second sketch after this list.
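A minimal sketch of the selector points, using a small hand-written HTML snippet (the snippet and the `div.info` selectors are made up for illustration, not taken from the target site): soup.select() always returns a list, and a trimmed selector matches the same elements as the full path starting from body.

```python
from bs4 import BeautifulSoup

html = """
<body>
  <div class="wrap"><div class="info"><a href="/room/1">Room 1</a></div></div>
  <div class="wrap"><div class="info"><a href="/room/2">Room 2</a></div></div>
</body>
"""
soup = BeautifulSoup(html, 'lxml')

# A long selector and a trimmed one match the same elements here
long_path = soup.select('body > div.wrap > div.info > a')
short_path = soup.select('div.info > a')

print(isinstance(short_path, list))            # True: select() returns a list (a ResultSet)
print(len(long_path), len(short_path))         # 2 2
print([a.get('href') for a in short_path])     # ['/room/1', '/room/2']
```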
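And a sketch of the encoding point: the same rows written once with plain 'utf-8' and once with 'utf_8_sig'. Only the latter carries a BOM, so only it displays Chinese correctly when the CSV is opened directly in Excel. The file names and sample values here are arbitrary.

```python
import pandas as pd

rows = [['整租 · 两室一厅', '北京市 朝阳区', '4500']]
df = pd.DataFrame(rows, columns=['title', 'address', 'price'])

df.to_csv('plain_utf8.csv', encoding='utf-8', index=False)    # Chinese shows as mojibake in Excel
df.to_csv('with_bom.csv', encoding='utf_8_sig', index=False)  # opens correctly in Excel
```

The full crawler: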
from bs4 import BeautifulSoup
import requests
import pandas as pd
import time
# Browser-like User-Agent so the site serves the normal desktop page
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
def judgement_sex(class_name):
    # The landlord's gender is encoded in the CSS class of an icon:
    # 'member_boy_ico' means male, anything else is treated as female.
    if class_name == ['member_boy_ico']:
        return '男'
    else:
        return '女'
def get_links(url):
    # Fetch one search-results page and follow every per-listing link on it
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    links = soup.select('#page_list > ul > li > a')
    # print(links)
    # The per-item selectors are page_list > ul > li:nth-child(1) > a,
    # page_list > ul > li:nth-child(2) > a, ...; dropping the :nth-child(…)
    # part gives one selector that matches every listing link.
    for link in links:
        href = link.get("href")
        # print(href)  # sanity check: the detail-page URL
        get_info(href)
# Scrape the details of one listing page (title, address, price, cover image, landlord name and gender)
def get_info(url):
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select('div.pho_info > h4 > em')
    # full path: body > div.wrap.clearfix.con_bg > div.con_l > div.pho_info > h4 > em
    addresses = soup.select('div.pho_info > p > span')
    prices = soup.select('#pricePart > div.day_l > span')
    imgs = soup.select('#curBigImage')
    names = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > a')
    sexs = soup.select('#floatRightBox > div.js_box.clearfix > div.w_240 > h6 > span')
    # Dictionary version (one dict per listing), kept for reference; see the sketch at the end:
    # for title, address, price, img, name, sex in zip(titles, addresses, prices, imgs, names, sexs):
    #     data = {
    #         'title': title.get_text().strip(),   # strip() removes surrounding whitespace
    #         'address': address.get_text().strip(),
    #         'price': price.get_text(),
    #         'img': img.get("src"),
    #         'name': name.get_text(),
    #         'sex': judgement_sex(sex.get('class'))
    #     }
    # List version (one list per listing), appended to the global `data` list:
    for title, address, price, img, name, sex in zip(titles, addresses, prices, imgs, names, sexs):
        data.append([
            title.get_text().strip(),   # strip() removes surrounding whitespace
            address.get_text().strip(),
            price.get_text(),
            img.get("src"),
            name.get_text(),
            judgement_sex(sex.get('class'))
        ])
    # print(data)

data = []  # global list that accumulates one row per listing
if __name__ == '__main__':
    # Pages 1 to 4 of the short-term rental search results
    urls = ['http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(number) for number in range(1, 5)]
    for single_url in urls:
        # print(single_url)
        get_links(single_url)
        time.sleep(2)  # pause for two seconds so requests are not sent too quickly
    df = pd.DataFrame(data)
    df.columns = ['title', 'address', 'price', 'img', 'name', 'sex']
    df.to_csv('D:/机器学习实现/网络爬虫/output.csv', encoding='utf_8_sig', index=False)
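For completeness, a sketch of how the commented-out dictionary version could be wired up: each listing becomes a dict, the dicts are collected in a list, and pandas builds the DataFrame directly from that list, so the columns come from the dict keys. The helper name collect_row, the output file name, and the sample values are assumptions for illustration, not part of the original code.

```python
import pandas as pd

records = []  # one dict per listing

def collect_row(title, address, price, img, name, sex_class):
    # Mirror of the commented-out dict block in get_info()
    records.append({
        'title': title,
        'address': address,
        'price': price,
        'img': img,
        'name': name,
        'sex': '男' if sex_class == ['member_boy_ico'] else '女',
    })

# Made-up values standing in for the scraped tags
collect_row('温馨大床房', '北京市 朝阳区', '498',
            'http://example.com/cover.jpg', '房东小李', ['member_girl_ico'])

df = pd.DataFrame(records)  # columns are taken from the dict keys
df.to_csv('output_dict.csv', encoding='utf_8_sig', index=False)
```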