requests method:
import requests
from lxml import etree
import xlwings as xw
# Target URL
url = "https://hz.zu.anjuke.com/?from=navigation"
# Spoof a browser User-Agent so the site serves the normal page
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
           'referer': url}
# Fetch the page
response = requests.get(url, headers=headers)
response.encoding = 'utf-8'
body = response.text
# print(body)
# Parse the HTML and locate the listing blocks
html = etree.HTML(body, etree.HTMLParser())
gethtml = html.xpath('//div[contains(@class,"zu-info")]')
print(gethtml)
for item in gethtml:
    # Use relative paths (.//) inside the loop; a leading // would search
    # the whole document on every iteration instead of just this listing
    print(item.xpath('.//h3/a/@href'))
    print(item.xpath('.//h3/a/b[contains(@class,"strongbox")]/text()'))
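Since xlwings is imported above, a minimal sketch of pushing the scraped pairs into a worksheet could look like this (the sheet layout and output filename are assumptions; xlwings drives a locally installed Excel):
# Collect (title, link) pairs, then write them as a 2-D range via xlwings
rows = [['title', 'link']]
for item in gethtml:
    links = item.xpath('.//h3/a/@href')
    titles = item.xpath('.//h3/a/b[contains(@class,"strongbox")]/text()')
    if links and titles:
        rows.append([titles[0].strip(), links[0]])
wb = xw.Book()                          # opens a new workbook in Excel
wb.sheets[0].range('A1').value = rows   # a list of lists writes as a block
wb.save('anjuke.xlsx')                  # hypothetical output filename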
BeautifulSoup method:
import requests
from bs4 import BeautifulSoup
# Listing page URL
url = "https://hz.zu.anjuke.com/?from=navigation"
# Reuse a browser User-Agent here too, since Anjuke may reject bare requests
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}
res = requests.get(url, headers=headers)
res.encoding = 'utf-8'
# Full HTML document
html = BeautifulSoup(res.text, 'html.parser')
# print(html)
# print(html.text)
# select() examples
# str = html.select('html')[0]
# str = html.select('h1')
# str = html.select('#list-content')  # select by id
# str = html.select('.zu-info')       # select by class
# find() examples
# str = html.find_all('title')
# str = html.find_all('a')
# str = html.find(id='list-content')
# str = html.find(attrs={'id': 'list-content'})
# str = html.find('a').name
# str = html.find('a').attrs
# str = html.find('a').text
# str = html.find('a').get('_soj')
# str = html.find('a', attrs={'class': 'sid-nav'}).get('title')  # or .get('href')
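As a quick sanity check of the equivalence between the two styles above (a minimal sketch, assuming the fetched page actually contains the #list-content element):
# select() with a CSS id and find() with an id keyword hit the same node;
# bs4 returns references into one parse tree, so identity comparison holds
node_css = html.select('#list-content')[0]
node_find = html.find(id='list-content')
print(node_css is node_find)  # True when the element exists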
links = html.find_all('a')   # avoid shadowing the built-in name str
# print(links)
for i, item in enumerate(links):
    try:
        print(item.select('.sidebar-nav-hover'))
        str1 = item.select('.sidebar-nav-hover')[0].text
        str2 = item.select('.sidebar-nav-hover')[0].get('href')
        print(str1, str2, sep=',')
        print(i)
    except IndexError:
        # Most <a> tags have no .sidebar-nav-hover child; skip them
        continue
# html.find('a', attrs={'class': 'bbb'}, href=True).attrs['href']
# Dump the raw page for offline inspection:
# with open('test.html', 'w', encoding='utf-8') as f:
#     f.write(res.content.decode('utf-8', 'ignore'))
# Listing blocks
# print(html.select('.zu-info'))
# for item in html.select('.zu-info'):
#     print(item.select('h3 a')[0].get('href'))
#     print(item.select('.strongbox')[0].text)
#     print(item.select('a')[0].text)
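A runnable version of that commented-out block (selectors taken verbatim from the comments above, assuming the page structure still matches):
# Extract link and title from each listing block
for item in html.select('.zu-info'):
    link = item.select('h3 a')[0].get('href')
    title = item.select('.strongbox')[0].text.strip()
    print(title, link, sep=',')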
BeautifulSoup's find method:
import requests
import bs4
import time
import random
import pandas as pd
import openpyxl
house_info = []
for i in range(1, 100):
    url = "https://bj.anjuke.com/sale/p" + str(i) + "/#filtersort"
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36"
    }
    print("Scraping page %s of Anjuke Beijing second-hand home listings..." % str(i))
    response = requests.get(url=url, headers=headers)
    # Build the bs4 parse tree
    bsoup = bs4.BeautifulSoup(response.text, 'lxml')
    house_list = bsoup.find_all('li', class_="list-item")
    for house in house_list:
        # Pull each field out of the listing markup
        title = house.find('a').text.strip()
        house_type = house.find('div', class_='details-item').span.text
        area = house.find('div', class_='details-item').contents[3].text
        try:
            address = house.find('span', class_='comm-address').text.strip()
        except AttributeError:
            # Some listings lack an address; try/except prevents a crash
            address = 'error'
        price = house.find('span', class_='price-det').text.strip()
        unit_price = house.find('span', class_='unit-price').text.strip()
        pd1 = pd.DataFrame({'title': title, 'house_type': house_type,
                            'area': area, 'address': address,
                            'price': price, 'unit_price': unit_price}, index=[0])
        house_info.append(pd1)
    # Random 3-4 second pause between pages to avoid being rate-limited
    second = random.randrange(3, 5)
    time.sleep(second)
house_info2 = pd.concat(house_info)
house_info2.to_excel('beijing.xlsx', index=False)
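Appending one-row DataFrames and concatenating at the end works, but collecting plain dicts and building a single DataFrame once is simpler and cheaper; a sketch of the same tail end under that design (output filename is hypothetical, house_list as parsed in the loop above):
import pandas as pd

records = []                     # plain dicts instead of one-row DataFrames
for house in house_list:
    records.append({
        'title': house.find('a').text.strip(),
        'price': house.find('span', class_='price-det').text.strip(),
    })
pd.DataFrame(records).to_excel('beijing_alt.xlsx', index=False)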