python爬虫练习,使用beautifulsoup +sublime text 3 + python3.6版本。
上图!!!
只抓取了少量图片
一、爬虫网址:https://mm.taobao.com/json/request_top_list.htm?page=1
使用urllib库中的request获取网页信息
def __init__(self):
    # Base URL of the listing page; the page number is appended as ?page=N.
    self.siteUrl = "https://mm.taobao.com/json/request_top_list.htm"
def getHtml(self, pageIndex):
    """Fetch listing page *pageIndex* and return its HTML as GBK-decoded text.

    pageIndex must already be a string (it is concatenated into the URL).
    """
    url = self.siteUrl + "?page=" + pageIndex
    print("\nurl=" + url + "\n")
    req = request.Request(url)
    # 'with' ensures the HTTP response is closed even if decode() raises.
    with request.urlopen(req) as response:
        return response.read().decode('gbk')
二、提取页面中的标签信息(div, class='list-item')
def getListItems(self, pageIndex):
    """Return every <div class="list-item"> tag on listing page *pageIndex*."""
    soup = BeautifulSoup(self.getHtml(str(pageIndex)), 'html.parser')
    return soup.find_all('div', class_='list-item')
三、根据提取到的img标签中url地址,保存图片文件到本地
def saveFile(self, imgurl):
    """Download *imgurl* and save it as images/<timestamp>.jpg.

    Does nothing when imgurl is None or the download body is empty.
    Protocol-relative URLs (//img...) get an 'https:' prefix.
    """
    if imgurl is None:
        return
    if not imgurl.startswith('https:'):
        imgurl = 'https:' + imgurl
    path = 'images'
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(path, exist_ok=True)
    req = request.Request(imgurl)
    with request.urlopen(req) as response:
        data = response.read()
    # read() returns bytes, never None; guard against an empty body instead.
    if not data:
        return
    filename = path + '/' + str(time.time()) + ".jpg"
    with open(filename, 'wb') as f:
        f.write(data)
四、保存提取到的个人信息简介到info.txt文件
def savePersonalInfo(self, div):
    """Append "name:description" extracted from one list-item *div* to info.txt."""
    name = div.find('a', class_='lady-name').get_text()
    print('name=' + str(name))
    data = str(name) + ':' + str(div.find('p', class_='description').get_text()) + '\n'
    # 'with' guarantees the file is flushed and closed; the original wrote
    # "f.close" without parentheses, which never actually closed the file.
    with open('info.txt', 'a+') as f:
        f.write(data)
----------------------至此就完成了,简单爬虫功能-----------------------------------------------
完整代码:
#!/usr/bin/python
from urllib import request
from urllib import parse
from bs4 import BeautifulSoup
import time
import os
class Spider(object):
    """Simple crawler for a Taobao model listing page.

    Downloads each entry's photo into images/ and appends a
    "name:description" line per entry to info.txt.
    """

    def __init__(self):
        # Base URL of the listing page; the page number is appended as ?page=N.
        self.siteUrl = "https://mm.taobao.com/json/request_top_list.htm"

    def getHtml(self, pageIndex):
        """Fetch listing page *pageIndex* (a string) and return GBK-decoded HTML."""
        url = self.siteUrl + "?page=" + pageIndex
        print("\nurl=" + url + "\n")
        req = request.Request(url)
        # 'with' ensures the HTTP response is closed even if decode() raises.
        with request.urlopen(req) as response:
            return response.read().decode('gbk')

    def getListItems(self, pageIndex):
        """Return every <div class="list-item"> tag on listing page *pageIndex*."""
        soup = BeautifulSoup(self.getHtml(str(pageIndex)), 'html.parser')
        return soup.find_all('div', class_='list-item')

    def saveFile(self, imgurl):
        """Download *imgurl* into images/<timestamp>.jpg.

        Does nothing when imgurl is None or the download body is empty.
        Protocol-relative URLs (//img...) get an 'https:' prefix.
        """
        if imgurl is None:
            return
        if not imgurl.startswith('https:'):
            imgurl = 'https:' + imgurl
        path = 'images'
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(path, exist_ok=True)
        req = request.Request(imgurl)
        with request.urlopen(req) as response:
            data = response.read()
        # read() returns bytes, never None; guard against an empty body instead.
        if not data:
            return
        filename = path + '/' + str(time.time()) + ".jpg"
        with open(filename, 'wb') as f:
            f.write(data)

    def savePersonalInfo(self, div):
        """Append "name:description" extracted from one list-item *div* to info.txt."""
        name = div.find('a', class_='lady-name').get_text()
        print('name=' + str(name))
        data = str(name) + ':' + str(div.find('p', class_='description').get_text()) + '\n'
        # 'with' guarantees the file is flushed and closed; the original wrote
        # "f.close" without parentheses, which never actually closed the file.
        with open('info.txt', 'a+') as f:
            f.write(data)

    def start(self, start, ends):
        """Crawl pages start..ends inclusive; negative bounds are clamped to 1."""
        if start < 0:
            start = 1
        if ends < 0:
            ends = 1
        for i in range(start, ends + 1):
            for div in self.getListItems(i):
                self.savePersonalInfo(div)
                imgurl = str(div.find('img').get('src'))
                # 'spaceball' images are placeholders, not real photos: skip them.
                if 'spaceball' not in imgurl:
                    print("img=" + imgurl)
                    self.saveFile(imgurl)
if __name__ == "__main__":
    # Guard the crawl so importing this module has no network side effects.
    spider = Spider()
    spider.start(2, 3)