# ===== Source code: proxy crawler =====
#!/usr/bin/env python
#coding:utf-8
"""用selenium&PhantomJS 完成的网络爬虫,最适合使用的情形是爬取有JS的网站,但是用来爬取其他网站同样给力"""
from selenium import webdriver
from myLog import MyLog as mylog
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class Item(object): #用于存储代理的属性
ip = None
port = None
anonymouns = None
type = None
support = None
local = None
speed = None
class GetProxy(object):
    """Scrape free proxies from kuaidaili.com using selenium + PhantomJS.

    PhantomJS renders the JavaScript-driven table so the rows can be read
    with XPath.  Each row becomes an Item; all items are written to a
    tab-separated text file.  The whole job (fetch, parse, save) runs
    inside __init__.
    """

    def __init__(self):
        self.starturl = "http://www.kuaidaili.com/free/inha/"  # list-page base URL
        self.log = mylog()  # wrapped logging helper (see myLog module)
        self.urls = self.getUrls()
        self.proxyList = self.getProxyList(self.urls)
        self.fileName = 'proxy.txt'
        self.saveFile(self.fileName, self.proxyList)

    def getUrls(self):
        """Build the paginated list-page URLs (pages 1 and 2).

        Returns a list of URL strings, one per page.
        """
        urls = []
        # range() instead of xrange() keeps this working on Python 2 and 3.
        for i in range(1, 3):
            url = self.starturl + str(i)
            urls.append(url)
            self.log.info('get URL %s to urls' % url)
        return urls

    def getProxyList(self, urls):
        """Open each page in PhantomJS and parse the proxy table rows.

        Returns a list of Item objects, one per <tr> under //tbody.
        """
        browser = webdriver.PhantomJS()
        proxyList = []
        try:
            for url in urls:
                browser.get(url)
                browser.implicitly_wait(5)  # give the JS time to render the table
                elements = browser.find_elements_by_xpath('//tbody/tr')
                for element in elements:
                    item = Item()
                    # Table columns: IP, port, anonymity, type, support, location, speed.
                    item.ip = element.find_element_by_xpath('./td[1]').text
                    item.port = element.find_element_by_xpath('./td[2]').text
                    item.anonymouns = element.find_element_by_xpath('./td[3]').text
                    item.type = element.find_element_by_xpath('./td[4]').text
                    item.support = element.find_element_by_xpath('./td[5]').text
                    item.local = element.find_element_by_xpath('./td[6]').text
                    item.speed = element.find_element_by_xpath('./td[7]').text
                    proxyList.append(item)
                    self.log.info('add proxy %s: %s to list' % (item.ip, item.port))
        finally:
            # Always shut the browser down, even when scraping raises,
            # so no headless PhantomJS process is leaked.
            browser.quit()
        return proxyList

    def saveFile(self, fileName, proxyList):
        """Write one tab-separated line per proxy to *fileName*.

        Fields per line: ip, port, anonymity, type, support, location, speed.
        """
        # Log the file actually being written (was self.fileName, which could
        # disagree with the fileName argument).
        self.log.info('add all proxy to %s' % fileName)
        with open(fileName, 'w') as fp:
            for item in proxyList:
                fields = (item.ip, item.port, item.anonymouns, item.type,
                          item.support, item.local, item.speed)
                # One write per record produces the same bytes as the seven
                # sequential writes the original performed.
                fp.write('\t'.join(fields) + '\n')
                self.log.info('write %s:%s to file successfuly......' % (item.ip, item.port))
if __name__ == "__main__":
USE = GetProxy()
# ===== Log output module (myLog) =====
#!/usr/bin/env python
#coding:utf-8
#date 2017_11_30
#author chenjisheng
import logging
import getpass
class MyLog(object):
'''此类用来封装已有logging类,方便自己使用'''
def __init__(self):
self.user = getpass.getuser() #获取使用的用户
self.logger = logging.getLogger(self.user) #初始化一个logger 对象
self.logger.setLevel(logging.DEBUG) #设置logger 的日志级别
self.logfile = "progress_log.log" #设置日志保存的文件
self.formates = logging.Formatter(
"[%(asctime)s] - USERNAME:[%(name)s] - [%(levelname)s] - %(message)s"
) #设置日志的格式
self.Hand = logging.FileHandler(self.logfile) #设置日志的输出为文件
self.Hand.setFormatter(self.formates) #使用日志的格式
self.Hand.setLevel(logging.ERROR) #设置输出为文件的日志级别
self.HandStream = logging.StreamHandler() #设置日志的输出格式为console
self.HandStream.setFormatter(self.formates) #使用日志的格式
self.HandStream.setLevel(logging.DEBUG) #设置输出为窗口的日志级别
self.logger.addHandler(self.HandStream) #增加日志的输出对象给logger(负责发送消息)
self.logger.addHandler(self.Hand) #增加日志的输出对象给logger
def debug(self,messages):
self.logger.debug(messages)
def info(self,messages):
self.logger.info(messages)
def warning(self,messages):
self.logger.warning(messages)
def error(self,messages):
self.logger.error(messages)
def critical(self,messages):
self.logger.critical(messages)
if __name__ == "__main__":
    # Smoke test: emit one record at every severity level.
    demo = MyLog()
    for method, text in (
        ("debug", "I'm debug"),
        ("info", "I'm info"),
        ("warning", "I'm warning"),
        ("error", "I'm error "),
        ("critical", "I'm critical"),
    ):
        getattr(demo, method)(text)