# 代码后续优化请关注:https://github.com/guosimin/python-spider
# 注意:请限制使用爬虫频率,做一条有道德的爬虫
# 一,首先要先准备一定量的代理ip并存入到数据库
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests,threading,datetime
from bs4 import BeautifulSoup
import random
import pymongo
from pymongo import MongoClient
"""
1、抓取西刺代理网站的代理ip
2、并根据指定的目标url,对抓取到ip的有效性进行验证
3、最后存到指定的path
"""
# ---------------------------------------------------- data storage ----------------------------------------------------
# 1. Connect to the MongoDB server running on localhost (default port).
name = MongoClient('localhost')
# 2. Open the local database `demo` (created lazily on first write).
db = name.demo # "demo" is the database name
# 3. Open / create the collection used to store proxy IPs.
emp = db.employees # "employees" is the collection name
# 写入文档
# Persist a single proxy-ip string as one document in the collection.
def write(path, text):
    """Store `text` (a proxy ip) in MongoDB.

    `path` is unused — it is kept only for interface compatibility with
    the original file-based implementation; storage is MongoDB, not a file.
    """
    document = {'ip': text}
    emp.insert_one(document)
# 清空文档
# Clear all stored proxy ips.
def truncatefile(path):
    """Delete every document from the collection.

    `path` is unused — kept only for interface compatibility with the
    original file-based implementation.
    """
    # Collection.remove() was deprecated in PyMongo 3.0 and removed in 4.0;
    # delete_many({}) is the supported way to empty a collection.
    emp.delete_many({})
# 读取文档
# Read back all stored proxy ips.
def read():
    """Return a list of all non-empty proxy-ip strings in the collection."""
    # Comprehension instead of a manual append loop; also avoids the
    # original's shadowing of the builtin name `list`.
    return [doc['ip'] for doc in emp.find() if doc['ip'] != '']
# ----------------------------------------------------------------------------------------------------------------------
# 计算时间差,格式: 时分秒
# Compute the elapsed time between two datetimes, formatted as HH:MM:SS.
def gettimediff(start, end):
    """Return the elapsed time from `start` to `end` as an "HH:MM:SS" string.

    Uses timedelta.total_seconds() rather than the `.seconds` attribute:
    `.seconds` only holds the sub-day remainder, so the original silently
    dropped whole days for spans of 24 hours or more.
    """
    seconds = int((end - start).total_seconds())
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return "%02d:%02d:%02d" % (h, m, s)
# ----------------------------------------------------------------------------------------------------------------------
# 返回一个随机的请求头 headers
def getheaders():
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1&#