# First create a folder in the terminal, then create the Scrapy project inside it
mkdir proxyip
cd proxyip
scrapy startproject proxyip   # create the project
scrapy genspider -t basic proxy xicidaili.com   # generate the spider skeleton; for a rule-based crawler, swap basic for crawl
Once the project is created, you can open it in PyCharm or VS Code and see a directory layout like this:
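For reference, the layout that scrapy startproject plus genspider produces looks roughly like this (minor differences are possible across Scrapy versions):

proxyip/
├── scrapy.cfg
└── proxyip/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── proxy.py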
Next, write the IP-scraping logic in proxy.py, as follows:
import re
import time

import scrapy
from scrapy.http import Request

from proxyip.items import ProxyipItem


class ProxySpider(scrapy.Spider):
    name = 'proxy'
    allowed_domains = ['xicidaili.com']
    start_urls = ['http://www.xicidaili.com/']

    def start_requests(self):
        # Crawl pages 1-499 of the high-anonymity list, sending a browser
        # User-Agent so the site does not reject the requests
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                                 "(KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
        for i in range(1, 500):
            yield Request('http://www.xicidaili.com/nn/' + str(i), headers=headers)

    def parse(self, response):
        # The first <tr> of the table is the header row, so skip it
        ip_list = response.xpath("//table[@id='ip_list']/tr")
        for ips in ip_list[1:]:
            time.sleep(0.2)  # crude throttle; DOWNLOAD_DELAY in settings.py is the cleaner way
            item = ProxyipItem()
            # The title attribute holds the response time; strip the non-digit
            # characters and keep only the fast proxies
            time1 = ips.xpath("./td[8]/div/@title").extract_first()
            if not time1:
                continue
            times = re.sub(r'\D', '', time1)
            if times and int(times) <= 1000:
                # extract() returns a list; the pipeline iterates over it
                item['ip'] = ips.xpath("./td[2]/text()").extract()
                item['port'] = ips.xpath("./td[3]/text()").extract()
                item['htype'] = ips.xpath("./td[6]/text()").extract()
                yield item
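Before launching the full crawl, the XPath expressions can be sanity-checked in Scrapy's interactive shell. This is just a quick check; the site filters some user agents, so the USER_AGENT override below is an assumption you may or may not need:

scrapy shell -s USER_AGENT="Mozilla/5.0" "http://www.xicidaili.com/nn/1"
>>> rows = response.xpath("//table[@id='ip_list']/tr")
>>> rows[1].xpath("./td[2]/text()").extract()   # should print the first IP on the page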
In settings.py, enable the item pipeline by uncommenting the ITEM_PIPELINES block that the project template generates.
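The relevant lines after uncommenting, plus two optional settings worth adding (the first because the site's robots.txt would otherwise stop the crawl, the second as a cleaner replacement for the time.sleep() in parse()):

ITEM_PIPELINES = {
    'proxyip.pipelines.ProxyipPipeline': 300,
}
# Assumption: obeying robots.txt would block this crawl, so turn it off
ROBOTSTXT_OBEY = False
# Politeness delay per request -- cleaner than sleeping inside parse()
DOWNLOAD_DELAY = 0.2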
In items.py, define the container that will hold the scraped data:
import scrapy


class ProxyipItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    ip = scrapy.Field()
    port = scrapy.Field()
    htype = scrapy.Field()
In pipelines.py, write the pymysql code that talks to the database and stores the scraped data in MySQL:
import pymysql


class ProxyipPipeline(object):
    def open_spider(self, spider):
        # One connection for the whole crawl instead of one per item
        self.conn = pymysql.connect(host="localhost", user="root",
                                    passwd="mysql", db="wuyanzu",
                                    charset="utf8mb4")

    def close_spider(self, spider):
        self.conn.close()

    def process_item(self, item, spider):
        cur = self.conn.cursor()
        for i in range(len(item['ip'])):
            ip = item['ip'][i]
            port = item['port'][i]
            htype = item['htype'][i]
            try:
                # Parameterized query: never build SQL by string concatenation
                cur.execute("insert into proxyip values(0, %s, %s, %s)",
                            (ip, port, htype))
                self.conn.commit()
            except pymysql.Error as e:
                # Database errors (duplicates, bad encoding, ...) are what can
                # happen here, not HTTP errors
                print(e)
                self.conn.rollback()
        cur.close()
        return item
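The pipeline assumes the wuyanzu database already contains a proxyip table whose first column is an auto-increment id (that is what the leading 0 in the insert feeds). A one-off setup sketch; the column names and types are an assumption inferred from the insert statement:

import pymysql

conn = pymysql.connect(host="localhost", user="root", passwd="mysql", db="wuyanzu")
cur = conn.cursor()
# Assumed schema: adjust the varchar sizes to taste
cur.execute("""
    create table if not exists proxyip (
        id int auto_increment primary key,
        ip varchar(64),
        port varchar(16),
        htype varchar(16)
    )
""")
conn.commit()
cur.close()
conn.close()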
Finally, run the spider from the project root, and the proxy IPs we wanted end up in the database.
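The launch command uses the spider name given to genspider earlier; the mysql one-liner afterwards is only an illustrative spot-check:

scrapy crawl proxy
mysql -u root -p -e "select count(*) from wuyanzu.proxyip;"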