Python vulnerability-scanning graduation project: monitoring and downloading the GitHub exploitdb vulnerability database with a Python crawler

#!/usr/bin/env python
# -*- coding:utf-8 -*-

import re
import time
import urllib.error
import urllib.request

import conf as cf  # local config module; only needed by the commented-out header setup below

BASE_URL = 'https://github.com/offensive-security/exploitdb/releases'
# Capture group 1 of each pattern holds the URL of interest.
DOWNLOAD_LINK_PATTERN = 'href="(.*?)zip" rel="nofollow">'
# The two "Next" patterns assume GitHub's releases pagination markup of the time;
# group 1 captures the URL of the next page.
FIRST_PATTERN = r'href="(.*?)" rel="nofollow">Next.*'
PAGE_PATTERN = r'>Previous</a>.*?href="(.*?)" rel="nofollow">Next.*'


class MyCrawler:

    def __init__(self, base_url=BASE_URL, start_page="first 1 page"):
        self.base_url = base_url
        self.start_page = start_page
        # self.headers = apache_request_headers();

    # Crawl the first releases page
    def first_page(self):
        try:
            req = urllib.request.Request(self.base_url)
            html = urllib.request.urlopen(req)
            doc = html.read().decode('utf8', 'ignore')
            next_page = re.search(FIRST_PATTERN, doc, re.M | re.I)
            print('Now working on page = {}\n'.format(self.start_page))
            time.sleep(5)
            self.fetch_download_link(self.base_url)
            if next_page is not None:
                self.start_page = next_page.group(1)
                # re.search(r'after = (.*?) ">Next.*', next_page.group(1), re.M | re.I).group(1)
                self.base_url = next_page.group(1)
                # self.fetch_download_link(next_url)
        except urllib.error.HTTPError as err:
            print(err.msg)
        self.fetch_next_page()

    # Follow the "Next" links through the remaining pages
    def fetch_next_page(self):
        while True:
            try:
                req = urllib.request.Request(self.base_url)
                html = urllib.request.urlopen(req)
                doc = html.read().decode('utf8', 'ignore')
                next_page = re.search(PAGE_PATTERN, doc, re.M | re.I)
                print('Now working on page {}\n'.format(self.start_page))
                time.sleep(5)  # wait 5 seconds between pages
                self.fetch_download_link(self.base_url)
                if next_page is None:  # no further "Next" link: last page reached
                    break
                self.start_page = next_page.group(1)
                # re.search(r'after = (.*?) ">Next.*', next_page.group(1), re.M | re.I).group(1)
                self.base_url = next_page.group(1)
                # self.fetch_download_link(next_url)
            except urllib.error.HTTPError as err:
                print(err.msg)
                break

    # File download step: collect the .zip links on one page and store them in a file
    def fetch_download_link(self, Aurl):
        with open('result.txt', 'a') as f:
            req = urllib.request.Request(Aurl)
            html = urllib.request.urlopen(req)
            doc = html.read().decode('utf8')
            alist = list(set(re.findall(DOWNLOAD_LINK_PATTERN, doc)))
            for item in alist:
                url = "https://github.com/" + item + "zip"
                print('Storing {}'.format(url))
                f.write(url + '\n')
            time.sleep(7)

    def run(self):
        self.fetch_download_link(self.base_url)


if __name__ == '__main__':
    mc = MyCrawler()
    mc.first_page()
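
The crawler above only records the archive URLs in result.txt; the "download" half of the title still needs a step that fetches each stored link. Below is a minimal companion sketch, not part of the original post, assuming result.txt was produced by MyCrawler with one direct .zip URL per line and that the output filename can be taken from the URL path:

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Companion sketch (assumption: result.txt holds one .zip URL per line,
# as written by MyCrawler.fetch_download_link above).

import os
import time
import urllib.request


def download_all(link_file='result.txt', out_dir='archives'):
    os.makedirs(out_dir, exist_ok=True)
    with open(link_file) as f:
        urls = [line.strip() for line in f if line.strip()]
    for url in urls:
        name = url.rsplit('/', 1)[-1] or 'archive.zip'  # filename from the URL path
        target = os.path.join(out_dir, name)
        if os.path.exists(target):  # skip archives already fetched on a previous run
            continue
        print('Downloading {}'.format(url))
        urllib.request.urlretrieve(url, target)
        time.sleep(5)  # be polite between requests, like the crawler above


if __name__ == '__main__':
    download_all()

Run the crawler first to populate result.txt, then run this downloader; re-running it only fetches archives that are not yet present in the output directory.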
