The reference article is here:
https://segmentfault.com/a/1190000010823538
This crawler downloads the technical reports from the Smart Car competition's official website.
# _*_ coding:utf-8 _*_
# author : nsy12
# date   : 2018/2/25
# time   : 11:20
import os
import requests
from bs4 import BeautifulSoup
import time
import random

FILE_DIR = r'E:\1smart Car\paper'  # directory where the downloaded files are stored
url_datas = [
    'https://.cn',
    'https://.cn',
    'https://.cn',
    'https://.cn',
    'https://.cn',
    'https://.cn',
    'https://.cn'
]
# print the name of the document being downloaded
def showPdf(pdf_name):
    print(pdf_name + '...')

# save one document to disk
def savePdf(url, pdf_name):
    response = requests.get(url, stream=True)
    if not os.path.exists(FILE_DIR):
        os.makedirs(FILE_DIR)
    with open(os.path.join(FILE_DIR, pdf_name), "wb") as pdf_file:
        for content in response.iter_content():
            pdf_file.write(content)

def downOne(url, pdf_name):
    # showPdf(pdf_name)
    savePdf(url, pdf_name)
    print(pdf_name + " has been downloaded!!")
def get_urls(url):
    print("Please wait a moment ...")
    html = requests.get(url)
    # html.encoding = 'utf-8'  # set the page encoding explicitly (check the page source)
    # print(html.encoding)
    # print(html.status_code)
    # print(html.text)
    soup = BeautifulSoup(html.text, 'lxml')
    # all_a = soup.find('div', class_='cvideotitle').find_all('a')
    all_a = soup.find('div').find_all('a')
    for a in all_a:
        title = a.get_text()
        url_pdf = a['href']
        name = title[19:-18]
        print('------start saving:', name)
        downOne(url_pdf, str(name))
        # time.sleep(random.randint(1, 2))
        # write the file name into a text file
        # with open(r'D:\jishubaogao\date.txt', 'a', encoding='gbk') as f:
        #     f.write(name + '\n')
if __name__ == "__main__":
    for url_data in url_datas:
        get_urls(url_data)
        print("finished " + url_data)
    print("finished download")
The key part is here:

soup = BeautifulSoup(html.text, 'lxml')
all_a = soup.find('div').find_all('a')
for a in all_a:
    title = a.get_text()
    url_pdf = a['href']

BeautifulSoup parses the div tag and pulls the name and link out of every a tag. The names come back looking like this:

\r\n \t\t\t001大连海事大学 电航 phi2017.docx\r\n \t\t

so they are sliced first, otherwise an error is raised:

    name = title[19:-18]
    print('------start saving:', name)
    downOne(url_pdf, str(name))
    # time.sleep(random.randint(1, 2))
    # write the file name into a text file as well
    with open(r'D:\jishubaogao\date.txt', 'a', encoding='gbk') as f:
        f.write(name + '\n')
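The fixed slice title[19:-18] only works while the padding around the file name keeps exactly that length. A small sketch of an alternative loop body, using strip() instead of index slicing (my own suggestion, not part of the original script), tolerates any amount of surrounding whitespace:

for a in all_a:
    title = a.get_text()
    url_pdf = a['href']
    # strip() removes leading/trailing \r, \n, \t and spaces whatever their count,
    # so the file name survives even if the page changes its padding
    name = title.strip()
    print('------start saving:', name)
    downOne(url_pdf, name)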