IOError: [Errno 2] No such file or directory的解决方法@TOC
一。报错
学习爬虫https://blog.csdn.net/MattaLi/article/details/108154734
,想copy,paste, shift+ctrl+F10直接运行,结果遇到问题,
with open('./images3/' + image_name,'wb') as f:,报错"IOError: [Errno 2] No such file or directory"。 网上搜索加思考了半天,怀疑是没有建目录所致。
手工建目录images3搞定。虽然是基础错误,但是解决了之后,还是很有成就感。发文庆祝。
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:StevenC
# datetime:2021/5/2 15:23
# software: PyCharm
import sys

# Python 2-only hack to make the interpreter's default string encoding UTF-8.
# `reload` is not a builtin and `sys.setdefaultencoding` does not exist on
# Python 3 (str is already Unicode there), so running these lines unguarded
# crashes the whole script under Python 3. Guard on the major version so the
# script is runnable on both interpreters.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 — `reload` is a builtin on Python 2 only
    sys.setdefaultencoding("utf-8")
import time
import requests
import re
def get_Data(base_url):
    """Fetch the gallery page at *base_url* and return the image URLs.

    The page is requested with a browser-like User-Agent plus a session
    cookie (the site rejects bare clients), decoded as UTF-8, and every
    ``<a href=...>`` carrying ``alt``/``title`` attributes is extracted.

    :param base_url: full URL of a gallery page to scrape
    :return: list of href strings (relative image URLs) found on the page
    """
    # Headers that impersonate a logged-in Chrome browser session.
    # NOTE(review): the cookie below is session-specific — replace it with
    # one copied from your own browser or the request may fail.
    browser_headers = {
        "cookie": "__cfduid=d5411884fe8db207f3f03481febfe233e1598005261; __gads=ID=7a29c02809efefed:T=1598005261:S=ALNI_MZ6fuEEp6EyyCZwFhe5REJdwC5IsQ; _ga=GA1.2.1470230754.1598005306; _gid=GA1.2.1918223717.1598005307; Hm_lvt_a5eba7a40c339f057e1c5b5ac4ab4cc9=1597913712,1598001373,1598005230,1598007422; _GPSLSC=; xcat_sign_cookie=2097c21e431a2a007b125e4fc599a50c; Hm_lpvt_a5eba7a40c339f057e1c5b5ac4ab4cc9=1598008115",
        "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36",
    }
    page = requests.get(base_url, headers=browser_headers)
    page.encoding = 'utf-8'
    # Pull every anchor href that also has alt/title attributes — on this
    # site those are exactly the image links.
    found_urls = re.findall('<a href="(.*?)" alt=".*?" title=".*?">', page.text)
    return found_urls
# print(get_Data('https://www.vmgirls.com/12985.html'))
def save_Data(image_urls):
    """Download each image in *image_urls* and save it under ./images3/.

    Each entry of *image_urls* is a URL relative to the site root; the file
    is saved using the last path component as its name. A 3-second pause
    between downloads throttles the requests.

    Fix: the original code assumed ./images3/ already existed and raised
    "IOError: [Errno 2] No such file or directory" otherwise (the error the
    surrounding post describes). The directory is now created up front.

    :param image_urls: iterable of relative image URLs (from get_Data)
    """
    import os
    Rheaders = {
        # Session cookie copied from a browser (use your own if it expires).
        "cookie": "__cfduid=d5411884fe8db207f3f03481febfe233e1598005261; __gads=ID=7a29c02809efefed:T=1598005261:S=ALNI_MZ6fuEEp6EyyCZwFhe5REJdwC5IsQ; _ga=GA1.2.1470230754.1598005306; _gid=GA1.2.1918223717.1598005307; Hm_lvt_a5eba7a40c339f057e1c5b5ac4ab4cc9=1597913712,1598001373,1598005230,1598007422; _GPSLSC=; xcat_sign_cookie=2097c21e431a2a007b125e4fc599a50c; Hm_lpvt_a5eba7a40c339f057e1c5b5ac4ab4cc9=1598008115",
        # Browser-like User-Agent so the site serves the images.
        "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"
    }
    path = 'https://www.vmgirls.com/'
    save_dir = './images3'
    # Create the output directory if it is missing — without this, open()
    # below fails with [Errno 2] No such file or directory.
    # (Plain guard instead of exist_ok=True to stay Python 2 compatible.)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    print("Begin >> >> >>")
    for url in image_urls:
        time.sleep(3)  # be polite: throttle one request every 3 seconds
        image_name = url.split('/')[-1]
        response = requests.get(path + url, headers=Rheaders)
        with open(save_dir + '/' + image_name, 'wb') as f:
            f.write(response.content)
    print("<< << << Finish")
if __name__ == '__main__':
    # Entry point: scrape one gallery page and download all of its images.
    # The URL must point to a five-digit post page, e.g. 14012.html.
    save_Data(get_Data('https://www.vmgirls.com/14012.html'))
    #get_Data('https://www.vmgirls.com/12985.html')
搜索到的博文链接:感谢作者https://blog.csdn.net/YHP_DANIEL/article/details/82930654
二。原文链接
https://blog.csdn.net/MattaLi/article/details/108154734
感谢作者,及作者无意设置的给初学者的直接copy/使用障碍。