爬虫:
- 一段程序
- 下载网上的资源(网页、图片等数据)
- 按照我们自己的规则提取内容
- 必要时伪装请求(例如设置 User-Agent)
# Download an HTML page (text) and an image (binary) with requests,
# demonstrating the difference between Response.text and Response.content.
import requests

url = "http://seputu.com/biji1/1.html"
# An image URL: the response body is binary data (bytes), not text.
img_url = "https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1511020637846&di=1c2e102ce1f748a625b04ec12d940912&imgtype=0&src=http%3A%2F%2Fdynamic-image.yesky.com%2F740x-%2FuploadImages%2F2014%2F247%2F55%2FE5G8ZI5J9976.jpg"

page_response = requests.get(url)
# The site serves GBK-encoded pages; set the encoding so .text decodes correctly.
page_response.encoding = "GBK"
# Re-encode the decoded text as UTF-8 on disk.
with open('1.txt', 'w', encoding='utf-8') as f:
    f.write(page_response.text)

# Use a separate variable for the image request instead of reusing the
# page response name -- keeps the two downloads independent.
img_response = requests.get(img_url)
# .content is the raw bytes of the response body.
print(img_response.content)
# Images must be written in binary mode ('wb').
with open("girl.jpg", "wb") as f:
    f.write(img_response.content)
# Scrape the xiaohuar.com front page and save every matched .jpg locally.
import requests
import re

first_url = "http://www.xiaohuar.com"
# Fetch the front page.
response = requests.get(first_url)
# Site pages are GBK-encoded; set it so .text decodes correctly.
response.encoding = 'GBK'
html = response.text
# print(html)

# Match relative image paths such as
# src="/d/file/20140811101923185.jpg".
# The capturing group makes findall return only the path, not the full attribute.
img_urls = re.findall(r'src="(/d/file/\w+\.jpg)"', html)
print(img_urls)

# Turn each relative path into an absolute URL
# (comprehension instead of index-based in-place mutation).
img_urls = ["http://www.xiaohuar.com%s" % path for path in img_urls]

for img_url in img_urls:
    # Use the last path segment as the local file name.
    img_file_name = img_url.split('/')[-1]
    img_file_data = requests.get(img_url).content
    with open(img_file_name, 'wb') as f:
        f.write(img_file_data)
    print(img_url)
改进版本:使用 BeautifulSoup 解析页面,并以 span.price 标签的文本为图片命名
# Improved version: scrape the gallery page, extract image URLs with a regex,
# and name each saved image after the text of the matching
# <span class="price"> tag parsed out with BeautifulSoup.
import requests
import re
import json
from bs4 import BeautifulSoup

url = 'http://www.xiaohuar.com/hua/'
response = requests.get(url)
response.encoding = 'gbk'
html = response.text

# json.dumps escapes the HTML into a single ASCII-safe string before matching
# (preserved from the original flow; the regex is then applied to that string).
htmls = json.dumps(html)
# Relative image paths of the form d/file/<dir>/<name>.<ext>
img_urls = re.findall(r'd/file/\w+/\w+\.\w+', htmls)

soup = BeautifulSoup(html, 'html.parser')
# Each <span class="price"> holds the caption used as the file name.
spanlist = soup.find_all('span', attrs={'class': 'price'})

# Build absolute URLs (comprehension instead of index-based mutation).
img_urls = ['http://www.xiaohuar.com/%s' % path for path in img_urls]

# zip pairs each URL with its caption and stops at the shorter sequence,
# so a length mismatch can no longer raise IndexError the way the original
# manual counter (spanlist[count]) could.
for span, img_url in zip(spanlist, img_urls):
    img_name = '%s.%s' % (span.string, img_url.split('.')[-1])
    img_data = requests.get(img_url).content
    with open(img_name, 'wb') as f:
        f.write(img_data)
    print(img_name)