Crawler Series, Part 1: Crawler Basics
Straight to the code (the target site is a bit… if you're squeamish, proceed with caution)
import requests
import os       # unused in this snippet; presumably needed in later parts of the series
import pymysql  # unused in this snippet; presumably needed in later parts of the series
from bs4 import BeautifulSoup
# the headers and cookies below were generated directly from the site with an online converter
headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Referer': 'http://3332v.com/htm/Video4/2.htm',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
}
cookies = {
'__cfduid': 'd6d436ea9fe3d93c1797cc330731e05431568007644',
'UM_distinctid': '16d1489ef09c3-0fe586a117f4f8-5373e62-19fa51-16d1489ef0a23',
'_ga': 'GA1.2.1680426548.1568007647',
'_gid': 'GA1.2.1488742013.1568007647',
'Hm_lvt_a9058c4d8240a8d5d9d68d3d116c7697': '1568014355',
'Hm_lvt_ae79515eb4cc983e50647ac494bae4d6': '1568014356',
'Hm_lpvt_ae79515eb4cc983e50647ac494bae4d6': '1568014356',
'CNZZDATA1278008611': '1196233660-1568009704-%7C1568015105',
'Hm_lpvt_a9058c4d8240a8d5d9d68d3d116c7697': '1568015967',
}
# send a GET request with requests; returns a Response object
response = requests.get('http://3332v.com/htm/Video4/1.htm', headers=headers, cookies=cookies)
# decode the raw bytes into a str
html = response.content.decode("utf-8")
# parse the HTML with BeautifulSoup using the lxml parser
soup = BeautifulSoup(html, "lxml")
print(soup)
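As a next step, one would usually pull structured data out of the soup rather than print it wholesale. A minimal sketch, assuming the page links its videos via ordinary a tags (the real tag and class names on this site may well differ):

# Hypothetical extraction: list every link on the page.
# Adjust find_all() to match the site's actual markup.
for a in soup.find_all('a', href=True):
    print(a['href'], a.get_text(strip=True))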
On constructing the header portion of a crawler's request, see the reference link.
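If you prefer not to rely on an online generator, the header block copied from the browser's Network tab can be turned into a dict by hand; a small sketch (the raw text here is abbreviated, not the site's full header set):

# Convert a raw "Name: value" header block into the dict that requests expects.
raw = """Connection: keep-alive
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) ...
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8"""
headers = dict(line.split(': ', 1) for line in raw.splitlines())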
On the encoding of content
In Python 3, the str type holds Unicode text; a bytes string is that text encoded into some concrete encoding such as UTF-8 or GBK.
In the code above, response.content is a bytes string. Checking this site's page source shows it is UTF-8, so decoding content (bytes encoded as UTF-8) into Unicode yields an ordinary Python 3 string.
encode() converts Unicode into a given encoding, while decode() converts some non-Unicode encoding back into Unicode. Since Python strings are represented as Unicode, we need to decode the UTF-8 bytes (this site's page encoding) into Unicode so that the string displays correctly.
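A quick round-trip makes the direction of the two calls concrete; the commented second half shows requests' built-in alternative (response.text, response.encoding, and response.apparent_encoding are all part of the requests API):

# str (Unicode) -> bytes with encode(); bytes -> str with decode()
s = '爬虫'
b = s.encode('utf-8')          # bytes: b'\xe7\x88\xac\xe8\x99\xab'
assert b.decode('utf-8') == s  # back to the original str

# requests can also decode for you: response.text uses response.encoding,
# which is guessed from the HTTP headers. If that guess is wrong, point it
# at the encoding detected from the body first:
# response.encoding = response.apparent_encoding
# html = response.text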