Python web crawler
Steps (a minimal end-to-end sketch follows this list):
Set the URL(s) to crawl.
Check whether the site imposes IP restrictions.
Set a User-Agent and/or hide your real IP behind a proxy.
Decode the fetched page so its content can be processed.
Extract the target data from the page with regular expressions.
Store the results.
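A minimal sketch of the whole pipeline, with a hypothetical URL, regex and output file (none of them taken from a real site):

import re
import requests

url = 'https://example.com/news'         # placeholder crawl address
header = {'User-Agent': 'Mozilla/5.0'}   # pretend to be an ordinary browser
response = requests.get(url, headers=header, timeout=10)
response.encoding = response.apparent_encoding           # decode the page
titles = re.findall(r'<h2>(.*?)</h2>', response.text)    # extract the target data with a regex
with open('titles.txt', 'w', encoding='utf-8') as f:     # store the results
    f.write('\n'.join(titles))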
GET request
import requests
url = 'target URL'
# custom request headers
header = {"Host": "",
"Referer": "从哪个网站来的",
"User-Agent": "设备",
"X-Requested-With": "XMLHttpRequest"
}
# custom proxy IPs
proxie = {"http": "proxy 1", "https": "proxy 2"}  # e.g. {"https": "http://ip:port"}
response = requests.get(url=url, headers=header, proxies=proxie)
print(response)  # prints: <Response [200]>
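If the request also needs query-string parameters, requests can build them from a dict; a small sketch reusing the objects above (the parameter names are made up):

params = {"keyword": "nba", "page": 1}  # hypothetical query parameters
response = requests.get(url=url, headers=header, proxies=proxie, params=params)
print(response.status_code)  # 200 on success
print(response.text[:200])   # the first part of the decoded response body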
POST request
import requests
url = 'target URL'
# craft the request headers
header = {
"Accept": "允许类型",
"Accept-Language": "",
"Cache-Control": "",
"Connection": "",
"Cookie": "",
"Host": "目标域名",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "设备名称",
}
# build the form data: put the request parameters into a dict
data = {"action": "",
"type": "",
"name": "",
"page": "",
"id": ""}
response = requests.post(url=url, data=data, headers=header)
print(response)  # prints: <Response [200]>
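How the body should be read depends on what the endpoint returns; a hedged sketch that parses JSON only when the server says it sent JSON, and falls back to plain text otherwise:

if response.status_code == 200:
    content_type = response.headers.get("Content-Type", "")
    if "application/json" in content_type:
        print(response.json())       # parsed JSON body
    else:
        print(response.text[:500])   # raw HTML / text body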
A crawler found online that scrapes NBA news titles and links from Sohu Sports (搜狐体育):
from urllib.error import URLError
from urllib.request import urlopen
import re  # regular expression matching
import pymysql  # MySQL database driver
import ssl  # SSL (used to skip certificate verification below)
from pymysql import Error
# decode the page bytes, trying each of the given character sets in turn
def decode_page(page_bytes, charsets=('utf-8',)):
    page_html = None
    for charset in charsets:
        try:
            page_html = page_bytes.decode(charset)
            break
        except UnicodeDecodeError:
            pass
            # logging.error('Decode:', error)
    return page_html
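# A quick illustration of decode_page with hypothetical bytes: utf-8 fails on GBK-encoded
# Chinese text, so the next charset in the tuple is tried.
# decode_page('体育'.encode('gbk'), charsets=('utf-8', 'gbk'))  ->  '体育'
# decode_page(b'\xff\xfe', charsets=('utf-8',))                 ->  None (nothing matched)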
# fetch the HTML of a page (up to 3 retries, implemented via recursion)
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    page_html = None
    try:
        # urlopen fetches the target URL; the raw bytes are then decoded
        page_html = decode_page(urlopen(seed_url).read(), charsets)
    except URLError:
        # logging.error('URL:', error)
        if retry_times > 0:
            return get_page_html(seed_url, retry_times=retry_times - 1,
                                 charsets=charsets)
    return page_html
# extract the needed parts from the page (usually links; the part is specified by a regular expression)
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    pattern_regex = re.compile(pattern_str, pattern_ignore_case)
    return pattern_regex.findall(page_html) if page_html else []
# re.compile() returns a pattern object; on its own it does nothing useful and is meant to be
# combined with findall(), search() or match().
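# For example, with a hypothetical HTML snippet (not from Sohu):
# get_matched_parts('<a href="http://a.cn/1">x</a> <a href="http://a.cn/2">y</a>', r'href="(.*?)"')
# returns ['http://a.cn/1', 'http://a.cn/2']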
# run the crawler and persist the extracted data
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    conn = pymysql.connect(host='localhost', port=3306,
                           database='test', user='root',
                           password='123456', charset='utf8')  # connect to the database
    try:
        with conn.cursor() as cursor:  # cursor
            url_list = [seed_url]
            # the dictionary below avoids re-crawling pages and tracks the crawl depth
            visited_url_list = {seed_url: 0}
            while url_list:
                current_url = url_list.pop(0)
                depth = visited_url_list[current_url]
                if depth != max_depth:
                    # try to decode the page with the utf-8 / gbk / gb2312 character sets
                    page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
                    links_list = get_matched_parts(page_html, match_pattern)
                    param_list = []
                    for link in links_list:
                        if link not in visited_url_list:
                            visited_url_list[link] = depth + 1
                            page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
                            headings = get_matched_parts(page_html, r'<h1>(.*)<span')
                            if headings:
                                param_list.append((headings[0], link))
                    cursor.executemany('insert into tb_result values (default, %s, %s)',
                                       param_list)
                    conn.commit()
    except Error:
        pass
        # logging.error('SQL:', error)
    finally:
        conn.close()
def main():
    # globally disable HTTPS certificate verification
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('seed URL',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
                max_depth=2)

if __name__ == '__main__':
    main()
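The INSERT in start_crawl assumes a tb_result table already exists in the test database; a sketch of a schema that matches its three values (the column names here are assumptions, not from the original):

import pymysql

conn = pymysql.connect(host='localhost', port=3306, database='test',
                       user='root', password='123456', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute("""
        create table if not exists tb_result (
            id int auto_increment primary key,   -- filled by the "default" placeholder
            title varchar(255),                  -- first %s: the <h1> heading
            link varchar(1024)                   -- second %s: the article link
        ) default charset=utf8
    """)
conn.commit()
conn.close()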