Python爬虫小demo
```python
import requests
from bs4 import BeautifulSoup as bs
import time
import pymysql
import sys
# 1. Fetch the index page that lists all 100 Python examples.
index_url = "https://www.runoob.com/python/python-100-examples.html"
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"}
# BUG FIX: the original passed the header dict positionally, where
# requests.get() expects `params` — the headers were sent as query-string
# parameters instead of HTTP headers. Pass them as headers=.
response = requests.get(index_url, headers=headers).text
# 2. Parse the index page.
example_html = bs(response, "lxml")
# 3. Collect the absolute URL of every example's detail page.
a_html_list = example_html.find(id="content").find_all("a")
a_url_list = ["https://www.runoob.com" + a.attrs["href"] for a in a_html_list]
# 7.0 Open the database connection and cursor. Keyword arguments are
# required: positional connect() args were removed in PyMySQL 1.0.
try:
    db = pymysql.connect(host="localhost", user="root",
                         password="root", database="pydb")
    print("数据库连接成功!")
except Exception as e:
    print(e)
    print("数据库连接失败!")
    sys.exit(1)
cursor = db.cursor()
for url in a_url_list:
    # 4. Fetch the detail page (headers= keyword, same fix as above).
    a_response = requests.get(url, headers=headers).text
    # 5. Parse the detail page.
    info_html = bs(a_response, "lxml")
    # 6. Extract the example's title and its description paragraph.
    info_content = info_html.find(id="content")
    title = info_content.h1.text
    info = info_content.find_all("p")[1].text
    # 7.1 Parameterized insert. The original interpolated scraped text
    # straight into the SQL string, which is an injection risk and breaks
    # on any quote character in title/info; let the driver do the escaping.
    sql = "insert into pyquestion values (NULL,%s,%s)"
    try:
        cursor.execute(sql, (title, info))
        db.commit()
        print("%s:插入成功!" % title)
    except Exception as e:
        print(e)
        print("%s:插入失败!!!" % title)
        db.rollback()
    # Be polite to the server between requests.
    time.sleep(1)
# 7.2 Close the database connection.
db.close()
```