简单抓取网站、存储到本地数据库的示例。刚开始写Python脚本,不妥之处望指出。
1、第一步
简单地抓取一个图书下载网站,输出书名、下载url地址。
import urllib3
import re
import os
from pyquery import PyQuery as pq

# Base URL of the book-download site being scraped.
weburl = 'http://www.ireadweek.com'

if __name__ == "__main__":
    # One pooled HTTP client for all requests.
    http = urllib3.PoolManager()
    r = http.request('GET', weburl)
    # Decode the body once; regex over decoded text, not the bytes repr
    # that str(r.data) would produce.
    page = r.data.decode('utf-8', errors='ignore')
    # Relative links to book-detail pages, e.g. /index.php/bookInfo/xxx.html.
    # Dots are escaped so they match a literal '.' rather than any character.
    for link, _ext in re.findall(r'(/index\.php/bookInfo[^\s]*?\.(html))', page):
        detail_url = weburl + link
        dr = http.request('GET', detail_url)
        # Parse the fetched response body. (The original passed the URL
        # string to pq(), discarding the request it had just made.)
        doc = pq(dr.data)
        name = doc('.hanghang-za-title').eq(0).text()
        url = doc('.downloads').attr('href')
        print("book:" + name + " Url: " + url)
2、第二步
支持保存到本地文件,并加入数据库支持。
import urllib3
import re
import os
import pymysql
from pyquery import PyQuery as pq

# Base URL of the book-download site being scraped.
weburl = 'http://www.ireadweek.com'

if __name__ == "__main__":
    # All side effects (file, DB connection, table setup) now live under the
    # __main__ guard instead of running at import time.
    # Output file for the scraped book list.
    localFile = open("./booklist.txt", "w")
    # MySQL connection: host, port, user, password, database, charset.
    # NOTE: pymysql expects the lowercase charset name 'utf8'; 'UTF8' is not
    # accepted by all versions.
    db = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                         passwd="123456", db="testdb", charset='utf8')
    cursor = db.cursor()
    try:
        # Drop and recreate the BOOK table so each run starts clean.
        cursor.execute("DROP TABLE IF EXISTS BOOK")
        # VARCHAR instead of CHAR: titles and urls vary in length, and
        # CHAR(30) would reject longer book names.
        cursor.execute("""CREATE TABLE BOOK(bookname VARCHAR(255) NOT NULL,
                       bookurl VARCHAR(255) NOT NULL)""")

        # One pooled HTTP client for all requests.
        http = urllib3.PoolManager()
        r = http.request('GET', weburl)
        # Decode once; regex over text, not the bytes repr str(r.data) gives.
        page = r.data.decode('utf-8', errors='ignore')
        # Relative links to book-detail pages; dots escaped to match '.'.
        for link, _ext in re.findall(r'(/index\.php/bookInfo[^\s]*?\.(html))',
                                     page):
            # Build the absolute detail-page URL and fetch it.
            downloadurl = weburl + link
            dr = http.request('GET', downloadurl)
            # Parse the fetched response body. (The original passed the URL
            # string to pq(), discarding the request it had just made.)
            doc = pq(dr.data)
            # Book title and download link, jQuery-style selectors.
            name = doc('.hanghang-za-title').eq(0).text()
            url = doc('.downloads').attr('href')
            # Append one line per book to the local file.
            localFile.write("book:" + name + " url: " + url + "\n")
            # Parameterized insert — safe against quoting issues in titles.
            try:
                cursor.execute("insert into BOOK VALUES(%s,%s)", (name, url))
                db.commit()
            except Exception as e:
                # Best-effort per row: report, roll back, keep scraping.
                print(e)
                db.rollback()
    finally:
        # Always release resources, even if scraping fails midway.
        # (The original never closed localFile at all.)
        cursor.close()
        db.close()
        localFile.close()
3、第三步
线程池加入