一时兴起,学着写了个python爬虫,记录一下。
我使用的python版本是v3.6.1,主要用到了Beautiful Soup库,版本是v4.5.3。爬的是古诗文网(如果古诗文网的小伙伴介意,在此表示抱歉)
代码如下:
#!/usr/bin/python
import requests
import pymysql
import time
from bs4 import BeautifulSoup
def insertDb(db, title, content):
    """Insert one scraped poem into the POETRY table.

    Args:
        db: an open pymysql connection; the caller owns commit()/close().
        title: poem title text.
        content: poem body text.

    The SOURCE column is the integer site id (1 = gushiwen).
    """
    # Create a cursor object with the cursor() method; close it even on error.
    cursor = db.cursor()
    try:
        create_at = time.strftime("%Y%m%d%H%M%S", time.localtime())
        # Parameterized query (PEP 249 style): the previous %-formatted SQL
        # broke on any quote inside the scraped text and was SQL-injectable;
        # it also quoted '%d', storing SOURCE as the string '1'.
        sql = ("INSERT INTO POETRY (TITLE, CONTENT, CREATE_AT, SOURCE) "
               "VALUES (%s, %s, %s, %s)")
        cursor.execute(sql, (title, content, create_at, 1))
    finally:
        cursor.close()
# Open the database connection (pymysql does not autocommit by default,
# so every insert must be committed explicitly below).
db = pymysql.connect(host="localhost", user="root", passwd="", db="test", charset="utf8")

# Loop-invariant request headers — build once, not per iteration.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}

try:
    # Page ids 1..999, same range as the original while-loop counter.
    for n in range(1, 1000):
        web_url = "http://so.gushiwen.org/view_%s.aspx" % (n)
        r = requests.get(web_url, headers=headers)
        # Parse the page once; the original built two BeautifulSoup trees
        # from the same r.text, doubling the parse cost per page.
        soup = BeautifulSoup(r.text, 'lxml')
        title = soup.find('h1')
        content = soup.find('div', id="cont")
        # Skip pages missing either element: the original checked only
        # title, so a missing content div crashed on content.text.
        if not title or not content:
            continue
        insertDb(db, title.text, content.text)
        # Commit in batches of 50 and pause briefly to be polite to the site.
        if n % 50 == 0:
            print(n)
            db.commit()
            time.sleep(1)
finally:
    # Commit the final partial batch — the original closed without a last
    # commit, silently losing every row after the last multiple of 50 —
    # and always release the connection, even if a request raised.
    db.commit()
    db.close()