# Crawler mini-project: scrapes campus news data from the official website
# of Chongqing City Management College (www.cswu.cn).
import requests
from lxml import etree
import pymysql
def crawl_url(url):
    """Fetch one news-list page and insert its (title, author, date) rows.

    Relies on the module-level ``cursor`` created in ``__main__``; the caller
    is responsible for committing the transaction and closing the connection.

    :param url: absolute URL of a news-list page (e.g. .../34/list1.htm)
    :raises requests.RequestException: on network failure or timeout
    """
    print("begin")
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
    # Timeout so a stalled server cannot hang the whole crawl indefinitely.
    result = requests.get(url, headers=headers, timeout=10).text
    html = etree.HTML(result)
    titles = html.xpath('//*[@id="wp_news_w6"]/ul/li/div[1]/span[2]/a/text()')
    authors = html.xpath('//*[@id="wp_news_w6"]/ul/li/span[1]/text()')
    adates = html.xpath('//*[@id="wp_news_w6"]/ul/li/div[2]/span/text()')
    print(titles, authors, adates)
    # Parameterized query: scraped titles may contain quotes, which would
    # break (or inject into) a %-formatted SQL string. Let the driver quote.
    sql = "insert into aaa(title,author,adate) values (%s,%s,%s)"
    for row in zip(titles, authors, adates):
        print(sql, row)
        cursor.execute(sql, row)
if __name__ == '__main__':
    # NOTE(review): hard-coded root credentials with an empty password —
    # move to a config file or environment variables.
    con = pymysql.connect(host='localhost', user='root', password='', db='mydbb', charset='utf8')
    cursor = con.cursor()
    try:
        # The site paginates the news list as list1.htm .. list10.htm.
        for i in range(1, 11):
            url = 'http://www.cswu.cn/34/list' + str(i) + '.htm'
            crawl_url(url)
        con.commit()
    finally:
        # Always release DB resources, even if a page fails mid-crawl;
        # the original leaked both cursor and connection on any exception.
        cursor.close()
        print("关闭成功1")
        con.close()
        print("关闭成功2")