将数据爬取到mysql中
#@Time : 2021/5/1117:17
#@File : ConnectMysql.py
import pymysql
# def creatTable():
# #创建连接
# conn = pymysql.connect(host="localhost",
# port=3306,
# user="root",
# passwd="Ys04050016",
# database="movies",
# charset="utf8")
#
# cursor = conn.cursor()
# sql='''
# create table movie250
# (
# id integer primary key AUTO_INCREMENT,
# info_link text,
# pic_link text,
# cname varchar(255),
# ename varchar(255),
# score numeric,
# rated numeric,
# introduction text,
# info text
# )
# '''
# cursor.execute(sql)
# conn.commit()
# cursor.close()
# conn.close()
def connect():
    """Open and return a fresh pymysql connection to the local `movies` database.

    The caller owns the returned connection and must close it (see `close`).
    """
    return pymysql.connect(
        host="localhost",
        port=3306,
        user="root",
        passwd="Ys04050016",
        database="movies",
        charset="utf8",
    )
def close(conn, cursor):
    """Release both database handles — cursor first, then the connection."""
    for handle in (cursor, conn):
        handle.close()
def creatTable():
    """Create the `movie250` table in the `movies` database if it is missing.

    Uses CREATE TABLE IF NOT EXISTS because `init_db` invokes this on every
    scrape; the original plain CREATE TABLE raised an "already exists" error
    on any run after the first.
    """
    conn = connect()
    cursor = conn.cursor()
    sql = '''
        create table if not exists movie250
        (
        id integer primary key AUTO_INCREMENT,
        info_link text,
        pic_link text,
        cname varchar(255),
        ename varchar(255),
        score numeric,
        rated numeric,
        introduction text,
        info text
        )
    '''
    cursor.execute(sql)
    conn.commit()
    close(conn, cursor)
#@Time : 2021/5/522:10
#@File : spiderToDataBase.py
#@Time : 2021/5/214:40
#@File : spider.py
from bs4 import BeautifulSoup
import re
import urllib.request,urllib.error
import xlwt
import _sqlite3
import ConnectMysql
# Regex for the movie detail-page URL inside an item's <a> tag
findLink=re.compile(r'<a href="(.*?)">')  # raw string so the pattern is not mis-escaped
# Poster image URL; re.S lets '.' also match newline characters
findImgSrc=re.compile(r'<img.*src="(.*?)"',re.S)
# Movie title (Chinese title, and optionally a second foreign title)
findTitle=re.compile(r'<span class="title">(.*)</span>')
# Average rating score
findRating=re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# Number of people who rated the movie
findJudge=re.compile(r'<span>(\d*)人评价</span>')
# One-line movie summary (may be absent on some entries)
findInq=re.compile(r'<span class="inq">(.*)</span>')
# Movie description block (director/cast/year); re.S spans multiple lines
findBd=re.compile(r'<p class="">(.*?)</p>',re.S)
def main():
    """Crawl the Douban Top-250 movie list and persist every entry to MySQL."""
    base_url = "https://movie.douban.com/top250?start="
    # 1+2: fetch and parse all pages, then 3: save the rows to the database.
    saveDataToDB(getData(base_url))
#爬取网页
def getData(baseUrl):
    """Scrape all 10 pages (250 movies) of the Douban Top-250 list.

    Args:
        baseUrl: URL prefix; the 0-based item offset (page * 25) is appended.

    Returns:
        A list of 8-element lists per movie:
        [detail_link, img_src, cname, ename, rating, judge_count, inq, bd].
    """
    dataList = []
    for page in range(10):
        html = askURL(baseUrl + str(page * 25))
        soup = BeautifulSoup(html, "html.parser")
        # Each movie entry lives in a <div class="item"> element.
        for item in soup.find_all('div', class_="item"):
            dataList.append(_parseItem(str(item)))
    return dataList


def _parseItem(item):
    """Extract one movie's fields from the raw HTML of a single item div."""
    data = []
    data.append(re.findall(findLink, item)[0])    # detail-page link
    data.append(re.findall(findImgSrc, item)[0])  # poster image URL
    # First title is the Chinese one; a second (foreign) title is optional.
    titles = re.findall(findTitle, item)
    if len(titles) == 2:
        data.append(titles[0])
        data.append(titles[1].replace("/", ""))
    else:
        data.append(titles[0])
        data.append("")  # no foreign title — keep the column aligned
    data.append(re.findall(findRating, item)[0])
    data.append(re.findall(findJudge, item)[0])
    # The one-line summary may be absent on some entries.
    inq = re.findall(findInq, item)
    data.append(inq[0] if inq else " ")
    bd = re.findall(findBd, item)[0]
    # BUGFIX: the original pattern '<br(1s+)?/>(ls+)?' used literal '1s'/'ls'
    # where '\s' (whitespace) was intended, so '<br />' tags were left in the
    # stored text. Strip <br/> tags (with optional surrounding whitespace).
    bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)
    bd = re.sub('/', " ", bd)  # drop the '/' field separators
    data.append(bd.strip())
    return data
#指定一个URL网页内容
def askURL(url):
    """Fetch one page and return its HTML decoded as UTF-8 ("" on failure).

    Network errors are printed rather than raised, so the caller can continue
    with an empty page. The response is closed via a context manager — the
    original leaked the open HTTP response.
    """
    head = {
        # No spaces are allowed inside the User-Agent header token!
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0;WOW64) AppleWebKit / 537.36(KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36 SLBrowser/7.0.0.4071 SLBChan/21"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
#保存数据
def saveDataToDB(datalist):
    """Insert the scraped movie rows into the `movie250` MySQL table.

    Args:
        datalist: list of 8-element rows in the order
            (info_link, pic_link, cname, ename, score, rated, introduction, info).
    """
    init_db()
    conn = ConnectMysql.connect()
    cur = conn.cursor()
    # Parameterized query: the driver quotes/escapes every value itself.
    # The original built the statement with string formatting plus manual
    # '"..."' wrapping, which broke on any value containing a double quote
    # and was an SQL-injection hole.
    sql = '''
        insert into movie250
        (info_link, pic_link, cname, ename, score, rated, introduction, info)
        values (%s, %s, %s, %s, %s, %s, %s, %s)
    '''
    for data in datalist:
        cur.execute(sql, data)
    conn.commit()
    ConnectMysql.close(conn, cur)
#创建数据库
def init_db():
    """Ensure the target table exists (delegates to ConnectMysql.creatTable)."""
    ConnectMysql.creatTable()
# Script entry point: run the full crawl, then report completion.
if __name__ =="__main__":
    main()
    print("爬取完毕!")  # "Crawl finished!"
其中有个小问题:那个 inq 是 re.findall 返回的列表,所以添加的时候要写 inq[0](且列表可能为空,需要先判断)。
改了挺长时间的 bug,终于成功了 (▽)