Scraping the Maoyan Top 100 with Python and saving the data to MySQL
MySQL setup
- Create the maoyan database — create database maoyan charset utf8;
- Switch to the maoyan database — use maoyan;
- Create the movies table (rank is a reserved word in MySQL 8.0+, so it is backquoted here) —
create table movies(
    `rank` varchar(100),
    imagesurl varchar(200),
    name varchar(100),
    star varchar(200),
    time varchar(50),
    score varchar(50)
) charset=utf8;
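If you prefer to do this setup from Python instead of the MySQL shell, a minimal sketch is shown below. It assumes PyMySQL is installed and a local MySQL server with the same root/root account used later in this article; the helper itself is not part of the original workflow.
import pymysql

# Create the maoyan database and the movies table from Python
# (assumes a local MySQL server, user root / password root).
conn = pymysql.connect(host="localhost", user="root", password="root", charset="utf8")
cur = conn.cursor()
cur.execute("create database if not exists maoyan charset utf8")
cur.execute("use maoyan")
cur.execute("""
    create table if not exists movies(
        `rank` varchar(100),
        imagesurl varchar(200),
        name varchar(100),
        star varchar(200),
        time varchar(50),
        score varchar(50)
    ) charset=utf8
""")
conn.commit()
cur.close()
conn.close()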
Python crawler
# Libraries used:
import re
import requests
import pymysql
import json
# Function that sends the request:
It raises an exception (and returns an empty string) if the status code is not 200.
def gethttptext(url):
    try:
        header = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                  "Cache-Control": "max-age=0",
                  "Connection": "keep-alive",
                  "Host": "maoyan.com",
                  "Referer": "http://maoyan.com/board",
                  "Upgrade-Insecure-Requests": "1",
                  "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36"}
        r = requests.get(url, headers=header, timeout=30)
        r.raise_for_status()              # raise for any non-200 status code
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        print("Request failed\n")
        return ""
# Function that parses the page:
It extracts the fields with a regular expression and yields each record as a dict.
def parsepage(html):
    try:
        pattern = re.compile(r'<dd>.*?board-index.*?>(\d*)</i>.*?data-src="(.*?)".*?name"><a'
                             r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                             r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
        items = re.findall(pattern, html)
        for information in items:
            dic = {
                'index': information[0],
                'image-url': information[1],
                'title': information[2],
                'star': information[3].strip()[3:],   # drop the "主演：" prefix
                'time': information[4].strip()[5:],   # drop the "上映时间：" prefix
                'score': information[5] + information[6],
            }
            yield dic
    except Exception:
        # print("Parsing failed")
        return
# Function that writes the data to a local txt file:
Each record is serialized with json.dumps before being appended.
def write_to_file(item):
    with open('./猫眼电影Top100.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n\n')
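Because each record is written as one JSON document followed by a blank line, reading the file back is straightforward. An illustrative snippet:
import json

# Read the records back: every non-empty line holds one JSON-encoded movie dict.
with open('./猫眼电影Top100.txt', encoding='utf-8') as f:
    movies = [json.loads(line) for line in f if line.strip()]
print(len(movies))    # should be 100 after a full crawl
print(movies[0])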
# Function that writes the data into MySQL:
It opens the database connection, runs the parameterized insert, and commits.
def write_to_mysql(item):
    db = pymysql.connect(host="localhost", user="root", password="root",
                         database="maoyan", charset="utf8")
    cursor = db.cursor()
    print(item)
    # `rank` is backquoted because it is a reserved word in MySQL 8.0+
    sql = "insert into movies(`rank`,imagesurl,name,star,time,score) values(%s,%s,%s,%s,%s,%s)"
    try:
        cursor.execute(sql, item)
        db.commit()
        print("insert succeeded")
    except Exception:
        db.rollback()
        print("insert failed")
    cursor.close()
    db.close()
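After the crawl finishes you can verify the inserts directly from Python. A minimal check, assuming the same connection parameters as above:
import pymysql

# Count the stored rows and show the first three, ordered by rank.
db = pymysql.connect(host="localhost", user="root", password="root",
                     database="maoyan", charset="utf8")
cursor = db.cursor()
cursor.execute("select count(*) from movies")
print(cursor.fetchone()[0])    # expect 100 after a full run
cursor.execute("select `rank`, name, score from movies order by cast(`rank` as unsigned) limit 3")
for row in cursor.fetchall():
    print(row)
cursor.close()
db.close()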
# Main function:
It converts each dict's values into a list (the dict is built in column order, so the values line up with the insert statement), writes each record to the file and the database, and walks through all ten pages via the offset parameter.
def main():
    start_url = "https://maoyan.com/board/4?offset="
    depth = 10
    for i in range(depth):
        url = start_url + str(10 * i)
        html = gethttptext(url)
        items = parsepage(html)
        for item in items:
            list_1 = list(item.values())
            write_to_file(item)
            write_to_mysql(list_1)

if __name__ == "__main__":
    main()
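Opening and closing a connection for every row works but is wasteful. One possible variation (a sketch, not part of the original script; write_page_to_mysql is a hypothetical helper) is to collect one page's records and insert them in a single batch with executemany:
# Sketch: insert one page of results in one batch instead of row by row.
def write_page_to_mysql(rows):
    db = pymysql.connect(host="localhost", user="root", password="root",
                         database="maoyan", charset="utf8")
    cursor = db.cursor()
    sql = "insert into movies(`rank`,imagesurl,name,star,time,score) values(%s,%s,%s,%s,%s,%s)"
    try:
        cursor.executemany(sql, rows)   # rows: list of 6-value tuples/lists
        db.commit()
    except Exception:
        db.rollback()
    finally:
        cursor.close()
        db.close()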
Complete source code
import re
import requests
import pymysql
import json
import time
def gethttptext(url):
    try:
        header = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                  "Cache-Control": "max-age=0",
                  "Connection": "keep-alive",
                  "Host": "maoyan.com",
                  "Referer": "http://maoyan.com/board",
                  "Upgrade-Insecure-Requests": "1",
                  "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36"}
        r = requests.get(url, headers=header, timeout=30)
        r.raise_for_status()              # raise for any non-200 status code
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        print("Request failed\n")
        return ""
def parsepage(html):
    try:
        pattern = re.compile(r'<dd>.*?board-index.*?>(\d*)</i>.*?data-src="(.*?)".*?name"><a'
                             r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                             r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
        items = re.findall(pattern, html)
        for information in items:
            dic = {
                'index': information[0],
                'image-url': information[1],
                'title': information[2],
                'star': information[3].strip()[3:],   # drop the "主演：" prefix
                'time': information[4].strip()[5:],   # drop the "上映时间：" prefix
                'score': information[5] + information[6],
            }
            yield dic
    except Exception:
        # print("Parsing failed")
        return
def write_to_file(item):
    with open('./猫眼电影Top100.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n\n')
def write_to_mysql(item):
    db = pymysql.connect(host="localhost", user="root", password="root",
                         database="maoyan", charset="utf8")
    cursor = db.cursor()
    print(item)
    # `rank` is backquoted because it is a reserved word in MySQL 8.0+
    sql = "insert into movies(`rank`,imagesurl,name,star,time,score) values(%s,%s,%s,%s,%s,%s)"
    try:
        cursor.execute(sql, item)
        db.commit()
        print("insert succeeded")
    except Exception:
        db.rollback()
        print("insert failed")
    cursor.close()
    db.close()
def main():
    start_url = "https://maoyan.com/board/4?offset="
    depth = 10
    for i in range(depth):
        url = start_url + str(10 * i)
        html = gethttptext(url)
        items = parsepage(html)
        for item in items:
            list_1 = list(item.values())
            write_to_file(item)
            write_to_mysql(list_1)
        time.sleep(1)   # brief pause between pages to avoid hammering the site

if __name__ == "__main__":
    main()