Sharing code typed along while learning Python from books.
The first book: "A Byte of Python"; code notes link: ByteOfPython notes code, which also includes a PDF of the book.
The second book: "Web Scraping with Python" (《Python网络数据采集》); PDF resource link: https://pan.baidu.com/s/1eSq6x5g password: a46q
This post gives the code notes for Chapter 5 of "Web Scraping with Python", Storing Data, for reference.
Chapter 5: Storing Data
# -*- coding: utf-8 -*-
# ############# Storing data
from urllib.error import HTTPError
from urllib.request import urlopen
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
import urllib
import os
import json
import re
import datetime
import random
import csv
# ## urllib.request.urlretrieve downloads a file from its URL
html=urlopen("https://github.com")
bshtml=BeautifulSoup(html,"html.parser")
logo=bshtml.find("link",{"rel":"fluid-icon"}).attrs["href"]
urlretrieve(logo,"gitCat.jpg")
print(logo)
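# ## Side note: urlretrieve also accepts an optional reporthook callback, called
# ## with (blocks transferred so far, block size, total file size). A minimal
# ## progress sketch; the percentage formatting is my own, not from the book:
def reportProgress(blockNum, blockSize, totalSize):
    if totalSize > 0:
        percent = min(100.0, blockNum * blockSize * 100.0 / totalSize)
        print("downloaded {0:.1f}%".format(percent))

urlretrieve(logo, "gitCat.jpg", reportProgress)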
# ### Download every file referenced by a "src" attribute
downLoadDirectory = "downloaded"
baseUrl = "http://pythonscraping.com"

# Normalize a src value into an absolute URL on the target site,
# or return None when it points to another site.
def getAllSrcFile(baseUrl, source):
    if source.startswith("http://www."):
        url = "http://" + source[11:]
    elif source.startswith("http://"):
        url = source
    elif source.startswith("www."):
        url = "http://" + source[4:]
    else:
        url = baseUrl + "/" + source
    if baseUrl not in url:  # check the normalized URL, not the raw source
        return None
    return url
# Map an absolute URL to a local path under downLoadDirectory,
# creating any missing directories along the way.
def getDownloadPath(baseUrl, absoluteUrl, downLoadDirectory):
    print(absoluteUrl)
    path = absoluteUrl.replace("www.", "")
    path = path.replace(baseUrl, "")
    path = downLoadDirectory + path
    print("path: {0}".format(path))
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        print(directory)
        os.makedirs(directory)
    return path
html = urlopen("http://www.pythonscraping.com")
bsObj = BeautifulSoup(html,"html.parser")
print(bsObj)
downloadList = bsObj.findAll(src=True)
for download in downloadList:
fileUrl = getAllSrcFile(baseUrl, download["src"])
if fileUrl is not None:
print(fileUrl)
urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downLoadDirectory))
else:
print("None")
# ## Fetch an HTML table and write it to a CSV file
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://en.wikipedia.org/wiki/Comparison_of_text_editors")
bsObj = BeautifulSoup(html,"html.parser")
# # 主对比表格是当前页面上的第一个表格
table = bsObj.findAll("table",{"class":"wikitable"})[0]
rows = table.findAll("tr")
csvFile = open("files/editors.csv", 'wt',newline='',encoding='utf-8')
writer = csv.writer(csvFile)
try:
for row in rows:
csvRow = []
for cell in row.findAll(['td', 'th']):
csvRow.append(cell.get_text())
writer.writerow(csvRow)
finally:
csvFile.close()
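# ## A quick sanity check (my own sketch, not from the book): re-read
# ## files/editors.csv and print the header row plus the data row count.
with open("files/editors.csv", "r", newline="", encoding="utf-8") as checkFile:
    savedRows = list(csv.reader(checkFile))
print(savedRows[0])
print("{0} data rows written".format(len(savedRows) - 1))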
# ## Connect to a MySQL database with pymysql
import pymysql
conn = pymysql.connect(host="localhost", user="root", passwd="123456", db="pymysql", charset='utf8')
conn2 = pymysql.connect(host="127.0.0.1", user="root", passwd="123456", db="pymysql", charset='utf8')
# create the cursors outside try so the finally block can always close them
cur = conn.cursor()
cur2 = conn2.cursor()
try:
    # execute() runs the SQL statement and returns the number of affected rows
    reCount = cur.execute('select * from user;')
    reCount2 = cur2.execute('select name, phone from user where id in (1, 2)')
    # fetchone() returns a single row
    # data = cur.fetchone()
    # fetchall() returns all remaining rows
    data = cur.fetchall()
    data2 = cur2.fetchall()
    # commit the transaction
    conn.commit()
    conn2.commit()
except:
    # roll back on any error
    conn.rollback()
    conn2.rollback()
finally:
    # close the cursors
    cur.close()
    cur2.close()
    # close the connections
    conn.close()
    conn2.close()
print(reCount)
print(data)
print(data2)
for user in data2:
    print("{0}'s phone number is {1}".format(user[0], user[1]))
# ## Scrape web data and store it in MySQL
import pymysql

def getConnect(user, passwd, host="localhost", db="pymysql"):
    conn = pymysql.connect(user=user, passwd=passwd, host=host, db=db, charset="utf8")
    return conn

conn = getConnect("root", "123456")
cur = conn.cursor()

# Insert one (value1, value2) pair; the values are passed as query parameters so
# the driver quotes them, and only the table/column names are formatted into the SQL
def insertSql(value1, value2, table="user", arg1="name", arg2="phone"):
    try:
        cur.execute("insert into {0} ({1}, {2}) values (%s, %s)".format(table, arg1, arg2),
                    (value1, value2))
        conn.commit()
        print("Stored successfully")
    except:
        conn.rollback()
        print("Store failed")

# Store the article's title and first paragraph, then return its internal
# /wiki/ links (skipping hrefs containing ":" or "%")
def getLinks(articleUrl):
    html = urlopen("http://en.wikipedia.org{0}".format(articleUrl))
    bshtml = BeautifulSoup(html, "html.parser")
    title = bshtml.find("h1").get_text()
    content = bshtml.find("div", {"id": "mw-content-text"}).find("p").get_text()
    insertSql(title, content, "wiki_pages", "title", "content")
    return bshtml.find("div", {"id": "mw-content-text"}).findAll(
        "a", href=re.compile("^(/wiki/)((?!:)(?!%).)*$"))

links = getLinks("/wiki/Kevin_Bacon")
try:
    # random walk: keep following a random internal link from the current page
    while len(links) > 0:
        newArticleUrl = links[random.randint(0, len(links) - 1)].attrs["href"]
        print(newArticleUrl)
        links = getLinks(newArticleUrl)
except:
    print("_ _ _ _ _ failed")
finally:
    cur.close()
    conn.close()
    print("Cursor closed _ _ _ connection closed _ _ _")
Chapter 6 notes code: Reading Documents