# Plan:
# 1. Fetch the page source with the requests library.
# 2. Walk the parse tree with BeautifulSoup to extract the needed data.
# 3. Print the results as an aligned table using str.format.
import requests
from bs4 import BeautifulSoup
# 2020 "Best Chinese Universities" ranking page (re-assigned with the same
# value under __main__ below; kept here so the module exposes it too).
url = "http://www.zuihaodaxue.com/zuihaodaxuepaiming2020.html"
def getHTMLText(url):
    """Fetch *url* and return the response body as decoded text.

    Returns "" on any network or HTTP error so callers can degrade
    gracefully instead of crashing.
    """
    try:
        r = requests.get(url, timeout=20)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # The page's declared charset is unreliable; guess from the content.
        r.encoding = r.apparent_encoding
        return r.text
    # Narrowed from a bare `except:` — only network/HTTP failures should be
    # swallowed, not KeyboardInterrupt/SystemExit or programming errors.
    except requests.RequestException:
        return ""
def fillUnivList(ulist, html):
    """Parse *html* and append [rank, name, province, score] rows to *ulist*.

    Leaves *ulist* untouched when *html* is empty or has no <tbody>
    (e.g. the fetch failed and getHTMLText returned ""), instead of
    raising AttributeError on `None.children`.
    """
    import bs4  # needed for the bs4.element.Tag type check below
    soup = BeautifulSoup(html, "html.parser")
    tbody = soup.find('tbody')  # every school row is a <tr> under <tbody>
    if tbody is None:  # empty or malformed page: nothing to parse
        return
    for tr in tbody.children:
        # <tbody>.children also yields NavigableString whitespace nodes;
        # only real <tr> tags carry data.
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')  # all <td> cells of this row
            # Column layout in the page: 0 rank, 1 name, 2 province,
            # 4 total score (column 3 is skipped on purpose).
            ulist.append([tds[0].string, tds[1].string, tds[2].string, tds[4].string])
def printUnivList(ulist, num):
    """Print the first *num* rows of *ulist* as an aligned table.

    *num* is clamped to len(ulist), so a failed or partial fetch can no
    longer raise IndexError. chr(12288) (the fullwidth CJK space) is used
    as the fill character so columns of Chinese text line up; {4} in the
    template refers to that fill argument.
    """
    tplt = "{0:{4}^10}\t{1:{4}<10}\t{2:{4}<10}\t{3:{4}<10}"
    print(tplt.format("排名", "校名", "地址", "总分", chr(12288)))
    count = min(num, len(ulist))  # robustness: never index past the data
    for i in range(count):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], u[3], chr(12288)))
    print("Suc" + str(count))
if __name__ == '__main__':
    # Crawl the 2020 ranking page and show the top `num` universities.
    uinfo = []   # rows filled in-place by fillUnivList
    num = 30     # how many universities to display
    url = "http://www.zuihaodaxue.com/zuihaodaxuepaiming2020.html"
    # Fetch and parse in one step; fillUnivList mutates uinfo.
    fillUnivList(uinfo, getHTMLText(url))
    printUnivList(uinfo, num)
# Notes on str.format output:
#   chr(12288) is the fullwidth (CJK) space character, used here as the
#   padding/fill character so Chinese text aligns correctly.
#   Reference: https://blog.csdn.net/a1007720052/article/details/81354130
#   ^  center-align, followed by the field width
#   <  left-align, followed by the field width
#   >  right-align
#   The fill character (a single character) goes right after the ":"; the
#   default fill is an ASCII space, while chr(12288) gives a CJK-width space.
#   Example:
#       print("{1:*>20}\n{0:{2}<20}".format("apple", "panda", chr(12288)))
#   Output:
#       ***************panda
#       apple  (followed by fifteen fullwidth spaces)