题目:
对TXT文件中的每一条URL进行域名截取并记录其出现的次数,如:http://www.baidu.com,最后将统计结果进行存储
def cut(rows):
    """Extract the scheme + domain (e.g. "http://www.baidu.com") from a line.

    The original implementation spliced the string around the literals
    "http" and "com/": it only worked for URLs that contained "com/" with
    a trailing slash, and for any other input (non-.com domains, or a URL
    with no path) it appended a spurious "com".  A regex — which the
    original docstring already suggested — extracts the scheme and host
    directly and works for any TLD.

    Args:
        rows: one line of text expected to contain an http(s) URL
              (callers pre-check with ``rows.find("http") != -1``).

    Returns:
        str: scheme + host with no path, e.g. "http://www.baidu.com",
        or None when the line mentions "http" but holds no real URL
        (the original returned garbage in that case; None is countable
        downstream without crashing).
    """
    import re  # local import: the file has no top-level import block to extend
    match = re.search(r"https?://[^/\s]+", rows)
    return match.group(0) if match else None
# Check whether the element already exists in listHttp_dict and count it.
def elementDecide(contect):
    """Increment the occurrence count for *contect* in ``listHttp_dict``.

    Relies on the module-level ``listHttp_dict`` created in the
    ``__main__`` guard (a tally of domain -> occurrence count).

    Args:
        contect: the extracted domain string produced by ``cut()``.
    """
    # dict.get with a default collapses the original membership test,
    # redundant int() round-trip, and two-branch update (plus a dangling
    # `pass`) into the single idiomatic counter increment.
    listHttp_dict[contect] = listHttp_dict.get(contect, 0) + 1
def openFind(path="/Users/admin/Desktop/Host.txt"):
    """Read *path* line by line and tally the domain of every URL found.

    For each line containing "http", the scheme+domain is extracted with
    ``cut()`` and counted into the global ``listHttp_dict`` via
    ``elementDecide()``; other lines are reported as invalid.

    Args:
        path: text file with one URL per line.  Defaults to the
              original hard-coded path for backward compatibility.

    Returns:
        dict: the global ``listHttp_dict`` on success, or ``False`` if
        the file could not be read (mirrors the original contract).
    """
    try:
        # `with` guarantees the handle is closed (the original leaked it),
        # and iterating the file object streams line by line instead of
        # materializing everything with readlines().  The original
        # `if i in range(len(ss))` guard was always True and is dropped.
        with open(path, 'r') as fh:
            for rows in fh:
                if rows.find("http") != -1:
                    elementDecide(cut(rows))
                else:
                    print("不是正确的url")
        return listHttp_dict
    except OSError:
        # Narrowed from a bare `except:` so programming errors are no
        # longer silently swallowed; file-access failures still return
        # False as before.
        return False
if __name__ == '__main__':
    # Module-level tally consumed by elementDecide()/openFind():
    # maps extracted domain -> occurrence count.
    listHttp_dict = dict()
    print(openFind())
    # Persist the counts; `with` flushes and closes the output file
    # (the original never closed the handle, risking lost data).
    url_file = "/Users/admin/Desktop/url.txt"
    with open(url_file, 'w') as fileObject:
        fileObject.write(str(listHttp_dict))