Fetching URLs

import re

import requests
from bs4 import BeautifulSoup


class Get_links(object):
    def __init__(self):
        self.url = 'http://www.jianfeiketang.com'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36",
        }

    def parse(self):
        # Fetch the page and collect the href of every <a> tag
        response = requests.get(url=self.url, headers=self.headers)
        html = response.content.decode()

        links = []
        soup = BeautifulSoup(html, "html.parser")
        url_list = soup.find_all('a')
        for link in url_list:
            href = link.get('href')
            if href:  # skip <a> tags without an href attribute
                links.append(href)
        return links

    def save(self, links):
        # Append every collected link to the raw result file
        with open('../result/jf.txt', 'a', encoding='utf-8') as f:
            for link in links:
                f.write(link + '\n')
        print('Saved successfully...')

    def guolv(self):
        # Filter: keep only well-formed http/https URLs from the raw file
        with open('../result/jf.txt', 'r', encoding='utf-8') as f:
            lines = f.readlines()
        with open('./jfresult.txt', 'a', encoding='utf-8') as out:
            for line in lines:  # process each line of raw data
                urls = re.findall(
                    r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
                    line)
                for url in urls:
                    out.write(url + '\n')
        print('Cleaning finished...')


if __name__ == '__main__':
    get_links = Get_links()
    links = get_links.parse()
    get_links.save(links)
    get_links.guolv()
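
Note that link.get('href') often returns relative paths such as /about.html, which the regex in guolv silently drops because it only matches absolute http/https URLs. Below is a minimal sketch of resolving relative paths against the start URL with urllib.parse.urljoin before saving; the normalize helper is hypothetical and not part of the original script.

from urllib.parse import urljoin

def normalize(base, hrefs):
    # Resolve relative hrefs against the base URL; skip empty values
    return [urljoin(base, h) for h in hrefs if h]

# Example:
# normalize('http://www.jianfeiketang.com', ['/about.html', 'http://example.com'])
# -> ['http://www.jianfeiketang.com/about.html', 'http://example.com']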

Reposted from: https://www.cnblogs.com/victorstudy/p/11425895.html
