import requests
from bs4 import BeautifulSoup
# URL of the login page (placeholder -- replace with the real endpoint).
LOGIN_URL = 'login-url'
# Page that requires an authenticated session (placeholder).
TARGET_URL = 'https://target-url'
# Credentials submitted with the login form.
PAYload = {
    "username": "1",
    "password": "2",
}


def parse_issue_tables(html):
    """Extract per-row cell data from every <table> in *html*.

    Returns a list of dicts, one per <tr> that contains classed <td>
    cells, mapping each cell's first CSS class name to its text.
    Rows that yield no data (e.g. <th>-only header rows) are skipped.
    """
    soup = BeautifulSoup(html, 'html.parser')
    issue_list = []
    for table in soup.find_all('table'):
        for tr in table.find_all('tr'):
            row = {}
            for td in tr.find_all('td'):
                # td.get('class') avoids the KeyError the original raised
                # for cells that carry no class attribute.
                classes = td.get('class')
                if classes:
                    row[classes[0]] = td.text
            if row:
                issue_list.append(row)
    return issue_list


def main():
    """Log in, fetch the protected page, and print the parsed tables."""
    # A Session keeps cookies, so the login state carries across requests.
    session = requests.Session()
    # timeout keeps the script from hanging forever on a dead server.
    login_response = session.post(LOGIN_URL, data=PAYload, timeout=10)
    # Success is inferred from the response body; adapt to the real site
    # (checking a status code or redirect is often more reliable).
    if 'Welcome' in login_response.text:
        print('登录成功')
    else:
        print('登录失败')
    response = session.get(TARGET_URL, timeout=10)
    if response.status_code == 200:
        print(parse_issue_tables(response.text))
    else:
        print('无法访问目标页面')


if __name__ == '__main__':
    main()
# Python crawler: simulate a website login and fetch information from a
# specified page. (First published 2023-09-14 15:04:22.)