爬取百度翻译:
# 局部请求(AJAX):在浏览器开发者工具中通过 Network -> XHR 过滤器定位该接口
import requests
import json
import os

# Baidu Translate "sug" suggestion endpoint (the partial/AJAX request found
# via the browser devtools Network -> XHR filter). It answers with JSON.
post_url = 'https://fanyi.baidu.com/sug'  # e.g. not the full /v2transapi?from=en&to=zh endpoint
headers = {
    # Present a regular browser UA so the endpoint accepts the request.
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0'
}
word = input('请输入要翻译的英文:')
data = {
    'kw': word  # the query term, e.g. 'cap'
}
response = requests.post(url=post_url, data=data, headers=headers)
# Fail fast on HTTP errors instead of trying to parse an error page as JSON.
response.raise_for_status()
# The response is declared Content-Type: application/json.
dic_obj = response.json()
# Bug fix: create the output directory first — otherwise open() raises
# FileNotFoundError on the very first run.
os.makedirs('./百度翻译', exist_ok=True)
with open(f'./百度翻译/{word}.json', 'w', encoding='utf-8') as f:
    # ensure_ascii=False keeps the Chinese text human-readable in the file.
    json.dump(dic_obj, fp=f, ensure_ascii=False)
print('over!')
爬取邮电出版社:
访问人民邮电出版社官网:http://www.ptpress.com.cn
任选一种方法,爬取“期刊”类别中“学术类”的期刊信息。
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

# Scrape the journal titles from ptpress.com.cn and save them to journal.txt.
url = 'http://www.ptpress.com.cn'
browse = webdriver.Chrome()
try:
    browse.get(url)
    time.sleep(3)  # crude wait for the page to render (WebDriverWait would be more robust)
    # Click the "期刊" (journals) navigation link. find_element raises a clear
    # NoSuchElementException when the link is absent, unlike the original
    # find_elements(...)[0], which failed with an uninformative IndexError.
    browse.find_element(By.LINK_TEXT, '期刊').click()
    time.sleep(3)
    books = []
    # Collect every <p> under the journal-list container via an XPath query.
    for node in browse.find_elements(
            by=By.XPATH, value='//*[@id="per_content"]/div[1]/div//p'):
        print(node.text)         # echo each entry as it is scraped
        books.append(node.text)  # keep it for the file dump below
    print(books)
    # Persist the scraped journal names, one per line.
    with open('journal.txt', 'w', encoding='utf-8') as f:
        for title in books:
            f.write(title)
            f.write('\n')
finally:
    # Bug fix: always shut the browser down — the original leaked a
    # chromedriver/Chrome process on every run (and on any exception).
    browse.quit()