# 废话不多说，直接上代码 (Without further ado, straight to the code)
import time
import csv
from selenium import webdriver
import string
import zipfile
from lxml import etree
# Proxy server: Abuyun dynamic HTTP proxy endpoint.
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# Proxy tunnel authentication credentials (masked with xxxx for publication).
proxyUser = "HD2A47190U2xxxx"
proxyPass = "CB1FDB9303ABxxxx"
def create_proxy_auth_extension(proxy_host, proxy_port,
                                proxy_username, proxy_password,
                                scheme='http', plugin_path=None):
    """Generate a Chrome extension (.zip) that configures an authenticated proxy.

    Chrome has no command-line switch for proxy credentials, so the usual
    workaround is a throwaway extension: background.js sets the proxy via
    chrome.proxy and answers the HTTP auth challenge through
    chrome.webRequest.onAuthRequired.

    :param proxy_host: proxy server host name
    :param proxy_port: proxy server port (string or int; substituted into JS)
    :param proxy_username: proxy tunnel user name
    :param proxy_password: proxy tunnel password
    :param scheme: proxy scheme, ``'http'`` by default
    :param plugin_path: output zip path; derived from the credentials when None
    :return: path of the generated extension zip
    """
    if plugin_path is None:
        plugin_path = '{}_{}@http-dyn.abuyun.com_9020.zip'.format(
            proxy_username, proxy_password)

    manifest_json = """
    {
        "version": "1.0.0",
        "manifest_version": 2,
        "name": "Abuyun Proxy",
        "permissions": [
            "proxy",
            "tabs",
            "unlimitedStorage",
            "storage",
            "<all_urls>",
            "webRequest",
            "webRequestBlocking"
        ],
        "background": {
            "scripts": ["background.js"]
        },
        "minimum_chrome_version":"22.0.0"
    }
    """

    # string.Template (not str.format) because the JS below is full of literal
    # braces; ${...} placeholders avoid having to escape every one of them.
    background_js = string.Template(
        """
        var config = {
            mode: "fixed_servers",
            rules: {
                singleProxy: {
                    scheme: "${scheme}",
                    host: "${host}",
                    port: parseInt(${port})
                },
                bypassList: ["foobar.com"]
            }
        };
        chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});
        function callbackFn(details) {
            return {
                authCredentials: {
                    username: "${username}",
                    password: "${password}"
                }
            };
        }
        chrome.webRequest.onAuthRequired.addListener(
            callbackFn,
            {urls: ["<all_urls>"]},
            ['blocking']
        );
        """
    ).substitute(
        host=proxy_host,
        port=proxy_port,
        username=proxy_username,
        password=proxy_password,
        scheme=scheme,
    )

    with zipfile.ZipFile(plugin_path, 'w') as zp:
        zp.writestr("manifest.json", manifest_json)
        zp.writestr("background.js", background_js)
    return plugin_path
# Build the proxy-auth extension zip and wire it into a Chrome session.
proxy_auth_plugin_path = create_proxy_auth_extension(
proxy_host=proxyHost,
proxy_port=proxyPort,
proxy_username=proxyUser,
proxy_password=proxyPass)
option = webdriver.ChromeOptions()
# Content-settings value 2 = block; disable images and stylesheets to
# speed up page loads (we only need the DOM text for scraping).
prefs = {
'profile.managed_default_content_settings.images': 2,
'permissions.default.stylesheet': 2
}
option.add_argument("--start-maximized")
option.add_extension(proxy_auth_plugin_path)
option.add_experimental_option('prefs', prefs)
# Local chromedriver binary path — adjust for your machine.
path = '/Users/lin/Desktop/demo/chromedriver'
driver = webdriver.Chrome(executable_path=path,chrome_options=option)
# Open Lagou's home page and wait for it to render.
link = 'https://www.lagou.com/'
driver.get(link)
time.sleep(5)
# Type the search keyword "python" into the search box.
driver.find_element_by_id("search_input").send_keys("python")
# Click the search button.
driver.find_element_by_id("search_button").click()
time.sleep(5)
# Hand the rendered HTML to lxml for XPath extraction (faster than
# locating every field through Selenium).
pageSource = driver.page_source
et = etree.HTML(pageSource)
# One <li> per job listing in the result list.
list_li = et.xpath('//*[@id="s_position_list"]/ul/li')
# Collect one row per job listing.  The positional XPath indices below follow
# Lagou's s_position_list markup at the time of writing — they will break if
# the site's layout changes.
list_csvs = []
for item in list_li:
    headline = item.xpath('./div[1]/div[1]/div/a/h3/text()')[0]  # job title
    place = item.xpath('./div[1]/div[1]/div[1]/a/span/em/text()')[0]  # location
    company = item.xpath('./div[1]/div[2]/div[1]/a/text()')[0]  # company name
    pay = item.xpath('./div[1]/div[1]/div[2]/div/span/text()')[0]  # salary
    # Experience and required degree share a single "3-5年 / 本科"-style
    # text node; split on '/' and strip layout whitespace from the degree.
    experience_background = item.xpath('./div[1]/div[1]/div[2]/div/text()[3]')
    experience = experience_background[0].split('/')[0]  # experience
    background = experience_background[0].split('/')[1].replace('\t', '').replace(' ', '').replace('\n', '')  # degree
    release = item.xpath('./div[1]/div[1]/div[1]/span/text()')[0]  # post date
    list_csvs.append([headline, place, company, pay, experience, background, release])
# Append the scraped rows to test.csv.  newline='' stops the csv module from
# emitting blank lines between rows on Windows; utf-8-sig lets Excel detect
# the encoding of the Chinese headers.
with open("test.csv", "a", newline='', encoding='utf-8-sig') as csvfile:
    writer = csv.writer(csvfile)
    # Header row — order matches the row layout built above:
    # [headline, place, company, pay, experience, background, release]
    writer.writerow(["标题", "地点", "公司名称", "薪资", "经验", "学历", "发布时间"])
    # writerows writes all data rows in one call.
    writer.writerows(list_csvs)
# 结果 (result):