import requests
import re
import json
from lxml import etree
from MySQLbao import MysqlHelper
# Database helper used to persist the scraped job records.
myhelper = MysqlHelper()
# Parameterized INSERT for the `lagouwang` table
# (company, salary, experience, education, job description).
sql = "insert into lagouwang (company,salary,jingyan,xueli,description) values (%s,%s,%s,%s,%s)"
# Lagou listing AJAX endpoint; city=%E5%8C%97%E4%BA%AC is "北京" (Beijing), URL-encoded.
url = 'https://www.lagou.com/jobs/positionAjax.json?px=default&city=%E5%8C%97%E4%BA%AC&needAddtionalResult=false'
# Headers captured from a real browser session. Lagou's anti-crawler
# checks require a matching Referer/Cookie pair; the session cookie below
# is time-limited — refresh it when responses turn into anti-crawler
# payloads.
# FIX: the Cookie value was broken across two physical source lines
# (a SyntaxError inside a single-quoted literal); it is re-joined here via
# implicit adjacent-string concatenation without changing a byte of it.
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Cookie': 'WEBTJ-ID=20180817144920-16546a544c6135-0dddc6f4ac7209-2711639-1049088-16546a544c7e0; _ga=GA1.2.10894523.1534488562; _gid=GA1.2.1461560699.1534488562; user_trace_token=20180817144931-b18e45cc-a1e9-11e8-a9f0-5254005c3644; PRE_HOST=www.baidu.com; LGUID=20180817144931-b18e4991-a1e9-11e8-a9f0-5254005c3644; X_HTTP_TOKEN=c7943969cb6c7080f4a9483619d27c0f; LGSID=20180817145005-c5b6633a-a1e9-11e8-a9f0-5254005c3644; PRE_UTM=m_cf_cpc_baidu_pc; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Fbaidu.php%3Fsc.a000000pGFTBfqUxhdsLQQHGQPtm0FlmZuPiBSmB96XvSF0gsSJ-XXYAsI1tvw-I6rUY8xMiEVD4LYRAoGpeAhME_SAWhCRZemkO175w__MtIII4Pgip-OVtf2JJ0RK51gw45Da6mz7kIi5VMJhFk5bFuvZ7wpyHeTJWAGh4FIYLrFlRYs.DD_NR2Ar5Od663rj6tJQrGvKD7ZZKNfYYmcgpIQC8xxKfYt_U_DY2yP5Qjo4mTT5QX1BsT8rZoG4XL6mEukmryZZjzL4XNPIIhExzLu2SMcM-sSxH9vX8ZuEsSXej_qT5o43x5ksSEzseld2s1f_U2qS4f.U1Yk0ZDqs2v4VnL30ZKGm1Yk0Zfqs2v4VnL30A-V5HcsP0KM5yF-TZns0ZNG5yF9pywd0ZKGujYk0APGujYs0AdY5HDsnHIxnH0krNtknjc1g1DsPjuxn1msnfKopHYs0ZFY5HTsP0K-pyfqnHfvr7tznH04P7tkrjRvn7tzPWndn7tznjbzr0KBpHYznjf0UynqP1m1nW03Pjnsg1Dsnj7xnNtknjFxn0KkTA-b5H00TyPGujYs0ZFMIA7M5H00mycqn7ts0ANzu1Ys0ZKs5H00UMus5H08nj0snj0snj00Ugws5H00uAwETjYs0ZFJ5HD0uANv5gKW0AuY5H00TA6qn0KET1Ys0AFL5HDs0A4Y5H00TLCq0ZwdT1Y1n16dPHTsnWR4Pjm3njTsP1cs0ZF-TgfqnHR1Pjf3rjRvPWcvnsK1pyfquH61P1c1njmsnj04m1Fhu0KWTvYqrRDdPHFDwRFAnYcznRDvP0K9m1Yk0ZK85H00TydY5H00Tyd15H00XMfqn0KVmdqhThqV5HKxn7tsg1Kxn0Kbmy4dmhNxTAk9Uh-bT1Ysg1Kxn7t1nHb4n1Nxn0Ksmgwxuhk9u1Ys0AwWpyfqn0K-IA-b5iYk0A71TAPW5H00IgKGUhPW5H00Tydh5H00uhPdIjYs0AulpjYs0Au9IjYs0ZGsUZN15H00mywhUA7M5HD0UAuW5H00mLFW5HT1n10%26ck%3D8803.1.121.324.567.324.563.824%26shh%3Dwww.baidu.com%26sht%3Dbaidu%26us%3D1.0.2.0.1.300.0%26ie%3Dutf-8%26f%3D8%26tn%3Dbaidu%26wd%3D%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591%26rqlang%3Dcn%26inputT%3D2968%26bc%3D110101; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Flp%2Fhtml%2Fcommon.html%3Futm_source%3Dm_cf_cpc_baidu_pc%26m_kw%3Dbaidu_cpc_bj_e110f9_d2162e_%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591; '
              'JSESSIONID=ABAAABAAAGFABEF0FA09730921B5CA3AE4E13D3F684A19A; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1534488563,1534488595,1534488606; index_location_city=%E5%85%A8%E5%9B%BD; TG-TRACK-CODE=search_code; SEARCH_ID=ab62d667074042ddbc0294a97e39152c; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1534490018; LGRID=20180817151348-1625b6a1-a1ed-11e8-a9f0-5254005c3644',
    'Host': 'www.lagou.com',
    'Origin': 'https://www.lagou.com',
    'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?px=default&city=%E5%8C%97%E4%BA%AC',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
    'X-Anit-Forge-Code': '0',
    'X-Anit-Forge-Token': 'None',
    'X-Requested-With': 'XMLHttpRequest',
}
# Crawl 30 listing pages; for each listed job, fetch the detail page and
# persist (company, salary, experience, education, description) to MySQL.
for page in range(1, 31):
    # Listing request: 'pn' is the page number, 'kd' the search keyword
    # ("数据分析" = data analysis).
    form = {
        'first': 'false',
        'pn': page,
        'kd': '数据分析',
    }
    response = requests.post(url, data=form, headers=headers)
    html_dict = json.loads(response.text)

    # Each listing page carries up to 15 positionIds that lead into the
    # detail pages.
    for j in range(15):
        try:
            position_id = html_dict['content']['positionResult']['result'][j]['positionId']
        except (KeyError, IndexError, TypeError):
            # Short page or an anti-crawler JSON payload — the original
            # indexed blindly and crashed here; stop this listing page.
            break

        # Detail-page URL is built from the position id.
        detail_url = 'https://www.lagou.com/jobs/' + str(position_id) + '.html'
        form1 = {
            'positionId': position_id,
            'pageSize': 500,
        }
        # Second request: fetch the job detail page itself.
        response = requests.post(detail_url, data=form1, headers=headers)
        html_ele = etree.HTML(response.text)

        # BUG FIX: xpath() always returns a list, never None, so the
        # original `if company_err == None: break` could never trigger
        # and the [0] index below raised IndexError on anti-crawler
        # pages. Test for an empty result list instead.
        company_nodes = html_ele.xpath('//div[@class="company"]')
        if not company_nodes:
            break
        # Company name.
        company = company_nodes[0].text

        # Salary / experience / education are the 1st, 3rd and 4th <span>
        # of the position header; fetch them once and guard against
        # layout changes instead of indexing three separate queries.
        spans = html_ele.xpath('//div[@class="position-head"]/div/div[1]/dd/p/span')
        if len(spans) < 4:
            continue
        salary = spans[0].text   # 工资
        jingyan = spans[2].text  # 经验
        xueli = spans[3].text    # 学历

        # Job description: the variable name "pinjie" (concatenate) shows
        # the text fragments were meant to be joined — the original stored
        # str(list) which writes a Python list repr into the DB.
        zhize = html_ele.xpath('//div[@class="content_l fl"]/dl[1]/dd[2]/div//text()')
        description = ''.join(zhize).strip()

        # Persist the record.
        data = (company, salary, jingyan, xueli, description)
        myhelper.execute_modify_sql(sql, data)