# 使用python爬取招聘网的职位信息并分析
import requests
import random
import time
import pandas as pd
import matplotlib.pylab as plt
import jieba
##from wordcloud import WordCloud
import math
# 该爬虫爬取拉勾网用户想要查询的地区的python相关招聘信息,并且进行数据处理与分析.
# 获取请求结果
# kind 搜索关键字
# page 页码, 默认是1
def get_json(kind, city, page=1,):
# post请求参数
data = {
'first':"true",
'pn':page,
'kd':kind
}
# 想要搜索的城市名称
kw = {'px':'default', 'city':city, 'needAddtionalResult':'false'}
user_agent_list = ['Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201']
user_agent = random.choice(user_agent_list)
print('1:',user_agent)
header = {
'Host': 'www.lagou.com',
'Referer': 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput=',
'User-Agent': user_agent
}
print(header)
# 设置代理
proxies = [
{'http': '140.143.96.216:80', 'https': '140.143.96.216:80'},