import urllib.request
import ssl
import re
import xlwt
import xlrd
from xlutils.copy import copy
import DBUtils
def getContent(k, l):
    """Fetch one 51job search-result page and return its HTML.

    Args:
        k: keyword slotted into the first %s of the URL path.
        l: result-page number slotted into the second %s.

    Returns:
        The response body decoded from GBK (51job serves GBK pages,
        per the explicit decode below).
    """
    # Browser-like headers; presumably needed so the site does not
    # reject the default urllib User-Agent — TODO confirm.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
        'Connection': 'keep-alive'
    }
    # BUGFIX: the original URL contained the mojibake "°reefrom=99" —
    # "&deg" of "&degreefrom" had been HTML-entity-decoded into "°".
    # Restored the intended "&degreefrom=99" query parameter.
    url = "https://search.51job.com/list/080700,000000,0000,00,9,99,%s,2,%s.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=" % (k, l)
    # Request object = URL + headers.
    req = urllib.request.Request(url, headers=headers)
    # Fetch the page body.
    page = urllib.request.urlopen(req).read()
    page = page.decode("GBK")
    return page
# print(getContent())
def getItem(content):
    """Pull job records out of the embedded JSON blob of a result page.

    Returns a list of 12-tuples:
    (job_href, job_name, company_href, company_name, providesalary_text,
     workarea_text, updatedate, companytype_text, degreefrom,
     attribute_text, companysize_text, companyind_text).
    Note: attribute_text is captured raw (it is a JSON array, not a
    quoted string in the source blob).
    """
    job_pattern = re.compile(
        r'"job_href":"(.+?)","job_name":"(.+?)".+?'
        r'"company_href":"(.+?)","company_name":"(.+?)",'
        r'"providesalary_text":"(.*?)".+?"workarea_text":"(.*?)",'
        r'"updatedate":"(.*?)".*?"companytype_text":"(.*?)",'
        r'"degreefrom":"(.*?)".*?"attribute_text":(.*?),'
        r'"companysize_text":"(.*?)","companyind_text":"(.*?)".*?')
    return job_pattern.findall(content)
#
# content = getContent()
# print(getItem(content))
def saveExcel():
wb = xlwt.Workbook()
sheet = wb.add_sheet("数据分析50")
header
python爬虫作业
最新推荐文章于 2023-10-06 15:00:00 发布
本文详细介绍了使用Python进行网络爬虫的实践过程,包括数据抓取、解析及存储等关键步骤。通过实例解析爬虫作业,帮助读者掌握Python爬虫的基本技巧和常见问题解决策略。
摘要由CSDN通过智能技术生成