WennanDu 2017-12-05 15:26:28 565 收藏
版权
一、使用python编写爬虫——使用urllib库下载网页,使用xpath解析
提取页面中我们所需的信息,公司名称、薪资待遇、工作地点、工作经验、最低学历、招聘人数、公司规模,然后结构化输出到txt文件中,以备之后的分析使用。
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib.request import Request
import string
import urllib
import lxml.html
def getHtml(url):
    """Download the page at *url* and return its raw bytes.

    A desktop-browser User-Agent header is sent so the site does not
    reject the request as an obvious bot.

    :param url: page URL; may contain CJK characters, which are
        percent-encoded before the request is made.
    :return: the response body as ``bytes``.
    :raises urllib.error.URLError: on network / HTTP failure.
    """
    headers = {
        'user-agent': ('Mozilla/5.0 (Windows NT 10.0; WOW64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/62.0.3202.89 Safari/537.36'),
    }
    # Percent-encode only non-printable-ASCII characters (e.g. Chinese);
    # '/', ':', '?' etc. are in string.printable and stay untouched.
    url = urllib.parse.quote(url, safe=string.printable)
    req = Request(url, headers=headers)
    # Close the response explicitly (original leaked the connection).
    with urllib.request.urlopen(req) as page:
        return page.read()
def getImg(html):
    """Parse one Ganji job-listing page and append its records to ganji_info.txt.

    For every posting on the page, extracts the title text plus six
    detail fields (per the original author: salary, location, experience,
    education, headcount, company size — TODO confirm field order against
    a live page) and appends them tab-separated, UTF-8 encoded, one
    posting per line terminated with CRLF.

    NOTE(review): despite the name, this writes job info, not images;
    name kept unchanged so existing callers still work.

    :param html: raw page bytes (or str) as returned by getHtml().
    """
    tree = lxml.html.fromstring(html)
    # Posting titles; note the trailing space in the class attribute is
    # present in the site's markup and must be matched exactly.
    titles = tree.xpath('//div[@class="s-btop s-bb1 "]//p/text()')
    # The six <li> detail fields of each posting, in page order.
    details = [
        tree.xpath('//div[@class="s-butt s-bb1"]//li[%d]/text()' % i)
        for i in range(1, 7)
    ]
    # Open the output file once, not once per record as the original did.
    with open('ganji_info.txt', 'ab') as out:
        for idx in range(len(titles)):
            # Only the first detail field carried stray whitespace in the
            # original, hence the lone .strip() on details[0].
            fields = [titles[idx], details[0][idx].strip()]
            fields.extend(details[k][idx] for k in range(1, 6))
            out.write('\t'.join(fields).encode('utf8'))
            out.write('\r\n'.encode('utf8'))
if __name__ == '__main__':
    # Listing pages follow the pattern .../o1/, .../o2/, ... ; the
    # original hard-codes 300 pages. ('str' renamed: it shadowed the
    # builtin, and 'if name == main' lacked the dunders entirely, so the
    # pasted script could never reach this code.)
    url_template = 'http://bj.ganji.com/zpruanjianhulianwang/o{}/'
    for pn in range(1, 301):  # 爬取300页
        print('爬取第', pn, '页')
        html = getHtml(url_template.format(pn))
        getImg(html)
    print('爬取完成')
————————————————
版权声明:本文为CSDN博主「WennanDu」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/DuWennan/article/details/78540344