Web Crawler (Part 1)

'''
from lxml import etree
import xlrd
from xlutils.copy import copy
import random
import requests

index = 0
headers = {
    "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
    "content-type": "text/html; charset=UTF-8",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3704.400 QQBrowser/10.4.3587.400"
}

# Proxy pool. The original installed these into urllib's global opener, which
# requests.get() never consults, so the proxies silently did nothing; passing
# proxies= to requests makes them take effect. Bare IPs without a port still
# cannot work as proxy addresses, so live host:port entries are needed here.
ips = ["188.131.137.105", "60.217.73.238", "113.232.23.129"]

def scrape_pages(url_fmt, pages):
    """Crawl listing pages 1..pages-1 and append name/company/salary rows to xiaoshou.xls."""
    global index
    for i in range(1, pages):
        ip = random.choice(ips)
        url = url_fmt % i
        print("Scraping page", i, " ", url)
        html_str = requests.get(url, headers=headers,
                                proxies={"http": "http://" + ip}).text
        html = etree.HTML(html_str)
        dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
        work_book = xlrd.open_workbook("xiaoshou.xls")
        work_book_copy = copy(work_book)
        sheet_copy = work_book_copy.get_sheet(0)
        for dl in dls:
            try:
                name = dl.xpath("./dt/a[@class='list_title gj_tongji']/text()")[0]
                company = dl.xpath("./dt/div/a/@title")[0]
                salary = dl.xpath("./dd[@class='company']/div[@class='new-dl-salary']/text()")[0]
            except IndexError:
                continue
            sheet_copy.write(index, 0, name)
            sheet_copy.write(index, 1, company)
            sheet_copy.write(index, 2, salary)
            index += 1
        work_book_copy.save("xiaoshou.xls")  # one save per page instead of per row

# The original repeated this loop body once per job category; a single helper
# over (url pattern, page count) pairs does the same work.
scrape_pages("http://bj.ganji.com/zpshichangyingxiao/o%d/", 100)   # marketing
scrape_pages("http://bj.ganji.com/zpjigongyibangongren/o%d/", 47)  # skilled/general workers
scrape_pages("http://bj.ganji.com/zpyingyeyuan/o%d/", 50)          # shop assistants
scrape_pages("http://bj.ganji.com/zptaobao/o%d/", 15)              # Taobao jobs
# NOTE: the original also ran the identical loop over
# https://www.kuaidaili.com/free/o%d/ for 2446 pages, still using the Ganji
# job-list XPath, which cannot match kuaidaili's markup and collected nothing.
'''
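
To confirm a proxy is actually in effect, ask an IP-echo service which origin address it sees. A minimal sketch, assuming a live host:port proxy (the bare IPs in the pool above lack ports and would need one):

'''
import requests

proxy = "http://127.0.0.1:8888"  # hypothetical proxy address; substitute a live one
print(requests.get("http://httpbin.org/ip",
                   proxies={"http": proxy},
                   timeout=5).json())  # prints {"origin": "<the proxy's IP>"}
'''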

'''
import urllib.request as ur
from lxml import etree
for i in range(1, 5):
    url = "http://bj.ganji.com/zpshichangyingxiao/o%d/" % i
    print("Scraping page", i, " ", url)
    response = ur.urlopen(url)
    html_str = response.read().decode("utf-8")
    html = etree.HTML(html_str)
    names = html.xpath("//a[@class='list_title gj_tongji']/text()")
    print(names)
    break  # stop after the first page while testing the XPath
'''
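
The later versions query titles and companies with parallel XPath calls, which can drift out of alignment when a listing lacks one field. A hedged sketch that iterates anchor elements instead, so each title stays paired with its own link:

'''
import requests
from lxml import etree

html = etree.HTML(requests.get("http://bj.ganji.com/zpshichangyingxiao/o1/").text)
for a in html.xpath("//a[@class='list_title gj_tongji']"):
    # string(.) concatenates the text of this anchor only, keeping pairs aligned
    print(a.xpath("string(.)").strip(), a.get("href"))
'''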

'''
import urllib.request as ur
from lxml import etree
import xlrd
from xlutils.copy import copy
index = 0
for i in range(1,5):
    url = "http://bj.ganji.com/zpshichangyingxiao/o%d/"%i
    print("正在爬取第",i,"页  ",url)
    response = ur.urlopen(url)
    html_str = response.read().decode("utf-8")
    html = etree.HTML(html_str)
    dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
    work_book = xlrd.open_workbook("000.xls")
    work_book_copy= copy(work_book)
    sheet_copy = work_book_copy.get_sheet(0)
    for dl in dls:
        try:
            name = dl.xpath("./dt/a[@class='list_title gj_tongji']/text()")[0]
            company = dl.xpath("./dt/div/a/@title")[0]
            sheet_copy.write(index,0,name)
            sheet_copy.write(index,1,company)
            work_book_copy.save("000.xls")
            index += 1
        except IndexError:
            pass
'''
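
xlrd/xlutils can only modify a workbook that already exists, so 000.xls (and later xiaoshou.xls/001.xls) must be created beforehand. A one-time setup sketch with xlwt:

'''
import xlwt

book = xlwt.Workbook()
book.add_sheet("sheet1")  # sheet name is arbitrary; the scraper uses index 0
book.save("000.xls")
'''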

'''
import urllib.request as ur
from lxml import etree
import xlrd
from xlutils.copy import copy
import random

index = 0
headers = {
    "Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
    "Content-Type": "image/gif",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3704.400 QQBrowser/10.4.3587.400"
}

# Rotate proxy IPs (the opener must be installed, or urlopen silently ignores it)
for i in range(1, 5):
    ips = ["60.205.188.24","163.125.235.214","123.206.30.254","117.90.252.252"]
    ip = random.choice(ips)
    proxy_support = ur.ProxyHandler({"http": ip})  # NOTE: bare IPs lack the host:port a proxy needs
    opener = ur.build_opener(proxy_support)
    ur.install_opener(opener)  # missing in the original, so the proxy never applied
    url = "http://bj.ganji.com/zpshichangyingxiao/o%d/" % i
    print("Scraping page", i, " ", url)
    request = ur.Request(url=url, headers=headers)
    response = ur.urlopen(request)
    html_str = response.read().decode("utf-8")
    html = etree.HTML(html_str)
    dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
    work_book = xlrd.open_workbook("000.xls")
    work_book_copy = copy(work_book)
    sheet_copy = work_book_copy.get_sheet(0)
    for dl in dls:
        try:
            name = dl.xpath("./dt/a[@class='list_title gj_tongji']/text()")[0]
            company = dl.xpath("./dt/div/a/@title")[0]
            sheet_copy.write(index, 0, name)
            sheet_copy.write(index, 1, company)
            work_book_copy.save("000.xls")
            index += 1
        except IndexError:
            pass
'''

'''
#requests
import urllib.request as ur
from lxml import etree
import xlrd
from xlutils.copy import copy
import random
import requests

index = 0
headers = {
    "Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
    "Content-Type": "image/gif",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3704.400 QQBrowser/10.4.3587.400"
}

# Rotate proxy IPs; requests ignores urllib's opener, so pass proxies= directly
for i in range(1, 5):
    ips = ["60.205.188.24","163.125.235.214","123.206.30.254","117.90.252.252"]
    ip = random.choice(ips)  # NOTE: bare IPs lack the host:port a proxy needs
    url = "http://bj.ganji.com/zpshichangyingxiao/o%d/" % i
    print("Scraping page", i, " ", url)
    html_str = requests.get(url, headers=headers, proxies={"http": "http://" + ip}).text
    html = etree.HTML(html_str)
    dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
    work_book = xlrd.open_workbook("000.xls")
    work_book_copy = copy(work_book)
    sheet_copy = work_book_copy.get_sheet(0)
    for dl in dls:
        try:
            name = dl.xpath("./dt/a[@class='list_title gj_tongji']/text()")[0]
            company = dl.xpath("./dt/div/a/@title")[0]
            sheet_copy.write(index, 0, name)
            sheet_copy.write(index, 1, company)
            work_book_copy.save("000.xls")
            index += 1
        except IndexError:
            pass
'''
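
Each bare requests.get opens a fresh connection and repeats the headers; a Session reuses connections and carries headers and proxies across calls. A minimal sketch, assuming a hypothetical proxy address:

'''
import requests

session = requests.Session()
session.headers.update({"User-Agent": "Mozilla/5.0"})
session.proxies.update({"http": "http://127.0.0.1:8888"})  # hypothetical proxy; substitute a live one
html_str = session.get("http://bj.ganji.com/zpshichangyingxiao/o1/").text
'''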

'''
import urllib.request as ur
from lxml import etree
import xlrd
from xlutils.copy import copy
import random
import requests
from telnetlib import Telnet

index = 0
headers = {
    "Accept": "*/*",
    "Content-Type": "text/plain;charset=UTF-8",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
              "(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
# Proxy pool: entries must be "host:port"; bare IPs can never pass the probe below
ip_ports = ["188.131.137.105:8118","114.234.80.188","47.107.190.212","61.164.39.69"]
def get_ip_port():
    ip_port = random.choice(ip_ports)
    # Probe the proxy with a quick TCP connect to see whether it is alive
    try:
        Telnet(ip_port.split(":")[0], ip_port.split(":")[1], timeout=0.5)
        print(ip_port, "is usable")
        result = True
    except Exception:
        result = False
    if result:
        # NOTE: install_opener only affects urllib; the requests.get call
        # below does not go through this proxy
        proxy_support = ur.ProxyHandler({"http": ip_port})
        opener = ur.build_opener(proxy_support)
        ur.install_opener(opener)
    else:
        print(ip_port, "is dead")
        ip_ports.remove(ip_port)
        get_ip_port()  # retries until a proxy responds; raises once the pool is empty
get_ip_port()
for i in range(1, 5):
    url = "http://bj.ganji.com/zpdiangong/o%d/" % i
    print("Scraping page", i, url)
    html_str = requests.get(url, headers=headers).text
    html = etree.HTML(html_str)
    dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
    work_book = xlrd.open_workbook("000.xls")
    work_book_copy = copy(work_book)
    sheet_copy = work_book_copy.get_sheet(0)
    for dl in dls:
        try:
            name = dl.xpath("./dt/a[@class='list_title gj_tongji']/text()")[0]
            company = dl.xpath("./dt/div/a/@title")[0]
            sheet_copy.write(index,0,name)
            sheet_copy.write(index,1,company)
            work_book_copy.save("000.xls")
            index += 1
        except IndexError:
            pass
'''
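
A liveness probe does not need telnetlib (deprecated and removed in Python 3.13); a plain TCP connect does the same job. A drop-in sketch:

'''
import socket

def is_alive(ip_port, timeout=0.5):
    """Return True when a TCP connection to "host:port" succeeds."""
    host, port = ip_port.split(":")
    try:
        with socket.create_connection((host, int(port)), timeout=timeout):
            return True
    except OSError:
        return False
'''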

'''
import requests
from lxml import etree
# Fetch Ganji job categories (probe: dump the raw HTML first)
def get_job_type(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)  # parsed tree, unused in this probe
    print(html_str)

if __name__ == "__main__":
    url = "http://bj.ganji.com/jianzhi/"
    get_job_type(url)
'''

'''
import requests
from lxml import etree
# Fetch Ganji job-category links from the hot-categories box
def get_job_type(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)
    types = html.xpath("//div[@class='f-hot']/dl/dd/i/a/@href")
    names = html.xpath("//div[@class='f-hot']/dl/dd/i/a/text()")
    return types

def get_data(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)
    print(html_str)

def get_total_pages(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)
    print(html_str)
if __name__ == "__main__":
    get_total_pages("http://bj.ganji.com/zpshichangyingxiao/")
    #url_base = "http://bj.ganji.com"
    #url = url_base+"/zhaopin/"
    #ts = get_job_type(url)
    #for t in ts:
'''

'''
import requests
from lxml import etree
import time

# Fetch Ganji job-category links
def get_job_type(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)
    types = html.xpath("//div[@class='f-hot']/dl/dd/i/a/@href")
    names = html.xpath("//div[@class='f-hot']/dl/dd/i/a/text()")
    return types

def get_data(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)
    print(html_str)

def get_total_pages(url):
    html_str = requests.get(url).text
    html = etree.HTML(html_str)
    total_page = html.xpath("//div[@class='pageBox']/ul/li[last()-1]/a/span/text()")[0]
    return total_page

if __name__ == "__main__":
    headers = {
        "Accept": "*/*",
        "Content-Type": "text/plain;charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
    }
    url_base = "http://bj.ganji.com"
    url = url_base+"/zhaopin/"
    ts = get_job_type(url)

    for t in ts:
        url0 = url_base + t
        tp = int(get_total_pages(url0))
        for i in range(1, tp+1):
            print("Fetching category", t, "page", i, "of", tp)
            url0_0 = url0 + "o%d/" % i
            get_data(url0_0)
            time.sleep(2)  # polite delay; the original astronomical value would block forever
'''
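
get_total_pages above indexes [0] unconditionally, so a category without a pager (a single page of results) raises IndexError. A hedged variant that falls back to one page:

'''
import requests
from lxml import etree

def get_total_pages(url, headers=None):
    html = etree.HTML(requests.get(url, headers=headers).text)
    spans = html.xpath("//div[@class='pageBox']/ul/li[last()-1]/a/span/text()")
    return int(spans[0]) if spans else 1  # no pager means a single page
'''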

'''
import requests
from lxml import etree
import time
import xlrd
from xlutils.copy import copy
import random
from telnetlib import Telnet
import urllib.request as ur
# Proxy pool: entries must be "host:port"
ip_ports = ["210.5.10.87:53281", "60.13.42.116:9999","183.129.207.86:14002"]
def get_ip_port():
    ip_port = random.choice(ip_ports)
    # Probe the proxy with a quick TCP connect to see whether it is alive
    try:
        Telnet(ip_port.split(":")[0], ip_port.split(":")[1], timeout=0.5)
        print(ip_port, "is usable")
        result = True
    except Exception:
        result = False
    if result:
        # NOTE: install_opener only affects urllib; the requests calls in this
        # script do not go through this proxy
        proxy_support = ur.ProxyHandler({"http": ip_port})
        opener = ur.build_opener(proxy_support)
        ur.install_opener(opener)
    else:
        print(ip_port, "is dead")
        ip_ports.remove(ip_port)
        get_ip_port()  # retries until a proxy responds; raises once the pool is empty
# Fetch Ganji job-category links
def get_job_type(url,headers):
    html_str = requests.get(url,headers=headers).text
    html = etree.HTML(html_str)
    types = html.xpath("//div[@class='f-hot']/dl/dd/i/a/@href")
    names = html.xpath("//div[@class='f-hot']/dl/dd/i/a/text()")
    return types

def get_data(url,headers):
    with xlrd.open_workbook("000.xls") as work_book:
        sheet = work_book.sheet_by_index(0)
        index = sheet.nrows
        work_book_copy = copy(work_book)
        sheet_copy = work_book_copy.get_sheet(0)

        html_str = requests.get(url,headers=headers).text
        html = etree.HTML(html_str)
        dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
        for dl in dls:
            try:
                name = dl.xpath("./dt/a/text()")[0]
                company = dl.xpath("./dt/div/a/@title")[0]
                sheet_copy.write(index, 0, name)
                sheet_copy.write(index, 1, company)
                work_book_copy.save("000.xls")
                index += 1
            except IndexError:
                pass

def get_total_pages(url,headers):
    html_str = requests.get(url,headers=headers).text
    html = etree.HTML(html_str)
    total_page = html.xpath("//div[@class='pageBox']/ul/li[last()-1]/a/span/text()")[0]
    return total_page

if __name__ == "__main__":
    headers = {
        "Accept": "*/*",
        "Content-Type": "text/plain;charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
    }
    url_base = "http://bj.ganji.com"
    url = url_base+"/zhaopin/"
    ts = get_job_type(url,headers)

    for t in ts:
        get_ip_port()
        url0 = url_base + t
        tp = int(get_total_pages(url0, headers))
        for i in range(1, tp+1):
            print("Fetching category", t, "page", i, "of", tp)
            url0_0 = url0 + "o%d/" % i
            get_data(url0_0, headers)
            #time.sleep(2)  # optional polite delay between pages
'''
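
Saving the workbook after every row rewrites the whole file once per record. A sketch of the same get_data that saves once per page instead (same file and columns as the block above):

'''
import requests
import xlrd
from lxml import etree
from xlutils.copy import copy

def get_data(url, headers=None):
    work_book = xlrd.open_workbook("000.xls")
    index = work_book.sheet_by_index(0).nrows  # append after existing rows
    work_book_copy = copy(work_book)
    sheet_copy = work_book_copy.get_sheet(0)
    html = etree.HTML(requests.get(url, headers=headers).text)
    for dl in html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']"):
        try:
            name = dl.xpath("./dt/a/text()")[0]
            company = dl.xpath("./dt/div/a/@title")[0]
        except IndexError:
            continue
        sheet_copy.write(index, 0, name)
        sheet_copy.write(index, 1, company)
        index += 1
    work_book_copy.save("000.xls")  # one save per page, not per row
'''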


'''
import requests
from lxml import etree
import random
from telnetlib import Telnet
import urllib.request as ur
# Proxy pool: entries must be "host:port"
ip_ports = ["47.98.237.129:80", "175.44.159.172:9000","221.2.174.3:8060"]
def get_ip_port():
    ip_port = random.choice(ip_ports)
    # Probe the proxy with a quick TCP connect to see whether it is alive
    try:
        Telnet(ip_port.split(":")[0], ip_port.split(":")[1], timeout=0.1)
        print(ip_port, "is usable")
        result = True
    except Exception:
        result = False
    if result:
        # NOTE: install_opener only affects urllib; the requests calls in this
        # script do not go through this proxy
        proxy_support = ur.ProxyHandler({"http": ip_port})
        opener = ur.build_opener(proxy_support)
        ur.install_opener(opener)
    else:
        print(ip_port, "is dead")
        ip_ports.remove(ip_port)
        get_ip_port()  # retries until a proxy responds; raises once the pool is empty
# Fetch Ganji job-category links
def get_job_type(url,headers):
    html_str = requests.get(url,headers=headers).text
    html = etree.HTML(html_str)
    types = html.xpath("//div[@class='f-hot']/dl/dd/i/a/@href")
    names = html.xpath("//div[@class='f-hot']/dl/dd/i/a/text()")
    return types

def get_data(url, headers):
    html_str = requests.get(url, headers=headers).text
    html = etree.HTML(html_str)
    dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
    for dl in dls:
        try:
            name = dl.xpath("./dt/a/text()")[0]
            company = dl.xpath("./dt/div/a/@title")[0]
            salary = dl.xpath("./dd/div[@class='new-dl-salary']/text()")[0]
            tags = dl.xpath("./dd/div[@class='new-dl-tags']/i/text()")[0]
            pay = dl.xpath("./dd[@class='pay']/@title")[0]
            pub_time = dl.xpath("./dd[@class='pub-time']/span/text()")[0]
        except IndexError:
            continue
        # Open once per record and write one tab-separated line. The original
        # reopened 1.txt for every field and wrote the undefined name `i`
        # where the salary was clearly intended, which the bare except
        # silently swallowed.
        with open("1.txt", "a", encoding="utf-8") as f:
            f.write("\t".join([name, company, salary, tags, pay, pub_time]) + "\n")

def get_total_pages(url,headers):
    try:
        html_str = requests.get(url,headers=headers).text
        html = etree.HTML(html_str)
        total_page = html.xpath("//div[@class='pageBox']/ul/li[last()-1]/a/span/text()")[0]
        return total_page
    except:
        pass

if __name__ == "__main__":
    headers = {
        "Accept": "*/*",
        "Content-Type": "text/plain;charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
    }
    url_base = "http://bj.ganji.com"
    url = url_base+"/zhaopin/"
    ts = get_job_type(url,headers)

    for t in ts:
        get_ip_port()
        url0 = url_base + t
        tp = int(get_total_pages(url0, headers) or 1)  # get_total_pages may return None
        for i in range(1, tp+1):
            print("Fetching category", t, "page", i, "of", tp)
            url0_0 = url0 + "o%d/" % i
            get_data(url0_0, headers)
'''
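
Appending unseparated strings to 1.txt makes the file hard to parse later; the csv module gives one record per line with proper quoting. A sketch (the jobs.csv name is my own choice):

'''
import csv

def append_rows(rows, path="jobs.csv"):
    """Append [name, company, salary, tags, pay, pub_time] rows to a CSV file."""
    with open(path, "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerows(rows)
'''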


'''
import requests
from lxml import etree
import time
import xlrd
from xlutils.copy import copy
import random
from telnetlib import Telnet
import urllib.request as ur
# Proxy pool: entries must be "host:port"
ip_ports = ["183.129.207.86:14002", "180.118.86.118:9000","221.2.174.3:8060"]
def get_ip_port():
    ip_port = random.choice(ip_ports)
    # Probe the proxy with a quick TCP connect to see whether it is alive
    try:
        Telnet(ip_port.split(":")[0], ip_port.split(":")[1], timeout=0.1)
        print(ip_port, "is usable")
        result = True
    except Exception:
        result = False
    if result:
        # NOTE: install_opener only affects urllib; the requests calls in this
        # script do not go through this proxy
        proxy_support = ur.ProxyHandler({"http": ip_port})
        opener = ur.build_opener(proxy_support)
        ur.install_opener(opener)
    else:
        print(ip_port, "is dead")
        ip_ports.remove(ip_port)
        get_ip_port()  # retries until a proxy responds; raises once the pool is empty
# Fetch Ganji job-category links
def get_job_type(url,headers):
    html_str = requests.get(url,headers=headers).text
    html = etree.HTML(html_str)
    types = html.xpath("//div[@class='f-hot']/dl/dd/i/a/@href")
    names = html.xpath("//div[@class='f-hot']/dl/dd/i/a/text()")
    return types

def get_data(url,headers):
    with xlrd.open_workbook("001.xls") as work_book:
        sheet = work_book.sheet_by_index(0)
        index = sheet.nrows
        work_book_copy = copy(work_book)
        sheet_copy = work_book_copy.get_sheet(0)

        html_str = requests.get(url,headers=headers).text
        html = etree.HTML(html_str)
        dls = html.xpath("//dl[@class='list-noimg job-list clearfix new-dl']")
        for dl in dls:
            try:
                name = dl.xpath("./dt/a/text()")[0]
                company = dl.xpath("./dt/div/a/@title")[0]
                salary = dl.xpath("./dd/div[@class='new-dl-salary']/text()")[0]
                tags = dl.xpath("./dd/div[@class='new-dl-tags']/i/text()")[0]
                pay = dl.xpath("./dd[@class='pay']/@title")[0]
                pub_time = dl.xpath("./dd[@class='pub-time']/span/text()")[0]  # renamed to avoid shadowing the time module
                sheet_copy.write(index, 0, name)
                sheet_copy.write(index, 1, company)
                sheet_copy.write(index, 2, salary)
                sheet_copy.write(index, 3, tags)
                sheet_copy.write(index, 4, pay)
                sheet_copy.write(index, 5, pub_time)
                work_book_copy.save("001.xls")
                index += 1
            except IndexError:
                pass

def get_total_pages(url,headers):
    try:
        html_str = requests.get(url,headers=headers).text
        html = etree.HTML(html_str)
        total_page = html.xpath("//div[@class='pageBox']/ul/li[last()-1]/a/span/text()")[0]
        return total_page
    except:
        pass

if __name__ == "__main__":
    headers = {
        "Accept": "*/*",
        "Content-Type": "text/plain;charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
    }
    url_base = "http://sh.ganji.com"
    url = url_base+"/zhaopin/"
    ts = get_job_type(url,headers)

    for t in ts:
        get_ip_port()
        url0 = url_base + t
        tp = int(get_total_pages(url0, headers) or 1)  # get_total_pages may return None
        for i in range(1, tp+1):
            print("Fetching category", t, "page", i, "of", tp)
            url0_0 = url0 + "o%d/" % i
            get_data(url0_0, headers)
'''
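
The xlrd/xlutils copy-and-rewrite cycle is legacy tooling, and the .xls format caps out at 65536 rows. A sketch of the same append step with openpyxl and an .xlsx file (jobs.xlsx is my own name and must exist beforehand):

'''
from openpyxl import load_workbook

row = ["name", "company", "salary", "tags", "pay", "pub_time"]  # one scraped record
wb = load_workbook("jobs.xlsx")
wb.active.append(row)  # append below the last used row of the active sheet
wb.save("jobs.xlsx")
'''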
