day6 - Crawler workflow review + threads and processes

Crawler workflow review

1. Fetch the data  -  requests / selenium
Find the target site  -  try requests first; set request headers (User-Agent, Cookie) and a proxy as needed.

If the requests get rejected  -  switch to selenium, again with cookies and a proxy as needed.

2. Parse the data
Regular expressions, CSS selectors (bs4, pyquery), XPath

3. Save the data
csv, excel  (steps 2 and 3 are sketched right after the requests example below)

import requests

url = 'https://www.pinduoduo.com/'

response = requests.get(url)
print(response.status_code)
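
Steps 2 and 3 in one minimal sketch: parse the fetched page with a bs4 CSS selector and write the result to a CSV file. The selector and output path here are made up for illustration and are not part of the original notes.

import csv
import requests
from bs4 import BeautifulSoup

response = requests.get('https://www.pinduoduo.com/')
soup = BeautifulSoup(response.text, 'lxml')

# Hypothetical selector: collect the text of every link on the page.
rows = [[a.get_text(strip=True)] for a in soup.select('a')]

with open('files/links.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['text'])    # header row
    writer.writerows(rows)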

Writing to an Excel file


import openpyxl

# 1. Get a workbook object (a workbook corresponds to one Excel file)
# Create a new workbook (it comes with one default worksheet)
# work_book = openpyxl.Workbook()
# Or open an existing file
# work_book = openpyxl.load_workbook(file_path)


# work_book = openpyxl.Workbook()
work_book = openpyxl.load_workbook('files/test2.xlsx')

# 2. Get the names of all worksheets
all_names = work_book.sheetnames
print(all_names)

# 3. Get a worksheet
# sheet = work_book['Sheet']

# 4. Create a worksheet
# workbook.create_sheet(sheet_name, index)
# work_book.create_sheet('学生表')
# work_book.create_sheet('学生表2', 0)

# 5. Delete a worksheet
# workbook.remove(sheet_object)
# work_book.remove(work_book[sheet_name])

# 6. Rename a worksheet
# sheet = work_book['学生表2']
# sheet = work_book.active
# sheet.title = 'Student'

# 7. Write data into cells
# cell.value               -   read the cell's content
# cell.value = new_value   -   overwrite the cell's content
sheet = work_book['Student']

# 1) Get a cell, method one
# sheet.cell(row: int, column: int)
cell1 = sheet.cell(1, 1)
# Modify the cell's content
# cell1.value = '姓名'

# 2) Get a cell, method two: index the sheet with an 'A1'-style reference
cell2 = sheet['B1']
# cell2.value = '年龄'

# Clear the cell
cell2.value = ''


# 8. Save the file
work_book.save('./files/test2.xlsx')
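
openpyxl can also write a whole row at a time with Worksheet.append(), which fills the row right after the last used one. A minimal sketch (the output file name is made up for illustration):

import openpyxl

wb = openpyxl.Workbook()
sheet = wb.active
sheet.title = 'Student'

# Each append() call writes one row after the current last row.
sheet.append(['姓名', '年龄'])     # header row
sheet.append(['张三', 18])         # sample data row
wb.save('files/append_demo.xlsx')  # hypothetical output path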

Reading from an Excel file

import openpyxl

# 1. Open the file
wb = openpyxl.load_workbook('files/test1.xlsx')

# 2. Get a worksheet
# sheet = wb['学生表']
sheet = wb.active

# 3. Get cells
# 1) Get a single cell
# sheet.cell(row, column)   -  row and column numbers both start at 1
# sheet[reference]          -  reference is an 'A1'/'B2'-style string: the letter is the column, the number is the row

# 2) Get cell objects row by row
# sheet.iter_rows(min_row, max_row, min_col, max_col)
cells = sheet.iter_rows(1, 4, 1, 4)
print(list(cells))

row_4 = sheet.iter_rows(4, 4)
print(list(row_4))

cells = sheet.iter_rows(2, 4, 1, 2)
print(list(cells))

# 3) Get cell objects column by column
cells = sheet.iter_cols(1, 4, 1, 4)
print(list(cells))

# Column 4 (the scores) from row 2 to row 4; next() pulls the single column tuple out of the generator
all_scores = sheet.iter_cols(4, 4, 2, 4)
# print(list(all_scores))
for score_cell in next(all_scores):
    print(score_cell.value)
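
If only the values matter and not the cell objects themselves, iter_rows() also accepts values_only=True and then yields plain tuples. A small sketch against the same sheet:

for row in sheet.iter_rows(min_row=1, max_row=sheet.max_row, values_only=True):
    print(row)    # each row is a plain tuple of cell values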

Scraping 51job data-analysis job postings


import requests
from re import findall
from json import loads
import time
import os
import openpyxl
# url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,数据分析,2,2001.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'
# }
# response = requests.get(url, headers=headers)
# print(response.text)
headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36 Edg/90.0.818.66'
    }


def get_one_page(page):
    url = f'https://search.51job.com/list/000000,000000,0000,00,9,99,数据分析,2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        json_data = findall(r'window.__SEARCH_RESULT__\s*=\s*(\{.+?\})</script>', response.text)[0]
        return loads(json_data)['engine_search_result']
    else:
        print('请求失败!')


def get_all_data():
    all_data = []
    page = 1
    while True:
        result = get_one_page(page)
        if not result:
            print('没有更多数据')
            break

        # all_data.extend(result)
        # yield result
        # Save this page into the Excel file
        save_page_data(result)

        print(f'获取第{page}页数据成功!')
        page += 1
        time.sleep(1)

    # return all_data


def get_work_book():
    # 1. If the file exists, load it; otherwise create a new workbook
    if os.path.exists('files/招聘信息.xlsx'):
        wb = openpyxl.load_workbook('files/招聘信息.xlsx')
    else:
        wb = openpyxl.Workbook()

    # 2. Check whether the '数据分析' sheet already exists
    names = wb.sheetnames
    if '数据分析' in names:
        sheet = wb['数据分析']
    else:
        sheet = wb.create_sheet('数据分析')
        titles = ['岗位名称', '薪资', '公司名称', '公司性质', '公司地址', '要求', '福利']
        for col in range(1, len(titles) + 1):
            sheet.cell(1, col).value = titles[col - 1]

    return wb, sheet


def save_page_data(data: list):
    row = sheet.max_row + 1
    for job in data:
        # Write the corresponding fields
        # titles = ['岗位名称', '薪资', '公司名称', '公司性质', '公司地址', '要求', '福利']
        job_info = [
            job.get('job_name', ''),
            job.get('providesalary_text', ''),
            job.get('company_name', ''),
            job.get('companytype_text', ''),
            job.get('workarea_text', ''),
            '/'.join(job.get('attribute_text', ['-', '-', '-', '-', '-'])),
            job.get('jobwelf', '')
        ]
        for col in range(1, len(job_info)+1):
            sheet.cell(row, col).value = job_info[col-1]

        print(job)
        row += 1
    wb.save('files/招聘信息.xlsx')


if __name__ == '__main__':
    wb, sheet = get_work_book()
    get_all_data()




Setting selenium options


from selenium import webdriver
# from selenium.webdriver import ChromeOptions

url = 'https://www.jd.com'

# 1. Create an options object
options = webdriver.ChromeOptions()

# 2. Add option parameters
# 1) Hide the "automated test software" notice
options.add_experimental_option('excludeSwitches', ['enable-automation'])

# 2) Disable image loading (speeds up scraping)
options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})

b = webdriver.Chrome(options=options)
b.get(url)
print(b.page_source)
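
Another commonly used option is headless mode, which runs Chrome without opening a visible window. A minimal sketch using the same ChromeOptions API (the target URL is simply reused from above):

from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument('--headless')     # no visible browser window
b = webdriver.Chrome(options=options)
b.get('https://www.jd.com')
print(len(b.page_source))
b.quit()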

Switching frames


from selenium import webdriver

url = 'https://mail.163.com/'

b = webdriver.Chrome()
b.get(url)

"""
有的时候会遇到这样的网页:一个网页对应的html标签嵌套了其他的html标签
(前端如果要实现嵌套的功能必须要将被嵌套的html放在iframe标签中),
如果需要爬取网页内容在嵌套的html里面,需要先让浏览器选中内容嵌套的html。
(浏览器对象默认选中的是最外面的html标签)
"""
# 1. Find the iframe tag that contains the nested html
box = b.find_element_by_css_selector('#loginDiv>iframe')

# 2. Switch into the frame
b.switch_to.frame(box)

print(b.page_source)
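
After working inside the frame you have to switch back before interacting with the outer page again; a one-line sketch:

# Return to the outermost html once you are done inside the iframe.
b.switch_to.default_content()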

51job with selenium


from selenium import webdriver
from selenium.webdriver.common.keys import Keys

url = 'https://www.51job.com/'
b = webdriver.Chrome()

b.get(url)

input = b.find_element_by_css_selector('#kwdselectid')
input.send_keys('数据分析')
input.send_keys(Keys.ENTER)

allcity = b.find_element_by_css_selector('.allcity')
allcity.click()

print(b.page_source)

beijing = b.find_element_by_css_selector('.panel_lnp.panel_py.panel_ct2 .de.d3 tbody tr td em')
print(beijing.text)
beijing.click()

sure = b.find_element_by_css_selector('#popop > div > div.but_box > span')
sure.click()
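
The clicks above can fail if the city panel has not rendered yet. One way to make this more robust is an explicit wait; this is a sketch, not part of the original code:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Wait up to 10 seconds for the city selector to become clickable before using it.
allcity = WebDriverWait(b, 10).until(
    EC.element_to_be_clickable((By.CSS_SELECTOR, '.allcity'))
)
allcity.click()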

Threads and processes

"""
Time:2021/6/1  17:03
Author:Spectre
"""
"""
如果CPU调度(切换)足够快,就造成了多线程并发执行的假象
线程很多的情况下,会消耗大量CPU资源
"""
# 一个进程默认有一个线程,这个线程叫主线程。其他的线程(需要手动创建)都叫子线程。
# 如果一个python程序需要子线程需要手动创建线程类Thread的对象
# 代码实现多线程原理:

import time
from datetime import datetime
from threading import Thread
# Thread - the thread class; every Thread object is a child thread


def download(name):
    print(f'{name}开始下载:{datetime.now()}')
    time.sleep(2)
    print(f'{name}结束下载:{datetime.now()}')

# 1. Download three movies in one thread (the main thread): takes about 6 seconds
download('肖申克的救赎')
download('霸王别姬')
download('阿甘正传')
# 肖申克的救赎开始下载:2021-06-01 17:16:28.267844
# 肖申克的救赎结束下载:2021-06-01 17:16:30.287582
# 霸王别姬开始下载:2021-06-01 17:16:30.287582
# 霸王别姬结束下载:2021-06-01 17:16:32.293531
# 阿甘正传开始下载:2021-06-01 17:16:32.293531
# 阿甘正传结束下载:2021-06-01 17:16:34.298189


# 2. Download the three movies in three child threads: about 2 seconds in total
# 1) Create the thread objects
t1 = Thread(target=download,args=('肖申克的救赎',))
t2 = Thread(target=download,args=('霸王别姬',))
t3 = Thread(target=download,args=('阿甘正传',))

# 2) Start the threads
t1.start()
t2.start()
t3.start()


# 10 threads - see the sketch below
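
A minimal sketch of the "10 threads" idea, reusing the download() function above: create the Thread objects in a loop, start them all, then call join() so the main thread waits for every download to finish. The movie names are placeholders.

threads = [Thread(target=download, args=(f'movie {i}',)) for i in range(10)]

for t in threads:
    t.start()

# join() blocks the main thread until the corresponding child thread is done.
for t in threads:
    t.join()
print('all downloads finished')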

Homework

"""
Time:2021/6/1  19:12
Author:Spectre
"""


from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from bs4 import BeautifulSoup
import csv

from datetime import datetime
from threading import Thread



def get_net_data():
    global b
    # b = webdriver.Chrome()
    # b.get('https://search.51job.com/list/090200,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE%25E5%2588%2586%25E6%259E%2590,2,2.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=')
    #
    # input = b.find_element_by_css_selector('#kwdselectid')
    # input.send_keys('数据分析')
    # input.send_keys(Keys.ENTER)
    # time.sleep(1)

    # Set the maximum scroll height in advance
    # max_height = 7500
    # Current scroll offset
    # y = 0
    # while True:
    #     y += 200
    #     b.execute_script(f'window.scrollTo(0,{y})')
    #     if y>max_height:
    #         break
    #     time.sleep(0.5)

    # print(b.page_source)
    # for i in range(2):
    #     b = webdriver.Chrome()
    #     b.get('https://search.51job.com/list/090200,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE%25E5%2588%2586%25E6%259E%2590,2,{i+1}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=')
    #
    #     print(b.page_source)
    # return b.page_source

# def fanye():
#     time.sleep(5)
#     search_btn = b.find_element_by_css_selector('.p_in>ul>.next>a')
#     # Click the next-page button
#     search_btn.click()


def an_data(data):
    soup = BeautifulSoup(data,'lxml')
    li_list = soup.select('.j_joblist>.e')
    # print(len(li_list))
    # print(li_list)

    # name_list = li_list[0].select_one('.jname.at').attrs['title']
    # print(name_list)

    all_item = []
    for li in li_list:
        item = {}
        name_list = li.select_one('.jname.at').attrs['title']
        item['name'] = name_list
        a_list = li.select_one('a').attrs['href']
        item['link'] = a_list
        time_list =li.select_one('.time').get_text()
        item['time'] = time_list
        price_list = li.select_one('.sal').get_text()
        item['price'] = price_list
        info_list = li.select_one('.d.at').get_text().replace(' ','')
        item['info'] = info_list
        tags_list =li.select_one('.tags')
        tags_list = li.select_one('.tags').attrs['title'] if tags_list else ''
        item['tags'] = tags_list
        co_name_list = li.select_one('.er>a').get_text()
        item['co_name'] = co_name_list
        co_link_list = li.select_one('.er>a').attrs['href']
        item['co_link'] = co_link_list
        co_type_list = li.select_one('.er>.dc.at').get_text()
        item['co_type'] = co_type_list
        co_intro_list = li.select_one('.er>.int.at').get_text()
        item['co_intro'] = co_intro_list

        all_item.append(item)

    print(all_item)
    # print(len(all_item))
    return all_item

def download(data):
    print(f'开始下载:{datetime.now()}{data}')
    time.sleep(2)
    print(f'结束下载:{datetime.now()}')



if __name__ == '__main__':

    threads = []
    b = webdriver.Chrome()
    for i in range(10):
        # Load pages 1-10 of the search results.
        b.get(f'https://search.51job.com/list/090200,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE%25E5%2588%2586%25E6%259E%2590,2,{i+1}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=')
        # an_data() runs here in the main thread; each child thread only prints the already-parsed result.
        threads.append(Thread(target=download, args=(an_data(b.page_source),)))
        # print(an_data(get_net_data()))
        # time.sleep(1)

    for x in threads:
        x.start()
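
The homework imports csv but never writes the parsed items out. A minimal sketch of how they could be saved with csv.DictWriter; the output path and helper name are assumptions, not part of the original:

import csv

def save_to_csv(items, path='files/51job.csv'):   # hypothetical output path
    # items is a list of the dicts built in an_data(); the dict keys become the CSV header.
    if not items:
        return
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=list(items[0].keys()))
        writer.writeheader()
        writer.writerows(items)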