HashTable、HashMap、Synchronized、ConcurrentHashMap性能测试分析

目的

本文目的是为了对HashTable、HashMap、SynchronizedMap、ConcurrentHashMap做一个性能测试,以比较理论与实践的差距

方法

首先我准备了一个Junit测试类,可以配置不同线程并发数,以及插入到Map中的数据量大小。具体代码如下:

package com.ethan.juc.container;

import org.junit.BeforeClass;
import org.junit.Test;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

public class TestCollection {

    // Total number of key/value pairs each test inserts.
    static int count = 1000000;
    // Number of concurrent writer (and reader) threads.
    static final int THREAD_COUNT = 10;
    // Pre-generated keys, shared by every test run.
    static UUID[] keys = new UUID[count];
    // Pre-generated values, shared by every test run.
    static UUID[] values = new UUID[count];
    // Number of entries each writer thread is responsible for.
    static int gap = count / THREAD_COUNT;
    // Latch used to wait until all worker threads of a phase have finished.
    static CountDownLatch latch;

    /** Generates the random key/value data once before any test runs. */
    @BeforeClass
    public static void beforeClass() {
        for (int i = 0; i < count; i++) {
            keys[i] = UUID.randomUUID();
            values[i] = UUID.randomUUID();
        }
    }

    @Test
    public void testHashTable() throws InterruptedException {
        benchmark(new Hashtable<UUID, UUID>(), "Hashtable");
    }

    /**
     * NOTE(review): a plain HashMap is not thread-safe. Concurrent writes can
     * silently drop entries, and on older JDKs (pre-8) a racy resize could
     * corrupt the table into an infinite loop. This test exists only to
     * demonstrate that misbehavior and may hang or report size < count.
     *
     * @throws InterruptedException if the awaiting thread is interrupted
     */
    @Test
    public void testHashMap() throws InterruptedException {
        benchmark(new HashMap<UUID, UUID>(), "HashMap");
    }

    @Test
    public void testSynchronizedMap() throws InterruptedException {
        benchmark(Collections.synchronizedMap(new HashMap<UUID, UUID>()), "SynchronizedMap");
    }

    @Test
    public void testConcurrentHashMap() throws InterruptedException {
        benchmark(new ConcurrentHashMap<UUID, UUID>(), "ConcurrentHashMap");
    }

    /**
     * Shared benchmark body: THREAD_COUNT writers each insert a disjoint slice
     * of the data into {@code param}, then THREAD_COUNT readers each look up
     * every key. Prints the elapsed wall-clock time of both phases.
     *
     * @param param the map implementation under test
     * @param label name appended to the printed result line
     * @throws InterruptedException if the awaiting thread is interrupted
     */
    private static void benchmark(Map<UUID, UUID> param, String label) throws InterruptedException {
        // Write phase: each thread owns the half-open slice [i*gap, (i+1)*gap).
        Thread[] threads = new Thread[THREAD_COUNT];
        latch = new CountDownLatch(THREAD_COUNT);
        long start = System.currentTimeMillis();
        for (int i = 0; i < THREAD_COUNT; i++) {
            threads[i] = new WriteThread(param, i * gap, latch);
        }
        for (Thread t : threads) {
            t.start();
        }
        latch.await();
        long middle = System.currentTimeMillis();

        // Read phase: every reader scans all keys, so total reads = THREAD_COUNT * count.
        latch = new CountDownLatch(THREAD_COUNT);
        threads = new Thread[THREAD_COUNT];
        for (int i = 0; i < THREAD_COUNT; i++) {
            threads[i] = new ReadThread(param, latch);
        }
        for (Thread t : threads) {
            t.start();
        }
        latch.await();
        long end = System.currentTimeMillis();

        System.out.println("write cost:" + (middle - start) + ", read cost: " + (end - middle) + ", size: " + param.size() + "  ->" + label);
    }

    /** Writer worker: inserts the {@code gap} entries starting at {@code start}, then counts down the latch. */
    static class WriteThread extends Thread {
        Map<UUID, UUID> m;
        CountDownLatch latch;
        int start;

        public WriteThread(Map<UUID, UUID> m, int start, CountDownLatch latch) {
            this.m = m;
            this.start = start;
            this.latch = latch;
        }

        @Override
        public void run() {
            for (int i = start; i < gap + start; i++) {
                m.put(keys[i], values[i]);
            }
            latch.countDown();
        }
    }

    /** Reader worker: looks up every key once, then counts down the latch. */
    static class ReadThread extends Thread {
        Map<UUID, UUID> m;
        CountDownLatch latch;

        public ReadThread(Map<UUID, UUID> m, CountDownLatch latch) {
            this.m = m;
            this.latch = latch;
        }

        @Override
        public void run() {
            for (int j = 0; j < count; j++) {
                m.get(keys[j]);
            }
            latch.countDown();
        }
    }

}

执行结果

分别统计以下情况:

  1. 线程数为1,写入总数为1000000
  2. 线程数为10,写入总数为1000000
  3. 线程数为100,写入总数为1000000
    在这里插入图片描述
    从结果可以看出:
  • 当线程数为1时,不存在并发的情况,四种写入的时间差距不大,使用synchronized的HashTable,略快于ConcurrentHashMap
  • 当线程数增多,达到10或100时,四种的写入时间差距不大,但是ConcurrentHashMap的读取时间,大大优于其他三种。
  • 对于HashMap,在多线程并发写的情况下,容易造成数据丢失,甚至(在JDK 7及更早版本中因并发扩容)出现死循环,看上去像“死锁”一样使程序挂起。

结论

验证结果如下:

  1. 在单线程环境中,或无并发的情况下,使用HashMap的效率优于其他三种
  2. 在多线程的环境中,不能使用HashMap,容易造成数据丢失或死循环(并非严格意义上的死锁)的现象
  3. 在多线程的环境中,若读取多的情况下,建议使用ConcurrentHashMap
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值