在第一版的基础上,如果需要实现线程安全,需要在增加、删除、扩容的时候加锁。扩容的时候,需要对整个数组加锁。增加和删除的时候,只需要对链表的头结点进行加锁。具体实现代码如下:
package www.dustin.li.collections;
import java.util.concurrent.atomic.AtomicInteger;
/**
* 手写集合
*
* @param <K>
* @param <V>
*/
/**
 * A hand-written hash map with coarse-grained thread safety: every mutating
 * operation is {@code synchronized} on the map instance, so at most one
 * writer runs at a time (similar in spirit to {@code Hashtable}).
 *
 * <p>Keys must be non-null and have consistent hashCode/equals. Null values
 * are not rejected, but {@link #get} cannot distinguish a null value from an
 * absent key.
 *
 * @param <K> key type
 * @param <V> value type
 */
public class ConcurrentMyHashMap<K, V> {
    /** Current length of the bucket array; doubled on each resize. */
    private volatile int capacity = 16;

    /** Total number of key-value entries in the map. */
    private final AtomicInteger size = new AtomicInteger(0);

    /** Number of non-empty buckets; drives the load-factor check. */
    private final AtomicInteger usedBuckets = new AtomicInteger(0);

    /** Resize once usedBuckets / capacity exceeds this load factor. */
    private static final float LOAD_FACTOR = 0.75F;

    /** Bucket array; each slot holds the head of a singly linked chain. */
    volatile Node<K, V>[] nodes = new Node[capacity];

    /**
     * Inserts a key-value pair. If the key is already present its value is
     * replaced (standard Map semantics — the previous version compared
     * values instead of keys, so it could store duplicate keys and never
     * updated an existing mapping).
     *
     * @param key   non-null key
     * @param value value to associate with the key
     * @return always {@code true}
     */
    public synchronized boolean put(K key, V value) {
        int index = getIndex(key);
        Node<K, V> head = nodes[index];
        if (head == null) {
            // Empty bucket: this entry becomes the chain head.
            Node<K, V> node = new Node<>();
            node.setKey(key);
            node.setValue(value);
            nodes[index] = node;
            size.incrementAndGet();
            usedBuckets.incrementAndGet();
            if (checkThreshold()) {
                resize();
            }
            return true;
        }
        // Walk the chain; replace the value if the key already exists.
        Node<K, V> current = head;
        for (;;) {
            if (current.getKey().equals(key)) {
                current.setValue(value);
                return true;
            }
            if (current.nextNode == null) {
                break;
            }
            current = current.nextNode;
        }
        // Key not found anywhere in the chain: append a new node at the tail.
        Node<K, V> node = new Node<>();
        node.setKey(key);
        node.setValue(value);
        current.nextNode = node;
        size.incrementAndGet();
        return true;
    }

    /**
     * Looks up the value mapped to a key. Synchronized so a concurrent
     * {@link #resize()} cannot hand this thread a half-migrated table.
     *
     * @param key non-null key
     * @return the mapped value, or {@code null} if the key is absent
     */
    public synchronized V get(K key) {
        for (Node<K, V> node = nodes[getIndex(key)]; node != null; node = node.nextNode) {
            if (node.getKey().equals(key)) {
                return node.getValue();
            }
        }
        return null;
    }

    /**
     * Removes the entry for a key.
     *
     * @param key non-null key
     * @return the removed value, or {@code null} if the key was absent
     */
    public synchronized V remove(K key) {
        int index = getIndex(key);
        Node<K, V> head = nodes[index];
        if (head == null) {
            return null;
        }
        if (head.getKey().equals(key)) {
            nodes[index] = head.nextNode;
            size.decrementAndGet();
            // The bucket only stops being "used" when the whole chain is gone
            // (the previous version decremented even when a chain remained).
            if (nodes[index] == null) {
                usedBuckets.decrementAndGet();
            }
            return head.getValue();
        }
        for (Node<K, V> prev = head; prev.nextNode != null; prev = prev.nextNode) {
            Node<K, V> candidate = prev.nextNode;
            if (candidate.getKey().equals(key)) {
                prev.nextNode = candidate.nextNode;
                size.decrementAndGet();
                return candidate.getValue();
            }
        }
        return null;
    }

    /**
     * @return {@code true} when the fraction of occupied buckets exceeds the
     *         load factor and the table should grow
     */
    private boolean checkThreshold() {
        return usedBuckets.intValue() / (float) capacity > LOAD_FACTOR;
    }

    /**
     * Doubles the bucket array and re-links every existing node into its new
     * bucket. Both counters are rebuilt from scratch — the previous version
     * reset only the entry count and left the used-bucket counter inflated,
     * so the load-factor check drifted after the first resize. Nodes are
     * re-linked directly instead of calling {@link #put}, which also rules
     * out resize() recursing into itself mid-migration.
     */
    private synchronized void resize() {
        int oldCapacity = capacity;
        capacity = oldCapacity * 2;
        Node<K, V>[] oldNodes = nodes;
        nodes = new Node[capacity];
        size.set(0);
        usedBuckets.set(0);
        for (int i = 0; i < oldCapacity; i++) {
            Node<K, V> node = oldNodes[i];
            while (node != null) {
                Node<K, V> next = node.nextNode;
                int index = getIndex(node.getKey());
                if (nodes[index] == null) {
                    usedBuckets.incrementAndGet();
                }
                // Push onto the head of the new chain.
                node.nextNode = nodes[index];
                nodes[index] = node;
                size.incrementAndGet();
                node = next;
            }
        }
    }

    /** One entry in a bucket chain. */
    private static class Node<K, V> {
        private K key;
        private V value;
        private Node<K, V> nextNode;

        public K getKey() {
            return key;
        }

        public void setKey(K key) {
            this.key = key;
        }

        public V getValue() {
            return value;
        }

        public void setValue(V value) {
            this.value = value;
        }
    }

    /**
     * Maps a key to a bucket index. The sign bit is masked off rather than
     * negated: {@code -Integer.MIN_VALUE} overflows back to a negative
     * number, which the previous version turned into an
     * ArrayIndexOutOfBoundsException swallowed by a try/catch in put().
     *
     * @param key non-null key
     * @return index in [0, capacity)
     */
    private int getIndex(K key) {
        return (key.hashCode() & 0x7FFFFFFF) % capacity;
    }

    /** @return the current number of key-value entries. */
    public int getSIZE() {
        return size.intValue();
    }
}
测试代码:
package www.dustin.li.collections;
/**
 * Stress test: 40,000 threads each put one entry into the map, then the main
 * thread verifies no entry was lost.
 *
 * <p>The previous version parked the main thread with
 * {@code Thread.sleep(100000)} and hoped all workers had finished — both slow
 * and flaky. This version keeps a reference to every worker and joins them,
 * which is a deterministic completion barrier. The pointless outer wrapper
 * thread (the old {@code t1}) is gone: it only spawned the workers and added
 * nothing.
 */
public class TestMyHashMap {
    public static void main(String[] args) throws InterruptedException {
        final ConcurrentMyHashMap<Integer, Integer> map = new ConcurrentMyHashMap<>();
        final int entries = 40000;
        Thread[] workers = new Thread[entries];
        for (int i = 1; i <= entries; i++) {
            final int result = i;
            workers[i - 1] = new Thread(new Runnable() {
                @Override
                public void run() {
                    map.put(result, result);
                }
            }, "ftf" + i);
            workers[i - 1].start();
        }
        // Deterministic barrier: wait for every writer to finish.
        for (Thread worker : workers) {
            worker.join();
        }
        int count = 0;
        for (int i = 1; i <= entries; i++) {
            // 检测数据是否发生丢失
            Integer value = map.get(i);
            if (value == null) {
                count++;
                System.out.println(i + "数据丢失");
            }
        }
        System.out.println(map.getSIZE());
        System.out.println("数据丢失数量为:" + count);
        System.out.println("end...");
    }
}
对照jdk1.7和1.8的实现,上面手写的代码,性能非常差,有点类似于hashtable的实现原理:
区别:
1.Hashtable不允许键值对为null,ConcurrentHashMap也不允许键值对为null,HashMap允许键值对为null,key为null的键值对存储在HashMap数组的第一个位置上
2.我自己实现的hashmap与hashtable一样,都没有在链表上使用红黑树,防止链表太长,链表太长,每次查询的时候查找较耗时
3.我实现的hashmap与hashtable也一样,加的锁都是在方法上,加锁粒度较大,发生阻塞的概率更大。jdk1.7使用分段,用reentrantLock来进行加锁,将一个大数组分成一个个小的数组来减小加锁的粒度。到jdk1.8,放弃了reentrantlock,使用synchronized来进行加锁,使用cas保证操作的原子性。以前是synchronized的效率不高,后面优化过之后,synchronized的效率很高了,而且synchronized是jvm里面定义的,能更好的随着jvm来进行升级。jdk1.8中,加锁是在每个链表头上进行加锁,这样粒度更小了。如何保证数组的某个index上数组为null时,如何保证原子操作呢,使用的是cas,保证只有一个线程能够操作成功。