1.代码
import java.util.*;
/**
 * Utility for removing duplicate entries from a {@code List<Map>} based on a
 * caller-supplied set of key fields.
 *
 * @author YMJ
 * @version V1.0.0
 * @since 2020-11-17
 */
public class ListMapDeleteDuplicatedMap {
    /**
     * Removes duplicates from a list of maps. Two maps are considered
     * duplicates when they hold equal values for every field in {@code keys}.
     * When several maps share the same key values, the LAST one in the input
     * list is kept (same overwrite-on-put semantics as before).
     *
     * <p>The composite key is the list of the actual field values, compared via
     * {@code List.equals}. This replaces the original approach of concatenating
     * per-field {@code hashCode()} strings, which could collide and silently
     * drop distinct records. A {@link LinkedHashMap} keeps the result in
     * first-seen order instead of {@code HashMap}'s unspecified order.
     *
     * @param originMapList source list of maps; {@code null} or empty yields an empty list
     * @param keys          fields whose values define map identity for deduplication;
     *                      {@code null} or empty collapses the whole list to one entry
     * @return a new mutable list containing the deduplicated maps
     */
    public static List deleteDuplicatedMapFromListByKeys(List<Map> originMapList, List keys) {
        if (originMapList == null || originMapList.isEmpty()) {
            return new ArrayList<>();
        }
        // Keyed by the list of field values; insertion order = first-seen order.
        Map<List<Object>, Map> seen = new LinkedHashMap<>();
        for (Map originMap : originMapList) {
            List<Object> compositeKey = new ArrayList<>(keys == null ? 0 : keys.size());
            if (keys != null) {
                for (Object key : keys) {
                    compositeKey.add(originMap == null ? null : originMap.get(key));
                }
            }
            // Later duplicates overwrite earlier ones: last occurrence wins.
            seen.put(compositeKey, originMap);
        }
        return new ArrayList<>(seen.values());
    }

    /** Demo: deduplicates a list of student maps by several key combinations. */
    public static void main(String[] args) {
        Map<String, Object> stu1 = new HashMap<>();
        Map<String, Object> stu2 = new HashMap<>();
        Map<String, Object> stu3 = new HashMap<>();
        Map<String, Object> stu4 = new HashMap<>();
        Map<String, Object> stu5 = new HashMap<>();
        stu1.put("name", "张三");
        stu1.put("age", 18);
        stu1.put("school", "山东科技大学");
        stu2.put("name", "李四");
        stu2.put("age", 18);
        stu2.put("school", "北京大学");
        stu3.put("name", "张三");
        stu3.put("age", 22);
        stu3.put("school", "山东大学");
        stu4.put("name", "张三");
        stu4.put("age", 18);
        stu4.put("school", "山东科技大学");
        stu5.put("name", "王五");
        stu5.put("age", 20);
        stu5.put("school", "青岛大学");
        List<Map> stuList = new ArrayList<>();
        stuList.add(stu1);
        stuList.add(stu2);
        stuList.add(stu3);
        stuList.add(stu4);
        stuList.add(stu5);
        System.out.println("原始集合");
        stuList.forEach(System.out::println);
        List<Object> deleteDuplicatedKeys = new ArrayList<>();
        deleteDuplicatedKeys.add("school");
        deleteDuplicatedKeys.add("name");
        deleteDuplicatedKeys.add("age");
        List result = deleteDuplicatedMapFromListByKeys(stuList, deleteDuplicatedKeys);
        System.out.println("------------\n去重参数:" + deleteDuplicatedKeys + "-去重后集合:");
        result.forEach(System.out::println);
        deleteDuplicatedKeys.clear();
        deleteDuplicatedKeys.add("school");
        deleteDuplicatedKeys.add("name");
        result = deleteDuplicatedMapFromListByKeys(stuList, deleteDuplicatedKeys);
        System.out.println("------------\n去重参数:" + deleteDuplicatedKeys + "-去重后集合:");
        result.forEach(System.out::println);
        deleteDuplicatedKeys.clear();
        deleteDuplicatedKeys.add("age");
        result = deleteDuplicatedMapFromListByKeys(stuList, deleteDuplicatedKeys);
        System.out.println("------------\n去重参数:" + deleteDuplicatedKeys + "-去重后集合:");
        result.forEach(System.out::println);
    }
}
2.输出
原始集合
{school=山东科技大学, name=张三, age=18}
{school=北京大学, name=李四, age=18}
{school=山东大学, name=张三, age=22}
{school=山东科技大学, name=张三, age=18}
{school=青岛大学, name=王五, age=20}
------------
去重参数:[school, name, age]-去重后集合:
{school=山东大学, name=张三, age=22}
{school=北京大学, name=李四, age=18}
{school=青岛大学, name=王五, age=20}
{school=山东科技大学, name=张三, age=18}
------------
去重参数:[school, name]-去重后集合:
{school=山东大学, name=张三, age=22}
{school=北京大学, name=李四, age=18}
{school=山东科技大学, name=张三, age=18}
{school=青岛大学, name=王五, age=20}
------------
去重参数:[age]-去重后集合:
{school=山东大学, name=张三, age=22}
{school=山东科技大学, name=张三, age=18}
{school=青岛大学, name=王五, age=20}

Process finished with exit code 0
3.缺点
数据量大的时候有性能问题。还有什么更好的方法吗?