Introduction: sensitive word filtering is a feature many websites need. The system maintains a sensitive word dictionary, and the filter must answer three questions about any submitted text: does it contain a sensitive word, which sensitive words does it contain, and how should they be replaced. Two approaches are described below. Method 1: segment the text with a tokenizer, then look each token up in the dictionary. Method 2: match sensitive words with the DFA algorithm.
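Both methods implement the same three operations. As a minimal sketch (a hypothetical interface, not part of the original code; the names simply mirror the implementations below):

import java.util.Set;

// Hypothetical interface capturing the three operations described above.
public interface SensitiveWordService {
    boolean contains(String text);               // does the text contain any sensitive word?
    Set<String> findSensitiveWords(String text); // which sensitive words does it contain?
    String mask(String text, char replacement);  // replace each sensitive word with a mask character
}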
Method 1: sensitive word filtering with a word segmenter (IKAnalyzer3.2.5Stable.jar)
package com.example.sensitive01;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.List;
import java.util.Set;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;
/**
 * Sensitive word utility based on IKAnalyzer, a Chinese word segmenter:
 * the text is segmented first, then each token is looked up in the dictionary.
 */
public class SensitiveWordUtil {
    // sensitive word dictionary
    public static Map<String, String> sensitiveWordMap;

    // initialize the sensitive word dictionary
    public static synchronized void init(Set<String> sensitiveWordSet) {
        // size the map up front to reduce rehashing
        sensitiveWordMap = new HashMap<>(sensitiveWordSet.size());
        for (String sensitiveWord : sensitiveWordSet) {
            sensitiveWordMap.put(sensitiveWord, sensitiveWord);
        }
    }

    // check whether the text contains any sensitive word
    public static boolean contains(String txt) throws IOException {
        List<String> wordList = segment(txt);
        for (String word : wordList) {
            if (sensitiveWordMap.get(word) != null) {
                return true;
            }
        }
        return false;
    }
    /**
     * Segment the text into tokens.
     * @param txt the text to segment
     * @return the list of tokens
     * @throws IOException
     */
    private static List<String> segment(String txt) throws IOException {
        List<String> list = new ArrayList<>();
        StringReader re = new StringReader(txt);
        // second argument: true enables maximum-word-length segmentation
        IKSegmentation ik = new IKSegmentation(re, true);
        Lexeme lex;
        while ((lex = ik.next()) != null) {
            list.add(lex.getLexemeText());
        }
        return list;
    }
    /**
     * Collect the sensitive words contained in the text.
     * @param txt the text to check
     * @return the set of sensitive words found
     * @throws IOException
     */
    public static Set<String> getSensitiveWord(String txt) throws IOException {
        Set<String> sensitiveWordSet = new HashSet<>();
        // segment the text first
        List<String> wordList = segment(txt);
        for (String word : wordList) {
            if (sensitiveWordMap.get(word) != null) {
                sensitiveWordSet.add(word);
            }
        }
        return sensitiveWordSet;
    }
    /**
     * Replace each sensitive word in the text with a run of the given character.
     * @param txt the text
     * @param replaceChar the replacement character
     * @return the text after replacement
     * @throws IOException
     */
    public static String replaceSensitiveWord(String txt, char replaceChar) throws IOException {
        String resultTxt = txt;
        // collect all sensitive words first
        Set<String> sensitiveWordList = getSensitiveWord(txt);
        String replaceString;
        for (String sensitiveWord : sensitiveWordList) {
            replaceString = getReplaceChars(replaceChar, sensitiveWord.length());
            // use replace rather than replaceAll: the word must not be treated as a regex
            resultTxt = resultTxt.replace(sensitiveWord, replaceString);
        }
        return resultTxt;
    }
    /**
     * Build the replacement string: the mask character repeated length times.
     * @param replaceChar the mask character
     * @param length how many times to repeat it
     * @return the replacement string
     */
    private static String getReplaceChars(char replaceChar, int length) {
        StringBuilder resultReplace = new StringBuilder();
        for (int i = 0; i < length; i++) {
            resultReplace.append(replaceChar);
        }
        return resultReplace.toString();
    }
    /**
     * Replace each sensitive word in the text with the given string.
     * @param txt the text
     * @param replaceStr the replacement string
     * @return the text after replacement
     * @throws IOException
     */
    public static String replaceSensitiveWord(String txt, String replaceStr) throws IOException {
        String resultTxt = txt;
        Set<String> sensitiveWordList = getSensitiveWord(txt);
        for (String sensitiveWord : sensitiveWordList) {
            resultTxt = resultTxt.replace(sensitiveWord, replaceStr);
        }
        return resultTxt;
    }
    // test
    public static void main(String[] args) {
        Set<String> sensitiveWordSet = new HashSet<>();
        sensitiveWordSet.add("饲养");
        sensitiveWordSet.add("基地");
        sensitiveWordSet.add("红客");
        sensitiveWordSet.add("联盟");
        sensitiveWordSet.add("贱人");
        sensitiveWordSet.add("手机卡");
        sensitiveWordSet.add("复制器");
        sensitiveWordSet.add("发呆");
        // initialize the dictionary
        SensitiveWordUtil.init(sensitiveWordSet);
        // the target string to process
        String str = "太多的伤感情怀也许只局限于饲养基地 荧幕中的情节。"
                + "然后 我们的扮演的角色就是跟随着主人公的喜红客联盟 怒哀乐而过于牵强的把自己的情感也附加于银幕情节中,然后感动就流泪,"
                + "难过就躺在某一个人的怀里尽情的阐述心扉或者手机卡复制器一个贱人一杯红酒一部电影在夜 深人静的晚上,关上电话静静的发呆着。";
        // does the text contain any sensitive word?
        try {
            boolean result = SensitiveWordUtil.contains(str);
            System.out.println("Does the text contain sensitive words: " + result);
        } catch (IOException e) {
            e.printStackTrace();
        }
        // which sensitive words does it contain?
        Set<String> set;
        try {
            set = SensitiveWordUtil.getSensitiveWord(str);
            System.out.println("Number of sensitive words in the text: " + set.size() + ". They are: " + set);
        } catch (IOException e) {
            e.printStackTrace();
        }
        // replace the sensitive words
        String filterStr;
        try {
            filterStr = SensitiveWordUtil.replaceSensitiveWord(str, '*');
            System.out.println(filterStr);
        } catch (IOException e) {
            e.printStackTrace();
        }
        String filterStr2;
        try {
            filterStr2 = SensitiveWordUtil.replaceSensitiveWord(str, "*敏感词*");
            System.out.println(filterStr2);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Analysis: with IKAnalyzer-based filtering, the segmentation result is sometimes not ideal, or rather has an inherent limitation. For example, if "饲养基地" is a sensitive word, the segmenter splits it into "饲养" and "基地", so "饲养基地" itself can never be matched as a token and the filtering is incomplete.
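The following standalone sketch illustrates the gap (the class name and the token list are hypothetical stand-ins for real IK output; it does not depend on the IK jar, and assumes Java 9+ for Set.of/List.of):

import java.util.List;
import java.util.Set;

// Demonstrates why token-level lookup misses compound sensitive words.
public class SegmentationGapDemo {
    public static void main(String[] args) {
        Set<String> dictionary = Set.of("饲养基地");    // the compound sensitive word
        List<String> tokens = List.of("饲养", "基地");  // what the segmenter might produce
        boolean tokenHit = tokens.stream().anyMatch(dictionary::contains);
        System.out.println("token-level match: " + tokenHit); // false: the compound word is missed
        // a simple fallback: scan the raw text for each dictionary entry directly
        String text = "太多的伤感情怀也许只局限于饲养基地";
        boolean rawHit = dictionary.stream().anyMatch(text::contains);
        System.out.println("raw-text match: " + rawHit);      // true
    }
}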
Method 2: sensitive word filtering with the DFA algorithm
The idea is to organize the dictionary as a nested-map trie (a simple deterministic finite automaton): each character maps to the map of its possible next characters, and an isEnd flag marks where a complete word ends, so the text can be scanned without any segmentation.
The dictionary initialization class:
package com.example.test03;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * Initializes the sensitive word dictionary: loads the words into a nested
 * HashMap that forms the DFA model.
 */
public class SensitiveWordInit {
    private static final String ENCODING = "GBK"; // encoding of the dictionary file
    public Map<Object, Object> sensitiveWordMap;  // the sensitive word dictionary

    /*
     * Initialize the dictionary.
     */
    public Map<Object, Object> initKeyWord() {
        try {
            // read the dictionary file
            Set<String> keyWordSet = readSensitiveWordFile();
            // load the words into the nested HashMap
            addSensitiveWordToHashMap(keyWordSet);
            // in a Spring web app the result could be cached, e.g.
            // application.setAttribute("sensitiveWordMap", sensitiveWordMap);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return sensitiveWordMap;
    }
    /**
     * Build the DFA model from the word set. For the words 中国, 中国人民,
     * 中国男人 and 五星红旗 the nested map looks like:
     *
     * 中 = {
     *     isEnd = 0
     *     国 = {
     *         isEnd = 1
     *         人 = {
     *             isEnd = 0
     *             民 = {isEnd = 1}
     *         }
     *         男 = {
     *             isEnd = 0
     *             人 = {isEnd = 1}
     *         }
     *     }
     * }
     * 五 = {
     *     isEnd = 0
     *     星 = {
     *         isEnd = 0
     *         红 = {
     *             isEnd = 0
     *             旗 = {isEnd = 1}
     *         }
     *     }
     * }
     */
    private void addSensitiveWordToHashMap(Set<String> keyWordSet) {
        // size the map up front to reduce rehashing, which is relatively costly
        sensitiveWordMap = new HashMap<>(keyWordSet.size());
        Map<Object, Object> nowMap;
        Map<Object, Object> newWordMap;
        // iterate over the word set
        for (String key : keyWordSet) {
            nowMap = sensitiveWordMap;
            for (int i = 0; i < key.length(); i++) {
                char keyChar = key.charAt(i); // the current character
                Object wordMap = nowMap.get(keyChar); // look it up on this level
                if (wordMap != null) {
                    // the character is already present: descend into its map
                    nowMap = (Map<Object, Object>) wordMap;
                } else {
                    // not present: create a new map with isEnd = 0, since it is not the last character yet
                    newWordMap = new HashMap<>();
                    newWordMap.put("isEnd", "0");
                    nowMap.put(keyChar, newWordMap);
                    nowMap = newWordMap;
                }
                if (i == key.length() - 1) {
                    nowMap.put("isEnd", "1"); // last character of the word
                }
            }
        }
    }
    /**
     * Read the dictionary file and collect its lines into a set.
     */
    private Set<String> readSensitiveWordFile() throws Exception {
        File file = new File("D:\\SensitiveWord.txt"); // the dictionary file
        if (!file.isFile() || !file.exists()) {
            throw new Exception("sensitive word dictionary file does not exist");
        }
        Set<String> set = new HashSet<String>();
        // try-with-resources closes the stream even if reading fails
        try (BufferedReader bufferedReader = new BufferedReader(
                new InputStreamReader(new FileInputStream(file), ENCODING))) {
            String txt;
            while ((txt = bufferedReader.readLine()) != null) {
                // one sensitive word per line
                set.add(txt);
            }
        }
        return set;
    }
}
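To inspect the trie the initializer builds without depending on D:\SensitiveWord.txt, the following standalone sketch (a hypothetical TrieDemo class using the same insertion logic; assumes Java 9+ for Set.of) builds the nested map for two words and prints it:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Builds the nested-map trie for a tiny word set and prints it.
public class TrieDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Map<Object, Object> root = new HashMap<>();
        for (String word : Set.of("中国", "中国人")) {
            Map<Object, Object> nowMap = root;
            for (int i = 0; i < word.length(); i++) {
                char c = word.charAt(i);
                Map<Object, Object> next = (Map<Object, Object>) nowMap.get(c);
                if (next == null) {
                    next = new HashMap<>();
                    next.put("isEnd", "0"); // not the end of a word yet
                    nowMap.put(c, next);
                }
                nowMap = next;
            }
            nowMap.put("isEnd", "1"); // mark the end of this word
        }
        // prints something like {中={国={isEnd=1, 人={isEnd=1}}, isEnd=0}}
        System.out.println(root);
    }
}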
The filtering class:
package com.example.test03;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * @Description: sensitive word filter
 */
public class SensitiveWordFilter {
    private Map<Object, Object> sensitiveWordMap = null;
    public static int minMatchType = 1; // minimum matching: stop at the first complete word
    public static int maxMatchType = 2; // maximum matching: keep going to find the longest word

    /**
     * Constructor: initializes the sensitive word dictionary.
     */
    public SensitiveWordFilter() {
        sensitiveWordMap = new SensitiveWordInit().initKeyWord();
    }
    /**
     * Check whether the text contains any sensitive word.
     * @param txt the text
     * @param matchType the matching rule (minMatchType or maxMatchType)
     * @return true if a sensitive word is found
     */
    public boolean isContainSensitiveWord(String txt, int matchType) {
        for (int i = 0; i < txt.length(); i++) {
            // a match length greater than 0 means a sensitive word starts at position i
            int matchFlag = this.checkSensitiveWord(txt, i, matchType);
            if (matchFlag > 0) {
                return true;
            }
        }
        return false;
    }
    /**
     * Collect the sensitive words contained in the text.
     */
    public Set<String> getSensitiveWord(String txt, int matchType) {
        Set<String> sensitiveWordList = new HashSet<String>();
        for (int i = 0; i < txt.length(); i++) {
            int length = checkSensitiveWord(txt, i, matchType); // length of the match starting at i
            if (length > 0) {
                // a sensitive word was found: record it and skip past it
                sensitiveWordList.add(txt.substring(i, i + length));
                i = i + length - 1; // minus 1 because the for loop increments i
            }
        }
        return sensitiveWordList;
    }
    /**
     * Replace the sensitive words in the text.
     */
    public String replaceSensitiveWord(String txt, int matchType, String replaceChar) {
        String resultTxt = txt;
        Set<String> set = getSensitiveWord(txt, matchType); // collect all sensitive words
        String replaceString;
        for (String word : set) {
            replaceString = getReplaceChars(replaceChar, word.length());
            // use replace rather than replaceAll: the word must not be treated as a regex
            resultTxt = resultTxt.replace(word, replaceString);
        }
        return resultTxt;
    }
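    // An alternative worth considering (a sketch, not part of the original post):
    // instead of collecting the words into a Set and replacing them one by one,
    // walk the text once and mask each match in place. This avoids rescanning
    // the whole string per word and handles repeated words naturally.
    public String replaceSensitiveWordOnePass(String txt, int matchType, char mask) {
        StringBuilder sb = new StringBuilder(txt);
        for (int i = 0; i < txt.length(); i++) {
            int len = checkSensitiveWord(txt, i, matchType); // length of the match at i, 0 if none
            if (len > 0) {
                for (int j = i; j < i + len; j++) {
                    sb.setCharAt(j, mask); // overwrite the matched characters
                }
                i = i + len - 1; // skip past the match; the loop adds the final +1
            }
        }
        return sb.toString();
    }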
    /**
     * Build the replacement string: the mask repeated length times.
     */
    private String getReplaceChars(String replaceChar, int length) {
        StringBuilder resultReplace = new StringBuilder(replaceChar);
        for (int i = 1; i < length; i++) {
            resultReplace.append(replaceChar);
        }
        return resultReplace.toString();
    }
    /**
     * Check whether a sensitive word starts at beginIndex. Rules: walk the trie
     * character by character; whenever isEnd = 1 a complete word has been matched.
     * With minMatchType the search stops at the first complete word; with
     * maxMatchType it keeps going to look for the longest one.
     * @return the length of the matched word, or 0 if there is no match
     */
    public int checkSensitiveWord(String txt, int beginIndex, int matchType) {
        boolean flag = false;  // has a complete word been matched (also covers one-character words)
        int matchFlag = 0;     // number of characters matched so far
        int lastMatch = 0;     // length of the last complete word seen
        char word;
        Map<Object, Object> nowMap = sensitiveWordMap;
        for (int i = beginIndex; i < txt.length(); i++) {
            word = txt.charAt(i);
            nowMap = (Map<Object, Object>) nowMap.get(word); // descend one level in the trie
            if (nowMap != null) {
                matchFlag++; // one more character matched
                if ("1".equals(nowMap.get("isEnd"))) {
                    flag = true; // a complete word ends here
                    // remember its length: with maxMatchType the loop may continue into a
                    // longer candidate that then fails, and matchFlag alone would overcount
                    lastMatch = matchFlag;
                    if (SensitiveWordFilter.minMatchType == matchType) {
                        break; // minimum matching: stop at the first complete word
                    }
                }
            } else {
                break; // the character is not in the trie: no further match is possible
            }
        }
        return flag ? lastMatch : 0;
    }
    public static void main(String[] args) {
        SensitiveWordFilter filter = new SensitiveWordFilter();
        System.out.println("Number of entries on the first trie level: " + filter.sensitiveWordMap.size());
        String string = "太多的伤感情怀也许只局限于饲养基地 荧幕中的情节。"
                + "然后 我们的扮演的角色就是跟随着主人公的喜红客联盟 怒哀乐而过于牵强的把自己的情感也附加于银幕情节中,然后感动就流泪,"
                + "难过就躺在某一个人的怀里尽情的阐述心扉或者手机卡复制器一个贱人一杯红酒一部电影在夜 深人静的晚上,关上电话静静的发呆着。";
        System.out.println("Length of the text to check: " + string.length());
        long beginTime = System.currentTimeMillis();
        Set<String> set = filter.getSensitiveWord(string, SensitiveWordFilter.minMatchType);
        String txt = filter.replaceSensitiveWord(string, SensitiveWordFilter.minMatchType, "*");
        long endTime = System.currentTimeMillis();
        System.out.println("Number of sensitive words in the text: " + set.size() + ". They are: " + set);
        System.out.println(txt);
        System.out.println("Total time elapsed (ms): " + (endTime - beginTime));
    }
}
The SensitiveWord.txt file: one sensitive word per line, saved in the encoding configured above (GBK here).
Reposted from: https://blog.csdn.net/chenssy/article/details/26961957 (a fairly detailed explanation of the DFA algorithm)