DFA and OCR Notes


The DFA algorithm

DFA (Deterministic Finite Automaton), a finite-state machine; for sensitive word filtering it is essentially a data structure: a nested Map.

Storage: all sensitive words are loaded up front into a set of nested maps.

Example:

Sensitive words: 我明天要答辩, 毕设答辩, 毕业答辩

Text to scan: 我明天要答辩、毕设答辩、毕业答辩

Storing these sensitive words produces the following nested map (words that share a prefix share branches):

{
    "我": {
        "明": {
            "天": {
                "要": {
                    "答": {
                        "辩": {
                            "isEnd": 1
                        },
                        "isEnd": 0
                    },
                    "isEnd": 0
                },
                "isEnd": 0
            },
            "isEnd": 0
        },
        "isEnd": 0
    },
    "毕": {
    	"设": {
            "答": {
                "辩": {
                    "isEnd": 1
                },
                "isEnd": 0
            },
            "isEnd": 0
        },
        "业": {
            "答": {
                "辩": {
                    "isEnd": 1
                },
                "isEnd": 0
            },
            "isEnd": 0
        },
        "isEnd": 0
    }
}
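
Matching then just walks this nested map character by character from each position in the text; reaching a node whose isEnd flag is set means a complete sensitive word was found. Below is a minimal standalone sketch of that lookup, separate from the utility class that follows; it assumes the flag is stored as the string "1", as in that class.

import java.util.Map;

public class DfaLookupSketch {

    // Returns true if any sensitive word in the dictionary occurs in the text.
    @SuppressWarnings("unchecked")
    public static boolean containsSensitiveWord(Map<String, Object> dictionary, String text) {
        for (int start = 0; start < text.length(); start++) {
            Map<String, Object> node = dictionary;
            // walk the nested map character by character
            for (int i = start; i < text.length(); i++) {
                node = (Map<String, Object>) node.get(String.valueOf(text.charAt(i)));
                if (node == null) {
                    break; // no sensitive word continues with this character
                }
                if ("1".equals(node.get("isEnd"))) {
                    return true; // reached the end of a complete sensitive word
                }
            }
        }
        return false;
    }
}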

Utility class

import org.apache.commons.lang3.RandomStringUtils;

import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class SensitiveWordUtil {

    // Sensitive word dictionary: a trie built from nested maps
    public static Map<String, Object> dictionaryMap = new HashMap<>();

    // Guards dictionaryMap: initMap takes the write lock, matchWords the read lock
    public static ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock();


    /**
     * Build the sensitive word dictionary (a trie of nested maps).
     *
     * @param words the sensitive words to load
     */
    public static void initMap(Collection<String> words) {

        ReentrantReadWriteLock.WriteLock writeLock = reentrantReadWriteLock.writeLock();

        writeLock.lock();
        try{
            if (words == null) {
                System.out.println("The sensitive word list must not be null");
                return;
            }

            // Initial capacity words.size(): the number of entry characters of the dictionary
            // (at most words.size(), since different words may share the same first character)
            Map<String, Object> map = new HashMap<>(words.size());
            // map of the level currently being filled while walking through a word
            Map<String, Object> curMap = null;
            Iterator<String> iterator = words.iterator();

            while (iterator.hasNext()) {
                String word = iterator.next();
                curMap = map;
                int len = word.length();
                for (int i = 0; i < len; i++) {
                    // iterate over each character of the word
                    String key = String.valueOf(word.charAt(i));
                    // check whether this character already exists at the current level;
                    // if not, create a new node, then descend into it and continue
                    Map<String, Object> wordMap = (Map<String, Object>) curMap.get(key);
                    if (wordMap == null) {
                        // each node holds two things: its child nodes and the isEnd flag
                        wordMap = new HashMap<>(2);
                        wordMap.put("isEnd", "0");
                        curMap.put(key, wordMap);
                    }
                    curMap = wordMap;
                    // if this is the last character of the word, set isEnd to 1
                    if (i == len - 1) {
                        curMap.put("isEnd", "1");
                    }
                }
            }

            dictionaryMap = map;
        }finally {
            writeLock.unlock();
        }

    }

    /**
     * Check whether a sensitive word in the dictionary matches the text starting at the given position.
     *
     * @param text       the text to scan
     * @param beginIndex the position in the text to start matching from
     * @return the length of the longest sensitive word matched at beginIndex, or 0 if there is no match
     */
    private static int checkWord(String text, int beginIndex) {
        if (dictionaryMap == null) {
            throw new RuntimeException("The dictionary must not be null");
        }
        // length of the longest complete sensitive word matched so far
        int matchLength = 0;
        int wordLength = 0;
        Map<String, Object> curMap = dictionaryMap;
        int len = text.length();
        // match against the text starting at position beginIndex
        for (int i = beginIndex; i < len; i++) {
            String key = String.valueOf(text.charAt(i));
            // descend to the child node for the current character
            curMap = (Map<String, Object>) curMap.get(key);
            if (curMap == null) {
                break;
            }
            wordLength++;
            // isEnd == "1" marks the end of a complete sensitive word; only lengths recorded
            // here count as matches, so a partially matched longer word is never returned
            if ("1".equals(curMap.get("isEnd"))) {
                matchLength = wordLength;
            }
        }
        return matchLength;
    }

    /**
     * Find every sensitive word in the text and count how many times each one occurs.
     *
     * @param text the text to scan
     * @return a map from matched sensitive word to its hit count
     */
    public static Map<String, Integer> matchWords(String text) {
        ReentrantReadWriteLock.ReadLock readLock = reentrantReadWriteLock.readLock();
        readLock.lock();
        try{
            Map<String, Integer> wordMap = new HashMap<>();
            int len = text.length();
            for (int i = 0; i < len; i++) {
                int wordLength = checkWord(text, i);
                if (wordLength > 0) {
                    String word = text.substring(i, i + wordLength);
                    // increment the hit count for this keyword
                    if (wordMap.containsKey(word)) {
                        wordMap.put(word, wordMap.get(word) + 1);
                    } else {
                        wordMap.put(word, 1);
                    }

                    i += wordLength - 1;
                }
            }
            return wordMap;
        }finally {
            readLock.unlock();
        }

    }

    public static void main(String[] args) {
//        List<String> list = new ArrayList<>();
//        list.add("**");
//        list.add("***");
//        list.add("****");
//        initMap(list);
//        String content="我是一个好人,并不会***,也不****,我真的**";
//        Map<String, Integer> map = matchWords(content);
//        System.out.println(map);
        int size = 10000;
        CountDownLatch countDownLatch = new CountDownLatch(size);
        ExecutorService executorService = Executors.newFixedThreadPool(size);
        for (int i = 0; i < size / 2; i++) {
            executorService.submit(() -> {
                countDownLatch.countDown();
                try {
                    countDownLatch.await();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                List<String> list = new ArrayList<>();
                for (int j = 0; j < size; j++) {
                    list.add(RandomStringUtils.random(100));
                }
                initMap(list);
            });
            executorService.submit(() -> {
                countDownLatch.countDown();
                try {
                    countDownLatch.await();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                String content = "我是一个好人,并不会卖**";
                Map<String, Integer> map = matchWords(content);
                System.out.println(map);
            });
        }
        // shut down the pool so the JVM can exit once all tasks have finished
        executorService.shutdown();
    }
}
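
A quick usage sketch (essentially the commented-out lines at the top of main), using the sensitive words from the example at the beginning of these notes; it assumes java.util.Arrays and java.util.Map are imported, and the printed order is whatever HashMap happens to produce:

    List<String> words = Arrays.asList("我明天要答辩", "毕设答辩", "毕业答辩");
    SensitiveWordUtil.initMap(words);
    Map<String, Integer> hits = SensitiveWordUtil.matchWords("我明天要答辩、毕设答辩、毕业答辩");
    // prints something like {毕设答辩=1, 我明天要答辩=1, 毕业答辩=1}
    System.out.println(hits);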

OCR

Optical Character Recognition (OCR) is the process in which an electronic device examines characters printed on paper, determines their shapes by detecting patterns of dark and bright areas, and then translates those shapes into computer text using character recognition methods.

Common OCR technologies

Solution        Description
Baidu OCR       Paid cloud service
Tesseract-OCR   Open-source OCR engine maintained by Google; can be called from Java, Python, and other languages
Tess4J          A wrapper around Tesseract-OCR that makes it easy to call from Java

Usage steps

  1. Add the dependency (no version is given below, so it must be specified or come from a parent POM's dependency management):
<dependency>
    <groupId>net.sourceforge.tess4j</groupId>
    <artifactId>tess4j</artifactId>
</dependency>
  2. Download the Chinese language data (the tessdata trained-data files) and place them in your own working directory.

  3. Write a test class and run it:

    import net.sourceforge.tess4j.ITesseract;
    import net.sourceforge.tess4j.Tesseract;
    import net.sourceforge.tess4j.TesseractException;

    import java.io.File;

    public class OcrTest {
        public static void main(String[] args) throws TesseractException {
            // Image to run OCR on
            File file = new File("Path/To/Image");
            // Create the Tesseract instance
            ITesseract tesseract = new Tesseract();
            // Folder that contains the language data files (tessdata)
            tesseract.setDatapath("Path/To/tessdata");
            // Use the simplified Chinese model
            tesseract.setLanguage("chi_sim");
            // Run OCR on the image
            String result = tesseract.doOCR(file);
            // Replace line breaks and spaces with "-" so the result is a single line
            result = result.replaceAll("\\r|\\n", "-").replaceAll(" ", "-");
            System.out.println(result);
        }
    }
    

Integration with Spring Boot:

  1. Add the dependency

  2. Write the configuration file (YAML indentation must use spaces, not tabs):

    tess4j:
      language: chi_sim
      dataPath: Path\to\tessdata
    
  3. Test (a sketch of the Tess4JClient wrapper assumed here follows after these steps):

    @Autowired
    private Tess4JClient tess4jClient;

    @Test
    public void test() throws Exception {
        // Read the image and hand it to the injected Tess4J wrapper
        BufferedImage bufferedImage = ImageIO.read(new File("Path/to/Image"));
        String s = tess4jClient.doOCR(bufferedImage);
        System.out.println(s);
    }
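
The Tess4JClient used above is a project-specific wrapper that these notes never show. Below is a minimal sketch of what such a wrapper and its binding to the tess4j.* properties could look like; the class names, the @ConfigurationProperties binding, and the line-flattening step are assumptions, not the original project's code.

    import net.sourceforge.tess4j.ITesseract;
    import net.sourceforge.tess4j.Tesseract;
    import net.sourceforge.tess4j.TesseractException;
    import org.springframework.boot.context.properties.ConfigurationProperties;
    import org.springframework.boot.context.properties.EnableConfigurationProperties;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;

    import java.awt.image.BufferedImage;

    // Hypothetical wrapper: runs Tess4J with the configured data path and language
    public class Tess4JClient {

        private final String dataPath;
        private final String language;

        public Tess4JClient(String dataPath, String language) {
            this.dataPath = dataPath;
            this.language = language;
        }

        public String doOCR(BufferedImage image) throws TesseractException {
            ITesseract tesseract = new Tesseract();
            tesseract.setDatapath(dataPath);
            tesseract.setLanguage(language);
            String result = tesseract.doOCR(image);
            // collapse line breaks and spaces, as in the standalone example above
            return result.replaceAll("\\r|\\n", "-").replaceAll(" ", "-");
        }
    }

    // Hypothetical binding of the tess4j.* keys from the YAML configuration above
    @ConfigurationProperties(prefix = "tess4j")
    class Tess4JProperties {
        private String dataPath;
        private String language;

        public String getDataPath() { return dataPath; }
        public void setDataPath(String dataPath) { this.dataPath = dataPath; }
        public String getLanguage() { return language; }
        public void setLanguage(String language) { this.language = language; }
    }

    // Hypothetical configuration that exposes the client as a Spring bean
    @Configuration
    @EnableConfigurationProperties(Tess4JProperties.class)
    class Tess4JConfig {

        @Bean
        public Tess4JClient tess4jClient(Tess4JProperties props) {
            return new Tess4JClient(props.getDataPath(), props.getLanguage());
        }
    }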
    