The Spring Batch framework loads a file into MySQL faster than Flink

I. Spring Batch Overview

From the official introduction: Spring Batch is a lightweight, comprehensive batch framework designed to enable the development of robust batch applications vital to the daily operations of enterprise systems.
Spring Batch provides reusable functions that are essential for processing large volumes of records, including logging/tracing, transaction management, job processing statistics, job restart, skip, and resource management. It also provides more advanced technical services and features that enable extremely high-volume and high-performance batch jobs through optimization and partitioning techniques. Both simple and complex high-volume batch jobs can use the framework in a highly scalable way to process significant amounts of information.

II. Hands-on Implementation

  1. Create a Spring Boot project and add the Spring Batch and related dependencies to pom.xml
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-batch</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-validation</artifactId>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-all</artifactId>
            <version>5.3.8</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.72</version>
        </dependency>
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
            <version>2.6</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-jdbc</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
            <version>3.1</version>
        </dependency>

    </dependencies>
  2. Add the database configuration to application.yml
spring:
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://localhost:3306/sms?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=UTC
    username: root
    password: 123456
  batch:
    initialize-schema: always # allow Spring Boot to create the default Spring Batch metadata tables (on Spring Boot 2.5+ this property is spring.batch.jdbc.initialize-schema)
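
A related switch, used in step 7 below: by default Spring Boot launches every Job bean when the application starts. If the job should run only when triggered through the controller shown later, the usual way to turn off the automatic launch is the property below (a minimal sketch; the exact property name should be checked against your Spring Boot version):

spring:
  batch:
    job:
      enabled: false # do not launch Job beans automatically at application startup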
  3. Model the file contents as an entity class, and create the corresponding database table (the snake_case field names are deliberate: they match the named parameters used by the JDBC writer below)
@Data
public class SeaDove {

    private String send_time;

    private String phone;

    private String msg;

    private String id;

    private String business;

    private String source;
}

-- sms.sea_dove3 definition

CREATE TABLE `sea_dove3` (
  `id` bigint NOT NULL AUTO_INCREMENT,
  `send_time` varchar(100) DEFAULT NULL,
  `phone` varchar(100) DEFAULT NULL,
  `msg` varchar(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL,
  `business` varchar(100) DEFAULT NULL,
  `source` varchar(100) DEFAULT NULL,
  KEY `sea_dove_id_IDX` (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=94308865453199388 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
  4. Write the read, process, and write parts and assemble them into a step
@Configuration
@EnableBatchProcessing
public class DataJob {

    private static final Logger log = LoggerFactory.getLogger(DataJob.class);

    @Resource
    private JobBuilderFactory jobBuilderFactory;    // used to build Jobs

    @Resource
    private StepBuilderFactory stepBuilderFactory;  // used to build Steps

    @Resource
    private JobListener jobListener;                // simple Job listener
    @Resource
    private SetpListenter stepListener;             // simple Step listener

    @Autowired
    DataSource dataSource;
    /**
     * A simple basic Job is usually composed of one or more Steps.
     */
    @Bean
    public Job DataJob() {
        return jobBuilderFactory.get("DataJob")
                .incrementer(new RunIdIncrementer())  // RunIdIncrementer gives every launch a unique run id
                .start(handleDataStep())              // start() sets the first Step the Job executes
                .listener(jobListener)                // register the simple JobListener
                .build();
    }

    /**
     * A simple basic Step has three main parts:
     * ItemReader    : reads the data
     * ItemProcessor : processes the data
     * ItemWriter    : writes the data
     */
    @Bean
    public Step handleDataStep() {
        return stepBuilderFactory.get("getData")
                .<SeaDove, SeaDove>chunk(1000)         // <input, output>; loosely speaking, chunk is like a SQL commit: 1000 items are processed and then written in one batch
                .faultTolerant().retryLimit(3)         // fault tolerance: retry a failing item up to 3 times
                .retry(Exception.class)
                .skipLimit(100).skip(Exception.class)  // if an item still fails after the retries it is skipped; more than 100 skips fails the step
                .reader(getDataReader())               // the ItemReader
                .processor(getDataProcessor())         // the ItemProcessor
                .writer(writer())                      // the ItemWriter
                .listener(stepListener)
                .build();
    }

    @Bean
    public ItemReader<? extends SeaDove> getDataReader() {
        // the reader could just as well read from a database; here it reads from a flat file
        FlatFileItemReader<SeaDove> reader = new FlatFileItemReader<>();
        File file = new File("D:\\sendlog\\seadove\\seadove\\8yue.json");
        reader.setResource(new FileSystemResource(file));
        reader.setLineMapper(new SeaDoveReadLineMapper());
        return reader;
    }

    @Bean
    public ItemProcessor<SeaDove, SeaDove> getDataProcessor() {
        return new ItemProcessor<SeaDove, SeaDove>() {
            @Override
            public SeaDove process(SeaDove seaDove) throws Exception {
                if (StrUtil.isNotEmpty(seaDove.getMsg())) {
                    return seaDove;
                }
                return null;

            }
        };
    }



    @Bean
    public JdbcBatchItemWriter<SeaDove> writer() {
        return new JdbcBatchItemWriterBuilder<SeaDove>()
                .itemSqlParameterSourceProvider(new BeanPropertyItemSqlParameterSourceProvider<>())
                .sql("insert into sea_dove3 (send_time,phone,msg,business,source) values(:send_time,:phone,:msg,:business,:source)")
                .dataSource(dataSource)
                .build();
    }
    @Bean
    public ThreadPoolTaskExecutor taskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(5);             // core pool size
        executor.setMaxPoolSize(20);             // maximum pool size
        executor.setQueueCapacity(100);          // queue capacity
        executor.setThreadNamePrefix("batch-");  // thread name prefix
        executor.initialize();                   // initialize the pool
        return executor;
    }

}
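
One thing worth noting: the taskExecutor bean above is defined but never attached to the step, so the chunk processing as written runs on a single thread (the JobListener only shuts the pool down at the end of the job). If multi-threaded chunk processing were wanted, the sketch below shows how the executor could be wired into an equivalent step bean added to the same DataJob class. This is my own illustration under that assumption, not part of the original job; also note that FlatFileItemReader is not thread-safe, so the reader would need to be wrapped (e.g. in a SynchronizedItemStreamReader) or replaced before running the step this way.

    @Bean
    public Step handleDataStepMultiThreaded() {
        return stepBuilderFactory.get("getDataMultiThreaded")
                .<SeaDove, SeaDove>chunk(1000)
                .reader(getDataReader())
                .processor(getDataProcessor())
                .writer(writer())
                .taskExecutor(taskExecutor())   // run chunks on the thread pool defined above
                .throttleLimit(5)               // hypothetical cap on concurrent chunk executions
                .listener(stepListener)
                .build();
    }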
  5. Per-line mapping used during the read phase, written to match the actual file format (here, each line is a JSON object whose content field is pipe-separated)
public class SeaDoveReadLineMapper implements LineMapper<SeaDove> {
    @Override
    public SeaDove mapLine(String s, int i) throws Exception {
        JSONObject jsonObject = JSONObject.parseObject(s);
        String content = jsonObject.getString("content");
        if (content.contains("step=0")) {
            SeaDove seaDove = new SeaDove();
            final String[] split = content.split("\\|");
            seaDove.setSend_time(split[0]);
            String collect = StreamUtils.handle5(split[1]);
            String[] split1 = collect.split("\\|");
            seaDove.setPhone(split1[0]);
            String msg = URLUtil.decode(split1[1]);
            seaDove.setBusiness(getBusiness(msg));
            seaDove.setMsg(msg);
            seaDove.setSource(split1[2]);
            return seaDove;
        }
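        // a line without step=0 yields an empty SeaDove; the processor later drops it because msg is empty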
        return new SeaDove();
    }
    public String getBusiness(String msg) {
        final List<String> collect = SeaDoveConstant.list.stream()
                .filter(msg::contains)
                .collect(Collectors.toList());
        if (!collect.isEmpty()) {
            return String.join("|", collect);
        } else {
            return "other";
        }
    }
}
  6. Write step and job listeners so that work can be done before and after execution, e.g. timing
@Component
@Slf4j
public class SetpListenter implements StepExecutionListener {

    @Override
    public void beforeStep(StepExecution stepExecution) {
        System.out.println("beforeStep");
        log.info("beforeStep : {}", JSONObject.toJSONString(stepExecution));
    }

    @Override
    public ExitStatus afterStep(StepExecution stepExecution) {
        log.info("afterStep : {}", JSONObject.toJSONString(stepExecution));
        System.out.println("afterStep");
        if (stepExecution.getReadSkipCount() > 0 || stepExecution.getProcessSkipCount() > 0 || stepExecution.getWriteSkipCount() > 0) {
            return new ExitStatus(ExitStatus.FAILED.getExitCode());
        } else {
            return ExitStatus.COMPLETED;
        }
    }
}
@Component
public class JobListener implements JobExecutionListener {

    private static final Logger log = LoggerFactory.getLogger(JobListener.class);

    @Resource
    private ThreadPoolTaskExecutor threadPoolTaskExecutor;

    private long startTime;

    @Override
    public void beforeJob(JobExecution jobExecution) {
        startTime = System.currentTimeMillis();
        log.info(jobExecution.getJobInstance().getJobName() + " Job processing started " + jobExecution.getJobParameters());
    }

    @Override
    public void afterJob(JobExecution jobExecution) {
        log.info("JOB STATUS : {}", jobExecution.getStatus());
        if (jobExecution.getStatus() == BatchStatus.COMPLETED) {
            log.info("JOB FINISHED");
            threadPoolTaskExecutor.destroy();
        } else if (jobExecution.getStatus() == BatchStatus.FAILED) {
            log.info("JOB FAILED");
        }
        log.info("Job Cost Time : {}ms" , (System.currentTimeMillis() - startTime));
    }
}
  7. By default the job is executed as soon as the application starts; this can be turned off in the configuration (see the spring.batch.job.enabled property noted in step 2). Write a controller class so the job can also be triggered by an HTTP request.
@Slf4j
@RestController
public class TestController {

    @Autowired
    DataJob dataJob;

    @Autowired
    private JobLauncher jobLauncher;


    @RequestMapping("/data")
    public String data() throws JobParametersInvalidException, JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException {
        // build distinct JobParameters for every call by including the current time;
        // because the parameters differ, the same job can be launched repeatedly
        JobParameters jobParameters = new JobParametersBuilder()
                .addDate("date", new Date())
                .addString("data", "data") // an extra parameter that can be read inside the job
                .toJobParameters();
        JobExecution run = jobLauncher.run(dataJob.DataJob(), jobParameters);
        log.info("job execution result: {}", JSONObject.toJSONString(run));
        BatchStatus status = run.getStatus();
        return JSONObject.toJSONString(status);
    }
}
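
With the application running, the job can then be triggered manually, for example with curl http://localhost:8080/data (assuming the default port 8080); every call builds fresh JobParameters from the current time, so repeated runs create new job instances. The "data" parameter added above is not actually consumed anywhere in this example; purely for reference, the sketch below (my addition, not part of the original code) shows the standard way a step-scoped bean in the DataJob configuration class could read it:

    @Bean
    @StepScope
    public ItemProcessor<SeaDove, SeaDove> parameterAwareProcessor(
            @Value("#{jobParameters['data']}") String data) { // injected from the JobParameters built in the controller
        return item -> {
            // hypothetical use of the parameter: tag every record's source with it
            item.setSource(data);
            return item;
        };
    }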

III. Comparing Spring Batch and Flink

  1. Test environment
    • Default framework configuration, no tuning or parameter optimization
    • Both frameworks write into an empty table
    • Identical text-processing logic
  2. In a simple batch test, Spring Batch is clearly faster than Flink
File lines    Rows written after processing    Spring Batch    Flink
343           93                               234 ms          3,026 ms
1,102,000     275,500                          45,927 ms       184,978 ms
6,362,288     1,590,572                        329,079 ms      1,283,280 ms

Note: the corresponding Flink program is covered in the companion post 《flink批处理入库mysql》 (Flink batch loading into MySQL).
