cluster_dispatcher.sh

Configuring a Hadoop Cluster
This article presents an example bash script that automatically distributes Hadoop, HBase, and ZooKeeper configuration files to every node in the cluster. Specify the component name as a command-line argument and the relevant configuration files are pushed out to all nodes.
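The script assumes passwordless SSH from the dispatching node to every slave as the hadoop user; otherwise each scp call prompts for a password. A minimal one-time setup sketch (run once on the dispatching node; the hostnames are placeholders):

  ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa   # generate a key pair with no passphrase
  ssh-copy-id hadoop@node01                  # repeat for every slave node
  ssh-copy-id hadoop@node02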
#!/bin/bash
# cluster_dispatcher.sh -- push configuration files for the component named in $1
# to every node listed in that component's host file.

if [ "$1" = "hadoop" ]; then
  # Read the slave hostnames from the slaves file and push each Hadoop config file.
  for s in $(cat "$HADOOP_INSTALL/etc/hadoop/slaves"); do
    echo "Slave: $s"
    scp "$HADOOP_INSTALL/etc/hadoop/slaves"          hadoop@$s:$HADOOP_INSTALL/etc/hadoop/
    scp "$HADOOP_INSTALL/etc/hadoop/hadoop-env.sh"   hadoop@$s:$HADOOP_INSTALL/etc/hadoop/
    scp "$HADOOP_INSTALL/etc/hadoop/core-site.xml"   hadoop@$s:$HADOOP_INSTALL/etc/hadoop/
    scp "$HADOOP_INSTALL/etc/hadoop/hdfs-site.xml"   hadoop@$s:$HADOOP_INSTALL/etc/hadoop/
    scp "$HADOOP_INSTALL/etc/hadoop/mapred-site.xml" hadoop@$s:$HADOOP_INSTALL/etc/hadoop/
    scp "$HADOOP_INSTALL/etc/hadoop/yarn-site.xml"   hadoop@$s:$HADOOP_INSTALL/etc/hadoop/
    #scp -r "$HADOOP_INSTALL/lib/native/"* hadoop@$s:$HADOOP_INSTALL/lib/native
  done
fi
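On a slow link, an rsync-based variant transfers only files that actually changed. A sketch under the same assumptions (same slaves file, same paths); note it syncs the entire conf directory rather than the selected files above, and is not part of the original script:

  for s in $(cat "$HADOOP_INSTALL/etc/hadoop/slaves"); do
    echo "Slave: $s"
    # -a preserves permissions and timestamps; -v lists what was copied
    rsync -av "$HADOOP_INSTALL/etc/hadoop/" hadoop@$s:"$HADOOP_INSTALL/etc/hadoop/"
  done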


if [ "$1" = "hbase" ]; then
  # RegionServer hostnames come from HBase's regionservers file.
  for s in $(cat "$HBASE_INSTALL/conf/regionservers"); do
    echo "Slave: $s"
    scp "$HBASE_INSTALL/conf/regionservers"  hadoop@$s:$HBASE_INSTALL/conf/
    scp "$HBASE_INSTALL/conf/hbase-env.sh"   hadoop@$s:$HBASE_INSTALL/conf/
    scp "$HBASE_INSTALL/conf/hbase-site.xml" hadoop@$s:$HBASE_INSTALL/conf/
  done
fi


if [ "$1" = "zookeeper" ]; then
  # Extract the server hostnames from the server.N=host:port:port lines in zoo.cfg.
  for s in $(grep "server." "$ZOOKEEPER_INSTALL/conf/zoo.cfg" | awk -F '=' '{print $2}' | awk -F ':' '{print $1}'); do
    echo "Server: $s"
    scp "$ZOOKEEPER_INSTALL/conf/zoo.cfg" hadoop@$s:$ZOOKEEPER_INSTALL/conf/
  done
fi
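For reference, the pipeline above parses the standard server entries in zoo.cfg; given lines like these (hostnames are placeholders), it yields node01, node02, node03:

  server.1=node01:2888:3888
  server.2=node02:2888:3888
  server.3=node03:2888:3888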


if [ "$1" = "file" ]; then
  # Generic mode: copy file (or directory) $3 under directory $2 to the same
  # directory on every slave.
  for s in $(cat "$HADOOP_INSTALL/etc/hadoop/slaves"); do
    echo "Slave: $s"
    scp -r "$2/$3" hadoop@$s:"$2"
  done
fi
exit 0
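Typical invocations (hostnames and paths are illustrative):

  ./cluster_dispatcher.sh hadoop                    # push Hadoop config files to all slaves
  ./cluster_dispatcher.sh hbase                     # push HBase config files to all RegionServers
  ./cluster_dispatcher.sh zookeeper                 # push zoo.cfg to all ZooKeeper servers
  ./cluster_dispatcher.sh file /opt/data app.conf   # push /opt/data/app.conf to all slaves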
"C:\Program Files\Java\jdk1.8.0_241\bin\java.exe" "-javaagent:D:\IntelliJ IDEA 2023.3.4\lib\idea_rt.jar=49897:D:\IntelliJ IDEA 2023.3.4\bin" -Dfile.encoding=UTF-8 -classpath "C:\Program Files\Java\jdk1.8.0_241\jre\lib\charsets.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\deploy.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\access-bridge-64.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\cldrdata.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\dnsns.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\jaccess.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\jfxrt.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\localedata.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\nashorn.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\sunec.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\sunjce_provider.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\sunmscapi.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\sunpkcs11.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\ext\zipfs.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\javaws.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\jce.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\jfr.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\jfxswt.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\jsse.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\management-agent.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\plugin.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\resources.jar;C:\Program Files\Java\jdk1.8.0_241\jre\lib\rt.jar;C:\Users\18795\IdeaProjects\demo11\target\classes;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-scala_2.12\1.14.0\flink-scala_2.12-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-core\1.14.0\flink-core-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-annotations\1.14.0\flink-annotations-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-metrics-core\1.14.0\flink-metrics-core-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\commons\commons-lang3\3.3.2\commons-lang3-3.3.2.jar;D:\Maven安装包\Maven_Repository\com\esotericsoftware\kryo\kryo\2.24.0\kryo-2.24.0.jar;D:\Maven安装包\Maven_Repository\com\esotericsoftware\minlog\minlog\1.2\minlog-1.2.jar;D:\Maven安装包\Maven_Repository\org\objenesis\objenesis\2.1\objenesis-2.1.jar;D:\Maven安装包\Maven_Repository\commons-collections\commons-collections\3.2.2\commons-collections-3.2.2.jar;D:\Maven安装包\Maven_Repository\org\apache\commons\commons-compress\1.21\commons-compress-1.21.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-shaded-guava\30.1.1-jre-14.0\flink-shaded-guava-30.1.1-jre-14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-java\1.14.0\flink-java-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\commons\commons-math3\3.5\commons-math3-3.5.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-shaded-asm-7\7.1-14.0\flink-shaded-asm-7-7.1-14.0.jar;D:\Maven安装包\Maven_Repository\org\scala-lang\scala-reflect\2.12.7\scala-reflect-2.12.7.jar;D:\Maven安装包\Maven_Repository\org\scala-lang\scala-library\2.12.7\scala-library-2.12.7.jar;D:\Maven安装包\Maven_Repository\org\scala-lang\scala-compiler\2.12.7\scala-compiler-2.12.7.jar;D:\Maven安装包\Maven_Repository\org\scala-lang\modules\scala-xml_2.12\1.0.6\scala-xml_2.12-1.0.6.jar;D:\Maven安装包\Maven_Repository\com\twitter\chill_2.12\0.7.6\chill_2.12-0.7.6.jar;D:\Maven安装包\Maven_Repository\com\twitter\chill-java\0.7.6\chill-java-0.7.6.jar;D:\Maven安装包\Maven_Repository\org\slf4j\slf4j-api\1.7.15\slf4j-api-1.7.15.jar;D:\Maven安装包\Maven_Repository\com\google\code\findbugs\jsr305\1.3.9\jsr305-1.3.9.jar
;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-shaded-force-shading\14.0\flink-shaded-force-shading-14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-streaming-scala_2.12\1.14.0\flink-streaming-scala_2.12-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-streaming-java_2.12\1.14.0\flink-streaming-java_2.12-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-file-sink-common\1.14.0\flink-file-sink-common-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-clients_2.12\1.14.0\flink-clients_2.12-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-runtime\1.14.0\flink-runtime-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-rpc-core\1.14.0\flink-rpc-core-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-rpc-akka-loader\1.14.0\flink-rpc-akka-loader-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-queryable-state-client-java\1.14.0\flink-queryable-state-client-java-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-hadoop-fs\1.14.0\flink-hadoop-fs-1.14.0.jar;D:\Maven安装包\Maven_Repository\commons-io\commons-io\2.8.0\commons-io-2.8.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-shaded-netty\4.1.65.Final-14.0\flink-shaded-netty-4.1.65.Final-14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-shaded-jackson\2.12.4-14.0\flink-shaded-jackson-2.12.4-14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-shaded-zookeeper-3\3.4.14-14.0\flink-shaded-zookeeper-3-3.4.14-14.0.jar;D:\Maven安装包\Maven_Repository\org\javassist\javassist\3.24.0-GA\javassist-3.24.0-GA.jar;D:\Maven安装包\Maven_Repository\org\xerial\snappy\snappy-java\1.1.8.3\snappy-java-1.1.8.3.jar;D:\Maven安装包\Maven_Repository\org\lz4\lz4-java\1.8.0\lz4-java-1.8.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-optimizer\1.14.0\flink-optimizer-1.14.0.jar;D:\Maven安装包\Maven_Repository\commons-cli\commons-cli\1.3.1\commons-cli-1.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-client\3.3.1\hadoop-client-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-common\3.3.1\hadoop-common-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\thirdparty\hadoop-shaded-protobuf_3_7\1.1.1\hadoop-shaded-protobuf_3_7-1.1.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\thirdparty\hadoop-shaded-guava\1.1.1\hadoop-shaded-guava-1.1.1.jar;D:\Maven安装包\Maven_Repository\com\google\guava\guava\27.0-jre\guava-27.0-jre.jar;D:\Maven安装包\Maven_Repository\com\google\guava\failureaccess\1.0\failureaccess-1.0.jar;D:\Maven安装包\Maven_Repository\com\google\guava\listenablefuture\9999.0-empty-to-avoid-conflict-with-guava\listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar;D:\Maven安装包\Maven_Repository\org\checkerframework\checker-qual\2.5.2\checker-qual-2.5.2.jar;D:\Maven安装包\Maven_Repository\com\google\j2objc\j2objc-annotations\1.1\j2objc-annotations-1.1.jar;D:\Maven安装包\Maven_Repository\org\codehaus\mojo\animal-sniffer-annotations\1.17\animal-sniffer-annotations-1.17.jar;D:\Maven安装包\Maven_Repository\org\apache\httpcomponents\httpclient\4.5.13\httpclient-4.5.13.jar;D:\Maven安装包\Maven_Repository\org\apache\httpcomponents\httpcore\4.4.13\httpcore-4.4.13.jar;D:\Maven安装包\Maven_Repository\commons-codec\commons-codec\1.11\commons-codec-1.11.jar;D:\Maven安装包\Maven_Repository\commons-net\commons-net\3.6\commons-net-3.6.jar;D:\Maven安装包\Maven_Repository\jakarta\activation\jakarta.activation-api\1.2.1\jakarta.activation-api-1.2.1.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-servlet\9.4.40.v20210413\jetty-
servlet-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-security\9.4.40.v20210413\jetty-security-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-util-ajax\9.4.40.v20210413\jetty-util-ajax-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-webapp\9.4.40.v20210413\jetty-webapp-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-xml\9.4.40.v20210413\jetty-xml-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\javax\servlet\jsp\jsp-api\2.1\jsp-api-2.1.jar;D:\Maven安装包\Maven_Repository\com\sun\jersey\jersey-servlet\1.19\jersey-servlet-1.19.jar;D:\Maven安装包\Maven_Repository\commons-logging\commons-logging\1.1.3\commons-logging-1.1.3.jar;D:\Maven安装包\Maven_Repository\log4j\log4j\1.2.17\log4j-1.2.17.jar;D:\Maven安装包\Maven_Repository\commons-beanutils\commons-beanutils\1.9.4\commons-beanutils-1.9.4.jar;D:\Maven安装包\Maven_Repository\org\apache\commons\commons-configuration2\2.1.1\commons-configuration2-2.1.1.jar;D:\Maven安装包\Maven_Repository\org\apache\commons\commons-text\1.4\commons-text-1.4.jar;D:\Maven安装包\Maven_Repository\org\apache\avro\avro\1.7.7\avro-1.7.7.jar;D:\Maven安装包\Maven_Repository\org\codehaus\jackson\jackson-core-asl\1.9.13\jackson-core-asl-1.9.13.jar;D:\Maven安装包\Maven_Repository\org\codehaus\jackson\jackson-mapper-asl\1.9.13\jackson-mapper-asl-1.9.13.jar;D:\Maven安装包\Maven_Repository\com\thoughtworks\paranamer\paranamer\2.3\paranamer-2.3.jar;D:\Maven安装包\Maven_Repository\com\google\re2j\re2j\1.1\re2j-1.1.jar;D:\Maven安装包\Maven_Repository\com\google\protobuf\protobuf-java\2.5.0\protobuf-java-2.5.0.jar;D:\Maven安装包\Maven_Repository\com\google\code\gson\gson\2.2.4\gson-2.2.4.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-auth\3.3.1\hadoop-auth-3.3.1.jar;D:\Maven安装包\Maven_Repository\com\nimbusds\nimbus-jose-jwt\9.8.1\nimbus-jose-jwt-9.8.1.jar;D:\Maven安装包\Maven_Repository\com\github\stephenc\jcip\jcip-annotations\1.0-1\jcip-annotations-1.0-1.jar;D:\Maven安装包\Maven_Repository\net\minidev\json-smart\2.4.2\json-smart-2.4.2.jar;D:\Maven安装包\Maven_Repository\net\minidev\accessors-smart\2.4.2\accessors-smart-2.4.2.jar;D:\Maven安装包\Maven_Repository\org\ow2\asm\asm\8.0.1\asm-8.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\curator\curator-framework\4.2.0\curator-framework-4.2.0.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-simplekdc\1.0.1\kerb-simplekdc-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-client\1.0.1\kerb-client-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerby-config\1.0.1\kerby-config-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-common\1.0.1\kerb-common-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-crypto\1.0.1\kerb-crypto-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-util\1.0.1\kerb-util-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\token-provider\1.0.1\token-provider-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-admin\1.0.1\kerb-admin-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-server\1.0.1\kerb-server-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-identity\1.0.1\kerb-identity-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerby-xdr\1.0.1\kerby-xdr-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\curator\curator-client\4.2.0\curator-client-4.2.0.jar;D:\Maven安装包\Maven_Repository\org\apache\curator\curator-recipes\4.2.0\curator-recipes-4.2.0.jar;D:\Maven安装包\Maven_Repository\org\apache\htrace\htrace-core4\4.1.0-incubating\htrace-core4-4.1.0-incub
ating.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerb-core\1.0.1\kerb-core-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerby-pkix\1.0.1\kerby-pkix-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerby-asn1\1.0.1\kerby-asn1-1.0.1.jar;D:\Maven安装包\Maven_Repository\org\apache\kerby\kerby-util\1.0.1\kerby-util-1.0.1.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\jackson\core\jackson-databind\2.10.5.1\jackson-databind-2.10.5.1.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\jackson\core\jackson-core\2.10.5\jackson-core-2.10.5.jar;D:\Maven安装包\Maven_Repository\org\codehaus\woodstox\stax2-api\4.2.1\stax2-api-4.2.1.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\woodstox\woodstox-core\5.3.0\woodstox-core-5.3.0.jar;D:\Maven安装包\Maven_Repository\dnsjava\dnsjava\2.1.7\dnsjava-2.1.7.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-hdfs-client\3.3.1\hadoop-hdfs-client-3.3.1.jar;D:\Maven安装包\Maven_Repository\com\squareup\okhttp\okhttp\2.7.5\okhttp-2.7.5.jar;D:\Maven安装包\Maven_Repository\com\squareup\okio\okio\1.6.0\okio-1.6.0.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\jackson\core\jackson-annotations\2.10.5\jackson-annotations-2.10.5.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-yarn-api\3.3.1\hadoop-yarn-api-3.3.1.jar;D:\Maven安装包\Maven_Repository\javax\xml\bind\jaxb-api\2.2.11\jaxb-api-2.2.11.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-yarn-client\3.3.1\hadoop-yarn-client-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\websocket\websocket-client\9.4.40.v20210413\websocket-client-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-client\9.4.40.v20210413\jetty-client-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-http\9.4.40.v20210413\jetty-http-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-util\9.4.40.v20210413\jetty-util-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\jetty-io\9.4.40.v20210413\jetty-io-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\websocket\websocket-common\9.4.40.v20210413\websocket-common-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\eclipse\jetty\websocket\websocket-api\9.4.40.v20210413\websocket-api-9.4.40.v20210413.jar;D:\Maven安装包\Maven_Repository\org\jline\jline\3.9.0\jline-3.9.0.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-mapreduce-client-core\3.3.1\hadoop-mapreduce-client-core-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-yarn-common\3.3.1\hadoop-yarn-common-3.3.1.jar;D:\Maven安装包\Maven_Repository\javax\servlet\javax.servlet-api\3.1.0\javax.servlet-api-3.1.0.jar;D:\Maven安装包\Maven_Repository\com\sun\jersey\jersey-core\1.19\jersey-core-1.19.jar;D:\Maven安装包\Maven_Repository\javax\ws\rs\jsr311-api\1.1.1\jsr311-api-1.1.1.jar;D:\Maven安装包\Maven_Repository\com\sun\jersey\jersey-client\1.19\jersey-client-1.19.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\jackson\module\jackson-module-jaxb-annotations\2.10.5\jackson-module-jaxb-annotations-2.10.5.jar;D:\Maven安装包\Maven_Repository\jakarta\xml\bind\jakarta.xml.bind-api\2.3.2\jakarta.xml.bind-api-2.3.2.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\jackson\jaxrs\jackson-jaxrs-json-provider\2.10.5\jackson-jaxrs-json-provider-2.10.5.jar;D:\Maven安装包\Maven_Repository\com\fasterxml\jackson\jaxrs\jackson-jaxrs-base\2.10.5\jackson-jaxrs-base-2.10.5.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-mapreduce-client-jobclient\3.3.1\hadoop-mapreduce-client-jobclient-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoo
p\hadoop-mapreduce-client-common\3.3.1\hadoop-mapreduce-client-common-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\hadoop\hadoop-annotations\3.3.1\hadoop-annotations-3.3.1.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-connector-kafka_2.12\1.14.0\flink-connector-kafka_2.12-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-connector-base\1.14.0\flink-connector-base-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\apache\kafka\kafka-clients\2.4.1\kafka-clients-2.4.1.jar;D:\Maven安装包\Maven_Repository\com\github\luben\zstd-jni\1.4.3-1\zstd-jni-1.4.3-1.jar;D:\Maven安装包\Maven_Repository\org\apache\flink\flink-json\1.14.0\flink-json-1.14.0.jar;D:\Maven安装包\Maven_Repository\org\slf4j\slf4j-simple\1.7.36\slf4j-simple-1.7.36.jar" KafkaToKafkaJob [main] WARN org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer - Property [transaction.timeout.ms] not specified. Setting it to 3600000 ms [main] INFO org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils - The configuration option taskmanager.cpu.cores required for local execution is not set, setting it to the maximal possible value. [main] INFO org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils - The configuration option taskmanager.memory.task.heap.size required for local execution is not set, setting it to the maximal possible value. [main] INFO org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils - The configuration option taskmanager.memory.task.off-heap.size required for local execution is not set, setting it to the maximal possible value. [main] INFO org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils - The configuration option taskmanager.memory.network.min required for local execution is not set, setting it to its default value 64 mb. [main] INFO org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils - The configuration option taskmanager.memory.network.max required for local execution is not set, setting it to its default value 64 mb. [main] INFO org.apache.flink.runtime.taskexecutor.TaskExecutorResourceUtils - The configuration option taskmanager.memory.managed.size required for local execution is not set, setting it to its default value 128 mb. [main] INFO org.apache.flink.runtime.minicluster.MiniCluster - Starting Flink Mini Cluster [main] INFO org.apache.flink.runtime.minicluster.MiniCluster - Starting Metrics Registry [main] INFO org.apache.flink.runtime.metrics.MetricRegistryImpl - No metrics reporter configured, no metrics will be exposed/reported. [main] INFO org.apache.flink.runtime.minicluster.MiniCluster - Starting RPC Service(s) [main] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils - Trying to start local actor system [flink-akka.actor.default-dispatcher-4] INFO akka.event.slf4j.Slf4jLogger - Slf4jLogger started [main] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils - Actor system started at akka://flink [main] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils - Trying to start local actor system [flink-metrics-4] INFO akka.event.slf4j.Slf4jLogger - Slf4jLogger started [main] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcServiceUtils - Actor system started at akka://flink-metrics [main] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Starting RPC endpoint for org.apache.flink.runtime.metrics.dump.MetricQueryService at akka://flink-metrics/user/rpc/MetricQueryService . 
[main] INFO org.apache.flink.runtime.minicluster.MiniCluster - Starting high-availability services [main] INFO org.apache.flink.runtime.blob.BlobServer - Created BLOB server storage directory C:\Users\18795\AppData\Local\Temp\blobStore-395858f9-6e79-4996-b985-dabf8fa8d8ea [main] INFO org.apache.flink.runtime.blob.BlobServer - Started BLOB server at 0.0.0.0:49901 - max concurrent requests: 50 - max backlog: 1000 [main] INFO org.apache.flink.runtime.blob.PermanentBlobCache - Created BLOB cache storage directory C:\Users\18795\AppData\Local\Temp\blobStore-16c800cc-b3b0-4b70-ac5c-7861ba41fdfd [main] INFO org.apache.flink.runtime.blob.TransientBlobCache - Created BLOB cache storage directory C:\Users\18795\AppData\Local\Temp\blobStore-e7acd0f0-084b-485b-809f-b82728bd7695 [main] INFO org.apache.flink.runtime.minicluster.MiniCluster - Starting 1 TaskManger(s) [main] INFO org.apache.flink.runtime.taskexecutor.TaskManagerRunner - Starting TaskManager with ResourceID: 4afd33fe-2e15-4a98-a576-d013345b4b7c [main] INFO org.apache.flink.runtime.taskexecutor.TaskManagerServices - Temporary file directory 'C:\Users\18795\AppData\Local\Temp': total 198 GB, usable 88 GB (44.44% usable) [main] INFO org.apache.flink.runtime.io.disk.iomanager.IOManager - Created a new FileChannelManager for spilling of task related data to disk (joins, sorting, ...). Used directories: C:\Users\18795\AppData\Local\Temp\flink-io-66b5f27e-b3b0-463f-a1bd-ea29f5390c52 [main] INFO org.apache.flink.runtime.io.network.NettyShuffleServiceFactory - Created a new FileChannelManager for storing result partitions of BLOCKING shuffles. Used directories: C:\Users\18795\AppData\Local\Temp\flink-netty-shuffle-a407813a-f186-4386-baf3-ec9244c38fcb [main] INFO org.apache.flink.runtime.io.network.buffer.NetworkBufferPool - Allocated 64 MB for network buffer pool (number of memory segments: 2048, bytes per segment: 32768). [main] INFO org.apache.flink.runtime.io.network.NettyShuffleEnvironment - Starting the network environment and its components. [main] INFO org.apache.flink.runtime.taskexecutor.KvStateService - Starting the kvState service and its components. [main] INFO org.apache.flink.configuration.Configuration - Config uses fallback configuration key 'akka.ask.timeout' instead of key 'taskmanager.slot.timeout' [main] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Starting RPC endpoint for org.apache.flink.runtime.taskexecutor.TaskExecutor at akka://flink/user/rpc/taskmanager_0 . [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.taskexecutor.DefaultJobLeaderService - Start job leader service. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.filecache.FileCache - User file cache uses directory C:\Users\18795\AppData\Local\Temp\flink-dist-cache-2a4c4b41-bb2f-4c1e-88a0-cf5ef1549b97 [main] INFO org.apache.flink.runtime.dispatcher.DispatcherRestEndpoint - Starting rest endpoint. [main] INFO org.apache.flink.runtime.dispatcher.DispatcherRestEndpoint - Failed to load web based job submission extension. Probable reason: flink-runtime-web is not in the classpath. [main] WARN org.apache.flink.runtime.webmonitor.WebMonitorUtils - Log file environment variable 'log.file' is not set. [main] WARN org.apache.flink.runtime.webmonitor.WebMonitorUtils - JobManager log files are unavailable in the web dashboard. Log file location not found in environment variable 'log.file' or configuration key 'web.log.path'. 
[main] INFO org.apache.flink.runtime.dispatcher.DispatcherRestEndpoint - Rest endpoint listening at localhost:49954 [main] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Proposing leadership to contender http://localhost:49954 [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.dispatcher.DispatcherRestEndpoint - http://localhost:49954 was granted leadership with leaderSessionID=1242e32c-a913-428b-916a-1672c2a1bbd1 [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Received confirmation of leadership for leader http://localhost:49954 , session=1242e32c-a913-428b-916a-1672c2a1bbd1 [main] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Proposing leadership to contender LeaderContender: DefaultDispatcherRunner [main] INFO org.apache.flink.runtime.resourcemanager.ResourceManagerServiceImpl - Starting resource manager service. [main] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Proposing leadership to contender LeaderContender: ResourceManagerServiceImpl [mini-cluster-io-thread-2] INFO org.apache.flink.runtime.dispatcher.runner.DefaultDispatcherRunner - DefaultDispatcherRunner was granted leadership with leader id 3494d5a0-1484-46c6-af3d-11f76998217f. Creating new DispatcherLeaderProcess. [pool-2-thread-1] INFO org.apache.flink.runtime.resourcemanager.ResourceManagerServiceImpl - Resource manager service is granted leadership with session id 0a684d7f-95f7-4df1-964c-b4caa47cab32. [main] INFO org.apache.flink.runtime.minicluster.MiniCluster - Flink Mini Cluster started successfully [mini-cluster-io-thread-2] INFO org.apache.flink.runtime.dispatcher.runner.SessionDispatcherLeaderProcess - Start SessionDispatcherLeaderProcess. [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.dispatcher.runner.SessionDispatcherLeaderProcess - Recover all persisted job graphs. [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.dispatcher.runner.SessionDispatcherLeaderProcess - Successfully recovered 0 persisted job graphs. [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Starting RPC endpoint for org.apache.flink.runtime.dispatcher.StandaloneDispatcher at akka://flink/user/rpc/dispatcher_1 . [pool-2-thread-1] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Starting RPC endpoint for org.apache.flink.runtime.resourcemanager.StandaloneResourceManager at akka://flink/user/rpc/resourcemanager_2 . [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Received confirmation of leadership for leader akka://flink/user/rpc/dispatcher_1 , session=3494d5a0-1484-46c6-af3d-11f76998217f [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.resourcemanager.StandaloneResourceManager - Starting the resource manager. [mini-cluster-io-thread-2] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Received confirmation of leadership for leader akka://flink/user/rpc/resourcemanager_2 , session=0a684d7f-95f7-4df1-964c-b4caa47cab32 [flink-akka.actor.default-dispatcher-6] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Connecting to ResourceManager akka://flink/user/rpc/resourcemanager_2(964cb4caa47cab320a684d7f95f74df1). 
[flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Resolved ResourceManager address, beginning registration [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.dispatcher.StandaloneDispatcher - Received JobGraph submission 'Kafka Forward: ExamTopic01 → ExamTopic02' (df2dea9c36325aded3c724b0720bd6c2). [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.dispatcher.StandaloneDispatcher - Submitting job 'Kafka Forward: ExamTopic01 → ExamTopic02' (df2dea9c36325aded3c724b0720bd6c2). [flink-akka.actor.default-dispatcher-6] INFO org.apache.flink.runtime.resourcemanager.StandaloneResourceManager - Registering TaskManager with ResourceID 4afd33fe-2e15-4a98-a576-d013345b4b7c (akka://flink/user/rpc/taskmanager_0) at ResourceManager [flink-akka.actor.default-dispatcher-6] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Successful registration at resource manager akka://flink/user/rpc/resourcemanager_2 under registration id b2e86c78c25728134d6c4ecb271aa712. [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Proposing leadership to contender LeaderContender: JobMasterServiceLeadershipRunner [jobmanager-io-thread-1] INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Starting RPC endpoint for org.apache.flink.runtime.jobmaster.JobMaster at akka://flink/user/rpc/jobmanager_3 . [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - Initializing job 'Kafka Forward: ExamTopic01 → ExamTopic02' (df2dea9c36325aded3c724b0720bd6c2). [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - Using restart back off time strategy NoRestartBackoffTimeStrategy for Kafka Forward: ExamTopic01 → ExamTopic02 (df2dea9c36325aded3c724b0720bd6c2). [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - Running initialization on master for job Kafka Forward: ExamTopic01 → ExamTopic02 (df2dea9c36325aded3c724b0720bd6c2). [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - Successfully ran initialization on master in 0 ms. [jobmanager-io-thread-1] INFO org.apache.flink.runtime.scheduler.adapter.DefaultExecutionTopology - Built 1 pipelined regions in 2 ms [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - No state backend has been configured, using default (HashMap) org.apache.flink.runtime.state.hashmap.HashMapStateBackend@1e4cd263 [jobmanager-io-thread-1] INFO org.apache.flink.runtime.state.StateBackendLoader - State backend loader loads the state backend as HashMapStateBackend [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - Checkpoint storage is set to 'jobmanager' [jobmanager-io-thread-1] INFO org.apache.flink.runtime.checkpoint.CheckpointCoordinator - No checkpoint found during restore. [jobmanager-io-thread-1] INFO org.apache.flink.runtime.jobmaster.JobMaster - Using failover strategy org.apache.flink.runtime.executiongraph.failover.flip1.RestartPipelinedRegionFailoverStrategy@44c902ad for Kafka Forward: ExamTopic01 → ExamTopic02 (df2dea9c36325aded3c724b0720bd6c2). 
[jobmanager-io-thread-1] INFO org.apache.flink.runtime.highavailability.nonha.embedded.EmbeddedLeaderService - Received confirmation of leadership for leader akka://flink/user/rpc/jobmanager_3 , session=fa6c5f3c-c29b-4667-9a9e-4f126e184889 [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.jobmaster.JobMaster - Starting execution of job 'Kafka Forward: ExamTopic01 → ExamTopic02' (df2dea9c36325aded3c724b0720bd6c2) under job master id 9a9e4f126e184889fa6c5f3cc29b4667. [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.jobmaster.JobMaster - Starting scheduling with scheduling strategy [org.apache.flink.runtime.scheduler.strategy.PipelinedRegionSchedulingStrategy] [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Job Kafka Forward: ExamTopic01 → ExamTopic02 (df2dea9c36325aded3c724b0720bd6c2) switched from state CREATED to RUNNING. [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Source: Custom Source -> Sink: Unnamed (1/1) (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from CREATED to SCHEDULED. [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.jobmaster.JobMaster - Connecting to ResourceManager akka://flink/user/rpc/resourcemanager_2(964cb4caa47cab320a684d7f95f74df1) [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.jobmaster.JobMaster - Resolved ResourceManager address, beginning registration [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.resourcemanager.StandaloneResourceManager - Registering job manager 9a9e4f126e184889fa6c5f3cc29b4667@akka://flink/user/rpc/jobmanager_3 for job df2dea9c36325aded3c724b0720bd6c2. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.resourcemanager.StandaloneResourceManager - Registered job manager 9a9e4f126e184889fa6c5f3cc29b4667@akka://flink/user/rpc/jobmanager_3 for job df2dea9c36325aded3c724b0720bd6c2. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.jobmaster.JobMaster - JobManager successfully registered at ResourceManager, leader id: 964cb4caa47cab320a684d7f95f74df1. [flink-akka.actor.default-dispatcher-8] INFO org.apache.flink.runtime.resourcemanager.slotmanager.DeclarativeSlotManager - Received resource requirements from job df2dea9c36325aded3c724b0720bd6c2: [ResourceRequirement{resourceProfile=ResourceProfile{UNKNOWN}, numberOfRequiredSlots=1}] [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Receive slot request 0628cd6703b3b400e296bfd92281fe07 for job df2dea9c36325aded3c724b0720bd6c2 from resource manager with leader id 964cb4caa47cab320a684d7f95f74df1. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Allocated slot for 0628cd6703b3b400e296bfd92281fe07. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.taskexecutor.DefaultJobLeaderService - Add job df2dea9c36325aded3c724b0720bd6c2 for job leader monitoring. [mini-cluster-io-thread-1] INFO org.apache.flink.runtime.taskexecutor.DefaultJobLeaderService - Try to register at job manager akka://flink/user/rpc/jobmanager_3 with leader id fa6c5f3c-c29b-4667-9a9e-4f126e184889. 
[flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.taskexecutor.DefaultJobLeaderService - Resolved JobManager address, beginning registration [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.taskexecutor.DefaultJobLeaderService - Successful registration at job manager akka://flink/user/rpc/jobmanager_3 for job df2dea9c36325aded3c724b0720bd6c2. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Establish JobManager connection for job df2dea9c36325aded3c724b0720bd6c2. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Offer reserved slots to the leader of job df2dea9c36325aded3c724b0720bd6c2. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Source: Custom Source -> Sink: Unnamed (1/1) (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from SCHEDULED to DEPLOYING. [flink-akka.actor.default-dispatcher-4] INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Deploying Source: Custom Source -> Sink: Unnamed (1/1) (attempt #0) with attempt id 6c5fc0fa56d9b0b02eeab15e4ab8a649 to 4afd33fe-2e15-4a98-a576-d013345b4b7c @ www.Brenz.pl (dataPort=-1) with allocation id 0628cd6703b3b400e296bfd92281fe07 [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.taskexecutor.slot.TaskSlotTableImpl - Activate slot 0628cd6703b3b400e296bfd92281fe07. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.state.changelog.StateChangelogStorageLoader - StateChangelogStorageLoader initialized with shortcut names {memory}. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.state.changelog.StateChangelogStorageLoader - Creating a changelog storage with name 'memory'. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.taskexecutor.TaskExecutor - Received task Source: Custom Source -> Sink: Unnamed (1/1)#0 (6c5fc0fa56d9b0b02eeab15e4ab8a649), deploy into slot with allocation id 0628cd6703b3b400e296bfd92281fe07. [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.runtime.taskmanager.Task - Source: Custom Source -> Sink: Unnamed (1/1)#0 (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from CREATED to DEPLOYING. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.taskexecutor.slot.TaskSlotTableImpl - Activate slot 0628cd6703b3b400e296bfd92281fe07. [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.runtime.taskmanager.Task - Loading JAR files for task Source: Custom Source -> Sink: Unnamed (1/1)#0 (6c5fc0fa56d9b0b02eeab15e4ab8a649) [DEPLOYING]. [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.runtime.tasks.StreamTask - No state backend has been configured, using default (HashMap) org.apache.flink.runtime.state.hashmap.HashMapStateBackend@536aad7a [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.runtime.state.StateBackendLoader - State backend loader loads the state backend as HashMapStateBackend [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.runtime.tasks.StreamTask - Checkpoint storage is set to 'jobmanager' [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.runtime.taskmanager.Task - Source: Custom Source -> Sink: Unnamed (1/1)#0 (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from DEPLOYING to INITIALIZING. 
[flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Source: Custom Source -> Sink: Unnamed (1/1) (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from DEPLOYING to INITIALIZING. [Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer - Using AT_LEAST_ONCE semantic, but checkpointing is not enabled. Switching to NONE semantic. [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.api.functions.sink.TwoPhaseCommitSinkFunction - FlinkKafkaProducer 1/1 - no state to restore [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.producer.ProducerConfig - ProducerConfig values: acks = 1 batch.size = 16384 bootstrap.servers = [node01:9092, node02:9092, node03:9092] buffer.memory = 33554432 client.dns.lookup = default client.id = compression.type = none connections.max.idle.ms = 540000 delivery.timeout.ms = 120000 enable.idempotence = false interceptor.classes = [] key.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer linger.ms = 0 max.block.ms = 60000 max.in.flight.requests.per.connection = 5 max.request.size = 1048576 metadata.max.age.ms = 300000 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner receive.buffer.bytes = 32768 reconnect.backoff.max.ms = 1000 reconnect.backoff.ms = 50 request.timeout.ms = 30000 retries = 2147483647 retry.backoff.ms = 100 sasl.client.callback.handler.class = null sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism = GSSAPI security.protocol = PLAINTEXT security.providers = null send.buffer.bytes = 131072 ssl.cipher.suites = null ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.timeout.ms = 3600000 transactional.id = null value.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer [Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.producer.ProducerConfig - The configuration 'key.deserializer' was supplied but isn't a known config. [Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.producer.ProducerConfig - The configuration 'value.deserializer' was supplied but isn't a known config. [Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.producer.ProducerConfig - The configuration 'group.id' was supplied but isn't a known config. 
[Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version: 2.4.1 [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId: c57222ae8cd7866b [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka startTimeMs: 1758282681035 [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer - Starting FlinkKafkaInternalProducer (1/1) to produce into default topic ExamTopic02 [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase - Consumer subtask 0 has no restore state. [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.consumer.ConsumerConfig - ConsumerConfig values: allow.auto.create.topics = true auto.commit.interval.ms = 5000 auto.offset.reset = latest bootstrap.servers = [node01:9092, node02:9092, node03:9092] check.crcs = true client.dns.lookup = default client.id = client.rack = connections.max.idle.ms = 540000 default.api.timeout.ms = 60000 enable.auto.commit = true exclude.internal.topics = true fetch.max.bytes = 52428800 fetch.max.wait.ms = 500 fetch.min.bytes = 1 group.id = exam-consumer-group group.instance.id = null heartbeat.interval.ms = 3000 interceptor.classes = [] internal.leave.group.on.close = true isolation.level = read_uncommitted key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer max.partition.fetch.bytes = 1048576 max.poll.interval.ms = 300000 max.poll.records = 500 metadata.max.age.ms = 300000 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor] receive.buffer.bytes = 65536 reconnect.backoff.max.ms = 1000 reconnect.backoff.ms = 50 request.timeout.ms = 30000 retry.backoff.ms = 100 sasl.client.callback.handler.class = null sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism = GSSAPI security.protocol = PLAINTEXT security.providers = null send.buffer.bytes = 131072 session.timeout.ms = 10000 ssl.cipher.suites = null ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer [Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'value.serializer' was supplied but isn't a known config. 
[Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'transaction.timeout.ms' was supplied but isn't a known config. [Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'key.serializer' was supplied but isn't a known config. [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version: 2.4.1 [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId: c57222ae8cd7866b [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka startTimeMs: 1758282681119 [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-exam-consumer-group-1, groupId=exam-consumer-group] Cluster ID: B0bBZVzzQ9GkwwAzlHzH9A [kafka-producer-network-thread | producer-1] INFO org.apache.kafka.clients.Metadata - [Producer clientId=producer-1] Cluster ID: B0bBZVzzQ9GkwwAzlHzH9A [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase - Consumer subtask 0 will start reading the following 1 partitions from the committed group offsets in Kafka: [KafkaTopicPartition{topic='ExamTopic01', partition=0}] [Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.runtime.taskmanager.Task - Source: Custom Source -> Sink: Unnamed (1/1)#0 (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from INITIALIZING to RUNNING. [flink-akka.actor.default-dispatcher-7] INFO org.apache.flink.runtime.executiongraph.ExecutionGraph - Source: Custom Source -> Sink: Unnamed (1/1) (6c5fc0fa56d9b0b02eeab15e4ab8a649) switched from INITIALIZING to RUNNING. [Legacy Source Thread - Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase - Consumer subtask 0 creating fetcher with offsets {KafkaTopicPartition{topic='ExamTopic01', partition=0}=-915623761773}. 
[Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.consumer.ConsumerConfig - ConsumerConfig values: allow.auto.create.topics = true auto.commit.interval.ms = 5000 auto.offset.reset = latest bootstrap.servers = [node01:9092, node02:9092, node03:9092] check.crcs = true client.dns.lookup = default client.id = client.rack = connections.max.idle.ms = 540000 default.api.timeout.ms = 60000 enable.auto.commit = true exclude.internal.topics = true fetch.max.bytes = 52428800 fetch.max.wait.ms = 500 fetch.min.bytes = 1 group.id = exam-consumer-group group.instance.id = null heartbeat.interval.ms = 3000 interceptor.classes = [] internal.leave.group.on.close = true isolation.level = read_uncommitted key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer max.partition.fetch.bytes = 1048576 max.poll.interval.ms = 300000 max.poll.records = 500 metadata.max.age.ms = 300000 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor] receive.buffer.bytes = 65536 reconnect.backoff.max.ms = 1000 reconnect.backoff.ms = 50 request.timeout.ms = 30000 retry.backoff.ms = 100 sasl.client.callback.handler.class = null sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.login.callback.handler.class = null sasl.login.class = null sasl.login.refresh.buffer.seconds = 300 sasl.login.refresh.min.period.seconds = 60 sasl.login.refresh.window.factor = 0.8 sasl.login.refresh.window.jitter = 0.05 sasl.mechanism = GSSAPI security.protocol = PLAINTEXT security.providers = null send.buffer.bytes = 131072 session.timeout.ms = 10000 ssl.cipher.suites = null ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = https ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'value.serializer' was supplied but isn't a known config. [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'transaction.timeout.ms' was supplied but isn't a known config. [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] WARN org.apache.kafka.clients.consumer.ConsumerConfig - The configuration 'key.serializer' was supplied but isn't a known config. 
[Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version: 2.4.1 [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId: c57222ae8cd7866b [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka startTimeMs: 1758282681381 [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.consumer.KafkaConsumer - [Consumer clientId=consumer-exam-consumer-group-2, groupId=exam-consumer-group] Subscribed to partition(s): ExamTopic01-0 [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-exam-consumer-group-2, groupId=exam-consumer-group] Cluster ID: B0bBZVzzQ9GkwwAzlHzH9A [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.consumer.internals.AbstractCoordinator - [Consumer clientId=consumer-exam-consumer-group-2, groupId=exam-consumer-group] Discovered group coordinator node01:9092 (id: 2147483646 rack: null) [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.consumer.internals.ConsumerCoordinator - [Consumer clientId=consumer-exam-consumer-group-2, groupId=exam-consumer-group] Found no committed offset for partition ExamTopic01-0 [Kafka Fetcher for Source: Custom Source -> Sink: Unnamed (1/1)#0] INFO org.apache.kafka.clients.consumer.internals.SubscriptionState - [Consumer clientId=consumer-exam-consumer-group-2, groupId=exam-consumer-group] Resetting offset for partition ExamTopic01-0 to offset 0. 对吗