Continuing from the previous post, "spark-core_02: analysis of the spark-submit and spark-class scripts".
The main job of launcher.Main is to validate the spark-submit arguments, inject them into the Spark environment, and return the full command that SparkSubmit should execute; the spark-class script then runs that command via exec "${CMD[@]}".
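To make that hand-off concrete, here is a minimal sketch (not Spark code; the jar path and master URL are the ones used throughout this post, so treat them as assumptions about your layout) that invokes launcher.Main the same way spark-class does and then splits its stdout on the NUL separator to recover CMD:

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.Arrays;

public class LauncherMainDemo {
  public static void main(String[] ignored) throws Exception {
    // Same invocation spark-class builds (adjust the assembly jar path to your install)
    Process p = new ProcessBuilder(
        "java", "-cp", "/data/spark-1.6.0-bin-hadoop2.6/lib/spark-assembly-1.6.0-hadoop2.6.0.jar",
        "org.apache.spark.launcher.Main",
        "org.apache.spark.deploy.SparkSubmit",
        "--class", "org.apache.spark.repl.Main",
        "--name", "Spark shell",
        "--master", "spark://luyl152:7077").start();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    InputStream in = p.getInputStream();
    byte[] buf = new byte[4096];
    for (int n; (n = in.read(buf)) != -1; ) {
      out.write(buf, 0, n);
    }
    p.waitFor();
    // launcher.Main terminates every argument with '\0'; splitting on it recovers CMD
    String[] cmd = out.toString("UTF-8").split("\u0000");
    System.out.println(Arrays.toString(cmd));
  }
}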
class Main {
  public static void main(String[] argsArray) throws Exception {
    checkArgument(argsArray.length > 0, "Not enough arguments: missing class name.");
    /**
     * Invoked as:
     *   java -cp spark_home/lib/spark-assembly-1.6.0-hadoop2.6.0.jar org.apache.spark.launcher.Main
     *     org.apache.spark.deploy.SparkSubmit --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077
     * This main method ultimately hands "org.apache.spark.deploy.SparkSubmit --class org.apache.spark.repl.Main
     * --name "Spark shell" --master spark://luyl152:7077" back to exec "${CMD[@]}" in spark-class.
     */
    List<String> args = new ArrayList<String>(Arrays.asList(argsArray));
    String className = args.remove(0);
    // In spark-class (or any other config file) you can export SPARK_PRINT_LAUNCH_COMMAND=<any non-empty value>;
    // it makes the launcher print the cmd, i.e. the value that exec "${CMD[@]}" in spark-class will run.
    boolean printLaunchCommand = !isEmpty(System.getenv("SPARK_PRINT_LAUNCH_COMMAND"));
    AbstractCommandBuilder builder; // create the command builder
    // When spark-shell runs, the first argument is SparkSubmit
    if (className.equals("org.apache.spark.deploy.SparkSubmit")) {
      try {
        // Parses each argument into the matching Spark field, e.g. the value of --class goes into mainClass.
        // Any extra arguments end up in the sparkArgs list of SparkSubmitCommandBuilder.
        builder = new SparkSubmitCommandBuilder(args);
I. The new SparkSubmitCommandBuilder(args) constructor:
// args at this point: --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077
SparkSubmitCommandBuilder(List<String> args) {
  this.sparkArgs = new ArrayList<String>();
  List<String> submitArgs = args;
  // If launched from Python, the first argument is pyspark-shell-main
  if (args.size() > 0 && args.get(0).equals(PYSPARK_SHELL)) {
    this.allowsMixedArguments = true;
    appResource = PYSPARK_SHELL_RESOURCE;
    submitArgs = args.subList(1, args.size());
  // If launched from R, the first argument is sparkr-shell-main
  } else if (args.size() > 0 && args.get(0).equals(SPARKR_SHELL)) {
    this.allowsMixedArguments = true;
    appResource = SPARKR_SHELL_RESOURCE;
    submitArgs = args.subList(1, args.size());
  } else {
    // Neither Python nor R, so allowsMixedArguments stays false
    this.allowsMixedArguments = false;
  }
  OptionParser parser = new OptionParser();
  // Assigns each spark-submit argument to the matching Spark field, e.g. the value of --class goes into mainClass
  parser.parse(submitArgs); // parse() is implemented in the parent class, SparkSubmitOptionParser
  this.printInfo = parser.infoRequested;
}
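As a quick standalone illustration of the branch above (the marker strings are the Spark 1.6 constants, PYSPARK_SHELL = "pyspark-shell-main" and SPARKR_SHELL = "sparkr-shell-main"; this is a toy, not Spark's class):

import java.util.Arrays;
import java.util.List;

public class ShellMarkerDemo {
  public static void main(String[] argv) {
    // pyspark passes a marker token first; the constructor strips it with subList
    List<String> args = Arrays.asList("pyspark-shell-main", "--master", "spark://luyl152:7077");
    boolean allowsMixedArguments = false;
    List<String> submitArgs = args;
    if (args.size() > 0 && args.get(0).equals("pyspark-shell-main")) {
      allowsMixedArguments = true;                // app args may be mixed with submit args
      submitArgs = args.subList(1, args.size());  // view of the list without the marker token
    }
    System.out.println(allowsMixedArguments);     // true
    System.out.println(submitArgs);               // [--master, spark://luyl152:7077]
  }
}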
1. How the parent class SparkSubmitOptionParser implements parse():
/**
 * Parse a list of spark-submit command line options.
 * <p>
 * See SparkSubmitArguments.scala for a more formal description of available options.
 *
 * @throws IllegalArgumentException If an error is found during parsing.
 * args here: --class org.apache.spark.repl.Main --name "Spark shell" --master spark://luyl152:7077.
 * The job of parse() is to assign each spark-submit argument to the matching Spark field,
 * e.g. the value of --class goes into mainClass.
 */
protected final void parse(List<String> args) {
  // Matches single-token "--option=value" forms, e.g. --master=spark://luyl152:7077.
  // The full option list is at the end of org.apache.spark.deploy.SparkSubmitArguments,
  // or run spark-submit -h to see it.
  Pattern eqSeparatedOpt = Pattern.compile("(--[^=]+)=(.+)");
  int idx = 0;
  for (idx = 0; idx < args.size(); idx++) {
    String arg = args.get(idx);
    String value = null;
    // For an "--option=value" token, arg and value are split out here
    Matcher m = eqSeparatedOpt.matcher(arg);
    if (m.matches()) {
      arg = m.group(1);   // e.g. --master
      value = m.group(2); // e.g. spark://luyl152:7077
    }
    // Look for options with a value.
    // Matches the current "--" argument (e.g. "--class") against the opts two-dimensional array;
    // on a hit it returns the canonical name (e.g. --class), otherwise null.
    String name = findCliOption(arg, opts);
    if (name != null) {
      if (value == null) {
        if (idx == args.size() - 1) {
          // The option matched but no value follows, e.g. a bare trailing "--class":
          // size is 1, idx is 0, and 1 - 1 == 0, so we land here and fail.
          throw new IllegalArgumentException(
              String.format("Missing argument for option '%s'.", arg));
        }
        idx++;
        value = args.get(idx); // otherwise the token at the next index is the option's value
      }
      // name is a spark-submit option such as --class, value is its argument.
      // handle() is overridden in OptionParser itself; it assigns the value to the matching
      // Spark field, e.g. the value of --class goes into mainClass (the implementation is trivial).
      if (!handle(name, value)) {
        break;
      }
      continue; // idx only advances past the value when an option matched
    }
    // Look for a switch: if nothing matched above, check for a no-value flag such as --verbose
    name = findCliOption(arg, switches);
    if (name != null) {
      if (!handle(name, null)) {
        break;
      }
      continue;
    }
    if (!handleUnknown(arg)) {
      break;
    }
  }
  if (idx < args.size()) {
    idx++; // if the loop exited via break, skip past the argument that stopped parsing
  }
  // Any remaining arguments are appended to sparkArgs, the list created in the
  // SparkSubmitCommandBuilder constructor (this.sparkArgs = new ArrayList<String>();)
  handleExtraArgs(args.subList(idx, args.size()));
}
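Two helpers do the matching above: the eqSeparatedOpt regex for one-token "--option=value" forms, and findCliOption, which scans a two-dimensional table whose rows list the alternative spellings of one option, canonical name first. A self-contained sketch of both (a toy modeled on SparkSubmitOptionParser, not the exact source):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ParseHelpersDemo {
  // Simplified opts/switches tables: each row holds the spellings of one option, canonical name first
  static final String[][] OPTS = { { "--class" }, { "--master" }, { "--name" } };
  static final String[][] SWITCHES = { { "--verbose", "-v" }, { "--help", "-h" } };

  // Same lookup idea as findCliOption: return the canonical spelling, or null on no match
  static String findCliOption(String name, String[][] available) {
    for (String[] candidates : available) {
      for (String candidate : candidates) {
        if (candidate.equals(name)) {
          return candidates[0];
        }
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Pattern eqSeparatedOpt = Pattern.compile("(--[^=]+)=(.+)");
    // One-token "--option=value" form: the regex splits it into name and value
    Matcher m = eqSeparatedOpt.matcher("--master=spark://luyl152:7077");
    if (m.matches()) {
      System.out.println(m.group(1)); // --master
      System.out.println(m.group(2)); // spark://luyl152:7077
    }
    // Two-token form: "--master" alone does not match; parse() reads the value from the next token
    System.out.println(eqSeparatedOpt.matcher("--master").matches()); // false
    System.out.println(findCliOption("-v", SWITCHES)); // --verbose
    System.out.println(findCliOption("oops", OPTS));   // null
  }
}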
===> handle(name, value) as implemented in OptionParser:
/**
 * Assigns each spark-submit argument to the matching Spark field.
 */
@Override
protected boolean handle(String opt, String value) {
  if (opt.equals(MASTER)) {
    master = value;
  } else if (opt.equals(DEPLOY_MODE)) {
    deployMode = value;
  } else if (opt.equals(PROPERTIES_FILE)) {
    propertiesFile = value;
  } else if (opt.equals(DRIVER_MEMORY)) {
    conf.put(SparkLauncher.DRIVER_MEMORY, value);
  } else if (opt.equals(DRIVER_JAVA_OPTIONS)) {
    conf.put(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, value);
  } else if (opt.equals(DRIVER_LIBRARY_PATH)) {
    conf.put(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH, value);
  } // ... remaining option branches elided
2. The SparkSubmitCommandBuilder(args) constructor has now returned; back to the rest of launcher.Main#main():
      } catch (IllegalArgumentException e) {
        // Fallback path when constructing SparkSubmitCommandBuilder throws
        printLaunchCommand = false;
        System.err.println("Error: " + e.getMessage());
        System.err.println();
        MainClassOptionParser parser = new MainClassOptionParser();
        try {
          parser.parse(args);
        } catch (Exception ignored) {
          // Ignore parsing exceptions.
        }
        List<String> help = new ArrayList<String>();
        if (parser.className != null) {
          help.add(parser.CLASS);
          help.add(parser.className);
        }
        help.add(parser.USAGE_ERROR);
        builder = new SparkSubmitCommandBuilder(help);
      }
    } else {
      // If the first argument is not org.apache.spark.deploy.SparkSubmit, use SparkClassCommandBuilder as the parser instead
      builder = new SparkClassCommandBuilder(className, args);
    }
    Map<String, String> env = new HashMap<String, String>();
    // Returns the full command: the JVM invocation plus all SparkSubmit-related arguments
    List<String> cmd = builder.buildCommand(env);
II. A look at SparkSubmitCommandBuilder.buildCommand(Map) (note: the method lives in the builder, not in SparkSubmitOptionParser):
@Override
public List<String> buildCommand(Map<String, String> env) throws IOException {
  // appResource is PYSPARK_SHELL_RESOURCE for Python and SPARKR_SHELL_RESOURCE for R
  if (PYSPARK_SHELL_RESOURCE.equals(appResource) && !printInfo) {
    return buildPySparkShellCommand(env);
  } else if (SPARKR_SHELL_RESOURCE.equals(appResource) && !printInfo) {
    return buildSparkRCommand(env);
  } else {
    // env is still an empty Map here; for our spark-shell case buildSparkSubmitCommand() is called
    return buildSparkSubmitCommand(env);
  }
}
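For context, the two shell resources are sentinel strings rather than real paths: in the Spark 1.6 source PYSPARK_SHELL_RESOURCE is "pyspark-shell" and SPARKR_SHELL_RESOURCE is "sparkr-shell" (quoted from that source as I recall it, so treat the values as an assumption). A compact model of the dispatch:

public class BuildCommandDispatchDemo {
  static String dispatch(String appResource, boolean printInfo) {
    if ("pyspark-shell".equals(appResource) && !printInfo) {
      return "buildPySparkShellCommand";
    } else if ("sparkr-shell".equals(appResource) && !printInfo) {
      return "buildSparkRCommand";
    }
    return "buildSparkSubmitCommand"; // our spark-shell case lands here
  }

  public static void main(String[] args) {
    System.out.println(dispatch("pyspark-shell", false)); // buildPySparkShellCommand
    System.out.println(dispatch(null, false));            // buildSparkSubmitCommand
  }
}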
1. A look at buildSparkSubmitCommand(env):
private List<String> buildSparkSubmitCommand(Map<String, String> env) throws IOException {
  // Load the properties file and check whether spark-submit will be running the app's driver
  // or just launching a cluster app. When running the driver, the JVM's arguments will be
  // modified to cover the driver's configuration.
  Map<String, String> config = getEffectiveConfig();
  boolean isClientMode = isClientMode(config);
  // For standalone, anything that doesn't match --deploy-mode cluster defaults to client, so this is true here.
  // In client mode, DRIVER_EXTRA_CLASSPATH cannot be set directly in SparkConf, because the driver JVM
  // has already been started by spark-submit (via reflection); it is passed with --driver-class-path instead.
  String extraClassPath = isClientMode ? config.get(SparkLauncher.DRIVER_EXTRA_CLASSPATH) : null;
  List<String> cmd = buildJavaCommand(extraClassPath);
  // Take Thrift Server as daemon
  if (isThriftServer(mainClass)) {
    addOptionString(cmd, System.getenv("SPARK_DAEMON_JAVA_OPTS"));
  }
  // SPARK_SUBMIT_OPTS was mentioned in the spark-shell post: the Java classpath has to be handed
  // to Scala manually via SPARK_SUBMIT_OPTS="$SPARK_SUBMIT_OPTS -Dscala.usejavacp=true"
  addOptionString(cmd, System.getenv("SPARK_SUBMIT_OPTS"));
  addOptionString(cmd, System.getenv("SPARK_JAVA_OPTS"));
  if (isClientMode) {
    // Figuring out where the memory value comes from is a little tricky due to precedence.
    // Precedence is observed in the following order:
    // - explicit configuration (setConf()), which also covers the --driver-memory cli argument.
    // - properties file.
    // - SPARK_DRIVER_MEMORY env variable
    // - SPARK_MEM env variable
    // - default value (1g)
    // Take Thrift Server as daemon
    String tsMemory =
        isThriftServer(mainClass) ? System.getenv("SPARK_DAEMON_MEMORY") : null;
    String memory = firstNonEmpty(tsMemory, config.get(SparkLauncher.DRIVER_MEMORY),
        System.getenv("SPARK_DRIVER_MEMORY"), System.getenv("SPARK_MEM"), DEFAULT_MEM);
    cmd.add("-Xms" + memory); // min and max heap size, 1g by default
    cmd.add("-Xmx" + memory);
    addOptionString(cmd, config.get(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS));
    mergeEnvPathList(env, getLibPathEnvName(),
        config.get(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH));
  }
  addPermGenSizeOpt(cmd);
  cmd.add("org.apache.spark.deploy.SparkSubmit");
  // buildSparkSubmitArgs() returns a list of the spark-submit arguments and values injected above
  cmd.addAll(buildSparkSubmitArgs());
  return cmd;
}
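The precedence chain reduces to a first-non-empty scan over the candidates. A minimal sketch of that logic (firstNonEmpty here is a local stand-in for the launcher's helper, and the hard-coded values are hypothetical):

public class DriverMemoryPrecedence {
  // Stand-in for the launcher's firstNonEmpty helper: the first non-null, non-empty candidate wins
  static String firstNonEmpty(String... candidates) {
    for (String s : candidates) {
      if (s != null && !s.isEmpty()) {
        return s;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    String explicitConf = null; // would hold the setConf() / --driver-memory value
    String propsFile = "2g";    // would hold spark.driver.memory from the properties file
    String memory = firstNonEmpty(explicitConf, propsFile,
        System.getenv("SPARK_DRIVER_MEMORY"), System.getenv("SPARK_MEM"), "1g");
    System.out.println("-Xms" + memory + " -Xmx" + memory); // -Xms2g -Xmx2g
  }
}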
===> The cmd to execute is now returned; back once more to the rest of launcher.Main#main():
if (printLaunchCommand) {
  System.err.println("Spark Command: " + join(" ", cmd));
  System.err.println("========================================");
}
if (isWindows()) {
  System.out.println(prepareWindowsCommand(cmd, env));
} else {
  // In bash, use NULL as the arg separator since it cannot be used in an argument.
  // The effective command is printed to stdout, where spark-class captures it for exec "${CMD[@]}";
  // prepareBashCommand also prefixes "env VAR=value" entries when the env map is non-empty.
  /**
   * '\0' and the space character are not the same thing.
   * '\0' is the string terminator (ASCII 0), marking the end of a string, while a space
   * (ASCII 32) is an ordinary printable character that can appear inside text.
   * Programs conventionally use '\0' to terminate strings; the two are entirely different characters.
   */
  List<String> bashCmd = prepareBashCommand(cmd, env);
  for (String c : bashCmd) {
    System.out.print(c);
    System.out.print('\0');
  }
}
}
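To see why '\0' rather than a space is used as the separator, consider an argument that itself contains a space, like the --name value "Spark shell". A short sketch (not Spark code) of writing and re-reading the NUL-separated form:

public class NulSeparatorDemo {
  public static void main(String[] args) {
    String[] cmd = { "--name", "Spark shell", "--master", "spark://luyl152:7077" };
    StringBuilder sb = new StringBuilder();
    for (String c : cmd) {
      sb.append(c).append('\0'); // same scheme as the print loop above
    }
    String[] recovered = sb.toString().split("\u0000");
    System.out.println(recovered.length); // 4: "Spark shell" survives as one argument
    System.out.println(recovered[1]);     // Spark shell
  }
}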
The content of CMD is then:
/usr/local/java/jdk1.8.0_91/bin/java -cp
/data/spark-1.6.0-bin-hadoop2.6/conf/:/data/spark-1.6.0-bin-hadoop2.6/lib/spark-assembly-1.6.0-hadoop2.6.0.jar:/data/spark-1.6.0-bin-hadoop2.6/lib/datanucleus-api-jdo-3.2.6.jar:/data/spark-1.6.0-bin-hadoop2.6/lib/datanucleus-rdbms-3.2.9.jar:/data/spark-1.6.0-bin-hadoop2.6/lib/datanucleus-core-3.2.10.jar:/data/hadoop-2.6.5/etc/hadoop/
-Xms1g -Xmx1g -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005
org.apache.spark.deploy.SparkSubmit
--class org.apache.spark.repl.Main
--name Spark shell
--master spark://luyl152:7077,luyl153:7077,luyl154:7077
--verbose /tool/jarDir/maven_scala-1.0-SNAPSHOT.jar
(The -Xdebug/-Xrunjdwp options here come from SPARK_SUBMIT_OPTS picked up by addOptionString() above, presumably exported for remote debugging.)
Next, let's analyze the org.apache.spark.deploy.SparkSubmit source code.