Azkaban源码阅读之AzkabanApplication

14 篇文章 0 订阅
7 篇文章 0 订阅

Azkaban是一个Hadoop批处理调度器,可以通过访问其前台展示页面来进行部署、运行job等操作。


//IndexServlet.java
 
   @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {

        // Propagate per-request runtime properties before dispatching.
        super.setRuntimeProperties(req, resp);

        AzkabanApplication app = getApplication();
        String action = getParam(req, "action");

        // AJAX path: render the jobs of one folder as JSON and stop here —
        // no redirect is issued for this action.
        if ("loadjobs".equals(action)) {
            resp.setContentType("application/json");
            String folder = getParam(req, "folder");
            resp.getWriter().print(getJSONJobsForFolder(app.getAllFlows(), folder));
            resp.getWriter().flush();
            return;
        }

        // Form-post actions: perform the operation, then fall through to the
        // context-path redirect unless the schedule handler redirected itself.
        if ("unschedule".equals(action)) {
            app.getScheduleManager().removeScheduledJob(getParam(req, "job"));
        } else if ("cancel".equals(action)) {
            cancelJob(app, req);
        } else if ("schedule".equals(action)) {
            String redirect = scheduleJobs(app, req, resp);
            if (!redirect.isEmpty()) {
                resp.sendRedirect(redirect);
                return;
            }
        } else {
            // Unrecognised (or missing) action is a client error.
            throw new ServletException("Unknown action: " + action);
        }

        resp.sendRedirect(req.getContextPath());
    }

看文件名就可以看出,AzkabanApplication 是整个系统运行的源头,一切从这里开始……


-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


AzkabanApplication中是一系列用于整个系统初始化的代码,接下来就来看一下吧~


-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


    /**
     * Bootstraps the whole Azkaban application: loads default properties from
     * the job directories, configures the default timezone, builds the job
     * factory, job/flow managers, executor, scheduler, mailer and Velocity
     * engine.
     *
     * @param jobDirs       directories holding job definitions; must contain at least one entry
     * @param logDir        directory for job/flow logs (created if it does not exist)
     * @param tempDir       scratch directory (created if it does not exist)
     * @param enableDevMode forwarded to the Velocity engine configuration
     * @throws IOException              if properties cannot be read or a required directory cannot be created
     * @throws IllegalArgumentException if {@code jobDirs} is empty
     */
    public AzkabanApplication(List<File> jobDirs, File logDir, File tempDir, boolean enableDevMode) throws IOException {
        this._jobDirs = Utils.nonNull(jobDirs);
        this._logsDir = Utils.nonNull(logDir);
        this._tempDir = Utils.nonNull(tempDir);

        // Fail fast on an empty job-dir list before touching the filesystem
        // (previously this was only checked after the directories were created).
        if (jobDirs.isEmpty())
            throw new IllegalArgumentException("No job directory given.");

        // Create required directories up front. Unlike the original code the
        // result of mkdirs() is checked, so a creation failure surfaces here
        // instead of as an obscure error later in startup.
        ensureDirectoryExists(this._logsDir);
        ensureDirectoryExists(this._tempDir);

        for (File jobDir : _jobDirs) {
            if (!jobDir.exists()) {
                logger.warn("Job directory " + jobDir + " does not exist. Creating.");
                ensureDirectoryExists(jobDir);
            }
        }

        /*
         * Load the default properties: only files with a .properties or
         * .schema suffix found in the job directories are considered.
         */
        Props defaultProps = PropsUtils.loadPropsInDirs(_jobDirs, ".properties", ".schema");

        // Builds the base class loader; mainly responsible for picking up
        // HADOOP_HOME on the classpath.
        _baseClassLoader = getBaseClassloader();

        // Optional timezone override, applied to both Joda-Time and java.util.
        String defaultTimezoneID = defaultProps.getString(DEFAULT_TIMEZONE_ID, null);
        if (defaultTimezoneID != null) {
            DateTimeZone.setDefault(DateTimeZone.forID(defaultTimezoneID));
            TimeZone.setDefault(TimeZone.getTimeZone(defaultTimezoneID));
        }

        // Azkaban supports named permits (locks); initialise their manager here.
        NamedPermitManager permitManager = getNamedPermitManager(defaultProps);

        // Register the job types Azkaban understands ("java", "command", ...).
        JobWrappingFactory factory = new JobWrappingFactory(
                permitManager,
                new ReadWriteLockManager(),
                _logsDir.getAbsolutePath(),
                "java",
                new ImmutableMap.Builder<String, Class<? extends Job>>()
                 .put("java", JavaJob.class)
                 .put("command", ProcessJob.class)
                 .put("javaprocess", JavaProcessJob.class)
                 .put("pig", PigProcessJob.class)
                 .put("propertyPusher", NoopJob.class)
                 .put("python", PythonJob.class)
                 .put("ruby", RubyJob.class)
                 .put("script", ScriptJob.class).build());

        _hdfsUrl = defaultProps.getString("hdfs.instance.url", null);
        _jobManager = new JobManager(factory,
                                     _logsDir.getAbsolutePath(),
                                     defaultProps,
                                     _jobDirs,
                                     _baseClassLoader);

        _mailer = new Mailman(defaultProps.getString("mail.host", "localhost"),
                              defaultProps.getString("mail.user", ""),
                              defaultProps.getString("mail.password", ""));

        String failureEmail = defaultProps.getString("job.failure.email", null);
        String successEmail = defaultProps.getString("job.success.email", null);
        int schedulerThreads = defaultProps.getInt("scheduler.threads", 50);
        _instanceName = defaultProps.getString(INSTANCE_NAME, "");

        final File initialJobDir = _jobDirs.get(0);
        File schedule = getScheduleFile(defaultProps, initialJobDir);
        File backup = getBackupFile(defaultProps, initialJobDir);
        File executionsStorageDir = new File(
                defaultProps.getString("azkaban.executions.storage.dir", initialJobDir.getAbsolutePath() + "/executions")
        );
        ensureDirectoryExists(executionsStorageDir);

        // Highest execution id recorded by a previous run, so new executions
        // continue the sequence instead of colliding with stored ones.
        long lastExecutionId = getLastExecutionId(executionsStorageDir);
        logger.info(String.format("Using path[%s] for storing executions.", executionsStorageDir));
        logger.info(String.format("Last known execution id was [%s]", lastExecutionId));

        final ExecutableFlowSerializer flowSerializer = new DefaultExecutableFlowSerializer();
        final ExecutableFlowDeserializer flowDeserializer = new DefaultExecutableFlowDeserializer(_jobManager, factory);

        FlowExecutionSerializer flowExecutionSerializer = new FlowExecutionSerializer(flowSerializer);
        FlowExecutionDeserializer flowExecutionDeserializer = new FlowExecutionDeserializer(flowDeserializer);

        /*
         * Flow manager wrapped in a cache (default capacity 1000). This is the
         * central object of the whole initialisation: most of the web layer
         * goes through it.
         */
        _allFlows = new CachingFlowManager(
                new RefreshableFlowManager(
                        _jobManager,
                        flowExecutionSerializer,
                        flowExecutionDeserializer,
                        executionsStorageDir,
                        lastExecutionId
                ),
                defaultProps.getInt("azkaban.flow.cache.size", 1000)
        );
        _jobManager.setFlowManager(_allFlows);

        _jobExecutorManager = new JobExecutorManager(
                                _allFlows,
                                _jobManager,
                                _mailer,
                                failureEmail,
                                successEmail,
                                schedulerThreads
                            );

        this._schedulerManager = new ScheduleManager(_jobExecutorManager, new LocalFileScheduleLoader(schedule, backup));

        // Pre-compute the log URL prefix when a server URL is configured,
        // normalising the presence/absence of a trailing slash.
        String server_url = defaultProps.getString("server.url", null);
        if (server_url != null) {
            if (server_url.endsWith("/"))
                _jobExecutorManager.setRuntimeProperty(AppCommon.DEFAULT_LOG_URL_PREFIX, server_url + "logs?file=");
            else
                _jobExecutorManager.setRuntimeProperty(AppCommon.DEFAULT_LOG_URL_PREFIX, server_url + "/logs?file=");
        }

        this._velocityEngine = configureVelocityEngine(enableDevMode);
    }

    /**
     * Creates {@code dir} (including any missing parents) when it does not
     * already exist.
     *
     * @throws IOException if the directory cannot be created (the extra
     *                     isDirectory() check tolerates a concurrent creator)
     */
    private static void ensureDirectoryExists(File dir) throws IOException {
        if (!dir.exists() && !dir.mkdirs() && !dir.isDirectory()) {
            throw new IOException("Unable to create directory: " + dir);
        }
    }

这部分将系统所需的各个部分,各个属性都进行了初始化的操作。


注: 本文是由本人原创,如需引用请注明出处:     http://blog.csdn.net/zhang__bing/article/details/8721834

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值