Azkaban是一個Hadoop批處理調度器,可以通過訪問其前台展示頁面來進行部署、運行job等操作。
//IndexServlet.java @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { /* set runtime properties from request and response */ super.setRuntimeProperties(req, resp); AzkabanApplication app = getApplication(); String action = getParam(req, "action"); if ("loadjobs".equals(action)) { resp.setContentType("application/json"); String folder = getParam(req, "folder"); resp.getWriter().print(getJSONJobsForFolder(app.getAllFlows(), folder)); resp.getWriter().flush(); return; } else if("unschedule".equals(action)) { String jobid = getParam(req, "job"); app.getScheduleManager().removeScheduledJob(jobid); } else if("cancel".equals(action)) { cancelJob(app, req); } else if("schedule".equals(action)) { String redirect = scheduleJobs(app, req, resp); if (!redirect.isEmpty()) { resp.sendRedirect(redirect); return; } } else { throw new ServletException("Unknown action: " + action); } resp.sendRedirect(req.getContextPath()); }
從檔案名稱就可以看出,這個是整個系統啟動並執行的源頭,一切從這裡開始……
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
AzkabanApplication中是一系列用於整個系統初始化的代碼,接下來就來看一下吧~
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
/**
 * Boot-straps the whole Azkaban application: validates/creates the working
 * directories, loads default properties, wires up the job factory, managers
 * (job, flow, executor, schedule), mailer and the Velocity engine.
 *
 * @param jobDirs       directories that hold job definitions (must be non-empty)
 * @param logDir        directory for job logs (created if missing)
 * @param tempDir       scratch directory (created if missing)
 * @param enableDevMode passed through to the Velocity engine configuration
 * @throws IOException on I/O failures during initialization
 */
public AzkabanApplication(List<File> jobDirs, File logDir, File tempDir, boolean enableDevMode) throws IOException {
    this._jobDirs = Utils.nonNull(jobDirs);
    this._logsDir = Utils.nonNull(logDir);
    this._tempDir = Utils.nonNull(tempDir);

    // Create the log/temp directories on first run.
    // NOTE(review): mkdirs() return values are ignored here — a failed
    // creation only surfaces later when the directory is first used.
    if(!this._logsDir.exists())
        this._logsDir.mkdirs();
    if(!this._tempDir.exists())
        this._tempDir.mkdirs();
    for(File jobDir: _jobDirs) {
        if(!jobDir.exists()) {
            logger.warn("Job directory " + jobDir + " does not exist. Creating.");
            jobDir.mkdirs();
        }
    }
    // An empty jobDirs list is rejected (checked after the loop, which is a
    // no-op for an empty list, so the outcome is the same).
    if(jobDirs.size() < 1)
        throw new IllegalArgumentException("No job directory given.");

    // Load default properties from the job directories; only files with the
    // ".properties" / ".schema" suffixes are considered.
    Props defaultProps = PropsUtils.loadPropsInDirs(_jobDirs, ".properties", ".schema");

    // Build the base class loader (per the original author's note, this
    // mainly pulls in HADOOP_HOME — can't be confirmed from this view).
    _baseClassLoader = getBaseClassloader();

    // Optionally pin both Joda-Time and JDK default time zones from config.
    String defaultTimezoneID = defaultProps.getString(DEFAULT_TIMEZONE_ID, null);
    if (defaultTimezoneID != null) {
        DateTimeZone.setDefault(DateTimeZone.forID(defaultTimezoneID));
        TimeZone.setDefault(TimeZone.getTimeZone(defaultTimezoneID));
    }

    // Azkaban supports job lock settings; initialize the permit (lock) manager.
    NamedPermitManager permitManager = getNamedPermitManager(defaultProps);

    // Register the built-in job types; "java" is the default type name.
    JobWrappingFactory factory = new JobWrappingFactory(
        permitManager,
        new ReadWriteLockManager(),
        _logsDir.getAbsolutePath(),
        "java",
        new ImmutableMap.Builder<String, Class<? extends Job>>()
            .put("java", JavaJob.class)
            .put("command", ProcessJob.class)
            .put("javaprocess", JavaProcessJob.class)
            .put("pig", PigProcessJob.class)
            .put("propertyPusher", NoopJob.class)
            .put("python", PythonJob.class)
            .put("ruby", RubyJob.class)
            .put("script", ScriptJob.class).build());

    _hdfsUrl = defaultProps.getString("hdfs.instance.url", null);
    _jobManager = new JobManager(factory,
                                 _logsDir.getAbsolutePath(),
                                 defaultProps,
                                 _jobDirs,
                                 _baseClassLoader);

    // Mail notifications; credentials default to empty strings.
    _mailer = new Mailman(defaultProps.getString("mail.host", "localhost"),
                          defaultProps.getString("mail.user", ""),
                          defaultProps.getString("mail.password", ""));

    String failureEmail = defaultProps.getString("job.failure.email", null);
    String successEmail = defaultProps.getString("job.success.email", null);
    int schedulerThreads = defaultProps.getInt("scheduler.threads", 50);
    _instanceName = defaultProps.getString(INSTANCE_NAME, "");

    // Schedule/backup files and the executions store are rooted in the
    // FIRST job directory unless overridden in config.
    final File initialJobDir = _jobDirs.get(0);
    File schedule = getScheduleFile(defaultProps, initialJobDir);
    File backup = getBackupFile(defaultProps, initialJobDir);

    File executionsStorageDir = new File(
        defaultProps.getString("azkaban.executions.storage.dir",
                               initialJobDir.getAbsolutePath() + "/executions")
    );
    if (!executionsStorageDir.exists())
        executionsStorageDir.mkdirs();

    // Recover the highest execution id from the previous run so new
    // executions continue the sequence.
    long lastExecutionId = getLastExecutionId(executionsStorageDir);
    logger.info(String.format("Using path[%s] for storing executions.", executionsStorageDir));
    logger.info(String.format("Last known execution id was [%s]", lastExecutionId));

    // (De)serializers for persisting flows and flow executions to disk.
    final ExecutableFlowSerializer flowSerializer = new DefaultExecutableFlowSerializer();
    final ExecutableFlowDeserializer flowDeserializer =
        new DefaultExecutableFlowDeserializer(_jobManager, factory);
    FlowExecutionSerializer flowExecutionSerializer =
        new FlowExecutionSerializer(flowSerializer);
    FlowExecutionDeserializer flowExecutionDeserializer =
        new FlowExecutionDeserializer(flowDeserializer);

    // Flow manager wrapped in a cache (default capacity 1000) — the core of
    // the whole initialization, per the original author's note.
    _allFlows = new CachingFlowManager(
        new RefreshableFlowManager(
            _jobManager,
            flowExecutionSerializer,
            flowExecutionDeserializer,
            executionsStorageDir,
            lastExecutionId
        ),
        defaultProps.getInt("azkaban.flow.cache.size", 1000)
    );
    _jobManager.setFlowManager(_allFlows);

    _jobExecutorManager = new JobExecutorManager(
        _allFlows,
        _jobManager,
        _mailer,
        failureEmail,
        successEmail,
        schedulerThreads
    );

    this._schedulerManager = new ScheduleManager(_jobExecutorManager,
        new LocalFileScheduleLoader(schedule, backup));

    // Pre-compute the log URL prefix, normalizing the trailing slash of
    // the configured server URL.
    String server_url = defaultProps.getString("server.url", null);
    if (server_url != null) {
        if (server_url.endsWith("/"))
            _jobExecutorManager.setRuntimeProperty(AppCommon.DEFAULT_LOG_URL_PREFIX,
                                                   server_url + "logs?file=");
        else
            _jobExecutorManager.setRuntimeProperty(AppCommon.DEFAULT_LOG_URL_PREFIX,
                                                   server_url + "/logs?file=");
    }

    this._velocityEngine = configureVelocityEngine(enableDevMode);
}
這部分將系統所需的各個部分,各個屬性都進行了初始化的操作。