How a JobInProgress is initialized into TaskInProgress objects


In http://blog.csdn.net/lizhe10177/article/details/17763879 we traced how a job is submitted; in the end, listener.jobAdded(job) hands the job to the listener's initialization queue:

  public void jobAdded(JobInProgress job) {
    synchronized (jobInitQueue) {
      jobInitQueue.add(job);
      resortInitQueue();
      jobInitQueue.notifyAll();
    }
  }
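jobAdded also calls resortInitQueue(), which is not shown above. Here is a minimal sketch of what it does, assuming the Hadoop 1.x ordering (by job priority first, then by start time), so that the JobInitManager below always dequeues the most urgent job from index 0:

  // Sketch of EagerTaskInitializationListener.resortInitQueue() (Hadoop 1.x).
  // Assumption: jobs are ordered by priority, then by start time.
  // Requires java.util.Comparator and java.util.Collections.
  private void resortInitQueue() {
    Comparator<JobInProgress> comp = new Comparator<JobInProgress>() {
      public int compare(JobInProgress o1, JobInProgress o2) {
        int res = o1.getPriority().compareTo(o2.getPriority());
        if (res == 0) {
          if (o1.getStartTime() < o2.getStartTime()) {
            res = -1;
          } else if (o1.getStartTime() > o2.getStartTime()) {
            res = 1;
          }
        }
        return res;
      }
    };
    synchronized (jobInitQueue) {
      Collections.sort(jobInitQueue, comp);
    }
  }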


So when do the jobs sitting in jobInitQueue actually get initialized into TaskInProgress objects?

As described in http://blog.csdn.net/lizhe10177/article/details/18901043, once the JobTracker is up, its offerService() method calls taskScheduler.start(). For the default JobQueueTaskScheduler, start() is:

  public synchronized void start() throws IOException {
    super.start();
    taskTrackerManager.addJobInProgressListener(jobQueueJobInProgressListener);
    eagerTaskInitializationListener.setTaskTrackerManager(taskTrackerManager);
    eagerTaskInitializationListener.start();
    taskTrackerManager.addJobInProgressListener(
        eagerTaskInitializationListener);
  }
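The taskTrackerManager here is the JobTracker itself (JobTracker implements the TaskTrackerManager interface), and registering a listener means the JobTracker will call its jobAdded() whenever a job is submitted. A condensed sketch of that dispatch; the field name follows Hadoop 1.x, but the method name notifyJobAdded is hypothetical:

  // Condensed sketch of how JobTracker fans a newly submitted job out to all
  // registered JobInProgressListeners. Requires java.util.List,
  // java.util.concurrent.CopyOnWriteArrayList, java.io.IOException.
  private final List<JobInProgressListener> jobInProgressListeners =
      new CopyOnWriteArrayList<JobInProgressListener>();

  public void addJobInProgressListener(JobInProgressListener listener) {
    jobInProgressListeners.add(listener);
  }

  // Hypothetical name; the real code inlines this in the submission path.
  private void notifyJobAdded(JobInProgress job) throws IOException {
    for (JobInProgressListener listener : jobInProgressListeners) {
      // EagerTaskInitializationListener.jobAdded() enqueues the job here.
      listener.jobAdded(job);
    }
  }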


eagerTaskInitializationListener.start() in turn does the following:

  public void start() throws IOException {
    this.jobInitManagerThread = new Thread(jobInitManager, "jobInitManager");
    jobInitManagerThread.setDaemon(true);
    this.jobInitManagerThread.start();
  }


Calling this.jobInitManagerThread.start() starts a daemon thread whose entry point is JobInitManager.run():

  class JobInitManager implements Runnable {

    public void run() {
      JobInProgress job = null;
      while (true) {
        try {
          synchronized (jobInitQueue) {
            while (jobInitQueue.isEmpty()) {
              jobInitQueue.wait();
            }
            job = jobInitQueue.remove(0);
          }
          threadPool.execute(new InitJob(job));
        } catch (InterruptedException t) {
          LOG.info("JobInitManagerThread interrupted.");
          break;
        }
      }
      LOG.info("Shutting down thread pool");
      threadPool.shutdownNow();
    }
  }
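Note how jobAdded() (which calls notifyAll()) and this loop (which waits on the same jobInitQueue lock) form a classic guarded-wait producer/consumer pair. A self-contained toy version of the same pattern, with all names hypothetical:

import java.util.ArrayList;
import java.util.List;

// Toy reproduction of the jobAdded / JobInitManager handshake: a producer
// appends work items and calls notifyAll(); a daemon consumer blocks in
// wait() until the queue is non-empty.
public class InitQueueDemo {
  private static final List<String> queue = new ArrayList<String>();

  public static void main(String[] args) throws InterruptedException {
    Thread consumer = new Thread(new Runnable() {
      public void run() {
        try {
          while (true) {
            String item;
            synchronized (queue) {
              while (queue.isEmpty()) {
                queue.wait();          // mirrors jobInitQueue.wait()
              }
              item = queue.remove(0);  // mirrors jobInitQueue.remove(0)
            }
            System.out.println("initializing " + item);
          }
        } catch (InterruptedException e) {
          // mirrors the break in JobInitManager.run()
        }
      }
    }, "jobInitManager-demo");
    consumer.setDaemon(true);
    consumer.start();

    synchronized (queue) {             // mirrors jobAdded()
      queue.add("job_0001");
      queue.notifyAll();
    }
    Thread.sleep(200);                 // give the consumer time to run
  }
}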


For every dequeued job, threadPool.execute(new InitJob(job)) hands the actual initialization to a thread pool; each unit of work is an InitJob:

  class InitJob implements Runnable {

    private JobInProgress job;

    public InitJob(JobInProgress job) {
      this.job = job;
    }

    public void run() {
      ttm.initJob(job);
    }
  }


Its run method invokes initJob on ttm, the TaskTrackerManager set earlier in start(), which in practice is the JobTracker itself:

  public void initJob(JobInProgress job) {
    if (null == job) {
      LOG.info("Init on null job is not valid");
      return;
    }

    try {
      JobStatus prevStatus = (JobStatus)job.getStatus().clone();
      LOG.info("Initializing " + job.getJobID());
      job.initTasks();
      // Inform the listeners if the job state has changed
      // Note : that the job will be in PREP state.
      JobStatus newStatus = (JobStatus)job.getStatus().clone();
      if (prevStatus.getRunState() != newStatus.getRunState()) {
        JobStatusChangeEvent event =
          new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus,
              newStatus);
        synchronized (JobTracker.this) {
          updateJobInProgressListeners(event);
        }
      }
    } catch (KillInterruptedException kie) {
      // If job was killed during initialization, job state will be KILLED
      LOG.error("Job initialization interrupted:\n" +
          StringUtils.stringifyException(kie));
      killJob(job);
    } catch (Throwable t) {
      // If the job initialization is failed, job state will be FAILED
      LOG.error("Job initialization failed:\n" +
          StringUtils.stringifyException(t));
      failJob(job);
    }
  }
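updateJobInProgressListeners simply fans the status-change event out to the same listeners that were registered in the scheduler's start() above. A condensed sketch, approximating the Hadoop 1.x body:

  // Condensed sketch of JobTracker.updateJobInProgressListeners: every
  // registered listener is told that the job's run state changed.
  private void updateJobInProgressListeners(JobChangeEvent event) {
    for (JobInProgressListener listener : jobInProgressListeners) {
      listener.jobUpdated(event);
    }
  }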


Finally, initJob delegates to JobInProgress's own initTasks() method:

  public synchronized void initTasks()
  throws IOException, KillInterruptedException {
    if (tasksInited.get() || isComplete()) {
      return;
    }
    synchronized(jobInitKillStatus){
      if(jobInitKillStatus.killed || jobInitKillStatus.initStarted) {
        return;
      }
      jobInitKillStatus.initStarted = true;
    }

    LOG.info("Initializing " + jobId);

    // log job info
    JobHistory.JobInfo.logSubmitted(getJobID(), conf, jobFile.toString(),
                                    this.startTime, hasRestarted());
    // log the job priority
    setPriority(this.priority);

    //
    // read input splits and create a map per a split
    //
    String jobFile = profile.getJobFile();

    Path sysDir = new Path(this.jobtracker.getSystemDir());
    FileSystem fs = sysDir.getFileSystem(conf);
    DataInputStream splitFile =
      fs.open(new Path(conf.get("mapred.job.split.file")));
    JobClient.RawSplit[] splits;
    try {
      splits = JobClient.readSplitFile(splitFile);
    } finally {
      splitFile.close();
    }
    numMapTasks = splits.length;

    // if the number of splits is larger than a configured value
    // then fail the job.
    int maxTasks = jobtracker.getMaxTasksPerJob();
    if (maxTasks > 0 && numMapTasks + numReduceTasks > maxTasks) {
      throw new IOException(
                "The number of tasks for this job " +
                (numMapTasks + numReduceTasks) +
                " exceeds the configured limit " + maxTasks);
    }
    jobtracker.getInstrumentation().addWaiting(
        getJobID(), numMapTasks + numReduceTasks);

    maps = new TaskInProgress[numMapTasks];
    for(int i=0; i < numMapTasks; ++i) {
      inputLength += splits[i].getDataLength();
      maps[i] = new TaskInProgress(jobId, jobFile,
                                   splits[i],
                                   jobtracker, conf, this, i);
    }
    LOG.info("Input size for job " + jobId + " = " + inputLength
        + ". Number of splits = " + splits.length);
    if (numMapTasks > 0) {
      nonRunningMapCache = createCache(splits, maxLevel);
    }

    // set the launch time
    this.launchTime = System.currentTimeMillis();

    //
    // Create reduce tasks
    //
    this.reduces = new TaskInProgress[numReduceTasks];
    for (int i = 0; i < numReduceTasks; i++) {
      reduces[i] = new TaskInProgress(jobId, jobFile,
                                      numMapTasks, i,
                                      jobtracker, conf, this);
      nonRunningReduces.add(reduces[i]);
    }

    // Calculate the minimum number of maps to be complete before
    // we should start scheduling reduces
    completedMapsForReduceSlowstart =
      (int)Math.ceil(
          (conf.getFloat("mapred.reduce.slowstart.completed.maps",
                         DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART) *
           numMapTasks));

    // create two cleanup tips, one map and one reduce.
    cleanup = new TaskInProgress[2];

    // cleanup map tip. This map doesn't use any splits. Just assign an empty
    // split.
    JobClient.RawSplit emptySplit = new JobClient.RawSplit();
    cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit,
            jobtracker, conf, this, numMapTasks);
    cleanup[0].setJobCleanupTask();

    // cleanup reduce tip.
    cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                       numReduceTasks, jobtracker, conf, this);
    cleanup[1].setJobCleanupTask();

    // create two setup tips, one map and one reduce.
    setup = new TaskInProgress[2];

    // setup map tip. This map doesn't use any split. Just assign an empty
    // split.
    setup[0] = new TaskInProgress(jobId, jobFile, emptySplit,
            jobtracker, conf, this, numMapTasks + 1);
    setup[0].setJobSetupTask();

    // setup reduce tip.
    setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks,
                       numReduceTasks + 1, jobtracker, conf, this);
    setup[1].setJobSetupTask();

    synchronized(jobInitKillStatus){
      jobInitKillStatus.initDone = true;
      if(jobInitKillStatus.killed) {
        throw new KillInterruptedException("Job " + jobId + " killed in init");
      }
    }

    tasksInited.set(true);
    JobHistory.JobInfo.logInited(profile.getJobID(), this.launchTime,
                                 numMapTasks, numReduceTasks);
  }
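One detail worth calling out is the reduce slowstart computation above: with the Hadoop 1.x default of 0.05 for mapred.reduce.slowstart.completed.maps and, say, 200 map tasks, reduces only become schedulable after ceil(0.05 * 200) = 10 maps complete. A standalone illustration of just that arithmetic (class name and hard-coded values are made up; the formula matches initTasks()):

// Standalone illustration of completedMapsForReduceSlowstart.
public class SlowstartDemo {
  public static void main(String[] args) {
    float slowstart = 0.05f;  // mapred.reduce.slowstart.completed.maps default
    int numMapTasks = 200;    // hypothetical job size
    int completedMapsForReduceSlowstart =
        (int) Math.ceil(slowstart * numMapTasks);
    System.out.println(completedMapsForReduceSlowstart); // prints 10
  }
}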

