
Usage and code examples of the org.apache.helix.task.WorkflowConfig.isTerminable() method

Reposted · Author: 知者 · Last updated: 2024-03-24 02:15:05

This article collects Java code examples of the org.apache.helix.task.WorkflowConfig.isTerminable() method and shows how it is used in practice. The examples are drawn from selected projects on GitHub, Stack Overflow, Maven, and similar sources, and are intended as practical references. Details of WorkflowConfig.isTerminable() are as follows:
Package: org.apache.helix.task
Class: WorkflowConfig
Method: isTerminable

About WorkflowConfig.isTerminable

No description is given in the original article. Judging from the usages below, isTerminable() reports whether the workflow is a regular, one-shot workflow that completes once all of its jobs reach a final state; it returns false for non-terminable workflows such as job queues, which keep accepting jobs until they are explicitly deleted.
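
Before turning to the project code below, here is a minimal usage sketch that is not taken from any of the quoted projects. It assumes you already have a connected HelixManager (called manager here) and fetches the WorkflowConfig through the standard TaskDriver accessor before branching on isTerminable(); the class and variable names are placeholders.

import org.apache.helix.HelixManager;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.WorkflowConfig;

public class TerminableCheckExample {
  /**
   * Prints whether the given workflow can terminate on its own.
   * "manager" is assumed to be already connected to the cluster.
   */
  public static void describeWorkflow(HelixManager manager, String workflowName) {
    TaskDriver driver = new TaskDriver(manager);
    WorkflowConfig cfg = driver.getWorkflowConfig(workflowName);
    if (cfg == null) {
      throw new IllegalArgumentException("Workflow " + workflowName + " does not exist!");
    }
    if (cfg.isTerminable()) {
      // A regular (one-shot) workflow: it completes once all of its jobs reach a final state.
      System.out.println(workflowName + " is terminable and will finish on its own.");
    } else {
      // A non-terminable workflow, e.g. a job queue: it keeps accepting jobs until deleted.
      System.out.println(workflowName + " is non-terminable (for example, a job queue).");
    }
  }
}

This distinction is why several of the queue-oriented examples below reject a config whose isTerminable() returns true with the message "is not a queue!".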

Code examples

Code example source: apache/helix

/**
 * Checks if the workflow has completed.
 * @param ctx Workflow context containing job states
 * @param cfg Workflow config containing set of jobs
 * @return returns true if all tasks are {@link TaskState#COMPLETED}, false otherwise.
 */
private static boolean isWorkflowComplete(WorkflowContext ctx, WorkflowConfig cfg) {
 if (!cfg.isTerminable()) {
  return false;
 }
 for (String job : cfg.getJobDag().getAllNodes()) {
  if (ctx.getJobState(job) != TaskState.COMPLETED) {
   return false;
  }
 }
 return true;
}

Code example source: apache/helix

/**
 * Remove all jobs that are in final states (ABORTED, FAILED, COMPLETED) from the job queue. The
 * job config, job context will be removed from Zookeeper.
 *
 * @param queue The name of job queue
 */
public void cleanupQueue(String queue) {
 WorkflowConfig workflowConfig = TaskUtil.getWorkflowConfig(_accessor, queue);
 if (workflowConfig == null) {
  throw new IllegalArgumentException("Queue " + queue + " does not yet exist!");
 }
 boolean isTerminable = workflowConfig.isTerminable();
 if (isTerminable) {
  throw new IllegalArgumentException(queue + " is not a queue!");
 }
 WorkflowContext wCtx = TaskUtil.getWorkflowContext(_propertyStore, queue);
 if (wCtx == null || wCtx.getWorkflowState() == null) {
  throw new IllegalStateException("Queue " + queue + " does not have a valid work state!");
 }
 Set<String> jobs = new HashSet<String>();
 for (String jobNode : workflowConfig.getJobDag().getAllNodes()) {
  TaskState curState = wCtx.getJobState(jobNode);
  if (curState != null && (curState == TaskState.ABORTED || curState == TaskState.COMPLETED
    || curState == TaskState.FAILED)) {
   jobs.add(jobNode);
  }
 }
 TaskUtil.removeJobsFromWorkflow(_accessor, _propertyStore, queue, jobs, true);
}

Code example source: org.apache.helix/helix-core

/**
 * Clean up a workflow. This removes the workflow config, idealstate, externalview and workflow
 * contexts associated with this workflow, and all jobs information, including their configs,
 * context, IS and EV.
 */
private void cleanupWorkflow(String workflow, WorkflowConfig workflowcfg) {
 LOG.info("Cleaning up workflow: " + workflow);
 if (workflowcfg.isTerminable() || workflowcfg.getTargetState() == TargetState.DELETE) {
  Set<String> jobs = workflowcfg.getJobDag().getAllNodes();
  // Remove all pending timer tasks for this workflow if exists
  _rebalanceScheduler.removeScheduledRebalance(workflow);
  for (String job : jobs) {
   _rebalanceScheduler.removeScheduledRebalance(job);
  }
  if (!TaskUtil.removeWorkflow(_manager.getHelixDataAccessor(),
    _manager.getHelixPropertyStore(), workflow, jobs)) {
   LOG.warn("Failed to clean up workflow " + workflow);
  }
 } else {
  LOG.info("Did not clean up workflow " + workflow
    + " because neither the workflow is non-terminable nor is set to DELETE.");
 }
}

Code example source: apache/helix

@Override
 public void execute(ClusterEvent event) {
  ClusterDataCache clusterDataCache = event.getAttribute(AttributeName.ClusterDataCache.name());
  HelixManager manager = event.getAttribute(AttributeName.helixmanager.name());

  if (clusterDataCache == null || manager == null) {
   LOG.warn(
     "ClusterDataCache or HelixManager is null for event {}({}) in cluster {}. Skip TaskGarbageCollectionStage.",
     event.getEventId(), event.getEventType(), event.getClusterName());
   return;
  }

  Set<WorkflowConfig> existingWorkflows =
    new HashSet<>(clusterDataCache.getWorkflowConfigMap().values());
  for (WorkflowConfig workflowConfig : existingWorkflows) {
   // clean up the expired jobs if it is a queue.
   if (workflowConfig != null && (!workflowConfig.isTerminable() || workflowConfig
     .isJobQueue())) {
    try {
     TaskUtil.purgeExpiredJobs(workflowConfig.getWorkflowId(), workflowConfig,
       clusterDataCache.getWorkflowContext(workflowConfig.getWorkflowId()), manager,
       _rebalanceScheduler);
    } catch (Exception e) {
     LOG.warn(String.format("Failed to purge job for workflow %s with reason %s",
       workflowConfig.getWorkflowId(), e.toString()));
    }
   }
  }
 }
}

Code example source: apache/helix

/**
 * Clean up a workflow. This removes the workflow config, idealstate, externalview and workflow
 * contexts associated with this workflow, and all jobs information, including their configs,
 * context, IS and EV.
 */
private void cleanupWorkflow(String workflow) {
 LOG.info("Cleaning up workflow: " + workflow);
 WorkflowConfig workflowcfg = _taskDataCache.getWorkflowConfig(workflow);
 if (workflowcfg.isTerminable() || workflowcfg.getTargetState() == TargetState.DELETE) {
  Set<String> jobs = workflowcfg.getJobDag().getAllNodes();
  // Remove all pending timer tasks for this workflow if exists
  _rebalanceScheduler.removeScheduledRebalance(workflow);
  for (String job : jobs) {
   _rebalanceScheduler.removeScheduledRebalance(job);
  }
  if (!TaskUtil.removeWorkflow(_manager.getHelixDataAccessor(),
    _manager.getHelixPropertyStore(), workflow, jobs)) {
   LOG.warn("Failed to clean up workflow " + workflow);
  } else {
   // Only remove from the cache when the whole workflow removal succeeds. Otherwise, the batch
   // write would clean all the contexts even though the Configs and IdealStates still exist,
   // and then all the workflows and jobs would be rescheduled again.
   removeContexts(workflow, jobs, _taskDataCache);
  }
 } else {
  LOG.info("Did not clean up workflow " + workflow
    + " because the workflow is neither terminable nor set to DELETE.");
 }
}

Code example source: com.linkedin.gobblin/gobblin-cluster

throw new IllegalArgumentException("Queue " + queueName + " does not yet exist!");
if (workflowCfg.isTerminable()) {
 throw new IllegalArgumentException(queueName + " is not a queue!");

Code example source: apache/helix

if (workflowCfg.isTerminable()) {
 throw new IllegalArgumentException(queue + " is not a queue!");

Code example source: apache/helix

if (currentConfig.isTerminable()) {
 throw new HelixException(
   "Workflow " + workflow + " is terminable, not allow to change its configuration!");

Code example source: apache/helix

if (!incomplete && cfg.isTerminable()) {
 ctx.setWorkflowState(TaskState.COMPLETED);
 return true;

Code example source: org.apache.helix/helix-core

if (lastInWorkflow && (cfg.isTerminable() || cfg.getTargetState() == TargetState.DELETE)) {

Code example source: apache/helix

public WorkflowConfig(WorkflowConfig cfg, String workflowId) {
 this(workflowId, cfg.getJobDag(), cfg.getParallelJobs(), cfg.getTargetState(), cfg.getExpiry(),
   cfg.getFailureThreshold(), cfg.isTerminable(), cfg.getScheduleConfig(), cfg.getCapacity(),
   cfg.getWorkflowType(), cfg.isJobQueue(), cfg.getJobTypes(), cfg.getJobPurgeInterval(),
   cfg.isAllowOverlapJobAssignment(), cfg.getTimeout());
}

Code example source: apache/helix

public Builder(WorkflowConfig workflowConfig) {
 _workflowId = workflowConfig.getWorkflowId();
 _taskDag = workflowConfig.getJobDag();
 _parallelJobs = workflowConfig.getParallelJobs();
 _targetState = workflowConfig.getTargetState();
 _expiry = workflowConfig.getExpiry();
 _isTerminable = workflowConfig.isTerminable();
 _scheduleConfig = workflowConfig.getScheduleConfig();
 _capacity = workflowConfig.getCapacity();
 _failureThreshold = workflowConfig.getFailureThreshold();
 _workflowType = workflowConfig.getWorkflowType();
 _isJobQueue = workflowConfig.isJobQueue();
 _jobTypes = workflowConfig.getJobTypes();
 _jobPurgeInterval = workflowConfig.getJobPurgeInterval();
 _allowOverlapJobAssignment = workflowConfig.isAllowOverlapJobAssignment();
 _timeout = workflowConfig.getTimeout();
}

Code example source: org.apache.helix/helix-core

if (!workflowCfg.isTerminable() || workflowCfg.isJobQueue()) {
 purgeExpiredJobs(workflow, workflowCfg, workflowCtx);
