Use of org.apache.helix.PropertyKey in project helix by apache.
From class TestHelixDataAccessor, method beforeClass:
@BeforeClass
public void beforeClass() {
  _zkClient = new MockZkClient(ZK_ADDR);
  baseDataAccessor = new ZkBaseDataAccessor<>(_zkClient);
  accessor = new ZKHelixDataAccessor("HELIX", baseDataAccessor);
  Map<String, HelixProperty> paths = new TreeMap<>();
  propertyKeys = new ArrayList<>();
  for (int i = 0; i < 5; i++) {
    PropertyKey key = accessor.keyBuilder().idealStates("RESOURCE" + i);
    propertyKeys.add(key);
    paths.put(key.getPath(), new HelixProperty("RESOURCE" + i));
    accessor.setProperty(key, paths.get(key.getPath()));
  }
  // All five keys are readable, so a strict read (throwException = true) succeeds here.
  List<HelixProperty> data = accessor.getProperty(new ArrayList<>(propertyKeys), true);
  Assert.assertEquals(data.size(), 5);
  // Add a sixth key whose ZK data is nulled out in the mock client, so reads of
  // this node fail; the read tests below rely on this.
  PropertyKey key = accessor.keyBuilder().idealStates("RESOURCE6");
  propertyKeys.add(key);
  _zkClient.putData(key.getPath(), null);
}
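The fixture above shows the core PropertyKey pattern: accessor.keyBuilder() returns a builder scoped to the cluster, and each builder method (here idealStates(resourceName)) yields a key whose getPath() resolves to a concrete ZooKeeper node. Below is a minimal sketch of the same pattern outside the test fixture, assuming an already-connected Helix ZkClient configured with a ZNRecordSerializer (the MockZkClient above stands in for one); the helper name and its arguments are illustrative, not part of the test.
static List<HelixProperty> readIdealStates(ZkClient zkClient, String clusterName, List<String> resources) {
  ZkBaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<>(zkClient);
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, baseAccessor);
  List<PropertyKey> keys = new ArrayList<>();
  for (String resource : resources) {
    // Each key maps to a ZK path such as /<clusterName>/IDEALSTATES/<resource>
    keys.add(accessor.keyBuilder().idealStates(resource));
  }
  // false: do not throw if some keys cannot be read; true would abort with
  // HelixMetaDataAccessException on any failed read, as the test below shows.
  return accessor.getProperty(keys, false);
}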
Use of org.apache.helix.PropertyKey in project helix by apache.
From class TestHelixDataAccessor, method testHelixDataAccessorReadData:
@Test
public void testHelixDataAccessorReadData() {
  accessor.getProperty(new ArrayList<>(propertyKeys), false);
  try {
    accessor.getProperty(new ArrayList<>(propertyKeys), true);
    Assert.fail();
  } catch (HelixMetaDataAccessException ex) {
    // expected: the key for RESOURCE6 cannot be read
  }
  PropertyKey idealStates = accessor.keyBuilder().idealStates();
  accessor.getChildValues(idealStates, false);
  try {
    accessor.getChildValues(idealStates, true);
    Assert.fail();
  } catch (HelixMetaDataAccessException ex) {
    // expected
  }
  accessor.getChildValuesMap(idealStates, false);
  try {
    accessor.getChildValuesMap(idealStates, true);
    Assert.fail();
  } catch (HelixMetaDataAccessException ex) {
    // expected
  }
}
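The test exercises the throwException flag on batched reads: with true, any unreadable node aborts the call with HelixMetaDataAccessException; with false, the call completes on a best-effort basis. A hedged sketch of how application code might use the same flag as a fallback follows; readWithFallback is a hypothetical helper, not part of the test, and accessor is assumed to be an initialized ZKHelixDataAccessor.
List<HelixProperty> readWithFallback(HelixDataAccessor accessor, List<PropertyKey> keys) {
  try {
    // Strict read: fails fast if any key cannot be read
    return accessor.getProperty(new ArrayList<>(keys), true);
  } catch (HelixMetaDataAccessException ex) {
    // Best-effort read: does not throw on unreadable keys
    return accessor.getProperty(new ArrayList<>(keys), false);
  }
}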
Use of org.apache.helix.PropertyKey in project helix by apache.
From class TestBucketizedResource, method testBucketizedResource:
@Test()
public void testBucketizedResource() {
  // Logger.getRootLogger().setLevel(Level.INFO);
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  List<String> instanceNames = Arrays.asList("localhost_12918", "localhost_12919", "localhost_12920",
      "localhost_12921", "localhost_12922");
  int n = instanceNames.size();
  String dbName = "TestDB0";
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  MockParticipantManager[] participants = new MockParticipantManager[5];
  setupCluster(clusterName, instanceNames, dbName, 3, 10, 1);
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, _baseAccessor);
  ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName);
  controller.syncStart();
  // start participants
  for (int i = 0; i < n; i++) {
    participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceNames.get(i));
    participants[i].syncStart();
  }
  PropertyKey evKey = accessor.keyBuilder().externalView(dbName);
  boolean result =
      ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
  Assert.assertTrue(result);
  HelixClusterVerifier _clusterVerifier =
      new BestPossibleExternalViewVerifier.Builder(clusterName).setZkAddr(ZK_ADDR).build();
  Assert.assertTrue(_clusterVerifier.verify());
  ExternalView ev = accessor.getProperty(evKey);
  int v1 = ev.getRecord().getVersion();
  // disable the participant
  _gSetupTool.getClusterManagementTool().enableInstance(clusterName, participants[0].getInstanceName(), false);
  // wait for change in EV
  Assert.assertTrue(_clusterVerifier.verify());
  // read the version in EV and verify it was bumped
  ev = accessor.getProperty(evKey);
  int v2 = ev.getRecord().getVersion();
  Assert.assertTrue(v2 > v1);
  // clean up
  controller.syncStop();
  for (int i = 0; i < n; i++) {
    participants[i].syncStop();
  }
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
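The heart of this test reads the external view through a PropertyKey and compares the record versions before and after disabling an instance. Below is a small sketch of that read in isolation, assuming an initialized ZKHelixDataAccessor; the helper name is illustrative only.
int readExternalViewVersion(ZKHelixDataAccessor accessor, String dbName) {
  PropertyKey evKey = accessor.keyBuilder().externalView(dbName);
  ExternalView ev = accessor.getProperty(evKey);
  // getVersion() reflects the version stamped on the ZNRecord when it was read,
  // so a larger value after disabling the instance means the view was rewritten.
  return ev == null ? -1 : ev.getRecord().getVersion();
}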
Use of org.apache.helix.PropertyKey in project helix by apache.
From class DeprecatedTaskRebalancer, method computeBestPossiblePartitionState:
@Override
public ResourceAssignment computeBestPossiblePartitionState(ClusterDataCache clusterData, IdealState taskIs,
    Resource resource, CurrentStateOutput currStateOutput) {
  final String resourceName = resource.getResourceName();
  LOG.debug("Compute best possible partition state for resource: " + resourceName);
  // Fetch job configuration
  JobConfig jobCfg = (JobConfig) clusterData.getResourceConfig(resourceName);
  if (jobCfg == null) {
    LOG.debug("Job configuration is NULL for " + resourceName);
    return emptyAssignment(resourceName, currStateOutput);
  }
  String workflowResource = jobCfg.getWorkflow();
  // Fetch workflow configuration and context
  WorkflowConfig workflowCfg = clusterData.getWorkflowConfig(workflowResource);
  if (workflowCfg == null) {
    LOG.debug("Workflow configuration is NULL for " + resourceName);
    return emptyAssignment(resourceName, currStateOutput);
  }
  WorkflowContext workflowCtx = clusterData.getWorkflowContext(workflowResource);
  // Initialize workflow context if needed
  if (workflowCtx == null) {
    workflowCtx = new WorkflowContext(new ZNRecord(TaskUtil.WORKFLOW_CONTEXT_KW));
    workflowCtx.setStartTime(System.currentTimeMillis());
    workflowCtx.setName(workflowResource);
    LOG.info("Workflow context for " + resourceName + " created!");
  }
  // Check ancestor job status
  int notStartedCount = 0;
  int inCompleteCount = 0;
  for (String ancestor : workflowCfg.getJobDag().getAncestors(resourceName)) {
    TaskState jobState = workflowCtx.getJobState(ancestor);
    if (jobState == null || jobState == TaskState.NOT_STARTED) {
      ++notStartedCount;
    } else if (jobState == TaskState.IN_PROGRESS || jobState == TaskState.STOPPED) {
      ++inCompleteCount;
    }
  }
  if (notStartedCount > 0 || (workflowCfg.isJobQueue() && inCompleteCount >= workflowCfg.getParallelJobs())) {
    LOG.debug("Job " + resourceName + " is not ready to be scheduled due to pending dependent jobs.");
    return emptyAssignment(resourceName, currStateOutput);
  }
  // Clean up if the workflow is marked for deletion
  TargetState targetState = workflowCfg.getTargetState();
  if (targetState == TargetState.DELETE) {
    LOG.info("Workflow " + workflowResource + " is marked as deleted; cleaning up the workflow context.");
    cleanup(_manager, resourceName, workflowCfg, workflowResource);
    return emptyAssignment(resourceName, currStateOutput);
  }
  // Check if this workflow has finished and passed its expiry.
  if (workflowCtx.getFinishTime() != WorkflowContext.UNFINISHED
      && workflowCtx.getFinishTime() + workflowCfg.getExpiry() <= System.currentTimeMillis()) {
    LOG.info("Workflow " + workflowResource
        + " is completed and passed expiry time, cleaning up the workflow context.");
    markForDeletion(_manager, workflowResource);
    cleanup(_manager, resourceName, workflowCfg, workflowResource);
    return emptyAssignment(resourceName, currStateOutput);
  }
  // Fetch any existing job context from the property store.
  JobContext jobCtx = clusterData.getJobContext(resourceName);
  if (jobCtx == null) {
    jobCtx = new JobContext(new ZNRecord(TaskUtil.TASK_CONTEXT_KW));
    jobCtx.setStartTime(System.currentTimeMillis());
    jobCtx.setName(resourceName);
  }
  // Check for expired jobs in non-terminable workflows
  long jobFinishTime = jobCtx.getFinishTime();
  if (!workflowCfg.isTerminable() && jobFinishTime != WorkflowContext.UNFINISHED
      && jobFinishTime + workflowCfg.getExpiry() <= System.currentTimeMillis()) {
    LOG.info("Job " + resourceName + " is completed and passed expiry time, cleaning up the job context.");
    cleanup(_manager, resourceName, workflowCfg, workflowResource);
    return emptyAssignment(resourceName, currStateOutput);
  }
  // The job is already in a final state (completed/failed).
  if (workflowCtx.getJobState(resourceName) == TaskState.FAILED
      || workflowCtx.getJobState(resourceName) == TaskState.COMPLETED) {
    LOG.debug("Job " + resourceName + " is failed or already completed.");
    return emptyAssignment(resourceName, currStateOutput);
  }
  // Check for readiness, and stop processing if it's not ready
  boolean isReady = scheduleIfNotReady(workflowCfg, workflowCtx, workflowResource, resourceName, clusterData);
  if (!isReady) {
    LOG.debug("Job " + resourceName + " is not ready to be scheduled.");
    return emptyAssignment(resourceName, currStateOutput);
  }
  // Fetch the previous resource assignment from the property store (required because of HELIX-230),
  // or start from an empty one if it doesn't exist.
  ResourceAssignment prevAssignment = getPrevResourceAssignment(_manager, resourceName);
  if (prevAssignment == null) {
    prevAssignment = new ResourceAssignment(resourceName);
  }
  // Will contain the list of partitions that must be explicitly dropped from the ideal state that
  // is stored in ZK.
  Set<Integer> partitionsToDrop = new TreeSet<Integer>();
  ResourceAssignment newAssignment =
      computeResourceMapping(resourceName, workflowCfg, jobCfg, prevAssignment,
          clusterData.getLiveInstances().keySet(), currStateOutput, workflowCtx, jobCtx, partitionsToDrop,
          clusterData);
  if (!partitionsToDrop.isEmpty()) {
    for (Integer pId : partitionsToDrop) {
      taskIs.getRecord().getMapFields().remove(pName(resourceName, pId));
    }
    HelixDataAccessor accessor = _manager.getHelixDataAccessor();
    PropertyKey propertyKey = accessor.keyBuilder().idealStates(resourceName);
    accessor.setProperty(propertyKey, taskIs);
  }
  // Update workflow and job context in the data cache and ZK.
  clusterData.updateJobContext(resourceName, jobCtx, _manager.getHelixDataAccessor());
  clusterData.updateWorkflowContext(workflowResource, workflowCtx, _manager.getHelixDataAccessor());
  setPrevResourceAssignment(_manager, resourceName, newAssignment);
  LOG.debug("Job " + resourceName + " new assignment "
      + Arrays.toString(newAssignment.getMappedPartitions().toArray()));
  return newAssignment;
}
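The PropertyKey usage in this method is confined to the partition-drop branch: removed partitions are stripped from the task ideal state's map fields, and the modified ideal state is written back under its key. Below is a standalone sketch of that step with a hypothetical helper name; pName is assumed to be the task framework's partition-naming helper used in the method above.
void dropPartitionsFromIdealState(HelixManager manager, String resourceName, IdealState taskIs,
    Set<Integer> partitionsToDrop) {
  for (Integer pId : partitionsToDrop) {
    // Remove the dropped partition's entry from the ideal state's map fields
    taskIs.getRecord().getMapFields().remove(pName(resourceName, pId));
  }
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey isKey = accessor.keyBuilder().idealStates(resourceName);
  // Overwrites the ideal state znode; the controller picks up the change on its next pipeline run.
  accessor.setProperty(isKey, taskIs);
}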
Use of org.apache.helix.PropertyKey in project helix by apache.
From class DeprecatedTaskRebalancer, method cleanup:
/**
 * Cleans up all Helix state associated with this job, wiping workflow-level information if this
 * is the last remaining job in its workflow and the workflow is terminable.
 */
private static void cleanup(HelixManager mgr, final String resourceName, WorkflowConfig cfg,
    String workflowResource) {
  LOG.info("Cleaning up job: " + resourceName + " in workflow: " + workflowResource);
  HelixDataAccessor accessor = mgr.getHelixDataAccessor();
  // Remove any DAG references to this job from the workflow config
  PropertyKey workflowKey = getConfigPropertyKey(accessor, workflowResource);
  DataUpdater<ZNRecord> dagRemover = new DataUpdater<ZNRecord>() {
    @Override
    public ZNRecord update(ZNRecord currentData) {
      JobDag jobDag =
          JobDag.fromJson(currentData.getSimpleField(WorkflowConfig.WorkflowConfigProperty.Dag.name()));
      for (String child : jobDag.getDirectChildren(resourceName)) {
        jobDag.getChildrenToParents().get(child).remove(resourceName);
      }
      for (String parent : jobDag.getDirectParents(resourceName)) {
        jobDag.getParentsToChildren().get(parent).remove(resourceName);
      }
      jobDag.getChildrenToParents().remove(resourceName);
      jobDag.getParentsToChildren().remove(resourceName);
      jobDag.getAllNodes().remove(resourceName);
      try {
        currentData.setSimpleField(WorkflowConfig.WorkflowConfigProperty.Dag.name(), jobDag.toJson());
      } catch (Exception e) {
        LOG.error("Could not update DAG for job: " + resourceName, e);
      }
      return currentData;
    }
  };
  accessor.getBaseDataAccessor().update(workflowKey.getPath(), dagRemover, AccessOption.PERSISTENT);
  // Delete resource configs.
  PropertyKey cfgKey = getConfigPropertyKey(accessor, resourceName);
  if (!accessor.removeProperty(cfgKey)) {
    throw new RuntimeException(String.format(
        "Error occurred while trying to clean up job %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
        resourceName, cfgKey));
  }
  // Delete property store information for this resource.
  // For a recurring workflow, it's OK if the node doesn't exist.
  String propStoreKey = getRebalancerPropStoreKey(resourceName);
  mgr.getHelixPropertyStore().remove(propStoreKey, AccessOption.PERSISTENT);
  // Delete the ideal state itself.
  PropertyKey isKey = getISPropertyKey(accessor, resourceName);
  if (!accessor.removeProperty(isKey)) {
    throw new RuntimeException(String.format(
        "Error occurred while trying to clean up task %s. Failed to remove node %s from Helix.",
        resourceName, isKey));
  }
  // Delete the dead external view. Because the job is already completed, there are no further
  // current state changes, so dead external view removal would not otherwise be triggered.
  PropertyKey evKey = accessor.keyBuilder().externalView(resourceName);
  accessor.removeProperty(evKey);
  LOG.info(String.format("Successfully cleaned up job resource %s.", resourceName));
  boolean lastInWorkflow = true;
  for (String job : cfg.getJobDag().getAllNodes()) {
    // Check if property store information or resource configs still exist for this job
    if (mgr.getHelixPropertyStore().exists(getRebalancerPropStoreKey(job), AccessOption.PERSISTENT)
        || accessor.getProperty(getConfigPropertyKey(accessor, job)) != null
        || accessor.getProperty(getISPropertyKey(accessor, job)) != null) {
      lastInWorkflow = false;
      break;
    }
  }
  // Clean up workflow-level info if this was the last job in the workflow
  if (lastInWorkflow && (cfg.isTerminable() || cfg.getTargetState() == TargetState.DELETE)) {
    // Delete workflow config
    PropertyKey workflowCfgKey = getConfigPropertyKey(accessor, workflowResource);
    if (!accessor.removeProperty(workflowCfgKey)) {
      throw new RuntimeException(String.format(
          "Error occurred while trying to clean up workflow %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
          workflowResource, workflowCfgKey));
    }
    // Delete property store information for this workflow
    String workflowPropStoreKey = getRebalancerPropStoreKey(workflowResource);
    if (!mgr.getHelixPropertyStore().remove(workflowPropStoreKey, AccessOption.PERSISTENT)) {
      throw new RuntimeException(String.format(
          "Error occurred while trying to clean up workflow %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
          workflowResource, workflowPropStoreKey));
    }
    // Remove any pending timer for this workflow
    if (SCHEDULED_TIMES.containsKey(workflowResource)) {
      SCHEDULED_TIMES.remove(workflowResource);
    }
  }
}
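Besides removeProperty and setProperty, this method relies on BaseDataAccessor.update() with a DataUpdater to edit the workflow config znode in place, which is how the job is unlinked from the DAG without clobbering concurrent writers. Below is a minimal sketch of that read-modify-write pattern under the same assumptions; the helper and the simple-field name are illustrative only.
void removeSimpleField(HelixDataAccessor accessor, PropertyKey key, final String fieldName) {
  DataUpdater<ZNRecord> remover = new DataUpdater<ZNRecord>() {
    @Override
    public ZNRecord update(ZNRecord currentData) {
      if (currentData != null) {
        // Drop the given simple field from the record before it is written back
        currentData.getSimpleFields().remove(fieldName);
      }
      return currentData;
    }
  };
  // update() applies the updater with a compare-and-set style write against the
  // znode at key.getPath(), re-reading and retrying if the node changed in between.
  accessor.getBaseDataAccessor().update(key.getPath(), remover, AccessOption.PERSISTENT);
}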