use of org.apache.helix.HelixProperty in project helix by apache.
the class BasicClusterDataCache method updateReloadProperties.
/**
 * Selectively update the Helix cache by version (stat) comparison
 * @param accessor the HelixDataAccessor
 * @param reloadKeys keys that need to be reloaded from ZooKeeper (stale or missing cached keys are appended to this list)
 * @param cachedKeys keys that already exist in the cache
 * @param cachedPropertyMap cached map of PropertyKey to property object
 * @param <T> the type of metadata
 * @return a map of PropertyKey to the up-to-date property object
 */
public static <T extends HelixProperty> Map<PropertyKey, T> updateReloadProperties(HelixDataAccessor accessor, List<PropertyKey> reloadKeys, List<PropertyKey> cachedKeys, Map<PropertyKey, T> cachedPropertyMap) {
  // All new entries from zk not cached locally yet should be read from ZK.
  Map<PropertyKey, T> refreshedPropertyMap = Maps.newHashMap();
  List<HelixProperty.Stat> stats = accessor.getPropertyStats(cachedKeys);
  for (int i = 0; i < cachedKeys.size(); i++) {
    PropertyKey key = cachedKeys.get(i);
    HelixProperty.Stat stat = stats.get(i);
    if (stat != null) {
      T property = cachedPropertyMap.get(key);
      if (property != null && property.getBucketSize() == 0 && property.getStat().equals(stat)) {
        refreshedPropertyMap.put(key, property);
      } else {
        // need update from zk
        reloadKeys.add(key);
      }
    } else {
      LOG.warn("Stat is null for key: " + key);
      reloadKeys.add(key);
    }
  }
  List<T> reloadedProperty = accessor.getProperty(reloadKeys, true);
  Iterator<PropertyKey> reloadKeyIter = reloadKeys.iterator();
  for (T property : reloadedProperty) {
    PropertyKey key = reloadKeyIter.next();
    if (property != null) {
      refreshedPropertyMap.put(key, property);
    } else {
      LOG.warn("Reload property is null for key: " + key);
    }
  }
  return refreshedPropertyMap;
}
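A caller refreshes one slice of its cache by splitting the current keys into already-cached and not-yet-cached lists and handing both to this helper. The sketch below is illustrative rather than taken from the project: the class name LiveInstanceCacheExample, its refresh method, and the import path for BasicClusterDataCache are assumptions, and it relies on PropertyKey working as a value-equal map key, which the cached map in the method above already requires.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.common.BasicClusterDataCache; // assumed package for the class shown above
import org.apache.helix.model.LiveInstance;

// Hypothetical caller of BasicClusterDataCache.updateReloadProperties.
public class LiveInstanceCacheExample {
  // Map kept between refresh calls; empty on the first call.
  private Map<PropertyKey, LiveInstance> _liveInstanceCache = new HashMap<>();

  public void refresh(HelixDataAccessor accessor) {
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    List<PropertyKey> cachedKeys = new ArrayList<>();
    List<PropertyKey> reloadKeys = new ArrayList<>();
    for (String instance : accessor.getChildNames(keyBuilder.liveInstances())) {
      PropertyKey key = keyBuilder.liveInstance(instance);
      if (_liveInstanceCache.containsKey(key)) {
        cachedKeys.add(key); // version (stat) is compared against the cached copy
      } else {
        reloadKeys.add(key); // brand-new entry, must be read from ZooKeeper
      }
    }
    // Entries whose stat changed are re-read; unchanged ones are reused from memory.
    _liveInstanceCache = BasicClusterDataCache.updateReloadProperties(accessor, reloadKeys,
        cachedKeys, _liveInstanceCache);
  }
}
```

The point of this pattern, as the method body shows, is that unchanged entries cost only a stat read, so a large cluster pays the full ZooKeeper data read only for znodes that actually changed.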
use of org.apache.helix.HelixProperty in project helix by apache.
the class JobResource method getHostedEntitiesRepresentation.
StringRepresentation getHostedEntitiesRepresentation(String clusterName, String jobQueueName, String jobName) throws Exception {
  ZkClient zkClient = ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  // Get job queue config
  String namespacedJobName = TaskUtil.getNamespacedJobName(jobQueueName, jobName);
  HelixProperty jobConfig = accessor.getProperty(keyBuilder.resourceConfig(namespacedJobName));
  TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);
  // Get job queue context
  JobContext ctx = taskDriver.getJobContext(namespacedJobName);
  // Create the result
  ZNRecord hostedEntitiesRecord = new ZNRecord(namespacedJobName);
  if (jobConfig != null) {
    hostedEntitiesRecord.merge(jobConfig.getRecord());
  }
  if (ctx != null) {
    hostedEntitiesRecord.merge(ctx.getRecord());
  }
  StringRepresentation representation = new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(hostedEntitiesRecord), MediaType.APPLICATION_JSON);
  return representation;
}
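The same config-plus-context merge can be useful outside the Restlet stack, for example in an admin tool that already holds a ZkClient and a HelixDataAccessor. The helper below is a hypothetical standalone equivalent (the class and method names are invented); it reuses only the Helix calls that appear in the resource above.

```java
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZkClient;
import org.apache.helix.task.JobContext;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskUtil;

// Hypothetical helper mirroring JobResource: merge a job's static config with its runtime context.
public class JobSnapshotExample {
  public static ZNRecord snapshotJob(ZkClient zkClient, HelixDataAccessor accessor,
      String clusterName, String jobQueueName, String jobName) {
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    String namespacedJobName = TaskUtil.getNamespacedJobName(jobQueueName, jobName);
    // Static job configuration, stored as a resource config
    HelixProperty jobConfig = accessor.getProperty(keyBuilder.resourceConfig(namespacedJobName));
    // Runtime state of the job (per-partition status, timing information)
    JobContext ctx = new TaskDriver(zkClient, clusterName).getJobContext(namespacedJobName);
    ZNRecord merged = new ZNRecord(namespacedJobName);
    if (jobConfig != null) {
      merged.merge(jobConfig.getRecord());
    }
    if (ctx != null) {
      merged.merge(ctx.getRecord());
    }
    return merged; // serialize to JSON or inspect individual fields as needed
  }
}
```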
use of org.apache.helix.HelixProperty in project helix by apache.
the class DeprecatedTaskRebalancer method cloneWorkflow.
/**
* Create a new workflow based on an existing one
* @param manager connection to Helix
* @param origWorkflowName the name of the existing workflow
* @param newWorkflowName the name of the new workflow
* @param newStartTime a provided start time that deviates from the desired start time
* @return the cloned workflow, or null if there was a problem cloning the existing one
*/
private Workflow cloneWorkflow(HelixManager manager, String origWorkflowName, String newWorkflowName, Date newStartTime) {
  // Read all resources, including the workflow and jobs of interest
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Map<String, HelixProperty> resourceConfigMap = accessor.getChildValuesMap(keyBuilder.resourceConfigs());
  if (!resourceConfigMap.containsKey(origWorkflowName)) {
    LOG.error("No such workflow named " + origWorkflowName);
    return null;
  }
  if (resourceConfigMap.containsKey(newWorkflowName)) {
    LOG.error("Workflow with name " + newWorkflowName + " already exists!");
    return null;
  }
  // Create a new workflow with a new name
  HelixProperty workflowConfig = resourceConfigMap.get(origWorkflowName);
  Map<String, String> wfSimpleFields = workflowConfig.getRecord().getSimpleFields();
  JobDag jobDag = JobDag.fromJson(wfSimpleFields.get(WorkflowConfig.WorkflowConfigProperty.Dag.name()));
  Map<String, Set<String>> parentsToChildren = jobDag.getParentsToChildren();
  Workflow.Builder builder = new Workflow.Builder(newWorkflowName);
  // Set the workflow expiry
  builder.setExpiry(Long.parseLong(wfSimpleFields.get(WorkflowConfig.WorkflowConfigProperty.Expiry.name())));
  // Set the schedule, if applicable
  ScheduleConfig scheduleConfig;
  if (newStartTime != null) {
    scheduleConfig = ScheduleConfig.oneTimeDelayedStart(newStartTime);
  } else {
    scheduleConfig = WorkflowConfig.parseScheduleFromConfigMap(wfSimpleFields);
  }
  if (scheduleConfig != null) {
    builder.setScheduleConfig(scheduleConfig);
  }
  // Add each job back as long as the original exists
  Set<String> namespacedJobs = jobDag.getAllNodes();
  for (String namespacedJob : namespacedJobs) {
    if (resourceConfigMap.containsKey(namespacedJob)) {
      // Copy over job-level and task-level configs
      String job = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedJob);
      HelixProperty jobConfig = resourceConfigMap.get(namespacedJob);
      Map<String, String> jobSimpleFields = jobConfig.getRecord().getSimpleFields();
      // overwrite workflow name
      jobSimpleFields.put(JobConfig.JobConfigProperty.WorkflowID.name(), newWorkflowName);
      for (Map.Entry<String, String> e : jobSimpleFields.entrySet()) {
        builder.addConfig(job, e.getKey(), e.getValue());
      }
      Map<String, Map<String, String>> rawTaskConfigMap = jobConfig.getRecord().getMapFields();
      List<TaskConfig> taskConfigs = Lists.newLinkedList();
      for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
        TaskConfig taskConfig = TaskConfig.Builder.from(rawTaskConfig);
        taskConfigs.add(taskConfig);
      }
      builder.addTaskConfigs(job, taskConfigs);
      // Add dag dependencies
      Set<String> children = parentsToChildren.get(namespacedJob);
      if (children != null) {
        for (String namespacedChild : children) {
          String child = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedChild);
          builder.addParentChildDependency(job, child);
        }
      }
    }
  }
  return builder.build();
}
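Since cloneWorkflow is private to DeprecatedTaskRebalancer, any caller sits in the same class. The fragment below is only a sketch of how the returned Workflow could then be submitted: the cloneAndStart method, the "_copy" suffix, and the five-minute delay are invented for illustration.

```java
import java.util.Date;

import org.apache.helix.HelixManager;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.Workflow;

// Illustrative only: this would live inside DeprecatedTaskRebalancer, next to cloneWorkflow.
private void cloneAndStart(HelixManager manager, String origWorkflowName) throws Exception {
  String newWorkflowName = origWorkflowName + "_copy"; // any unused resource name works
  // Start the copy five minutes from now instead of reusing the original schedule.
  Date newStartTime = new Date(System.currentTimeMillis() + 5 * 60 * 1000L);
  Workflow cloned = cloneWorkflow(manager, origWorkflowName, newWorkflowName, newStartTime);
  if (cloned != null) {
    // Submit the cloned workflow to the cluster so the controller can schedule its jobs.
    new TaskDriver(manager).start(cloned);
  }
}
```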
use of org.apache.helix.HelixProperty in project helix by apache.
the class TestZNRecordSizeLimit method testZNRecordSizeLimitUseZNRecordStreamingSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
  ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // test zkClient
  // legal-sized data gets written to zk
  // write a znode of size less than 1m
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // oversized data doesn't create any data on zk
  // prepare a znode of size larger than 1m
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail because data size is larger than 1M since compression applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // oversized write doesn't overwrite existing data on zk
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail because data size is larger than 1M since compression applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // test ZkDataAccessor
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // oversized data should not create any new data on zk
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  // ZNRecord statusUpdates = new ZNRecord("statusUpdates");
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
  Assert.assertTrue(succeed);
  HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
  Assert.assertNotNull(property);
  // legal sized data gets written to zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // oversized data should not update existing data on zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  // System.out.println("record: " + idealState.getRecord());
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
}
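The numbers the test leans on are simple: 900 or 1024 simple fields of roughly 1 KB each land just below or just above ZooKeeper's default 1 MB znode limit. A small standalone probe (not part of the test; the class name and printed output are illustrative) makes the serialized size visible without touching ZooKeeper, using the same ZNRecordStreamingSerializer the test installs on its ZkClient.

```java
import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordStreamingSerializer;

// Illustrative size probe: serialize a record shaped like smallRecord above and print its length.
public class RecordSizeProbe {
  public static void main(String[] args) {
    ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 1024; i++) {
      sb.append('a'); // a 1 KB value, same as bufStr in the test
    }
    String bufStr = sb.toString();
    ZNRecord record = new ZNRecord("probe");
    for (int i = 0; i < 900; i++) {
      record.setSimpleField(i + "", bufStr); // ~900 KB of simple fields, mirroring smallRecord
    }
    byte[] bytes = serializer.serialize(record);
    // This is the payload size the test compares against 900 * 1024.
    System.out.println("Serialized size: " + bytes.length + " bytes");
  }
}
```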
use of org.apache.helix.HelixProperty in project helix by apache.
the class TestZNRecordSizeLimit method testZNRecordSizeLimitUseZNRecordSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
  ZNRecordSerializer serializer = new ZNRecordSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // test zkClient
  // legal-sized data gets written to zk
  // write a znode of size less than 1m
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // oversized data doesn't create any data on zk
  // prepare a znode of size larger than 1m
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail because data size is larger than 1M since compression applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // oversized write doesn't overwrite existing data on zk
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail because data size is larger than 1M since compression applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // test ZkDataAccessor
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // oversized data should not create any new data on zk
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
  Assert.assertTrue(succeed);
  HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
  Assert.assertNull(property);
  // legal sized data gets written to zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // oversized data should not update existing data on zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  // System.out.println("record: " + idealState.getRecord());
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
}
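Both tests share the same setup pattern: construct a ZkClient, install a Helix serializer on it, then write and read ZNRecords through it. A minimal, self-contained version of that wiring (the ZooKeeper address and paths below are placeholders) might look like this:

```java
import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;

// Illustrative sketch of the ZkClient/serializer wiring used at the top of both tests.
public class SerializerSetupExample {
  public static void main(String[] args) {
    ZkClient zkClient = new ZkClient("localhost:2181"); // placeholder ZooKeeper address
    zkClient.setZkSerializer(new ZNRecordSerializer());
    try {
      ZNRecord record = new ZNRecord("example");
      record.setSimpleField("key", "value");
      // Create the znode (and any missing parents) before writing the record to it.
      zkClient.createPersistent("/SerializerSetupExample/example", true);
      zkClient.writeData("/SerializerSetupExample/example", record);
      ZNRecord readBack = zkClient.readData("/SerializerSetupExample/example");
      System.out.println("Read back record id: " + readBack.getId());
    } finally {
      zkClient.close();
    }
  }
}
```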