
Example 1 with HelixProperty

Use of org.apache.helix.HelixProperty in project helix by Apache.

From the class BasicClusterDataCache, method updateReloadProperties:

/**
 * Selectively update the Helix cache, re-reading only entries whose ZK version changed
 * @param accessor the HelixDataAccessor
 * @param reloadKeys keys that need to be reloaded
 * @param cachedKeys keys that already exist in the cache
 * @param cachedPropertyMap cached map of PropertyKey -> property object
 * @param <T> the type of metadata
 * @return a map of property key to the refreshed property
 */
public static <T extends HelixProperty> Map<PropertyKey, T> updateReloadProperties(
        HelixDataAccessor accessor, List<PropertyKey> reloadKeys,
        List<PropertyKey> cachedKeys, Map<PropertyKey, T> cachedPropertyMap) {
    // All entries present in ZK but not yet cached locally must be read from ZK.
    Map<PropertyKey, T> refreshedPropertyMap = Maps.newHashMap();
    List<HelixProperty.Stat> stats = accessor.getPropertyStats(cachedKeys);
    for (int i = 0; i < cachedKeys.size(); i++) {
        PropertyKey key = cachedKeys.get(i);
        HelixProperty.Stat stat = stats.get(i);
        if (stat != null) {
            T property = cachedPropertyMap.get(key);
            if (property != null && property.getBucketSize() == 0 && property.getStat().equals(stat)) {
                // Unchanged in ZK (same stat) and not bucketized: reuse the cached copy
                refreshedPropertyMap.put(key, property);
            } else {
                // Stale or bucketized: re-read from ZK
                reloadKeys.add(key);
            }
        } else {
            LOG.warn("Stat is null for key: " + key);
            reloadKeys.add(key);
        }
    }
    // Batch-read all keys marked for reload; results align with reloadKeys order
    List<T> reloadedProperties = accessor.getProperty(reloadKeys, true);
    Iterator<PropertyKey> reloadKeyIter = reloadKeys.iterator();
    for (T property : reloadedProperties) {
        PropertyKey key = reloadKeyIter.next();
        if (property != null) {
            refreshedPropertyMap.put(key, property);
        } else {
            LOG.warn("Reload property is null for key: " + key);
        }
    }
    return refreshedPropertyMap;
}
Also used: HelixProperty (org.apache.helix.HelixProperty), PropertyKey (org.apache.helix.PropertyKey)
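
A hedged usage sketch of the helper above, assuming a hypothetical LiveInstance cache (getChildNames, liveInstances and liveInstance are real accessor/PropertyKey.Builder APIs; note that the helper also appends stale keys to the reloadKeys list it is given):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.LiveInstance;

// Hypothetical caller: refresh a LiveInstance cache, re-reading only new or changed znodes.
Map<PropertyKey, LiveInstance> refreshLiveInstances(HelixDataAccessor accessor, Map<PropertyKey, LiveInstance> cache) {
    List<PropertyKey> reloadKeys = new ArrayList<>();
    List<PropertyKey> cachedKeys = new ArrayList<>();
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    for (String instance : accessor.getChildNames(keyBuilder.liveInstances())) {
        PropertyKey key = keyBuilder.liveInstance(instance);
        if (cache.containsKey(key)) {
            cachedKeys.add(key); // candidate for the stat-based freshness check
        } else {
            reloadKeys.add(key); // brand-new znode, must be read from ZooKeeper
        }
    }
    return BasicClusterDataCache.updateReloadProperties(accessor, reloadKeys, cachedKeys, cache);
}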

Example 2 with HelixProperty

Use of org.apache.helix.HelixProperty in project helix by Apache.

From the class JobResource, method getHostedEntitiesRepresentation:

StringRepresentation getHostedEntitiesRepresentation(String clusterName, String jobQueueName, String jobName) throws Exception {
    ZkClient zkClient = ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
    HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    // Get the job config (stored as a resource config under the namespaced job name)
    String namespacedJobName = TaskUtil.getNamespacedJobName(jobQueueName, jobName);
    HelixProperty jobConfig = accessor.getProperty(keyBuilder.resourceConfig(namespacedJobName));
    TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);
    // Get the job context
    JobContext ctx = taskDriver.getJobContext(namespacedJobName);
    // Create the result
    ZNRecord hostedEntitiesRecord = new ZNRecord(namespacedJobName);
    if (jobConfig != null) {
        hostedEntitiesRecord.merge(jobConfig.getRecord());
    }
    if (ctx != null) {
        hostedEntitiesRecord.merge(ctx.getRecord());
    }
    StringRepresentation representation = new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(hostedEntitiesRecord), MediaType.APPLICATION_JSON);
    return representation;
}
Also used: ZkClient (org.apache.helix.manager.zk.ZkClient), HelixDataAccessor (org.apache.helix.HelixDataAccessor), HelixProperty (org.apache.helix.HelixProperty), StringRepresentation (org.restlet.representation.StringRepresentation), TaskDriver (org.apache.helix.task.TaskDriver), JobContext (org.apache.helix.task.JobContext), PropertyKey (org.apache.helix.PropertyKey), ZNRecord (org.apache.helix.ZNRecord)
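
A hedged sketch of the same lookup done directly against the task APIs instead of the REST layer (zkClient and clusterName are assumed to be in scope; the queue and job names are illustrative):

// Hypothetical non-REST equivalent of the handler above.
String namespacedJobName = TaskUtil.getNamespacedJobName("myQueue", "myJob"); // e.g. "myQueue_myJob"
TaskDriver driver = new TaskDriver(zkClient, clusterName);
JobConfig jobConfig = driver.getJobConfig(namespacedJobName);    // null if the job has no config
JobContext jobContext = driver.getJobContext(namespacedJobName); // null if the job never ran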

Example 3 with HelixProperty

Use of org.apache.helix.HelixProperty in project helix by Apache.

From the class DeprecatedTaskRebalancer, method cloneWorkflow:

/**
 * Create a new workflow based on an existing one
 * @param manager connection to Helix
 * @param origWorkflowName the name of the existing workflow
 * @param newWorkflowName the name of the new workflow
 * @param newStartTime a start time for the new workflow; if null, the schedule from the original workflow is reused
 * @return the cloned workflow, or null if there was a problem cloning the existing one
 */
private Workflow cloneWorkflow(HelixManager manager, String origWorkflowName, String newWorkflowName, Date newStartTime) {
    // Read all resources, including the workflow and jobs of interest
    HelixDataAccessor accessor = manager.getHelixDataAccessor();
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    Map<String, HelixProperty> resourceConfigMap = accessor.getChildValuesMap(keyBuilder.resourceConfigs());
    if (!resourceConfigMap.containsKey(origWorkflowName)) {
        LOG.error("No such workflow named " + origWorkflowName);
        return null;
    }
    if (resourceConfigMap.containsKey(newWorkflowName)) {
        LOG.error("Workflow with name " + newWorkflowName + " already exists!");
        return null;
    }
    // Create a new workflow with a new name
    HelixProperty workflowConfig = resourceConfigMap.get(origWorkflowName);
    Map<String, String> wfSimpleFields = workflowConfig.getRecord().getSimpleFields();
    JobDag jobDag = JobDag.fromJson(wfSimpleFields.get(WorkflowConfig.WorkflowConfigProperty.Dag.name()));
    Map<String, Set<String>> parentsToChildren = jobDag.getParentsToChildren();
    Workflow.Builder builder = new Workflow.Builder(newWorkflowName);
    // Set the workflow expiry
    builder.setExpiry(Long.parseLong(wfSimpleFields.get(WorkflowConfig.WorkflowConfigProperty.Expiry.name())));
    // Set the schedule, if applicable
    ScheduleConfig scheduleConfig;
    if (newStartTime != null) {
        scheduleConfig = ScheduleConfig.oneTimeDelayedStart(newStartTime);
    } else {
        scheduleConfig = WorkflowConfig.parseScheduleFromConfigMap(wfSimpleFields);
    }
    if (scheduleConfig != null) {
        builder.setScheduleConfig(scheduleConfig);
    }
    // Add each job back as long as the original exists
    Set<String> namespacedJobs = jobDag.getAllNodes();
    for (String namespacedJob : namespacedJobs) {
        if (resourceConfigMap.containsKey(namespacedJob)) {
            // Copy over job-level and task-level configs
            String job = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedJob);
            HelixProperty jobConfig = resourceConfigMap.get(namespacedJob);
            Map<String, String> jobSimpleFields = jobConfig.getRecord().getSimpleFields();
            // Point the cloned job at the new workflow
            jobSimpleFields.put(JobConfig.JobConfigProperty.WorkflowID.name(), newWorkflowName);
            for (Map.Entry<String, String> e : jobSimpleFields.entrySet()) {
                builder.addConfig(job, e.getKey(), e.getValue());
            }
            Map<String, Map<String, String>> rawTaskConfigMap = jobConfig.getRecord().getMapFields();
            List<TaskConfig> taskConfigs = Lists.newLinkedList();
            for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
                TaskConfig taskConfig = TaskConfig.Builder.from(rawTaskConfig);
                taskConfigs.add(taskConfig);
            }
            builder.addTaskConfigs(job, taskConfigs);
            // Add dag dependencies
            Set<String> children = parentsToChildren.get(namespacedJob);
            if (children != null) {
                for (String namespacedChild : children) {
                    String child = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedChild);
                    builder.addParentChildDependency(job, child);
                }
            }
        }
    }
    return builder.build();
}
Also used: SortedSet (java.util.SortedSet), TreeSet (java.util.TreeSet), HashSet (java.util.HashSet), Set (java.util.Set), HelixDataAccessor (org.apache.helix.HelixDataAccessor), HelixProperty (org.apache.helix.HelixProperty), HashMap (java.util.HashMap), Map (java.util.Map), BiMap (com.google.common.collect.BiMap), ImmutableMap (com.google.common.collect.ImmutableMap), HashBiMap (com.google.common.collect.HashBiMap), TreeMap (java.util.TreeMap), PropertyKey (org.apache.helix.PropertyKey)
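
A hedged call-site sketch: cloneWorkflow is private, so this would live inside DeprecatedTaskRebalancer; the workflow names and the five-minute delay are illustrative:

// Clone an existing workflow under a new name, delayed to start in five minutes,
// then submit it through the TaskDriver.
Date startTime = new Date(System.currentTimeMillis() + 5 * 60 * 1000L);
Workflow cloned = cloneWorkflow(manager, "dailyBackup", "dailyBackup_retry", startTime);
if (cloned != null) {
    new TaskDriver(manager).start(cloned);
}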

Example 4 with HelixProperty

Use of org.apache.helix.HelixProperty in project helix by Apache.

From the class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordStreamingSerializer:

@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
    String className = getShortClassName();
    System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
    ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
    ZkClient zkClient = new ZkClient(ZK_ADDR);
    zkClient.setZkSerializer(serializer);
    String root = className;
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024; i++) {
        buf[i] = 'a';
    }
    String bufStr = new String(buf);
    // Test zkClient directly.
    // Legal-sized data gets written to ZK: write a znode smaller than 1 MB.
    final ZNRecord smallRecord = new ZNRecord("normalsize");
    smallRecord.getSimpleFields().clear();
    for (int i = 0; i < 900; i++) {
        smallRecord.setSimpleField(i + "", bufStr);
    }
    String path1 = "/" + root + "/test1";
    zkClient.createPersistent(path1, true);
    zkClient.writeData(path1, smallRecord);
    ZNRecord record = zkClient.readData(path1);
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // Oversized data is still written because the serializer compresses it:
    // prepare a znode larger than 1 MB uncompressed.
    final ZNRecord largeRecord = new ZNRecord("oversize");
    largeRecord.getSimpleFields().clear();
    for (int i = 0; i < 1024; i++) {
        largeRecord.setSimpleField(i + "", bufStr);
    }
    String path2 = "/" + root + "/test2";
    zkClient.createPersistent(path2, true);
    try {
        zkClient.writeData(path2, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    record = zkClient.readData(path2);
    Assert.assertNotNull(record);
    // An oversized write also succeeds against an existing znode and replaces its data.
    record = zkClient.readData(path1);
    try {
        zkClient.writeData(path1, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    ZNRecord recordNew = zkClient.readData(path1);
    byte[] arr = serializer.serialize(record);
    byte[] arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    // test ZkDataAccessor
    ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
    admin.addCluster(className, true);
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
    admin.addInstance(className, instanceConfig);
    // Oversized data is still persisted through the accessor (compression applies).
    ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
    Builder keyBuilder = accessor.keyBuilder();
    IdealState idealState = new IdealState("currentState");
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
    Assert.assertTrue(succeed);
    HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
    Assert.assertNotNull(property);
    // Legal-sized data gets written to ZK.
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 900; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
    Assert.assertTrue(succeed);
    record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // An oversized update also goes through and changes the existing data.
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 900; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
    Assert.assertTrue(succeed);
    recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
    arr = serializer.serialize(record);
    arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
}
Also used: ZkClient (org.apache.helix.manager.zk.ZkClient), ZkBaseDataAccessor (org.apache.helix.manager.zk.ZkBaseDataAccessor), ZNRecordStreamingSerializer (org.apache.helix.manager.zk.ZNRecordStreamingSerializer), Builder (org.apache.helix.PropertyKey.Builder), Date (java.util.Date), IdealState (org.apache.helix.model.IdealState), HelixException (org.apache.helix.HelixException), ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin), InstanceConfig (org.apache.helix.model.InstanceConfig), HelixProperty (org.apache.helix.HelixProperty), ZNRecord (org.apache.helix.ZNRecord), ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor), Test (org.testng.annotations.Test)
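
The oversized writes above only succeed because the serializer compresses payloads that would otherwise exceed ZooKeeper's default 1 MB znode limit; a hedged check reusing serializer and largeRecord from the test (the compression-on-overflow behavior is inferred from the test's assertion messages):

// 1024 fields x 1 KB of repeated 'a' is over 1 MB uncompressed, but such data
// compresses extremely well, so the serialized form is expected to fit.
byte[] serialized = serializer.serialize(largeRecord);
Assert.assertTrue(serialized.length < 1024 * 1024);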

Example 5 with HelixProperty

Use of org.apache.helix.HelixProperty in project helix by Apache.

From the class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordSerializer:

@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
    String className = getShortClassName();
    System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
    ZNRecordSerializer serializer = new ZNRecordSerializer();
    ZkClient zkClient = new ZkClient(ZK_ADDR);
    zkClient.setZkSerializer(serializer);
    String root = className;
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024; i++) {
        buf[i] = 'a';
    }
    String bufStr = new String(buf);
    // Test zkClient directly.
    // Legal-sized data gets written to ZK: write a znode smaller than 1 MB.
    final ZNRecord smallRecord = new ZNRecord("normalsize");
    smallRecord.getSimpleFields().clear();
    for (int i = 0; i < 900; i++) {
        smallRecord.setSimpleField(i + "", bufStr);
    }
    String path1 = "/" + root + "/test1";
    zkClient.createPersistent(path1, true);
    zkClient.writeData(path1, smallRecord);
    ZNRecord record = zkClient.readData(path1);
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // Oversized data is still written because the serializer compresses it:
    // prepare a znode larger than 1 MB uncompressed.
    final ZNRecord largeRecord = new ZNRecord("oversize");
    largeRecord.getSimpleFields().clear();
    for (int i = 0; i < 1024; i++) {
        largeRecord.setSimpleField(i + "", bufStr);
    }
    String path2 = "/" + root + "/test2";
    zkClient.createPersistent(path2, true);
    try {
        zkClient.writeData(path2, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    record = zkClient.readData(path2);
    Assert.assertNotNull(record);
    // An oversized write also succeeds against an existing znode and replaces its data.
    record = zkClient.readData(path1);
    try {
        zkClient.writeData(path1, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    ZNRecord recordNew = zkClient.readData(path1);
    byte[] arr = serializer.serialize(record);
    byte[] arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    // test ZkDataAccessor
    ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
    admin.addCluster(className, true);
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
    admin.addInstance(className, instanceConfig);
    // Oversized data is still persisted through the accessor (compression applies).
    ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
    Builder keyBuilder = accessor.keyBuilder();
    IdealState idealState = new IdealState("currentState");
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
    Assert.assertTrue(succeed);
    // A path that was never written reads back as null.
    HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
    Assert.assertNull(property);
    // Legal-sized data gets written to ZK.
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 900; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
    Assert.assertTrue(succeed);
    record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // An oversized update also goes through and changes the existing data.
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 900; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
    Assert.assertTrue(succeed);
    recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
    arr = serializer.serialize(record);
    arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
}
Also used: ZkClient (org.apache.helix.manager.zk.ZkClient), ZkBaseDataAccessor (org.apache.helix.manager.zk.ZkBaseDataAccessor), Builder (org.apache.helix.PropertyKey.Builder), Date (java.util.Date), IdealState (org.apache.helix.model.IdealState), HelixException (org.apache.helix.HelixException), ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin), InstanceConfig (org.apache.helix.model.InstanceConfig), HelixProperty (org.apache.helix.HelixProperty), ZNRecord (org.apache.helix.ZNRecord), ZNRecordSerializer (org.apache.helix.manager.zk.ZNRecordSerializer), ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor), Test (org.testng.annotations.Test)
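
The two size-limit tests are identical except for the serializer under test; a minimal sketch of the only wiring that differs (ZK_ADDR as defined in the test class):

// ZNRecordSerializer serializes a record in one pass; ZNRecordStreamingSerializer
// writes it through Jackson's streaming API. Both plug into ZkClient the same way.
ZkClient defaultClient = new ZkClient(ZK_ADDR);
defaultClient.setZkSerializer(new ZNRecordSerializer());

ZkClient streamingClient = new ZkClient(ZK_ADDR);
streamingClient.setZkSerializer(new ZNRecordStreamingSerializer());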

Aggregations

HelixProperty (org.apache.helix.HelixProperty): 27 usages
PropertyKey (org.apache.helix.PropertyKey): 16 usages
ZNRecord (org.apache.helix.ZNRecord): 15 usages
HelixDataAccessor (org.apache.helix.HelixDataAccessor): 9 usages
ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor): 6 usages
ArrayList (java.util.ArrayList): 5 usages
Map (java.util.Map): 5 usages
PropertyType (org.apache.helix.PropertyType): 5 usages
ZkClient (org.apache.helix.manager.zk.ZkClient): 5 usages
IdealState (org.apache.helix.model.IdealState): 5 usages
HashMap (java.util.HashMap): 4 usages
InstanceConfig (org.apache.helix.model.InstanceConfig): 4 usages
List (java.util.List): 3 usages
HelixException (org.apache.helix.HelixException): 3 usages
Builder (org.apache.helix.PropertyKey.Builder): 3 usages
ZNRecordAssembler (org.apache.helix.ZNRecordAssembler): 3 usages
Test (org.testng.annotations.Test): 3 usages
StatsSnapshot (com.github.ambry.server.StatsSnapshot): 2 usages
Date (java.util.Date): 2 usages
Set (java.util.Set): 2 usages