Example 41 with ZkClient

Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.

From the class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordStreamingSerializer.

@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
    String className = getShortClassName();
    System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
    ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
    ZkClient zkClient = new ZkClient(ZK_ADDR);
    zkClient.setZkSerializer(serializer);
    String root = className;
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024; i++) {
        buf[i] = 'a';
    }
    String bufStr = new String(buf);
    // test zkClient
    // legal-sized data gets written to zk
    // write a znode of size less than 1m
    final ZNRecord smallRecord = new ZNRecord("normalsize");
    smallRecord.getSimpleFields().clear();
    for (int i = 0; i < 900; i++) {
        smallRecord.setSimpleField(i + "", bufStr);
    }
    String path1 = "/" + root + "/test1";
    zkClient.createPersistent(path1, true);
    zkClient.writeData(path1, smallRecord);
    ZNRecord record = zkClient.readData(path1);
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // with compression enabled, an oversized record can still be written to zk
    // prepare a znode whose serialized size is larger than 1m
    final ZNRecord largeRecord = new ZNRecord("oversize");
    largeRecord.getSimpleFields().clear();
    for (int i = 0; i < 1024; i++) {
        largeRecord.setSimpleField(i + "", bufStr);
    }
    String path2 = "/" + root + "/test2";
    zkClient.createPersistent(path2, true);
    try {
        zkClient.writeData(path2, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    record = zkClient.readData(path2);
    Assert.assertNotNull(record);
    // with compression, an oversized write still overwrites the existing data on zk
    record = zkClient.readData(path1);
    try {
        zkClient.writeData(path1, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    ZNRecord recordNew = zkClient.readData(path1);
    byte[] arr = serializer.serialize(record);
    byte[] arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    // test ZkDataAccessor
    ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
    admin.addCluster(className, true);
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
    admin.addInstance(className, instanceConfig);
    // with compression, even oversized data can be created through the accessor
    ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
    Builder keyBuilder = accessor.keyBuilder();
    // ZNRecord statusUpdates = new ZNRecord("statusUpdates");
    IdealState idealState = new IdealState("currentState");
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
    Assert.assertTrue(succeed);
    HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
    Assert.assertNotNull(property);
    // legal sized data gets written to zk
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 900; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
    Assert.assertTrue(succeed);
    record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // with compression, even oversized data can update existing data on zk
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 900; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    // System.out.println("record: " + idealState.getRecord());
    succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
    Assert.assertTrue(succeed);
    recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
    arr = serializer.serialize(record);
    arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at " + new Date(System.currentTimeMillis()));
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) ZkBaseDataAccessor(org.apache.helix.manager.zk.ZkBaseDataAccessor) ZNRecordStreamingSerializer(org.apache.helix.manager.zk.ZNRecordStreamingSerializer) Builder(org.apache.helix.PropertyKey.Builder) Date(java.util.Date) IdealState(org.apache.helix.model.IdealState) HelixException(org.apache.helix.HelixException) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) InstanceConfig(org.apache.helix.model.InstanceConfig) HelixProperty(org.apache.helix.HelixProperty) ZNRecord(org.apache.helix.ZNRecord) ZKHelixDataAccessor(org.apache.helix.manager.zk.ZKHelixDataAccessor) Test(org.testng.annotations.Test)
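
For readers who want to try the streaming serializer outside of the test harness, here is a minimal sketch. It only exercises calls already shown above (setZkSerializer, createPersistent, writeData, readData); the connect string and node path are illustrative placeholders, not values from the Helix test suite.

import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordStreamingSerializer;
import org.apache.helix.manager.zk.ZkClient;

public class StreamingSerializerSketch {
    public static void main(String[] args) {
        // placeholder connect string; point this at a real ZooKeeper ensemble
        ZkClient zkClient = new ZkClient("localhost:2181");
        zkClient.setZkSerializer(new ZNRecordStreamingSerializer());
        try {
            ZNRecord record = new ZNRecord("example");
            record.setSimpleField("key", "value");
            // create the node (and parents) first, then write the record, as in the test above
            zkClient.createPersistent("/sketch/example", true);
            zkClient.writeData("/sketch/example", record);
            ZNRecord readBack = zkClient.readData("/sketch/example");
            System.out.println("read back record id: " + readBack.getId());
        } finally {
            zkClient.close();
        }
    }
}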

Example 42 with ZkClient

Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.

From the class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordSerializer.

@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
    String className = getShortClassName();
    System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
    ZNRecordSerializer serializer = new ZNRecordSerializer();
    ZkClient zkClient = new ZkClient(ZK_ADDR);
    zkClient.setZkSerializer(serializer);
    String root = className;
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024; i++) {
        buf[i] = 'a';
    }
    String bufStr = new String(buf);
    // test zkClient
    // legal-sized data gets written to zk
    // write a znode of size less than 1m
    final ZNRecord smallRecord = new ZNRecord("normalsize");
    smallRecord.getSimpleFields().clear();
    for (int i = 0; i < 900; i++) {
        smallRecord.setSimpleField(i + "", bufStr);
    }
    String path1 = "/" + root + "/test1";
    zkClient.createPersistent(path1, true);
    zkClient.writeData(path1, smallRecord);
    ZNRecord record = zkClient.readData(path1);
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // with compression enabled, an oversized record can still be written to zk
    // prepare a znode whose serialized size is larger than 1m
    final ZNRecord largeRecord = new ZNRecord("oversize");
    largeRecord.getSimpleFields().clear();
    for (int i = 0; i < 1024; i++) {
        largeRecord.setSimpleField(i + "", bufStr);
    }
    String path2 = "/" + root + "/test2";
    zkClient.createPersistent(path2, true);
    try {
        zkClient.writeData(path2, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    record = zkClient.readData(path2);
    Assert.assertNotNull(record);
    // with compression, an oversized write still overwrites the existing data on zk
    record = zkClient.readData(path1);
    try {
        zkClient.writeData(path1, largeRecord);
    } catch (HelixException e) {
        Assert.fail("Should not fail because data size is larger than 1M since compression applied");
    }
    ZNRecord recordNew = zkClient.readData(path1);
    byte[] arr = serializer.serialize(record);
    byte[] arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    // test ZkDataAccessor
    ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
    admin.addCluster(className, true);
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
    admin.addInstance(className, instanceConfig);
    // with compression, even oversized data can be written via the accessor; a path that was never written stays null
    ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
    Builder keyBuilder = accessor.keyBuilder();
    IdealState idealState = new IdealState("currentState");
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
    Assert.assertTrue(succeed);
    HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
    Assert.assertNull(property);
    // legal sized data gets written to zk
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 0; i < 900; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
    Assert.assertTrue(succeed);
    record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
    Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
    // with compression, even oversized data can update existing data on zk
    idealState.getRecord().getSimpleFields().clear();
    idealState.setStateModelDefRef("MasterSlave");
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setNumPartitions(10);
    for (int i = 900; i < 1024; i++) {
        idealState.getRecord().setSimpleField(i + "", bufStr);
    }
    // System.out.println("record: " + idealState.getRecord());
    succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
    Assert.assertTrue(succeed);
    recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
    arr = serializer.serialize(record);
    arrNew = serializer.serialize(recordNew);
    Assert.assertFalse(Arrays.equals(arr, arrNew));
    System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) ZkBaseDataAccessor(org.apache.helix.manager.zk.ZkBaseDataAccessor) Builder(org.apache.helix.PropertyKey.Builder) Date(java.util.Date) IdealState(org.apache.helix.model.IdealState) HelixException(org.apache.helix.HelixException) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) InstanceConfig(org.apache.helix.model.InstanceConfig) HelixProperty(org.apache.helix.HelixProperty) ZNRecord(org.apache.helix.ZNRecord) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer) ZKHelixDataAccessor(org.apache.helix.manager.zk.ZKHelixDataAccessor) Test(org.testng.annotations.Test)
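
The test above hinges on how large a ZNRecord becomes once serialized. A quick way to check that without touching ZooKeeper is to call the serializer directly; the sketch below is standalone (record name and field sizes are illustrative), and whether records above 1 MB are accepted, compressed, or rejected depends on the Helix version and its size-limit configuration.

import java.util.Arrays;

import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;

public class SerializedSizeSketch {
    public static void main(String[] args) {
        ZNRecordSerializer serializer = new ZNRecordSerializer();
        ZNRecord record = new ZNRecord("sizeCheck");
        // fill the record with a bit under 1 MB of simple fields,
        // mirroring the "normalsize" record in the test above
        char[] chunk = new char[1024];
        Arrays.fill(chunk, 'a');
        String bufStr = new String(chunk);
        for (int i = 0; i < 900; i++) {
            record.setSimpleField(Integer.toString(i), bufStr);
        }
        byte[] bytes = serializer.serialize(record);
        System.out.println("serialized size in bytes: " + bytes.length);
        // round-trip the bytes back into a ZNRecord
        ZNRecord copy = (ZNRecord) serializer.deserialize(bytes);
        System.out.println("round-tripped record id: " + copy.getId());
    }
}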

Example 43 with ZkClient

Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.

From the class ClusterStateVerifier, method verifyByZkCallback.

/**
 * Verifies cluster state by subscribing to ZooKeeper callbacks. This function should
 * always be called from a single thread.
 *
 * @param verifier the ZkVerifier to run
 * @param timeout maximum time to wait for verification, in milliseconds
 * @return true if verification succeeded within the timeout, false otherwise
 */
public static boolean verifyByZkCallback(ZkVerifier verifier, long timeout) {
    long startTime = System.currentTimeMillis();
    CountDownLatch countDown = new CountDownLatch(1);
    ZkClient zkClient = verifier.getZkClient();
    String clusterName = verifier.getClusterName();
    // create a marker node so that, when analyzing the zk log, we know when a test ends
    try {
        zkClient.createEphemeral("/" + clusterName + "/CONFIGS/CLUSTER/verify");
    } catch (ZkNodeExistsException ex) {
        LOG.error("There is already a verification in progress", ex);
        throw ex;
    }
    ExtViewVeriferZkListener listener = new ExtViewVeriferZkListener(countDown, zkClient, verifier);
    String extViewPath = PropertyPathBuilder.externalView(clusterName);
    zkClient.subscribeChildChanges(extViewPath, listener);
    for (String child : zkClient.getChildren(extViewPath)) {
        String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
        zkClient.subscribeDataChanges(childPath, listener);
    }
    // do initial verify
    boolean result = verifier.verify();
    if (!result) {
        try {
            result = countDown.await(timeout, TimeUnit.MILLISECONDS);
            if (!result) {
                // make a final attempt if the wait timed out
                result = verifier.verify();
            }
        } catch (Exception e) {
            // log and fall through with whatever result we have so far
            e.printStackTrace();
        }
    }
    // clean up
    zkClient.unsubscribeChildChanges(extViewPath, listener);
    for (String child : zkClient.getChildren(extViewPath)) {
        String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
        zkClient.unsubscribeDataChanges(childPath, listener);
    }
    long endTime = System.currentTimeMillis();
    zkClient.delete("/" + clusterName + "/CONFIGS/CLUSTER/verify");
    // debug
    System.err.println(result + ": wait " + (endTime - startTime) + "ms, " + verifier);
    return result;
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) ZkNodeExistsException(org.I0Itec.zkclient.exception.ZkNodeExistsException) CountDownLatch(java.util.concurrent.CountDownLatch) ZkNodeExistsException(org.I0Itec.zkclient.exception.ZkNodeExistsException) ParseException(org.apache.commons.cli.ParseException)
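
For context, a caller would typically hand verifyByZkCallback an existing ZkVerifier and a timeout in milliseconds. The snippet below is only a usage sketch: myVerifier stands for whichever ZkVerifier implementation you already have (Helix ships several in its tools package), and the 30-second timeout is an arbitrary example value.

import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.helix.tools.ClusterStateVerifier.ZkVerifier;

public class VerifySketch {
    // myVerifier is assumed to be supplied by the caller; this helper just
    // waits up to 30 seconds for the cluster state to converge
    static boolean waitForConvergence(ZkVerifier myVerifier) {
        return ClusterStateVerifier.verifyByZkCallback(myVerifier, 30 * 1000L);
    }
}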

Example 44 with ZkClient

Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.

From the class IntegrationTestUtil, method processCommandLineArgs.

static void processCommandLineArgs(String[] cliArgs) {
    CommandLineParser cliParser = new GnuParser();
    Options cliOptions = constructCommandLineOptions();
    CommandLine cmd = null;
    try {
        cmd = cliParser.parse(cliOptions, cliArgs);
    } catch (ParseException pe) {
        System.err.println("failed to parse command-line args: " + Arrays.asList(cliArgs) + ", exception: " + pe.toString());
        printUsage(cliOptions);
        System.exit(1);
    }
    String zkServer = cmd.getOptionValue(zkSvr);
    ZkClient zkclient = new ZkClient(zkServer, ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
    IntegrationTestUtil util = new IntegrationTestUtil(zkclient);
    if (cmd != null) {
        if (cmd.hasOption(verifyExternalView)) {
            String[] args = cmd.getOptionValues(verifyExternalView);
            util.verifyExternalView(args);
        } else if (cmd.hasOption(verifyLiveNodes)) {
            String[] args = cmd.getOptionValues(verifyLiveNodes);
            util.verifyLiveNodes(args);
        } else if (cmd.hasOption(readZNode)) {
            String path = cmd.getOptionValue(readZNode);
            util.readZNode(path);
        } else if (cmd.hasOption(readLeader)) {
            String clusterName = cmd.getOptionValue(readLeader);
            PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
            util.readZNode(keyBuilder.controllerLeader().getPath());
        } else {
            printUsage(cliOptions);
        }
    }
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) Options(org.apache.commons.cli.Options) OptionBuilder(org.apache.commons.cli.OptionBuilder) GnuParser(org.apache.commons.cli.GnuParser) CommandLine(org.apache.commons.cli.CommandLine) CommandLineParser(org.apache.commons.cli.CommandLineParser) ParseException(org.apache.commons.cli.ParseException) PropertyKey(org.apache.helix.PropertyKey) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer)
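
As a companion to the example above, here is a minimal, self-contained sketch of the same commons-cli pattern: define an option, parse the arguments, and build a ZkClient with the constructor used in processCommandLineArgs. The option name and default connect string are placeholders for illustration only.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;

public class CliSketch {
    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        // "zkSvr" mirrors the option referenced in the example; rename for your own tool
        options.addOption("zkSvr", true, "ZooKeeper connect string");
        CommandLineParser parser = new GnuParser();
        CommandLine cmd = parser.parse(options, args);
        String zkServer = cmd.getOptionValue("zkSvr", "localhost:2181");
        // same ZkClient constructor as in processCommandLineArgs above
        ZkClient zkclient = new ZkClient(zkServer, ZkClient.DEFAULT_SESSION_TIMEOUT,
            ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
        System.out.println("connected to " + zkServer);
        zkclient.close();
    }
}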

Example 45 with ZkClient

Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.

From the class TestHelper, method setupCluster.

public static void setupCluster(String clusterName, String ZkAddr, int startPort, String participantNamePrefix, String resourceNamePrefix, int resourceNb, int partitionNb, int nodesNb, int replica, String stateModelDef, RebalanceMode mode, boolean doRebalance) throws Exception {
    ZkClient zkClient = new ZkClient(ZkAddr);
    if (zkClient.exists("/" + clusterName)) {
        LOG.warn("Cluster already exists:" + clusterName + ". Deleting it");
        zkClient.deleteRecursively("/" + clusterName);
    }
    ClusterSetup setupTool = new ClusterSetup(ZkAddr);
    setupTool.addCluster(clusterName, true);
    for (int i = 0; i < nodesNb; i++) {
        int port = startPort + i;
        setupTool.addInstanceToCluster(clusterName, participantNamePrefix + "_" + port);
    }
    for (int i = 0; i < resourceNb; i++) {
        String resourceName = resourceNamePrefix + i;
        setupTool.addResourceToCluster(clusterName, resourceName, partitionNb, stateModelDef, mode.toString());
        if (doRebalance) {
            setupTool.rebalanceStorageCluster(clusterName, resourceName, replica);
        }
    }
    zkClient.close();
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) ClusterSetup(org.apache.helix.tools.ClusterSetup)
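
The twelve parameters of setupCluster map directly onto a typical test topology. The call below is only an illustration of that mapping: every value is a placeholder chosen for readability, and it assumes TestHelper (part of the Helix test sources, package org.apache.helix) is on the classpath.

import org.apache.helix.TestHelper;
import org.apache.helix.model.IdealState.RebalanceMode;

public class SetupSketch {
    public static void main(String[] args) throws Exception {
        TestHelper.setupCluster(
            "SketchCluster",          // clusterName
            "localhost:2181",         // ZkAddr
            12918,                    // startPort
            "localhost",              // participantNamePrefix
            "TestDB",                 // resourceNamePrefix
            1,                        // resourceNb
            8,                        // partitionNb
            3,                        // nodesNb
            2,                        // replica
            "MasterSlave",            // stateModelDef
            RebalanceMode.SEMI_AUTO,  // mode
            true);                    // doRebalance
    }
}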

Aggregations

ZkClient (org.apache.helix.manager.zk.ZkClient): 109
ZNRecordSerializer (org.apache.helix.manager.zk.ZNRecordSerializer): 39
ZNRecord (org.apache.helix.ZNRecord): 29
StringRepresentation (org.restlet.representation.StringRepresentation): 29
ClusterSetup (org.apache.helix.tools.ClusterSetup): 26
HelixException (org.apache.helix.HelixException): 23
Builder (org.apache.helix.PropertyKey.Builder): 18
IOException (java.io.IOException): 17
HelixDataAccessor (org.apache.helix.HelixDataAccessor): 15
JsonGenerationException (org.codehaus.jackson.JsonGenerationException): 15
JsonMappingException (org.codehaus.jackson.map.JsonMappingException): 15
ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin): 12
ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor): 10
Test (org.testng.annotations.Test): 10
Date (java.util.Date): 9
BeforeClass (org.testng.annotations.BeforeClass): 9
PropertyKey (org.apache.helix.PropertyKey): 8
IdealState (org.apache.helix.model.IdealState): 8
InstanceConfig (org.apache.helix.model.InstanceConfig): 8
StateModelDefinition (org.apache.helix.model.StateModelDefinition): 7