Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.
From the class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordStreamingSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
      + new Date(System.currentTimeMillis()));
  ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // test zkClient
  // legal-sized data gets written to zk
  // write a znode of size less than 1MB
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // oversized data can still be created on zk because it is compressed
  // prepare a znode of size larger than 1MB
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though the data is larger than 1MB, since compression is applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // an oversized write to an existing znode also succeeds and replaces the data
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though the data is larger than 1MB, since compression is applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // test ZkDataAccessor
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // oversized data is compressed, so setProperty still succeeds
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
  Assert.assertTrue(succeed);
  HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
  Assert.assertNotNull(property);
  // legal-sized data gets written to zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // an update that pushes the record over 1MB is compressed and still applied,
  // so the stored data changes
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
      + new Date(System.currentTimeMillis()));
}
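The payload above is 1024 simple fields of 1 KB each, comfortably past ZooKeeper's default znode limit of roughly 1 MB, yet every write is expected to succeed because the serializer compresses oversized records. The following self-contained sketch (the class name and sizes are illustrative, not part of Helix) uses only java.util.zip to show why: a payload of repeated 'a' characters gzips down to a few kilobytes.

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.zip.GZIPOutputStream;

public class CompressionRatioDemo {
  public static void main(String[] args) throws Exception {
    // build ~1MB of highly repetitive data, mirroring the test's bufStr fields
    byte[] buf = new byte[1024];
    Arrays.fill(buf, (byte) 'a');
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 1024; i++) {
      sb.append(i).append('=').append(new String(buf, StandardCharsets.US_ASCII));
    }
    byte[] raw = sb.toString().getBytes(StandardCharsets.UTF_8);
    // gzip the payload, analogous to what Helix does for records over the limit
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(bos)) {
      gzip.write(raw);
    }
    // on a typical run the gzipped size is a few KB, well under the 1MB limit
    System.out.println("raw: " + raw.length + " bytes, gzipped: " + bos.size() + " bytes");
  }
}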
Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.
From the class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at "
      + new Date(System.currentTimeMillis()));
  ZNRecordSerializer serializer = new ZNRecordSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // test zkClient
  // legal-sized data gets written to zk
  // write a znode of size less than 1MB
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // oversized data can still be created on zk because it is compressed
  // prepare a znode of size larger than 1MB
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though the data is larger than 1MB, since compression is applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // an oversized write to an existing znode also succeeds and replaces the data
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though the data is larger than 1MB, since compression is applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // test ZkDataAccessor
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // oversized data is compressed, so setProperty still succeeds
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
  Assert.assertTrue(succeed);
  // a status-update path that was never written should still be empty
  HelixProperty property = accessor.getProperty(
      keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
  Assert.assertNull(property);
  // legal-sized data gets written to zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // an update that pushes the record over 1MB is compressed and still applied,
  // so the stored data changes
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at "
      + new Date(System.currentTimeMillis()));
}
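The two tests are identical except for the serializer: ZNRecordSerializer materializes the whole record as a single JSON byte array, while ZNRecordStreamingSerializer writes fields as a stream, which tends to handle large records more gracefully. Both implement the ZkSerializer interface, so either can be plugged into a ZkClient. A minimal round-trip sketch, assuming only the serialize/deserialize contract exercised by the tests above:

ZNRecord original = new ZNRecord("demo");
original.setSimpleField("key", "value");

ZNRecordSerializer plain = new ZNRecordSerializer();
ZNRecordStreamingSerializer streaming = new ZNRecordStreamingSerializer();

// both serializers round-trip the same record
ZNRecord fromPlain = (ZNRecord) plain.deserialize(plain.serialize(original));
ZNRecord fromStreaming = (ZNRecord) streaming.deserialize(streaming.serialize(original));
Assert.assertEquals(fromPlain.getId(), original.getId());
Assert.assertEquals(fromStreaming.getSimpleField("key"), "value");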
Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.
From the class ClusterStateVerifier, method verifyByZkCallback.
/**
 * This function should always be single-threaded.
 *
 * @param verifier the ZkVerifier to run
 * @param timeout maximum time to wait, in milliseconds
 * @return true if verification succeeds within the timeout, false otherwise
 */
public static boolean verifyByZkCallback(ZkVerifier verifier, long timeout) {
  long startTime = System.currentTimeMillis();
  CountDownLatch countDown = new CountDownLatch(1);
  ZkClient zkClient = verifier.getZkClient();
  String clusterName = verifier.getClusterName();
  // create a marker node so that, when analyzing the zk log, we know when a test ends
  try {
    zkClient.createEphemeral("/" + clusterName + "/CONFIGS/CLUSTER/verify");
  } catch (ZkNodeExistsException ex) {
    LOG.error("There is already a verification in progress", ex);
    throw ex;
  }
  ExtViewVeriferZkListener listener = new ExtViewVeriferZkListener(countDown, zkClient, verifier);
  String extViewPath = PropertyPathBuilder.externalView(clusterName);
  zkClient.subscribeChildChanges(extViewPath, listener);
  for (String child : zkClient.getChildren(extViewPath)) {
    String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
    zkClient.subscribeDataChanges(childPath, listener);
  }
  // do an initial verification
  boolean result = verifier.verify();
  if (!result) {
    try {
      result = countDown.await(timeout, TimeUnit.MILLISECONDS);
      if (!result) {
        // make a final attempt after the timeout
        result = verifier.verify();
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
  // clean up
  zkClient.unsubscribeChildChanges(extViewPath, listener);
  for (String child : zkClient.getChildren(extViewPath)) {
    String childPath = extViewPath.equals("/") ? extViewPath + child : extViewPath + "/" + child;
    zkClient.unsubscribeDataChanges(childPath, listener);
  }
  long endTime = System.currentTimeMillis();
  zkClient.delete("/" + clusterName + "/CONFIGS/CLUSTER/verify");
  // debug
  System.err.println(result + ": wait " + (endTime - startTime) + "ms, " + verifier);
  return result;
}
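A typical call site, sketched under the assumption that the caller exposes ZK_ADDR and clusterName as in the test examples above; BestPossAndExtViewZkVerifier is one of the ZkVerifier implementations nested in ClusterStateVerifier:

// block for up to 30 seconds until the external view converges
boolean verified = ClusterStateVerifier.verifyByZkCallback(
    new ClusterStateVerifier.BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName), 30 * 1000L);
Assert.assertTrue(verified);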
Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.
From the class IntegrationTestUtil, method processCommandLineArgs.
static void processCommandLineArgs(String[] cliArgs) {
  CommandLineParser cliParser = new GnuParser();
  Options cliOptions = constructCommandLineOptions();
  CommandLine cmd = null;
  try {
    cmd = cliParser.parse(cliOptions, cliArgs);
  } catch (ParseException pe) {
    System.err.println("failed to parse command-line args: " + Arrays.asList(cliArgs)
        + ", exception: " + pe.toString());
    printUsage(cliOptions);
    System.exit(1);
  }
  String zkServer = cmd.getOptionValue(zkSvr);
  ZkClient zkclient = new ZkClient(zkServer, ZkClient.DEFAULT_SESSION_TIMEOUT,
      ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
  IntegrationTestUtil util = new IntegrationTestUtil(zkclient);
  if (cmd != null) {
    if (cmd.hasOption(verifyExternalView)) {
      String[] args = cmd.getOptionValues(verifyExternalView);
      util.verifyExternalView(args);
    } else if (cmd.hasOption(verifyLiveNodes)) {
      String[] args = cmd.getOptionValues(verifyLiveNodes);
      util.verifyLiveNodes(args);
    } else if (cmd.hasOption(readZNode)) {
      String path = cmd.getOptionValue(readZNode);
      util.readZNode(path);
    } else if (cmd.hasOption(readLeader)) {
      String clusterName = cmd.getOptionValue(readLeader);
      PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
      util.readZNode(keyBuilder.controllerLeader().getPath());
    } else {
      printUsage(cliOptions);
    }
  }
}
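Since the method is package-private and driven entirely by its args array, a test in the same package could exercise it directly. The sketch below assumes the long-option names match the constants (zkSvr, readLeader) referenced in the code; the cluster name and address are illustrative:

// hypothetical invocation: print the controller leader znode of MyCluster
IntegrationTestUtil.processCommandLineArgs(new String[] {
    "--zkSvr", "localhost:2181",
    "--readLeader", "MyCluster"
});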
Use of org.apache.helix.manager.zk.ZkClient in project helix by apache.
From the class TestHelper, method setupCluster.
public static void setupCluster(String clusterName, String ZkAddr, int startPort,
    String participantNamePrefix, String resourceNamePrefix, int resourceNb, int partitionNb,
    int nodesNb, int replica, String stateModelDef, RebalanceMode mode, boolean doRebalance)
    throws Exception {
  ZkClient zkClient = new ZkClient(ZkAddr);
  if (zkClient.exists("/" + clusterName)) {
    LOG.warn("Cluster already exists: " + clusterName + ". Deleting it");
    zkClient.deleteRecursively("/" + clusterName);
  }
  ClusterSetup setupTool = new ClusterSetup(ZkAddr);
  setupTool.addCluster(clusterName, true);
  for (int i = 0; i < nodesNb; i++) {
    int port = startPort + i;
    setupTool.addInstanceToCluster(clusterName, participantNamePrefix + "_" + port);
  }
  for (int i = 0; i < resourceNb; i++) {
    String resourceName = resourceNamePrefix + i;
    setupTool.addResourceToCluster(clusterName, resourceName, partitionNb, stateModelDef,
        mode.toString());
    if (doRebalance) {
      setupTool.rebalanceStorageCluster(clusterName, resourceName, replica);
    }
  }
  zkClient.close();
}
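A representative invocation, mirroring how Helix tests typically call this helper (the cluster and prefix names here are illustrative): five participants on ports 12918 through 12922, one resource TestDB0 with 10 partitions and 3 replicas using the MasterSlave state model in SEMI_AUTO mode, rebalanced immediately:

TestHelper.setupCluster("MyTestCluster", ZK_ADDR, 12918, "localhost", "TestDB",
    1, 10, 5, 3, "MasterSlave", RebalanceMode.SEMI_AUTO, true);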