Use of org.apache.helix.manager.zk.ZNRecordSerializer in project helix by apache:
class TestJacksonPayloadSerializer, method testRawPayloadMissingIfUnspecified.
/**
 * Test that the rawPayload field is not serialized when no payload is set. This is mainly
 * to maintain backward compatibility.
 */
@Test
public void testRawPayloadMissingIfUnspecified() {
  final String RECORD_ID = "testRawPayloadMissingIfUnspecified";
  ZNRecord znRecord = new ZNRecord(RECORD_ID);
  ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
  byte[] serialized = znRecordSerializer.serialize(znRecord);
  ZNRecordStreamingSerializer znRecordStreamingSerializer = new ZNRecordStreamingSerializer();
  byte[] streamingSerialized = znRecordStreamingSerializer.serialize(znRecord);
  ObjectMapper mapper = new ObjectMapper();
  try {
    JsonNode jsonNode = mapper.readTree(new String(serialized));
    Assert.assertFalse(jsonNode.has("rawPayload"));
    JsonNode streamingJsonNode = mapper.readTree(new String(streamingSerialized));
    Assert.assertFalse(streamingJsonNode.has("rawPayload"));
  } catch (IOException e) {
    // JsonProcessingException is a subclass of IOException, so one catch covers both.
    Assert.fail();
  }
}
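For context, a minimal round trip through ZNRecordSerializer looks like the sketch below. This is an illustrative standalone example rather than code from the Helix test suite; the record id and field names are made up.

import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;

public class ZNRecordRoundTrip {
  public static void main(String[] args) {
    // Build a record with one simple field; no payload is ever set, so the
    // serialized JSON should not contain a rawPayload field.
    ZNRecord record = new ZNRecord("example");
    record.setSimpleField("k1", "v1");

    ZNRecordSerializer serializer = new ZNRecordSerializer();
    byte[] bytes = serializer.serialize(record);

    // deserialize returns Object; the concrete type is ZNRecord.
    ZNRecord copy = (ZNRecord) serializer.deserialize(bytes);
    System.out.println(copy.getSimpleField("k1")); // prints "v1"
  }
}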
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project helix by apache:
class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
  ZNRecordSerializer serializer = new ZNRecordSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);

  // test zkClient
  // legal-sized data gets written to ZK: write a znode smaller than 1 MB
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);

  // oversized data is still written to ZK because compression is applied:
  // prepare a znode larger than 1 MB
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Write should not fail: although the data exceeds 1 MB, compression is applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);

  // an oversized write also overwrites existing data on ZK
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Write should not fail: although the data exceeds 1 MB, compression is applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));

  // test ZKHelixDataAccessor
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // oversized data is still accepted, because compression is applied
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
  Assert.assertTrue(succeed);
  HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
  Assert.assertNull(property);

  // legal-sized data gets written to ZK
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);

  // an oversized update also succeeds and changes the existing data
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
}
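The 1 MB threshold in this test reflects ZooKeeper's default jute.maxbuffer limit on znode size, which is why Helix compresses oversized records instead of rejecting them outright. A pre-write size check can be sketched as follows; this is an illustrative helper (the class and method names are made up, and the constant assumes the ZooKeeper default limit).

import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;

public class ZnodeSizeGuard {
  // ZooKeeper rejects znodes larger than jute.maxbuffer, which defaults to 1 MB.
  private static final int DEFAULT_ZNODE_LIMIT_BYTES = 1024 * 1024;

  // Returns true if the serialized form of the record fits under the default limit.
  static boolean fitsInZnode(ZNRecord record) {
    byte[] bytes = new ZNRecordSerializer().serialize(record);
    return bytes.length < DEFAULT_ZNODE_LIMIT_BYTES;
  }
}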
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project helix by apache:
class MockSpectatorProcess, method main.
public static void main(String[] args) throws Exception {
  setup();
  zkServer.getZkClient().setZkSerializer(new ZNRecordSerializer());
  ZNRecord record = zkServer.getZkClient().readData(PropertyPathBuilder.idealState(clusterName, "TestDB"));
  String externalViewPath = PropertyPathBuilder.externalView(clusterName, "TestDB");
  MockSpectatorProcess process = new MockSpectatorProcess();
  process.start();
  // try to route; no master or slave is available yet
  process.routeRequest("TestDB", "TestDB_1");
  // update the external view on ZooKeeper
  zkServer.getZkClient().createPersistent(externalViewPath, record);
  // sleep for some time so that the ZK callback is received
  Thread.sleep(1000);
  process.routeRequest("TestDB", "TestDB_1");
  System.exit(1);
}
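The key pattern above is installing a ZNRecordSerializer on a ZkClient so that subsequent reads and writes operate on ZNRecord objects directly. A standalone sketch of that pattern, with an assumed ZooKeeper address and znode path:

import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;

public class TypedZkRead {
  public static void main(String[] args) {
    // "localhost:2181" and the path below are illustrative.
    ZkClient zkClient = new ZkClient("localhost:2181");
    zkClient.setZkSerializer(new ZNRecordSerializer());
    try {
      // With the serializer installed, readData returns a ZNRecord directly;
      // the second argument returns null instead of throwing if the path is missing.
      ZNRecord record = zkClient.readData("/someCluster/IDEALSTATES/TestDB", true);
      System.out.println(record == null ? "no such znode" : record.getId());
    } finally {
      zkClient.close();
    }
  }
}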
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project helix by apache:
class ClusterSetup, method getConfig.
/**
 * get configs
 * @param type config-scope-type, e.g. CLUSTER, RESOURCE, etc.
 * @param scopeArgsCsv csv-formatted scope-args, e.g. myCluster,testDB
 * @param keysCsv csv-formatted keys, e.g. k1,k2
 * @return json-formatted key-value pairs, e.g. {k1=v1, k2=v2}
 */
public String getConfig(ConfigScopeProperty type, String scopeArgsCsv, String keysCsv) {
  String[] scopeArgs = scopeArgsCsv.split("[\\s,]");
  HelixConfigScope scope = new HelixConfigScopeBuilder(type, scopeArgs).build();
  // parse keys
  String[] keys = keysCsv.split("[\\s,]");
  Map<String, String> keyValueMap = _admin.getConfig(scope, Arrays.asList(keys));
  ZNRecord record = new ZNRecord(type.toString());
  record.getSimpleFields().putAll(keyValueMap);
  ZNRecordSerializer serializer = new ZNRecordSerializer();
  return new String(serializer.serialize(record));
}
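The last two lines rely on the fact that ZNRecordSerializer emits JSON, so wrapping the serialized bytes in a String yields a printable JSON view of the record. A minimal sketch of that trick (the record id and field are illustrative):

import org.apache.helix.ZNRecord;
import org.apache.helix.manager.zk.ZNRecordSerializer;

public class RecordToJson {
  public static void main(String[] args) {
    ZNRecord record = new ZNRecord("CLUSTER");
    record.setSimpleField("allowParticipantAutoJoin", "true");

    // ZNRecordSerializer produces JSON bytes, so this prints the record as JSON.
    String json = new String(new ZNRecordSerializer().serialize(record));
    System.out.println(json);
  }
}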
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project helix by apache:
class IntegrationTestUtil, method processCommandLineArgs.
static void processCommandLineArgs(String[] cliArgs) {
  CommandLineParser cliParser = new GnuParser();
  Options cliOptions = constructCommandLineOptions();
  CommandLine cmd = null;
  try {
    cmd = cliParser.parse(cliOptions, cliArgs);
  } catch (ParseException pe) {
    System.err.println("failed to parse command-line args: " + Arrays.asList(cliArgs) + ", exception: " + pe.toString());
    printUsage(cliOptions);
    System.exit(1);
  }
  String zkServer = cmd.getOptionValue(zkSvr);
  ZkClient zkclient = new ZkClient(zkServer, ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
  IntegrationTestUtil util = new IntegrationTestUtil(zkclient);
  if (cmd != null) {
    if (cmd.hasOption(verifyExternalView)) {
      String[] args = cmd.getOptionValues(verifyExternalView);
      util.verifyExternalView(args);
    } else if (cmd.hasOption(verifyLiveNodes)) {
      String[] args = cmd.getOptionValues(verifyLiveNodes);
      util.verifyLiveNodes(args);
    } else if (cmd.hasOption(readZNode)) {
      String path = cmd.getOptionValue(readZNode);
      util.readZNode(path);
    } else if (cmd.hasOption(readLeader)) {
      String clusterName = cmd.getOptionValue(readLeader);
      PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);
      util.readZNode(keyBuilder.controllerLeader().getPath());
    } else {
      printUsage(cliOptions);
    }
  }
}
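For reference, the four-argument ZkClient constructor used above wires the address, session timeout, connection timeout, and serializer together in one call. A minimal standalone sketch, assuming "localhost:2181" as the ZooKeeper address:

import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;

public class ClientBootstrap {
  public static void main(String[] args) {
    // The address is illustrative; the timeout constants come from ZkClient itself.
    ZkClient zkclient = new ZkClient("localhost:2181",
        ZkClient.DEFAULT_SESSION_TIMEOUT,
        ZkClient.DEFAULT_CONNECTION_TIMEOUT,
        new ZNRecordSerializer());
    // ... use zkclient for typed ZNRecord reads and writes ...
    zkclient.close();
  }
}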