Usage of org.apache.helix.model.InstanceConfig in the Apache Helix project: class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
// Verifies ZNRecordSerializer behavior around ZooKeeper's default 1 MB znode
// limit. The expectation encoded below is that writes of records serialized
// above 1 MB succeed (no HelixException) because compression is applied by
// the serializer, and that such writes DO land on ZK and replace prior data.
String className = getShortClassName();
System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
ZNRecordSerializer serializer = new ZNRecordSerializer();
// NOTE(review): zkClient is never closed in this test; consider try/finally —
// confirm whether the surrounding test harness cleans up connections.
ZkClient zkClient = new ZkClient(ZK_ADDR);
zkClient.setZkSerializer(serializer);
String root = className;
// 1 KB filler payload: each simple field set below adds ~1 KB to the record.
byte[] buf = new byte[1024];
for (int i = 0; i < 1024; i++) {
buf[i] = 'a';
}
String bufStr = new String(buf);
// test zkClient
// legal-sized data gets written to zk
// write a znode of size less than 1m
final ZNRecord smallRecord = new ZNRecord("normalsize");
smallRecord.getSimpleFields().clear();
// ~900 KB of simple fields — under the 1 MB znode limit.
for (int i = 0; i < 900; i++) {
smallRecord.setSimpleField(i + "", bufStr);
}
String path1 = "/" + root + "/test1";
zkClient.createPersistent(path1, true);
zkClient.writeData(path1, smallRecord);
ZNRecord record = zkClient.readData(path1);
// Sanity check: the round-tripped record serializes to at least the raw
// payload size (field values alone are 900 * 1024 bytes).
Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
// oversized data doesn't create any data on zk
// prepare a znode of size larger than 1m
final ZNRecord largeRecord = new ZNRecord("oversize");
largeRecord.getSimpleFields().clear();
// ~1 MB+ of simple fields — over the uncompressed znode limit.
for (int i = 0; i < 1024; i++) {
largeRecord.setSimpleField(i + "", bufStr);
}
String path2 = "/" + root + "/test2";
zkClient.createPersistent(path2, true);
try {
zkClient.writeData(path2, largeRecord);
} catch (HelixException e) {
Assert.fail("Should not fail because data size is larger than 1M since compression applied");
}
// The oversized (compressed) write is expected to have landed on ZK.
record = zkClient.readData(path2);
Assert.assertNotNull(record);
// oversized write doesn't overwrite existing data on zk
// (comment retained from original; the assertions below actually verify the
// write DID replace path1's data — the serialized bytes differ afterward)
record = zkClient.readData(path1);
try {
zkClient.writeData(path1, largeRecord);
} catch (HelixException e) {
Assert.fail("Should not fail because data size is larger than 1M since compression applied");
}
ZNRecord recordNew = zkClient.readData(path1);
byte[] arr = serializer.serialize(record);
byte[] arrNew = serializer.serialize(recordNew);
Assert.assertFalse(Arrays.equals(arr, arrNew));
// test ZkDataAccessor
ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
admin.addCluster(className, true);
InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
admin.addInstance(className, instanceConfig);
// oversized data should not create any new data on zk
ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(className, new ZkBaseDataAccessor(zkClient));
Builder keyBuilder = accessor.keyBuilder();
// Build an IdealState whose backing record is padded past 1 MB.
IdealState idealState = new IdealState("currentState");
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 1024; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
// setProperty with an oversized record still succeeds (compression).
boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
Assert.assertTrue(succeed);
// Unrelated path was never written; reading it must yield null.
HelixProperty property = accessor.getProperty(keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
Assert.assertNull(property);
// legal sized data gets written to zk
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
for (int i = 0; i < 900; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertTrue(succeed);
record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
// oversized data should not update existing data on zk
// (comment retained from original; the assertions below verify the update
// DID change the stored record — serialized bytes differ)
idealState.getRecord().getSimpleFields().clear();
idealState.setStateModelDefRef("MasterSlave");
idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
idealState.setNumPartitions(10);
// Different key range (900..1023) so the updated record is distinguishable.
for (int i = 900; i < 1024; i++) {
idealState.getRecord().setSimpleField(i + "", bufStr);
}
// System.out.println("record: " + idealState.getRecord());
succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
Assert.assertTrue(succeed);
recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
arr = serializer.serialize(record);
arrNew = serializer.serialize(recordNew);
Assert.assertFalse(Arrays.equals(arr, arrNew));
System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at " + new Date(System.currentTimeMillis()));
}
Usage of org.apache.helix.model.InstanceConfig in the Apache Helix project: class MockHelixAdmin, method enableInstance.
@Override
public void enableInstance(String clusterName, String instanceName, boolean enabled) {
// Flip the enabled flag on an instance's config znode, creating the parent
// instance-configs node on demand (mock behavior: the parent is seeded with a
// ZNRecord keyed by the instance name, mirroring the original implementation).
String configsRoot = PropertyPathBuilder.instanceConfig(clusterName);
if (!_baseDataAccessor.exists(configsRoot, 0)) {
_baseDataAccessor.create(configsRoot, new ZNRecord(instanceName), 0);
}
String configPath = configsRoot + "/" + instanceName;
// NOTE(review): assumes a record already exists at configPath; a null read
// would be handed straight to the InstanceConfig constructor — confirm.
ZNRecord stored = (ZNRecord) _baseDataAccessor.get(configPath, null, 0);
InstanceConfig config = new InstanceConfig(stored);
config.setInstanceEnabled(enabled);
_baseDataAccessor.set(configPath, config.getRecord(), 0);
}
Usage of org.apache.helix.model.InstanceConfig in the Apache Helix project: class TestTaskThrottling, method setParticipantsCapacity.
private void setParticipantsCapacity(int perNodeTaskLimitation) {
// Apply the same max-concurrent-task cap to every participant in the cluster.
for (int node = 0; node < _numNodes; node++) {
String participantName = PARTICIPANT_PREFIX + "_" + (_startPort + node);
InstanceConfig config =
_setupTool.getClusterManagementTool().getInstanceConfig(CLUSTER_NAME, participantName);
config.setMaxConcurrentTask(perNodeTaskLimitation);
// Write the modified config back through the same management tool.
_setupTool.getClusterManagementTool().setInstanceConfig(CLUSTER_NAME, participantName, config);
}
}
Usage of org.apache.helix.model.InstanceConfig in the Apache Helix project: class ClusterSetup, method addInstanceToCluster.
public void addInstanceToCluster(String clusterName, String instanceId) {
// Parse the instance id into an InstanceConfig and register it with the cluster.
_admin.addInstance(clusterName, InstanceConfig.toInstanceConfig(instanceId));
}
Usage of org.apache.helix.model.InstanceConfig in the Apache Helix project: class YAMLClusterSetup, method getInstanceCfg.
private static InstanceConfig getInstanceCfg(ParticipantConfig participant) {
// Build an InstanceConfig from a YAML participant entry.
// All three of name, host, and port are mandatory.
boolean missingField = participant == null || participant.name == null
|| participant.host == null || participant.port == null;
if (missingField) {
throw new HelixException("Participant must have a specified name, host, and port!");
}
InstanceConfig config = new InstanceConfig(participant.name);
config.setHostName(participant.host);
config.setPort(String.valueOf(participant.port));
return config;
}
Aggregations