Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in the Apache Helix project.
The class ServerContext, method getDataAccssor.
public HelixDataAccessor getDataAccssor(String clusterName) {
  synchronized (_helixDataAccessorPool) {
    if (!_helixDataAccessorPool.containsKey(clusterName)) {
      ZkBaseDataAccessor<ZNRecord> baseDataAccessor = new ZkBaseDataAccessor<>(getZkClient());
      _helixDataAccessorPool.put(clusterName,
          new ZKHelixDataAccessor(clusterName, InstanceType.ADMINISTRATOR, baseDataAccessor));
    }
    return _helixDataAccessorPool.get(clusterName);
  }
}
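Because the accessor is created once per cluster and then pooled, every request for the same cluster reuses one ZK-backed accessor instead of building a new one. A minimal sketch of a caller, assuming a serverContext reference; the cluster name and the live-instance lookup below are illustrative, not part of ServerContext:

  // Hypothetical caller: reuse the pooled accessor to list live instances.
  HelixDataAccessor accessor = serverContext.getDataAccssor("MyCluster");
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  List<String> liveInstances = accessor.getChildNames(keyBuilder.liveInstances());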
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in the Apache Helix project.
The class HelixCustomCodeRunner, method start.
/**
 * Starts the runner: registers a LeaderStandby state model factory that wraps
 * the callback, then writes the ideal state for the participant-leader
 * resource so the callback is invoked on changes to any of the subscribed
 * notificationTypes.
 * @throws Exception
 */
public void start() throws Exception {
  if (_callback == null || _notificationTypes == null || _notificationTypes.size() == 0
      || _resourceName == null) {
    throw new IllegalArgumentException("Require callback | notificationTypes | resourceName");
  }
  LOG.info("Register participantLeader on " + _notificationTypes + " using " + _resourceName);
  _stateModelFty = new GenericLeaderStandbyStateModelFactory(_callback, _notificationTypes);
  StateMachineEngine stateMach = _manager.getStateMachineEngine();
  stateMach.registerStateModelFactory(LEADER_STANDBY, _stateModelFty, _resourceName);
  ZkClient zkClient = null;
  try {
    // Manually add the ideal state for the participant leader using the
    // LeaderStandby model.
    zkClient = new ZkClient(_zkAddr, ZkClient.DEFAULT_SESSION_TIMEOUT,
        ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
    HelixDataAccessor accessor = new ZKHelixDataAccessor(_manager.getClusterName(),
        new ZkBaseDataAccessor<ZNRecord>(zkClient));
    Builder keyBuilder = accessor.keyBuilder();
    IdealState idealState = new IdealState(_resourceName);
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setReplicas(IdealState.IdealStateConstants.ANY_LIVEINSTANCE.toString());
    idealState.setNumPartitions(1);
    idealState.setStateModelDefRef(LEADER_STANDBY);
    idealState.setStateModelFactoryName(_resourceName);
    List<String> prefList = new ArrayList<String>(
        Arrays.asList(IdealState.IdealStateConstants.ANY_LIVEINSTANCE.toString()));
    idealState.getRecord().setListField(_resourceName + "_0", prefList);
    // Retry until the ideal state is visible under the idealStates parent path.
    List<String> idealStates = accessor.getChildNames(keyBuilder.idealStates());
    while (idealStates == null || !idealStates.contains(_resourceName)) {
      accessor.setProperty(keyBuilder.idealStates(_resourceName), idealState);
      idealStates = accessor.getChildNames(keyBuilder.idealStates());
    }
    LOG.info("Set idealState for participantLeader:" + _resourceName + ", idealState:"
        + idealState);
  } finally {
    if (zkClient != null && zkClient.getConnection() != null) {
      zkClient.close();
    }
  }
}
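A minimal usage sketch for this class, assuming a connected HelixManager and a CustomCodeCallbackHandler implementation; the change type and resource name below are illustrative:

  // Hypothetical wiring: run custom code on exactly one live participant at a time.
  HelixCustomCodeRunner runner = new HelixCustomCodeRunner(manager, zkAddr)
      .invoke(callback)                                  // the CustomCodeCallbackHandler
      .on(ChangeType.LIVE_INSTANCE)                      // notification types to subscribe to
      .usingLeaderStandbyModel("myCustomCodeResource");  // resource name registered in start()
  runner.start();

The LeaderStandby model with a single partition and ANY_LIVEINSTANCE replicas means only the current LEADER replica invokes the callback, so the custom code behaves as a cluster-wide singleton.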
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in the Apache Helix project.
The class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordStreamingSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
      + new Date(System.currentTimeMillis()));
  ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // Test zkClient.
  // Legal-sized data gets written to ZK: write a znode of size less than 1 MB.
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // An oversized write succeeds thanks to compression: prepare a znode larger than 1 MB.
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though data size is larger than 1M, since compression is applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // An oversized write also overwrites existing data: the stored bytes change.
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though data size is larger than 1M, since compression is applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // Test ZkDataAccessor.
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // Oversized data is compressed, so setProperty succeeds and creates the node.
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
  Assert.assertTrue(succeed);
  HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
  Assert.assertNotNull(property);
  // Legal-sized data gets written to ZK.
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // An oversized update also succeeds and changes the existing data.
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
      + new Date(System.currentTimeMillis()));
}
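The size arithmetic behind the two records: each simple field carries a 1 KB value, so 900 fields come to roughly 900 KB (under ZooKeeper's default 1 MB znode limit), while 1024 fields exceed 1 MB once key names and serialization overhead are added, which is why those writes only succeed because compression kicks in. A hedged sketch of a pre-write size check; the limit constant is an assumption about the deployment, not a Helix API:

  // Illustrative guard: measure the serialized size before writing to ZK.
  final int znodeLimitBytes = 1024 * 1024; // ZooKeeper's default jute.maxbuffer
  byte[] serialized = serializer.serialize(largeRecord);
  if (serialized.length >= znodeLimitBytes) {
    // 1024 fields * 1 KB values ~= 1 MB before overhead, so the "oversize"
    // record lands here unless compression shrinks it below the limit.
    System.out.println(largeRecord.getId() + " serializes to " + serialized.length + " bytes");
  }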
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in the Apache Helix project.
The class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordSerializer at "
      + new Date(System.currentTimeMillis()));
  ZNRecordSerializer serializer = new ZNRecordSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // Test zkClient.
  // Legal-sized data gets written to ZK: write a znode of size less than 1 MB.
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // An oversized write succeeds thanks to compression: prepare a znode larger than 1 MB.
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though data size is larger than 1M, since compression is applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // An oversized write also overwrites existing data: the stored bytes change.
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though data size is larger than 1M, since compression is applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // Test ZkDataAccessor.
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // Oversized data is compressed, so setProperty succeeds.
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB0"), idealState);
  Assert.assertTrue(succeed);
  // This status path was never written, so the read must return null.
  HelixProperty property = accessor.getProperty(
      keyBuilder.stateTransitionStatus("localhost_12918", "session_1", "partition_1"));
  Assert.assertNull(property);
  // Legal-sized data gets written to ZK.
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // An oversized update also succeeds and changes the existing data.
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB1"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB1")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordSerializer at "
      + new Date(System.currentTimeMillis()));
}
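If a deployment genuinely needs larger znodes rather than relying on compression, ZooKeeper's limit itself can be raised via its jute.maxbuffer system property; a hedged sketch, assuming both client and server JVMs are configured consistently (the 4 MB value is illustrative):

  // jute.maxbuffer is read once at JVM startup; the client and the server must
  // agree on the raised value, or reads of large znodes will fail.
  System.setProperty("jute.maxbuffer", String.valueOf(4 * 1024 * 1024)); // 4 MB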
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in the Apache Helix project.
The class TestZKPathDataDumpTask, method testCapacityReached.
@Test
public void testCapacityReached() throws Exception {
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  int n = 1;
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  TestHelper.setupCluster(clusterName, ZK_ADDR,
      12918,        // participant port
      "localhost",  // participant name prefix
      "TestDB",     // resource name prefix
      1,            // resources
      2,            // partitions per resource
      n,            // number of nodes
      1,            // replicas
      "MasterSlave",
      true);        // do rebalance
  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();
  HelixManager manager = mock(HelixManager.class);
  when(manager.getHelixDataAccessor()).thenReturn(accessor);
  when(manager.getClusterName()).thenReturn(clusterName);
  // Run the dump task without statusUpdates and errors; it should not remove any
  // existing statusUpdate/error paths.
  ZKPathDataDumpTask task = new ZKPathDataDumpTask(manager, Long.MAX_VALUE, Long.MAX_VALUE, 1);
  task.run();
  PropertyKey controllerStatusUpdateKey = keyBuilder.controllerTaskStatuses();
  Assert.assertTrue(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
  PropertyKey controllerErrorKey = keyBuilder.controllerTaskErrors();
  Assert.assertTrue(baseAccessor.exists(controllerErrorKey.getPath(), 0));
  PropertyKey statusUpdateKey = keyBuilder.stateTransitionStatus("localhost_12918");
  Assert.assertTrue(baseAccessor.exists(statusUpdateKey.getPath(), 0));
  PropertyKey errorKey = keyBuilder.stateTransitionErrors("localhost_12918");
  Assert.assertTrue(baseAccessor.exists(errorKey.getPath(), 0));
  // Add participant status updates and errors.
  statusUpdateKey =
      keyBuilder.stateTransitionStatus("localhost_12918", "session_0", "TestDB0", "TestDB0_0");
  accessor.setProperty(statusUpdateKey, new StatusUpdate(new ZNRecord("statusUpdate")));
  errorKey = keyBuilder.stateTransitionError("localhost_12918", "session_0", "TestDB0", "TestDB0_0");
  accessor.setProperty(errorKey, new Error(new ZNRecord("error")));
  // Add controller status updates and errors (one of each, should not trigger anything).
  controllerStatusUpdateKey = keyBuilder.controllerTaskStatus("session_0", "TestDB");
  accessor.setProperty(controllerStatusUpdateKey, new StatusUpdate(new ZNRecord("controllerStatusUpdate")));
  controllerErrorKey = keyBuilder.controllerTaskError("TestDB_error");
  accessor.setProperty(controllerErrorKey, new Error(new ZNRecord("controllerError")));
  // Run the dump task; it should not remove anything because the threshold is not exceeded.
  task.run();
  Assert.assertTrue(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
  Assert.assertTrue(baseAccessor.exists(controllerErrorKey.getPath(), 0));
  Assert.assertTrue(baseAccessor.exists(statusUpdateKey.getPath(), 0));
  Assert.assertTrue(baseAccessor.exists(errorKey.getPath(), 0));
  // Add a second set of all status updates and errors.
  statusUpdateKey =
      keyBuilder.stateTransitionStatus("localhost_12918", "session_0", "TestDB0", "TestDB0_1");
  accessor.setProperty(statusUpdateKey, new StatusUpdate(new ZNRecord("statusUpdate")));
  errorKey = keyBuilder.stateTransitionError("localhost_12918", "session_0", "TestDB0", "TestDB0_1");
  accessor.setProperty(errorKey, new Error(new ZNRecord("error")));
  controllerStatusUpdateKey = keyBuilder.controllerTaskStatus("session_0", "TestDB1");
  accessor.setProperty(controllerStatusUpdateKey, new StatusUpdate(new ZNRecord("controllerStatusUpdate")));
  controllerErrorKey = keyBuilder.controllerTaskError("TestDB1_error");
  accessor.setProperty(controllerErrorKey, new Error(new ZNRecord("controllerError")));
  // Run the dump task again; it should remove everything since the capacity
  // (maxLeafCount = 1) is exceeded.
  task.run();
  Assert.assertFalse(baseAccessor.exists(controllerStatusUpdateKey.getPath(), 0));
  Assert.assertFalse(baseAccessor.exists(controllerErrorKey.getPath(), 0));
  Assert.assertFalse(baseAccessor.exists(statusUpdateKey.getPath(), 0));
  Assert.assertFalse(baseAccessor.exists(errorKey.getPath(), 0));
}
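Outside the test, a dump task like this would typically run on a schedule. A minimal sketch using java.util.concurrent's ScheduledExecutorService, assuming a connected HelixManager; the retention thresholds, leaf cap, and period below are illustrative choices, not Helix defaults:

  // Purge status updates and errors untouched for 24 hours, and trim each
  // path to at most 100 leaf nodes, checking once per hour.
  ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
  ZKPathDataDumpTask cleanupTask = new ZKPathDataDumpTask(manager,
      TimeUnit.HOURS.toMillis(24), TimeUnit.HOURS.toMillis(24), 100);
  executor.scheduleAtFixedRate(cleanupTask, 0, 1, TimeUnit.HOURS);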