Use of org.apache.helix.model.InstanceConfig in project helix by apache.
The class RoutingTableProvider, method refresh.
public void refresh(List<ExternalView> externalViewList, NotificationContext changeContext) {
  HelixDataAccessor accessor = changeContext.getManager().getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  List<InstanceConfig> configList = accessor.getChildValues(keyBuilder.instanceConfigs());
  List<LiveInstance> liveInstances = accessor.getChildValues(keyBuilder.liveInstances());
  refresh(externalViewList, configList, liveInstances);
}
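In practice this refresh is driven by Helix's listener mechanism rather than called directly. A minimal sketch of wiring up and querying a RoutingTableProvider as a spectator; the cluster name, ZK address, and resource/partition names are hypothetical, and checked exceptions are propagated for brevity:

void startSpectator() throws Exception {
  // Hypothetical cluster "MyCluster" and ZooKeeper at "localhost:2181".
  HelixManager manager = HelixManagerFactory.getZKHelixManager("MyCluster", "observer",
      InstanceType.SPECTATOR, "localhost:2181");
  manager.connect();
  // RoutingTableProvider implements ExternalViewChangeListener, so registering
  // it makes Helix invoke refresh() whenever an external view changes.
  RoutingTableProvider routingTableProvider = new RoutingTableProvider();
  manager.addExternalViewChangeListener(routingTableProvider);
  // Query the routing table: instances hosting partition "MyDB_0" of resource
  // "MyDB" in state "MASTER" (resource and partition names hypothetical).
  List<InstanceConfig> masters = routingTableProvider.getInstances("MyDB", "MyDB_0", "MASTER");
}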
Use of org.apache.helix.model.InstanceConfig in project helix by apache.
The class TestZkHelixAdmin, method testGetResourcesWithTag.
@Test
public void testGetResourcesWithTag() {
  String TEST_TAG = "TestTAG";
  final String clusterName = getShortClassName();
  String rootPath = "/" + clusterName;
  if (_gZkClient.exists(rootPath)) {
    _gZkClient.deleteRecursively(rootPath);
  }
  HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
  tool.addCluster(clusterName, true);
  Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient));
  tool.addStateModelDef(clusterName, "OnlineOffline",
      new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline()));
  for (int i = 0; i < 4; i++) {
    String instanceName = "host" + i + "_9999";
    InstanceConfig config = new InstanceConfig(instanceName);
    config.setHostName("host" + i);
    config.setPort("9999");
    // tag the first two instances
    if (i < 2) {
      config.addTag(TEST_TAG);
    }
    tool.addInstance(clusterName, config);
    tool.enableInstance(clusterName, instanceName, true);
    String path = PropertyPathBuilder.instance(clusterName, instanceName);
    AssertJUnit.assertTrue(_gZkClient.exists(path));
  }
  for (int i = 0; i < 4; i++) {
    String resourceName = "database_" + i;
    IdealState is = new IdealState(resourceName);
    is.setStateModelDefRef("OnlineOffline");
    is.setNumPartitions(2);
    is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
    is.setReplicas("1");
    is.enable(true);
    // tag the first two resources
    if (i < 2) {
      is.setInstanceGroupTag(TEST_TAG);
    }
    tool.addResource(clusterName, resourceName, is);
  }
  List<String> allResources = tool.getResourcesInCluster(clusterName);
  List<String> resourcesWithTag = tool.getResourcesInClusterWithTag(clusterName, TEST_TAG);
  AssertJUnit.assertEquals(allResources.size(), 4);
  AssertJUnit.assertEquals(resourcesWithTag.size(), 2);
}
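A matching lookup exists on the instance side; a minimal sketch continuing from the test above, using HelixAdmin.getInstancesInClusterWithTag to retrieve the tagged instances:

// Under the setup above, host0_9999 and host1_9999 carry TEST_TAG, so the
// returned list should contain exactly those two instance names.
List<String> taggedInstances = tool.getInstancesInClusterWithTag(clusterName, TEST_TAG);
AssertJUnit.assertEquals(taggedInstances.size(), 2);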
Use of org.apache.helix.model.InstanceConfig in project helix by apache.
The class TestZkHelixAdmin, method testEnableDisablePartitions.
@Test
public void testEnableDisablePartitions() {
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  String instanceName = "TestInstance";
  String testResourcePrefix = "TestResource";
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
  admin.addCluster(clusterName, true);
  admin.addInstance(clusterName, new InstanceConfig(instanceName));
  // Test disabling partitions scoped to a single resource
  admin.enablePartition(false, clusterName, instanceName, testResourcePrefix + "0",
      Arrays.asList("1", "2"));
  admin.enablePartition(false, clusterName, instanceName, testResourcePrefix + "1",
      Arrays.asList("2", "3", "4"));
  InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "0").size(), 2);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "1").size(), 3);
  // Test disabling a partition across all resources
  // TODO: Remove this part once setInstanceEnabledForPartition(partition, enabled) is removed
  instanceConfig.setInstanceEnabledForPartition("10", false);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "0").size(), 3);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix + "1").size(), 4);
}
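The same call with the flag flipped re-enables partitions; a minimal sketch continuing from the test above:

// Re-enable partitions "1" and "2" of TestResource0 on the same instance;
// rereading the InstanceConfig should show that resource's disabled list shrink.
admin.enablePartition(true, clusterName, instanceName, testResourcePrefix + "0",
    Arrays.asList("1", "2"));
InstanceConfig updated = admin.getInstanceConfig(clusterName, instanceName);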
Use of org.apache.helix.model.InstanceConfig in project helix by apache.
The class TestZkHelixAdmin, method testLegacyEnableDisablePartition.
@Test
public void testLegacyEnableDisablePartition() {
  String instanceName = "TestInstanceLegacy";
  String testResourcePrefix = "TestResourceLegacy";
  ZNRecord record = new ZNRecord(instanceName);
  List<String> disabledPartitions = new ArrayList<String>(Arrays.asList("1", "2", "3"));
  record.setListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name(),
      disabledPartitions);
  InstanceConfig instanceConfig = new InstanceConfig(record);
  // Disabling a partition that is already in the legacy list is a no-op
  instanceConfig.setInstanceEnabledForPartition(testResourcePrefix, "2", false);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix).size(), 3);
  Assert.assertEquals(instanceConfig.getRecord()
      .getListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name()).size(), 3);
  // Re-enabling removes the partition from the legacy list
  instanceConfig.setInstanceEnabledForPartition(testResourcePrefix, "2", true);
  Assert.assertEquals(instanceConfig.getDisabledPartitions(testResourcePrefix).size(), 2);
  Assert.assertEquals(instanceConfig.getRecord()
      .getListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name()).size(), 2);
}
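Reading the raw list field back shows what the legacy representation actually stores; a short sketch continuing from the record above:

// The legacy format keeps a single, resource-agnostic list of partition names
// under the HELIX_DISABLED_PARTITION list field.
List<String> legacyList = instanceConfig.getRecord()
    .getListField(InstanceConfig.InstanceConfigProperty.HELIX_DISABLED_PARTITION.name());
System.out.println("legacy disabled partitions: " + legacyList); // [1, 3] after the re-enable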
Use of org.apache.helix.model.InstanceConfig in project helix by apache.
The class TestZNRecordSizeLimit, method testZNRecordSizeLimitUseZNRecordStreamingSerializer.
@Test
public void testZNRecordSizeLimitUseZNRecordStreamingSerializer() {
  String className = getShortClassName();
  System.out.println("START testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
      + new Date(System.currentTimeMillis()));
  ZNRecordStreamingSerializer serializer = new ZNRecordStreamingSerializer();
  ZkClient zkClient = new ZkClient(ZK_ADDR);
  zkClient.setZkSerializer(serializer);
  String root = className;
  byte[] buf = new byte[1024];
  for (int i = 0; i < 1024; i++) {
    buf[i] = 'a';
  }
  String bufStr = new String(buf);
  // test zkClient
  // legal-sized data gets written to zk
  // write a znode of size less than 1m
  final ZNRecord smallRecord = new ZNRecord("normalsize");
  smallRecord.getSimpleFields().clear();
  for (int i = 0; i < 900; i++) {
    smallRecord.setSimpleField(i + "", bufStr);
  }
  String path1 = "/" + root + "/test1";
  zkClient.createPersistent(path1, true);
  zkClient.writeData(path1, smallRecord);
  ZNRecord record = zkClient.readData(path1);
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // with compression enabled, even oversized data gets written to zk
  // prepare a znode whose raw size is larger than 1m
  final ZNRecord largeRecord = new ZNRecord("oversize");
  largeRecord.getSimpleFields().clear();
  for (int i = 0; i < 1024; i++) {
    largeRecord.setSimpleField(i + "", bufStr);
  }
  String path2 = "/" + root + "/test2";
  zkClient.createPersistent(path2, true);
  try {
    zkClient.writeData(path2, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though data size exceeds 1M, since compression is applied");
  }
  record = zkClient.readData(path2);
  Assert.assertNotNull(record);
  // an oversized write also overwrites existing data on zk
  record = zkClient.readData(path1);
  try {
    zkClient.writeData(path1, largeRecord);
  } catch (HelixException e) {
    Assert.fail("Should not fail even though data size exceeds 1M, since compression is applied");
  }
  ZNRecord recordNew = zkClient.readData(path1);
  byte[] arr = serializer.serialize(record);
  byte[] arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  // test ZkDataAccessor
  ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
  admin.addCluster(className, true);
  InstanceConfig instanceConfig = new InstanceConfig("localhost_12918");
  admin.addInstance(className, instanceConfig);
  // oversized data is still created on zk because the serializer compresses it
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(className, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  IdealState idealState = new IdealState("currentState");
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  boolean succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_1"), idealState);
  Assert.assertTrue(succeed);
  HelixProperty property = accessor.getProperty(keyBuilder.idealStates("TestDB_1"));
  Assert.assertNotNull(property);
  // legal-sized data gets written to zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 0; i < 900; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.setProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  record = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  Assert.assertTrue(serializer.serialize(record).length > 900 * 1024);
  // an oversized update also goes through and changes the existing data on zk
  idealState.getRecord().getSimpleFields().clear();
  idealState.setStateModelDefRef("MasterSlave");
  idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
  idealState.setNumPartitions(10);
  for (int i = 900; i < 1024; i++) {
    idealState.getRecord().setSimpleField(i + "", bufStr);
  }
  succeed = accessor.updateProperty(keyBuilder.idealStates("TestDB_2"), idealState);
  Assert.assertTrue(succeed);
  recordNew = accessor.getProperty(keyBuilder.idealStates("TestDB_2")).getRecord();
  arr = serializer.serialize(record);
  arrNew = serializer.serialize(recordNew);
  Assert.assertFalse(Arrays.equals(arr, arrNew));
  System.out.println("END testZNRecordSizeLimitUseZNRecordStreamingSerializer at "
      + new Date(System.currentTimeMillis()));
}
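The behavior above hinges on ZooKeeper's default znode size limit of roughly 1 MB (the jute.maxbuffer setting) and on ZNRecordStreamingSerializer compressing payloads under it. A minimal sketch of a pre-write size check one might add; the threshold constant and helper name are assumptions, not Helix API:

// Hypothetical helper: check whether a record's serialized (possibly
// compressed) form fits within ZooKeeper's default ~1 MB znode limit.
private static final int DEFAULT_ZNODE_LIMIT_BYTES = 1024 * 1024;

static boolean fitsDefaultZnodeLimit(ZNRecordStreamingSerializer serializer, ZNRecord record) {
  return serializer.serialize(record).length < DEFAULT_ZNODE_LIMIT_BYTES;
}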