Example 66 with ClusterMapConfig

use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

the class ClusterChangeHandlerTest method instanceConfigInvalidInfoEntryTest.

/**
 * Test that {@link DynamicClusterChangeHandler} is able to handle an invalid info entry in the InstanceConfig at
 * runtime or during initialization.
 */
@Test
public void instanceConfigInvalidInfoEntryTest() {
    Properties properties = new Properties();
    properties.putAll(props);
    properties.setProperty("clustermap.cluster.change.handler.type", "DynamicClusterChangeHandler");
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(properties));
    HelixClusterManager.HelixClusterManagerCallback mockManagerCallback = Mockito.mock(HelixClusterManager.HelixClusterManagerCallback.class);
    HelixClusterManager.ClusterChangeHandlerCallback mockHandlerCallback = Mockito.mock(HelixClusterManager.ClusterChangeHandlerCallback.class);
    doAnswer(returnsFirstArg()).when(mockHandlerCallback).addPartitionIfAbsent(any(), anyLong());
    Counter initFailureCount = new Counter();
    DynamicClusterChangeHandler dynamicChangeHandler = new DynamicClusterChangeHandler(clusterMapConfig, localDc, selfInstanceName, Collections.emptyMap(), mockManagerCallback, mockHandlerCallback, new HelixClusterManagerMetrics(new MetricRegistry(), mockManagerCallback), e -> initFailureCount.inc(), new AtomicLong());
    // create an InstanceConfig with invalid entry that mocks error info added by Helix controller
    PartitionId selectedPartition = testPartitionLayout.getPartitionLayout().getPartitions(null).get(0);
    Replica testReplica = (Replica) selectedPartition.getReplicaIds().get(0);
    DataNode testNode = (DataNode) testReplica.getDataNodeId();
    InstanceConfig instanceConfig = new InstanceConfig(getInstanceName(testNode.getHostname(), testNode.getPort()));
    instanceConfig.setHostName(testNode.getHostname());
    instanceConfig.setPort(Integer.toString(testNode.getPort()));
    instanceConfig.getRecord().setSimpleField(ClusterMapUtils.DATACENTER_STR, testNode.getDatacenterName());
    instanceConfig.getRecord().setSimpleField(ClusterMapUtils.RACKID_STR, testNode.getRackId());
    instanceConfig.getRecord().setSimpleField(ClusterMapUtils.SCHEMA_VERSION_STR, Integer.toString(ClusterMapUtils.CURRENT_SCHEMA_VERSION));
    instanceConfig.getRecord().setListField(ClusterMapUtils.SEALED_STR, Collections.emptyList());
    instanceConfig.getRecord().setListField(ClusterMapUtils.STOPPED_REPLICAS_STR, Collections.emptyList());
    Map<String, Map<String, String>> diskInfos = new HashMap<>();
    assertNotNull("testReplica should not be null", testReplica);
    Map<String, String> diskInfo = new HashMap<>();
    diskInfo.put(ClusterMapUtils.DISK_CAPACITY_STR, Long.toString(testReplica.getDiskId().getRawCapacityInBytes()));
    diskInfo.put(ClusterMapUtils.DISK_STATE, ClusterMapUtils.AVAILABLE_STR);
    String replicasStr = testReplica.getPartition().getId() + ClusterMapUtils.REPLICAS_STR_SEPARATOR + testReplica.getCapacityInBytes() + ClusterMapUtils.REPLICAS_STR_SEPARATOR + testReplica.getPartition().getPartitionClass() + ClusterMapUtils.REPLICAS_DELIM_STR;
    diskInfo.put(ClusterMapUtils.REPLICAS_STR, replicasStr);
    diskInfos.put(testReplica.getDiskId().getMountPath(), diskInfo);
    // add an invalid entry at the end of diskInfos
    Map<String, String> invalidEntry = new HashMap<>();
    invalidEntry.put("INVALID_KEY", "INVALID_VALUE");
    diskInfos.put("INVALID_MOUNT_PATH", invalidEntry);
    instanceConfig.getRecord().setMapFields(diskInfos);
    // we call onDataNodeConfigChange() twice
    InstanceConfigToDataNodeConfigAdapter.Converter converter = new InstanceConfigToDataNodeConfigAdapter.Converter(clusterMapConfig);
    // 1st call, to verify initialization code path
    dynamicChangeHandler.onDataNodeConfigChange(Collections.singleton(converter.convert(instanceConfig)));
    // 2nd call, to verify dynamic update code path
    dynamicChangeHandler.onDataNodeConfigChange(Collections.singletonList(converter.convert(instanceConfig)));
    assertEquals("There shouldn't be initialization errors", 0, initFailureCount.getCount());
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) HashMap(java.util.HashMap) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) AtomicLong(java.util.concurrent.atomic.AtomicLong) Counter(com.codahale.metrics.Counter) InstanceConfig(org.apache.helix.model.InstanceConfig) Map(java.util.Map) Test(org.junit.Test) HelixClusterManagerTest(com.github.ambry.clustermap.HelixClusterManagerTest)
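
Every example on this page builds its ClusterMapConfig the same way: plain java.util.Properties wrapped in a VerifiableProperties. A minimal sketch of that shared setup, using the three property keys that recur throughout these examples (the values here are placeholders):

// imports: java.util.Properties, com.github.ambry.config.VerifiableProperties,
//          com.github.ambry.config.ClusterMapConfig
Properties props = new Properties();
props.setProperty("clustermap.cluster.name", "cluster");
props.setProperty("clustermap.datacenter.name", "dc1");
props.setProperty("clustermap.host.name", "localhost");
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));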

Example 67 with ClusterMapConfig

use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

the class StaticClusterManagerTest method getPartitionsTest.

/**
 * Tests for {@link PartitionLayout#getPartitions(String)} and {@link PartitionLayout#getWritablePartitions(String)}.
 * @throws IOException
 * @throws JSONException
 */
@Test
public void getPartitionsTest() throws IOException, JSONException {
    String specialPartitionClass = "specialPartitionClass";
    TestHardwareLayout hardwareLayout = new TestHardwareLayout("Alpha");
    String dc = hardwareLayout.getRandomDatacenter().getName();
    TestPartitionLayout testPartitionLayout = new TestPartitionLayout(hardwareLayout, dc);
    assertTrue("There should be more than 1 replica per partition in each DC for this test to work", testPartitionLayout.replicaCountPerDc > 1);
    PartitionRangeCheckParams defaultRw = new PartitionRangeCheckParams(0, testPartitionLayout.partitionCount, DEFAULT_PARTITION_CLASS, PartitionState.READ_WRITE);
    // add 15 RW partitions for the special class
    PartitionRangeCheckParams specialRw = new PartitionRangeCheckParams(defaultRw.rangeEnd + 1, 15, specialPartitionClass, PartitionState.READ_WRITE);
    testPartitionLayout.addNewPartitions(specialRw.count, specialPartitionClass, PartitionState.READ_WRITE, dc);
    // add 10 RO partitions for the default class
    PartitionRangeCheckParams defaultRo = new PartitionRangeCheckParams(specialRw.rangeEnd + 1, 10, DEFAULT_PARTITION_CLASS, PartitionState.READ_ONLY);
    testPartitionLayout.addNewPartitions(defaultRo.count, DEFAULT_PARTITION_CLASS, PartitionState.READ_ONLY, dc);
    // add 5 RO partitions for the special class
    PartitionRangeCheckParams specialRo = new PartitionRangeCheckParams(defaultRo.rangeEnd + 1, 5, specialPartitionClass, PartitionState.READ_ONLY);
    testPartitionLayout.addNewPartitions(specialRo.count, specialPartitionClass, PartitionState.READ_ONLY, dc);
    PartitionLayout partitionLayout = testPartitionLayout.getPartitionLayout();
    Properties props = new Properties();
    props.setProperty("clustermap.host.name", "localhost");
    props.setProperty("clustermap.cluster.name", "cluster");
    props.setProperty("clustermap.datacenter.name", dc);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    File tempDir = Files.createTempDirectory("helixClusterManager-" + new Random().nextInt(1000)).toFile();
    String tempDirPath = tempDir.getAbsolutePath();
    String hardwareLayoutPath = tempDirPath + File.separator + "hardwareLayoutTest.json";
    String partitionLayoutPath = tempDirPath + File.separator + "partitionLayoutTest.json";
    Utils.writeJsonObjectToFile(hardwareLayout.getHardwareLayout().toJSONObject(), hardwareLayoutPath);
    Utils.writeJsonObjectToFile(partitionLayout.toJSONObject(), partitionLayoutPath);
    ClusterMap clusterMapManager = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    // "good" cases for getPartitions() and getWritablePartitions() only
    // getPartitions(), class null
    List<? extends PartitionId> returnedPartitions = clusterMapManager.getAllPartitionIds(null);
    checkReturnedPartitions(returnedPartitions, Arrays.asList(defaultRw, defaultRo, specialRw, specialRo));
    // getWritablePartitions(), class null
    returnedPartitions = clusterMapManager.getWritablePartitionIds(null);
    checkReturnedPartitions(returnedPartitions, Arrays.asList(defaultRw, specialRw));
    // getPartitions(), class default
    returnedPartitions = clusterMapManager.getAllPartitionIds(DEFAULT_PARTITION_CLASS);
    checkReturnedPartitions(returnedPartitions, Arrays.asList(defaultRw, defaultRo));
    // getWritablePartitions(), class default
    returnedPartitions = clusterMapManager.getWritablePartitionIds(DEFAULT_PARTITION_CLASS);
    checkReturnedPartitions(returnedPartitions, Collections.singletonList(defaultRw));
    // getPartitions(), class special
    returnedPartitions = clusterMapManager.getAllPartitionIds(specialPartitionClass);
    checkReturnedPartitions(returnedPartitions, Arrays.asList(specialRw, specialRo));
    // getWritablePartitions(), class special
    returnedPartitions = clusterMapManager.getWritablePartitionIds(specialPartitionClass);
    checkReturnedPartitions(returnedPartitions, Collections.singletonList(specialRw));
    // to test the dc affinity, we pick one datanode from "dc" and insert 1 replica for part1 (special class) in "dc"
    // and make sure that it is returned in getPartitions() but not in getWritablePartitions() (because all the other
    // partitions have more than 1 replica in "dc").
    DataNode dataNode = hardwareLayout.getRandomDataNodeFromDc(dc);
    Partition partition = partitionLayout.addNewPartition(dataNode.getDisks().subList(0, 1), testPartitionLayout.replicaCapacityInBytes, specialPartitionClass);
    Utils.writeJsonObjectToFile(partitionLayout.toJSONObject(), partitionLayoutPath);
    clusterMapManager = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    PartitionRangeCheckParams extraPartCheckParams = new PartitionRangeCheckParams(specialRo.rangeEnd + 1, 1, specialPartitionClass, PartitionState.READ_WRITE);
    // getPartitions(), class special
    returnedPartitions = clusterMapManager.getAllPartitionIds(specialPartitionClass);
    assertTrue("Added partition should exist in returned partitions", returnedPartitions.contains(partition));
    checkReturnedPartitions(returnedPartitions, Arrays.asList(specialRw, specialRo, extraPartCheckParams));
    // getWritablePartitions(), class special
    returnedPartitions = clusterMapManager.getWritablePartitionIds(specialPartitionClass);
    assertFalse("Added partition should not exist in returned partitions", returnedPartitions.contains(partition));
    checkReturnedPartitions(returnedPartitions, Collections.singletonList(specialRw));
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) Properties(java.util.Properties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Random(java.util.Random) File(java.io.File) Test(org.junit.Test)
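
Stripped of the test scaffolding, the class-aware lookup API exercised above reduces to two calls. A minimal usage sketch (clusterMap is assumed to be an initialized ClusterMap; the class name is a placeholder):

// imports: java.util.List, com.github.ambry.clustermap.ClusterMap,
//          com.github.ambry.clustermap.PartitionId
// getAllPartitionIds returns every partition of the given class (READ_WRITE and READ_ONLY);
// getWritablePartitionIds returns only those that are writable, which per the dc-affinity
// check above also excludes partitions without enough local replicas.
List<? extends PartitionId> all = clusterMap.getAllPartitionIds("specialPartitionClass");
List<? extends PartitionId> writable = clusterMap.getWritablePartitionIds("specialPartitionClass");
// Passing null ignores the partition class entirely.
List<? extends PartitionId> everything = clusterMap.getAllPartitionIds(null);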

Example 68 with ClusterMapConfig

use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

the class StaticClusterManagerTest method validateSimpleConfig.

@Test
public void validateSimpleConfig() throws Exception {
    Properties props = new Properties();
    props.setProperty("clustermap.cluster.name", "OneDiskOneReplica");
    props.setProperty("clustermap.datacenter.name", "Datacenter");
    props.setProperty("clustermap.host.name", "localhost");
    String configDir = System.getProperty("user.dir");
    // When the test is run from within the ambry-clustermap sub-project, user.dir points at that directory. To handle
    // this, we check the string suffix for the sub-project directory and append ".." to correctly set configDir.
    if (configDir.endsWith("ambry-clustermap")) {
        configDir += "/..";
    }
    configDir += "/config";
    String hardwareLayoutSer = configDir + "/HardwareLayout.json";
    String partitionLayoutSer = configDir + "/PartitionLayout.json";
    StaticClusterManager clusterMapManager = (new StaticClusterAgentsFactory(new ClusterMapConfig(new VerifiableProperties(props)), hardwareLayoutSer, partitionLayoutSer)).getClusterMap();
    assertEquals(1, clusterMapManager.getWritablePartitionIds(null).size());
    assertEquals(10737418240L, clusterMapManager.getUnallocatedRawCapacityInBytes());
    assertNotNull(clusterMapManager.getDataNodeId("localhost", 6661));
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) Properties(java.util.Properties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Test(org.junit.Test)
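
The capacity assertion checks out by hand: 10737418240 bytes is exactly 10 × 1024³, i.e. 10 GiB of unallocated raw capacity for this one-disk, one-replica layout.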

Example 69 with ClusterMapConfig

use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

the class StaticClusterManagerTest method persistAndReadBack.

@Test
public void persistAndReadBack() throws Exception {
    String tmpDir = folder.getRoot().getPath();
    Properties props = new Properties();
    props.setProperty("clustermap.cluster.name", "test");
    props.setProperty("clustermap.datacenter.name", "dc1");
    props.setProperty("clustermap.host.name", "localhost");
    String hardwareLayoutSer = tmpDir + "/hardwareLayoutSer.json";
    String partitionLayoutSer = tmpDir + "/partitionLayoutSer.json";
    String hardwareLayoutDe = tmpDir + "/hardwareLayoutDe.json";
    String partitionLayoutDe = tmpDir + "/partitionLayoutDe.json";
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    StaticClusterManager clusterMapManagerSer = getTestClusterMap(clusterMapConfig);
    clusterMapManagerSer.persist(hardwareLayoutSer, partitionLayoutSer);
    StaticClusterManager clusterMapManagerDe = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutSer, partitionLayoutSer)).getClusterMap();
    assertEquals(clusterMapManagerSer, clusterMapManagerDe);
    clusterMapManagerDe.persist(hardwareLayoutDe, partitionLayoutDe);
    StaticClusterManager clusterMapManagerDeDe = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutDe, partitionLayoutDe)).getClusterMap();
    assertEquals(clusterMapManagerDe, clusterMapManagerDeDe);
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) Properties(java.util.Properties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Test(org.junit.Test)
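
The folder field used in the first line of this test is not part of the snippet; it is presumably JUnit 4's TemporaryFolder rule, declared along these lines (an assumed sketch, since the declaration sits outside the excerpt):

// Assumed declaration (not shown above): a JUnit 4 rule that creates a fresh
// scratch directory before each test method and deletes it afterwards.
// imports: org.junit.Rule, org.junit.rules.TemporaryFolder
@Rule
public TemporaryFolder folder = new TemporaryFolder();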

Example 70 with ClusterMapConfig

use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.

the class HardwareLayoutTest method failValidation.

public void failValidation(JSONObject jsonObject) throws JSONException {
    try {
        new HardwareLayout(jsonObject, new ClusterMapConfig(new VerifiableProperties(props)));
        fail("Should have failed validation: " + jsonObject.toString(2));
    } catch (IllegalStateException e) {
    // Expected.
    }
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
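
The try/fail/catch idiom above predates assertThrows; on JUnit 4.13 or later the same check can be written more compactly. A sketch, assuming the project is on a JUnit version that provides org.junit.Assert.assertThrows:

// Equivalent one-liner: fails the test unless the constructor throws IllegalStateException.
assertThrows(IllegalStateException.class,
    () -> new HardwareLayout(jsonObject, new ClusterMapConfig(new VerifiableProperties(props))));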

Aggregations

ClusterMapConfig (com.github.ambry.config.ClusterMapConfig) 100
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 81
Test (org.junit.Test) 56
Properties (java.util.Properties) 52
MetricRegistry (com.codahale.metrics.MetricRegistry) 47
ArrayList (java.util.ArrayList) 31
IOException (java.io.IOException) 26
HashSet (java.util.HashSet) 25
JSONObject (org.json.JSONObject) 25
File (java.io.File) 24
ClusterMap (com.github.ambry.clustermap.ClusterMap) 23
HashMap (java.util.HashMap) 21
MockClusterMap (com.github.ambry.clustermap.MockClusterMap) 19
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory) 18
DataNodeId (com.github.ambry.clustermap.DataNodeId) 18
StoreConfig (com.github.ambry.config.StoreConfig) 18
ReplicaId (com.github.ambry.clustermap.ReplicaId) 16
List (java.util.List) 16
Map (java.util.Map) 16
CountDownLatch (java.util.concurrent.CountDownLatch) 16