Usage of org.apache.helix.HelixAdmin in project Pinot (LinkedIn) — class SegmentDeletionManagerTest, method allPassed.
@Test
public void allPassed() throws Exception {
  // Wire a fake deletion manager against mocked Helix / property-store handles.
  HelixAdmin admin = makeHelixAdmin();
  ZkHelixPropertyStore<ZNRecord> store = makePropertyStore();
  FakeDeletionManager manager = new FakeDeletionManager(admin, store);

  // All of these segments are expected to be deletable.
  Set<String> segments = new HashSet<>(segmentsThatShouldBeDeleted());
  manager.deleteSegmenetsFromPropertyStoreAndLocal(tableName, segments);

  // Nothing queued for retry; every segment removed from the property store.
  Assert.assertEquals(manager.segmentsToRetry.size(), 0);
  Assert.assertEquals(manager.segmentsRemovedFromStore.size(), segments.size());
  Assert.assertTrue(manager.segmentsRemovedFromStore.containsAll(segments));
}
Usage of org.apache.helix.HelixAdmin in project Pinot (LinkedIn) — class SegmentDeletionManagerTest, method makeHelixAdmin.
/**
 * Builds a mocked {@link HelixAdmin} whose ideal state reports an ONLINE
 * instance for every segment returned by {@code segmentsInIdealStateOrExtView()},
 * while the external view returns no state map for any segment.
 */
HelixAdmin makeHelixAdmin() {
  HelixAdmin mockAdmin = mock(HelixAdmin.class);
  ExternalView externalView = mock(ExternalView.class);
  IdealState idealState = mock(IdealState.class);

  when(mockAdmin.getResourceExternalView(clusterName, tableName)).thenReturn(externalView);
  when(mockAdmin.getResourceIdealState(clusterName, tableName)).thenReturn(idealState);

  // Single-entry state map shared by every segment in the ideal state.
  Map<String, String> onlineOnSomeHost = new HashMap<>(1);
  onlineOnSomeHost.put("someHost", "ONLINE");
  for (String segmentName : segmentsInIdealStateOrExtView()) {
    when(idealState.getInstanceStateMap(segmentName)).thenReturn(onlineOnSomeHost);
  }

  // External view knows nothing about any segment.
  when(externalView.getStateMap(anyString())).thenReturn(null);
  return mockAdmin;
}
Usage of org.apache.helix.HelixAdmin in project Pinot (LinkedIn) — class SegmentDeletionManagerTest, method testAllFailed.
/**
 * Runs the deletion manager over segments that are all expected to fail
 * deletion, and verifies that every one of them lands in the retry set
 * while none is recorded as removed from the property store.
 */
private void testAllFailed(List<String> segments) throws Exception {
  FakeDeletionManager manager = new FakeDeletionManager(makeHelixAdmin(), makePropertyStore());
  manager.deleteSegmenetsFromPropertyStoreAndLocal(tableName, segments);

  // Every segment must be queued for retry; nothing may have been removed.
  Assert.assertEquals(manager.segmentsToRetry.size(), segments.size());
  Assert.assertTrue(manager.segmentsToRetry.containsAll(segments));
  Assert.assertEquals(manager.segmentsRemovedFromStore.size(), 0);
}
Usage of org.apache.helix.HelixAdmin in project Pinot (LinkedIn) — class ValidationManagerTest, method testRebuildBrokerResourceWhenBrokerAdded.
/**
 * Verifies that the broker resource is rebuilt when a broker is added out of band:
 * the first table's ideal state already matches the broker tenant (no rebuild needed);
 * a second table is added and a broker instance is then registered directly through
 * Helix, bypassing the rebuild path and leaving the ideal state stale; rebuilding the
 * broker resource must bring the ideal state back in sync with the tenant's brokers.
 */
@Test
public void testRebuildBrokerResourceWhenBrokerAdded() throws Exception {
  // Check that the first table we added doesn't need to be rebuilt (case where
  // ideal-state brokers and brokers in the broker resource are the same).
  String partitionName = _offlineTableConfig.getTableName();
  // Note: getClusterManagmentTool is the Helix API's own (misspelled) method name.
  HelixAdmin helixAdmin = _helixManager.getClusterManagmentTool();
  IdealState idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Ensure that the broker resource is not rebuilt. assertEquals (rather than
  // assertTrue(a.equals(b))) reports both sets on failure.
  Assert.assertEquals(idealState.getInstanceSet(partitionName),
      _pinotHelixResourceManager.getAllInstancesForBrokerTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
  _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(partitionName);

  // Add another table that needs to be rebuilt.
  String offlineTableTwoConfigJson =
      ControllerRequestBuilderUtil.buildCreateOfflineTableJSON(TEST_TABLE_TWO, null, null, 1).toString();
  AbstractTableConfig offlineTableConfigTwo = AbstractTableConfig.init(offlineTableTwoConfigJson);
  _pinotHelixResourceManager.addTable(offlineTableConfigTwo);
  String partitionNameTwo = offlineTableConfigTwo.getTableName();

  // Add a new broker manually such that the ideal state is not updated, and ensure
  // that rebuilding the broker resource repairs it.
  final String brokerId = "Broker_localhost_2";
  InstanceConfig instanceConfig = new InstanceConfig(brokerId);
  instanceConfig.setInstanceEnabled(true);
  instanceConfig.setHostName("Broker_localhost");
  instanceConfig.setPort("2");
  helixAdmin.addInstance(HELIX_CLUSTER_NAME, instanceConfig);
  helixAdmin.addInstanceTag(HELIX_CLUSTER_NAME, instanceConfig.getInstanceName(),
      ControllerTenantNameBuilder.getBrokerTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));

  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // The stale ideal state must differ from the tenant's broker set before the rebuild.
  Assert.assertNotEquals(idealState.getInstanceSet(partitionNameTwo),
      _pinotHelixResourceManager.getAllInstancesForBrokerTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));

  _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(partitionNameTwo);
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // After the rebuild the two sets must match.
  Assert.assertEquals(idealState.getInstanceSet(partitionNameTwo),
      _pinotHelixResourceManager.getAllInstancesForBrokerTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
}
Usage of org.apache.helix.HelixAdmin in project Ambry (LinkedIn) — class HelixBootstrapUpgradeUtil, method addNewDataNodes.
/**
 * Adds nodes present in the static cluster map that are not already present in Helix.
 * Nodes that are already present are ignored; this is to make upgrades smooth.
 *
 * Replica/Partition information is not updated by this method. That is updated when
 * replicas and partitions are added.
 *
 * At this time, node removals are not dealt with.
 */
private void addNewDataNodes() {
  for (Datacenter dc : staticClusterMap.hardwareLayout.getDatacenters()) {
    HelixAdmin dcAdmin = adminForDc.get(dc.getName());
    // Loop-invariant: fetch the cluster's instance list once per datacenter
    // instead of once per node (the original issued this call inside the loop).
    List<String> existingInstances = dcAdmin.getInstancesInCluster(clusterName);
    for (DataNode node : dc.getDataNodes()) {
      String instanceName = getInstanceName(node);
      if (!existingInstances.contains(instanceName)) {
        InstanceConfig instanceConfig = new InstanceConfig(instanceName);
        instanceConfig.setHostName(node.getHostname());
        instanceConfig.setPort(Integer.toString(node.getPort()));
        // Populate mountPath -> Disk information.
        Map<String, Map<String, String>> diskInfos = new HashMap<>();
        for (Disk disk : node.getDisks()) {
          Map<String, String> diskInfo = new HashMap<>();
          diskInfo.put(ClusterMapUtils.DISK_CAPACITY_STR, Long.toString(disk.getRawCapacityInBytes()));
          diskInfo.put(ClusterMapUtils.DISK_STATE, ClusterMapUtils.AVAILABLE_STR);
          // Note: An instance config has to contain the information for each disk about the replicas it hosts.
          // This information will be initialized to the empty string - but will be updated whenever the partition
          // is added to the cluster.
          diskInfo.put(ClusterMapUtils.REPLICAS_STR, "");
          diskInfos.put(disk.getMountPath(), diskInfo);
        }
        // Add all instance configuration.
        instanceConfig.getRecord().setMapFields(diskInfos);
        if (node.hasSSLPort()) {
          instanceConfig.getRecord().setSimpleField(ClusterMapUtils.SSLPORT_STR, Integer.toString(node.getSSLPort()));
        }
        instanceConfig.getRecord().setSimpleField(ClusterMapUtils.DATACENTER_STR, node.getDatacenterName());
        instanceConfig.getRecord().setSimpleField(ClusterMapUtils.RACKID_STR, Long.toString(node.getRackId()));
        instanceConfig.getRecord().setListField(ClusterMapUtils.SEALED_STR, new ArrayList<String>());
        // Finally, add this node to the DC.
        dcAdmin.addInstance(clusterName, instanceConfig);
      }
    }
    System.out.println("Added all new nodes in datacenter " + dc.getName());
  }
}
Aggregations