Use of org.apache.helix.model.builder.CustomModeISBuilder in project pinot by linkedin.
The class PinotTableIdealStateBuilder, method buildEmptyIdealStateFor.
/**
 * Builds an empty ideal state for a given table.
 * Used when creating a new table.
 *
 * @param tableName resource (table) name
 * @param numCopies number of replicas
 * @return an empty custom-mode IdealState for the table
 */
public static IdealState buildEmptyIdealStateFor(String tableName, int numCopies) {
  final CustomModeISBuilder customModeIdealStateBuilder = new CustomModeISBuilder(tableName);
  final int replicas = numCopies;
  customModeIdealStateBuilder
      .setStateModel(PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL)
      .setNumPartitions(0)
      .setNumReplica(replicas)
      .setMaxPartitionsPerNode(1);
  final IdealState idealState = customModeIdealStateBuilder.build();
  idealState.setInstanceGroupTag(tableName);
  return idealState;
}
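The returned ideal state still has to be registered with Helix. A minimal sketch of that step, assuming a HelixAdmin handle and cluster name are already in scope and using the HelixAdmin.addResource overload that accepts an IdealState (the table name and replica count here are illustrative, not from the Pinot source):

// Hypothetical caller; helixAdmin, helixClusterName and the table name are placeholders.
IdealState offlineIdealState = PinotTableIdealStateBuilder.buildEmptyIdealStateFor("myTable_OFFLINE", 3);
helixAdmin.addResource(helixClusterName, "myTable_OFFLINE", offlineIdealState);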
Use of org.apache.helix.model.builder.CustomModeISBuilder in project pinot by linkedin.
The class PinotTableIdealStateBuilder, method buildEmptyIdealStateForBrokerResource.
/**
 * Builds an empty ideal state for the broker resource.
 * Used when setting up the cluster's broker resource.
 *
 * @param helixAdmin Helix admin handle (not used by the current implementation)
 * @param helixClusterName Helix cluster name (not used by the current implementation)
 * @return an empty custom-mode IdealState for the broker resource
 */
public static IdealState buildEmptyIdealStateForBrokerResource(HelixAdmin helixAdmin, String helixClusterName) {
  final CustomModeISBuilder customModeIdealStateBuilder =
      new CustomModeISBuilder(CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
  customModeIdealStateBuilder
      .setStateModel(PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL)
      .setMaxPartitionsPerNode(Integer.MAX_VALUE)
      .setNumReplica(Integer.MAX_VALUE)
      .setNumPartitions(Integer.MAX_VALUE);
  final IdealState idealState = customModeIdealStateBuilder.build();
  return idealState;
}
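Partition count, replica count and max-partitions-per-node are all Integer.MAX_VALUE because the broker resource grows over time: each table later becomes one partition of the broker resource, mapped to the brokers that serve it. A sketch of that later step, with illustrative names (the real Pinot code goes through its resource manager):

// Hypothetical follow-up; the table and broker instance names are placeholders.
IdealState brokerIdealState =
    PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(helixAdmin, helixClusterName);
brokerIdealState.setPartitionState("myTable_OFFLINE", "Broker_localhost_8099", "ONLINE");
helixAdmin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, brokerIdealState);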
Use of org.apache.helix.model.builder.CustomModeISBuilder in project pinot by linkedin.
The class PinotTableIdealStateBuilder, method buildEmptyKafkaConsumerRealtimeIdealStateFor.
public static IdealState buildEmptyKafkaConsumerRealtimeIdealStateFor(String realtimeTableName, int replicaCount) {
  final CustomModeISBuilder customModeIdealStateBuilder = new CustomModeISBuilder(realtimeTableName);
  customModeIdealStateBuilder
      .setStateModel(PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL)
      .setNumPartitions(0)
      .setNumReplica(replicaCount)
      .setMaxPartitionsPerNode(1);
  final IdealState idealState = customModeIdealStateBuilder.build();
  idealState.setInstanceGroupTag(realtimeTableName);
  return idealState;
}
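This realtime variant is identical to the offline one apart from the resource name; segment assignments are filled in later as consuming segments appear. If an assignment were already known when the ideal state is built, CustomModeISBuilder could carry it directly via assignInstanceAndState, as in this sketch (segment and server names are illustrative, not from the Pinot source):

// Hypothetical: pre-assigning one consuming segment while building the ideal state.
CustomModeISBuilder builder = new CustomModeISBuilder("myTable_REALTIME");
builder.setStateModel(PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL)
    .setNumPartitions(0)
    .setNumReplica(1)
    .setMaxPartitionsPerNode(1);
builder.assignInstanceAndState("myTable_REALTIME_segment_0", "Server_localhost_8098", "ONLINE");
IdealState realtimeIdealState = builder.build();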
Use of org.apache.helix.model.builder.CustomModeISBuilder in project helix by apache.
The class WorkflowRebalancer, method scheduleSingleJob.
/**
 * Posts a new job to the cluster.
 */
private void scheduleSingleJob(String jobResource, JobConfig jobConfig) {
  HelixAdmin admin = _manager.getClusterManagmentTool();
  IdealState jobIS = admin.getResourceIdealState(_manager.getClusterName(), jobResource);
  if (jobIS != null) {
    LOG.info("Job " + jobResource + " idealstate already exists!");
    return;
  }

  // Set up the job resource based on partitions from the target resource
  TaskUtil.createUserContent(_manager.getHelixPropertyStore(), jobResource,
      new ZNRecord(TaskUtil.USER_CONTENT_NODE));
  int numIndependentTasks = jobConfig.getTaskConfigMap().size();
  int numPartitions = numIndependentTasks;
  if (numPartitions == 0) {
    IdealState targetIs =
        admin.getResourceIdealState(_manager.getClusterName(), jobConfig.getTargetResource());
    if (targetIs == null) {
      LOG.warn("Target resource does not exist for job " + jobResource);
      // No need to fail here; the job will be marked as failed as soon as it starts running.
    } else {
      numPartitions = targetIs.getPartitionSet().size();
    }
  }
  admin.addResource(_manager.getClusterName(), jobResource, numPartitions,
      TaskConstants.STATE_MODEL_NAME);

  HelixDataAccessor accessor = _manager.getHelixDataAccessor();

  // Set the job configuration
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  HelixProperty resourceConfig = new HelixProperty(jobResource);
  resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap());
  Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap();
  if (taskConfigMap != null) {
    for (TaskConfig taskConfig : taskConfigMap.values()) {
      resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap());
    }
  }
  accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig);

  // Push out the new ideal state based on the number of target partitions
  IdealStateBuilder builder = new CustomModeISBuilder(jobResource);
  builder.setRebalancerMode(IdealState.RebalanceMode.TASK);
  builder.setNumReplica(1);
  builder.setNumPartitions(numPartitions);
  builder.setStateModel(TaskConstants.STATE_MODEL_NAME);
  if (jobConfig.getInstanceGroupTag() != null) {
    builder.setNodeGroup(jobConfig.getInstanceGroupTag());
  }
  if (jobConfig.isDisableExternalView()) {
    builder.disableExternalView();
  }
  jobIS = builder.build();
  for (int i = 0; i < numPartitions; i++) {
    jobIS.getRecord().setListField(jobResource + "_" + i, new ArrayList<String>());
    jobIS.getRecord().setMapField(jobResource + "_" + i, new HashMap<String, String>());
  }
  jobIS.setRebalancerClassName(JobRebalancer.class.getName());
  admin.setResourceIdealState(_manager.getClusterName(), jobResource, jobIS);
}
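Here CustomModeISBuilder is used only as a convenient way to stamp out the IdealState record: the per-partition list and map fields are left empty, the rebalance mode is set to TASK, and the JobRebalancer named on the record computes the actual task placement at runtime. A small sketch of how the scheduled state could be inspected afterwards (the job resource name and assertions are illustrative additions, not part of the rebalancer):

// Hypothetical check after scheduleSingleJob has run; the job resource name is a placeholder.
IdealState scheduled = admin.getResourceIdealState(_manager.getClusterName(), "myWorkflow_myJob");
assert scheduled.getRebalanceMode() == IdealState.RebalanceMode.TASK;
assert JobRebalancer.class.getName().equals(scheduled.getRebalancerClassName());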
Use of org.apache.helix.model.builder.CustomModeISBuilder in project helix by apache.
The class TestDrop, method testDropErrorPartitionCustomIS.
@Test
public void testDropErrorPartitionCustomIS() throws Exception {
  // Logger.getRootLogger().setLevel(Level.INFO);
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;
  final int n = 2;
  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));

  MockParticipantManager[] participants = new MockParticipantManager[n];
  TestHelper.setupCluster(clusterName, ZK_ADDR,
      12918, // participant port
      "localhost", // participant name prefix
      "TestDB", // resource name prefix
      1, // resources
      2, // partitions per resource
      n, // number of nodes
      2, // replicas
      "MasterSlave",
      false); // do rebalance

  // set a custom ideal-state: two partitions, explicitly assigned to the two participants
  CustomModeISBuilder isBuilder = new CustomModeISBuilder("TestDB0");
  isBuilder.setNumPartitions(2);
  isBuilder.setNumReplica(2);
  isBuilder.setStateModel("MasterSlave");
  isBuilder.assignInstanceAndState("TestDB0_0", "localhost_12918", "MASTER");
  isBuilder.assignInstanceAndState("TestDB0_0", "localhost_12919", "SLAVE");
  isBuilder.assignInstanceAndState("TestDB0_1", "localhost_12919", "MASTER");
  isBuilder.assignInstanceAndState("TestDB0_1", "localhost_12918", "SLAVE");
  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  accessor.setProperty(keyBuilder.idealStates("TestDB0"), isBuilder.build());

  // start controller
  ClusterControllerManager controller =
      new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
  controller.syncStart();

  // start participants; the first one fails the SLAVE->MASTER transition for TestDB0_0
  Map<String, Set<String>> errTransitions = new HashMap<String, Set<String>>();
  errTransitions.put("SLAVE-MASTER", TestHelper.setOf("TestDB0_0"));
  for (int i = 0; i < n; i++) {
    String instanceName = "localhost_" + (12918 + i);
    if (i == 0) {
      participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
      participants[i].setTransition(new ErrTransition(errTransitions));
    } else {
      participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
    }
    participants[i].syncStart();
  }

  Map<String, Map<String, String>> errStateMap = new HashMap<String, Map<String, String>>();
  errStateMap.put("TestDB0", new HashMap<String, String>());
  errStateMap.get("TestDB0").put("TestDB0_0", "localhost_12918");
  HelixClusterVerifier verifier = new BestPossibleExternalViewVerifier.Builder(clusterName)
      .setZkAddr(ZK_ADDR).setErrStates(errStateMap).build();
  Assert.assertTrue(verifier.verify());

  // dropping the resource that contains the error partition should drop it successfully
  ClusterSetup.processCommandLineArgs(
      new String[] { "--zkSvr", ZK_ADDR, "--dropResource", clusterName, "TestDB0" });

  // make sure the TestDB0_0 partition is dropped
  verifier = new BestPossibleExternalViewVerifier.Builder(clusterName).setZkAddr(ZK_ADDR).build();
  Assert.assertTrue(verifier.verify(), "Should be empty external-view");
  assertEmptyCSandEV(clusterName, "TestDB0", participants);

  // clean up
  controller.syncStop();
  for (int i = 0; i < n; i++) {
    participants[i].syncStop();
  }
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
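The assignInstanceAndState calls are what make this a CUSTOMIZED-mode ideal state: the record's map fields pin each partition's replicas to explicit instances and states. If one wanted to verify that directly inside the test, right after the ideal state is written, a sketch could look like this (the assertions are illustrative additions, not part of the original test):

// Hypothetical verification of the custom assignment written above.
IdealState customIS = accessor.getProperty(keyBuilder.idealStates("TestDB0"));
Map<String, String> stateMap = customIS.getInstanceStateMap("TestDB0_0");
Assert.assertEquals(stateMap.get("localhost_12918"), "MASTER");
Assert.assertEquals(stateMap.get("localhost_12919"), "SLAVE");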