Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in the Apache Helix project.
From class TestTaskThrottling, method testTaskThrottle:
@Test
public void testTaskThrottle() throws InterruptedException {
  int numTasks = 30 * _numNodes;
  int perNodeTaskLimitation = 5;
  JobConfig.Builder jobConfig = generateLongRunJobConfig(numTasks);

  // 1. With no per-instance limit configured, every task should be scheduled at once.
  String jobName1 = "Job1";
  Workflow flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName1, jobConfig).build();
  _driver.start(flow);
  _driver.pollForJobState(flow.getName(), TaskUtil.getNamespacedJobName(flow.getName(), jobName1), TaskState.IN_PROGRESS);
  // Wait for tasks to be picked up by the participants.
  Thread.sleep(1500);
  Assert.assertEquals(countRunningPartition(flow, jobName1), numTasks);
  _driver.stop(flow.getName());
  _driver.pollForWorkflowState(flow.getName(), TaskState.STOPPED);

  // 2. Configure MAX_CONCURRENT_TASK_PER_INSTANCE on the cluster; the number of
  //    concurrently running tasks must now be capped at numNodes * limit.
  HelixConfigScope scope =
      new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.CLUSTER).forCluster(CLUSTER_NAME).build();
  Map<String, String> properties = new HashMap<>();
  // String.valueOf avoids the deprecated Integer(int) boxing constructor.
  properties.put(ClusterConfig.ClusterConfigProperty.MAX_CONCURRENT_TASK_PER_INSTANCE.name(),
      String.valueOf(perNodeTaskLimitation));
  _setupTool.getClusterManagementTool().setConfig(scope, properties);

  String jobName2 = "Job2";
  flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName2, jobConfig).build();
  _driver.start(flow);
  _driver.pollForJobState(flow.getName(), TaskUtil.getNamespacedJobName(flow.getName(), jobName2), TaskState.IN_PROGRESS);
  // Wait for tasks to be picked up by the participants.
  Thread.sleep(4000);
  Assert.assertEquals(countRunningPartition(flow, jobName2), _numNodes * perNodeTaskLimitation);
  _driver.stop(flow.getName());
  _driver.pollForWorkflowState(flow.getName(), TaskState.STOPPED);

  // 3. Shorten the task runtime so the throttled job can still complete normally.
  jobConfig.setJobCommandConfigMap(ImmutableMap.of(MockTask.TIMEOUT_CONFIG, "10"));
  String jobName3 = "Job3";
  flow = WorkflowGenerator.generateSingleJobWorkflowBuilder(jobName3, jobConfig).build();
  _driver.start(flow);
  _driver.pollForJobState(flow.getName(), TaskUtil.getNamespacedJobName(flow.getName(), jobName3), TaskState.COMPLETED);
}
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in the Apache Helix project.
From class ClusterSetup, method setConfig:
/**
 * Sets configuration key-value pairs at the given scope.
 * @param type config-scope type, e.g. CLUSTER, RESOURCE, etc.
 * @param scopeArgsCsv csv-formatted scope-args, e.g. myCluster,testDB
 * @param keyValuePairs csv-formatted key-value pairs, e.g. k1=v1,k2=v2
 */
public void setConfig(ConfigScopeProperty type, String scopeArgsCsv, String keyValuePairs) {
  // Scope arguments may be separated by commas or whitespace.
  String[] scopeParts = scopeArgsCsv.split("[\\s,]");
  HelixConfigScope configScope = new HelixConfigScopeBuilder(type, scopeParts).build();
  Map<String, String> parsedPairs = HelixUtil.parseCsvFormatedKeyValuePairs(keyValuePairs);
  _admin.setConfig(configScope, parsedPairs);
}
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in the Apache Helix project.
From class ClusterSetup, method getConfig:
/**
 * Reads configuration values for the given keys at the given scope.
 * @param type config-scope-type, e.g. CLUSTER, RESOURCE, etc.
 * @param scopeArgsCsv csv-formatted scope-args, e.g. myCluster,testDB
 * @param keysCsv csv-formatted keys, e.g. k1,k2
 * @return json-formated key-value pairs, e.g. {k1=v1,k2=v2}
 */
public String getConfig(ConfigScopeProperty type, String scopeArgsCsv, String keysCsv) {
  // Both scope args and keys may be separated by commas or whitespace.
  HelixConfigScope configScope =
      new HelixConfigScopeBuilder(type, scopeArgsCsv.split("[\\s,]")).build();
  String[] keys = keysCsv.split("[\\s,]");
  Map<String, String> values = _admin.getConfig(configScope, Arrays.asList(keys));
  // Pack the results into a ZNRecord's simple fields and serialize to JSON text.
  ZNRecord result = new ZNRecord(type.toString());
  result.getSimpleFields().putAll(values);
  ZNRecordSerializer serializer = new ZNRecordSerializer();
  // NOTE(review): new String(byte[]) uses the platform default charset — confirm
  // whether an explicit charset (UTF-8) is intended here.
  return new String(serializer.serialize(result));
}
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in the Apache Helix project.
From class ParticipantManager, method joinCluster:
/**
 * Ensures the cluster structure is set up for this instance before joining.
 * If the instance is not yet registered, behavior depends on the cluster's
 * ALLOW_PARTICIPANT_AUTO_JOIN config: when enabled, the instance registers
 * itself; otherwise a HelixException is thrown.
 */
private void joinCluster() {
  // Read cluster config and see if instance can auto join the cluster.
  boolean autoJoin = false;
  try {
    HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER)
        .forCluster(_manager.getClusterName()).build();
    autoJoin = Boolean.parseBoolean(_configAccessor.get(scope, ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN));
    LOG.info("instance: " + _instanceName + " auto-joining " + _clusterName + " is " + autoJoin);
  } catch (Exception e) {
    // Best-effort read: any failure (missing config, ZK error) leaves auto-join disabled,
    // but record the cause instead of silently swallowing it.
    LOG.debug("Failed to read auto-join config for cluster " + _clusterName + ", defaulting to false", e);
  }
  if (!ZKUtil.isInstanceSetup(_zkclient, _clusterName, _instanceName, _instanceType)) {
    if (!autoJoin) {
      throw new HelixException("Initial cluster structure is not set up for instance: " + _instanceName
          + ", instanceType: " + _instanceType);
    }
    LOG.info(_instanceName + " is auto-joining cluster: " + _clusterName);
    // Instance names follow the "<host>_<port>" convention; a name without "_"
    // is treated as host-only with an empty port.
    InstanceConfig instanceConfig = new InstanceConfig(_instanceName);
    String hostName = _instanceName;
    String port = "";
    int lastPos = _instanceName.lastIndexOf('_');
    if (lastPos > 0) {
      hostName = _instanceName.substring(0, lastPos);
      port = _instanceName.substring(lastPos + 1);
    }
    instanceConfig.setHostName(hostName);
    instanceConfig.setPort(port);
    instanceConfig.setInstanceEnabled(true);
    _helixAdmin.addInstance(_clusterName, instanceConfig);
  }
}
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in the Apache Helix project.
From class TestInvalidResourceRebalance, method testResourceRebalanceSkipped:
/**
 * Ensure that the Helix controller doesn't attempt to rebalance resources with invalid ideal
 * states
 */
@Test
public void testResourceRebalanceSkipped() throws Exception {
final int NUM_PARTICIPANTS = 2;
final int NUM_PARTITIONS = 4;
final int NUM_REPLICAS = 2;
final String RESOURCE_NAME = "TestDB0";
String className = TestHelper.getTestClassName();
String methodName = TestHelper.getTestMethodName();
String clusterName = className + "_" + methodName;
System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
// Set up the cluster. Argument labels below match each parameter
// (the previous comments were shifted by one position).
TestHelper.setupCluster(
clusterName, // cluster name
ZK_ADDR, // zk address
12918, // participant port
"localhost", // participant name prefix
"TestDB", // resource name prefix
1, // number of resources
NUM_PARTITIONS, // partitions per resource
NUM_PARTICIPANTS, // number of nodes
NUM_REPLICAS, // replicas
"MasterSlave", // state model definition
RebalanceMode.SEMI_AUTO, // rebalance mode
true); // do rebalance
// start controller
ClusterControllerManager controller = new ClusterControllerManager(ZK_ADDR, clusterName, "controller");
controller.syncStart();
// add the ideal state spec (prevents non-CUSTOMIZED MasterSlave ideal states)
HelixAdmin helixAdmin = controller.getClusterManagmentTool();
Map<String, String> properties = Maps.newHashMap();
properties.put("IdealStateRule!sampleRuleName", "IDEAL_STATE_MODE=CUSTOMIZED,STATE_MODEL_DEF_REF=MasterSlave");
helixAdmin.setConfig(new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(clusterName).build(), properties);
// start participants
MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
final String instanceName = "localhost_" + (12918 + i);
participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
participants[i].syncStart();
}
// Give the controller a chance to (incorrectly) act on the invalid ideal state.
Thread.sleep(1000);
// The SEMI_AUTO resource violates the CUSTOMIZED-only rule, so no state
// transitions should have happened: external view and current state stay empty.
boolean result = ClusterStateVerifier.verifyByZkCallback(new EmptyZkVerifier(clusterName, RESOURCE_NAME));
Assert.assertTrue(result, "External view and current state must be empty");
// cleanup
for (int i = 0; i < NUM_PARTICIPANTS; i++) {
participants[i].syncStop();
}
controller.syncStop();
}
Aggregations