Use of com.linkedin.pinot.controller.ControllerConf in project pinot by linkedin.
From the class PinotTableRestletResourceTest, method setUp:
@BeforeClass
public void setUp() {
  startZk();
  ControllerConf config = ControllerTestUtils.getDefaultControllerConfiguration();
  config.setTableMinReplicas(TABLE_MIN_REPLICATION);
  startController(config);
}
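A class that starts ZooKeeper and a controller this way normally shuts them down in reverse order once the tests finish. A minimal sketch, assuming the test base class exposes stopController() and stopZk() as counterparts of the start helpers used above:

@AfterClass
public void tearDown() {
  // Assumed counterparts of startController()/startZk(); stop in reverse start order.
  stopController();
  stopZk();
}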
Use of com.linkedin.pinot.controller.ControllerConf in project pinot by linkedin.
From the class ControllerTestUtils, method getDefaultControllerConfiguration:
public static ControllerConf getDefaultControllerConfiguration() {
  final ControllerConf conf = new ControllerConf();
  conf.setControllerHost(DEFAULT_CONTROLLER_HOST_NAME);
  conf.setControllerPort(DEFAULT_CONTROLLER_API_PORT);
  conf.setDataDir(DEFAULT_DATA_DIR);
  conf.setControllerVipHost("localhost");
  conf.setControllerVipProtocol("http");
  return conf;
}
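Tests typically take this shared default and override only the knobs they care about before starting the controller. A sketch that reuses only setters appearing elsewhere on this page (the values are illustrative):

ControllerConf conf = ControllerTestUtils.getDefaultControllerConfiguration();
conf.setTableMinReplicas(2);           // per-test override, as in PinotTableRestletResourceTest
conf.setTenantIsolationEnabled(false); // per-test override, as in HybridClusterIntegrationTest
startController(conf);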
Use of com.linkedin.pinot.controller.ControllerConf in project pinot by linkedin.
From the class ValidationManagerTest, method testLLCValidation:
@Test
public void testLLCValidation() throws Exception {
  final String topicName = "topic";
  final int kafkaPartitionCount = 2;
  final String realtimeTableName = "table_REALTIME";
  final String tableName = TableNameBuilder.extractRawTableName(realtimeTableName);
  // Servers S1, S2 and S3
  final String S1 = "S1";
  final String S2 = "S2";
  final String S3 = "S3";
  final List<String> hosts = Arrays.asList(S1, S2, S3);
  final HelixAdmin helixAdmin = _pinotHelixResourceManager.getHelixAdmin();

  // Mock Kafka metadata: every partition of the topic is hosted on all three servers.
  ZNRecord znRecord = new ZNRecord(topicName);
  for (int i = 0; i < kafkaPartitionCount; i++) {
    znRecord.setListField(Integer.toString(i), hosts);
  }
  makeMockPinotLLCRealtimeSegmentManager(znRecord);

  long msSinceEpoch = 1540;
  LLCSegmentName p0s0 = new LLCSegmentName(tableName, 0, 0, msSinceEpoch);
  LLCSegmentName p0s1 = new LLCSegmentName(tableName, 0, 1, msSinceEpoch);
  LLCSegmentName p1s0 = new LLCSegmentName(tableName, 1, 0, msSinceEpoch);
  LLCSegmentName p1s1 = new LLCSegmentName(tableName, 1, 1, msSinceEpoch);

  // Build an ideal state where partition 1 has a CONSUMING segment (p1s1) but
  // partition 0 does not: p0s1 is deliberately left out (see the commented lines),
  // so the first validation pass should flag exactly one partition.
  IdealState idealState = PinotTableIdealStateBuilder.buildEmptyIdealStateFor(realtimeTableName, 3);
  idealState.setPartitionState(p0s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealState.setPartitionState(p0s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealState.setPartitionState(p0s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  // idealState.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  // idealState.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  // idealState.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealState.setPartitionState(p1s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealState.setPartitionState(p1s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealState.setPartitionState(p1s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealState.setPartitionState(p1s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealState.setPartitionState(p1s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealState.setPartitionState(p1s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealState);

  FakeValidationMetrics validationMetrics = new FakeValidationMetrics();
  ValidationManager validationManager = new ValidationManager(validationMetrics, _pinotHelixResourceManager, new ControllerConf(), _segmentManager);
  Map<String, String> streamConfigs = new HashMap<String, String>(4);
  streamConfigs.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "highLevel,simple");

  // Disable auto-creation of missing segments so validation only reports problems.
  Field autoCreateOnError = ValidationManager.class.getDeclaredField("_autoCreateOnError");
  autoCreateOnError.setAccessible(true);
  autoCreateOnError.setBoolean(validationManager, false);

  AbstractTableConfig tableConfig = mock(AbstractTableConfig.class);
  IndexingConfig indexingConfig = mock(IndexingConfig.class);
  when(tableConfig.getIndexingConfig()).thenReturn(indexingConfig);
  when(indexingConfig.getStreamConfigs()).thenReturn(streamConfigs);
  validationManager.validateLLCSegments(realtimeTableName, tableConfig);
  Assert.assertEquals(validationMetrics.partitionCount, 1);

  // Set partition 0 to have one instance in CONSUMING state and the others in
  // OFFLINE; no partitions should be flagged for correction this time.
  helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
  idealState.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealState.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
  idealState.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
  helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealState);
  validationManager.validateLLCSegments(realtimeTableName, tableConfig);
  Assert.assertEquals(validationMetrics.partitionCount, 0);
  helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
}
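The validation logic above depends on an LLC segment name encoding the partition it belongs to. A minimal sketch of that convention (the field order reflects Pinot's low-level-consumer naming scheme; treat the exact creation-time formatting as an assumption):

// Components: rawTableName__partitionId__sequenceNumber__creationTime, joined
// with "__", which is how validateLLCSegments maps a segment back to its partition.
LLCSegmentName name = new LLCSegmentName("table", 0, 1, 1540L);
System.out.println(name.getSegmentName()); // e.g. table__0__1__<creationTime>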
Use of com.linkedin.pinot.controller.ControllerConf in project pinot by linkedin.
From the class HybridClusterIntegrationTest, method startHybridCluster:
protected void startHybridCluster(int partitionCount) throws Exception {
  // Start Zk and Kafka
  startZk();
  kafkaStarter = KafkaStarterUtils.startServer(KafkaStarterUtils.DEFAULT_KAFKA_PORT, KafkaStarterUtils.DEFAULT_BROKER_ID, KafkaStarterUtils.DEFAULT_ZK_STR, KafkaStarterUtils.getDefaultKafkaConfiguration());
  // Create Kafka topic
  KafkaStarterUtils.createTopic(KAFKA_TOPIC, KafkaStarterUtils.DEFAULT_ZK_STR, partitionCount);
  // Start the Pinot cluster
  ControllerConf config = ControllerTestUtils.getDefaultControllerConfiguration();
  config.setTenantIsolationEnabled(false);
  startController(config);
  startBroker();
  startServers(2);
  // Create tenants
  createBrokerTenant(TENANT_NAME, 1);
  createServerTenant(TENANT_NAME, 1, 1);
}
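A matching shutdown would stop the same components in reverse order. A hypothetical sketch; the stop* helper names are assumed to mirror the start* helpers used above rather than taken from the Pinot sources:

protected void stopHybridCluster() throws Exception {
  // Assumed counterparts of the start* helpers; stop in reverse start order.
  stopServer();
  stopBroker();
  stopController();
  KafkaStarterUtils.stopServer(kafkaStarter);
  stopZk();
}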