use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
the class PinotLLCRealtimeSegmentManagerTest method testUpdatingKafkaPartitions.
@Test
public void testUpdatingKafkaPartitions() throws Exception {
FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(false, null);
int nInstances = 3;
int nKafkaPartitions = 8;
int nReplicas = 3;
final String topic = "someTopic";
final String rtTableName = "table_REALTIME";
List<String> instances = getInstanceList(nInstances);
final String startOffset = KAFKA_OFFSET;
// Populate 'partitionSet' with all Kafka partitions.
// As we find partitions in the assignment, we will remove them from this set.
Set<Integer> partitionSet = new HashSet<>(nKafkaPartitions);
for (int i = 0; i < nKafkaPartitions; i++) {
partitionSet.add(i);
}
// Setup initial entries
segmentManager.setupHelixEntries(topic, rtTableName, nKafkaPartitions, instances, nReplicas, startOffset, DUMMY_HOST, null, true, 1000);
ZNRecord partitionAssignment = segmentManager._partitionAssignment;
segmentManager._currentKafkaPartitionCount = nKafkaPartitions;
segmentManager._currentInstanceList = instances;
// Call to update the partition list should do nothing.
AbstractTableConfig tableConfig = makeTableConfig(nReplicas, topic);
segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
Assert.assertTrue(segmentManager._partitionAssignment == partitionAssignment);
// Change the number of Kafka partitions to 9; we should generate a new partition assignment.
nKafkaPartitions = 9;
segmentManager._currentKafkaPartitionCount = nKafkaPartitions;
segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
// Now reduce the number of instances; we should not update anything.
segmentManager._currentInstanceList = getInstanceList(nInstances - 1);
segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
Assert.assertTrue(partitionAssignment == segmentManager._partitionAssignment);
// Now add one more server, and we should update the assignment again.
nInstances++;
segmentManager._currentInstanceList = getInstanceList(nInstances);
segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
Assert.assertTrue(partitionAssignment != segmentManager._partitionAssignment);
partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
// Increase the replica count by one, and we should update the assignment.
nReplicas++;
tableConfig = makeTableConfig(nReplicas, topic);
segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
Assert.assertTrue(partitionAssignment != segmentManager._partitionAssignment);
partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
// Change the list of servers while keeping the number of servers the same.
// We should see a change in the partition assignment.
String server1 = segmentManager._currentInstanceList.get(0);
segmentManager._currentInstanceList.set(0, server1 + "_new");
Assert.assertEquals(nInstances, segmentManager._currentInstanceList.size());
segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
Assert.assertTrue(partitionAssignment != segmentManager._partitionAssignment);
partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
}
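For context, here is a minimal sketch of what the validatePartitionAssignment helper used above could verify. It is an illustration derived from the assertions in this test, not the exact helper from the Pinot test class, and it assumes the usual java.util imports are available in the same test class.

private ZNRecord validatePartitionAssignment(FakePinotLLCRealtimeSegmentManager segmentManager, int nKafkaPartitions, int nReplicas) {
  ZNRecord assignment = segmentManager._partitionAssignment;
  Map<String, List<String>> partitionToInstances = assignment.getListFields();
  // Every Kafka partition must appear exactly once in the assignment ...
  Assert.assertEquals(partitionToInstances.size(), nKafkaPartitions);
  for (Map.Entry<String, List<String>> entry : partitionToInstances.entrySet()) {
    // ... and must be mapped to exactly nReplicas distinct instances.
    Assert.assertEquals(entry.getValue().size(), nReplicas);
    Assert.assertEquals(new HashSet<>(entry.getValue()).size(), nReplicas);
  }
  return assignment;
}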
use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
the class ValidationManagerTest method testLLCValidation.
@Test
public void testLLCValidation() throws Exception {
final String topicName = "topic";
final int kafkaPartitionCount = 2;
final String realtimeTableName = "table_REALTIME";
final String tableName = TableNameBuilder.extractRawTableName(realtimeTableName);
// Server 1
final String S1 = "S1";
// Server 2
final String S2 = "S2";
// Server 3
final String S3 = "S3";
final List<String> hosts = Arrays.asList(S1, S2, S3);
final HelixAdmin helixAdmin = _pinotHelixResourceManager.getHelixAdmin();
ZNRecord znRecord = new ZNRecord(topicName);
for (int i = 0; i < kafkaPartitionCount; i++) {
znRecord.setListField(Integer.toString(i), hosts);
}
makeMockPinotLLCRealtimeSegmentManager(znRecord);
long msSinceEpoch = 1540;
LLCSegmentName p0s0 = new LLCSegmentName(tableName, 0, 0, msSinceEpoch);
LLCSegmentName p0s1 = new LLCSegmentName(tableName, 0, 1, msSinceEpoch);
LLCSegmentName p1s0 = new LLCSegmentName(tableName, 1, 0, msSinceEpoch);
LLCSegmentName p1s1 = new LLCSegmentName(tableName, 1, 1, msSinceEpoch);
IdealState idealstate = PinotTableIdealStateBuilder.buildEmptyIdealStateFor(realtimeTableName, 3);
idealstate.setPartitionState(p0s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
idealstate.setPartitionState(p0s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
idealstate.setPartitionState(p0s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
// idealstate.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
// idealstate.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
// idealstate.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
idealstate.setPartitionState(p1s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
idealstate.setPartitionState(p1s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
idealstate.setPartitionState(p1s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
idealstate.setPartitionState(p1s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
idealstate.setPartitionState(p1s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
idealstate.setPartitionState(p1s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealstate);
FakeValidationMetrics validationMetrics = new FakeValidationMetrics();
ValidationManager validationManager = new ValidationManager(validationMetrics, _pinotHelixResourceManager, new ControllerConf(), _segmentManager);
Map<String, String> streamConfigs = new HashMap<String, String>(4);
streamConfigs.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "highLevel,simple");
Field autoCreateOnError = ValidationManager.class.getDeclaredField("_autoCreateOnError");
autoCreateOnError.setAccessible(true);
autoCreateOnError.setBoolean(validationManager, false);
AbstractTableConfig tableConfig = mock(AbstractTableConfig.class);
IndexingConfig indexingConfig = mock(IndexingConfig.class);
when(tableConfig.getIndexingConfig()).thenReturn(indexingConfig);
when(indexingConfig.getStreamConfigs()).thenReturn(streamConfigs);
validationManager.validateLLCSegments(realtimeTableName, tableConfig);
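// Partition 0 has no segment in CONSUMING state in the ideal state above (its p0s1 entries
// are commented out), so exactly one partition should be flagged.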
Assert.assertEquals(validationMetrics.partitionCount, 1);
// Set partition 0 to have one instance in CONSUMING state and the others in OFFLINE.
// We should not flag any partition for correction.
helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
idealstate.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
idealstate.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
idealstate.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealstate);
validationManager.validateLLCSegments(realtimeTableName, tableConfig);
Assert.assertEquals(validationMetrics.partitionCount, 0);
helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
}
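As a side note, the LLCSegmentName instances above encode (table, Kafka partition, sequence number, creation time). A hedged illustration of that encoding follows; it assumes LLCSegmentName exposes the usual accessors (getPartitionId, getSequenceNumber), which are not shown in this snippet, and uses a placeholder table name.

LLCSegmentName segmentName = new LLCSegmentName("table", 0, 0, System.currentTimeMillis());
String name = segmentName.getSegmentName();  // something like "table__0__0__<creationTime>"
// Round-tripping through the name should recover partition and sequence (assumed accessors).
Assert.assertEquals(new LLCSegmentName(name).getPartitionId(), 0);
Assert.assertEquals(new LLCSegmentName(name).getSequenceNumber(), 0);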
use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
the class LLRealtimeSegmentDataManagerTest method testTimeString.
@Test
public void testTimeString() throws Exception {
JSONObject tableConfigJson = new JSONObject(_tableConfigJson);
JSONObject tableIndexConfig = (JSONObject) tableConfigJson.get("tableIndexConfig");
JSONObject streamConfigs = (JSONObject) tableIndexConfig.get("streamConfigs");
{
streamConfigs.put(CommonConstants.Helix.DataSource.Realtime.REALTIME_SEGMENT_FLUSH_TIME, "3h");
AbstractTableConfig tableConfig = createTableConfig(tableConfigJson.toString());
InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
Schema schema = Schema.fromString(makeSchema());
KafkaLowLevelStreamProviderConfig config = new KafkaLowLevelStreamProviderConfig();
config.init(tableConfig, instanceZKMetadata, schema);
Assert.assertEquals(3 * 3600 * 1000L, config.getTimeThresholdToFlushSegment());
}
{
streamConfigs.put(CommonConstants.Helix.DataSource.Realtime.REALTIME_SEGMENT_FLUSH_TIME, "3h30m");
AbstractTableConfig tableConfig = createTableConfig(tableConfigJson.toString());
InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
Schema schema = Schema.fromString(makeSchema());
KafkaLowLevelStreamProviderConfig config = new KafkaLowLevelStreamProviderConfig();
config.init(tableConfig, instanceZKMetadata, schema);
Assert.assertEquals((3 * 3600 + 30 * 60) * 1000L, config.getTimeThresholdToFlushSegment());
}
{
final long segTime = 898789748357L;
streamConfigs.put(CommonConstants.Helix.DataSource.Realtime.REALTIME_SEGMENT_FLUSH_TIME, String.valueOf(segTime));
AbstractTableConfig tableConfig = createTableConfig(tableConfigJson.toString());
InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
Schema schema = Schema.fromString(makeSchema());
KafkaLowLevelStreamProviderConfig config = new KafkaLowLevelStreamProviderConfig();
config.init(tableConfig, instanceZKMetadata, schema);
Assert.assertEquals(segTime, config.getTimeThresholdToFlushSegment());
}
}
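To make the three cases above concrete, here is a minimal, illustrative parser matching the semantics the test asserts: a bare number is taken as milliseconds, while a simple "<hours>h<minutes>m" period string is converted to milliseconds. This is a sketch only, not the actual KafkaLowLevelStreamProviderConfig implementation.

static long flushTimeToMillis(String value) {
  if (value.matches("\\d+")) {
    return Long.parseLong(value);  // bare number: already milliseconds
  }
  java.util.regex.Matcher m =
      java.util.regex.Pattern.compile("(?:(\\d+)h)?(?:(\\d+)m)?").matcher(value);
  long millis = 0;
  if (m.matches()) {
    if (m.group(1) != null) {
      millis += Long.parseLong(m.group(1)) * 3600 * 1000L;  // hours
    }
    if (m.group(2) != null) {
      millis += Long.parseLong(m.group(2)) * 60 * 1000L;    // minutes
    }
  }
  return millis;
}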
use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
the class PinotSegmentRebalancer method rebalanceTable.
/**
* Rebalances a table across the servers of its tenant.
* @param tableName name of the table to rebalance
* @throws Exception if the table config cannot be read or the rebalance fails
*/
public void rebalanceTable(String tableName) throws Exception {
String tableConfigPath = "/CONFIGS/TABLE/" + tableName;
Stat stat = new Stat();
ZNRecord znRecord = propertyStore.get(tableConfigPath, stat, 0);
AbstractTableConfig tableConfig = AbstractTableConfig.fromZnRecord(znRecord);
String tenantName = tableConfig.getTenantConfig().getServer().replaceAll(TableType.OFFLINE.toString(), "").replace(TableType.REALTIME.toString(), "");
rebalanceTable(tableName, tenantName);
}
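To make the tenant-name derivation concrete, here is what the chained replace calls do for a hypothetical server tenant value. The string "DefaultTenant_OFFLINE" is an assumption for illustration, not taken from the source; TableType is the same enum used in the method above.

// Hypothetical server tenant value, for illustration only.
String serverTenant = "DefaultTenant_OFFLINE";
String tenantName = serverTenant
    .replaceAll(TableType.OFFLINE.toString(), "")   // strips "OFFLINE"
    .replace(TableType.REALTIME.toString(), "");    // strips "REALTIME" for realtime tenants
// tenantName is now "DefaultTenant_", which the two-argument rebalanceTable overload receives.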
use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
the class LLRealtimeSegmentDataManagerTest method createFakeSegmentManager.
private FakeLLRealtimeSegmentDataManager createFakeSegmentManager() throws Exception {
LLCRealtimeSegmentZKMetadata segmentZKMetadata = createZkMetadata();
AbstractTableConfig tableConfig = createTableConfig();
InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
RealtimeTableDataManager tableDataManager = createTableDataManager();
String resourceDir = _segmentDir;
Schema schema = Schema.fromString(makeSchema());
ServerMetrics serverMetrics = new ServerMetrics(new MetricsRegistry());
FakeLLRealtimeSegmentDataManager segmentDataManager = new FakeLLRealtimeSegmentDataManager(segmentZKMetadata, tableConfig, instanceZKMetadata, tableDataManager, resourceDir, schema, serverMetrics);
return segmentDataManager;
}
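A hedged sketch of how a test could use this factory method; everything on the fake manager other than the factory call itself is an assumption for illustration and may differ in the real test class.

// Hypothetical test usage; destroy() as a cleanup hook is assumed, the real fake may expose a different one.
FakeLLRealtimeSegmentDataManager segmentDataManager = createFakeSegmentManager();
try {
  // Drive the fake consumer and assert on its state here.
} finally {
  segmentDataManager.destroy();
}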