Use of com.linkedin.pinot.common.config.AbstractTableConfig in the Pinot project (by LinkedIn):
the class PinotLLCRealtimeSegmentManagerTest, method makeTableConfig.
/**
 * Builds a mock table config that reports the given Kafka topic name and the number of
 * replicas per partition, which is all the segment manager under test needs.
 *
 * @param nReplicas replicas-per-partition value the validation config should report
 * @param topic Kafka topic name the stream config should report
 * @return a Mockito mock of AbstractTableConfig wired with validation, indexing and tenant configs
 */
private AbstractTableConfig makeTableConfig(int nReplicas, String topic) {
  AbstractTableConfig mockTableConfig = mock(AbstractTableConfig.class);
  // Validation config exposes the replica count as a string, mirroring the real config format.
  SegmentsValidationAndRetentionConfig mockValidationConfig = mock(SegmentsValidationAndRetentionConfig.class);
  when(mockValidationConfig.getReplicasPerPartition()).thenReturn(Integer.toString(nReplicas));
  when(mockTableConfig.getValidationConfig()).thenReturn(mockValidationConfig);
  // Two entries: consumer type and topic name. (Map.put returns the previous mapping — always
  // null for a fresh map — so the original's unused assignment of that return value was dead code.)
  Map<String, String> streamConfigMap = new HashMap<>(2);
  streamConfigMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "simple");
  streamConfigMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.TOPIC_NAME), topic);
  IndexingConfig mockIndexConfig = mock(IndexingConfig.class);
  when(mockIndexConfig.getStreamConfigs()).thenReturn(streamConfigMap);
  when(mockTableConfig.getIndexingConfig()).thenReturn(mockIndexConfig);
  TenantConfig mockTenantConfig = mock(TenantConfig.class);
  when(mockTenantConfig.getServer()).thenReturn("freeTenant");
  when(mockTableConfig.getTenantConfig()).thenReturn(mockTenantConfig);
  return mockTableConfig;
}
Use of com.linkedin.pinot.common.config.AbstractTableConfig in the Pinot project (by LinkedIn):
the class ValidationManagerTest, method testRebuildBrokerResourceWhenBrokerAdded.
@Test
public void testRebuildBrokerResourceWhenBrokerAdded() throws Exception {
  // Check that the first table we added doesn't need to be rebuilt (case where the ideal-state
  // brokers and the brokers in the broker resource are the same).
  String partitionName = _offlineTableConfig.getTableName();
  // Note: "getClusterManagmentTool" is the actual (misspelled) Helix API method name.
  HelixAdmin helixAdmin = _helixManager.getClusterManagmentTool();
  IdealState idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Ensure that the broker resource is not rebuilt: ideal state already matches the tenant's brokers.
  Assert.assertTrue(idealState.getInstanceSet(partitionName).equals(_pinotHelixResourceManager.getAllInstancesForBrokerTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME)));
  _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(partitionName);
  // Add another table that needs to be rebuilt.
  String offlineTableTwoConfigJson = ControllerRequestBuilderUtil.buildCreateOfflineTableJSON(TEST_TABLE_TWO, null, null, 1).toString();
  AbstractTableConfig offlineTableConfigTwo = AbstractTableConfig.init(offlineTableTwoConfigJson);
  _pinotHelixResourceManager.addTable(offlineTableConfigTwo);
  String partitionNameTwo = offlineTableConfigTwo.getTableName();
  // Add a new broker manually such that the ideal state is not updated, then verify that
  // rebuilding the broker resource brings the ideal state back in sync.
  final String brokerId = "Broker_localhost_2";
  InstanceConfig instanceConfig = new InstanceConfig(brokerId);
  instanceConfig.setInstanceEnabled(true);
  instanceConfig.setHostName("Broker_localhost");
  instanceConfig.setPort("2");
  helixAdmin.addInstance(HELIX_CLUSTER_NAME, instanceConfig);
  helixAdmin.addInstanceTag(HELIX_CLUSTER_NAME, instanceConfig.getInstanceName(), ControllerTenantNameBuilder.getBrokerTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Assert that the two are NOT equal before the call to rebuild the broker resource.
  Assert.assertFalse(idealState.getInstanceSet(partitionNameTwo).equals(_pinotHelixResourceManager.getAllInstancesForBrokerTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME)));
  _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(partitionNameTwo);
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, HELIX_CLUSTER_NAME);
  // Assert that the two are equal after the rebuild.
  Assert.assertTrue(idealState.getInstanceSet(partitionNameTwo).equals(_pinotHelixResourceManager.getAllInstancesForBrokerTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME)));
}
Use of com.linkedin.pinot.common.config.AbstractTableConfig in the Pinot project (by LinkedIn):
the class RetentionManager, method updateDeletionStrategyForOfflineTable.
/**
 * Update deletion strategy for offline table.
 * <ul>
 * <li>Keep the current deletion strategy when one of the following happened:
 * <ul>
 * <li>Failed to fetch the retention config.</li>
 * <li>Push type is not valid (neither 'APPEND' nor 'REFRESH').</li>
 * </ul>
 * <li>
 * Remove the deletion strategy when one of the following happened:
 * <ul>
 * <li>Push type is set to 'REFRESH'.</li>
 * <li>No valid retention time is set.</li>
 * </ul>
 * </li>
 * <li>Update the deletion strategy when push type is set to 'APPEND' and a valid retention time is set.</li>
 * </ul>
 */
private void updateDeletionStrategyForOfflineTable(String offlineTableName) {
  // Fetch table config from the property store; bail out (keeping current strategy) on any failure.
  AbstractTableConfig offlineTableConfig;
  try {
    offlineTableConfig = ZKMetadataProvider.getOfflineTableConfig(_pinotHelixResourceManager.getPropertyStore(), offlineTableName);
    if (offlineTableConfig == null) {
      LOGGER.error("Table config is null, skip updating deletion strategy for table: {}.", offlineTableName);
      return;
    }
  } catch (Exception e) {
    LOGGER.error("Caught exception while fetching table config, skip updating deletion strategy for table: {}.", offlineTableName, e);
    return;
  }
  // Fetch validation config.
  SegmentsValidationAndRetentionConfig validationConfig = offlineTableConfig.getValidationConfig();
  if (validationConfig == null) {
    LOGGER.error("Validation config is null, skip updating deletion strategy for table: {}.", offlineTableName);
    return;
  }
  // Fetch push type; only 'APPEND' and 'REFRESH' are valid (case-insensitive).
  String segmentPushType = validationConfig.getSegmentPushType();
  if ((segmentPushType == null) || (!segmentPushType.equalsIgnoreCase("APPEND") && !segmentPushType.equalsIgnoreCase("REFRESH"))) {
    LOGGER.error("Segment push type: {} is not valid ('APPEND' or 'REFRESH'), skip updating deletion strategy for table: {}.", segmentPushType, offlineTableName);
    return;
  }
  if (segmentPushType.equalsIgnoreCase("REFRESH")) {
    LOGGER.info("Segment push type is set to 'REFRESH', remove deletion strategy for table: {}.", offlineTableName);
    _tableDeletionStrategy.remove(offlineTableName);
    return;
  }
  // Fetch retention time unit and value; both must be non-empty to build a retention strategy.
  String retentionTimeUnit = validationConfig.getRetentionTimeUnit();
  String retentionTimeValue = validationConfig.getRetentionTimeValue();
  if (((retentionTimeUnit == null) || retentionTimeUnit.isEmpty()) || ((retentionTimeValue == null) || retentionTimeValue.isEmpty())) {
    LOGGER.info("Retention time unit/value is not set, remove deletion strategy for table: {}.", offlineTableName);
    _tableDeletionStrategy.remove(offlineTableName);
    return;
  }
  // Update time retention strategy.
  try {
    TimeRetentionStrategy timeRetentionStrategy = new TimeRetentionStrategy(retentionTimeUnit, retentionTimeValue);
    _tableDeletionStrategy.put(offlineTableName, timeRetentionStrategy);
    LOGGER.info("Updated deletion strategy for table: {} using retention time: {} {}.", offlineTableName, retentionTimeValue, retentionTimeUnit);
  } catch (Exception e) {
    // Fixed: the format string had a broken '{]' placeholder, and the exception was not passed
    // to the logger (losing the stack trace).
    LOGGER.error("Caught exception while building deletion strategy with retention time: {} {}, remove deletion strategy for table: {}.", retentionTimeValue, retentionTimeUnit, offlineTableName, e);
    _tableDeletionStrategy.remove(offlineTableName);
  }
}
Use of com.linkedin.pinot.common.config.AbstractTableConfig in the Pinot project (by LinkedIn):
the class PinotFileUploadTest, method setUp.
/**
 * Test fixture: starts ZooKeeper and the controller, registers fake broker and server
 * instances in the Helix cluster, and creates one offline table for the upload tests.
 */
@BeforeClass
public void setUp() throws Exception {
  startZk();
  startController();
  _pinotHelixResourceManager = _controllerStarter.getHelixResourceManager();
  // Five fake brokers and five fake servers auto-join the cluster.
  ControllerRequestBuilderUtil.addFakeBrokerInstancesToAutoJoinHelixCluster(getHelixClusterName(), ZkStarter.DEFAULT_ZK_STR, 5, true);
  ControllerRequestBuilderUtil.addFakeDataInstancesToAutoJoinHelixCluster(getHelixClusterName(), ZkStarter.DEFAULT_ZK_STR, 5, true);
  Assert.assertEquals(_helixAdmin.getInstancesInClusterWithTag(HELIX_CLUSTER_NAME, "DefaultTenant_BROKER").size(), 5);
  // Adding table. (Local renamed to lowerCamelCase per Java naming conventions.)
  String offlineTableConfigJson = ControllerRequestBuilderUtil.buildCreateOfflineTableJSON(TABLE_NAME, null, null, 2, "RandomAssignmentStrategy").toString();
  AbstractTableConfig offlineTableConfig = AbstractTableConfig.init(offlineTableConfigJson);
  _pinotHelixResourceManager.addTable(offlineTableConfig);
}
Use of com.linkedin.pinot.common.config.AbstractTableConfig in the Pinot project (by LinkedIn):
the class PinotTableRestletResourceTest, method testTableMinReplicationInternal.
/**
 * Creates an offline and a realtime table with the requested replication and verifies that the
 * controller enforces the configured minimum replication: the effective replication must be
 * max(tableReplication, TABLE_MIN_REPLICATION) for both table types.
 *
 * @param tableName name of the table to create
 * @param tableReplication replication requested in the table-creation payload
 */
private void testTableMinReplicationInternal(String tableName, int tableReplication) throws JSONException, IOException {
  JSONObject request = ControllerRequestBuilder.buildCreateOfflineTableJSON(tableName, "default", "default", "potato", "DAYS", "DAYS", "5", tableReplication, "BalanceNumSegmentAssignmentStrategy", Collections.<String>emptyList(), "MMAP", "v3");
  sendPostRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forTableCreate(), request.toString());
  // Table creation should succeed; the controller clamps replication up to the minimum.
  AbstractTableConfig tableConfig = getTableConfig(tableName, "OFFLINE");
  Assert.assertEquals(tableConfig.getValidationConfig().getReplicationNumber(), Math.max(tableReplication, TABLE_MIN_REPLICATION));
  // Minimal Kafka stream metadata required for a realtime table.
  JSONObject metadata = new JSONObject();
  metadata.put("streamType", "kafka");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.CONSUMER_TYPE, Kafka.ConsumerType.highLevel.toString());
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.TOPIC_NAME, "fakeTopic");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.DECODER_CLASS, "fakeClass");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.ZK_BROKER_URL, "fakeUrl");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.HighLevelConsumer.ZK_CONNECTION_STRING, "potato");
  metadata.put(DataSource.Realtime.REALTIME_SEGMENT_FLUSH_SIZE, Integer.toString(1234));
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.KAFKA_CONSUMER_PROPS_PREFIX + "." + Kafka.AUTO_OFFSET_RESET, "smallest");
  request = ControllerRequestBuilder.buildCreateRealtimeTableJSON(tableName, "default", "default", "potato", "DAYS", "DAYS", "5", tableReplication, "BalanceNumSegmentAssignmentStrategy", metadata, "fakeSchema", "fakeColumn", Collections.<String>emptyList(), "MMAP", false);
  sendPostRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forTableCreate(), request.toString());
  tableConfig = getTableConfig(tableName, "REALTIME");
  Assert.assertEquals(tableConfig.getValidationConfig().getReplicationNumber(), Math.max(tableReplication, TABLE_MIN_REPLICATION));
  // Use parseInt (primitive) instead of valueOf (boxed) since the target is an int.
  int replicasPerPartition = Integer.parseInt(tableConfig.getValidationConfig().getReplicasPerPartition());
  Assert.assertEquals(replicasPerPartition, Math.max(tableReplication, TABLE_MIN_REPLICATION));
}
Aggregations