use of com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig in project pinot by linkedin.
the class PinotTableRestletResource method addTable.
@HttpVerb("post")
@Summary("Adds a table")
@Tags({ "table" })
@Paths({ "/tables", "/tables/" })
private void addTable(AbstractTableConfig config) throws IOException {
// For a self-service cluster, ensure that tables are created with at least the
// minimum replication factor, irrespective of the table configuration value.
SegmentsValidationAndRetentionConfig segmentsConfig = config.getValidationConfig();
int requestReplication = segmentsConfig.getReplicationNumber();
int configMinReplication = _controllerConf.getDefaultTableMinReplicas();
if (requestReplication < configMinReplication) {
LOGGER.info("Creating table with minimum replication factor of: {} instead of requested replication: {}", configMinReplication, requestReplication);
segmentsConfig.setReplication(String.valueOf(configMinReplication));
}
if (segmentsConfig.getReplicasPerPartition() != null) {
int replicasPerPartition = Integer.valueOf(segmentsConfig.getReplicasPerPartition());
if (replicasPerPartition < configMinReplication) {
LOGGER.info("Creating table with minimum replicasPerPartition of: {} instead of requested replicasPerPartition: {}", configMinReplication, requestReplication);
segmentsConfig.setReplicasPerPartition(String.valueOf(configMinReplication));
}
}
_pinotHelixResourceManager.addTable(config);
}
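For reference, the same minimum-replication guard can be sketched as a standalone helper. This is a hedged illustration that uses only the getters and setters shown above; the helper name enforceMinReplication is hypothetical.
// Hypothetical helper (not part of Pinot): enforce a minimum replication factor on a table config,
// using only SegmentsValidationAndRetentionConfig methods that appear in the snippet above.
private static void enforceMinReplication(AbstractTableConfig config, int minReplicas) {
  SegmentsValidationAndRetentionConfig segmentsConfig = config.getValidationConfig();
  if (segmentsConfig.getReplicationNumber() < minReplicas) {
    segmentsConfig.setReplication(String.valueOf(minReplicas));
  }
  String replicasPerPartition = segmentsConfig.getReplicasPerPartition();
  if (replicasPerPartition != null && Integer.parseInt(replicasPerPartition) < minReplicas) {
    segmentsConfig.setReplicasPerPartition(String.valueOf(minReplicas));
  }
}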
use of com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig in project pinot by linkedin.
the class PinotHelixResourceManager method addTable.
/**
* Table APIs
*/
public void addTable(AbstractTableConfig config) throws JsonGenerationException, JsonMappingException, IOException {
TenantConfig tenantConfig = null;
TableType type = TableType.valueOf(config.getTableType().toUpperCase());
if (isSingleTenantCluster()) {
tenantConfig = new TenantConfig();
tenantConfig.setBroker(ControllerTenantNameBuilder.getBrokerTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
switch(type) {
case OFFLINE:
tenantConfig.setServer(ControllerTenantNameBuilder.getOfflineTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
break;
case REALTIME:
tenantConfig.setServer(ControllerTenantNameBuilder.getRealtimeTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
break;
default:
throw new RuntimeException("UnSupported table type");
}
config.setTenantConfig(tenantConfig);
} else {
tenantConfig = config.getTenantConfig();
if (tenantConfig.getBroker() == null || tenantConfig.getServer() == null) {
throw new RuntimeException("missing tenant configs");
}
}
SegmentsValidationAndRetentionConfig segmentsConfig = config.getValidationConfig();
switch(type) {
case OFFLINE:
final String offlineTableName = config.getTableName();
// now let's build an ideal state
LOGGER.info("Building empty ideal state for table: {}", offlineTableName);
final IdealState offlineIdealState = PinotTableIdealStateBuilder.buildEmptyIdealStateFor(offlineTableName, Integer.parseInt(segmentsConfig.getReplication()));
LOGGER.info("Adding table via the admin");
_helixAdmin.addResource(_helixClusterName, offlineTableName, offlineIdealState);
LOGGER.info("Successfully added the table: {} to the cluster", offlineTableName);
// let's add the table configs
ZKMetadataProvider.setOfflineTableConfig(_propertyStore, offlineTableName, AbstractTableConfig.toZnRecord(config));
_propertyStore.create(ZKMetadataProvider.constructPropertyStorePathForResource(offlineTableName), new ZNRecord(offlineTableName), AccessOption.PERSISTENT);
break;
case REALTIME:
final String realtimeTableName = config.getTableName();
// let's add the table configs
ZKMetadataProvider.setRealtimeTableConfig(_propertyStore, realtimeTableName, AbstractTableConfig.toZnRecord(config));
/*
* PinotRealtimeSegmentManager sets up watches on table and segment path. When a table gets created,
* it expects the INSTANCE path in propertystore to be set up so that it can get the kafka group ID and
* create (high-level consumer) segments for that table.
* So, we need to set up the instance first, before adding the table resource for HLC new table creation.
*
* For low-level consumers, the order is to create the resource first, and set up the propertystore with segments
* and then tweak the idealstate to add those segments.
*
* We also need to support the case when a high-level consumer already exists for a table and we are adding
* the low-level consumers.
*/
IndexingConfig indexingConfig = config.getIndexingConfig();
ensureRealtimeClusterIsSetUp(config, realtimeTableName, indexingConfig);
LOGGER.info("Successfully added or updated the table {} ", realtimeTableName);
break;
default:
throw new RuntimeException("UnSupported table type");
}
handleBrokerResource(config);
}
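The OFFLINE branch above is where the replication value stored in SegmentsValidationAndRetentionConfig feeds into the ideal state. Below is a hedged sketch of just that hand-off, using only the methods shown above; the helper name is hypothetical.
// Hypothetical extraction of the OFFLINE branch: the replication factor is stored as a String
// in SegmentsValidationAndRetentionConfig and must be parsed before the ideal state is built.
private IdealState buildOfflineIdealState(AbstractTableConfig config) {
  SegmentsValidationAndRetentionConfig segmentsConfig = config.getValidationConfig();
  int replication = Integer.parseInt(segmentsConfig.getReplication());
  return PinotTableIdealStateBuilder.buildEmptyIdealStateFor(config.getTableName(), replication);
}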
use of com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig in project pinot by linkedin.
the class TableRetentionValidator method run.
public void run() throws Exception {
// Get all resources in cluster
List<String> resourcesInCluster = _helixAdmin.getResourcesInCluster(_clusterName);
for (String tableName : resourcesInCluster) {
// Skip non-table resources
if (!tableName.endsWith("_OFFLINE") && !tableName.endsWith("_REALTIME")) {
continue;
}
// Skip tables that do not match the defined name pattern
if (_tableNamePattern != null && !tableName.matches(_tableNamePattern)) {
continue;
}
// Get the retention config
SegmentsValidationAndRetentionConfig retentionConfig = getTableConfig(tableName).getValidationConfig();
if (retentionConfig == null) {
LOGGER.error("Table: {}, \"segmentsConfig\" field is missing in table config", tableName);
continue;
}
String segmentPushType = retentionConfig.getSegmentPushType();
if (segmentPushType == null) {
LOGGER.error("Table: {}, null push type", tableName);
continue;
} else if (segmentPushType.equalsIgnoreCase("REFRESH")) {
continue;
} else if (!segmentPushType.equalsIgnoreCase("APPEND")) {
LOGGER.error("Table: {}, invalid push type: {}", tableName, segmentPushType);
continue;
}
// APPEND use case
// Get time unit
String timeUnitString = retentionConfig.getRetentionTimeUnit();
TimeUnit timeUnit;
try {
timeUnit = TimeUnit.valueOf(timeUnitString.toUpperCase());
} catch (Exception e) {
LOGGER.error("Table: {}, invalid time unit: {}", tableName, timeUnitString);
continue;
}
// Get time duration in days
String timeValueString = retentionConfig.getRetentionTimeValue();
long durationInDays;
try {
durationInDays = timeUnit.toDays(Long.valueOf(timeValueString));
} catch (Exception e) {
LOGGER.error("Table: {}, invalid time value: {}", tableName, timeValueString);
continue;
}
if (durationInDays <= 0) {
LOGGER.error("Table: {}, invalid retention duration in days: {}", tableName, durationInDays);
continue;
}
if (durationInDays > _durationInDaysThreshold) {
LOGGER.warn("Table: {}, retention duration in days is too large: {}", tableName, durationInDays);
}
// Skip segments metadata check for realtime tables
if (tableName.endsWith("REALTIME")) {
continue;
}
// Check segments metadata (only for offline tables)
List<String> segmentNames = getSegmentNames(tableName);
if (segmentNames == null || segmentNames.isEmpty()) {
LOGGER.warn("Table: {}, no segment metadata in property store", tableName);
continue;
}
List<String> errorMessages = new ArrayList<>();
for (String segmentName : segmentNames) {
OfflineSegmentZKMetadata offlineSegmentMetadata = getOfflineSegmentMetadata(tableName, segmentName);
TimeUnit segmentTimeUnit = offlineSegmentMetadata.getTimeUnit();
if (segmentTimeUnit == null) {
errorMessages.add("Segment: " + segmentName + " has null time unit");
continue;
}
long startTimeInMillis = segmentTimeUnit.toMillis(offlineSegmentMetadata.getStartTime());
if (!TimeUtils.timeValueInValidRange(startTimeInMillis)) {
errorMessages.add("Segment: " + segmentName + " has invalid start time in millis: " + startTimeInMillis);
}
long endTimeInMillis = segmentTimeUnit.toMillis(offlineSegmentMetadata.getEndTime());
if (!TimeUtils.timeValueInValidRange(endTimeInMillis)) {
errorMessages.add("Segment: " + segmentName + " has invalid end time in millis: " + endTimeInMillis);
}
}
if (!errorMessages.isEmpty()) {
LOGGER.error("Table: {}, invalid segments: {}", tableName, errorMessages);
}
}
}
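The push-type and retention checks above boil down to parsing two String fields from the config. A hedged sketch of the duration conversion in isolation, using only getters shown above; the helper name is hypothetical.
// Hypothetical helper: convert the retention settings to days, returning -1 on any parse failure
// so the caller can treat non-positive values as invalid (mirrors the checks in run() above).
private static long retentionInDays(SegmentsValidationAndRetentionConfig retentionConfig) {
  try {
    TimeUnit timeUnit = TimeUnit.valueOf(retentionConfig.getRetentionTimeUnit().toUpperCase());
    return timeUnit.toDays(Long.parseLong(retentionConfig.getRetentionTimeValue()));
  } catch (Exception e) {
    return -1L;
  }
}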
use of com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig in project pinot by linkedin.
the class RetentionManager method updateDeletionStrategyForRealtimeTable.
/**
* Update deletion strategy for realtime table.
* <ul>
* <li>Keep the current deletion strategy when failed to get a valid retention time</li>
* <li>Update the deletion strategy when valid retention time is set.</li>
* </ul>
* The reason for this is that we don't allow realtime table without deletion strategy.
*/
private void updateDeletionStrategyForRealtimeTable(String realtimeTableName) {
try {
AbstractTableConfig realtimeTableConfig = ZKMetadataProvider.getRealtimeTableConfig(_pinotHelixResourceManager.getPropertyStore(), realtimeTableName);
assert realtimeTableConfig != null;
SegmentsValidationAndRetentionConfig validationConfig = realtimeTableConfig.getValidationConfig();
TimeRetentionStrategy timeRetentionStrategy = new TimeRetentionStrategy(validationConfig.getRetentionTimeUnit(), validationConfig.getRetentionTimeValue());
_tableDeletionStrategy.put(realtimeTableName, timeRetentionStrategy);
} catch (Exception e) {
LOGGER.error("Caught exception while updating deletion strategy, skip updating deletion strategy for table: {}.", realtimeTableName, e);
}
}
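Both retention fields are plain Strings on the config POJO (for example "DAYS" and "5"). A hedged illustration of the values the strategy is built from, assuming the corresponding setters exist on SegmentsValidationAndRetentionConfig; the values are hypothetical.
// Illustrative values only; the setters are assumed to mirror the getters used above.
SegmentsValidationAndRetentionConfig validationConfig = new SegmentsValidationAndRetentionConfig();
validationConfig.setRetentionTimeUnit("DAYS");
validationConfig.setRetentionTimeValue("5");
TimeRetentionStrategy fiveDayRetention =
    new TimeRetentionStrategy(validationConfig.getRetentionTimeUnit(), validationConfig.getRetentionTimeValue());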
use of com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig in project pinot by linkedin.
the class PinotLLCRealtimeSegmentManagerTest method makeTableConfig.
// Make a table config mock that returns the topic name and the number of replicas per partition that we need.
private AbstractTableConfig makeTableConfig(int nReplicas, String topic) {
AbstractTableConfig mockTableConfig = mock(AbstractTableConfig.class);
SegmentsValidationAndRetentionConfig mockValidationConfig = mock(SegmentsValidationAndRetentionConfig.class);
when(mockValidationConfig.getReplicasPerPartition()).thenReturn(Integer.toString(nReplicas));
when(mockTableConfig.getValidationConfig()).thenReturn(mockValidationConfig);
Map<String, String> streamConfigMap = new HashMap<>(1);
streamConfigMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "simple");
streamConfigMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.TOPIC_NAME), topic);
IndexingConfig mockIndexConfig = mock(IndexingConfig.class);
when(mockIndexConfig.getStreamConfigs()).thenReturn(streamConfigMap);
when(mockTableConfig.getIndexingConfig()).thenReturn(mockIndexConfig);
TenantConfig mockTenantConfig = mock(TenantConfig.class);
when(mockTenantConfig.getServer()).thenReturn("freeTenant");
when(mockTableConfig.getTenantConfig()).thenReturn(mockTenantConfig);
return mockTableConfig;
}
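A brief usage sketch of the mock above; the replica count and topic name are hypothetical test values.
// The mocked table config hands the replica count back as a String through the validation config,
// and the topic name back through the indexing config's stream configs map.
AbstractTableConfig tableConfig = makeTableConfig(3, "someKafkaTopic");
int nReplicas = Integer.parseInt(tableConfig.getValidationConfig().getReplicasPerPartition());
String topic = tableConfig.getIndexingConfig().getStreamConfigs().get(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.TOPIC_NAME));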