Use of com.linkedin.pinot.common.utils.helix.PinotHelixPropertyStoreZnRecordProvider in project pinot by linkedin.
The class PinotHelixResourceManager, method getSchema.
/**
 * Fetches the named schema from the Helix property store.
 *
 * @param schemaName name under which the schema was stored
 * @return the deserialized schema, or {@code null} if no such schema exists
 * @throws JsonParseException if the stored record is not valid JSON
 * @throws JsonMappingException if the JSON cannot be mapped to a {@link Schema}
 * @throws IOException on any other read or deserialization failure
 */
@Nullable
public Schema getSchema(String schemaName) throws JsonParseException, JsonMappingException, IOException {
  PinotHelixPropertyStoreZnRecordProvider propertyStoreHelper =
      PinotHelixPropertyStoreZnRecordProvider.forSchema(_propertyStore);
  ZNRecord record = propertyStoreHelper.get(schemaName);
  return record != null ? SchemaUtils.fromZNRecord(record) : null;
}
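Because getSchema returns null for a missing schema, callers have to handle that case explicitly. A minimal caller sketch, assuming an already-constructed PinotHelixResourceManager; the wrapper class, its name, and the import paths are assumptions, not part of the snippet above:

import java.io.IOException;

import com.linkedin.pinot.common.data.Schema;
import com.linkedin.pinot.controller.helix.core.PinotHelixResourceManager;

// Hedged sketch: "SchemaLookup" and "requireSchema" are hypothetical names, not Pinot API.
public class SchemaLookup {
  private final PinotHelixResourceManager _resourceManager;

  public SchemaLookup(PinotHelixResourceManager resourceManager) {
    _resourceManager = resourceManager;
  }

  // Fails fast instead of propagating null into downstream code.
  public Schema requireSchema(String schemaName) throws IOException {
    Schema schema = _resourceManager.getSchema(schemaName);
    if (schema == null) {
      throw new IllegalStateException("No schema stored under name: " + schemaName);
    }
    return schema;
  }
}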
Use of com.linkedin.pinot.common.utils.helix.PinotHelixPropertyStoreZnRecordProvider in project pinot by linkedin.
The class SegmentFetcherAndLoader, method getSchema.
private Schema getSchema(String schemaName) throws IOException {
  PinotHelixPropertyStoreZnRecordProvider propertyStoreHelper =
      PinotHelixPropertyStoreZnRecordProvider.forSchema(_propertyStore);
  ZNRecord record = propertyStoreHelper.get(schemaName);
  if (record != null) {
    LOGGER.info("Found schema: {}", schemaName);
    return SchemaUtils.fromZNRecord(record);
  } else {
    return null;
  }
}
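Both getSchema implementations above repeat the same get-record-then-deserialize pattern against the property store. A minimal sketch of a shared helper they could delegate to; the class name ZkSchemaReader is hypothetical, and the import paths are inferred from the identifiers used above:

import java.io.IOException;

import org.apache.helix.ZNRecord;
import org.apache.helix.store.zk.ZkHelixPropertyStore;

import com.linkedin.pinot.common.data.Schema;
import com.linkedin.pinot.common.utils.SchemaUtils;
import com.linkedin.pinot.common.utils.helix.PinotHelixPropertyStoreZnRecordProvider;

// Hedged sketch: a shared read helper; "ZkSchemaReader" is not part of the Pinot codebase.
public final class ZkSchemaReader {
  private ZkSchemaReader() {
  }

  // Returns the stored schema, or null if no ZNRecord exists under schemaName.
  public static Schema readSchema(ZkHelixPropertyStore<ZNRecord> propertyStore, String schemaName)
      throws IOException {
    PinotHelixPropertyStoreZnRecordProvider helper = PinotHelixPropertyStoreZnRecordProvider.forSchema(propertyStore);
    ZNRecord record = helper.get(schemaName);
    return record != null ? SchemaUtils.fromZNRecord(record) : null;
  }
}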
Use of com.linkedin.pinot.common.utils.helix.PinotHelixPropertyStoreZnRecordProvider in project pinot by linkedin.
The class PinotHelixResourceManager, method addOrUpdateSchema.
/**
 * Schema APIs (API 2.0).
 *
 * Serializes the given schema to a ZNRecord and writes it to the Helix property store,
 * creating the entry if it does not exist and overwriting it otherwise.
 *
 * @param schema the schema to store; its name is used as the property-store key
 * @throws IllegalArgumentException if the schema cannot be serialized
 * @throws IllegalAccessException if the schema's fields cannot be accessed during serialization
 */
public void addOrUpdateSchema(Schema schema) throws IllegalArgumentException, IllegalAccessException {
  ZNRecord record = SchemaUtils.toZNRecord(schema);
  String name = schema.getSchemaName();
  PinotHelixPropertyStoreZnRecordProvider propertyStoreHelper =
      PinotHelixPropertyStoreZnRecordProvider.forSchema(_propertyStore);
  propertyStoreHelper.set(name, record);
}
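A short round-trip sketch tying the two PinotHelixResourceManager methods together. The surrounding method and the schema contents are illustrative, and a setter mirroring the getSchemaName() accessor used above is assumed:

// Hedged sketch: stores a schema, then reads it back through the property store.
public void storeAndReadBack(PinotHelixResourceManager resourceManager) throws Exception {
  Schema schema = new Schema();
  schema.setSchemaName("myTable"); // setter assumed to mirror getSchemaName()
  resourceManager.addOrUpdateSchema(schema);              // toZNRecord(...) + propertyStoreHelper.set(...)
  Schema readBack = resourceManager.getSchema("myTable"); // expected to equal the stored schema
}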
Use of com.linkedin.pinot.common.utils.helix.PinotHelixPropertyStoreZnRecordProvider in project pinot by linkedin.
The class RealtimeTableDataManager, method addSegment.
/*
 * This call comes in one of two ways:
 * For HL segments:
 * - We are being directed by Helix to take ownership of all the segments that we committed and that are still in
 *   retention. In this case we treat it exactly like OfflineTableDataManager would: wrap it in an
 *   OfflineSegmentDataManager and put it in the map.
 * - We are being asked to own a new realtime segment. In this case, we wrap the segment with a
 *   RealtimeSegmentDataManager (which kicks off Kafka consumption). When the segment is committed we are notified
 *   via the notifySegmentCommitted call, at which time we replace the segment with an OfflineSegmentDataManager.
 * For LL segments:
 * - We are being asked to start consuming from a Kafka partition.
 * - We did not know about the segment and are being asked to download and own it (re-balancing, or replacing a
 *   realtime server with a fresh one, perhaps). We need to look at the segment metadata and decide whether to
 *   start consuming or to download the segment.
 */
@Override
public void addSegment(ZkHelixPropertyStore<ZNRecord> propertyStore, AbstractTableConfig tableConfig,
    InstanceZKMetadata instanceZKMetadata, SegmentZKMetadata inputSegmentZKMetadata) throws Exception {
  // TODO FIXME
  // Hack. We get the _helixPropertyStore here and save it, knowing that we will get this addSegment call
  // before the notifyCommitted call (which uses _helixPropertyStore).
  this._helixPropertyStore = propertyStore;
  final String segmentId = inputSegmentZKMetadata.getSegmentName();
  final String tableName = inputSegmentZKMetadata.getTableName();
  if (!(inputSegmentZKMetadata instanceof RealtimeSegmentZKMetadata)) {
    LOGGER.warn("Got called with an unexpected instance object: {}, table {}, segment {}",
        inputSegmentZKMetadata.getClass().getSimpleName(), tableName, segmentId);
    return;
  }
  RealtimeSegmentZKMetadata segmentZKMetadata = (RealtimeSegmentZKMetadata) inputSegmentZKMetadata;
  LOGGER.info("Attempting to add realtime segment {} for table {}", segmentId, tableName);
  if (new File(_indexDir, segmentId).exists() && segmentZKMetadata.getStatus() == Status.DONE) {
    // The segment already exists on disk and has been committed in ZK; treat it like an offline segment.
    if (_segmentsMap.containsKey(segmentId)) {
      LOGGER.warn("Got reload for segment already on disk {} table {}, have {}", segmentId, tableName,
          _segmentsMap.get(segmentId).getClass().getSimpleName());
      return;
    }
    IndexSegment segment =
        ColumnarSegmentLoader.load(new File(_indexDir, segmentId), _readMode, _indexLoadingConfigMetadata);
    addSegment(segment);
    markSegmentAsLoaded(segmentId);
  } else {
    // The segment is not yet committed (or not present on disk), so set it up as a consuming realtime segment.
    if (_segmentsMap.containsKey(segmentId)) {
      LOGGER.warn("Got reload for segment not on disk {} table {}, have {}", segmentId, tableName,
          _segmentsMap.get(segmentId).getClass().getSimpleName());
      return;
    }
    PinotHelixPropertyStoreZnRecordProvider propertyStoreHelper =
        PinotHelixPropertyStoreZnRecordProvider.forSchema(propertyStore);
    String schemaName = tableConfig.getValidationConfig().getSchemaName();
    ZNRecord record = propertyStoreHelper.get(schemaName);
    if (record == null) {
      // Guard against an NPE in SchemaUtils.fromZNRecord when the schema was never stored.
      throw new RuntimeException("No schema found under name " + schemaName + " for table " + _tableName);
    }
    LOGGER.info("Found schema {}", schemaName);
    Schema schema = SchemaUtils.fromZNRecord(record);
    if (!isValid(schema, tableConfig.getIndexingConfig())) {
      LOGGER.error("Not adding segment {}", segmentId);
      throw new RuntimeException("Mismatching schema/table config for " + _tableName);
    }
    SegmentDataManager manager;
    if (SegmentName.isHighLevelConsumerSegmentName(segmentId)) {
      manager = new HLRealtimeSegmentDataManager(segmentZKMetadata, tableConfig, instanceZKMetadata, this,
          _indexDir.getAbsolutePath(), _readMode, schema, _serverMetrics);
    } else {
      LLCRealtimeSegmentZKMetadata llcSegmentMetadata = (LLCRealtimeSegmentZKMetadata) segmentZKMetadata;
      if (segmentZKMetadata.getStatus().equals(Status.DONE)) {
        // TODO Remove code duplication here and in LLRealtimeSegmentDataManager
        downloadAndReplaceSegment(segmentId, llcSegmentMetadata);
        return;
      }
      manager = new LLRealtimeSegmentDataManager(segmentZKMetadata, tableConfig, instanceZKMetadata, this,
          _indexDir.getAbsolutePath(), schema, _serverMetrics);
    }
    LOGGER.info("Initialized RealtimeSegmentDataManager for segment {}", segmentId);
    try {
      _rwLock.writeLock().lock();
      _segmentsMap.put(segmentId, manager);
    } finally {
      _rwLock.writeLock().unlock();
    }
    _loadingSegments.add(segmentId);
  }
}
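The branching in addSegment is easiest to see when reduced to its decision points. A hedged sketch using only types and calls that appear in the method above; the factored-out method itself is hypothetical and its branch bodies are elided:

// Hedged summary of the four outcomes in addSegment (illustrative factoring, not Pinot code).
private void dispatchSegment(File indexDir, String segmentId, RealtimeSegmentZKMetadata metadata) {
  if (new File(indexDir, segmentId).exists() && metadata.getStatus() == Status.DONE) {
    // 1. Committed and on disk: load like an offline segment and mark it loaded.
  } else if (SegmentName.isHighLevelConsumerSegmentName(segmentId)) {
    // 2. HL segment: wrap in HLRealtimeSegmentDataManager, which starts Kafka consumption.
  } else if (metadata.getStatus().equals(Status.DONE)) {
    // 3. Committed LL segment we do not have locally: downloadAndReplaceSegment(...).
  } else {
    // 4. Consuming LL segment: wrap in LLRealtimeSegmentDataManager.
  }
}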