Example use of org.apache.carbondata.core.index.dev.IndexFactory in the Apache CarbonData project:
class IndexWriterListener, method registerAllWriter.
/**
 * Registers an index writer for every eligible index of the given table and segment.
 * Lazy indexes and secondary indexes (SI provider) are skipped: lazy ones are rebuilt
 * manually, and SI has its own load path.
 */
public void registerAllWriter(CarbonTable carbonTable, String segmentId, String taskNo, SegmentProperties segmentProperties) {
  // clear cache in executor side
  IndexStoreManager.getInstance().clearIndex(carbonTable.getTableId());
  List<TableIndex> allIndexes;
  try {
    allIndexes = IndexStoreManager.getInstance().getAllCGAndFGIndexes(carbonTable);
  } catch (IOException ioe) {
    LOG.error("Error while retrieving indexes", ioe);
    throw new RuntimeException(ioe);
  }
  tblIdentifier = carbonTable.getCarbonTableIdentifier();
  for (TableIndex index : allIndexes) {
    boolean lazy = index.getIndexSchema().isLazy();
    boolean secondaryIndex =
        index.getIndexSchema().getProviderName().equals(IndexType.SI.getIndexProviderName());
    // will rebuild the index manually for lazy/SI indexes, so register only the rest
    if (!lazy && !secondaryIndex) {
      register(index.getIndexFactory(), segmentId, taskNo, segmentProperties);
    }
  }
}
Example use of org.apache.carbondata.core.index.dev.IndexFactory in the Apache CarbonData project:
class BlockletIndexInputFormat, method getSplits.
/**
 * Builds the list of input splits from the index distributables that are not yet cached,
 * keeping only the blocks this job is allowed to submit (per CarbonBlockLoaderHelper);
 * the remaining blocks are assumed to be handled by other concurrent jobs.
 *
 * @param job the Hadoop job context (unused here beyond the InputFormat contract)
 * @return splits for the blocks accepted for loading by this job
 * @throws IOException if the uncached distributables cannot be retrieved
 */
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  IndexFactory defaultIndexFactory =
      IndexStoreManager.getInstance().getDefaultIndex(table).getIndexFactory();
  CacheableIndex cacheableIndex = (CacheableIndex) defaultIndexFactory;
  List<IndexInputSplit> uncachedDistributables =
      cacheableIndex.getAllUncached(validSegments, indexExprWrapper);
  if (!validSegments.isEmpty()) {
    this.readCommittedScope = validSegments.get(0).getReadCommittedScope();
  }
  CarbonBlockLoaderHelper loaderHelper = CarbonBlockLoaderHelper.getInstance();
  int distributableSize = uncachedDistributables.size();
  List<InputSplit> splits = new ArrayList<>(distributableSize);
  keys = new HashSet<>();
  for (IndexInputSplit distributable : uncachedDistributables) {
    BlockletIndexInputSplit blockletSplit = (BlockletIndexInputSplit) distributable;
    String segmentPath = blockletSplit.getSegmentPath();
    // NOTE(review): presumably true means this job claims the block first — confirm
    // against CarbonBlockLoaderHelper; blocks not claimed are loaded by other jobs
    if (loaderHelper.checkAlreadySubmittedBlock(table.getAbsoluteTableIdentifier(), segmentPath)) {
      splits.add(blockletSplit);
      keys.add(segmentPath);
    }
  }
  int sizeOfDistToBeLoaded = splits.size();
  LOGGER.info("Submitted blocks " + sizeOfDistToBeLoaded + ", " + distributableSize + " . Rest already considered for load in other job.");
  return splits;
}
Aggregations