Use of org.apache.carbondata.core.indexstore.PartitionSpec in project carbondata by apache.
Class CarbonOutputCommitter, method overwritePartitions.
/**
 * Overwrites the partitions in case of an overwrite query. It only updates the partition
 * mapping inside the segment files of the existing segments.
 *
 * @param loadModel    load model of the current overwrite load
 * @param newMetaEntry load metadata entry of the new segment
 * @return unique id of this update, or null if the new segment has no partition specs
 * @throws IOException
 */
private String overwritePartitions(CarbonLoadModel loadModel, LoadMetadataDetails newMetaEntry)
    throws IOException {
  CarbonTable table = loadModel.getCarbonDataLoadSchema().getCarbonTable();
  SegmentFileStore fileStore = new SegmentFileStore(loadModel.getTablePath(),
      loadModel.getSegmentId() + "_" + loadModel.getFactTimeStamp() + CarbonTablePath.SEGMENT_EXT);
  List<PartitionSpec> partitionSpecs = fileStore.getPartitionSpecs();
  if (partitionSpecs != null && partitionSpecs.size() > 0) {
    List<Segment> validSegments =
        new SegmentStatusManager(table.getAbsoluteTableIdentifier())
            .getValidAndInvalidSegments().getValidSegments();
    String uniqueId = String.valueOf(System.currentTimeMillis());
    List<String> tobeUpdatedSegs = new ArrayList<>();
    List<String> tobeDeletedSegs = new ArrayList<>();
    // First drop the partitions from the partition mapper files of each valid segment.
    for (Segment segment : validSegments) {
      new SegmentFileStore(table.getTablePath(), segment.getSegmentFileName())
          .dropPartitions(segment, partitionSpecs, uniqueId, tobeDeletedSegs, tobeUpdatedSegs);
    }
    newMetaEntry.setUpdateStatusFileName(uniqueId);
    // Commit the removed partitions in the carbon store.
    CarbonLoaderUtil.recordNewLoadMetadata(newMetaEntry, loadModel, false, false, "",
        Segment.toSegmentList(tobeDeletedSegs), Segment.toSegmentList(tobeUpdatedSegs));
    return uniqueId;
  }
  return null;
}
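For reference, a minimal sketch of the segment file naming convention the method relies on; the segment id, timestamp, table path, and ".segment" extension below are illustrative placeholders, not values taken from the snippet.

// Illustrative placeholders only.
String tablePath = "/store/db/sales";
String segmentId = "2";                 // would come from loadModel.getSegmentId()
long factTimeStamp = 1515485888000L;    // would come from loadModel.getFactTimeStamp()
// overwritePartitions reads the segment file named "<segmentId>_<factTimeStamp><SEGMENT_EXT>".
String segmentFileName = segmentId + "_" + factTimeStamp + CarbonTablePath.SEGMENT_EXT;
SegmentFileStore fileStore = new SegmentFileStore(tablePath, segmentFileName);
List<PartitionSpec> newPartitions = fileStore.getPartitionSpecs();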
Use of org.apache.carbondata.core.indexstore.PartitionSpec in project carbondata by apache.
Class SegmentFileStore, method dropPartitions.
/**
 * Drops the partition related entries from the segment file of the given segment and writes
 * the result to a new segment file. It first iterates over the segment file locations, checks
 * whether each path has to be dropped, and marks matching entries for delete.
 *
 * @param segment             segment whose segment file is updated
 * @param partitionSpecs      partitions to be dropped
 * @param uniqueId            unique id used in the name of the rewritten segment file
 * @param toBeDeletedSegments collects segments that can be removed completely
 * @param toBeUpdatedSegments collects segments whose segment file was rewritten
 * @throws IOException
 */
public void dropPartitions(Segment segment, List<PartitionSpec> partitionSpecs, String uniqueId,
    List<String> toBeDeletedSegments, List<String> toBeUpdatedSegments) throws IOException {
  readSegment(tablePath, segment.getSegmentFileName());
  boolean updateSegment = false;
  for (Map.Entry<String, FolderDetails> entry : segmentFile.getLocationMap().entrySet()) {
    String location = entry.getKey();
    if (entry.getValue().isRelative) {
      location = tablePath + CarbonCommonConstants.FILE_SEPARATOR + location;
    }
    Path path = new Path(location);
    // Mark the folder for delete if its path matches one of the partitions to drop.
    if (null != partitionSpecs) {
      for (PartitionSpec spec : partitionSpecs) {
        if (path.equals(spec.getLocation())) {
          entry.getValue().setStatus(SegmentStatus.MARKED_FOR_DELETE.getMessage());
          updateSegment = true;
          break;
        }
      }
    }
  }
  if (updateSegment) {
    String writePath = CarbonTablePath.getSegmentFilesLocation(tablePath);
    writePath = writePath + CarbonCommonConstants.FILE_SEPARATOR
        + SegmentFileStore.genSegmentFileName(segment.getSegmentNo(), String.valueOf(uniqueId))
        + CarbonTablePath.SEGMENT_EXT;
    writeSegmentFile(segmentFile, writePath);
  }
  // Check whether the segment can be removed completely, i.e. no folder is in SUCCESS state anymore.
  boolean deleteSegment = true;
  for (Map.Entry<String, FolderDetails> entry : segmentFile.getLocationMap().entrySet()) {
    if (entry.getValue().getStatus().equals(SegmentStatus.SUCCESS.getMessage())) {
      deleteSegment = false;
      break;
    }
  }
  if (deleteSegment) {
    toBeDeletedSegments.add(segment.getSegmentNo());
  }
  if (updateSegment) {
    toBeUpdatedSegments.add(segment.getSegmentNo());
  }
}
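A minimal usage sketch of dropPartitions, assuming a resolved Segment, the list of partitions to drop, and the table path are already available; those variables are assumptions, not part of the snippet.

// Assumed to exist: tablePath, segment, partitionSpecs.
List<String> toBeDeleted = new ArrayList<>();
List<String> toBeUpdated = new ArrayList<>();
String uniqueId = String.valueOf(System.currentTimeMillis());
SegmentFileStore store = new SegmentFileStore(tablePath, segment.getSegmentFileName());
store.dropPartitions(segment, partitionSpecs, uniqueId, toBeDeleted, toBeUpdated);
// toBeUpdated now holds segments whose segment file was rewritten under the new uniqueId,
// and toBeDeleted holds segments with no SUCCESS folder left, i.e. removable entirely.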
Use of org.apache.carbondata.core.indexstore.PartitionSpec in project carbondata by apache.
Class BlockletDataMap, method prune.
@Override
public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
    List<PartitionSpec> partitions) {
  if (unsafeMemoryDMStore.getRowCount() == 0) {
    return new ArrayList<>();
  }
  // If partitions are passed, prune by partition first: this datamap qualifies only if its
  // folder exactly matches one of the given partition locations.
  if (partitions != null) {
    // First get the partition information stored inside the datamap.
    String[] fileDetails = getFileDetails();
    // Check for an exact match of the partition location and uuid.
    boolean found = false;
    Path folderPath = new Path(fileDetails[0]);
    for (PartitionSpec spec : partitions) {
      if (folderPath.equals(spec.getLocation()) && isCorrectUUID(fileDetails, spec)) {
        found = true;
        break;
      }
    }
    if (!found) {
      // The partition was dropped or does not match, so nothing to prune here.
      return new ArrayList<>();
    }
  }
  // Prune with filters if the partitions exist in this datamap.
  return prune(filterExp, segmentProperties);
}
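A hedged sketch tying this together with SegmentFileStore: the partition specs of a segment can be passed straight into prune. The blockletDataMap, filterExp, and segmentProperties variables are assumed to exist; they are not part of the snippet.

// Assumed to exist: segmentId, tablePath, blockletDataMap, filterExp, segmentProperties.
List<PartitionSpec> partitions = SegmentFileStore.getPartitionSpecs(segmentId, tablePath);
List<Blocklet> blocklets = blockletDataMap.prune(filterExp, segmentProperties, partitions);
// Passing null for partitions skips the partition check entirely; an empty result here means
// this datamap's folder did not match any of the given partition locations.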
Use of org.apache.carbondata.core.indexstore.PartitionSpec in project carbondata by apache.
Class SegmentFileStore, method getPartitionSpecs.
/**
 * Gets the partition specs of the given segment.
 *
 * @param segmentId id of the segment
 * @param tablePath path of the table
 * @return partition specs of the segment, or null if the segment has no segment file
 * @throws IOException
 */
public static List<PartitionSpec> getPartitionSpecs(String segmentId, String tablePath)
    throws IOException {
  LoadMetadataDetails segEntry = null;
  LoadMetadataDetails[] details =
      SegmentStatusManager.readLoadMetadata(CarbonTablePath.getMetadataPath(tablePath));
  for (LoadMetadataDetails entry : details) {
    if (entry.getLoadName().equals(segmentId)) {
      segEntry = entry;
      break;
    }
  }
  if (segEntry != null && segEntry.getSegmentFile() != null) {
    SegmentFileStore fileStore = new SegmentFileStore(tablePath, segEntry.getSegmentFile());
    List<PartitionSpec> partitionSpecs = fileStore.getPartitionSpecs();
    // Stamp each spec with the "<segmentId>_<loadStartTime>" uuid so it can be matched later.
    for (PartitionSpec spec : partitionSpecs) {
      spec.setUuid(segmentId + "_" + segEntry.getLoadStartTime());
    }
    return partitionSpecs;
  }
  return null;
}
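A minimal usage sketch of getPartitionSpecs; the segment id and table path below are placeholders.

List<PartitionSpec> specs = SegmentFileStore.getPartitionSpecs("0", "/store/db/sales");
if (specs != null) {
  for (PartitionSpec spec : specs) {
    // getLocation() and getPartitions() are used the same way in the snippets above.
    System.out.println(spec.getLocation() + " -> " + spec.getPartitions());
  }
}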
Use of org.apache.carbondata.core.indexstore.PartitionSpec in project carbondata by apache.
Class SegmentFileStore, method getSegmentFileForPhysicalDataPartitions.
/**
 * Provides a segment file only for the partitions which have physical index files.
 *
 * @param tablePath      path of the table
 * @param partitionSpecs partitions to scan for index files
 */
public static SegmentFile getSegmentFileForPhysicalDataPartitions(String tablePath,
    List<PartitionSpec> partitionSpecs) throws IOException {
  SegmentFile segmentFile = null;
  for (PartitionSpec spec : partitionSpecs) {
    String location = spec.getLocation().toString();
    CarbonFile carbonFile = FileFactory.getCarbonFile(location);
    // Only carbon index files count as physical data for the partition.
    CarbonFile[] listFiles = carbonFile.listFiles(new CarbonFileFilter() {
      @Override
      public boolean accept(CarbonFile file) {
        return CarbonTablePath.isCarbonIndexFile(file.getAbsolutePath());
      }
    });
    if (listFiles != null && listFiles.length > 0) {
      boolean isRelative = false;
      if (location.startsWith(tablePath)) {
        // Store the location relative to the table path.
        location = location.substring(tablePath.length());
        isRelative = true;
      }
      SegmentFile localSegmentFile = new SegmentFile();
      FolderDetails folderDetails = new FolderDetails();
      folderDetails.setRelative(isRelative);
      folderDetails.setPartitions(spec.getPartitions());
      folderDetails.setStatus(SegmentStatus.SUCCESS.getMessage());
      for (CarbonFile file : listFiles) {
        if (file.getName().endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)) {
          // A merge index file bundles several index files; record them individually as well.
          List<String> indexFiles =
              new SegmentIndexFileStore().getIndexFilesFromMergeFile(file.getAbsolutePath());
          folderDetails.getFiles().addAll(indexFiles);
          folderDetails.setMergeFileName(file.getName());
        } else {
          folderDetails.getFiles().add(file.getName());
        }
      }
      localSegmentFile.addPath(location, folderDetails);
      if (segmentFile == null) {
        segmentFile = localSegmentFile;
      } else {
        segmentFile = segmentFile.merge(localSegmentFile);
      }
    }
  }
  return segmentFile;
}
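A usage sketch: build a segment file covering only the partitions that still have physical index files on disk and inspect its location map. The tablePath and partitionSpecs variables are placeholders, not part of the snippet.

// Assumed to exist: tablePath, partitionSpecs.
SegmentFile segmentFile =
    SegmentFileStore.getSegmentFileForPhysicalDataPartitions(tablePath, partitionSpecs);
if (segmentFile != null) {
  // Each entry maps a (possibly relative) partition folder to its index files and status.
  for (Map.Entry<String, FolderDetails> entry : segmentFile.getLocationMap().entrySet()) {
    System.out.println(entry.getKey() + " -> " + entry.getValue().getFiles());
  }
}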