Usage example of org.apache.carbondata.core.index.Segment in the Apache CarbonData project: class SegmentFileStore, method commitDropPartitions.
/**
 * Updates the table status file with the dropped partitions information.
 *
 * @param carbonTable         table whose partitions were dropped
 * @param uniqueId            unique id (timestamp) of this drop-partition operation
 * @param toBeUpdatedSegments names of segments whose segment files must be updated
 * @param toBeDeleteSegments  names of segments that must be marked for delete
 * @param uuid                uuid used for the intermediate table status file name
 * @throws IOException if the table status file cannot be read or rewritten
 */
public static void commitDropPartitions(CarbonTable carbonTable, String uniqueId,
    List<String> toBeUpdatedSegments, List<String> toBeDeleteSegments, String uuid)
    throws IOException {
  // Skip the (expensive) segment listing and status rewrite when the drop touched nothing.
  if (!toBeDeleteSegments.isEmpty() || !toBeUpdatedSegments.isEmpty()) {
    // Snapshot of currently valid segments, required by the metadata status update.
    Set<Segment> segmentSet = new HashSet<>(
        new SegmentStatusManager(carbonTable.getAbsoluteTableIdentifier())
            .getValidAndInvalidSegments(carbonTable.isMV()).getValidSegments());
    CarbonUpdateUtil.updateTableMetadataStatus(segmentSet, carbonTable, uniqueId, true, false,
        Segment.toSegmentList(toBeDeleteSegments, null),
        Segment.toSegmentList(toBeUpdatedSegments, null), uuid);
  }
}
Usage example of org.apache.carbondata.core.index.Segment in the Apache CarbonData project: class SegmentWrapper, method readFields.
@Override
public void readFields(DataInput dataInput) throws IOException {
  // The stream starts with the element count written by the matching write() call.
  final int segmentCount = dataInput.readInt();
  segments = new ArrayList<>(segmentCount);
  int remaining = segmentCount;
  // Deserialize each Segment in the order it was written.
  while (remaining > 0) {
    Segment next = new Segment();
    next.readFields(dataInput);
    segments.add(next);
    remaining--;
  }
}
Usage example of org.apache.carbondata.core.index.Segment in the Apache CarbonData project: class DeleteLoadFolders, method physicalFactAndMeasureMetadataDeletion.
/**
 * Delete the invalid data physically from table.
 * @param carbonTable table
 * @param loadDetails Load details which need clean up
 * @param isForceDelete Force delete Compacted and MFD segments. it will empty the trash folder
 * @param specs Partition specs
 * @param currLoadDetails Current table status load details which are required for update manager.
 * @param cleanStaleInProgress whether stale in-progress segments are also eligible for clean up
 * @param loadsToDelete load (segment) names that have been selected for physical removal
 */
private static void physicalFactAndMeasureMetadataDeletion(CarbonTable carbonTable,
    LoadMetadataDetails[] loadDetails, boolean isForceDelete, List<PartitionSpec> specs,
    LoadMetadataDetails[] currLoadDetails, boolean cleanStaleInProgress,
    Set<String> loadsToDelete) {
  // Collect all CG/FG secondary indexes so their per-segment data can be removed too.
  List<TableIndex> indexes = new ArrayList<>();
  try {
    for (TableIndex index : IndexStoreManager.getInstance().getAllCGAndFGIndexes(carbonTable)) {
      if (index.getIndexSchema().isIndex()) {
        indexes.add(index);
      }
    }
  } catch (IOException e) {
    // Best effort: attach the exception as cause so the failure reason is not lost.
    LOGGER.warn(String.format(
        "Failed to get indexes for %s.%s, therefore the index files could not be cleaned.",
        carbonTable.getAbsoluteTableIdentifier().getDatabaseName(),
        carbonTable.getAbsoluteTableIdentifier().getTableName()), e);
  }
  SegmentUpdateStatusManager updateStatusManager =
      new SegmentUpdateStatusManager(carbonTable, currLoadDetails);
  for (final LoadMetadataDetails oneLoad : loadDetails) {
    if (loadsToDelete.contains(oneLoad.getLoadName())) {
      try {
        if (oneLoad.getSegmentFile() != null) {
          String tablePath = carbonTable.getAbsoluteTableIdentifier().getTablePath();
          Segment segment = new Segment(oneLoad.getLoadName(), oneLoad.getSegmentFile());
          // No need to delete physical data for external segments.
          if (oneLoad.getPath() == null || oneLoad.getPath().equalsIgnoreCase("NA")) {
            SegmentFileStore.deleteSegment(tablePath, segment, specs, updateStatusManager);
          }
          // delete segment files for all segments.
          SegmentFileStore.deleteSegmentFile(tablePath, segment);
        } else {
          // Legacy segment without a segment file: remove data/index files one by one.
          String path = getSegmentPath(carbonTable.getAbsoluteTableIdentifier(), oneLoad);
          boolean status = false;
          if (FileFactory.isFileExist(path)) {
            CarbonFile file = FileFactory.getCarbonFile(path);
            CarbonFile[] filesToBeDeleted = file.listFiles(new CarbonFileFilter() {
              @Override
              public boolean accept(CarbonFile file) {
                return (CarbonTablePath.isCarbonDataFile(file.getName())
                    || CarbonTablePath.isCarbonIndexFile(file.getName()));
              }
            });
            // Folder holds no data/index files anymore but still has an entry in metadata.
            if (filesToBeDeleted.length == 0) {
              status = true;
            } else {
              // The folder may be removed only when EVERY file in it was deleted.
              // (Previously the flag kept only the result of the LAST file, so one
              // successful delete at the end could mask earlier failures.)
              status = true;
              for (CarbonFile eachFile : filesToBeDeleted) {
                if (!eachFile.delete()) {
                  LOGGER.warn("Unable to delete the file as per delete command "
                      + eachFile.getAbsolutePath());
                  status = false;
                }
              }
            }
            // need to delete the complete folder.
            if (status) {
              if (!file.delete()) {
                LOGGER.warn("Unable to delete the folder as per delete command "
                    + file.getAbsolutePath());
              }
            }
          }
        }
        // Drop this load's data from every secondary index.
        List<Segment> segments = new ArrayList<>(1);
        for (TableIndex index : indexes) {
          segments.clear();
          segments.add(new Segment(oneLoad.getLoadName()));
          index.deleteIndexData(segments);
        }
      } catch (Exception e) {
        // Clean-up is best effort per load; log the cause and continue with the next one.
        LOGGER.warn("Unable to delete the file as per delete command "
            + oneLoad.getLoadName(), e);
      }
    }
  }
}
Usage example of org.apache.carbondata.core.index.Segment in the Apache CarbonData project: class CarbonLoaderUtil, method addIndexSizeIntoMetaEntry.
/**
 * Computes the total carbon index size of the given segment and records it on the
 * load metadata entry. Does nothing when the segment has no segment file or no
 * location map.
 *
 * @param loadMetadataDetails entry whose index size is to be filled in
 * @param segmentId           id of the segment being measured
 * @param carbonTable         table the segment belongs to
 * @throws IOException if the segment file or index files cannot be read
 */
public static void addIndexSizeIntoMetaEntry(LoadMetadataDetails loadMetadataDetails,
    String segmentId, CarbonTable carbonTable) throws IOException {
  Segment segment = new Segment(segmentId, loadMetadataDetails.getSegmentFile());
  // Guard: nothing to measure without a segment file.
  if (segment.getSegmentFileName() == null) {
    return;
  }
  SegmentFileStore fileStore =
      new SegmentFileStore(carbonTable.getTablePath(), segment.getSegmentFileName());
  // Guard: no index locations recorded for this segment.
  if (fileStore.getLocationMap() == null) {
    return;
  }
  fileStore.readIndexFiles(FileFactory.getConfiguration());
  long carbonIndexSize = CarbonUtil.getCarbonIndexSize(fileStore, fileStore.getLocationMap());
  loadMetadataDetails.setIndexSize(String.valueOf(carbonIndexSize));
}
Usage example of org.apache.carbondata.core.index.Segment in the Apache CarbonData project: class CarbonLoaderUtil, method recordNewLoadMetadata.
/**
 * This API will write the load level metadata for the loadmanagement module inorder to
 * manage the load and query execution management smoothly.
 *
 * @param newMetaEntry table status entry to insert (load start) or overwrite (load end)
 * @param loadModel load model carrying the table identity and the segment id
 * @param loadStartEntry true when this call marks the beginning of a load, in which case a
 *        new segment id is generated; false when an already-written entry is overwritten
 * @param insertOverwrite true for insert-overwrite: every other segment not currently in
 *        INSERT_OVERWRITE_IN_PROGRESS is marked for delete
 * @param uuid id used to derive the update status file name (may be blank)
 * @param segmentsToBeDeleted segments whose status must be changed to MARKED_FOR_DELETE
 * @param segmentFilesTobeUpdated segments whose segment-file name must be refreshed
 * @param isUpdateStatusRequired whether the update status file name must be recorded on
 *        the entry for segment "0"
 * @return boolean which determines whether status update is done or not.
 * @throws IOException
 */
public static boolean recordNewLoadMetadata(LoadMetadataDetails newMetaEntry, CarbonLoadModel loadModel, boolean loadStartEntry, boolean insertOverwrite, String uuid, List<Segment> segmentsToBeDeleted, List<Segment> segmentFilesTobeUpdated, boolean isUpdateStatusRequired) throws IOException {
boolean status = false;
AbsoluteTableIdentifier identifier = loadModel.getCarbonDataLoadSchema().getCarbonTable().getAbsoluteTableIdentifier();
// Transactional tables keep their status under the Metadata folder; create it if missing.
if (loadModel.isCarbonTransactionalTable()) {
String metadataPath = CarbonTablePath.getMetadataPath(identifier.getTablePath());
if (!FileFactory.isFileExist(metadataPath)) {
FileFactory.mkdirs(metadataPath);
}
}
String tableStatusPath = CarbonTablePath.getTableStatusFilePath(identifier.getTablePath());
SegmentStatusManager segmentStatusManager = new SegmentStatusManager(identifier);
// Table status updates must be serialized across concurrent loads: take the status lock.
ICarbonLock carbonLock = segmentStatusManager.getTableStatusLock();
int retryCount = CarbonLockUtil.getLockProperty(CarbonCommonConstants.NUMBER_OF_TRIES_FOR_CONCURRENT_LOCK, CarbonCommonConstants.NUMBER_OF_TRIES_FOR_CONCURRENT_LOCK_DEFAULT);
int maxTimeout = CarbonLockUtil.getLockProperty(CarbonCommonConstants.MAX_TIMEOUT_FOR_CONCURRENT_LOCK, CarbonCommonConstants.MAX_TIMEOUT_FOR_CONCURRENT_LOCK_DEFAULT);
try {
if (carbonLock.lockWithRetries(retryCount, maxTimeout)) {
LOGGER.info("Acquired lock for table" + loadModel.getDatabaseName() + "." + loadModel.getTableName() + " for table status updation");
// Read the current status entries; all edits below happen on this in-memory list.
LoadMetadataDetails[] listOfLoadFolderDetailsArray = SegmentStatusManager.readLoadMetadata(CarbonTablePath.getMetadataPath(identifier.getTablePath()));
List<LoadMetadataDetails> listOfLoadFolderDetails = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
Collections.addAll(listOfLoadFolderDetails, listOfLoadFolderDetailsArray);
// create a new segment Id if load has just begun else add the already generated Id
if (loadStartEntry) {
String segmentId = String.valueOf(SegmentStatusManager.createNewSegmentId(listOfLoadFolderDetailsArray));
loadModel.setLoadMetadataDetails(listOfLoadFolderDetails);
LoadMetadataDetails entryTobeRemoved = null;
// MV loads reuse the caller-provided segment id: carry over the old entry's extra
// info and remember it so the stale entry can be replaced by the new one below.
if (loadModel.getCarbonDataLoadSchema().getCarbonTable().isMV() && !loadModel.getSegmentId().isEmpty()) {
for (LoadMetadataDetails entry : listOfLoadFolderDetails) {
if (entry.getLoadName().equalsIgnoreCase(loadModel.getSegmentId())) {
newMetaEntry.setLoadName(loadModel.getSegmentId());
newMetaEntry.setExtraInfo(entry.getExtraInfo());
entryTobeRemoved = entry;
}
}
} else {
// For an update on the very first segment, record the update status file name.
if (isUpdateStatusRequired && segmentId.equalsIgnoreCase("0") && !StringUtils.isBlank(uuid)) {
newMetaEntry.setUpdateStatusFileName(CarbonUpdateUtil.getUpdateStatusFileName(uuid));
}
newMetaEntry.setLoadName(segmentId);
loadModel.setSegmentId(segmentId);
}
// remove(null) is a no-op, so this is safe when no MV entry matched above.
listOfLoadFolderDetails.remove(entryTobeRemoved);
// Fail fast when an insert overwrite (or a plain insert, while we ourselves are an
// insert overwrite) is already in progress on this table.
for (LoadMetadataDetails entry : listOfLoadFolderDetails) {
if (entry.getSegmentStatus() == SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS && SegmentStatusManager.isLoadInProgress(identifier, entry.getLoadName())) {
throw new RuntimeException("Already insert overwrite is in progress");
} else if (newMetaEntry.getSegmentStatus() == SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS && entry.getSegmentStatus() == SegmentStatus.INSERT_IN_PROGRESS && SegmentStatusManager.isLoadInProgress(identifier, entry.getLoadName())) {
throw new RuntimeException("Already insert into or load is in progress");
}
}
listOfLoadFolderDetails.add(newMetaEntry);
} else {
newMetaEntry.setLoadName(String.valueOf(loadModel.getSegmentId()));
// existing entry needs to be overwritten as the entry will exist with some
// intermediate status
int indexToOverwriteNewMetaEntry = 0;
boolean found = false;
// Locate the in-progress entry for this load by name AND start time.
for (LoadMetadataDetails entry : listOfLoadFolderDetails) {
if (entry.getLoadName().equals(newMetaEntry.getLoadName()) && entry.getLoadStartTime() == newMetaEntry.getLoadStartTime()) {
newMetaEntry.setExtraInfo(entry.getExtraInfo());
found = true;
break;
}
indexToOverwriteNewMetaEntry++;
}
// Insert overwrite replaces the whole table: mark every other segment for delete.
if (insertOverwrite) {
for (LoadMetadataDetails entry : listOfLoadFolderDetails) {
if (entry.getSegmentStatus() != SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS) {
entry.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
}
}
}
if (!found) {
LOGGER.error("Entry not found to update " + newMetaEntry + " From list :: " + listOfLoadFolderDetails);
throw new IOException("Entry not found to update in the table status file");
}
listOfLoadFolderDetails.set(indexToOverwriteNewMetaEntry, newMetaEntry);
}
for (LoadMetadataDetails detail : listOfLoadFolderDetails) {
// if the segments is in the list of marked for delete then update the status.
if (segmentsToBeDeleted.contains(new Segment(detail.getLoadName()))) {
detail.setSegmentStatus(SegmentStatus.MARKED_FOR_DELETE);
} else if (segmentFilesTobeUpdated.contains(Segment.toSegment(detail.getLoadName(), null))) {
detail.setSegmentFile(detail.getLoadName() + "_" + newMetaEntry.getUpdateStatusFileName() + CarbonTablePath.SEGMENT_EXT);
} else if (isUpdateStatusRequired && detail.getLoadName().equalsIgnoreCase("0") && !StringUtils.isBlank(uuid)) {
detail.setUpdateStatusFileName(CarbonUpdateUtil.getUpdateStatusFileName(uuid));
}
}
// Persist the modified entry list back to the table status file while still holding
// the lock, then report success.
SegmentStatusManager.writeLoadDetailsIntoFile(tableStatusPath, listOfLoadFolderDetails.toArray(new LoadMetadataDetails[0]));
status = true;
} else {
LOGGER.error("Not able to acquire the lock for Table status updation for table " + loadModel.getDatabaseName() + "." + loadModel.getTableName());
}
} finally {
// Always release the table status lock, whether or not the update succeeded.
if (carbonLock.unlock()) {
LOGGER.info("Table unlocked successfully after table status updation" + loadModel.getDatabaseName() + "." + loadModel.getTableName());
} else {
LOGGER.error("Unable to unlock Table lock for table" + loadModel.getDatabaseName() + "." + loadModel.getTableName() + " during table status updation");
}
}
return status;
}
End of aggregated usage examples.