use of org.apache.carbondata.core.metadata.CarbonTableIdentifier in project carbondata by apache.
the class DataLoadExecutor method execute.
public void execute(CarbonLoadModel loadModel, String storeLocation,
    CarbonIterator<Object[]>[] inputIterators) throws Exception {
  AbstractDataLoadProcessorStep loadProcessorStep = null;
  try {
    loadProcessorStep =
        new DataLoadProcessBuilder().build(loadModel, storeLocation, inputIterators);
    // 1. initialize
    loadProcessorStep.initialize();
    LOGGER.info("Data Loading is started for table " + loadModel.getTableName());
    // 2. execute the step
    loadProcessorStep.execute();
  } catch (CarbonDataLoadingException e) {
    throw e;
  } catch (Exception e) {
    LOGGER.error(e, "Data Loading failed for table " + loadModel.getTableName());
    throw new CarbonDataLoadingException(
        "Data Loading failed for table " + loadModel.getTableName(), e);
  } finally {
    if (loadProcessorStep != null) {
      // 3. close the step
      loadProcessorStep.close();
    }
  }
  String key = new CarbonTableIdentifier(loadModel.getDatabaseName(),
      loadModel.getTableName(), null).getBadRecordLoggerKey();
  if (null != BadRecordsLogger.hasBadRecord(key)) {
    LOGGER.error("Data Load is partially success for table " + loadModel.getTableName());
    throw new BadRecordFoundException("Bad records found during load");
  } else {
    LOGGER.info("Data loading is successful for table " + loadModel.getTableName());
  }
}
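In this method the CarbonTableIdentifier is used only as a key factory for bad-record tracking. A minimal sketch of that pattern on its own, using just the constructor, getBadRecordLoggerKey() and BadRecordsLogger.hasBadRecord() calls already visible above; the database and table names are hypothetical placeholders:

// Sketch only: derive the bad-record logger key for a load and check it.
// "default" and "sales" are placeholder names; the null table id mirrors the
// call in DataLoadExecutor.execute above.
String badRecordKey = new CarbonTableIdentifier("default", "sales", null)
    .getBadRecordLoggerKey();
if (null != BadRecordsLogger.hasBadRecord(badRecordKey)) {
  // At least one bad record was logged for this load; callers typically fail
  // the load at this point, as the method above does.
  throw new BadRecordFoundException("Bad records found during load");
}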
use of org.apache.carbondata.core.metadata.CarbonTableIdentifier in project carbondata by apache.
the class DeleteLoadFolders method getSegmentPath.
/**
 * Returns the segment path, i.e. the carbondata directory of one segment under one partition.
 *
 * @param dbName        database name
 * @param tableName     table name
 * @param storeLocation store path of the table
 * @param partitionId   partition id
 * @param oneLoad       load metadata details of the segment
 * @return path of the segment's data directory
 */
private static String getSegmentPath(String dbName, String tableName, String storeLocation,
    int partitionId, LoadMetadataDetails oneLoad) {
  CarbonTablePath carbon = new CarbonStorePath(storeLocation)
      .getCarbonTablePath(new CarbonTableIdentifier(dbName, tableName, ""));
  String segmentId = oneLoad.getLoadName();
  return carbon.getCarbonDataDirectoryPath("" + partitionId, segmentId);
}
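The same identifier-to-path resolution can be used in isolation. A sketch with placeholder values for the store location, database, table, partition and segment; only the call chain mirrors getSegmentPath above:

// Placeholder values; the calls are the ones shown in getSegmentPath.
String storeLocation = "/tmp/carbon/store";
CarbonTablePath tablePath = new CarbonStorePath(storeLocation)
    .getCarbonTablePath(new CarbonTableIdentifier("default", "sales", ""));
// Resolve the data directory of segment "0" under partition 0.
String segmentDir = tablePath.getCarbonDataDirectoryPath("0", "0");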
use of org.apache.carbondata.core.metadata.CarbonTableIdentifier in project carbondata by apache.
the class CarbonDataMergerUtil method identifySegmentsToBeMergedBasedOnSize.
/**
 * Identify the segments to be merged based on segment size in case of major compaction.
 *
 * @param compactionSize              major compaction size threshold (converted to bytes below)
 * @param listOfSegmentsAfterPreserve segments remaining after preserving the configured ones
 * @param carbonLoadModel             load model of the table being compacted
 * @param storeLocation               store path of the table
 * @return segments qualifying for the major compaction
 */
private static List<LoadMetadataDetails> identifySegmentsToBeMergedBasedOnSize(
    long compactionSize, List<LoadMetadataDetails> listOfSegmentsAfterPreserve,
    CarbonLoadModel carbonLoadModel, String storeLocation) {
  List<LoadMetadataDetails> segmentsToBeMerged =
      new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
  CarbonTableIdentifier tableIdentifier =
      carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().getCarbonTableIdentifier();
  // total length
  long totalLength = 0;
  // check the size of each segment, summed up across partitions
  for (LoadMetadataDetails segment : listOfSegmentsAfterPreserve) {
    String segId = segment.getLoadName();
    // variable to store one segment size across partitions
    long sizeOfOneSegmentAcrossPartition =
        getSizeOfSegment(storeLocation, tableIdentifier, segId);
    // if the size of a single segment already exceeds the major compaction size, ignore it
    if (sizeOfOneSegmentAcrossPartition > (compactionSize * 1024 * 1024)) {
      // if 2 segments have already been found for merging then stop the scan here and merge
      if (segmentsToBeMerged.size() > 1) {
        break;
      } else {
        // if only one segment was found so far then drop it from the list
        // and reset the total length to 0
        segmentsToBeMerged = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
        totalLength = 0;
        continue;
      }
    }
    totalLength += sizeOfOneSegmentAcrossPartition;
    // keep adding segments as long as the cumulative size stays below the major compaction size
    if (totalLength < (compactionSize * 1024 * 1024)) {
      segmentsToBeMerged.add(segment);
    } else {
      // if 2 segments have already been found for merging then stop the scan here and merge
      if (segmentsToBeMerged.size() > 1) {
        break;
      } else {
        // if only one segment was found so far then drop it, put the current one instead,
        // and reset the total length to the size of the current segment
        segmentsToBeMerged = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
        segmentsToBeMerged.add(segment);
        totalLength = sizeOfOneSegmentAcrossPartition;
      }
    }
  }
  return segmentsToBeMerged;
}
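The selection rule amounts to: skip any segment that alone exceeds the threshold, accumulate the rest while the running total stays under the threshold, and stop scanning once at least two candidates have been found. A self-contained sketch of that rule over plain sizes, independent of the CarbonData API (all names here are illustrative):

import java.util.ArrayList;
import java.util.List;

final class MajorCompactionSizeRule {
  /** Illustrative only: picks segment indices whose cumulative size stays under the threshold. */
  static List<Integer> pick(long[] segmentSizesInBytes, long thresholdInBytes) {
    List<Integer> picked = new ArrayList<>();
    long total = 0;
    for (int i = 0; i < segmentSizesInBytes.length; i++) {
      long size = segmentSizesInBytes[i];
      if (size > thresholdInBytes) {
        // An oversized segment is never merged; restart unless a pair was already found.
        if (picked.size() > 1) break;
        picked.clear();
        total = 0;
        continue;
      }
      total += size;
      if (total < thresholdInBytes) {
        picked.add(i);
      } else if (picked.size() > 1) {
        break;
      } else {
        // Start a fresh candidate group from the current segment.
        picked.clear();
        picked.add(i);
        total = size;
      }
    }
    return picked;
  }
}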
use of org.apache.carbondata.core.metadata.CarbonTableIdentifier in project carbondata by apache.
the class CarbonTable method loadCarbonTable.
/**
 * Fills this table's metadata (identifiers, dimensions, measures and aggregate tables)
 * from the given table info.
 *
 * @param tableInfo table info containing the fact table and aggregate table schemas
 */
public void loadCarbonTable(TableInfo tableInfo) {
  this.blockSize = getTableBlockSizeInMB(tableInfo);
  this.tableLastUpdatedTime = tableInfo.getLastUpdatedTime();
  this.tableUniqueName = tableInfo.getTableUniqueName();
  this.metaDataFilepath = tableInfo.getMetaDataFilepath();
  // setting unique table identifier
  CarbonTableIdentifier carbontableIdentifier = new CarbonTableIdentifier(
      tableInfo.getDatabaseName(), tableInfo.getFactTable().getTableName(),
      tableInfo.getFactTable().getTableId());
  this.absoluteTableIdentifier =
      new AbsoluteTableIdentifier(tableInfo.getStorePath(), carbontableIdentifier);
  fillDimensionsAndMeasuresForTables(tableInfo.getFactTable());
  fillCreateOrderColumn(tableInfo.getFactTable().getTableName());
  List<TableSchema> aggregateTableList = tableInfo.getAggregateTableList();
  for (TableSchema aggTable : aggregateTableList) {
    this.aggregateTablesName.add(aggTable.getTableName());
    fillDimensionsAndMeasuresForTables(aggTable);
    tableBucketMap.put(aggTable.getTableName(), aggTable.getBucketingInfo());
    tablePartitionMap.put(aggTable.getTableName(), aggTable.getPartitionInfo());
  }
  tableBucketMap.put(tableInfo.getFactTable().getTableName(),
      tableInfo.getFactTable().getBucketingInfo());
  tablePartitionMap.put(tableInfo.getFactTable().getTableName(),
      tableInfo.getFactTable().getPartitionInfo());
}
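The identifier wiring in the middle of the method can be shown on its own. A sketch with placeholder values, using only the two constructors that appear above:

// Placeholder values; only the constructor calls mirror loadCarbonTable above.
CarbonTableIdentifier tableIdentifier =
    new CarbonTableIdentifier("default", "sales", "1");
AbsoluteTableIdentifier absoluteIdentifier =
    new AbsoluteTableIdentifier("/tmp/carbon/store", tableIdentifier);

In the method above the table id comes from the fact table schema, and the store path anchors the identifier to a physical location.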
use of org.apache.carbondata.core.metadata.CarbonTableIdentifier in project carbondata by apache.
the class SegmentStatusManager method updateDeletionStatus.
/**
 * Updates the deletion status of the given segments (delete segment by id).
 *
 * @param identifier      absolute table identifier of the table
 * @param loadIds         ids of the segments to be marked for delete
 * @param tableFolderPath table folder path containing the load metadata
 * @return ids of the segments that could not be marked for delete
 */
public static List<String> updateDeletionStatus(AbsoluteTableIdentifier identifier,
    List<String> loadIds, String tableFolderPath) throws Exception {
  CarbonTableIdentifier carbonTableIdentifier = identifier.getCarbonTableIdentifier();
  ICarbonLock carbonDeleteSegmentLock =
      CarbonLockFactory.getCarbonLockObj(carbonTableIdentifier, LockUsage.DELETE_SEGMENT_LOCK);
  ICarbonLock carbonTableStatusLock =
      CarbonLockFactory.getCarbonLockObj(carbonTableIdentifier, LockUsage.TABLE_STATUS_LOCK);
  String tableDetails =
      carbonTableIdentifier.getDatabaseName() + "." + carbonTableIdentifier.getTableName();
  List<String> invalidLoadIds = new ArrayList<String>(0);
  try {
    if (carbonDeleteSegmentLock.lockWithRetries()) {
      LOG.info("Delete segment lock has been successfully acquired");
      CarbonTablePath carbonTablePath = CarbonStorePath.getCarbonTablePath(
          identifier.getStorePath(), identifier.getCarbonTableIdentifier());
      String dataLoadLocation = carbonTablePath.getTableStatusFilePath();
      LoadMetadataDetails[] listOfLoadFolderDetailsArray = null;
      if (!FileFactory.isFileExist(dataLoadLocation, FileFactory.getFileType(dataLoadLocation))) {
        // log error.
        LOG.error("Load metadata file is not present.");
        return loadIds;
      }
      // read existing metadata details in load metadata.
      listOfLoadFolderDetailsArray = readLoadMetadata(tableFolderPath);
      if (listOfLoadFolderDetailsArray != null && listOfLoadFolderDetailsArray.length != 0) {
        updateDeletionStatus(loadIds, listOfLoadFolderDetailsArray, invalidLoadIds);
        if (invalidLoadIds.isEmpty()) {
          // all or none: if anything fails then don't write
          if (carbonTableStatusLock.lockWithRetries()) {
            LOG.info("Table status lock has been successfully acquired");
            // To handle concurrency scenarios, always take the latest metadata before writing
            // into the status file.
            LoadMetadataDetails[] latestLoadMetadataDetails = readLoadMetadata(tableFolderPath);
            updateLatestTableStatusDetails(listOfLoadFolderDetailsArray, latestLoadMetadataDetails);
            writeLoadDetailsIntoFile(dataLoadLocation, listOfLoadFolderDetailsArray);
          } else {
            String errorMsg = "Delete segment by id is failed for " + tableDetails
                + ". Not able to acquire the table status lock due to other operation running "
                + "in the background.";
            LOG.audit(errorMsg);
            LOG.error(errorMsg);
            throw new Exception(errorMsg + " Please try after some time.");
          }
        } else {
          return invalidLoadIds;
        }
      } else {
        LOG.audit("Delete segment by Id is failed. No matching segment id found.");
        return loadIds;
      }
    } else {
      String errorMsg = "Delete segment by id is failed for " + tableDetails
          + ". Not able to acquire the delete segment lock due to another delete "
          + "operation is running in the background.";
      LOG.audit(errorMsg);
      LOG.error(errorMsg);
      throw new Exception(errorMsg + " Please try after some time.");
    }
  } catch (IOException e) {
    LOG.error("IOException" + e.getMessage());
    throw e;
  } finally {
    CarbonLockUtil.fileUnlock(carbonTableStatusLock, LockUsage.TABLE_STATUS_LOCK);
    CarbonLockUtil.fileUnlock(carbonDeleteSegmentLock, LockUsage.DELETE_SEGMENT_LOCK);
  }
  return invalidLoadIds;
}
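The lock handling around the status-file update follows an acquire-with-retries, do-work, unlock-in-finally pattern. A minimal sketch of just that pattern, using only the calls visible above; carbonTableIdentifier is assumed to be already at hand, and the work inside the lock is elided:

// Sketch of the locking pattern used in updateDeletionStatus above.
ICarbonLock statusLock =
    CarbonLockFactory.getCarbonLockObj(carbonTableIdentifier, LockUsage.TABLE_STATUS_LOCK);
try {
  if (statusLock.lockWithRetries()) {
    // ... read the latest table status, modify it and write it back ...
  } else {
    throw new Exception("Not able to acquire the table status lock. Please try after some time.");
  }
} finally {
  CarbonLockUtil.fileUnlock(statusLock, LockUsage.TABLE_STATUS_LOCK);
}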