Use of org.apache.carbondata.core.statusmanager.LoadMetadataDetails in the Apache CarbonData project: class CarbonDataMergerUtil, method identifySegmentsToBeMergedBasedOnLoadedDate.
/**
 * Picks the subset of candidate loads whose load-start dates fall within the
 * configured number of days of each other, so they can be compacted together.
 * The allowed span is read from the carbon property
 * DAYS_ALLOWED_TO_COMPACT and validated to be in [0, 100]; invalid or
 * unparsable values fall back to the default.
 *
 * @param listOfSegmentsBelowThresholdSize candidate loads (already size-filtered)
 * @return loads loaded within the allowed interval of the first qualifying load;
 *         the full input list when the date restriction is disabled (value 0)
 */
private static List<LoadMetadataDetails> identifySegmentsToBeMergedBasedOnLoadedDate(List<LoadMetadataDetails> listOfSegmentsBelowThresholdSize) {
List<LoadMetadataDetails> loadsOfSameDate = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
long daysAllowed = 0;
try {
daysAllowed = Long.parseLong(CarbonProperties.getInstance().getProperty(CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT, CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT));
if (daysAllowed < 0 || daysAllowed > 100) {
LOGGER.error("The specified value for property " + CarbonCommonConstants.DAYS_ALLOWED_TO_COMPACT + " is incorrect." + " Correct value should be in range of 0 -100. Taking the default value.");
daysAllowed = Long.parseLong(CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT);
}
} catch (NumberFormatException e) {
// Non-numeric property value: silently use the shipped default.
daysAllowed = Long.parseLong(CarbonCommonConstants.DEFAULT_DAYS_ALLOWED_TO_COMPACT);
}
if (daysAllowed <= 0) {
// Date-based restriction disabled: every candidate load is mergeable.
return listOfSegmentsBelowThresholdSize;
}
boolean seenFirst = false;
Date windowStartDate = null;
SimpleDateFormat sdf = new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP);
for (LoadMetadataDetails load : listOfSegmentsBelowThresholdSize) {
if (!seenFirst) {
// Anchor the merge window on the first load.
windowStartDate = initializeFirstSegment(loadsOfSameDate, load, sdf);
seenFirst = true;
continue;
}
Date currentLoadDate = null;
try {
// format + parse truncates the raw start time to CARBON_TIMESTAMP precision.
currentLoadDate = sdf.parse(sdf.format(load.getLoadStartTime()));
} catch (ParseException e) {
LOGGER.error("Error while parsing segment start time" + e.getMessage());
}
if (isTwoDatesPresentInRequiredRange(windowStartDate, currentLoadDate, daysAllowed)) {
loadsOfSameDate.add(load);
} else if (loadsOfSameDate.size() < 2) {
// Fewer than 2 loads collected so far: restart the window at this load.
loadsOfSameDate.clear();
windowStartDate = initializeFirstSegment(loadsOfSameDate, load, sdf);
} else {
// This load is beyond the merge window and we already have >= 2 loads: stop.
break;
}
}
return loadsOfSameDate;
}
Use of org.apache.carbondata.core.statusmanager.LoadMetadataDetails in the Apache CarbonData project: class CarbonUpdateUtil, method updateTableMetadataStatus.
/**
 * Updates the table status file after an update/delete operation: links the
 * update-status file to segment "0", marks deleted segments, and stamps
 * update-delta start/end times on the updated segments. The whole read-modify-write
 * is performed under the table status lock.
 *
 * @param updatedSegmentsList names of segments touched by the update
 * @param table the carbon table whose status file is rewritten
 * @param updatedTimeStamp timestamp (as string) used for delta start/end and
 *        deletion time; expected to be parsable as a long
 * @param isTimestampUpdationRequired when true, delta timestamps and deletion
 *        marks are written; when false, only the status file is rewritten as-is
 * @param segmentsToBeDeleted segments to flag as MARKED_FOR_DELETE
 * @return true when the status file was written successfully under the lock;
 *         false when the lock could not be acquired or the write failed
 */
public static boolean updateTableMetadataStatus(Set<String> updatedSegmentsList, CarbonTable table, String updatedTimeStamp, boolean isTimestampUpdationRequired, List<String> segmentsToBeDeleted) {
boolean status = false;
String metaDataFilepath = table.getMetaDataFilepath();
AbsoluteTableIdentifier absoluteTableIdentifier = table.getAbsoluteTableIdentifier();
CarbonTablePath carbonTablePath = CarbonStorePath.getCarbonTablePath(absoluteTableIdentifier.getStorePath(), absoluteTableIdentifier.getCarbonTableIdentifier());
String tableStatusPath = carbonTablePath.getTableStatusFilePath();
SegmentStatusManager segmentStatusManager = new SegmentStatusManager(absoluteTableIdentifier);
ICarbonLock carbonLock = segmentStatusManager.getTableStatusLock();
boolean lockStatus = false;
try {
lockStatus = carbonLock.lockWithRetries();
if (lockStatus) {
LOGGER.info("Acquired lock for table" + table.getDatabaseName() + "." + table.getFactTableName() + " for table status updation");
// Read the current status entries; all mutations below happen on this
// in-memory array before the single write at the end.
LoadMetadataDetails[] listOfLoadFolderDetailsArray = segmentStatusManager.readLoadMetadata(metaDataFilepath);
for (LoadMetadataDetails loadMetadata : listOfLoadFolderDetailsArray) {
if (isTimestampUpdationRequired) {
// we are storing the link between the 2 status files in the segment 0 only.
if (loadMetadata.getLoadName().equalsIgnoreCase("0")) {
loadMetadata.setUpdateStatusFileName(CarbonUpdateUtil.getUpdateStatusFileName(updatedTimeStamp));
}
// if the segments is in the list of marked for delete then update the status.
if (segmentsToBeDeleted.contains(loadMetadata.getLoadName())) {
loadMetadata.setLoadStatus(CarbonCommonConstants.MARKED_FOR_DELETE);
loadMetadata.setModificationOrdeletionTimesStamp(Long.parseLong(updatedTimeStamp));
}
}
// Stamp delta timestamps on every segment named in updatedSegmentsList.
for (String segName : updatedSegmentsList) {
if (loadMetadata.getLoadName().equalsIgnoreCase(segName)) {
// String will come empty then no need to write into table status file.
if (isTimestampUpdationRequired) {
loadMetadata.setIsDeleted(CarbonCommonConstants.KEYWORD_TRUE);
// if in case of update flow.
if (loadMetadata.getUpdateDeltaStartTimestamp().isEmpty()) {
// this means for first time it is getting updated .
loadMetadata.setUpdateDeltaStartTimestamp(updatedTimeStamp);
}
// update end timestamp for each time.
loadMetadata.setUpdateDeltaEndTimestamp(updatedTimeStamp);
}
}
}
}
try {
// Persist the mutated entries; a failed write aborts with false (the
// finally block below still releases the lock).
segmentStatusManager.writeLoadDetailsIntoFile(tableStatusPath, listOfLoadFolderDetailsArray);
} catch (IOException e) {
return false;
}
status = true;
} else {
LOGGER.error("Not able to acquire the lock for Table status updation for table " + table.getDatabaseName() + "." + table.getFactTableName());
}
} finally {
// Release the lock only if it was actually acquired.
if (lockStatus) {
if (carbonLock.unlock()) {
LOGGER.info("Table unlocked successfully after table status updation" + table.getDatabaseName() + "." + table.getFactTableName());
} else {
LOGGER.error("Unable to unlock Table lock for table" + table.getDatabaseName() + "." + table.getFactTableName() + " during table status updation");
}
}
}
return status;
}
Use of org.apache.carbondata.core.statusmanager.LoadMetadataDetails in the Apache CarbonData project: class ManageDictionaryAndBTree, method clearBTreeAndDictionaryLRUCache.
/**
 * Invalidates both the BTree and the dictionary entries of the given table
 * from the LRU cache.
 *
 * @param carbonTable table whose cached BTree and dictionary instances are removed
 */
public static void clearBTreeAndDictionaryLRUCache(CarbonTable carbonTable) {
// Evict the BTree entries for every segment listed in the table status file.
LoadMetadataDetails[] loadDetails = SegmentStatusManager.readLoadMetadata(carbonTable.getMetaDataFilepath());
if (loadDetails != null) {
String[] segmentNames = new String[loadDetails.length];
for (int idx = 0; idx < loadDetails.length; idx++) {
segmentNames[idx] = loadDetails[idx].getLoadName();
}
invalidateBTreeCache(carbonTable.getAbsoluteTableIdentifier(), segmentNames);
}
// Evict the dictionary entries for every dimension of the fact table.
for (CarbonDimension dimension : carbonTable.getDimensionByTableName(carbonTable.getFactTableName())) {
removeDictionaryColumnFromCache(carbonTable.getCarbonTableIdentifier(), carbonTable.getStorePath(), dimension.getColumnId());
}
}
Use of org.apache.carbondata.core.statusmanager.LoadMetadataDetails in the Apache CarbonData project: class CarbonQueryUtil, method getListOfSlices.
/**
 * Builds the list of slice (load folder) names for all loads that did not fail.
 *
 * @param details load metadata entries read from the table status file; may be null
 * @return folder names (LOAD_FOLDER prefix + load name) of every non-failed load;
 *         an empty list when details is null
 */
public static List<String> getListOfSlices(LoadMetadataDetails[] details) {
List<String> slices = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
if (null == details) {
return slices;
}
for (LoadMetadataDetails oneLoad : details) {
// Skip loads whose status is FAILURE; every other status yields a slice.
if (CarbonCommonConstants.STORE_LOADSTATUS_FAILURE.equals(oneLoad.getLoadStatus())) {
continue;
}
slices.add(CarbonCommonConstants.LOAD_FOLDER + oneLoad.getLoadName());
}
return slices;
}
Use of org.apache.carbondata.core.statusmanager.LoadMetadataDetails in the Apache CarbonData project: class StoreCreator, method writeLoadMetadata.
/**
 * Appends a synthetic successful load (segment "0") to the given list and
 * persists the whole list as JSON into the table's load metadata status file.
 *
 * @param schema schema holding the carbon table whose metadata path is written to
 * @param databaseName database name (not read by this method; kept for interface compatibility)
 * @param tableName table name (not read by this method; kept for interface compatibility)
 * @param listOfLoadFolderDetails existing load details; the new entry is appended to this list
 * @throws IOException when the status file cannot be opened or written
 */
public static void writeLoadMetadata(CarbonDataLoadSchema schema, String databaseName, String tableName, List<LoadMetadataDetails> listOfLoadFolderDetails) throws IOException {
LoadMetadataDetails loadMetadataDetails = new LoadMetadataDetails();
loadMetadataDetails.setLoadEndTime(System.currentTimeMillis());
loadMetadataDetails.setLoadStatus("SUCCESS");
loadMetadataDetails.setLoadName(String.valueOf(0));
loadMetadataDetails.setLoadStartTime(loadMetadataDetails.getTimeStamp(readCurrentTime()));
listOfLoadFolderDetails.add(loadMetadataDetails);
String dataLoadLocation = schema.getCarbonTable().getMetaDataFilepath() + File.separator + CarbonCommonConstants.LOADMETADATA_FILENAME;
Gson gsonObjectToWrite = new Gson();
BufferedWriter brWriter = null;
// Atomic write: content goes to a temp file and is committed by writeOperation.close().
AtomicFileOperations writeOperation = new AtomicFileOperationsImpl(dataLoadLocation, FileFactory.getFileType(dataLoadLocation));
try {
DataOutputStream dataOutputStream = writeOperation.openForWrite(FileWriteOperation.OVERWRITE);
brWriter = new BufferedWriter(new OutputStreamWriter(dataOutputStream, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
String metadataInstance = gsonObjectToWrite.toJson(listOfLoadFolderDetails.toArray());
brWriter.write(metadataInstance);
} finally {
// Flush before closing so the JSON reaches the temp file. The previous
// catch (Exception e) { throw e; } wrapper around the flush was a no-op
// rethrow and has been removed.
if (null != brWriter) {
brWriter.flush();
}
CarbonUtil.closeStreams(brWriter);
}
// NOTE(review): close() is intentionally outside the try/finally so a failed
// write does not commit a partial file — confirm against AtomicFileOperationsImpl.
writeOperation.close();
}
Aggregations