Usage of org.apache.carbondata.core.fileoperations.AtomicFileOperations in project carbondata (Apache): class CarbonLoaderUtil, method writeLoadMetadata.
/**
 * Serializes the given load folder details to JSON and atomically overwrites the
 * table status file of the specified table.
 *
 * @param storeLocation           store path of the carbon table
 * @param dbName                  database name
 * @param tableName               table name
 * @param listOfLoadFolderDetails load metadata entries to persist
 * @throws IOException if the status file cannot be written
 */
public static void writeLoadMetadata(String storeLocation, String dbName, String tableName,
    List<LoadMetadataDetails> listOfLoadFolderDetails) throws IOException {
  CarbonTablePath carbonTablePath =
      CarbonStorePath.getCarbonTablePath(storeLocation, dbName, tableName);
  String dataLoadLocation = carbonTablePath.getTableStatusFilePath();
  AtomicFileOperations writeOperation =
      new AtomicFileOperationsImpl(dataLoadLocation, FileFactory.getFileType(dataLoadLocation));
  Gson gsonObjectToWrite = new Gson();
  BufferedWriter brWriter = null;
  try {
    DataOutputStream dataOutputStream = writeOperation.openForWrite(FileWriteOperation.OVERWRITE);
    brWriter = new BufferedWriter(new OutputStreamWriter(dataOutputStream,
        Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
    // Persist the entire list as one JSON array.
    brWriter.write(gsonObjectToWrite.toJson(listOfLoadFolderDetails.toArray()));
  } finally {
    // Best-effort flush before closing; a flush failure must not hide an
    // exception from the write above.
    try {
      if (null != brWriter) {
        brWriter.flush();
      }
    } catch (Exception e) {
      LOGGER.error("error in flushing ");
    }
    CarbonUtil.closeStreams(brWriter);
    writeOperation.close();
  }
}
Usage of org.apache.carbondata.core.fileoperations.AtomicFileOperations in project carbondata (Apache): class SegmentStatusManager, method getValidAndInvalidSegments.
/**
 * Gets the valid, valid-updated and invalid segments of the table referred to by
 * {@code absoluteTableIdentifier}, classified from the load status entries
 * recorded in the table status file.
 *
 * @return holder with valid segments, valid-but-updated segments and invalid segments
 * @throws IOException if the table status file cannot be read
 */
public ValidAndInvalidSegmentsInfo getValidAndInvalidSegments() throws IOException {
  // @TODO: move reading LoadStatus file to separate class
  List<String> listOfValidSegments = new ArrayList<String>(10);
  List<String> listOfValidUpdatedSegments = new ArrayList<String>(10);
  List<String> listOfInvalidSegments = new ArrayList<String>(10);
  CarbonTablePath carbonTablePath = CarbonStorePath.getCarbonTablePath(
      absoluteTableIdentifier.getStorePath(), absoluteTableIdentifier.getCarbonTableIdentifier());
  String dataPath = carbonTablePath.getTableStatusFilePath();
  DataInputStream dataInputStream = null;
  Gson gsonObjectToRead = new Gson();
  AtomicFileOperations fileOperation =
      new AtomicFileOperationsImpl(dataPath, FileFactory.getFileType(dataPath));
  LoadMetadataDetails[] loadFolderDetailsArray;
  try {
    // A missing status file means no loads yet: all result lists stay empty.
    if (FileFactory.isFileExist(dataPath, FileFactory.getFileType(dataPath))) {
      dataInputStream = fileOperation.openForRead();
      BufferedReader buffReader =
          new BufferedReader(new InputStreamReader(dataInputStream, "UTF-8"));
      loadFolderDetailsArray = gsonObjectToRead.fromJson(buffReader, LoadMetadataDetails[].class);
      //just directly iterate Array
      for (LoadMetadataDetails loadMetadataDetails : loadFolderDetailsArray) {
        // SUCCESS, MARKED_FOR_UPDATE and PARTIAL_SUCCESS loads are queryable.
        if (CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
            .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())
            || CarbonCommonConstants.MARKED_FOR_UPDATE
                .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())
            || CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS
                .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())) {
          // check for merged loads.
          if (null != loadMetadataDetails.getMergedLoadName()) {
            // De-duplicate: several loads may share one merged load name.
            if (!listOfValidSegments.contains(loadMetadataDetails.getMergedLoadName())) {
              listOfValidSegments.add(loadMetadataDetails.getMergedLoadName());
            }
            // if merged load is updated then put it in updated list
            if (CarbonCommonConstants.MARKED_FOR_UPDATE
                .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())) {
              listOfValidUpdatedSegments.add(loadMetadataDetails.getMergedLoadName());
            }
            continue;
          }
          if (CarbonCommonConstants.MARKED_FOR_UPDATE
              .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())) {
            listOfValidUpdatedSegments.add(loadMetadataDetails.getLoadName());
          }
          listOfValidSegments.add(loadMetadataDetails.getLoadName());
        } else if ((CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
            .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())
            || CarbonCommonConstants.COMPACTED
                .equalsIgnoreCase(loadMetadataDetails.getLoadStatus())
            || CarbonCommonConstants.MARKED_FOR_DELETE
                .equalsIgnoreCase(loadMetadataDetails.getLoadStatus()))) {
          // FAILURE, COMPACTED and MARKED_FOR_DELETE loads must be skipped by queries.
          listOfInvalidSegments.add(loadMetadataDetails.getLoadName());
        }
      }
    }
  } catch (IOException e) {
    LOG.error(e);
    throw e;
  } finally {
    try {
      if (null != dataInputStream) {
        dataInputStream.close();
      }
    } catch (Exception e) {
      // BUG FIX: previously this exception was rethrown from the finally block,
      // which masked any exception raised in the try block. Log it instead.
      LOG.error(e);
    }
  }
  return new ValidAndInvalidSegmentsInfo(listOfValidSegments, listOfValidUpdatedSegments,
      listOfInvalidSegments);
}
Usage of org.apache.carbondata.core.fileoperations.AtomicFileOperations in project carbondata (Apache): class SegmentStatusManager, method readLoadMetadata.
/**
 * This method reads the load metadata file.
 *
 * @param tableFolderPath path of the folder that contains the load metadata file
 * @return the recorded load details, or an empty array if the file does not
 *         exist or cannot be read
 */
public static LoadMetadataDetails[] readLoadMetadata(String tableFolderPath) {
  Gson gsonObjectToRead = new Gson();
  DataInputStream dataInputStream = null;
  BufferedReader buffReader = null;
  InputStreamReader inStream = null;
  String metadataFileName = tableFolderPath + CarbonCommonConstants.FILE_SEPARATOR
      + CarbonCommonConstants.LOADMETADATA_FILENAME;
  LoadMetadataDetails[] listOfLoadFolderDetailsArray;
  AtomicFileOperations fileOperation =
      new AtomicFileOperationsImpl(metadataFileName, FileFactory.getFileType(metadataFileName));
  try {
    if (!FileFactory.isFileExist(metadataFileName, FileFactory.getFileType(metadataFileName))) {
      // No metadata file yet: treat as "no loads recorded".
      return new LoadMetadataDetails[0];
    }
    dataInputStream = fileOperation.openForRead();
    inStream = new InputStreamReader(dataInputStream,
        Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
    buffReader = new BufferedReader(inStream);
    listOfLoadFolderDetailsArray =
        gsonObjectToRead.fromJson(buffReader, LoadMetadataDetails[].class);
  } catch (IOException e) {
    // BUG FIX: the failure was previously swallowed silently, making an unreadable
    // status file indistinguishable from an empty one. Log it, then fall back.
    LOG.error(e);
    return new LoadMetadataDetails[0];
  } finally {
    closeStreams(buffReader, inStream, dataInputStream);
  }
  return listOfLoadFolderDetailsArray;
}
Usage of org.apache.carbondata.core.fileoperations.AtomicFileOperations in project carbondata (Apache): class StoreCreator, method writeLoadMetadata.
/**
 * Appends a synthetic successful load entry (load name "0") to the given list and
 * writes the whole list as a JSON array to the table's load metadata file,
 * overwriting any previous content.
 *
 * @param schema                  schema whose carbon table provides the metadata folder path
 * @param databaseName            database name (unused here; kept for interface compatibility)
 * @param tableName               table name (unused here; kept for interface compatibility)
 * @param listOfLoadFolderDetails load details to persist; mutated by adding the new entry
 * @throws IOException if the metadata file cannot be written
 */
public static void writeLoadMetadata(CarbonDataLoadSchema schema, String databaseName,
    String tableName, List<LoadMetadataDetails> listOfLoadFolderDetails) throws IOException {
  LoadMetadataDetails loadMetadataDetails = new LoadMetadataDetails();
  loadMetadataDetails.setLoadEndTime(System.currentTimeMillis());
  loadMetadataDetails.setLoadStatus("SUCCESS");
  loadMetadataDetails.setLoadName(String.valueOf(0));
  loadMetadataDetails.setLoadStartTime(loadMetadataDetails.getTimeStamp(readCurrentTime()));
  listOfLoadFolderDetails.add(loadMetadataDetails);
  String dataLoadLocation = schema.getCarbonTable().getMetaDataFilepath() + File.separator
      + CarbonCommonConstants.LOADMETADATA_FILENAME;
  DataOutputStream dataOutputStream;
  Gson gsonObjectToWrite = new Gson();
  BufferedWriter brWriter = null;
  AtomicFileOperations writeOperation =
      new AtomicFileOperationsImpl(dataLoadLocation, FileFactory.getFileType(dataLoadLocation));
  try {
    dataOutputStream = writeOperation.openForWrite(FileWriteOperation.OVERWRITE);
    brWriter = new BufferedWriter(new OutputStreamWriter(dataOutputStream,
        Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
    String metadataInstance = gsonObjectToWrite.toJson(listOfLoadFolderDetails.toArray());
    brWriter.write(metadataInstance);
  } finally {
    try {
      if (null != brWriter) {
        brWriter.flush();
      }
    } catch (Exception ignored) {
      // BUG FIX: this catch previously rethrew, which masked any exception raised in
      // the try block. The flush is best-effort; closeStreams() flushes on close anyway.
    }
    CarbonUtil.closeStreams(brWriter);
    // BUG FIX: close() was previously placed after the finally block, so the atomic
    // file operation was never finalized when the write above threw an exception.
    writeOperation.close();
  }
}
Aggregations