Use of org.apache.carbondata.core.datastore.filesystem.CarbonFile in project carbondata by apache.
The class SDKUtil, method listFiles:
public static ArrayList listFiles(String sourceImageFolder, final String suf, Configuration conf) {
  final String sufImageFinal = suf;
  ArrayList result = new ArrayList();
  CarbonFile[] fileList = FileFactory.getCarbonFile(sourceImageFolder, conf).listFiles();
  for (int i = 0; i < fileList.length; i++) {
    if (fileList[i].isDirectory()) {
      result.addAll(listFiles(fileList[i].getCanonicalPath(), sufImageFinal, conf));
    } else if (fileList[i].getCanonicalPath().endsWith(sufImageFinal)) {
      result.add(fileList[i].getCanonicalPath());
    }
  }
  return result;
}
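A minimal, hypothetical caller for the helper above. The folder path and the ".jpg" suffix are illustrative only; the sketch assumes the Hadoop Configuration and java.util.ArrayList imports noted in the comment.

// Hypothetical usage sketch (folder path and suffix are illustrative).
// Assumes: import java.util.ArrayList; import org.apache.hadoop.conf.Configuration;
Configuration conf = new Configuration();
ArrayList imagePaths = SDKUtil.listFiles("/tmp/images", ".jpg", conf);
for (Object imagePath : imagePaths) {
  System.out.println(imagePath);
}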
Use of org.apache.carbondata.core.datastore.filesystem.CarbonFile in project carbondata by apache.
The class CarbonCliTest, method testSummaryAllColumnsForOneFile:
@Test
public void testSummaryAllColumnsForOneFile() {
  CarbonFile folder = FileFactory.getCarbonFile(path);
  CarbonFile[] carbonFiles = folder.listFiles(new CarbonFileFilter() {
    @Override
    public boolean accept(CarbonFile file) {
      return file.getName().endsWith(CarbonTablePath.CARBON_DATA_EXT);
    }
  });
  String[] args = { "-cmd", "summary", "-p", carbonFiles[0].getCanonicalPath(), "-C" };
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream stream = new PrintStream(out);
  CarbonCli.run(args, stream);
  String output = new String(out.toByteArray());
  Assert.assertTrue(output.contains("Block Blocklet Column Name Meta Size Data Size"));
}
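The anonymous CarbonFileFilter works for any suffix; below is a small, hypothetical variant that lists the index files in the same folder instead of the data files. The literal ".carbonindex" suffix is written out here rather than assuming a constant name.

// Hypothetical variant of the filter above: pick up the folder's index files instead.
CarbonFile[] indexFiles = FileFactory.getCarbonFile(path).listFiles(new CarbonFileFilter() {
  @Override
  public boolean accept(CarbonFile file) {
    return file.getName().endsWith(".carbonindex");
  }
});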
Use of org.apache.carbondata.core.datastore.filesystem.CarbonFile in project carbondata by apache.
The class MVProvider, method getSchemaProvider:
private SchemaProvider getSchemaProvider(MVManager viewManager, String databaseName) {
  String databaseNameUpper = databaseName.toUpperCase();
  SchemaProvider schemaProvider = this.schemaProviders.get(databaseNameUpper);
  if (schemaProvider == null) {
    synchronized (this.schemaProviders) {
      schemaProvider = this.schemaProviders.get(databaseNameUpper);
      if (schemaProvider == null) {
        String databaseLocation = viewManager.getDatabaseLocation(databaseName);
        CarbonFile databasePath = FileFactory.getCarbonFile(databaseLocation);
        if (!databasePath.exists()) {
          return null;
        }
        schemaProvider = new SchemaProvider(databasePath.getCanonicalPath());
        this.schemaProviders.put(databaseNameUpper, schemaProvider);
      }
    }
  }
  return schemaProvider;
}
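getSchemaProvider uses double-checked locking: the unsynchronized get serves the common case, and the second get inside the synchronized block guarantees at most one SchemaProvider is built per database. A minimal, stand-alone sketch of the same caching idea with ConcurrentHashMap.computeIfAbsent is shown below; the class and method names are illustrative and not part of MVProvider.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Illustrative cache, not the MVProvider API: computeIfAbsent gives the same
// "create once, read without explicit locking" guarantee as the hand-written
// double-checked locking above. A loader that returns null (e.g. when the
// database location does not exist) stores nothing, mirroring the early return.
class SchemaProviderCache<V> {
  private final ConcurrentMap<String, V> providers = new ConcurrentHashMap<>();

  V getOrCreate(String databaseName, Function<String, V> loader) {
    return providers.computeIfAbsent(databaseName.toUpperCase(), loader);
  }
}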
Use of org.apache.carbondata.core.datastore.filesystem.CarbonFile in project carbondata by apache.
The class CarbonIndexFileMergeWriter, method mergeCarbonIndexFilesOfSegment:
/**
 * Merge all the carbon index files of a segment into one merged file.
 * @param tablePath
 * @param indexFileNamesTobeAdded while merging, only these files are considered;
 *                                if null, all index files are considered
 * @param isOldStoreIndexFilesPresent flag to read the file footer information from the
 *                                    carbondata file. This is used when upgrading from a
 *                                    version that does not store the blocklet info to the
 *                                    current version.
 * @throws IOException
 */
private String mergeCarbonIndexFilesOfSegment(String segmentId, String tablePath,
    List<String> indexFileNamesTobeAdded, boolean isOldStoreIndexFilesPresent,
    String uuid, String partitionPath) {
  Segment segment = Segment.getSegment(segmentId, tablePath);
  String segmentPath = CarbonTablePath.getSegmentPath(tablePath, segmentId);
  try {
    List<CarbonFile> indexFiles = new ArrayList<>();
    SegmentFileStore sfs = null;
    if (segment != null && segment.getSegmentFileName() != null) {
      sfs = new SegmentFileStore(tablePath, segment.getSegmentFileName());
      List<CarbonFile> indexCarbonFiles = sfs.getIndexCarbonFiles();
      if (table.isHivePartitionTable()) {
        // in case of partition table, merge index files of a partition
        List<CarbonFile> indexFilesInPartition = new ArrayList<>();
        for (CarbonFile indexCarbonFile : indexCarbonFiles) {
          if (FileFactory.getUpdatedFilePath(indexCarbonFile.getParentFile().getPath())
              .equals(partitionPath)) {
            indexFilesInPartition.add(indexCarbonFile);
          }
        }
        indexFiles = indexFilesInPartition;
      } else {
        indexFiles = indexCarbonFiles;
      }
    }
    if (sfs == null || indexFiles.isEmpty()) {
      if (table.isHivePartitionTable()) {
        segmentPath = partitionPath;
      }
      return writeMergeIndexFileBasedOnSegmentFolder(indexFileNamesTobeAdded,
          isOldStoreIndexFilesPresent, segmentPath, segmentId, uuid, true);
    } else {
      return writeMergeIndexFileBasedOnSegmentFile(segmentId, indexFileNamesTobeAdded,
          isOldStoreIndexFilesPresent, sfs, indexFiles.toArray(new CarbonFile[0]), uuid,
          partitionPath);
    }
  } catch (Exception e) {
    String message = "Failed to merge index files in path: " + segmentPath + ". " + e.getMessage();
    LOGGER.error(message);
    throw new RuntimeException(message, e);
  }
}
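For the partition branch above, the only per-file work is comparing the normalized parent path of each index file with the target partition path. A stand-alone sketch of that filtering step follows; the helper name is hypothetical, and only the CarbonFile and FileFactory calls already shown are used.

// Illustrative helper (name is hypothetical): keep only the index files that sit
// directly under the given partition path.
private static List<CarbonFile> filterIndexFilesByPartition(List<CarbonFile> indexFiles,
    String partitionPath) {
  List<CarbonFile> filtered = new ArrayList<>();
  for (CarbonFile indexFile : indexFiles) {
    // getUpdatedFilePath normalizes the file path so the string comparison is reliable.
    if (FileFactory.getUpdatedFilePath(indexFile.getParentFile().getPath()).equals(partitionPath)) {
      filtered.add(indexFile);
    }
  }
  return filtered;
}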
Use of org.apache.carbondata.core.datastore.filesystem.CarbonFile in project carbondata by apache.
The class CarbonIndexFileMergeWriter, method writeMergeIndexFileBasedOnSegmentFolder:
public String writeMergeIndexFileBasedOnSegmentFolder(List<String> indexFileNamesTobeAdded,
    boolean isOldStoreIndexFilesPresent, String segmentPath, String segmentId, String uuid,
    boolean readBasedOnUUID) throws IOException {
  CarbonFile[] indexFiles = null;
  SegmentIndexFileStore fileStore = new SegmentIndexFileStore();
  if (isOldStoreIndexFilesPresent) {
    // this case is used during upgrade: the old store does not have the blocklet info
    // in the index file, so the blocklet info needs to be read from the file footer
    // of the carbondata file
    fileStore.readAllIndexAndFillBlockletInfo(segmentPath, null);
  } else {
    if (readBasedOnUUID) {
      indexFiles = SegmentIndexFileStore.getCarbonIndexFiles(segmentPath,
          FileFactory.getConfiguration(), uuid);
    } else {
      // the uuid can be different when we add a load from an external path
      indexFiles = SegmentIndexFileStore.getCarbonIndexFiles(segmentPath,
          FileFactory.getConfiguration());
    }
    fileStore.readAllIIndexOfSegment(indexFiles);
  }
  Map<String, byte[]> indexMap = fileStore.getCarbonIndexMap();
  Map<String, List<String>> mergeToIndexFileMap = fileStore.getCarbonMergeFileToIndexFilesMap();
  if (!mergeToIndexFileMap.containsValue(new ArrayList<>(indexMap.keySet()))) {
    writeMergeIndexFile(indexFileNamesTobeAdded, segmentPath, indexMap, segmentId, uuid);
    // to avoid index file not found during concurrent queries
    if (!isOldStoreIndexFilesPresent && indexFiles != null) {
      for (CarbonFile indexFile : indexFiles) {
        indexFile.delete();
      }
    }
  }
  return null;
}
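A short sketch of the non-upgrade read path used above, assuming a segment folder already exists on disk; it exercises only the SegmentIndexFileStore and FileFactory calls shown in this snippet, and the helper name is illustrative.

// Illustrative helper: read every index file of a segment folder and return the
// map built by the store (index file name to file content, as used above).
private static Map<String, byte[]> readSegmentIndexFiles(String segmentPath) throws IOException {
  CarbonFile[] indexFiles =
      SegmentIndexFileStore.getCarbonIndexFiles(segmentPath, FileFactory.getConfiguration());
  SegmentIndexFileStore fileStore = new SegmentIndexFileStore();
  fileStore.readAllIIndexOfSegment(indexFiles);
  return fileStore.getCarbonIndexMap();
}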