use of org.apache.carbondata.core.metadata.schema.table.CarbonTable in project carbondata by apache.
In the class RestructureUtilTest, the method testToGetUpdatedQueryDimension:
@Test
public void testToGetUpdatedQueryDimension() {
  BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
  List<Encoding> encodingList = new ArrayList<Encoding>();
  encodingList.add(Encoding.DICTIONARY);

  // Five dictionary-encoded column schemas; only four of them will belong to the current block.
  ColumnSchema columnSchema1 = new ColumnSchema();
  columnSchema1.setColumnName("Id");
  columnSchema1.setDataType(DataTypes.STRING);
  columnSchema1.setColumnUniqueId(UUID.randomUUID().toString());
  columnSchema1.setEncodingList(encodingList);
  ColumnSchema columnSchema2 = new ColumnSchema();
  columnSchema2.setColumnName("Name");
  columnSchema2.setDataType(DataTypes.STRING);
  columnSchema2.setColumnUniqueId(UUID.randomUUID().toString());
  columnSchema2.setEncodingList(encodingList);
  ColumnSchema columnSchema3 = new ColumnSchema();
  columnSchema3.setColumnName("Age");
  columnSchema3.setDataType(DataTypes.INT);
  columnSchema3.setColumnUniqueId(UUID.randomUUID().toString());
  columnSchema3.setEncodingList(encodingList);
  ColumnSchema columnSchema4 = new ColumnSchema();
  columnSchema4.setColumnName("Salary");
  columnSchema4.setDataType(DataTypes.INT);
  columnSchema4.setColumnUniqueId(UUID.randomUUID().toString());
  columnSchema4.setEncodingList(encodingList);
  ColumnSchema columnSchema5 = new ColumnSchema();
  columnSchema5.setColumnName("Address");
  columnSchema5.setDataType(DataTypes.STRING);
  columnSchema5.setColumnUniqueId(UUID.randomUUID().toString());
  columnSchema5.setEncodingList(encodingList);

  // Id and Name are normal block dimensions; Age and Salary are complex dimensions of the block.
  CarbonDimension tableBlockDimension1 = new CarbonDimension(columnSchema1, 1, 1, 1);
  CarbonDimension tableBlockDimension2 = new CarbonDimension(columnSchema2, 5, 5, 5);
  List<CarbonDimension> tableBlockDimensions = Arrays.asList(tableBlockDimension1, tableBlockDimension2);
  CarbonDimension tableComplexDimension1 = new CarbonDimension(columnSchema3, 4, 4, 4);
  CarbonDimension tableComplexDimension2 = new CarbonDimension(columnSchema4, 2, 2, 2);
  List<CarbonDimension> tableComplexDimensions = Arrays.asList(tableComplexDimension1, tableComplexDimension2);

  // Address (columnSchema5) is projected but does not exist in the block, so it must be dropped.
  ProjectionDimension queryDimension1 = new ProjectionDimension(tableBlockDimension1);
  ProjectionDimension queryDimension2 = new ProjectionDimension(tableComplexDimension2);
  ProjectionDimension queryDimension3 = new ProjectionDimension(new CarbonDimension(columnSchema5, 3, 3, 3));
  ProjectionMeasure queryMeasure1 = new ProjectionMeasure(new CarbonMeasure(columnSchema3, 2));
  ProjectionMeasure queryMeasure2 = new ProjectionMeasure(new CarbonMeasure(columnSchema4, 4));
  List<ProjectionMeasure> queryMeasures = Arrays.asList(queryMeasure1, queryMeasure2);
  ProjectionDimension[] queryDimensions =
      new ProjectionDimension[] { queryDimension1, queryDimension2, queryDimension3 };

  List<ProjectionDimension> result = RestructureUtil.createDimensionInfoAndGetCurrentBlockQueryDimension(
      blockExecutionInfo, queryDimensions, tableBlockDimensions, tableComplexDimensions,
      queryMeasures.size(), true, QueryModel.newInstance(new CarbonTable()));
  List<CarbonDimension> resultDimension = new ArrayList<>(result.size());
  for (ProjectionDimension queryDimension : result) {
    resultDimension.add(queryDimension.getDimension());
  }
  assertThat(resultDimension,
      is(equalTo(Arrays.asList(queryDimension1.getDimension(), queryDimension2.getDimension()))));
}
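The assertion holds because createDimensionInfoAndGetCurrentBlockQueryDimension keeps a projected dimension only if a matching column exists among the block's normal or complex dimensions. A minimal sketch of that matching idea (not the actual RestructureUtil code, and assuming CarbonColumn's getColumnId accessor) could look like:

// Sketch only: keep projected dimensions whose column id also appears in the current block.
List<CarbonDimension> blockColumns = new ArrayList<>(tableBlockDimensions);
blockColumns.addAll(tableComplexDimensions);
List<ProjectionDimension> kept = new ArrayList<>();
for (ProjectionDimension qd : queryDimensions) {
  for (CarbonDimension blockDim : blockColumns) {
    if (blockDim.getColumnId().equalsIgnoreCase(qd.getDimension().getColumnId())) {
      kept.add(qd); // Id and Salary match; Address does not, so it is filtered out
      break;
    }
  }
}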
use of org.apache.carbondata.core.metadata.schema.table.CarbonTable in project carbondata by apache.
In the class CarbonInternalLoaderUtil, the method isSegmentsAlreadyCompactedForNewMetaDataDetails:
/**
 * Reads the load details of the SI table and checks whether the new metadata details are
 * already marked as compacted. If so, compaction of the SI table has already completed and
 * updating it with a new segment status would be useless. This can happen when the index
 * status is updated while loading failed segments, so in that case nothing is updated and
 * the caller exits gracefully.
 */
private static boolean isSegmentsAlreadyCompactedForNewMetaDataDetails(List<CarbonTable> indexTables,
    String indexTableName, List<LoadMetadataDetails> newLoadMetadataDetails) {
  // Find the SI table with the requested name.
  CarbonTable currentIndexTable = null;
  for (CarbonTable indexTable : indexTables) {
    if (indexTable.getTableName().equalsIgnoreCase(indexTableName)) {
      currentIndexTable = indexTable;
      break;
    }
  }
  boolean isIndexTableSegmentsCompacted = false;
  if (null != currentIndexTable) {
    // Compare every existing load against the new details; one COMPACTED match is enough.
    LoadMetadataDetails[] existingLoadMetaDataDetails =
        SegmentStatusManager.readLoadMetadata(currentIndexTable.getMetadataPath());
    for (LoadMetadataDetails existingLoadMetaDataDetail : existingLoadMetaDataDetails) {
      for (LoadMetadataDetails newLoadMetadataDetail : newLoadMetadataDetails) {
        if (existingLoadMetaDataDetail.getLoadName()
            .equalsIgnoreCase(newLoadMetadataDetail.getLoadName())
            && existingLoadMetaDataDetail.getSegmentStatus() == SegmentStatus.COMPACTED) {
          isIndexTableSegmentsCompacted = true;
          break;
        }
      }
      if (isIndexTableSegmentsCompacted) {
        break;
      }
    }
    return isIndexTableSegmentsCompacted;
  } else {
    return false;
  }
}
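A plausible call site (hypothetical, shown only for illustration) would consult this check before rewriting the SI table's status file and bail out early when it returns true:

// Hypothetical caller sketch: skip the status update if the new details are already compacted.
if (isSegmentsAlreadyCompactedForNewMetaDataDetails(indexTables, indexTableName, newLoadMetadataDetails)) {
  return false; // compaction already recorded for this SI table; nothing to update
}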
use of org.apache.carbondata.core.metadata.schema.table.CarbonTable in project carbondata by apache.
In the class CarbonStreamOutputFormatTest, the method setUp:
@Override
protected void setUp() throws Exception {
  super.setUp();
  // Build the Hadoop task identity that the output format expects in the configuration.
  JobID jobId = CarbonInputFormatUtil.getJobId(0);
  TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
  taskAttemptId = new TaskAttemptID(taskId, 0);
  hadoopConf = new Configuration();
  hadoopConf.set("mapred.job.id", jobId.toString());
  hadoopConf.set("mapred.tip.id", taskAttemptId.getTaskID().toString());
  hadoopConf.set("mapred.task.id", taskAttemptId.toString());
  hadoopConf.setBoolean("mapred.task.is.map", true);
  hadoopConf.setInt("mapred.task.partition", 0);

  // Create a test table and a load model pointing at the sample CSV.
  tablePath = new File("target/stream_output").getCanonicalPath();
  String dbName = "default";
  String tableName = "stream_table_output";
  AbsoluteTableIdentifier identifier = AbsoluteTableIdentifier.from(tablePath,
      new CarbonTableIdentifier(dbName, tableName, UUID.randomUUID().toString()));
  String factFilePath = new File("../hadoop/src/test/resources/data.csv").getCanonicalPath();
  CarbonTable table = new StoreCreator(new File("target/store").getAbsolutePath(),
      factFilePath).createTable(identifier);
  carbonLoadModel = StoreCreator.buildCarbonLoadModel(table, factFilePath, identifier);
}
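With this fixture in place, a test body would typically hand the load model to the output format and ask for a record writer. A rough sketch, assuming CarbonStreamOutputFormat exposes a setCarbonLoadModel helper, the standard Hadoop getRecordWriter contract, and org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl:

// Sketch of a test body built on this setUp (helper names and types are assumptions).
CarbonStreamOutputFormat.setCarbonLoadModel(hadoopConf, carbonLoadModel);
TaskAttemptContext context = new TaskAttemptContextImpl(hadoopConf, taskAttemptId);
RecordWriter<Void, Object> writer = new CarbonStreamOutputFormat().getRecordWriter(context);
writer.close(context);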
use of org.apache.carbondata.core.metadata.schema.table.CarbonTable in project carbondata by apache.
In the class CarbonTestUtil, the method getIndexFileCount:
public static int getIndexFileCount(String tableName, String segment, String extension) throws IOException {
  if (extension == null) {
    extension = CarbonTablePath.INDEX_FILE_EXT;
  }
  CarbonTable table = CarbonMetadata.getInstance().getCarbonTable(tableName);
  String path = CarbonTablePath.getSegmentPath(table.getAbsoluteTableIdentifier().getTablePath(), segment);
  boolean recursive = false;
  // Hive partition tables keep index files under the table path rather than a segment folder.
  if (table.isHivePartitionTable()) {
    path = table.getAbsoluteTableIdentifier().getTablePath();
    recursive = true;
  }
  List<CarbonFile> carbonFiles = FileFactory.getCarbonFile(path).listFiles(recursive,
      file -> file.getName().endsWith(CarbonTablePath.INDEX_FILE_EXT)
          || file.getName().endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT));
  CarbonFile[] validIndexFiles = (CarbonFile[]) SegmentFileStore.getValidCarbonIndexFiles(
      carbonFiles.toArray(new CarbonFile[carbonFiles.size()]));
  String finalExtension = extension;
  return (int) Arrays.stream(validIndexFiles)
      .filter(file -> file.getName().endsWith(finalExtension))
      .count();
}
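Hypothetical usage, assuming a table has been registered in CarbonMetadata under the placeholder name "default_sample":

// Count .carbonindex files in segment "0" (passing null falls back to INDEX_FILE_EXT).
int indexFiles = CarbonTestUtil.getIndexFileCount("default_sample", "0", null);
// Count merged index files instead.
int mergedFiles = CarbonTestUtil.getIndexFileCount("default_sample", "0", CarbonTablePath.MERGE_INDEX_FILE_EXT);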
use of org.apache.carbondata.core.metadata.schema.table.CarbonTable in project carbondata by apache.
In the class CarbonTestUtil, the method getSegmentFileCount:
public static int getSegmentFileCount(String tableName) throws IOException {
  CarbonTable carbonTable = CarbonMetadata.getInstance().getCarbonTable(tableName);
  CarbonFile segmentsFolder = FileFactory.getCarbonFile(
      CarbonTablePath.getSegmentFilesLocation(carbonTable.getTablePath()));
  assert (segmentsFolder.isFileExist());
  return segmentsFolder.listFiles(true).size();
}
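Usage mirrors getIndexFileCount; for example (the table name is again a placeholder):

// Count segment files under the table's segment-files location.
int segmentFiles = CarbonTestUtil.getSegmentFileCount("default_sample");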