Use of org.apache.carbondata.core.datastore.block.TableBlockInfo in project carbondata by apache.
The class BlockIndexStoreTest, method testloadAndGetTaskIdToSegmentsMapForSameBlockLoadedConcurrently.
@Test
public void testloadAndGetTaskIdToSegmentsMapForSameBlockLoadedConcurrently() throws IOException {
  String canonicalPath =
      new File(this.getClass().getResource("/").getPath() + "/../../").getCanonicalPath();
  File file = getPartFile();
  // five block infos pointing at the same carbondata file: two in segment "0", three in segment "1"
  TableBlockInfo info = new TableBlockInfo(file.getAbsolutePath(), 0, "0",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info1 = new TableBlockInfo(file.getAbsolutePath(), 0, "0",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info2 = new TableBlockInfo(file.getAbsolutePath(), 0, "1",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info3 = new TableBlockInfo(file.getAbsolutePath(), 0, "1",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info4 = new TableBlockInfo(file.getAbsolutePath(), 0, "1",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  CarbonTableIdentifier carbonTableIdentifier =
      new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "t3", "1");
  AbsoluteTableIdentifier absoluteTableIdentifier =
      new AbsoluteTableIdentifier("/src/test/resources", carbonTableIdentifier);
  // submit the same blocks from several threads so the cache sees concurrent load requests
  ExecutorService executor = Executors.newFixedThreadPool(3);
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info, info1 }),
      absoluteTableIdentifier));
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info2, info3, info4 }),
      absoluteTableIdentifier));
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info, info1 }),
      absoluteTableIdentifier));
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info2, info3, info4 }),
      absoluteTableIdentifier));
  executor.shutdown();
  try {
    executor.awaitTermination(1, TimeUnit.DAYS);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  List<TableBlockInfo> tableBlockInfos =
      Arrays.asList(new TableBlockInfo[] { info, info1, info2, info3, info4 });
  try {
    List<TableBlockUniqueIdentifier> tableBlockUniqueIdentifiers =
        getTableBlockUniqueIdentifierList(tableBlockInfos, absoluteTableIdentifier);
    List<AbstractIndex> loadAndGetBlocks = cache.getAll(tableBlockUniqueIdentifiers);
    // one AbstractIndex should come back for each of the five requested blocks
    assertTrue(loadAndGetBlocks.size() == 5);
  } catch (Exception e) {
    assertTrue(false);
  }
  List<String> segmentIds = new ArrayList<>();
  for (TableBlockInfo tableBlockInfo : tableBlockInfos) {
    segmentIds.add(tableBlockInfo.getSegmentId());
  }
  cache.removeTableBlocks(segmentIds, absoluteTableIdentifier);
}
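The test above submits BlockLoaderThread tasks but the excerpt does not show the helper itself. Below is a minimal sketch of what such a Callable might look like, assuming the test class exposes the shared cache field and the getTableBlockUniqueIdentifierList helper used later in the test; it is illustrative, not the verbatim class from the project.

// Hypothetical sketch of the loader task used above: each thread simply asks the
// shared cache for the blocks it was given, so concurrent loads hit the same keys.
private class BlockLoaderThread implements Callable<Void> {
  private final List<TableBlockInfo> tableBlockInfoList;
  private final AbsoluteTableIdentifier absoluteTableIdentifier;

  BlockLoaderThread(List<TableBlockInfo> tableBlockInfoList,
      AbsoluteTableIdentifier absoluteTableIdentifier) {
    this.tableBlockInfoList = tableBlockInfoList;
    this.absoluteTableIdentifier = absoluteTableIdentifier;
  }

  @Override public Void call() throws Exception {
    // build the unique identifiers and let the cache load (or reuse) the block indexes
    List<TableBlockUniqueIdentifier> identifiers =
        getTableBlockUniqueIdentifierList(tableBlockInfoList, absoluteTableIdentifier);
    cache.getAll(identifiers);
    return null;
  }
}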
Use of org.apache.carbondata.core.datastore.block.TableBlockInfo in project carbondata by apache.
The class BlockIndexStoreTest, method testloadAndGetTaskIdToSegmentsMapForDifferentSegmentLoadedConcurrently.
@Test
public void testloadAndGetTaskIdToSegmentsMapForDifferentSegmentLoadedConcurrently() throws IOException {
  String canonicalPath =
      new File(this.getClass().getResource("/").getPath() + "/../../").getCanonicalPath();
  File file = getPartFile();
  // eight block infos for the same carbondata file, spread across segments "0" to "3"
  TableBlockInfo info = new TableBlockInfo(file.getAbsolutePath(), 0, "0",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info1 = new TableBlockInfo(file.getAbsolutePath(), 0, "0",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info2 = new TableBlockInfo(file.getAbsolutePath(), 0, "1",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info3 = new TableBlockInfo(file.getAbsolutePath(), 0, "1",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info4 = new TableBlockInfo(file.getAbsolutePath(), 0, "1",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info5 = new TableBlockInfo(file.getAbsolutePath(), 0, "2",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info6 = new TableBlockInfo(file.getAbsolutePath(), 0, "2",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  TableBlockInfo info7 = new TableBlockInfo(file.getAbsolutePath(), 0, "3",
      new String[] { "loclhost" }, file.length(), ColumnarFormatVersion.V1);
  CarbonTableIdentifier carbonTableIdentifier =
      new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "t3", "1");
  AbsoluteTableIdentifier absoluteTableIdentifier =
      new AbsoluteTableIdentifier("/src/test/resources", carbonTableIdentifier);
  // each thread loads blocks of a different segment concurrently
  ExecutorService executor = Executors.newFixedThreadPool(3);
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info, info1 }),
      absoluteTableIdentifier));
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info2, info3, info4 }),
      absoluteTableIdentifier));
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info5, info6 }),
      absoluteTableIdentifier));
  executor.submit(new BlockLoaderThread(Arrays.asList(new TableBlockInfo[] { info7 }),
      absoluteTableIdentifier));
  executor.shutdown();
  try {
    executor.awaitTermination(1, TimeUnit.DAYS);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  List<TableBlockInfo> tableBlockInfos = Arrays.asList(
      new TableBlockInfo[] { info, info1, info2, info3, info4, info5, info6, info7 });
  try {
    List<TableBlockUniqueIdentifier> blockUniqueIdentifierList =
        getTableBlockUniqueIdentifierList(tableBlockInfos, absoluteTableIdentifier);
    List<AbstractIndex> loadAndGetBlocks = cache.getAll(blockUniqueIdentifierList);
    // one AbstractIndex is expected for each of the eight requested blocks
    assertTrue(loadAndGetBlocks.size() == 8);
  } catch (Exception e) {
    assertTrue(false);
  }
  List<String> segmentIds = new ArrayList<>();
  for (TableBlockInfo tableBlockInfo : tableBlockInfos) {
    segmentIds.add(tableBlockInfo.getSegmentId());
  }
  cache.removeTableBlocks(segmentIds, absoluteTableIdentifier);
}
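Both tests turn TableBlockInfo instances into cache keys through getTableBlockUniqueIdentifierList, which is not part of the excerpt. A plausible sketch follows, assuming TableBlockUniqueIdentifier offers a constructor taking the table identifier and a block info; treat the exact constructor as an assumption.

// Assumed helper: wraps each TableBlockInfo together with the table identifier so the
// cache can key every block uniquely per table.
private List<TableBlockUniqueIdentifier> getTableBlockUniqueIdentifierList(
    List<TableBlockInfo> tableBlockInfos, AbsoluteTableIdentifier absoluteTableIdentifier) {
  List<TableBlockUniqueIdentifier> tableBlockUniqueIdentifiers = new ArrayList<>();
  for (TableBlockInfo blockInfo : tableBlockInfos) {
    tableBlockUniqueIdentifiers.add(
        new TableBlockUniqueIdentifier(absoluteTableIdentifier, blockInfo));
  }
  return tableBlockUniqueIdentifiers;
}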
Use of org.apache.carbondata.core.datastore.block.TableBlockInfo in project carbondata by apache.
The class AbstractBlockIndexStoreCache, method checkAndLoadTableBlocks.
/**
 * This method will get the value for the given key. If the value does not exist
 * for the given key, it will load the block and add it to the LRU cache.
 *
 * @param tableBlock                 index instance to be populated
 * @param tableBlockUniqueIdentifier unique identifier of the block to load
 * @param lruCacheKey                key under which the block is cached
 */
protected void checkAndLoadTableBlocks(AbstractIndex tableBlock,
    TableBlockUniqueIdentifier tableBlockUniqueIdentifier, String lruCacheKey) throws IOException {
  // calculate the metadata size required to hold the block index in memory
  TableBlockInfo blockInfo = tableBlockUniqueIdentifier.getTableBlockInfo();
  long requiredMetaSize = CarbonUtil.calculateMetaSize(blockInfo);
  if (requiredMetaSize > 0) {
    tableBlock.setMemorySize(requiredMetaSize);
    // read the data file footer (metadata) of the block
    DataFileFooter footer = CarbonUtil.readMetadatFile(blockInfo);
    footer.setBlockInfo(new BlockInfo(blockInfo));
    // build the block index from the footer
    tableBlock.buildIndex(Collections.singletonList(footer));
    tableBlock.incrementAccessCount();
    boolean isTableBlockAddedToLruCache = lruCache.put(lruCacheKey, tableBlock, requiredMetaSize);
    if (!isTableBlockAddedToLruCache) {
      throw new IndexBuilderException(
          "Cannot load table blocks into memory. Not enough memory available");
    }
  } else {
    throw new IndexBuilderException("Invalid carbon data file: " + blockInfo.getFilePath());
  }
}
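For context, a concrete block index store would typically call checkAndLoadTableBlocks only on a cache miss. The following is a simplified, hypothetical sketch of that get-or-load pattern; the method name getOrLoad and the use of BlockIndex as the concrete index type are illustrative assumptions, not the actual BlockIndexStore API.

// Hypothetical caller: look the block up in the LRU cache first and load it only on a miss.
protected AbstractIndex getOrLoad(TableBlockUniqueIdentifier identifier, String lruCacheKey)
    throws IOException {
  AbstractIndex tableBlock = (AbstractIndex) lruCache.get(lruCacheKey);
  if (null == tableBlock) {
    // cache miss: create a fresh index holder and populate it from the data file footer
    tableBlock = new BlockIndex();
    checkAndLoadTableBlocks(tableBlock, identifier, lruCacheKey);
  }
  return tableBlock;
}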
Use of org.apache.carbondata.core.datastore.block.TableBlockInfo in project carbondata by apache.
The class SegmentTaskIndexStore, method mappedAndGetTaskIdToTableBlockInfo.
/**
 * Below method will be used to get the mapping of task id to all the table block infos
 * belonging to that task id.
 *
 * @param segmentToTableBlocksInfos segment id to table blocks info map
 * @return task id to table block info mapping
 */
private Map<TaskBucketHolder, List<TableBlockInfo>> mappedAndGetTaskIdToTableBlockInfo(
    Map<String, List<TableBlockInfo>> segmentToTableBlocksInfos) {
  Map<TaskBucketHolder, List<TableBlockInfo>> taskIdToTableBlockInfoMap = new ConcurrentHashMap<>();
  Iterator<Entry<String, List<TableBlockInfo>>> iterator =
      segmentToTableBlocksInfos.entrySet().iterator();
  while (iterator.hasNext()) {
    Entry<String, List<TableBlockInfo>> next = iterator.next();
    List<TableBlockInfo> value = next.getValue();
    for (TableBlockInfo blockInfo : value) {
      // the task number and bucket number are derived from the block's file path
      String taskNo = DataFileUtil.getTaskNo(blockInfo.getFilePath());
      String bucketNo = DataFileUtil.getBucketNo(blockInfo.getFilePath());
      TaskBucketHolder bucketHolder = new TaskBucketHolder(taskNo, bucketNo);
      List<TableBlockInfo> list = taskIdToTableBlockInfoMap.get(bucketHolder);
      if (null == list) {
        list = new ArrayList<TableBlockInfo>();
        taskIdToTableBlockInfoMap.put(bucketHolder, list);
      }
      list.add(blockInfo);
    }
  }
  return taskIdToTableBlockInfoMap;
}
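Grouping blocks by TaskBucketHolder only works because the holder serves as a map key, which requires value-based equals and hashCode. A minimal sketch of such a value holder follows, assuming it simply wraps the task and bucket numbers; the real class in SegmentTaskIndexStore may differ in detail.

// Assumed shape of the map key: equals/hashCode over (taskNo, bucketNumber) so all blocks
// written by the same task and bucket collapse into one map entry.
public static class TaskBucketHolder implements Serializable {
  public final String taskNo;
  public final String bucketNumber;

  public TaskBucketHolder(String taskNo, String bucketNumber) {
    this.taskNo = taskNo;
    this.bucketNumber = bucketNumber;
  }

  @Override public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    TaskBucketHolder that = (TaskBucketHolder) o;
    return taskNo.equals(that.taskNo) && bucketNumber.equals(that.bucketNumber);
  }

  @Override public int hashCode() {
    return 31 * taskNo.hashCode() + bucketNumber.hashCode();
  }
}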
Use of org.apache.carbondata.core.datastore.block.TableBlockInfo in project carbondata by apache.
The class CarbonLoaderUtilTest, method initSet1.
void initSet1() {
  blockInfos = new ArrayList<>();
  activeNode = new ArrayList<>();
  activeNode.add("node-7");
  activeNode.add("node-9");
  activeNode.add("node-11");
  String[] location = { "node-7", "node-9", "node-11" };
  // six blocks, all replicated on the three active nodes
  blockInfos.add(new TableBlockInfo("node", 1, "1", location, 0));
  blockInfos.add(new TableBlockInfo("node", 2, "1", location, 0));
  blockInfos.add(new TableBlockInfo("node", 3, "1", location, 0));
  blockInfos.add(new TableBlockInfo("node", 4, "1", location, 0));
  blockInfos.add(new TableBlockInfo("node", 5, "1", location, 0));
  blockInfos.add(new TableBlockInfo("node", 6, "1", location, 0));
  // each node is expected to be assigned two consecutive blocks
  expected = new HashMap<>();
  expected.put("node-7", blockInfos.subList(0, 2));
  expected.put("node-9", blockInfos.subList(2, 4));
  expected.put("node-11", blockInfos.subList(4, 6));
}
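initSet1 only prepares the fixture; a test would then compare the expected distribution against what the loader utility actually produces. Below is a hedged sketch of such a check, assuming a block-to-node mapping method along the lines of CarbonLoaderUtil.nodeBlockMapping and that TableBlockInfo is usable as a Distributable; both the method signature and the test name are assumptions, so consult CarbonLoaderUtil for the actual API.

// Illustrative assertion: the mapping produced for the three active nodes should match
// the two-blocks-per-node distribution prepared in initSet1().
@Test
public void testNodeBlockMappingForSet1() {
  initSet1();
  // nodeBlockMapping is assumed here; the real utility may take different parameters
  Map<String, List<Distributable>> actual =
      CarbonLoaderUtil.nodeBlockMapping(new ArrayList<Distributable>(blockInfos), activeNode.size());
  for (String node : activeNode) {
    assertTrue(actual.get(node).size() == 2);
  }
}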