
Example 6 with ColumnStreamData

Use of org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData in the Apache Hive project.

The returnData method of the class SerDeEncodedDataReader:

@Override
public void returnData(OrcEncodedColumnBatch ecb) {
    for (int colIx = 0; colIx < ecb.getTotalColCount(); ++colIx) {
        if (!ecb.hasData(colIx))
            continue;
        // TODO: reuse columnvector-s on hasBatch - save the array by column? take apart each list.
        ColumnStreamData[] datas = ecb.getColumnData(colIx);
        for (ColumnStreamData data : datas) {
            // Skip missing streams; only the caller that drops the last reference releases the data.
            if (data == null || data.decRef() != 0)
                continue;
            if (LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) {
                for (MemoryBuffer buf : data.getCacheBuffers()) {
                    LlapIoImpl.LOCKING_LOGGER.trace("Unlocking {} at the end of processing", buf);
                }
            }
            // Unpin the cache buffers backing this stream, then return the wrapper to its pool.
            bufferManager.decRefBuffers(data.getCacheBuffers());
            CSD_POOL.offer(data);
        }
    }
    // We can offer ECB even with some streams not discarded; reset() will clear the arrays.
    ECB_POOL.offer(ecb);
}
Also used: MemoryBuffer (org.apache.hadoop.hive.common.io.encoded.MemoryBuffer), ColumnStreamData (org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData)
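
The implementation above relies on each ColumnStreamData carrying a reference count: only the caller that drops the last reference unlocks the underlying cache buffers and recycles the object. The following is a minimal, self-contained sketch of that idea; SimpleRefCountedStream, SimplePool, returnStream, and their methods are hypothetical simplifications introduced here for illustration, not the actual Hive/LLAP API.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for a ref-counted stream wrapper (simplified, not ColumnStreamData).
final class SimpleRefCountedStream {
    private final AtomicInteger refCount = new AtomicInteger(1);
    private final List<String> cacheBuffers = new ArrayList<>();

    void incRef() { refCount.incrementAndGet(); }
    // Like decRef() in the example above: returns the remaining reference count.
    int decRef() { return refCount.decrementAndGet(); }
    List<String> getCacheBuffers() { return cacheBuffers; }
    void reset() { cacheBuffers.clear(); refCount.set(1); }
}

// Hypothetical object pool standing in for CSD_POOL / ECB_POOL.
final class SimplePool<T> {
    private final ArrayDeque<T> items = new ArrayDeque<>();
    synchronized void offer(T item) { items.push(item); }
    synchronized T take() { return items.poll(); }
}

public class ReturnDataSketch {
    private static final SimplePool<SimpleRefCountedStream> POOL = new SimplePool<>();

    // Analogue of returnData(): release resources only when the last reference is dropped.
    static void returnStream(SimpleRefCountedStream data) {
        if (data == null || data.decRef() != 0) {
            return; // another consumer still holds a reference
        }
        // At refcount 0: unpin the buffers (stand-in for bufferManager.decRefBuffers) ...
        System.out.println("Unlocking " + data.getCacheBuffers().size() + " buffers");
        // ... then reset and recycle the wrapper so a later read can reuse it.
        data.reset();
        POOL.offer(data);
    }

    public static void main(String[] args) {
        SimpleRefCountedStream s = new SimpleRefCountedStream();
        s.incRef();          // a second consumer takes a reference
        returnStream(s);     // no-op: refcount drops to 1
        returnStream(s);     // last reference: buffers unlocked, object pooled
    }
}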

Example 7 with ColumnStreamData

Use of org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData in the Apache Hive project.

The returnData method of the class OrcEncodedDataReader:

@Override
public void returnData(OrcEncodedColumnBatch ecb) {
    for (int colIx = 0; colIx < ecb.getTotalColCount(); ++colIx) {
        if (!ecb.hasData(colIx))
            continue;
        ColumnStreamData[] datas = ecb.getColumnData(colIx);
        for (ColumnStreamData data : datas) {
            if (data == null || data.decRef() != 0)
                continue;
            if (LlapIoImpl.LOCKING_LOGGER.isTraceEnabled()) {
                for (MemoryBuffer buf : data.getCacheBuffers()) {
                    LlapIoImpl.LOCKING_LOGGER.trace("Unlocking {} at the end of processing", buf);
                }
            }
            bufferManager.decRefBuffers(data.getCacheBuffers());
            CSD_POOL.offer(data);
        }
    }
    // We can offer ECB even with some streams not discarded; reset() will clear the arrays.
    ECB_POOL.offer(ecb);
}
Also used: MemoryBuffer (org.apache.hadoop.hive.common.io.encoded.MemoryBuffer), ColumnStreamData (org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData)
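
The closing comment in both implementations notes that the batch can be offered back to its pool even if some per-column streams were not recycled, because reset() clears the per-column arrays when the batch is reused. A minimal sketch of that batch-level reuse follows; SimpleBatch, takeBatch, returnBatch, and ECB_POOL_SKETCH are hypothetical names for illustration, not the actual EncodedColumnBatch or pool API.

import java.util.ArrayDeque;

// Hypothetical batch holding per-column stream arrays. Offering it back to a pool is
// safe even if some entries were not recycled: reset() drops whatever is left.
final class SimpleBatch {
    Object[][] columnData;

    void init(int colCount) {
        columnData = new Object[colCount][];
    }

    // Analogue of the reset() mentioned in the comment: clear the per-column arrays.
    void reset() {
        if (columnData == null) return;
        for (int i = 0; i < columnData.length; ++i) {
            columnData[i] = null;
        }
    }
}

public class BatchPoolSketch {
    private static final ArrayDeque<SimpleBatch> ECB_POOL_SKETCH = new ArrayDeque<>();

    static SimpleBatch takeBatch(int colCount) {
        SimpleBatch b = ECB_POOL_SKETCH.poll();
        if (b == null) {
            b = new SimpleBatch();
        }
        b.reset();          // stale per-column data from a prior use is discarded here
        b.init(colCount);
        return b;
    }

    static void returnBatch(SimpleBatch b) {
        // Safe even if some columnData entries still point at old streams;
        // they are cleared on the next reset()/init() cycle.
        ECB_POOL_SKETCH.offer(b);
    }

    public static void main(String[] args) {
        SimpleBatch b = takeBatch(3);
        b.columnData[0] = new Object[] { "stream" };  // simulate a leftover stream entry
        returnBatch(b);
        SimpleBatch reused = takeBatch(2);            // arrays cleared before reuse
        System.out.println(reused == b);              // true: same object, taken from the pool
    }
}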

Aggregations

ColumnStreamData (org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData) 7
MemoryBuffer (org.apache.hadoop.hive.common.io.encoded.MemoryBuffer) 2
OrcEncodedColumnBatch (org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch) 2
OrcProto (org.apache.orc.OrcProto) 2
IOException (java.io.IOException) 1
BooleanRef (org.apache.hadoop.hive.common.io.DataCache.BooleanRef) 1
DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList) 1
CreateHelper (org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper) 1
LlapDataBuffer (org.apache.hadoop.hive.llap.cache.LlapDataBuffer) 1
StripeData (org.apache.hadoop.hive.llap.cache.SerDeLowLevelCacheImpl.StripeData) 1
SerDeStripeMetadata (org.apache.hadoop.hive.llap.io.decode.GenericColumnVectorProducer.SerDeStripeMetadata) 1
CacheWriter (org.apache.hadoop.hive.llap.io.encoded.SerDeEncodedDataReader.CacheWriter) 1
ColumnVector (org.apache.hadoop.hive.ql.exec.vector.ColumnVector) 1
ColumnEncoding (org.apache.orc.OrcProto.ColumnEncoding) 1
TypeDescription (org.apache.orc.TypeDescription) 1
OutStream (org.apache.orc.impl.OutStream) 1