Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class HashCacheClient, method serialize.
private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, List<Expression> keyRangeRhsValues) throws SQLException {
    long maxSize = serverCache.getConnection().getQueryServices().getProps().getLong(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
    estimatedSize = Math.min(estimatedSize, maxSize);
    if (estimatedSize > Integer.MAX_VALUE) {
        throw new IllegalStateException("Estimated size(" + estimatedSize + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")");
    }
    try {
        TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int) estimatedSize);
        DataOutputStream out = new DataOutputStream(baOut);
        // Write onExpressions first, for hash key evaluation along with deserialization
        out.writeInt(onExpressions.size());
        for (Expression expression : onExpressions) {
            WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal());
            expression.write(out);
        }
        int exprSize = baOut.size() + Bytes.SIZEOF_INT;
        out.writeInt(exprSize * (singleValueOnly ? -1 : 1));
        int nRows = 0;
        // Placeholder; replaced at the end with the total number of rows
        out.writeInt(nRows);
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
            TupleUtil.write(result, out);
            if (baOut.size() > maxSize) {
                throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size() + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)");
            }
            // Evaluate key expressions for hash join key range optimization.
            if (keyRangeRhsExpression != null) {
                keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr));
            }
            nRows++;
        }
        TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT);
        DataOutputStream dataOut = new DataOutputStream(sizeOut);
        try {
            dataOut.writeInt(nRows);
            dataOut.flush();
            byte[] cache = baOut.getBuffer();
            // Replace the placeholder row count written above with the correct value.
            System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size());
            // Allocate a buffer sized for the worst-case compressed length.
            int maxCompressedSize = Snappy.maxCompressedLength(baOut.size());
            byte[] compressed = new byte[maxCompressedSize];
            int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0);
            // Point ptr at the compressed payload only.
            ptr.set(compressed, 0, compressedSize);
        } finally {
            dataOut.close();
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        iterator.close();
    }
}
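The core trick above is a length-prefix patch: serialize() writes a placeholder row count, streams an unknown number of rows, then patches the count into the buffer once it is known. This works because TrustedByteArrayOutputStream exposes its backing array via getBuffer(). Below is a minimal, self-contained sketch of the same pattern using only the JDK; ExposedByteArrayOutputStream is a hypothetical stand-in for Phoenix's class, not its API.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical stand-in for TrustedByteArrayOutputStream: a subclass can
// reach the protected buf/count fields and so patch bytes in place.
class ExposedByteArrayOutputStream extends ByteArrayOutputStream {
    byte[] buffer() { return buf; }   // backing array (may be larger than length())
    int length() { return count; }    // bytes actually written
}

public class LengthPrefixSketch {
    public static void main(String[] args) throws IOException {
        ExposedByteArrayOutputStream baOut = new ExposedByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baOut);
        int countOffset = baOut.length();
        out.writeInt(0); // placeholder for the row count, as in serialize()
        int nRows = 0;
        for (String row : new String[] { "a", "b", "c" }) {
            out.writeUTF(row);
            nRows++;
        }
        out.flush();
        // Patch the placeholder big-endian, matching DataOutputStream.writeInt,
        // just as serialize() does via System.arraycopy into getBuffer().
        byte[] buf = baOut.buffer();
        buf[countOffset] = (byte) (nRows >>> 24);
        buf[countOffset + 1] = (byte) (nRows >>> 16);
        buf[countOffset + 2] = (byte) (nRows >>> 8);
        buf[countOffset + 3] = (byte) nRows;
        System.out.println("rows=" + nRows + ", bytes=" + baOut.length());
    }
}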
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class NonAggregateRegionScannerFactory, method getTopNScanner.
/**
 * Returns a region scanner that performs TopN.
 * We only need to call startRegionOperation and closeRegionOperation when
 * getting the first Tuple (which forces running through the entire region),
 * since after that everything is held in memory.
 */
private RegionScanner getTopNScanner(RegionCoprocessorEnvironment env, final RegionScanner s, final OrderedResultIterator iterator, ImmutableBytesPtr tenantId) throws Throwable {
    final Tuple firstTuple;
    TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);
    long estSize = iterator.getEstimatedByteSize();
    final MemoryManager.MemoryChunk chunk = tenantCache.getMemoryManager().allocate(estSize);
    final Region region = getRegion();
    region.startRegionOperation();
    try {
        // Once we return from the first call to next, we've run through and cached
        // the topN rows, so we no longer need to start/stop a region operation.
        firstTuple = iterator.next();
        // Now that the topN are cached, we can resize based on the real size
        long actualSize = iterator.getByteSize();
        chunk.resize(actualSize);
    } catch (Throwable t) {
        ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
        return null;
    } finally {
        region.closeRegionOperation();
    }
    return new BaseRegionScanner(s) {
        private Tuple tuple = firstTuple;

        @Override
        public boolean isFilterDone() {
            return tuple == null;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            try {
                if (isFilterDone()) {
                    return false;
                }
                for (int i = 0; i < tuple.size(); i++) {
                    results.add(tuple.getValue(i));
                }
                tuple = iterator.next();
                return !isFilterDone();
            } catch (Throwable t) {
                ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), t);
                return false;
            }
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                try {
                    if (iterator != null) {
                        iterator.close();
                    }
                } catch (SQLException e) {
                    ServerUtil.throwIOException(region.getRegionInfo().getRegionNameAsString(), e);
                } finally {
                    chunk.close();
                }
            }
        }
    };
}
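The shape here is prime-then-serve: the expensive first next() runs inside the guarded startRegionOperation/closeRegionOperation pair, after which every row is served from memory. A minimal sketch of that shape over a plain java.util.Iterator; PrimedScanner is an illustrative name, not Phoenix API.

import java.util.Arrays;
import java.util.Iterator;

// Illustrative stand-in for a scanner that materializes its source on the
// first call and then serves rows from memory.
class PrimedScanner<T> {
    private T next;                 // row fetched eagerly while "guarded"
    private final Iterator<T> rest;

    PrimedScanner(Iterator<T> source) {
        // The expensive first fetch happens here, mirroring the next() that
        // getTopNScanner issues between startRegionOperation() and
        // closeRegionOperation(); everything after is cheap in-memory reads.
        this.next = source.hasNext() ? source.next() : null;
        this.rest = source;
    }

    boolean done() { return next == null; }   // analogous to isFilterDone()

    T poll() {
        T current = next;
        next = rest.hasNext() ? rest.next() : null;
        return current;
    }

    public static void main(String[] args) {
        PrimedScanner<Integer> s = new PrimedScanner<>(Arrays.asList(3, 1, 2).iterator());
        while (!s.done()) System.out.println(s.poll());
    }
}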
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class FilterResultIterator, method advance.
@Override
protected Tuple advance() throws SQLException {
    Tuple next;
    do {
        next = delegate.next();
        expression.reset();
    } while (next != null && (!expression.evaluate(next, ptr) || ptr.getLength() == 0 || !Boolean.TRUE.equals(expression.getDataType().toObject(ptr))));
    return next;
}
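advance() keeps pulling from the delegate until the filter expression evaluates to TRUE, skipping rows where evaluation fails, yields an empty value, or yields false; null signals end of results. The same skip-until-match loop over a plain iterator and predicate, as a minimal sketch (names are illustrative):

import java.util.Arrays;
import java.util.Iterator;
import java.util.function.Predicate;

// Illustrative equivalent of FilterResultIterator.advance(): pull from the
// delegate until the predicate accepts a row or the input is exhausted.
class FilteringAdvance {
    static <T> T advance(Iterator<T> delegate, Predicate<T> expression) {
        T next;
        do {
            next = delegate.hasNext() ? delegate.next() : null;
        } while (next != null && !expression.test(next));
        return next; // null signals end-of-results, as in the Phoenix code
    }

    public static void main(String[] args) {
        Iterator<Integer> it = Arrays.asList(1, 2, 3, 4, 5).iterator();
        Integer t;
        while ((t = advance(it, n -> n % 2 == 0)) != null) {
            System.out.println(t); // prints 2, then 4
        }
    }
}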
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class ConcatResultIteratorTest, method testConcat.
@Test
public void testConcat() throws Throwable {
    Tuple[] results1 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) };
    Tuple[] results2 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))) };
    Tuple[] results3 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))) };
    final List<PeekingResultIterator> results = Arrays.asList(new PeekingResultIterator[] {
        new MaterializedResultIterator(Arrays.asList(results1)),
        new MaterializedResultIterator(Arrays.asList(results2)),
        new MaterializedResultIterator(Arrays.asList(results3)) });
    ResultIterators iterators = new MaterializedResultIterators(results);
    Tuple[] expectedResults = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2))),
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4))) };
    ResultIterator scanner = new ConcatResultIterator(iterators);
    AssertResults.assertResults(scanner, expectedResults);
}
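The test asserts that ConcatResultIterator returns the sub-iterators' rows back-to-back in submission order (1, 2, then 3 and 4). A minimal sketch of that concatenation behavior over plain iterators; ConcatSketch is illustrative, not the Phoenix implementation.

import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

// Illustrative concatenation: drain each sub-iterator in order, which is
// exactly the ordering the test's expectedResults array encodes.
class ConcatSketch<T> implements Iterator<T> {
    private final Iterator<Iterator<T>> sources;
    private Iterator<T> current = Collections.emptyIterator();

    ConcatSketch(List<Iterator<T>> sources) { this.sources = sources.iterator(); }

    @Override
    public boolean hasNext() {
        // Skip exhausted sub-iterators until one has a row or none remain.
        while (!current.hasNext() && sources.hasNext()) current = sources.next();
        return current.hasNext();
    }

    @Override
    public T next() { hasNext(); return current.next(); }

    public static void main(String[] args) {
        ConcatSketch<Integer> c = new ConcatSketch<>(Arrays.asList(
            Arrays.asList(1).iterator(),
            Arrays.asList(2).iterator(),
            Arrays.asList(3, 4).iterator()));
        while (c.hasNext()) System.out.print(c.next() + " "); // 1 2 3 4
    }
}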
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class RowKeyOrderedAggregateResultIteratorTest, method testNoSpan.
@Test
public void testNoSpan() throws Exception {
    Tuple[] results1 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))) };
    Tuple[] results2 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))) };
    Tuple[] results3 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))),
        new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))) };
    final List<PeekingResultIterator> results = Arrays.asList(new PeekingResultIterator[] {
        new MaterializedResultIterator(Arrays.asList(results1)),
        new MaterializedResultIterator(Arrays.asList(results2)),
        new MaterializedResultIterator(Arrays.asList(results3)) });
    ResultIterators iterators = new MaterializedResultIterators(results);
    Tuple[] expectedResults = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1L))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(2L))),
        new SingleKeyValueTuple(new KeyValue(C, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(3L))),
        new SingleKeyValueTuple(new KeyValue(D, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(4L))) };
    ClientAggregators aggregators = TestUtil.getSingleSumAggregator(getUrl(), PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
    ResultIterator scanner = new RowKeyOrderedAggregateResultIterator(iterators, aggregators);
    AssertResults.assertResults(scanner, expectedResults);
}
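In testNoSpan each row key (A through D) appears in exactly one sub-iterator, so the aggregating iterator passes every row through unchanged; the interesting case it guards against is a key spanning iterator boundaries, where adjacent values must be combined. A small sketch of that adjacent-key grouping over a pre-sorted list, using plain long sums in place of ClientAggregators (illustrative names, not Phoenix API):

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map.Entry;

// Illustrative row-key-ordered aggregation: adjacent entries with the same
// key are merged; keys that never repeat pass through unchanged, which is
// the behavior testNoSpan asserts.
class RowKeyAggregateSketch {
    static List<Entry<String, Long>> aggregate(List<Entry<String, Long>> sorted) {
        List<Entry<String, Long>> out = new ArrayList<>();
        for (Entry<String, Long> e : sorted) {
            int last = out.size() - 1;
            if (last >= 0 && out.get(last).getKey().equals(e.getKey())) {
                // Same row key as the previous output row: combine (sum aggregator).
                out.set(last, new SimpleEntry<>(e.getKey(),
                        out.get(last).getValue() + e.getValue()));
            } else {
                out.add(new SimpleEntry<>(e.getKey(), e.getValue()));
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(aggregate(Arrays.asList(
                new SimpleEntry<>("A", 1L), new SimpleEntry<>("B", 2L),
                new SimpleEntry<>("C", 3L), new SimpleEntry<>("D", 4L))));
        // [A=1, B=2, C=3, D=4] -- no key spans a boundary, so nothing merges
    }
}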