use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
the class CorrelateVariableFieldAccessExpression, method write().
@Override
public void write(DataOutput output) throws IOException {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
boolean success = evaluate(null, ptr);
Object value = success ? getDataType().toObject(ptr) : null;
try {
LiteralExpression expr = LiteralExpression.newConstant(value, getDataType());
expr.write(output);
} catch (SQLException e) {
throw new IOException(e);
}
}
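As a reference for the pattern above, here is a minimal, self-contained sketch (illustrative class and variable names, not Phoenix code) showing an ImmutableBytesWritable acting as a pointer into a byte buffer and the referenced bytes being round-tripped through DataOutput/DataInput, much as the LiteralExpression is written out here.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class PtrRoundTrip {
    public static void main(String[] args) throws IOException {
        byte[] buffer = Bytes.toBytes("correlated-value");
        // Point at a slice of the buffer without copying it.
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(buffer, 0, buffer.length);

        // Serialize the referenced bytes, as write(DataOutput) does above.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ptr.write(new DataOutputStream(bos));

        // Read them back into a fresh pointer and print the decoded value.
        ImmutableBytesWritable copy = new ImmutableBytesWritable();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(Bytes.toString(copy.get(), copy.getOffset(), copy.getLength()));
    }
}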
use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
the class MutationState, method send().
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
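// Reusable pointer: for each table processed below, getIndexMaintainers(...) fills it with
// the serialized index maintainer metadata that accompanies that table's mutations.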
boolean isTransactional;
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
final PTable table = tableRef.getTable();
Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
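// addRowMutations yields the data table's mutations first, then those of its index tables,
// so only the first pair is flagged as the data table.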
while (mutationsIterator.hasNext()) {
Pair<PName, List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList != null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send the data over, since a
// CommitException must cover all statements involved: none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
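// Serialize this table's index maintainers into the shared pointer; for the data table,
// setMetaDataOnMutations attaches that metadata to the mutations (possibly via a server-side
// cache) so the region servers can generate the corresponding index updates.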
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache != null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
txTableRefs.add(origTableRef);
addDMLFence(table);
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that will attach the
// necessary index meta data in the event of a rollback.
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
// Don't add immutable indexes (those are the only ones that would participate
// during a commit), as we don't need conflict detection for these.
if (tableInfo.isDataTable()) {
// Even for immutable, we need to do this so that an abort has the state
// necessary to generate the rows to delete.
addTransactionParticipant(txnAware);
} else {
txnAware.startTx(getTransaction());
}
hTable = txnAware;
}
long numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
long startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
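// Split the full mutation list into batches bounded by the configured row count and byte size
// before issuing them to HBase.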
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled())
logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
child.stop();
child.stop();
shouldRetry = false;
long mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
long mutationSizeBytes = calculateMutationSize(mutationList);
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. Retrying will cause that server to have it the next go-around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span, "Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
sqlE = new CommitException(e, getUncommittedStatementIndexes(), serverTimestamp);
} finally {
try {
if (cache != null)
cache.close();
} finally {
try {
hTable.close();
} catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
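For illustration, the core of the batching step above (splitting the per-table mutation list and calling hTable.batch per chunk) can be sketched as follows. splitIntoBatches and its limits are hypothetical stand-ins, not Phoenix's getMutationBatchList.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;

public class MutationBatching {
    // Hypothetical helper: split mutations into batches bounded by row count and heap size.
    static List<List<Mutation>> splitIntoBatches(List<Mutation> mutations, int maxRows, long maxBytes) {
        List<List<Mutation>> batches = new ArrayList<>();
        List<Mutation> current = new ArrayList<>();
        long currentBytes = 0;
        for (Mutation m : mutations) {
            long size = m.heapSize();
            if (!current.isEmpty() && (current.size() >= maxRows || currentBytes + size > maxBytes)) {
                batches.add(current);
                current = new ArrayList<>();
                currentBytes = 0;
            }
            current.add(m);
            currentBytes += size;
        }
        if (!current.isEmpty()) {
            batches.add(current);
        }
        return batches;
    }

    static void send(HTableInterface hTable, List<Mutation> mutations) throws IOException, InterruptedException {
        for (List<Mutation> batch : splitIntoBatches(mutations, 100, 2 * 1024 * 1024)) {
            hTable.batch(batch); // one RPC round per batch, as in the loop above
        }
    }
}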
use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
the class TupleProjector, method mergeProjectedValue().
public static ProjectedValueTuple mergeProjectedValue(ProjectedValueTuple dest, KeyValueSchema destSchema, ValueBitSet destBitSet, Tuple src, KeyValueSchema srcSchema, ValueBitSet srcBitSet, int offset, boolean useNewValueColumnQualifier) throws IOException {
ImmutableBytesWritable destValue = dest.getProjectedValue();
int origDestBitSetLen = dest.getBitSetLength();
destBitSet.clear();
destBitSet.or(destValue, origDestBitSetLen);
ImmutableBytesWritable srcValue = null;
int srcValueLen = 0;
if (src != null) {
srcValue = new ImmutableBytesWritable();
decodeProjectedValue(src, srcValue);
srcBitSet.clear();
srcBitSet.or(srcValue);
int origSrcBitSetLen = srcBitSet.getEstimatedLength();
for (int i = 0; i <= srcBitSet.getMaxSetBit(); i++) {
if (srcBitSet.get(i)) {
destBitSet.set(offset + i);
}
}
srcValueLen = srcValue.getLength() - origSrcBitSetLen;
}
int destBitSetLen = destBitSet.getEstimatedLength();
byte[] merged = new byte[destValue.getLength() - origDestBitSetLen + srcValueLen + destBitSetLen];
int o = Bytes.putBytes(merged, 0, destValue.get(), destValue.getOffset(), destValue.getLength() - origDestBitSetLen);
if (src != null) {
o = Bytes.putBytes(merged, o, srcValue.get(), srcValue.getOffset(), srcValueLen);
}
destBitSet.toBytes(merged, o);
return useNewValueColumnQualifier ? new ProjectedValueTuple(dest, dest.getTimestamp(), merged, 0, merged.length, destBitSetLen) : new OldProjectedValueTuple(dest, dest.getTimestamp(), merged, 0, merged.length, destBitSetLen);
}
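Stripped of the schema and bitset bookkeeping, the merge above is a concatenation of the byte regions referenced by two ImmutableBytesWritable pointers into one newly sized array via Bytes.putBytes. A minimal sketch of that copy pattern, with made-up inputs:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeBytesSketch {
    // Concatenate the regions referenced by two pointers into a single new array.
    static byte[] concat(ImmutableBytesWritable a, ImmutableBytesWritable b) {
        byte[] merged = new byte[a.getLength() + b.getLength()];
        int o = Bytes.putBytes(merged, 0, a.get(), a.getOffset(), a.getLength());
        Bytes.putBytes(merged, o, b.get(), b.getOffset(), b.getLength());
        return merged;
    }

    public static void main(String[] args) {
        ImmutableBytesWritable left = new ImmutableBytesWritable(Bytes.toBytes("left-"));
        ImmutableBytesWritable right = new ImmutableBytesWritable(Bytes.toBytes("right"));
        System.out.println(Bytes.toString(concat(left, right))); // left-right
    }
}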
use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
the class PDataTypeTest, method testFloat().
@Test
public void testFloat() {
Float na = 0.005f;
byte[] b = PFloat.INSTANCE.toBytes(na);
Float nb = (Float) PFloat.INSTANCE.toObject(b);
assertEquals(na, nb);
na = 10.0f;
b = PFloat.INSTANCE.toBytes(na, SortOrder.DESC);
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ptr.set(b);
nb = PFloat.INSTANCE.getCodec().decodeFloat(ptr, SortOrder.DESC);
assertEquals(na, nb);
na = 1.0f;
nb = -1.0f;
byte[] ba = PFloat.INSTANCE.toBytes(na);
byte[] bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) > 0);
na = -1f;
nb = -3f;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) > 0);
na = Float.NEGATIVE_INFINITY;
nb = -Float.MAX_VALUE;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = -Float.MAX_VALUE;
nb = -Float.MIN_VALUE;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = -Float.MIN_VALUE;
nb = -0.0f;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = -0.0f;
nb = 0.0f;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = 0.0f;
nb = Float.MIN_VALUE;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = Float.MIN_VALUE;
nb = Float.MAX_VALUE;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = Float.MAX_VALUE;
nb = Float.POSITIVE_INFINITY;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
na = Float.POSITIVE_INFINITY;
nb = Float.NaN;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
assertTrue(Bytes.compareTo(ba, bb) < 0);
Integer value = 100;
Object obj = PFloat.INSTANCE.toObject(value, PInteger.INSTANCE);
assertTrue(obj instanceof Float);
Double dvalue = Double.NEGATIVE_INFINITY;
obj = PFloat.INSTANCE.toObject(dvalue, PDouble.INSTANCE);
assertTrue(obj instanceof Float);
assertEquals(Float.NEGATIVE_INFINITY, obj);
na = 1.0f;
nb = -1.0f;
ba = PFloat.INSTANCE.toBytes(na);
bb = PFloat.INSTANCE.toBytes(nb);
float nna = PFloat.INSTANCE.getCodec().decodeFloat(ba, 0, SortOrder.DESC);
float nnb = PFloat.INSTANCE.getCodec().decodeFloat(bb, 0, SortOrder.DESC);
assertTrue(Float.compare(nna, nnb) < 0);
}
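The byte-order assertions above hold because Phoenix encodes floats so that unsigned byte comparison matches numeric order. The sketch below shows the standard IEEE-754 trick behind such a codec (flip the sign bit for non-negative values, flip every bit for negative ones); it mirrors the idea, not the exact PFloat implementation.

import org.apache.hadoop.hbase.util.Bytes;

public class OrderPreservingFloat {
    // Encode a float so that Bytes.compareTo on the encodings matches Float.compare on the values.
    static byte[] encode(float f) {
        int bits = Float.floatToIntBits(f);
        // Non-negative values: set the sign bit. Negative values: invert all bits.
        bits ^= (bits >> 31) | Integer.MIN_VALUE;
        return Bytes.toBytes(bits);
    }

    public static void main(String[] args) {
        System.out.println(Bytes.compareTo(encode(-0.0f), encode(0.0f)) < 0); // true
        System.out.println(Bytes.compareTo(encode(Float.MAX_VALUE), encode(Float.POSITIVE_INFINITY)) < 0); // true
        System.out.println(Bytes.compareTo(encode(Float.POSITIVE_INFINITY), encode(Float.NaN)) < 0); // true
    }
}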
use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
the class GetBitFunction, method init().
private void init() {
Expression offsetExpr = children.get(1);
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
if (offsetExpr.isStateless() && offsetExpr.getDeterminism() == Determinism.ALWAYS && offsetExpr.evaluate(null, ptr)) {
offsetPreCompute = (Integer) PInteger.INSTANCE.toObject(ptr, offsetExpr.getSortOrder());
} else
offsetPreCompute = null;
}
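As in init() above, a recurring Phoenix idiom is to hand a single reusable ImmutableBytesWritable to an evaluator as an out-parameter and decode the result in place when evaluation succeeds. A tiny stand-alone illustration, where fillWithInt is a made-up producer rather than a Phoenix API:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class OutParameterSketch {
    // Made-up producer: writes an encoded int into the caller-supplied pointer.
    static boolean fillWithInt(ImmutableBytesWritable ptr, int value) {
        ptr.set(Bytes.toBytes(value));
        return true; // evaluation succeeded
    }

    public static void main(String[] args) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        if (fillWithInt(ptr, 42)) {
            // Decode directly from the referenced region, no extra copy.
            int decoded = Bytes.toInt(ptr.get(), ptr.getOffset());
            System.out.println(decoded); // 42
        }
    }
}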