Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class MetaDataUtil, method getPutOnlyAutoPartitionColumn.
public static Put getPutOnlyAutoPartitionColumn(PTable parentTable, List<Mutation> tableMetaData) {
int autoPartitionPutIndex = parentTable.isMultiTenant() ? 2 : 1;
int i = 0;
for (Mutation m : tableMetaData) {
if (m instanceof Put && i++ == autoPartitionPutIndex) {
return (Put) m;
}
}
throw new IllegalStateException("No auto partition column row found in table metadata");
}
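To see which Put the index arithmetic selects, here is a minimal, self-contained sketch using plain HBase mutations. The row keys and the findPut helper are hypothetical stand-ins, since invoking the real method would require a PTable instance.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class AutoPartitionPutDemo {
    // Mirrors the selection above: only Puts are counted, Deletes are skipped,
    // and the Put at the computed index is returned.
    static Put findPut(boolean multiTenant, List<Mutation> tableMetaData) {
        int autoPartitionPutIndex = multiTenant ? 2 : 1;
        int i = 0;
        for (Mutation m : tableMetaData) {
            if (m instanceof Put && i++ == autoPartitionPutIndex) {
                return (Put) m;
            }
        }
        throw new IllegalStateException("No auto partition column row found");
    }

    public static void main(String[] args) {
        List<Mutation> metadata = Arrays.<Mutation>asList(
                new Put(Bytes.toBytes("header-row")),      // Put index 0
                new Delete(Bytes.toBytes("stale-row")),    // not counted
                new Put(Bytes.toBytes("auto-partition"))); // Put index 1
        // For a non-multi-tenant parent table, the second Put is returned.
        System.out.println(Bytes.toString(findPut(false, metadata).getRow())); // auto-partition
    }
}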
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class MetaDataUtil, method getTenantIdAndFunctionName.
public static void getTenantIdAndFunctionName(List<Mutation> functionMetadata, byte[][] rowKeyMetaData) {
Mutation m = getTableHeaderRow(functionMetadata);
getVarChars(m.getRow(), 2, rowKeyMetaData);
}
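Phoenix separates variable-length row-key components with a zero byte, and getVarChars splits the header row key on those separators into rowKeyMetaData (tenant id first, then function name). A rough, self-contained illustration of that splitting; the splitVarChars helper and the sample key are hypothetical stand-ins for the real utility.

import org.apache.hadoop.hbase.util.Bytes;

public class RowKeySplitDemo {
    // Split a row key on the zero-byte separator into nFields parts,
    // approximating what getVarChars(row, 2, rowKeyMetaData) produces.
    static byte[][] splitVarChars(byte[] row, int nFields) {
        byte[][] parts = new byte[nFields][];
        int start = 0;
        int field = 0;
        for (int i = 0; i <= row.length && field < nFields; i++) {
            if (i == row.length || row[i] == 0) {
                parts[field++] = Bytes.copy(row, start, i - start);
                start = i + 1;
            }
        }
        return parts;
    }

    public static void main(String[] args) {
        byte[] row = Bytes.add(Bytes.toBytes("tenant1"), new byte[] { 0 }, Bytes.toBytes("MY_FUNC"));
        byte[][] parts = splitVarChars(row, 2);
        System.out.println(Bytes.toString(parts[0]) + " / " + Bytes.toString(parts[1])); // tenant1 / MY_FUNC
    }
}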
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class MutationState, method send.
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
List<TableRef> txTableRefs = Lists.newArrayListWithExpectedSize(mutations.size());
Map<TableInfo, List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
boolean isTransactional;
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validate(tableRef, valuesMap) : serverTimeStamps[i++];
final PTable table = tableRef.getTable();
Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName, List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList != null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactional tables, track statement indexes and keep the mutations, since nothing will have been committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache != null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
txTableRefs.add(origTableRef);
addDMLFence(table);
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If the table has indexes, wrap the HTable so the index metadata needed to generate delete mutations is available on rollback.
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
TransactionAwareHTable txnAware = TransactionUtil.getTransactionAwareHTable(hTable, table.isImmutableRows());
// Don't add immutable indexes (those are the only ones that would participate during a commit), as we don't need conflict detection for these.
if (tableInfo.isDataTable()) {
// Even for immutable, we need to do this so that an abort has the state
// necessary to generate the rows to delete.
addTransactionParticipant(txnAware);
} else {
txnAware.startTx(getTransaction());
}
hTable = txnAware;
}
long numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
long startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled())
logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
child.stop();
// stop the span a second time to make sure it is really stopped
child.stop();
shouldRetry = false;
long mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
long mutationSizeBytes = calculateMutationSize(mutationList);
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that a split occurred after sending the index metadata
// and one of the region servers doesn't have it. Clearing the region cache lets the metadata be resent,
// so the next attempt should have it. If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span, "Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
sqlE = new CommitException(e, getUncommittedStatementIndexes(), serverTimestamp);
} finally {
try {
if (cache != null)
cache.close();
} finally {
try {
hTable.close();
} catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
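The retry shape in this method is easy to miss in the surrounding detail: a batch is retried at most once, and only for the single failure (INDEX_METADATA_NOT_FOUND, after clearing the region cache) that a retry can actually fix. A minimal sketch of that control flow, with a hypothetical BatchSender interface and isRetriable predicate standing in for HTableInterface.batch and the real error-code check.

import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;

public class RetryOnceDemo {
    // Hypothetical stand-in for HTableInterface.batch(...)
    interface BatchSender {
        void send(List<Mutation> batch) throws Exception;
    }

    // Mirrors the do/while above: one retry, and only for a failure we know
    // a retry can fix; anything else (or a second failure) propagates.
    static void sendWithOneRetry(BatchSender sender, List<Mutation> batch) throws Exception {
        int retryCount = 0;
        boolean shouldRetry;
        do {
            shouldRetry = true;
            try {
                sender.send(batch);
                shouldRetry = false;
            } catch (Exception e) {
                if (retryCount == 0 && isRetriable(e)) {
                    // the real code clears the table's region cache here before looping
                    continue;
                }
                throw e;
            }
        } while (shouldRetry && retryCount++ < 1);
    }

    // Hypothetical predicate; the real check compares SQLException error codes.
    static boolean isRetriable(Exception e) {
        return e.getMessage() != null && e.getMessage().contains("INDEX_METADATA_NOT_FOUND");
    }
}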
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class MutationState, method addRowMutations.
private Iterator<Pair<PName, List<Mutation>>> addRowMutations(final TableRef tableRef, final Map<ImmutableBytesPtr, RowMutationState> values, final long timestamp, boolean includeAllIndexes, final boolean sendAll) {
final PTable table = tableRef.getTable();
// Only maintain tables with immutable rows through this client-side mechanism.
// TODO: remove check for isWALDisabled once PHOENIX-3137 is fixed.
final Iterator<PTable> indexes = includeAllIndexes || table.isWALDisabled()
    ? IndexMaintainer.nonDisabledIndexIterator(table.getIndexes().iterator())
    : table.isImmutableRows()
        ? IndexMaintainer.enabledGlobalIndexIterator(table.getIndexes().iterator())
        : Iterators.<PTable>emptyIterator();
final List<Mutation> mutationList = Lists.newArrayListWithExpectedSize(values.size());
final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
generateMutations(tableRef, timestamp, values, mutationList, mutationsPertainingToIndex);
return new Iterator<Pair<PName, List<Mutation>>>() {
boolean isFirst = true;
@Override
public boolean hasNext() {
return isFirst || indexes.hasNext();
}
@Override
public Pair<PName, List<Mutation>> next() {
if (isFirst) {
isFirst = false;
return new Pair<PName, List<Mutation>>(table.getPhysicalName(), mutationList);
}
PTable index = indexes.next();
List<Mutation> indexMutations;
try {
indexMutations = IndexUtil.generateIndexData(table, index, values, mutationsPertainingToIndex, connection.getKeyValueBuilder(), connection);
// we may also have to include delete mutations for immutable tables if we are not processing all the tables in the mutations map
if (!sendAll) {
TableRef key = new TableRef(index);
Map<ImmutableBytesPtr, RowMutationState> rowToColumnMap = mutations.remove(key);
if (rowToColumnMap != null) {
final List<Mutation> deleteMutations = Lists.newArrayList();
generateMutations(tableRef, timestamp, rowToColumnMap, deleteMutations, null);
indexMutations.addAll(deleteMutations);
}
}
} catch (SQLException e) {
throw new IllegalDataException(e);
}
return new Pair<PName, List<Mutation>>(index.getPhysicalName(), indexMutations);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
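The anonymous iterator returned here always yields the data table's mutations first, then lazily builds each index's mutations on demand, so index data is only generated if a consumer asks for it. The shape is easier to see in isolation; a small sketch with Strings standing in for the Pair<PName, List<Mutation>> elements.

import java.util.Arrays;
import java.util.Iterator;

// A head-then-delegate iterator, the same shape as the one returned above.
public class HeadThenRestIterator implements Iterator<String> {
    private boolean isFirst = true;
    private final String head;
    private final Iterator<String> rest;

    HeadThenRestIterator(String head, Iterator<String> rest) {
        this.head = head;
        this.rest = rest;
    }

    @Override
    public boolean hasNext() {
        return isFirst || rest.hasNext();
    }

    @Override
    public String next() {
        if (isFirst) {
            isFirst = false;
            return head; // the "data table" element always comes first
        }
        return rest.next(); // then each "index" element, computed on demand
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }

    public static void main(String[] args) {
        Iterator<String> it = new HeadThenRestIterator("DATA_TABLE",
                Arrays.asList("IDX_1", "IDX_2").iterator());
        while (it.hasNext()) {
            System.out.println(it.next()); // DATA_TABLE, IDX_1, IDX_2
        }
    }
}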
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class MutationState, method getMutationBatchList.
/**
 * Split the list of mutations into multiple lists that don't exceed row and byte thresholds
 * @param batchSize maximum number of mutations per batch
 * @param batchSizeBytes maximum cumulative mutation size, in bytes, per batch
 * @param allMutationList List of HBase mutations
 * @return List of lists of mutations
 */
public static List<List<Mutation>> getMutationBatchList(long batchSize, long batchSizeBytes, List<Mutation> allMutationList) {
List<List<Mutation>> mutationBatchList = Lists.newArrayList();
List<Mutation> currentList = Lists.newArrayList();
long currentBatchSizeBytes = 0L;
for (Mutation mutation : allMutationList) {
long mutationSizeBytes = KeyValueUtil.calculateMutationDiskSize(mutation);
if (currentList.size() == batchSize || currentBatchSizeBytes + mutationSizeBytes > batchSizeBytes) {
if (currentList.size() > 0) {
mutationBatchList.add(currentList);
currentList = Lists.newArrayList();
currentBatchSizeBytes = 0L;
}
}
currentList.add(mutation);
currentBatchSizeBytes += mutationSizeBytes;
}
if (currentList.size() > 0) {
mutationBatchList.add(currentList);
}
return mutationBatchList;
}
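A quick usage sketch, assuming an HBase 1.x client (where Put.addColumn is available) and MutationState on the classpath from org.apache.phoenix.execute. The thresholds are chosen so that only the row count (4) drives the split; the byte threshold is far too large to matter for ten tiny Puts.

import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.execute.MutationState;
import com.google.common.collect.Lists;

public class BatchSplitDemo {
    public static void main(String[] args) {
        List<Mutation> all = Lists.newArrayList();
        for (int i = 0; i < 10; i++) {
            Put p = new Put(Bytes.toBytes("row" + i));
            p.addColumn(Bytes.toBytes("0"), Bytes.toBytes("V"), Bytes.toBytes(i));
            all.add(p);
        }
        // Row threshold 4, byte threshold effectively unlimited for this data:
        List<List<Mutation>> batches = MutationState.getMutationBatchList(4, 1_000_000, all);
        System.out.println(batches.size()); // 3 batches of 4, 4, and 2 mutations
    }
}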