use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class MetaDataEndpointImpl method switchAttribute.
private boolean switchAttribute(PTable table, boolean currAttribute, List<Mutation> tableMetaData, byte[] attrQualifier) {
    for (Mutation m : tableMetaData) {
        if (m instanceof Put) {
            Put p = (Put) m;
            List<Cell> cells = p.get(TABLE_FAMILY_BYTES, attrQualifier);
            if (cells != null && cells.size() > 0) {
                Cell cell = cells.get(0);
                boolean newAttribute = (boolean) PBoolean.INSTANCE.toObject(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                return currAttribute != newAttribute;
            }
        }
    }
    return false;
}
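A minimal caller sketch (not from the Phoenix source) showing how switchAttribute might be used to detect whether a metadata batch flips a boolean table attribute; IMMUTABLE_ROWS_BYTES, tableKey, and table.isImmutableRows() are assumed names for illustration.

// Hypothetical: does this batch flip the table's immutable-rows flag?
// IMMUTABLE_ROWS_BYTES and isImmutableRows() are assumed, not from the source.
Put p = new Put(tableKey);
p.addColumn(TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES, PBoolean.INSTANCE.toBytes(true));
List<Mutation> tableMetaData = Collections.<Mutation>singletonList(p);
boolean flipped = switchAttribute(table, table.isImmutableRows(), tableMetaData, IMMUTABLE_ROWS_BYTES);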
use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class MetaDataEndpointImpl method updateIndexState.
@Override
public void updateIndexState(RpcController controller, UpdateIndexStateRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        byte[][] rowKeyMetaData = new byte[3][];
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        long timeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        List<Cell> newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES);
        Cell newKV = null;
        int disableTimeStampKVIndex = -1;
        int indexStateKVIndex = 0;
        int index = 0;
        for (Cell cell : newKVs) {
            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0) {
                newKV = cell;
                indexStateKVIndex = index;
            } else if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_DISABLE_TIMESTAMP_BYTES, 0, INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0) {
                disableTimeStampKVIndex = index;
            }
            index++;
        }
        PIndexState newState = PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
        RowLock rowLock = region.getRowLock(key, false);
        if (rowLock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
        try {
            Get get = new Get(key);
            get.setTimeRange(PTable.INITIAL_SEQ_NUM, timeStamp);
            get.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
            Result currentResult = region.get(get);
            if (currentResult.rawCells().length == 0) {
                builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
                builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                done.run(builder.build());
                return;
            }
            Cell dataTableKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            Cell currentStateKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            Cell currentDisableTimeStamp = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null;
            PIndexState currentState = PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV.getValueOffset()]);
            if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0) && (disableTimeStampKVIndex >= 0)) {
                long curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(), currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength());
                // a new disable timestamp is passed in with the mutation
                Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex);
                long newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(), newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength());
                // Keep the oldest disable timestamp so a partial rebuild replays from the
                // earliest failure rather than advancing it on each new data table write.
                if (curTimeStampVal != 0 && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp)) {
                    // do not reset the disable timestamp
                    newKVs.remove(disableTimeStampKVIndex);
                    disableTimeStampKVIndex = -1;
                }
            }
            // Detect invalid state transitions
            if (currentState == PIndexState.BUILDING) {
                if (newState == PIndexState.USABLE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
            } else if (currentState == PIndexState.DISABLE) {
                if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE && newState != PIndexState.INACTIVE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                // The build finished, but the index was disabled before that, so keep it disabled
                if (newState == PIndexState.ACTIVE) {
                    newState = PIndexState.DISABLE;
                }
            }
            if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) {
                timeStamp = currentStateKV.getTimestamp();
            }
            if ((currentState == PIndexState.UNUSABLE && newState == PIndexState.ACTIVE) || (currentState == PIndexState.ACTIVE && newState == PIndexState.UNUSABLE)) {
                newState = PIndexState.INACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            } else if (currentState == PIndexState.INACTIVE && newState == PIndexState.USABLE) {
                newState = PIndexState.ACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            }
            PTable returnTable = null;
            if (currentState != newState || disableTimeStampKVIndex != -1) {
                byte[] dataTableKey = null;
                if (dataTableKV != null) {
                    dataTableKey = SchemaUtil.getTableKey(tenantId, schemaName, dataTableKV.getValue());
                }
                if (dataTableKey != null) {
                    // make a copy of tableMetadata so we can add to it
                    tableMetadata = new ArrayList<Mutation>(tableMetadata);
                    // insert an empty KV to trigger a time stamp update on the data table row
                    Put p = new Put(dataTableKey);
                    p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
                    tableMetadata.add(p);
                }
                boolean setRowKeyOrderOptimizableCell = newState == PIndexState.BUILDING && !rowKeyOrderOptimizable;
                // When starting a rebuild, add the rowKeyOrderOptimizable cell
                // so that the row keys get generated using the new row key format
                if (setRowKeyOrderOptimizableCell) {
                    UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp);
                }
                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
                // Invalidate the cached metadata
                Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                metaDataCache.invalidate(cacheKey);
                if (dataTableKey != null) {
                    metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey));
                }
                if (setRowKeyOrderOptimizableCell || disableTimeStampKVIndex != -1 || currentState == PIndexState.DISABLE || newState == PIndexState.BUILDING) {
                    returnTable = doGetTable(key, HConstants.LATEST_TIMESTAMP, rowLock);
                }
            }
            // Get client timeStamp from mutations, since it may get updated by the
            // mutateRowsWithLocks call
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
            builder.setMutationTime(currentTime);
            if (returnTable != null) {
                builder.setTable(PTableImpl.toProto(returnTable));
            }
            done.run(builder.build());
            return;
        } finally {
            rowLock.release();
        }
    } catch (Throwable t) {
        logger.error("updateIndexState failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
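As a side note on the encoding used above: the index state is persisted as a single serialized byte, and both getSerializedBytes() and fromSerializedValue(byte) appear in this section, so a round trip looks like the following sketch.

// Round-trip sketch of the one-byte PIndexState encoding used above.
byte[] serialized = PIndexState.INACTIVE.getSerializedBytes();
PIndexState decoded = PIndexState.fromSerializedValue(serialized[0]);
assert decoded == PIndexState.INACTIVE;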
use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class MetaDataEndpointImpl method getPhysicalTableForView.
/**
 * @return null if the physical table row information is not present.
 */
private static Mutation getPhysicalTableForView(List<Mutation> tableMetadata, byte[][] parentSchemaTableNames) {
    int size = tableMetadata.size();
    byte[][] rowKeyMetaData = new byte[3][];
    MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
    Mutation physicalTableRow = null;
    boolean physicalTableLinkFound = false;
    if (size >= 2) {
        int i = size - 1;
        while (i >= 1) {
            Mutation m = tableMetadata.get(i);
            if (m instanceof Put) {
                LinkType linkType = MetaDataUtil.getLinkType(m);
                if (linkType == LinkType.PHYSICAL_TABLE) {
                    physicalTableRow = m;
                    physicalTableLinkFound = true;
                    break;
                }
            }
            i--;
        }
    }
    if (!physicalTableLinkFound) {
        parentSchemaTableNames[0] = null;
        parentSchemaTableNames[1] = null;
        return null;
    }
    rowKeyMetaData = new byte[5][];
    getVarChars(physicalTableRow.getRow(), 5, rowKeyMetaData);
    byte[] colBytes = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
    byte[] famBytes = rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX];
    if ((colBytes == null || colBytes.length == 0) && (famBytes != null && famBytes.length > 0)) {
        byte[] sName = SchemaUtil.getSchemaNameFromFullName(famBytes).getBytes();
        byte[] tName = SchemaUtil.getTableNameFromFullName(famBytes).getBytes();
        parentSchemaTableNames[0] = sName;
        parentSchemaTableNames[1] = tName;
    }
    return physicalTableRow;
}
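A hedged caller sketch (assumed, not from the source): resolving the physical table behind a view from the client's metadata batch.

// Illustrative caller; tableMetadata is assumed to be the client's mutation batch.
byte[][] parentSchemaTableNames = new byte[2][];
Mutation physicalTableRow = getPhysicalTableForView(tableMetadata, parentSchemaTableNames);
if (physicalTableRow == null) {
    // no PHYSICAL_TABLE link row in the batch; both name slots were nulled out
} else {
    // filled only when the link row carries a family name but no column name
    byte[] parentSchema = parentSchemaTableNames[0];
    byte[] parentTable = parentSchemaTableNames[1];
}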
use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class MetaDataRegionObserver method updateIndexState.
private static void updateIndexState(PhoenixConnection conn, String indexTableName, RegionCoprocessorEnvironment env, PIndexState oldState, PIndexState newState) throws ServiceException, Throwable {
    byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
    String schemaName = SchemaUtil.getSchemaNameFromFullName(indexTableName);
    String indexName = SchemaUtil.getTableNameFromFullName(indexTableName);
    // Mimic the Put that gets generated by the client on an update of the
    // index state
    Put put = new Put(indexTableKey);
    put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes());
    if (newState == PIndexState.ACTIVE) {
        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
    }
    final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
    MetaDataMutationResult result = conn.getQueryServices().updateIndexState(tableMetadata, null);
    MutationCode code = result.getMutationCode();
    if (code == MutationCode.TABLE_NOT_FOUND) {
        throw new TableNotFoundException(schemaName, indexName);
    }
    if (code == MutationCode.UNALLOWED_TABLE_MUTATION) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_INDEX_STATE_TRANSITION).setMessage(" currentState=" + oldState + ". requestedState=" + newState).setSchemaName(schemaName).setTableName(indexName).build().buildException();
    }
}
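A hypothetical invocation matching the signature above; the table name and the INACTIVE-to-ACTIVE transition (e.g. after a successful partial rebuild) are illustrative, not taken from the source.

// Assumed call site: flip an index back to ACTIVE after a rebuild completes.
updateIndexState(conn, "MY_SCHEMA.MY_INDEX", env, PIndexState.INACTIVE, PIndexState.ACTIVE);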
use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class Indexer method preBatchMutateWithExceptions.
public void preBatchMutateWithExceptions(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws Throwable {
    // first group all the updates for a single row into a single update to be processed
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Durability defaultDurability = Durability.SYNC_WAL;
    if (c.getEnvironment().getRegion() != null) {
        defaultDurability = c.getEnvironment().getRegion().getTableDesc().getDurability();
        defaultDurability = (defaultDurability == Durability.USE_DEFAULT) ? Durability.SYNC_WAL : defaultDurability;
    }
    Durability durability = Durability.SKIP_WAL;
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        if (this.builder.isAtomicOp(m)) {
            miniBatchOp.setOperationStatus(i, SUCCESS);
            continue;
        }
        // skip this mutation if indexing isn't enabled for it
        if (this.builder.isEnabled(m)) {
            Durability effectiveDurability = (m.getDurability() == Durability.USE_DEFAULT) ? defaultDurability : m.getDurability();
            if (effectiveDurability.ordinal() > durability.ordinal()) {
                durability = effectiveDurability;
            }
            // add the mutation to the batch set
            ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
            MultiMutation stored = mutations.get(row);
            // we haven't seen this row before, so add it
            if (stored == null) {
                stored = new MultiMutation(row);
                mutations.put(row, stored);
            }
            stored.addAll(m);
        }
    }
    // early exit if it turns out we don't have any edits
    if (mutations.isEmpty()) {
        return;
    }
    // dump all the index updates into a single WALEdit. They will get combined in
    // the end anyway, so don't worry which one we get
    WALEdit edit = miniBatchOp.getWalEdit(0);
    if (edit == null) {
        edit = new WALEdit();
        miniBatchOp.setWalEdit(0, edit);
    }
    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        // get the index updates for all elements in this batch
        Collection<Pair<Mutation, byte[]>> indexUpdates = this.builder.getIndexUpdate(miniBatchOp, mutations.values());
        current.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(current, "index update count", indexUpdates.size());
        byte[] tableName = c.getEnvironment().getRegion().getTableDesc().getTableName().getName();
        Iterator<Pair<Mutation, byte[]>> indexUpdatesItr = indexUpdates.iterator();
        List<Mutation> localUpdates = new ArrayList<Mutation>(indexUpdates.size());
        while (indexUpdatesItr.hasNext()) {
            Pair<Mutation, byte[]> next = indexUpdatesItr.next();
            if (Bytes.compareTo(next.getSecond(), tableName) == 0) {
                localUpdates.add(next.getFirst());
                indexUpdatesItr.remove();
            }
        }
        if (!localUpdates.isEmpty()) {
            miniBatchOp.addOperationsFromCP(0, localUpdates.toArray(new Mutation[localUpdates.size()]));
        }
        // write them, either to WAL or the index tables
        doPre(indexUpdates, edit, durability);
    }
}
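The per-row grouping in the loop above can be written more compactly on Java 8+; this is a stylistic sketch, not how the Phoenix source does it, and it assumes the MultiMutation(ImmutableBytesPtr) constructor is usable as a method reference here.

// Equivalent to the get/put dance in the method above.
ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
mutations.computeIfAbsent(row, MultiMutation::new).addAll(m);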