Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.
The class MetaDataEndpointImpl, method dropSchema.
@Override
public void dropSchema(RpcController controller, DropSchemaRequest request, RpcCallback<MetaDataResponse> done) {
    String schemaName = null;
    try {
        List<Mutation> schemaMetaData = ProtobufUtil.getMutations(request);
        schemaName = request.getSchemaName();
        byte[] lockKey = SchemaUtil.getSchemaKey(schemaName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(schemaMetaData);
        try {
            acquireLock(region, lockKey, locks);
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>(1);
            result = doDropSchema(clientTimeStamp, schemaName, lockKey, schemaMetaData, invalidateList);
            if (result.getMutationCode() != MutationCode.SCHEMA_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            region.mutateRowsWithLocks(schemaMetaData, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            long currentTime = MetaDataUtil.getClientTimeStamp(schemaMetaData);
            for (ImmutableBytesPtr ptr : invalidateList) {
                metaDataCache.invalidate(ptr);
                metaDataCache.put(ptr, newDeletedSchemaMarker(currentTime));
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("drop schema failed:", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
    }
}
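All of the methods collected here follow the same locking skeleton: acquire the row lock(s), apply the metadata mutations atomically with mutateRowsWithLocks, invalidate any cached entries, and release the locks in a finally block. The following is a minimal, self-contained sketch of that skeleton, not Phoenix code; the class and method names (RowLockSkeleton, mutateUnderRowLock) are hypothetical.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.common.collect.Lists;

public class RowLockSkeleton {
    // Hypothetical helper mirroring the structure above: lock one row,
    // mutate atomically under the lock, and always release the lock.
    static void mutateUnderRowLock(Region region, byte[] lockKey, List<Mutation> mutations) throws IOException {
        List<RowLock> locks = Lists.newArrayList();
        try {
            RowLock rowLock = region.getRowLock(lockKey, false);
            if (rowLock == null) {
                throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(lockKey));
            }
            locks.add(rowLock);
            // Apply all mutations as a single atomic batch, as the endpoint methods do.
            region.mutateRowsWithLocks(mutations, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
        } finally {
            // Release in a finally block so a failure never leaks the lock.
            region.releaseRowLocks(locks);
        }
    }
}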
Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.
The class MetaDataEndpointImpl, method createFunction.
@Override
public void createFunction(RpcController controller, CreateFunctionRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[][] rowKeyMetaData = new byte[2][];
    byte[] functionName = null;
    try {
        List<Mutation> functionMetaData = ProtobufUtil.getMutations(request);
        boolean temporaryFunction = request.getTemporary();
        MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
        byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData);
        try {
            acquireLock(region, lockKey, locks);
            // Get as of latest timestamp so we can detect if we have a newer function that already
            // exists without making an additional query
            ImmutableBytesPtr cacheKey = new FunctionBytesPtr(lockKey);
            PFunction function = loadFunction(env, lockKey, cacheKey, clientTimeStamp, clientTimeStamp, request.getReplace(), functionMetaData);
            if (function != null) {
                if (function.getTimeStamp() < clientTimeStamp) {
                    // The existing function predates this request: if it has not been deleted,
                    // report FUNCTION_ALREADY_EXISTS, and stop unless this is a CREATE OR REPLACE.
                    if (!isFunctionDeleted(function)) {
                        builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS);
                        builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                        builder.addFunction(PFunction.toProto(function));
                        done.run(builder.build());
                        if (!request.getReplace()) {
                            return;
                        }
                    }
                } else {
                    builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_FUNCTION_FOUND);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    builder.addFunction(PFunction.toProto(function));
                    done.run(builder.build());
                    return;
                }
            }
            // Don't store function info for temporary functions.
            if (!temporaryFunction) {
                region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            }
            // Invalidate the cache - the next getFunction call will add it
            // TODO: consider loading the function that was just created here, patching up the parent function, and updating the cache
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            metaDataCache.invalidate(cacheKey);
            // Get timeStamp from mutations - the above method sets it if it's unset
            long currentTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData);
            builder.setReturnCode(MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND);
            builder.setMutationTime(currentTimeStamp);
            done.run(builder.build());
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("createFunction failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(Bytes.toString(functionName), t));
    }
}
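The branching on function.getTimeStamp() above implements a small conflict-resolution rule. The sketch below is a hypothetical condensation of that rule (decide is not a Phoenix method; PFunction, isFunctionDeleted, and the MutationCode values are taken from the snippet above). Note that the real method also sends the FUNCTION_ALREADY_EXISTS response to the callback even when a replace is requested, before continuing with the replace.

// Hypothetical condensation of the existing-function check in createFunction.
static MetaDataProtos.MutationCode decide(PFunction existing, long clientTimeStamp, boolean replace) {
    if (existing == null) {
        // No conflicting row: proceed with the create.
        return MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND;
    }
    if (existing.getTimeStamp() >= clientTimeStamp) {
        // A definition at or beyond the client's timestamp already exists.
        return MetaDataProtos.MutationCode.NEWER_FUNCTION_FOUND;
    }
    if (!isFunctionDeleted(existing) && !replace) {
        // An older, live definition clashes with the new name.
        return MetaDataProtos.MutationCode.FUNCTION_ALREADY_EXISTS;
    }
    // Deleted marker, or CREATE OR REPLACE: proceed with the create.
    return MetaDataProtos.MutationCode.FUNCTION_NOT_FOUND;
}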
Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.
The class MetaDataEndpointImpl, method addColumnsAndTablePropertiesToChildViews.
private MetaDataMutationResult addColumnsAndTablePropertiesToChildViews(PTable basePhysicalTable, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, Region region, List<RowLock> locks) throws IOException, SQLException {
    List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
    Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
    // Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
    for (Mutation m : tableMetadata) {
        if (m instanceof Put) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            // check if this put is for adding a column
            if (pkCount > COLUMN_NAME_INDEX && rkmd[COLUMN_NAME_INDEX] != null && rkmd[COLUMN_NAME_INDEX].length > 0 && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnPutsForBaseTable.add(new PutWithOrdinalPosition((Put) m, getInteger((Put) m, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES)));
            } else if (pkCount <= COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                // check if the put is for a table property
                for (Cell cell : m.getFamilyCellMap().get(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)) {
                    for (TableProperty tableProp : TableProperty.values()) {
                        byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
                        if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()) == 0 && tableProp.isValidOnView() && tableProp.isMutable()) {
                            Cell tablePropCell = CellUtil.createCell(cell.getRow(), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell));
                            tablePropertyCellMap.put(tableProp, tablePropCell);
                        }
                    }
                }
            }
        }
    }
    // Sort the puts by ordinal position
    Collections.sort(columnPutsForBaseTable);
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short deltaNumPkColsSoFar = 0;
        short columnsAddedToView = 0;
        short columnsAddedToBaseTable = 0;
        byte[] tenantId = viewInfo.getTenantId();
        byte[] schema = viewInfo.getSchemaName();
        byte[] table = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(tenantId, schema, table);
        // lock the rows corresponding to views so that no other thread can modify the view meta-data
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
        boolean addingExistingPkCol = false;
        int numCols = view.getColumns().size();
        // add the new columns to the child view
        for (PutWithOrdinalPosition p : columnPutsForBaseTable) {
            Put baseTableColumnPut = p.put;
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(baseTableColumnPut.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
                // ignore since it means that the column family is not present for the column to be added.
            } catch (ColumnNotFoundException e) {
                // ignore since it means the column is not present in the view
            }
            boolean isPkCol = columnFamily == null;
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            if (existingViewColumn != null) {
                MetaDataMutationResult result = validateColumnForAddToBaseTable(existingViewColumn, baseTableColumnPut, basePhysicalTable, isPkCol, view);
                if (result != null) {
                    return result;
                }
                if (isPkCol) {
                    viewPkCols.remove(existingViewColumn);
                    addingExistingPkCol = true;
                }
                /*
                 * For views that are not diverged, we need to make sure that the existing columns
                 * have the same ordinal position as in the base table. This is important because
                 * we rely on the ordinal position of the column to figure out whether dropping a
                 * column from the view will end up diverging the view from the base table.
                 *
                 * For already diverged views, we don't care about the ordinal position of the existing column.
                 */
                if (!isDivergedView(view)) {
                    int newOrdinalPosition = p.ordinalPosition;
                    // Check if the ordinal position of the column was getting updated from previous add column
                    // mutations.
                    int existingOrdinalPos = ordinalPositionList.getOrdinalPositionOfColumn(columnKey);
                    if (ordinalPositionList.size() == 0) {
                        /*
                         * No ordinal positions to be updated are in the list. In that case, check whether the
                         * existing ordinal position of the column is different from its new ordinal position.
                         * If yes, then initialize the ordinal position list with this column's ordinal position
                         * as the offset.
                         */
                        existingOrdinalPos = getOrdinalPosition(view, existingViewColumn);
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.setOffset(newOrdinalPosition);
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                            for (PColumn col : view.getColumns()) {
                                int ordinalPos = getOrdinalPosition(view, col);
                                if (ordinalPos >= newOrdinalPosition) {
                                    if (ordinalPos == existingOrdinalPos) {
                                        /*
                                         * No need to update ordinal positions of columns beyond the existing column's
                                         * old ordinal position.
                                         */
                                        break;
                                    }
                                    // increment ordinal position of columns occurring after this column by 1
                                    int updatedPos = ordinalPos + 1;
                                    ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                                }
                            }
                        }
                    } else {
                        if (existingOrdinalPos != newOrdinalPosition) {
                            ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        }
                    }
                    columnsAddedToBaseTable++;
                }
            } else {
                // The column doesn't exist in the view.
                Put viewColumnPut = new Put(columnKey, clientTimeStamp);
                for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
                    viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(), CellUtil.cloneValue(cell)));
                }
                if (isDivergedView(view)) {
                    if (isPkCol) {
                        /*
                         * Only pk cols of the base table are added to the diverged views. These pk
                         * cols are added at the end.
                         */
                        int lastOrdinalPos = getOrdinalPosition(view, view.getColumns().get(numCols - 1));
                        int newPosition = ++lastOrdinalPos;
                        byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
                        PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
                        viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
                        mutationsForAddingColumnsToViews.add(viewColumnPut);
                    } else {
                        // move on to the next column
                        continue;
                    }
                } else {
                    int newOrdinalPosition = p.ordinalPosition;
                    /*
                     * For a non-diverged view, we need to make sure that the base table column
                     * is added at the right position.
                     */
                    if (ordinalPositionList.size() == 0) {
                        ordinalPositionList.setOffset(newOrdinalPosition);
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                        for (PColumn col : view.getColumns()) {
                            int ordinalPos = getOrdinalPosition(view, col);
                            if (ordinalPos >= newOrdinalPosition) {
                                // increment ordinal position of columns by 1
                                int updatedPos = ordinalPos + 1;
                                ordinalPositionList.addColumn(getColumnKey(viewKey, col), updatedPos);
                            }
                        }
                    } else {
                        ordinalPositionList.addColumn(columnKey, newOrdinalPosition);
                    }
                    mutationsForAddingColumnsToViews.add(viewColumnPut);
                }
                if (isPkCol) {
                    deltaNumPkColsSoFar++;
                    // Set the key sequence for the pk column to be added
                    short currentKeySeq = SchemaUtil.getMaxKeySeq(view);
                    short newKeySeq = (short) (currentKeySeq + deltaNumPkColsSoFar);
                    byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
                    PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
                    viewColumnPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
                    addMutationsForAddingPkColsToViewIndexes(mutationsForAddingColumnsToViews, clientTimeStamp, view, deltaNumPkColsSoFar, columnName, viewColumnPut);
                }
                columnsAddedToView++;
                columnsAddedToBaseTable++;
            }
        }
        /*
         * Allow adding pk columns to the base table only if: 1) all the view pk columns are exactly the same
         * as the base table pk columns, or 2) we are adding all the existing view pk columns to the base table.
         */
        if (addingExistingPkCol && !viewPkCols.equals(basePhysicalTable.getPKColumns())) {
            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
        }
        addViewIndexesHeaderRowMutations(mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, view, deltaNumPkColsSoFar);
        // set table properties in child view
        if (!tablePropertyCellMap.isEmpty()) {
            Put viewHeaderRowPut = new Put(viewKey, clientTimeStamp);
            for (TableProperty tableProp : TableProperty.values()) {
                Cell tablePropertyCell = tablePropertyCellMap.get(tableProp);
                if (tablePropertyCell != null) {
                    // Set the property on the view if it is not mutable on a view (so its value always matches
                    // the base table), or if it is mutable on a view and the property value is the same as the
                    // base table property (which means it wasn't changed on the view).
                    if (!tableProp.isMutableOnView() || tableProp.getPTableValue(view).equals(tableProp.getPTableValue(basePhysicalTable))) {
                        viewHeaderRowPut.add(CellUtil.createCell(viewKey, CellUtil.cloneFamily(tablePropertyCell), CellUtil.cloneQualifier(tablePropertyCell), clientTimeStamp, tablePropertyCell.getTypeByte(), CellUtil.cloneValue(tablePropertyCell)));
                    }
                }
            }
            byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
            PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
            viewHeaderRowPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
            // invalidate the view so that it is removed from the cache
            invalidateList.add(new ImmutableBytesPtr(viewKey));
            mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
        }
        /*
         * Increment the sequence number by 1 if:
         * 1) For a diverged view, there were columns (pk columns) added to the view.
         * 2) For a non-diverged view if the base column count changed.
         */
        boolean changeSequenceNumber = (isDivergedView(view) && columnsAddedToView > 0) || (!isDivergedView(view) && columnsAddedToBaseTable > 0);
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, columnsAddedToView, columnsAddedToBaseTable, viewKey, view, ordinalPositionList, numCols, changeSequenceNumber);
    }
    return null;
}
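The ordinal-position bookkeeping above is the subtlest part of this method: when a new base-table column lands at position k in a non-diverged view, every existing view column at position >= k has to shift up by one. The toy example below illustrates just that shift; it is plain Java, not Phoenix code, and the map simply stands in for the view's column metadata.

import java.util.LinkedHashMap;
import java.util.Map;

public class OrdinalShiftExample {
    public static void main(String[] args) {
        // Ordinal positions of the view's columns before the add (1-based, as in SYSTEM.CATALOG).
        Map<String, Integer> viewColumns = new LinkedHashMap<>();
        viewColumns.put("PK", 1);
        viewColumns.put("COL_A", 2);
        viewColumns.put("COL_B", 3);
        int newOrdinalPosition = 2; // the new base-table column is added here
        // Shift every column at or beyond the insertion point up by one.
        for (Map.Entry<String, Integer> entry : viewColumns.entrySet()) {
            if (entry.getValue() >= newOrdinalPosition) {
                entry.setValue(entry.getValue() + 1);
            }
        }
        viewColumns.put("NEW_COL", newOrdinalPosition);
        // Prints {PK=1, COL_A=3, COL_B=4, NEW_COL=2}
        System.out.println(viewColumns);
    }
}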
Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.
The class MetaDataEndpointImpl, method addColumn.
@Override
public void addColumn(RpcController controller, final AddColumnRequest request, RpcCallback<MetaDataResponse> done) {
    try {
        List<Mutation> tableMetaData = ProtobufUtil.getMutations(request);
        MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
            @Override
            public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
                byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
                byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
                byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
                PTableType type = table.getType();
                byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
                // Size for worst case - all new columns are PK column
                List<Mutation> mutationsForAddingColumnsToViews = Lists.newArrayListWithExpectedSize(tableMetaData.size() * (1 + table.getIndexes().size()));
                if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
                    TableViewFinder childViewsResult = new TableViewFinder();
                    findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp);
                    if (childViewsResult.hasViews()) {
                        /*
                         * Disallow if:
                         * 1) The meta-data for the child view(s) spans more than one region,
                         *    since the changes cannot then be made in a transactional fashion.
                         *
                         * 2) The base column count is 0, which means that the metadata hasn't been
                         *    upgraded yet or the upgrade is currently in progress.
                         *
                         * 3) The request is from a client older than the 4.5 version of Phoenix.
                         *    Starting from 4.5, metadata requests have the client version included in them.
                         *    We don't want to allow clients before 4.5 to add a column to the base table if it has views.
                         *
                         * 4) We are trying to switch the tenancy of a table that has views.
                         */
                        if (!childViewsResult.allViewsInSingleRegion() || table.getBaseColumnCount() == 0 || !request.hasClientVersion() || switchAttribute(table, table.isMultiTenant(), tableMetaData, MULTI_TENANT_BYTES)) {
                            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                        } else {
                            mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getViewInfoList().size() * tableMetaData.size());
                            MetaDataMutationResult mutationResult = addColumnsAndTablePropertiesToChildViews(table, tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp, childViewsResult, region, locks);
                            // return if we were not able to add the column successfully
                            if (mutationResult != null)
                                return mutationResult;
                        }
                    }
                } else if (type == PTableType.VIEW && EncodedColumnsUtil.usesEncodedColumnNames(table)) {
                    /*
                     * When adding a column to a view that uses encoded column name scheme, we
                     * need to modify the CQ counters stored in the view's physical table. So to
                     * make sure clients get the latest PTable, we need to invalidate the cache
                     * entry.
                     */
                    invalidateList.add(new ImmutableBytesPtr(MetaDataUtil.getPhysicalTableRowForView(table)));
                }
                for (Mutation m : tableMetaData) {
                    byte[] key = m.getRow();
                    boolean addingPKColumn = false;
                    int pkCount = getVarChars(key, rowKeyMetaData);
                    if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
                        try {
                            if (pkCount > FAMILY_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0) {
                                PColumnFamily family = table.getColumnFamily(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
                                family.getPColumnForColumnNameBytes(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]);
                            } else if (pkCount > COLUMN_NAME_INDEX && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length > 0) {
                                addingPKColumn = true;
                                table.getPKColumn(new String(rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX]));
                            } else {
                                continue;
                            }
                            return new MetaDataMutationResult(MutationCode.COLUMN_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), table);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            if (addingPKColumn) {
                                // We may be adding a DESC column, so if the table is already
                                // able to be rowKeyOptimized, it should continue to be so.
                                if (table.rowKeyOrderOptimizable()) {
                                    UpgradeUtil.addRowKeyOrderOptimizableCell(mutationsForAddingColumnsToViews, tableHeaderRowKey, clientTimeStamp);
                                } else if (table.getType() == PTableType.VIEW) {
                                    // Don't allow the view PK to diverge from the table PK, as the upgrade code
                                    // does not handle this.
                                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
                                }
                                // Add all indexes to the invalidate list, as they will all be adding the same
                                // PK column. No need to lock them, as we already
                                // have the parent table lock at this point.
                                for (PTable index : table.getIndexes()) {
                                    invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(tenantId, index.getSchemaName().getBytes(), index.getTableName().getBytes())));
                                    // We may be adding a DESC column, so if the index is already
                                    // able to be rowKeyOptimized, it should continue to be so.
                                    if (index.rowKeyOrderOptimizable()) {
                                        byte[] indexHeaderRowKey = SchemaUtil.getTableKey(index.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : index.getTenantId().getBytes(), index.getSchemaName().getBytes(), index.getTableName().getBytes());
                                        UpgradeUtil.addRowKeyOrderOptimizableCell(mutationsForAddingColumnsToViews, indexHeaderRowKey, clientTimeStamp);
                                    }
                                }
                            }
                            continue;
                        }
                    } else if (pkCount == COLUMN_NAME_INDEX && !(Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0)) {
                        // Invalidate any table with mutations
                        // TODO: this likely means we don't need the above logic that
                        // loops through the indexes if adding a PK column, since we'd
                        // always have header rows for those.
                        invalidateList.add(new ImmutableBytesPtr(SchemaUtil.getTableKey(tenantId, rowKeyMetaData[SCHEMA_NAME_INDEX], rowKeyMetaData[TABLE_NAME_INDEX])));
                    }
                }
                tableMetaData.addAll(mutationsForAddingColumnsToViews);
                return null;
            }
        });
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
        }
    } catch (IOException ioe) {
        ProtobufUtil.setControllerException(controller, ioe);
    }
}
Use of org.apache.hadoop.hbase.regionserver.Region.RowLock in project phoenix by apache.
The class SequenceRegionObserver, method acquireLock.
private static void acquireLock(Region region, byte[] key, List<RowLock> locks) throws IOException {
    // getRowLock returns null when the lock cannot be obtained; surface that as an IOException.
    RowLock rowLock = region.getRowLock(key, false);
    if (rowLock == null) {
        throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
    }
    locks.add(rowLock);
}
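For reference, a typical call site pairs this helper with releaseRowLocks in a finally block, as the endpoint methods above do, accumulating one lock per key when several rows must be held at once. A short hypothetical sketch (region and keysToLock are assumed to be in scope):

// Hypothetical caller: lock several rows, do the guarded work, then release everything.
List<RowLock> locks = Lists.newArrayList();
try {
    for (byte[] key : keysToLock) {
        acquireLock(region, key, locks);
    }
    // ... perform mutations while the rows are locked ...
} finally {
    region.releaseRowLocks(locks);
}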