Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class MetaDataEndpointImpl, method doDropTable: the server-side routine that deletes a table's metadata rows and recursively drops any child views and indexes.
private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName, byte[] tableName, byte[] parentTableName, PTableType tableType, List<Mutation> rowsToDelete, List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete, boolean isCascade, int clientVersion) throws IOException, SQLException {
long clientTimeStamp = MetaDataUtil.getClientTimeStamp(rowsToDelete);
Region region = env.getRegion();
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
PTable table = (PTable) metaDataCache.getIfPresent(cacheKey);
// We always cache the latest version - fault in if not in cache
if (table != null || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientVersion)) != null) {
if (table.getTimeStamp() < clientTimeStamp) {
if (isTableDeleted(table) || tableType != table.getType()) {
return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
} else {
return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
}
// We didn't find a table at the latest timestamp, so either there is no table or there was a table, but it's been deleted. In either case we want to return.
if (table == null) {
if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) {
return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
// Make sure we're not deleting the "wrong" child
if (parentTableName != null && table.getParentTableName() != null && !Arrays.equals(parentTableName, table.getParentTableName().getBytes())) {
return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
// Since we don't allow back in time DDL, we know if we have a table it's the one
// we want to delete. FIXME: we shouldn't need a scan here, but should be able to
// use the table to generate the Delete markers.
Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
List<byte[]> indexNames = Lists.newArrayList();
List<Cell> results = Lists.newArrayList();
try (RegionScanner scanner = region.getScanner(scan)) {
scanner.next(results);
if (results.isEmpty()) {
// Should not be possible
return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
// Handle any child views that exist
TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table, clientVersion);
if (tableViewFinderResult.hasViews()) {
if (isCascade) {
if (tableViewFinderResult.allViewsInMultipleRegions()) {
// view metadata spans multiple regions
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
} else if (tableViewFinderResult.allViewsInSingleRegion()) {
// Recursively delete views - safe as all the views are in the same region
for (ViewInfo viewInfo : tableViewFinderResult.getViewInfoList()) {
byte[] viewTenantId = viewInfo.getTenantId();
byte[] viewSchemaName = viewInfo.getSchemaName();
byte[] viewName = viewInfo.getViewName();
byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
Delete delete = new Delete(viewKey, clientTimeStamp);
rowsToDelete.add(delete);
acquireLock(region, viewKey, locks);
MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName, viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
return result;
}
}
}
} else {
// DROP without CASCADE on tables with child views is not permitted
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
}
}
}
// Add to list of HTables to delete, unless it's a view or it's a shared index
if (tableType != PTableType.VIEW && table.getViewIndexId() == null) {
tableNamesToDelete.add(table.getPhysicalName().getBytes());
} else {
sharedTablesToDelete.add(new SharedTableState(table));
}
invalidateList.add(cacheKey);
byte[][] rowKeyMetaData = new byte[5][];
do {
Cell kv = results.get(LINK_TYPE_INDEX);
int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
if (nColumns == 5 && rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX].length > 0 && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0) {
LinkType linkType = LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]);
if (rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0 && linkType == LinkType.INDEX_TABLE) {
indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX]);
} else if (linkType == LinkType.PARENT_TABLE || linkType == LinkType.PHYSICAL_TABLE) {
// delete parent->child link for views
Cell parentTenantIdCell = MetaDataUtil.getCell(results, PhoenixDatabaseMetaData.PARENT_TENANT_ID_BYTES);
PName parentTenantId = parentTenantIdCell != null ? PNameFactory.newName(parentTenantIdCell.getValueArray(), parentTenantIdCell.getValueOffset(), parentTenantIdCell.getValueLength()) : null;
byte[] linkKey = MetaDataUtil.getChildLinkKey(parentTenantId, table.getParentSchemaName(), table.getParentTableName(), table.getTenantId(), table.getName());
Delete linkDelete = new Delete(linkKey, clientTimeStamp);
rowsToDelete.add(linkDelete);
}
}
// FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
// FIXME: the version of the Delete constructor without the lock args was introduced
// in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
// of the client.
Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
rowsToDelete.add(delete);
results.clear();
scanner.next(results);
} while (!results.isEmpty());
}
// Recursively delete indexes
for (byte[] indexName : indexNames) {
byte[] indexKey = SchemaUtil.getTableKey(tenantId, schemaName, indexName);
// FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
// FIXME: the version of the Delete constructor without the lock args was introduced
// in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
// of the client.
Delete delete = new Delete(indexKey, clientTimeStamp);
rowsToDelete.add(delete);
acquireLock(region, indexKey, locks);
MetaDataMutationResult result = doDropTable(indexKey, tenantId, schemaName, indexName, tableName, PTableType.INDEX, rowsToDelete, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
return result;
}
}
return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), table, tableNamesToDelete);
}
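Note the return-code convention above: a successful drop returns MutationCode.TABLE_ALREADY_EXISTS together with the dropped table, which is why the recursive calls for child views and indexes propagate any other code as an error. For orientation only, a hedged client-side sketch of the DDL that exercises the isCascade branch (the schema/table names and the ZooKeeper quorum are illustrative assumptions, not taken from the source):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
     Statement stmt = conn.createStatement()) {
    // Without CASCADE, dropping a table that still has child views surfaces
    // UNALLOWED_TABLE_MUTATION to the client as a SQLException.
    stmt.execute("DROP TABLE MY_SCHEMA.MY_TABLE CASCADE");
}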
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class MetaDataEndpointImpl, method createTable: the coprocessor endpoint that validates the parent table, acquires row locks, and writes the metadata rows for a new table, view, or index.
@Override
public void createTable(RpcController controller, CreateTableRequest request, RpcCallback<MetaDataResponse> done) {
MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
byte[][] rowKeyMetaData = new byte[3][];
byte[] schemaName = null;
byte[] tableName = null;
try {
int clientVersion = request.getClientVersion();
List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
boolean isNamespaceMapped = MetaDataUtil.isNameSpaceMapped(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
final IndexType indexType = MetaDataUtil.getIndexType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
byte[] parentSchemaName = null;
byte[] parentTableName = null;
PTableType tableType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
byte[] parentTableKey = null;
Mutation viewPhysicalTableRow = null;
Set<TableName> indexes = new HashSet<TableName>();
byte[] cPhysicalName = SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped).getBytes();
byte[] cParentPhysicalName = null;
if (tableType == PTableType.VIEW) {
byte[][] parentSchemaTableNames = new byte[3][];
byte[][] parentPhysicalSchemaTableNames = new byte[3][];
/*
* For a view, we lock the base physical table row. For a mapped view, there is
* no link present to the physical table. So the viewPhysicalTableRow is null
* in that case.
*/
viewPhysicalTableRow = getPhysicalTableRowForView(tableMetadata, parentSchemaTableNames, parentPhysicalSchemaTableNames);
long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
if (parentPhysicalSchemaTableNames[2] != null) {
parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentPhysicalSchemaTableNames[1], parentPhysicalSchemaTableNames[2]);
PTable parentTable = getTable(env, parentTableKey, new ImmutableBytesPtr(parentTableKey), clientTimeStamp, clientTimeStamp, clientVersion);
if (parentTable == null) {
builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
cParentPhysicalName = parentTable.getPhysicalName().getBytes();
if (parentSchemaTableNames[2] != null && Bytes.compareTo(parentSchemaTableNames[2], parentPhysicalSchemaTableNames[2]) != 0) {
// if view is created on view
byte[] parentKey = SchemaUtil.getTableKey(parentSchemaTableNames[0] == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaTableNames[0], parentSchemaTableNames[1], parentSchemaTableNames[2]);
parentTable = getTable(env, parentKey, new ImmutableBytesPtr(parentKey), clientTimeStamp, clientTimeStamp, clientVersion);
if (parentTable == null) {
// it could be a global view
parentKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentSchemaTableNames[1], parentSchemaTableNames[2]);
parentTable = getTable(env, parentKey, new ImmutableBytesPtr(parentKey), clientTimeStamp, clientTimeStamp, clientVersion);
}
}
if (parentTable == null) {
builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
for (PTable index : parentTable.getIndexes()) {
indexes.add(TableName.valueOf(index.getPhysicalName().getBytes()));
}
} else {
// Mapped View
cParentPhysicalName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
}
parentSchemaName = parentPhysicalSchemaTableNames[1];
parentTableName = parentPhysicalSchemaTableNames[2];
} else if (tableType == PTableType.INDEX) {
parentSchemaName = schemaName;
/*
* For an index we lock the parent table's row which could be a physical table or a view.
* If the parent table is a physical table, then the tenantIdBytes is empty because
* we allow creating an index with a tenant connection only if the parent table is a view.
*/
parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName);
long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
PTable parentTable = loadTable(env, parentTableKey, new ImmutableBytesPtr(parentTableKey), clientTimeStamp, clientTimeStamp, clientVersion);
if (IndexType.LOCAL == indexType) {
cPhysicalName = parentTable.getPhysicalName().getBytes();
cParentPhysicalName = parentTable.getPhysicalName().getBytes();
} else if (parentTable.getType() == PTableType.VIEW) {
cPhysicalName = MetaDataUtil.getViewIndexPhysicalName(parentTable.getPhysicalName().getBytes());
cParentPhysicalName = parentTable.getPhysicalName().getBytes();
} else {
cParentPhysicalName = SchemaUtil.getPhysicalHBaseTableName(parentSchemaName, parentTableName, isNamespaceMapped).getBytes();
}
}
getCoprocessorHost().preCreateTable(Bytes.toString(tenantIdBytes), SchemaUtil.getTableName(schemaName, tableName), (tableType == PTableType.VIEW) ? null : TableName.valueOf(cPhysicalName), cParentPhysicalName == null ? null : TableName.valueOf(cParentPhysicalName), tableType, /* TODO: During initial create we may not need the family map */
Collections.<byte[]>emptySet(), indexes);
Region region = env.getRegion();
List<RowLock> locks = Lists.newArrayList();
// Place a lock using key for the table to be created
byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
try {
acquireLock(region, tableKey, locks);
// If the table key resides outside the region, return without doing anything
MetaDataMutationResult result = checkTableKeyInRegion(tableKey, region);
if (result != null) {
done.run(MetaDataMutationResult.toProto(result));
return;
}
long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
ImmutableBytesPtr parentCacheKey = null;
PTable parentTable = null;
if (parentTableName != null) {
// Check if the parent table resides in the same region. If not, don't worry about locking the parent table row
// or loading the parent table. For a view, the parent table that needs to be locked is the base physical table.
// For an index on view, the view header row needs to be locked.
result = checkTableKeyInRegion(parentTableKey, region);
if (result == null) {
acquireLock(region, parentTableKey, locks);
parentCacheKey = new ImmutableBytesPtr(parentTableKey);
parentTable = loadTable(env, parentTableKey, parentCacheKey, clientTimeStamp, clientTimeStamp, clientVersion);
if (parentTable == null || isTableDeleted(parentTable)) {
builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
// make sure we haven't gone over our threshold for indexes on this table.
if (execeededIndexQuota(tableType, parentTable)) {
builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
long parentTableSeqNumber;
if (tableType == PTableType.VIEW && viewPhysicalTableRow != null && request.hasClientVersion()) {
// Starting 4.5, the client passes the sequence number of the physical table in the table metadata.
parentTableSeqNumber = MetaDataUtil.getSequenceNumber(viewPhysicalTableRow);
} else if (tableType == PTableType.VIEW && !request.hasClientVersion()) {
// Before 4.5, due to a bug, the parent table key wasn't available.
// So don't do anything and prevent the exception from being thrown.
parentTableSeqNumber = parentTable.getSequenceNumber();
} else {
parentTableSeqNumber = MetaDataUtil.getParentSequenceNumber(tableMetadata);
}
// If parent table isn't at the expected sequence number, then return
if (parentTable.getSequenceNumber() != parentTableSeqNumber) {
builder.setReturnCode(MetaDataProtos.MutationCode.CONCURRENT_TABLE_MUTATION);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
builder.setTable(PTableImpl.toProto(parentTable));
done.run(builder.build());
return;
}
}
}
// Load child table next
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
// Get as of latest timestamp so we can detect if we have a newer table that already
// exists without making an additional query
PTable table = loadTable(env, tableKey, cacheKey, clientTimeStamp, HConstants.LATEST_TIMESTAMP, clientVersion);
if (table != null) {
if (table.getTimeStamp() < clientTimeStamp) {
// If the existing table is older than the client timestamp and it's been deleted, continue with the creation
if (!isTableDeleted(table)) {
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
builder.setTable(PTableImpl.toProto(table));
done.run(builder.build());
return;
}
} else {
builder.setReturnCode(MetaDataProtos.MutationCode.NEWER_TABLE_FOUND);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
builder.setTable(PTableImpl.toProto(table));
done.run(builder.build());
return;
}
}
// Add the ROW_KEY_ORDER_OPTIMIZABLE cell for anything other than a view; a view's row key order optimizability is determined by what the client sends over depending on its base physical table.
if (tableType != PTableType.VIEW) {
UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, tableKey, clientTimeStamp);
}
// If the parent table has an auto partition sequence, modify the tableMetadata and set the view statement and partition column correctly
if (parentTable != null && parentTable.getAutoPartitionSeqName() != null) {
long autoPartitionNum = 1;
try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
Statement stmt = connection.createStatement()) {
String seqName = parentTable.getAutoPartitionSeqName();
// Not going through the standard route of using statement.execute() as that code path
// is blocked if the metadata hasn't been upgraded to the new minor release.
String seqNextValueSql = String.format("SELECT NEXT VALUE FOR %s", seqName);
PhoenixStatement ps = stmt.unwrap(PhoenixStatement.class);
QueryPlan plan = ps.compileQuery(seqNextValueSql);
ResultIterator resultIterator = plan.iterator();
PhoenixResultSet rs = ps.newResultSet(resultIterator, plan.getProjector(), plan.getContext());
rs.next();
autoPartitionNum = rs.getLong(1);
} catch (SequenceNotFoundException e) {
builder.setReturnCode(MetaDataProtos.MutationCode.AUTO_PARTITION_SEQUENCE_NOT_FOUND);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
PColumn autoPartitionCol = parentTable.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parentTable));
if (!PLong.INSTANCE.isCoercibleTo(autoPartitionCol.getDataType(), autoPartitionNum)) {
builder.setReturnCode(MetaDataProtos.MutationCode.CANNOT_COERCE_AUTO_PARTITION_ID);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
builder.setAutoPartitionNum(autoPartitionNum);
// set the VIEW STATEMENT column of the header row
Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
Cell cell = cells.get(0);
String autoPartitionWhere = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parentTable), autoPartitionNum);
String hbaseVersion = VersionInfo.getVersion();
ImmutableBytesPtr ptr = new ImmutableBytesPtr();
KeyValueBuilder kvBuilder = KeyValueBuilder.get(hbaseVersion);
MetaDataUtil.getMutationValue(tableHeaderPut, VIEW_STATEMENT_BYTES, kvBuilder, ptr);
byte[] value = ptr.copyBytesIfNecessary();
byte[] viewStatement = null;
// if we have an existing where clause add the auto partition where clause to it
if (!Bytes.equals(value, QueryConstants.EMPTY_COLUMN_VALUE_BYTES)) {
viewStatement = Bytes.add(value, Bytes.toBytes(" AND "), Bytes.toBytes(autoPartitionWhere));
} else {
viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
}
Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
cells.add(viewStatementCell);
// set the IS_VIEW_REFERENCED column of the auto partition column row
Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
familyCellMap = autoPartitionPut.getFamilyCellMap();
cells = familyCellMap.get(TABLE_FAMILY_BYTES);
cell = cells.get(0);
PDataType dataType = autoPartitionCol.getDataType();
Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
byte[] bytes = new byte[dataType.getByteSize() + 1];
dataType.toBytes(val, bytes, 0);
Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
cells.add(viewConstantCell);
}
Short indexId = null;
if (request.hasAllocateIndexId() && request.getAllocateIndexId()) {
String tenantIdStr = tenantIdBytes.length == 0 ? null : Bytes.toString(tenantIdBytes);
try (PhoenixConnection connection = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class)) {
PName physicalName = parentTable.getPhysicalName();
int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets();
SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, physicalName, nSequenceSaltBuckets, parentTable.isNamespaceMapped());
// TODO: Review. Earlier, the sequence was created at (SCN-1/LATEST_TIMESTAMP) and incremented at the client with max(SCN, dataTable.getTimestamp()), but it seems we should
// always use LATEST_TIMESTAMP to avoid seeing wrong sequence values across connections that have an SCN set
// and those that do not.
long sequenceTimestamp = HConstants.LATEST_TIMESTAMP;
try {
connection.getQueryServices().createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), Short.MIN_VALUE, 1, 1, Long.MIN_VALUE, Long.MAX_VALUE, false, sequenceTimestamp);
} catch (SequenceAlreadyExistsException e) {
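// The view index sequence already exists; fall through and just increment it below.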
}
long[] seqValues = new long[1];
SQLException[] sqlExceptions = new SQLException[1];
connection.getQueryServices().incrementSequences(Collections.singletonList(new SequenceAllocation(key, 1)), HConstants.LATEST_TIMESTAMP, seqValues, sqlExceptions);
if (sqlExceptions[0] != null) {
throw sqlExceptions[0];
}
long seqValue = seqValues[0];
if (seqValue > Short.MAX_VALUE) {
builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
return;
}
Put tableHeaderPut = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetadata);
NavigableMap<byte[], List<Cell>> familyCellMap = tableHeaderPut.getFamilyCellMap();
List<Cell> cells = familyCellMap.get(TABLE_FAMILY_BYTES);
Cell cell = cells.get(0);
PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
Object val = dataType.toObject(seqValue, PLong.INSTANCE);
byte[] bytes = new byte[dataType.getByteSize() + 1];
dataType.toBytes(val, bytes, 0);
Cell indexIdCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_INDEX_ID_BYTES, cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
cells.add(indexIdCell);
indexId = (short) seqValue;
}
}
// TODO: Switch this to HRegion#batchMutate when we want to support indexes on the
// system table. Basically, we get all the locks that we don't already hold for all the
// tableMetadata rows. This ensures we don't have deadlock situations (ensuring
// primary and then index table locks are held, in that order). For now, we just don't support
// indexing on the system table. This is an issue because of the way we manage batch mutation
// in the Indexer.
mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
// Invalidate the cache - the next getTable call will add it
// TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
if (parentCacheKey != null) {
metaDataCache.invalidate(parentCacheKey);
}
metaDataCache.invalidate(cacheKey);
// Get timeStamp from mutations - the above method sets it if it's unset
long currentTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
if (indexId != null) {
builder.setViewIndexId(indexId);
}
builder.setMutationTime(currentTimeStamp);
done.run(builder.build());
return;
} finally {
releaseRowLocks(region, locks);
}
} catch (Throwable t) {
logger.error("createTable failed", t);
ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
}
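For context, the request handled above originates from client-side DDL. A hedged JDBC sketch that would reach the VIEW branch of this endpoint (the tenant ID, connection URL, and table names are illustrative assumptions):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;
Properties props = new Properties();
// Tenant-specific connection; the view created below carries the tenant ID in its metadata row key.
props.setProperty("TenantId", "acme");
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
     Statement stmt = conn.createStatement()) {
    // The server locks the base table's header row and verifies its sequence number, as shown above.
    stmt.execute("CREATE VIEW MY_VIEW AS SELECT * FROM BASE.MULTI_TENANT_TABLE");
}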
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class PhoenixDatabaseMetaData, method addTenantIdFilter: appends a tenant-ID predicate to the WHERE clause of a metadata query, honoring the connection's tenant ID.
private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern) {
PName tenantId = connection.getTenantId();
if (tenantIdPattern == null) {
if (tenantId != null) {
appendConjunction(buf);
buf.append(" (" + TENANT_ID + " IS NULL " + " OR " + TENANT_ID + " = '" + StringUtil.escapeStringConstant(tenantId.getString()) + "') ");
}
} else if (tenantIdPattern.length() == 0) {
appendConjunction(buf);
buf.append(TENANT_ID + " IS NULL ");
} else {
appendConjunction(buf);
buf.append(" TENANT_ID LIKE '" + StringUtil.escapeStringConstant(tenantIdPattern) + "' ");
if (tenantId != null) {
buf.append(" and TENANT_ID = '" + StringUtil.escapeStringConstant(tenantId.getString()) + "' ");
}
}
}
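As a worked illustration (not from the source), assuming the connection's tenant ID is 'acme', the fragments appended by this method are roughly:
// tenantIdPattern == null  ->  (TENANT_ID IS NULL OR TENANT_ID = 'acme')
// tenantIdPattern == ""    ->  TENANT_ID IS NULL
// tenantIdPattern == "AC%" ->  TENANT_ID LIKE 'AC%' and TENANT_ID = 'acme'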
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class MutationState, method addRowMutations: produces (physical table name, mutation list) pairs for the data table and each maintained index.
private Iterator<Pair<PName, List<Mutation>>> addRowMutations(final TableRef tableRef, final MultiRowMutationState values, final long mutationTimestamp, final long serverTimestamp, boolean includeAllIndexes, final boolean sendAll) {
final PTable table = tableRef.getTable();
// Only maintain tables with immutable rows through this client-side mechanism
final Iterator<PTable> indexIterator = includeAllIndexes ? IndexMaintainer.maintainedIndexes(table.getIndexes().iterator()) : (table.isImmutableRows() || table.isTransactional()) ? IndexMaintainer.maintainedGlobalIndexes(table.getIndexes().iterator()) : Collections.<PTable>emptyIterator();
final List<PTable> indexList = Lists.newArrayList(indexIterator);
final Iterator<PTable> indexes = indexList.iterator();
final List<Mutation> mutationList = Lists.newArrayListWithExpectedSize(values.size());
final List<Mutation> mutationsPertainingToIndex = indexes.hasNext() ? Lists.<Mutation>newArrayListWithExpectedSize(values.size()) : null;
generateMutations(tableRef, mutationTimestamp, serverTimestamp, values, mutationList, mutationsPertainingToIndex);
return new Iterator<Pair<PName, List<Mutation>>>() {
boolean isFirst = true;
Map<byte[], List<Mutation>> indexMutationsMap = null;
@Override
public boolean hasNext() {
return isFirst || indexes.hasNext();
}
@Override
public Pair<PName, List<Mutation>> next() {
if (isFirst) {
isFirst = false;
return new Pair<PName, List<Mutation>>(table.getPhysicalName(), mutationList);
}
PTable index = indexes.next();
List<Mutation> indexMutations = null;
try {
if (!mutationsPertainingToIndex.isEmpty()) {
if (table.isTransactional()) {
if (indexMutationsMap == null) {
PhoenixTxIndexMutationGenerator generator = newTxIndexMutationGenerator(table, indexList, mutationsPertainingToIndex.get(0).getAttributesMap());
try (HTableInterface htable = connection.getQueryServices().getTable(table.getPhysicalName().getBytes())) {
Collection<Pair<Mutation, byte[]>> allMutations = generator.getIndexUpdates(htable, mutationsPertainingToIndex.iterator());
indexMutationsMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
for (Pair<Mutation, byte[]> mutation : allMutations) {
List<Mutation> mutations = indexMutationsMap.get(mutation.getSecond());
if (mutations == null) {
mutations = Lists.newArrayList();
indexMutationsMap.put(mutation.getSecond(), mutations);
}
mutations.add(mutation.getFirst());
}
}
}
indexMutations = indexMutationsMap.get(index.getPhysicalName().getBytes());
} else {
indexMutations = IndexUtil.generateIndexData(table, index, values, mutationsPertainingToIndex, connection.getKeyValueBuilder(), connection);
}
}
// we may also have to include delete mutations for immutable tables if we are not processing all the tables in the mutations map
if (!sendAll) {
TableRef key = new TableRef(index);
MultiRowMutationState multiRowMutationState = mutations.remove(key);
if (multiRowMutationState != null) {
final List<Mutation> deleteMutations = Lists.newArrayList();
generateMutations(tableRef, mutationTimestamp, serverTimestamp, multiRowMutationState, deleteMutations, null);
if (indexMutations == null) {
indexMutations = deleteMutations;
} else {
indexMutations.addAll(deleteMutations);
}
}
}
} catch (SQLException | IOException e) {
throw new IllegalDataException(e);
}
return new Pair<PName, List<Mutation>>(index.getPhysicalName(), indexMutations == null ? Collections.<Mutation>emptyList() : indexMutations);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
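A hedged sketch of how a caller might walk the returned iterator (the helper name is illustrative); the first pair targets the data table's physical name and each subsequent pair targets an index table:
static void logMutationBatches(Iterator<Pair<PName, List<Mutation>>> batches) {
    while (batches.hasNext()) {
        Pair<PName, List<Mutation>> entry = batches.next();
        // entry.getFirst() is the physical table name; entry.getSecond() the mutations destined for it.
        System.out.println(entry.getFirst().getString() + " -> " + entry.getSecond().size() + " mutation(s)");
    }
}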
Use of org.apache.phoenix.schema.PName in project phoenix by apache.
The class MutationState, method toMutations: flattens the pending, uncommitted mutation map into (physical table name bytes, mutation list) pairs.
public Iterator<Pair<byte[], List<Mutation>>> toMutations(final boolean includeMutableIndexes, final Long tableTimestamp) {
final Iterator<Map.Entry<TableRef, MultiRowMutationState>> iterator = this.mutations.entrySet().iterator();
if (!iterator.hasNext()) {
return Collections.emptyIterator();
}
Long scn = connection.getSCN();
final long serverTimestamp = getTableTimestamp(tableTimestamp, scn);
final long mutationTimestamp = getMutationTimestamp(scn);
return new Iterator<Pair<byte[], List<Mutation>>>() {
private Map.Entry<TableRef, MultiRowMutationState> current = iterator.next();
private Iterator<Pair<byte[], List<Mutation>>> innerIterator = init();
private Iterator<Pair<byte[], List<Mutation>>> init() {
final Iterator<Pair<PName, List<Mutation>>> mutationIterator = addRowMutations(current.getKey(), current.getValue(), mutationTimestamp, serverTimestamp, includeMutableIndexes, true);
return new Iterator<Pair<byte[], List<Mutation>>>() {
@Override
public boolean hasNext() {
return mutationIterator.hasNext();
}
@Override
public Pair<byte[], List<Mutation>> next() {
Pair<PName, List<Mutation>> pair = mutationIterator.next();
return new Pair<byte[], List<Mutation>>(pair.getFirst().getBytes(), pair.getSecond());
}
@Override
public void remove() {
mutationIterator.remove();
}
};
}
@Override
public boolean hasNext() {
return innerIterator.hasNext() || iterator.hasNext();
}
@Override
public Pair<byte[], List<Mutation>> next() {
if (!innerIterator.hasNext()) {
current = iterator.next();
innerIterator = init();
}
return innerIterator.next();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
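A hedged usage sketch, assuming a connection with auto-commit off, uncommitted UPSERTs, and access to its MutationState via PhoenixConnection.getMutationState() (an assumption about the surrounding API, not shown in the source):
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
Iterator<Pair<byte[], List<Mutation>>> it = pconn.getMutationState().toMutations(true, null);
while (it.hasNext()) {
    Pair<byte[], List<Mutation>> entry = it.next();
    // entry.getFirst() is the physical HBase table name; entry.getSecond() the pending mutations for it.
    System.out.println(Bytes.toString(entry.getFirst()) + " -> " + entry.getSecond().size() + " mutation(s)");
}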