Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
From the class BaseResultIterators, the method getGuidePosts:
private GuidePostsInfo getGuidePosts() throws SQLException {
    if (!useStats() || !StatisticsUtil.isStatsEnabled(TableName.valueOf(physicalTableName))) {
        return GuidePostsInfo.NO_GUIDEPOST;
    }
    TreeSet<byte[]> whereConditions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (Pair<byte[], byte[]> where : context.getWhereConditionColumns()) {
        byte[] cf = where.getFirst();
        if (cf != null) {
            whereConditions.add(cf);
        }
    }
    PTable table = getTable();
    byte[] defaultCF = SchemaUtil.getEmptyColumnFamily(getTable());
    byte[] cf = null;
    if (!table.getColumnFamilies().isEmpty() && !whereConditions.isEmpty()) {
        for (Pair<byte[], byte[]> where : context.getWhereConditionColumns()) {
            byte[] whereCF = where.getFirst();
            if (Bytes.compareTo(defaultCF, whereCF) == 0) {
                cf = defaultCF;
                break;
            }
        }
        if (cf == null) {
            cf = context.getWhereConditionColumns().get(0).getFirst();
        }
    }
    if (cf == null) {
        cf = defaultCF;
    }
    GuidePostsKey key = new GuidePostsKey(physicalTableName, cf);
    return context.getConnection().getQueryServices().getTableStats(key);
}
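The column family used for the GuidePostsKey follows a simple precedence: the default (empty) column family if the WHERE clause references it, otherwise the first column family referenced in the WHERE clause, otherwise the default. A minimal standalone sketch of that precedence using only HBase's Bytes utility; the class and helper names are hypothetical, not Phoenix APIs:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;

public class GuidePostCfSketch {

    // Hypothetical helper mirroring the precedence used when building a GuidePostsKey:
    // prefer the default CF when the WHERE clause touches it, else the first WHERE CF,
    // else fall back to the default CF.
    static byte[] pickStatsColumnFamily(byte[] defaultCF, List<byte[]> whereCFs) {
        if (whereCFs.isEmpty()) {
            return defaultCF;
        }
        for (byte[] cf : whereCFs) {
            if (Bytes.compareTo(defaultCF, cf) == 0) {
                return defaultCF;
            }
        }
        return whereCFs.get(0);
    }

    public static void main(String[] args) {
        byte[] defaultCF = Bytes.toBytes("0"); // "0" assumed as the default CF name
        List<byte[]> whereCFs = Arrays.asList(Bytes.toBytes("B"), Bytes.toBytes("A"));
        // Default CF is not referenced in the WHERE columns, so the first WHERE CF wins.
        System.out.println(Bytes.toString(pickStatsColumnFamily(defaultCF, whereCFs))); // prints B
    }
}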
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
From the class ConnectionQueryServicesImpl, the method metaDataMutated:
/**
 * Ensures that metaData mutations are handled in the correct order
 */
private PMetaData metaDataMutated(PName tenantId, String tableName, long tableSeqNum, Mutator mutator) throws SQLException {
    synchronized (latestMetaDataLock) {
        throwConnectionClosedIfNullMetaData();
        PMetaData metaData = latestMetaData;
        PTable table;
        long endTime = System.currentTimeMillis() + DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS;
        while (true) {
            try {
                try {
                    table = metaData.getTableRef(new PTableKey(tenantId, tableName)).getTable();
                    /* If the table is at the prior sequence number, then we're good to go.
                     * We know if we've got this far, that the server validated the mutations,
                     * so we'd just need to wait until the other connection that mutated the same
                     * table is processed.
                     */
                    if (table.getSequenceNumber() + 1 == tableSeqNum) {
                        // TODO: assert that timeStamp is bigger than the table timeStamp?
                        mutator.mutate(metaData);
                        break;
                    } else if (table.getSequenceNumber() >= tableSeqNum) {
                        logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
                        break;
                    }
                } catch (TableNotFoundException e) {
                    // Table not cached yet; fall through and wait for it to show up.
                }
                long waitTime = endTime - System.currentTimeMillis();
                // We waited long enough - just remove the table from the cache
                // and the next time it's used it'll be pulled over from the server.
                if (waitTime <= 0) {
                    logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS / 1000) + " seconds for " + tableName);
                    // There will never be a parentTableName here, as that would only
                    // be non null for an index and we never add/remove columns from an index.
                    metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
                    break;
                }
                latestMetaDataLock.wait(waitTime);
            } catch (InterruptedException e) {
                // restore the interrupt status
                Thread.currentThread().interrupt();
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
            }
        }
        latestMetaData = metaData;
        latestMetaDataLock.notifyAll();
        return metaData;
    }
}
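The loop above is a wait/notify ordering pattern: the mutation is applied only when the cached PTable is exactly one sequence number behind it, a stale update is ignored, and otherwise the thread waits (bounded by a deadline) for the connection that performed the intervening mutation. A simplified, Phoenix-free sketch of the same idea; every name in it is hypothetical:

// Simplified illustration of ordered metadata mutation; not Phoenix code.
public class OrderedMutator {

    private final Object lock = new Object();
    private long currentSeqNum = 0;

    /** Applies a mutation stamped with newSeqNum, waiting until its predecessor has been applied. */
    public void mutate(long newSeqNum, Runnable mutation, long maxWaitMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + maxWaitMs;
        synchronized (lock) {
            while (currentSeqNum + 1 != newSeqNum) {
                if (currentSeqNum >= newSeqNum) {
                    return; // stale update: a newer version is already cached
                }
                long waitTime = deadline - System.currentTimeMillis();
                if (waitTime <= 0) {
                    return; // gave up waiting; the caller should invalidate and reload
                }
                lock.wait(waitTime);
            }
            mutation.run();
            currentSeqNum = newSeqNum;
            lock.notifyAll();
        }
    }
}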
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
From the class ConnectionQueryServicesImpl, the method dropTable:
@Override
public MetaDataMutationResult dropTable(final List<Mutation> tableMetaData, final PTableType tableType, final boolean cascade) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes);
    final MetaDataMutationResult result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropTableRequest.Builder builder = DropTableRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setTableType(tableType.getSerializedValue());
            builder.setCascade(cascade);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropTable(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    final MutationCode code = result.getMutationCode();
    switch (code) {
        case TABLE_ALREADY_EXISTS:
            ReadOnlyProps props = this.getProps();
            boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
            PTable table = result.getTable();
            if (dropMetadata) {
                flushParentPhysicalTable(table);
                dropTables(result.getTableNamesToDelete());
            } else {
                invalidateTableStats(result.getTableNamesToDelete());
            }
            long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData);
            if (tableType == PTableType.TABLE) {
                byte[] physicalName = table.getPhysicalName().getBytes();
                ensureViewIndexTableDropped(physicalName, timestamp);
                ensureLocalIndexTableDropped(physicalName, timestamp);
                tableStatsCache.invalidateAll(table);
            }
            break;
        default:
            break;
    }
    return result;
}
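dropTable here is the service-side leg of a DROP TABLE DDL statement; in normal use it is reached through the Phoenix JDBC driver rather than called directly. A minimal, hedged usage example (the JDBC URL and table name are placeholders; CASCADE corresponds to the cascade flag above):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropTableExample {

    public static void main(String[] args) throws SQLException {
        // "localhost" stands in for a real ZooKeeper quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // CASCADE also drops views defined on the table.
            stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE CASCADE");
        }
    }
}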
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
From the class ConnectionQueryServicesImpl, the method getSaltBuckets:
private static int getSaltBuckets(TableAlreadyExistsException e) {
    PTable table = e.getTable();
    Integer sequenceSaltBuckets = table == null ? null : table.getBucketNum();
    return sequenceSaltBuckets == null ? 0 : sequenceSaltBuckets;
}
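This helper recovers the salt bucket count of a table that already exists, null-safe and defaulting to 0 for unsalted or unknown tables. A sketch of the kind of catch path it supports, assuming a hypothetical createSystemSequenceTable helper; only getTable() and getBucketNum() are taken from the code above:

import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableAlreadyExistsException;

// Hypothetical caller sketch: if the table already exists, adopt its salt bucket
// count instead of the requested one.
abstract class SequenceTableBootstrap {

    /** Hypothetical helper that would issue the CREATE TABLE for the sequence table. */
    abstract void createSystemSequenceTable(int requestedSaltBuckets) throws Exception;

    int ensureSequenceTable(int requestedSaltBuckets) throws Exception {
        try {
            createSystemSequenceTable(requestedSaltBuckets);
            return requestedSaltBuckets;
        } catch (TableAlreadyExistsException e) {
            // getTable() may be null, and getBucketNum() is null for unsalted tables,
            // so mirror getSaltBuckets() and fall back to 0.
            PTable existing = e.getTable();
            Integer buckets = existing == null ? null : existing.getBucketNum();
            return buckets == null ? 0 : buckets;
        }
    }
}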
Use of org.apache.phoenix.schema.PTable in project phoenix by apache.
From the class ConnectionQueryServicesImpl, the method ensureViewIndexTableCreated:
private void ensureViewIndexTableCreated(PName tenantId, byte[] physicalIndexTableName, long timestamp, boolean isNamespaceMapped) throws SQLException {
    String name = Bytes.toString(SchemaUtil.getParentTableNameFromIndexTable(physicalIndexTableName, MetaDataUtil.VIEW_INDEX_TABLE_PREFIX)).replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR);
    PTable table = getTable(tenantId, name, timestamp);
    ensureViewIndexTableCreated(table, timestamp, isNamespaceMapped);
}
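The string handling above strips the view-index prefix from the physical HBase table name and maps the namespace separator back to the schema separator before resolving the parent PTable. A standalone model of that derivation; the constant values below are assumptions about Phoenix's conventions rather than references to the real constants:

// Standalone model of the parent-name derivation; all constants here are assumed values.
public class ViewIndexParentName {

    static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_"; // assumed value
    static final String NAMESPACE_SEPARATOR = ":";         // assumed value
    static final String NAME_SEPARATOR = ".";               // assumed value

    static String parentTableName(String physicalIndexTableName) {
        // Drop the view-index prefix, then map the namespace separator to the schema separator.
        String withoutPrefix = physicalIndexTableName.startsWith(VIEW_INDEX_TABLE_PREFIX)
                ? physicalIndexTableName.substring(VIEW_INDEX_TABLE_PREFIX.length())
                : physicalIndexTableName;
        return withoutPrefix.replace(NAMESPACE_SEPARATOR, NAME_SEPARATOR);
    }

    public static void main(String[] args) {
        // A namespace-mapped view-index table maps back to its parent table name.
        System.out.println(parentTableName("_IDX_MY_SCHEMA:MY_TABLE")); // MY_SCHEMA.MY_TABLE
    }
}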