Search in sources :

Example 6 with PName

use of org.apache.phoenix.schema.PName in project phoenix by apache.

From class MetaDataEndpointImpl, method getFunction:

/**
 * Reconstructs a {@link PFunction} from the metadata rows produced by the given scanner.
 * <p>
 * The first row read is the function header row; its key encodes the tenant id and
 * function name. Subsequent rows (one per argument, {@code numArgs} of them) describe
 * the function arguments. When {@code isReplace} is true, a Delete for each consumed
 * row is appended to {@code deleteMutationsForReplace} so the old definition can be
 * removed atomically with the replacement.
 *
 * @param scanner scanner positioned at the function's metadata rows
 * @param isReplace whether this lookup is part of a CREATE OR REPLACE FUNCTION
 * @param clientTimeStamp client-supplied timestamp used to compute delete timestamps
 * @param deleteMutationsForReplace out-parameter collecting Deletes when isReplace
 * @return the reconstructed function, or null if the scanner yields no rows
 * @throws IllegalStateException if the required CLASS_NAME or NUM_ARGS cells are missing
 */
private PFunction getFunction(RegionScanner scanner, final boolean isReplace, long clientTimeStamp, List<Mutation> deleteMutationsForReplace) throws IOException, SQLException {
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    if (results.isEmpty()) {
        return null;
    }
    Cell[] functionKeyValues = new Cell[FUNCTION_KV_COLUMNS.size()];
    Cell[] functionArgKeyValues = new Cell[FUNCTION_ARG_KV_COLUMNS.size()];
    // Create PFunction based on KeyValues from scan
    Cell keyValue = results.get(0);
    byte[] keyBuffer = keyValue.getRowArray();
    int keyLength = keyValue.getRowLength();
    int keyOffset = keyValue.getRowOffset();
    long currentTimeMillis = EnvironmentEdgeManager.currentTimeMillis();
    if (isReplace) {
        // Delete just below the client timestamp (or just below "now" for LATEST) so the
        // replacement written at the client timestamp is not itself deleted.
        long deleteTimeStamp = clientTimeStamp == HConstants.LATEST_TIMESTAMP ? currentTimeMillis - 1 : (keyValue.getTimestamp() < clientTimeStamp ? clientTimeStamp - 1 : keyValue.getTimestamp());
        deleteMutationsForReplace.add(new Delete(keyBuffer, keyOffset, keyLength, deleteTimeStamp));
    }
    // Row key layout (assumed): <tenantId> 0x00 <functionName> -- TODO confirm against writer
    PName tenantId = newPName(keyBuffer, keyOffset, keyLength);
    int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length;
    if (tenantIdLength == 0) {
        tenantId = null;
    }
    PName functionName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1);
    int functionNameLength = functionName.getBytes().length + 1;
    int offset = tenantIdLength + functionNameLength + 1;
    long timeStamp = keyValue.getTimestamp();
    // Merge-join the sorted cells of the header row against the sorted list of expected
    // qualifiers, filling functionKeyValues positionally (null for absent columns).
    int i = 0;
    int j = 0;
    while (i < results.size() && j < FUNCTION_KV_COLUMNS.size()) {
        Cell kv = results.get(i);
        Cell searchKv = FUNCTION_KV_COLUMNS.get(j);
        int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), searchKv.getQualifierArray(), searchKv.getQualifierOffset(), searchKv.getQualifierLength());
        if (cmp == 0) {
            // Find max timestamp of table
            timeStamp = Math.max(timeStamp, kv.getTimestamp());
            // header row
            functionKeyValues[j++] = kv;
            i++;
        } else if (cmp > 0) {
            timeStamp = Math.max(timeStamp, kv.getTimestamp());
            functionKeyValues[j++] = null;
        } else {
            // shouldn't happen - means unexpected KV in system table header row
            i++;
        }
    }
    // CLASS_NAME and NUM_ARGS are required; JAR_PATH and RETURN_TYPE are optional.
    if (functionKeyValues[CLASS_NAME_INDEX] == null || functionKeyValues[NUM_ARGS_INDEX] == null) {
        throw new IllegalStateException("Didn't find expected key values for function row in metadata row");
    }
    Cell classNameKv = functionKeyValues[CLASS_NAME_INDEX];
    PName className = newPName(classNameKv.getValueArray(), classNameKv.getValueOffset(), classNameKv.getValueLength());
    Cell jarPathKv = functionKeyValues[JAR_PATH_INDEX];
    PName jarPath = null;
    if (jarPathKv != null) {
        jarPath = newPName(jarPathKv.getValueArray(), jarPathKv.getValueOffset(), jarPathKv.getValueLength());
    }
    Cell numArgsKv = functionKeyValues[NUM_ARGS_INDEX];
    int numArgs = PInteger.INSTANCE.getCodec().decodeInt(numArgsKv.getValueArray(), numArgsKv.getValueOffset(), SortOrder.getDefault());
    Cell returnTypeKv = functionKeyValues[RETURN_TYPE_INDEX];
    PName returnType = returnTypeKv == null ? null : newPName(returnTypeKv.getValueArray(), returnTypeKv.getValueOffset(), returnTypeKv.getValueLength());
    List<FunctionArgument> arguments = Lists.newArrayListWithExpectedSize(numArgs);
    // One scanner row per declared argument; stop early if the scanner runs dry.
    for (int k = 0; k < numArgs; k++) {
        results.clear();
        scanner.next(results);
        if (results.isEmpty()) {
            break;
        }
        Cell typeKv = results.get(0);
        if (isReplace) {
            long deleteTimeStamp = clientTimeStamp == HConstants.LATEST_TIMESTAMP ? currentTimeMillis - 1 : (typeKv.getTimestamp() < clientTimeStamp ? clientTimeStamp - 1 : typeKv.getTimestamp());
            deleteMutationsForReplace.add(new Delete(typeKv.getRowArray(), typeKv.getRowOffset(), typeKv.getRowLength(), deleteTimeStamp));
        }
        int typeKeyLength = typeKv.getRowLength();
        // Arg row key (assumed): <header prefix> <typeName> 0x00 <2-byte argPosition> -- TODO confirm
        PName typeName = newPName(typeKv.getRowArray(), typeKv.getRowOffset() + offset, typeKeyLength - offset - 3);
        int argPositionOffset = offset + typeName.getBytes().length + 1;
        short argPosition = Bytes.toShort(typeKv.getRowArray(), typeKv.getRowOffset() + argPositionOffset, typeKeyLength - argPositionOffset);
        addArgumentToFunction(results, functionName, typeName, functionArgKeyValues, arguments, argPosition);
    }
    // Arguments may arrive in any order; sort by declared position.
    Collections.sort(arguments, new Comparator<FunctionArgument>() {

        @Override
        public int compare(FunctionArgument o1, FunctionArgument o2) {
            return o1.getArgPosition() - o2.getArgPosition();
        }
    });
    // FIX: returnType may be null (RETURN_TYPE is optional, see above); the original
    // unconditionally called returnType.getString(), which would NPE.
    return new PFunction(tenantId, functionName.getString(), arguments, returnType == null ? null : returnType.getString(), className.getString(), jarPath == null ? null : jarPath.getString(), timeStamp);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PFunction(org.apache.phoenix.parse.PFunction) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PName(org.apache.phoenix.schema.PName) Cell(org.apache.hadoop.hbase.Cell) FunctionArgument(org.apache.phoenix.parse.PFunction.FunctionArgument)

Example 7 with PName

use of org.apache.phoenix.schema.PName in project phoenix by apache.

From class WhereOptimizer, method pushKeyExpressionsToScan:

// For testing so that the extractedNodes can be verified
/**
 * Analyzes the WHERE clause and pushes every condition that constrains the row key
 * into the scan as key ranges (set on the context via {@link ScanRanges}), returning
 * the residual expression that must still be evaluated by a filter (or null if the
 * entire WHERE clause was absorbed into the scan).
 * <p>
 * Leading fixed key parts (salt byte, view-index id, tenant id) are prepended both to
 * the CNF slot list and to any min/max range so they line up with the row key schema.
 *
 * @param context statement context; receives the computed ScanRanges (side effect)
 * @param statement source statement, consulted for SKIP_SCAN / RANGE_SCAN hints
 * @param whereClause the WHERE expression, possibly null
 * @param extractNodes out-parameter collecting sub-expressions fully absorbed into
 *        the scan (allocated here if null); exposed for testing
 * @return residual WHERE expression, original whereClause, or null
 */
// For testing so that the extractedNodes can be verified
public static Expression pushKeyExpressionsToScan(StatementContext context, FilterableStatement statement, Expression whereClause, Set<Expression> extractNodes) throws SQLException {
    PName tenantId = context.getConnection().getTenantId();
    byte[] tenantIdBytes = null;
    PTable table = context.getCurrentTable().getTable();
    Integer nBuckets = table.getBucketNum();
    boolean isSalted = nBuckets != null;
    RowKeySchema schema = table.getRowKeySchema();
    boolean isMultiTenant = tenantId != null && table.isMultiTenant();
    boolean isSharedIndex = table.getViewIndexId() != null;
    if (isMultiTenant) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, tenantId, isSharedIndex);
    }
    // No WHERE clause and no implicit key constraint (tenant/view index): scan everything.
    if (whereClause == null && (tenantId == null || !table.isMultiTenant()) && table.getViewIndexId() == null) {
        context.setScanRanges(ScanRanges.EVERYTHING);
        return whereClause;
    }
    // WHERE FALSE (or NULL): provably empty result, scan nothing.
    if (LiteralExpression.isBooleanFalseOrNull(whereClause)) {
        context.setScanRanges(ScanRanges.NOTHING);
        return null;
    }
    KeyExpressionVisitor visitor = new KeyExpressionVisitor(context, table);
    KeyExpressionVisitor.KeySlots keySlots = null;
    if (whereClause != null) {
        // TODO:: When we only have one where clause, the keySlots returns as a single slot object,
        // instead of an array of slots for the corresponding column. Change the behavior so it
        // becomes consistent.
        keySlots = whereClause.accept(visitor);
        // Visitor found nothing pushable and there is no implicit key constraint either.
        if (keySlots == null && (tenantId == null || !table.isMultiTenant()) && table.getViewIndexId() == null) {
            context.setScanRanges(ScanRanges.EVERYTHING);
            return whereClause;
        }
        // for unequal lengths.
        if (keySlots == KeyExpressionVisitor.EMPTY_KEY_SLOTS) {
            context.setScanRanges(ScanRanges.NOTHING);
            return null;
        }
    }
    if (keySlots == null) {
        keySlots = KeyExpressionVisitor.EMPTY_KEY_SLOTS;
    }
    if (extractNodes == null) {
        extractNodes = new HashSet<Expression>(table.getPKColumns().size());
    }
    int pkPos = 0;
    int nPKColumns = table.getPKColumns().size();
    int[] slotSpan = new int[nPKColumns];
    List<List<KeyRange>> cnf = Lists.newArrayListWithExpectedSize(schema.getMaxFields());
    KeyRange minMaxRange = keySlots.getMinMaxRange();
    if (minMaxRange == null) {
        minMaxRange = KeyRange.EVERYTHING_RANGE;
    }
    boolean hasMinMaxRange = (minMaxRange != KeyRange.EVERYTHING_RANGE);
    int minMaxRangeOffset = 0;
    byte[] minMaxRangePrefix = null;
    boolean hasViewIndex = table.getViewIndexId() != null;
    if (hasMinMaxRange) {
        // Prefix buffer sized for salt byte + tenant id (+ separator) + view index id,
        // whichever of those apply; filled in below as each leading part is added.
        int minMaxRangeSize = (isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0) + (isMultiTenant ? tenantIdBytes.length + 1 : 0) + (hasViewIndex ? MetaDataUtil.getViewIndexIdDataType().getByteSize() : 0);
        minMaxRangePrefix = new byte[minMaxRangeSize];
    }
    Iterator<KeyExpressionVisitor.KeySlot> iterator = keySlots.iterator();
    // Add placeholder for salt byte ranges
    if (isSalted) {
        cnf.add(SALT_PLACEHOLDER);
        if (hasMinMaxRange) {
            System.arraycopy(SALT_PLACEHOLDER.get(0).getLowerRange(), 0, minMaxRangePrefix, minMaxRangeOffset, SaltingUtil.NUM_SALTING_BYTES);
            minMaxRangeOffset += SaltingUtil.NUM_SALTING_BYTES;
        }
        // Increment the pkPos, as the salt column is in the row schema
        // Do not increment the iterator, though, as there will never be
        // an expression in the keySlots for the salt column
        pkPos++;
    }
    // that different indexes don't interleave.
    if (hasViewIndex) {
        byte[] viewIndexBytes = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
        KeyRange indexIdKeyRange = KeyRange.getKeyRange(viewIndexBytes);
        cnf.add(singletonList(indexIdKeyRange));
        if (hasMinMaxRange) {
            System.arraycopy(viewIndexBytes, 0, minMaxRangePrefix, minMaxRangeOffset, viewIndexBytes.length);
            minMaxRangeOffset += viewIndexBytes.length;
        }
        pkPos++;
    }
    // Add tenant data isolation for tenant-specific tables
    if (isMultiTenant) {
        KeyRange tenantIdKeyRange = KeyRange.getKeyRange(tenantIdBytes);
        cnf.add(singletonList(tenantIdKeyRange));
        if (hasMinMaxRange) {
            System.arraycopy(tenantIdBytes, 0, minMaxRangePrefix, minMaxRangeOffset, tenantIdBytes.length);
            minMaxRangeOffset += tenantIdBytes.length;
            Field f = schema.getField(pkPos);
            // Variable-width tenant id needs its separator byte in the prefix too.
            if (!f.getDataType().isFixedWidth()) {
                minMaxRangePrefix[minMaxRangeOffset] = SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), tenantIdBytes.length == 0, f);
                minMaxRangeOffset++;
            }
        }
        pkPos++;
    }
    // range with the other range.
    if (hasMinMaxRange) {
        minMaxRange = minMaxRange.prependRange(minMaxRangePrefix, 0, minMaxRangeOffset);
    }
    boolean forcedSkipScan = statement.getHint().hasHint(Hint.SKIP_SCAN);
    boolean forcedRangeScan = statement.getHint().hasHint(Hint.RANGE_SCAN);
    boolean hasUnboundedRange = false;
    boolean hasMultiRanges = false;
    boolean hasRangeKey = false;
    boolean stopExtracting = false;
    boolean useSkipScan = false;
    // Concat byte arrays of literals to form scan start key
    while (iterator.hasNext()) {
        KeyExpressionVisitor.KeySlot slot = iterator.next();
        // If the slot is null this means we have no entry for this pk position.
        if (slot == null || slot.getKeyRanges().isEmpty()) {
            continue;
        }
        // A gap in PK positions: fill with EVERYTHING ranges. Without a forced skip
        // scan, a gap means later conditions can no longer be extracted into the key.
        if (slot.getPKPosition() != pkPos) {
            if (!forcedSkipScan) {
                stopExtracting = true;
            } else {
                useSkipScan |= !stopExtracting && !forcedRangeScan && forcedSkipScan;
            }
            for (int i = pkPos; i < slot.getPKPosition(); i++) {
                cnf.add(Collections.singletonList(KeyRange.EVERYTHING_RANGE));
            }
        }
        KeyPart keyPart = slot.getKeyPart();
        slotSpan[cnf.size()] = slot.getPKSpan() - 1;
        pkPos = slot.getPKPosition() + slot.getPKSpan();
        // Skip span-1 slots as we skip one at the top of the loop
        for (int i = 1; i < slot.getPKSpan() && iterator.hasNext(); i++) {
            iterator.next();
        }
        List<KeyRange> keyRanges = slot.getKeyRanges();
        cnf.add(keyRanges);
        // TODO: when stats are available, we may want to use a skip scan if the
        // cardinality of this slot is low.
        /*
             *  Stop extracting nodes once we encounter:
             *  1) An unbound range unless we're forcing a skip scan and havn't encountered
             *     a multi-column span. Even if we're trying to force a skip scan, we can't
             *     execute it over a multi-column span.
             *  2) A non range key as we can extract the first one, but further ones need
             *     to be evaluated in a filter.
             */
        stopExtracting |= (hasUnboundedRange && !forcedSkipScan) || (hasRangeKey && forcedRangeScan);
        useSkipScan |= !stopExtracting && !forcedRangeScan && (keyRanges.size() > 1 || hasRangeKey);
        for (int i = 0; (!hasUnboundedRange || !hasRangeKey) && i < keyRanges.size(); i++) {
            KeyRange range = keyRanges.get(i);
            if (range.isUnbound()) {
                hasUnboundedRange = hasRangeKey = true;
            } else if (!range.isSingleKey()) {
                hasRangeKey = true;
            }
        }
        hasMultiRanges |= keyRanges.size() > 1;
        // We cannot extract if we have multiple ranges and are forcing a range scan.
        stopExtracting |= forcedRangeScan && hasMultiRanges;
        // that, so must filter on the remaining conditions (see issue #467).
        if (!stopExtracting) {
            List<Expression> nodesToExtract = keyPart.getExtractNodes();
            extractNodes.addAll(nodesToExtract);
        }
    }
    // If we have fully qualified point keys with multi-column spans (i.e. RVC),
    // we can still use our skip scan. The ScanRanges.create() call will explode
    // out the keys.
    slotSpan = Arrays.copyOf(slotSpan, cnf.size());
    ScanRanges scanRanges = ScanRanges.create(schema, cnf, slotSpan, minMaxRange, nBuckets, useSkipScan, table.getRowTimestampColPos());
    context.setScanRanges(scanRanges);
    // Strip the extracted nodes from the WHERE clause; what remains (if anything)
    // must be evaluated server-side as a filter.
    if (whereClause == null) {
        return null;
    } else {
        return whereClause.accept(new RemoveExtractedNodesVisitor(extractNodes));
    }
}
Also used : KeyRange(org.apache.phoenix.query.KeyRange) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) Field(org.apache.phoenix.schema.ValueSchema.Field) BaseExpression(org.apache.phoenix.expression.BaseExpression) BaseTerminalExpression(org.apache.phoenix.expression.BaseTerminalExpression) Expression(org.apache.phoenix.expression.Expression) LikeExpression(org.apache.phoenix.expression.LikeExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) InListExpression(org.apache.phoenix.expression.InListExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) RowValueConstructorExpression(org.apache.phoenix.expression.RowValueConstructorExpression) IsNullExpression(org.apache.phoenix.expression.IsNullExpression) AndExpression(org.apache.phoenix.expression.AndExpression) ComparisonExpression(org.apache.phoenix.expression.ComparisonExpression) OrExpression(org.apache.phoenix.expression.OrExpression) PName(org.apache.phoenix.schema.PName) Collections.singletonList(java.util.Collections.singletonList) List(java.util.List) ArrayList(java.util.ArrayList)

Example 8 with PName

use of org.apache.phoenix.schema.PName in project phoenix by apache.

From class MetaDataEndpointImpl, method getSchema:

/**
 * Reads the next row from the scanner and materializes it as a {@link PSchema}.
 * The row key is decoded as a tenant-id segment followed by a separator byte and
 * the schema name. Returns null when the scanner yields no cells.
 */
private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) throws IOException, SQLException {
    List<Cell> cells = Lists.newArrayList();
    scanner.next(cells);
    if (cells.isEmpty()) {
        return null;
    }
    Cell firstCell = cells.get(0);
    byte[] rowBuf = firstCell.getRowArray();
    int rowLength = firstCell.getRowLength();
    int rowOffset = firstCell.getRowOffset();
    // Leading tenant-id component; an empty one is normalized to null.
    PName tenantId = newPName(rowBuf, rowOffset, rowLength);
    int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length;
    if (tenantIdLength == 0) {
        tenantId = null;
    }
    // Schema name follows the tenant id and its single separator byte.
    PName schemaName = newPName(rowBuf, rowOffset + tenantIdLength + 1, rowLength - tenantIdLength - 1);
    long timeStamp = firstCell.getTimestamp();
    return new PSchema(schemaName.getString(), timeStamp);
}
Also used : PName(org.apache.phoenix.schema.PName) PSchema(org.apache.phoenix.parse.PSchema) Cell(org.apache.hadoop.hbase.Cell) PTinyint(org.apache.phoenix.schema.types.PTinyint) PSmallint(org.apache.phoenix.schema.types.PSmallint)

Example 9 with PName

use of org.apache.phoenix.schema.PName in project phoenix by apache.

From class UpgradeUtil, method addSaltByte:

@SuppressWarnings("deprecation")
private static KeyValue addSaltByte(KeyValue keyValue, int nSaltBuckets) {
    byte[] buf = keyValue.getBuffer();
    int length = keyValue.getRowLength();
    int offset = keyValue.getRowOffset();
    boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0;
    if (!isViewSeq && nSaltBuckets == 0) {
        return null;
    }
    byte[] newBuf;
    if (isViewSeq) {
        // We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it
        if (buf[length - 1] == 0) {
            // Global indexes on views have trailing null byte
            length--;
        }
        byte[][] rowKeyMetaData = new byte[3][];
        SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData);
        byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        byte[] unprefixedSchemaName = new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length];
        System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, unprefixedSchemaName, 0, unprefixedSchemaName.length);
        byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        PName physicalName = PNameFactory.newName(unprefixedSchemaName);
        // Reformulate key based on correct data
        newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? null : Bytes.toString(tableName), physicalName, nSaltBuckets, false).getKey();
    } else {
        newBuf = new byte[length + 1];
        System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length);
        newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets);
    }
    return new KeyValue(newBuf, 0, newBuf.length, buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(), buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(), keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()), buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) PName(org.apache.phoenix.schema.PName)

Example 10 with PName

use of org.apache.phoenix.schema.PName in project phoenix by apache.

From class UpgradeUtil, method upgradeDescVarLengthRowKeys:

/**
 * Upgrades a table (or view) whose row keys contain DESC variable-length columns.
 * For physical tables a snapshot is taken first so the upgrade can be rolled back
 * on failure. The table and all views over it are then marked ROW_KEY_ORDER_OPTIMIZABLE
 * in SYSTEM.CATALOG and evicted from the server-side metadata cache.
 *
 * @param upgradeConn connection used to run the upgrading count(*) query
 * @param globalConn global (non-tenant) connection for catalog updates and admin ops
 * @param schemaName schema of the table being upgraded (may be null)
 * @param tableName name of the table being upgraded
 * @param isTable true for a physical table (snapshot + view discovery); false for a view
 * @param bypassUpgrade when true, skip the snapshot and the data-rewriting query,
 *        only performing the catalog marking
 */
private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String schemaName, String tableName, boolean isTable, boolean bypassUpgrade) throws SQLException {
    String physicalName = SchemaUtil.getTableName(schemaName, tableName);
    long currentTime = System.currentTimeMillis();
    String snapshotName = physicalName + "_" + currentTime;
    HBaseAdmin admin = null;
    if (isTable && !bypassUpgrade) {
        admin = globalConn.getQueryServices().getAdmin();
    }
    boolean restoreSnapshot = false;
    boolean success = false;
    try {
        if (isTable && !bypassUpgrade) {
            String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
            System.out.println(msg);
            logger.info(msg);
            // Table must be disabled to snapshot it consistently.
            admin.disableTable(physicalName);
            admin.snapshot(snapshotName, physicalName);
            admin.enableTable(physicalName);
            restoreSnapshot = true;
        }
        String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName);
        String tenantInfo = "";
        PName tenantId = PName.EMPTY_NAME;
        if (upgradeConn.getTenantId() != null) {
            tenantId = upgradeConn.getTenantId();
            tenantInfo = " for tenant " + tenantId.getString();
        }
        String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
        System.out.println(msg);
        logger.info(msg);
        ResultSet rs;
        if (!bypassUpgrade) {
            rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
            // Run query
            rs.next();
        }
        // Flat list of (tenantId, schemaName, tableName) triples to mark as upgraded.
        List<String> tableNames = Lists.newArrayListWithExpectedSize(1024);
        tableNames.add(tenantId == PName.EMPTY_NAME ? null : tenantId.getString());
        tableNames.add(schemaName);
        tableNames.add(tableName);
        // Find views to mark as upgraded
        if (isTable) {
            String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + "FROM SYSTEM.CATALOG\n" + "WHERE COLUMN_NAME IS NULL\n" + "AND COLUMN_FAMILY = '" + physicalName + "'" + "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
            rs = globalConn.createStatement().executeQuery(query);
            while (rs.next()) {
                tableNames.add(rs.getString(1));
                tableNames.add(rs.getString(2));
                tableNames.add(rs.getString(3));
            }
        }
        // Mark the table and views as upgraded now
        for (int i = 0; i < tableNames.size(); i += 3) {
            String theTenantId = tableNames.get(i);
            String theSchemaName = tableNames.get(i + 1);
            String theTableName = tableNames.get(i + 2);
            globalConn.createStatement().execute("UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " (" + PhoenixDatabaseMetaData.TENANT_ID + "," + PhoenixDatabaseMetaData.TABLE_SCHEM + "," + PhoenixDatabaseMetaData.TABLE_NAME + "," + MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN" + ") VALUES (" + "'" + (theTenantId == null ? StringUtil.EMPTY_STRING : theTenantId) + "'," + "'" + (theSchemaName == null ? StringUtil.EMPTY_STRING : theSchemaName) + "'," + "'" + theTableName + "'," + "TRUE)");
        }
        globalConn.commit();
        for (int i = 0; i < tableNames.size(); i += 3) {
            String theTenantId = tableNames.get(i);
            String theSchemaName = tableNames.get(i + 1);
            String theTableName = tableNames.get(i + 2);
            // FIX: the original passed Bytes.toBytes(schemaName) (the method parameter)
            // here instead of theSchemaName, evicting the wrong cache entry for views
            // living in a different schema than the upgraded physical table.
            globalConn.getQueryServices().clearTableFromCache(theTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theTenantId), theSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theSchemaName), Bytes.toBytes(theTableName), HConstants.LATEST_TIMESTAMP);
        }
        success = true;
        msg = "Completed upgrade of " + escapedTableName + tenantInfo;
        System.out.println(msg);
        logger.info(msg);
    } catch (Exception e) {
        logger.error("Exception during upgrade of " + physicalName + ":", e);
    } finally {
        boolean restored = false;
        try {
            // Roll back from the pre-upgrade snapshot if the upgrade did not complete.
            if (!success && restoreSnapshot) {
                admin.disableTable(physicalName);
                admin.restoreSnapshot(snapshotName, false);
                admin.enableTable(physicalName);
                String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
                System.out.println(msg);
                logger.info(msg);
            }
            restored = true;
        } catch (Exception e) {
            logger.warn("Unable to restore snapshot " + snapshotName + " after failed upgrade", e);
        } finally {
            try {
                // Only delete the snapshot once we know we no longer need it for recovery.
                if (restoreSnapshot && restored) {
                    admin.deleteSnapshot(snapshotName);
                }
            } catch (Exception e) {
                logger.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
            } finally {
                try {
                    if (admin != null) {
                        admin.close();
                    }
                } catch (IOException e) {
                    logger.warn("Unable to close admin after upgrade:", e);
                }
            }
        }
    }
}
Also used : HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) PName(org.apache.phoenix.schema.PName) ResultSet(java.sql.ResultSet) IOException(java.io.IOException) SnapshotCreationException(org.apache.hadoop.hbase.snapshot.SnapshotCreationException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) SQLException(java.sql.SQLException) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException)

Aggregations

PName (org.apache.phoenix.schema.PName)45 PTable (org.apache.phoenix.schema.PTable)26 PColumn (org.apache.phoenix.schema.PColumn)18 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)13 Test (org.junit.Test)10 SQLException (java.sql.SQLException)9 Cell (org.apache.hadoop.hbase.Cell)9 PColumnImpl (org.apache.phoenix.schema.PColumnImpl)9 PTableKey (org.apache.phoenix.schema.PTableKey)9 TableRef (org.apache.phoenix.schema.TableRef)8 PSmallint (org.apache.phoenix.schema.types.PSmallint)8 Connection (java.sql.Connection)7 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)7 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)7 PTinyint (org.apache.phoenix.schema.types.PTinyint)7 List (java.util.List)6 Mutation (org.apache.hadoop.hbase.client.Mutation)6 Scan (org.apache.hadoop.hbase.client.Scan)6 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)6 IOException (java.io.IOException)5