Example 96 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ConnectionQueryServicesImpl, method incrementSequenceValues.

@SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        Sequence newSequences = new Sequence(key);
        // Reuse the cached Sequence if another thread already registered one for this key.
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, increment the sequences
        List<Increment> incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
            } catch (EmptySequenceCacheException e) {
                // Cache exhausted: remember which caller slot this sequence
                // occupies so the batched result can be mapped back, and queue
                // a server-side Increment.
                indexes[toIncrementList.size()] = i;
                toIncrementList.add(sequence);
                Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
                incrementBatch.add(inc);
            } catch (SQLException e) {
                exceptions[i] = e;
            }
        }
        if (toIncrementList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(incrementBatch);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toIncrementList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                long numToAllocate = Bytes.toLong(incrementBatch.get(i).getAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE));
                values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used: EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException), SQLException(java.sql.SQLException), Sequence(org.apache.phoenix.schema.Sequence), SequenceAllocation(org.apache.phoenix.schema.SequenceAllocation), IOException(java.io.IOException), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), PTinyint(org.apache.phoenix.schema.types.PTinyint), PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint), MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), Result(org.apache.hadoop.hbase.client.Result), SequenceKey(org.apache.phoenix.schema.SequenceKey), Increment(org.apache.hadoop.hbase.client.Increment), SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
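
A minimal, self-contained sketch of the same batching pattern: one Increment per row submitted through a single HTableInterface.batch() round trip, with each returned Result carrying the post-increment value. It assumes the pre-2.0 HBase client API; the table and column coordinates are hypothetical (the real code above targets Phoenix's SYSTEM.SEQUENCE table and converts errors via ServerUtil).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchIncrementSketch {
    // Hypothetical coordinates; substitute your own table/family/qualifier.
    private static final byte[] TABLE = Bytes.toBytes("MY_SEQUENCES");
    private static final byte[] FAMILY = Bytes.toBytes("0");
    private static final byte[] QUALIFIER = Bytes.toBytes("CURRENT_VALUE");

    public static long[] incrementAll(Configuration conf, byte[][] rowKeys) throws IOException {
        HConnection connection = HConnectionManager.createConnection(conf);
        HTableInterface table = connection.getTable(TABLE);
        try {
            // Build one Increment per row and send them in a single batch RPC.
            List<Increment> batch = new ArrayList<Increment>(rowKeys.length);
            for (byte[] rowKey : rowKeys) {
                batch.add(new Increment(rowKey).addColumn(FAMILY, QUALIFIER, 1L));
            }
            Object[] results = new Object[batch.size()];
            table.batch(batch, results);
            // Each slot holds a Result with the post-increment cell value.
            long[] values = new long[results.length];
            for (int i = 0; i < results.length; i++) {
                values[i] = Bytes.toLong(((Result) results[i]).getValue(FAMILY, QUALIFIER));
            }
            return values;
        } catch (InterruptedException e) {
            // Restore the interrupt status, as the example above does.
            Thread.currentThread().interrupt();
            throw new IOException(e);
        } finally {
            table.close();
            connection.close();
        }
    }
}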

Example 97 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ConnectionQueryServicesImpl, method dropSequence.

@Override
public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, drop the sequence
        Append append = sequence.dropSequence(timestamp);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        try {
            Result result = htable.append(append);
            return sequence.dropSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
Also used: Append(org.apache.hadoop.hbase.client.Append), SequenceKey(org.apache.phoenix.schema.SequenceKey), Sequence(org.apache.phoenix.schema.Sequence), IOException(java.io.IOException), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), Result(org.apache.hadoop.hbase.client.Result)
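
dropSequence() leans on HBase's Append as an atomic read-modify-write: the region server applies the mutation and hands back the updated cell in the same call, so no follow-up Get is needed. A minimal sketch of that usage follows; the table and column coordinates are hypothetical stand-ins (the real code targets SYSTEM.SEQUENCE).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
    // Appends a marker byte to a cell and returns the server-side Result.
    public static Result appendMarker(Configuration conf, byte[] rowKey) throws IOException {
        // Hypothetical coordinates; substitute your own.
        byte[] table = Bytes.toBytes("MY_SEQUENCES");
        byte[] family = Bytes.toBytes("0");
        byte[] qualifier = Bytes.toBytes("SEQUENCE_STATE");

        HConnection connection = HConnectionManager.createConnection(conf);
        HTableInterface htable = connection.getTable(table);
        try {
            Append append = new Append(rowKey);
            append.add(family, qualifier, new byte[] { 1 });
            // Atomic on the region server: mutate and read back in one RPC.
            return htable.append(append);
        } finally {
            htable.close();
            connection.close();
        }
    }
}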

Example 98 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ConnectionQueryServicesImpl, method createSysMutexTable.

private void createSysMutexTable(HBaseAdmin admin) throws IOException, SQLException {
    try {
        final TableName mutexTableName = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES);
        List<TableName> systemTables = getSystemTableNames(admin);
        if (systemTables.contains(mutexTableName)) {
            logger.debug("System mutex table already appears to exist, not creating it");
            return;
        }
        HTableDescriptor tableDesc = new HTableDescriptor(mutexTableName);
        HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES);
        // Let mutex expire after some time
        columnDesc.setTimeToLive(TTL_FOR_MUTEX);
        tableDesc.addFamily(columnDesc);
        admin.createTable(tableDesc);
        try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
            byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
            Put put = new Put(mutexRowKey);
            put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
            sysMutexTable.put(put);
        }
    } catch (TableExistsException e) {
        // Ignore: another client created the mutex table concurrently.
    }
}
Also used: TableName(org.apache.hadoop.hbase.TableName), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), TableExistsException(org.apache.hadoop.hbase.TableExistsException), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), Put(org.apache.hadoop.hbase.client.Put), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
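
The create-if-absent idiom above, with a TTL on the column family so abandoned mutex cells expire on their own, reduces to the sketch below. The table name and the 15-minute TTL are hypothetical stand-ins for Phoenix's SYSTEM.MUTEX and its TTL_FOR_MUTEX constant.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class MutexTableSketch {
    public static void createIfAbsent(Configuration conf) throws IOException {
        // Hypothetical name and TTL; the real code uses SYSTEM.MUTEX.
        TableName name = TableName.valueOf("MY_MUTEX");
        int ttlSeconds = 15 * 60;

        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            HTableDescriptor tableDesc = new HTableDescriptor(name);
            HColumnDescriptor columnDesc = new HColumnDescriptor("f");
            // TTL lets stale mutex cells expire instead of blocking forever.
            columnDesc.setTimeToLive(ttlSeconds);
            tableDesc.addFamily(columnDesc);
            admin.createTable(tableDesc);
        } catch (TableExistsException e) {
            // Another client won the race; the table is usable either way.
        } finally {
            admin.close();
        }
    }
}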

Example 99 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ConnectionQueryServicesImpl, method ensureSystemTablesUpgraded.

void ensureSystemTablesUpgraded(ReadOnlyProps props) throws SQLException, IOException, IllegalArgumentException, InterruptedException {
    if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props)) {
        return;
    }
    HTableInterface metatable = null;
    try (HBaseAdmin admin = getAdmin()) {
        // Namespace-mapping is enabled at this point.
        try {
            ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
        } catch (PhoenixIOException e) {
            // Two possibilities:
            // 1) We could not read the namespace descriptor; the namespace may
            //    or may not exist at this point.
            // 2) We could not create the namespace.
            // Either way, if the namespace does not exist, the code below will
            // fail as expected; if it exists and is mapped, the check below
            // exits gracefully.
        }
        List<TableName> tableNames = getSystemTableNames(admin);
        // No tables match "SYSTEM\..*"; they have all already been moved to "SYSTEM:.*"
        if (tableNames.size() == 0) {
            return;
        }
        // Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:"
        if (tableNames.size() > 5) {
            logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);
        }
        byte[] mappedSystemTable = SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props).getName();
        metatable = getTable(mappedSystemTable);
        if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
            if (!admin.tableExists(mappedSystemTable)) {
                UpgradeUtil.mapTableToNamespace(admin, metatable, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, props, null, PTableType.SYSTEM, null);
                ConnectionQueryServicesImpl.this.removeTable(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
            }
            tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
        }
        tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME);
        for (TableName table : tableNames) {
            UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), props, null, PTableType.SYSTEM, null);
            ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
        }
        if (!tableNames.isEmpty()) {
            clearCache();
        }
    } finally {
        if (metatable != null) {
            metatable.close();
        }
    }
}
Also used: HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), TableName(org.apache.hadoop.hbase.TableName), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface)
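
The migration hinges on telling tables still named with a literal "SYSTEM." prefix in the default namespace apart from tables already mapped into the SYSTEM: namespace. Below is a hedged sketch of that detection step using only stock HBaseAdmin calls; getSystemTableNames() in the example is a Phoenix helper and is only approximated here.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class SystemTableScanSketch {
    // Finds tables still in the default namespace whose qualifier starts with
    // "SYSTEM.", i.e. candidates for mapping into the SYSTEM: namespace.
    public static List<TableName> findUnmappedSystemTables(Configuration conf) throws IOException {
        List<TableName> unmapped = new ArrayList<TableName>();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
            for (TableName name : admin.listTableNames()) {
                // "SYSTEM.CATALOG" parses as a default-namespace table, while
                // "SYSTEM:CATALOG" parses as table CATALOG in namespace SYSTEM.
                if (NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR.equals(name.getNamespaceAsString())
                        && name.getQualifierAsString().startsWith("SYSTEM.")) {
                    unmapped.add(name);
                }
            }
        } finally {
            admin.close();
        }
        return unmapped;
    }
}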

Example 100 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class DisableLocalIndexIT, method testDisabledLocalIndexes.

@Test
public void testDisabledLocalIndexes() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    props.setProperty(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, Boolean.FALSE.toString());
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(true);
    String baseName = generateUniqueName();
    String tableName = baseName + "_TABLE";
    String viewName = baseName + "_VIEW";
    String indexName1 = baseName + "_INDEX1";
    String indexName2 = baseName + "_INDEX2";
    conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')");
    conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')");
    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    assertFalse(admin.tableExists(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName)));
    admin.close();
    HTableInterface t = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName));
    try {
        // getTable() no longer throws for a missing table, so force an RPC to
        // verify that the local index table was never created.
        t.getTableDescriptor();
        fail("Local index table should not have been created");
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
        // expected
    } finally {
        t.close();
    }
    Properties tsconnProps = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    tsconnProps.setProperty(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, Boolean.FALSE.toString());
    tsconnProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1");
    Connection tsconn = DriverManager.getConnection(getUrl(), tsconnProps);
    tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName);
    tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)");
    tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
    try {
        conn.createStatement().execute("CREATE LOCAL INDEX " + indexName2 + " ON " + tableName + "(k2)");
        fail("Should not allow creation of local index");
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES.getErrorCode(), e.getErrorCode());
    }
    try {
        tsconn.createStatement().execute("CREATE LOCAL INDEX " + indexName2 + " ON " + viewName + "(k2, v1)");
        fail("Should not allow creation of local index");
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES.getErrorCode(), e.getErrorCode());
    }
}
Also used: HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin), SQLException(java.sql.SQLException), Connection(java.sql.Connection), PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection), Properties(java.util.Properties), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), Test(org.junit.Test)
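
The comment in the test records a real client-API wrinkle: obtaining an HTableInterface is lazy and succeeds even when the table is absent, so only an actual RPC (here getTableDescriptor()) reveals whether the table exists. A minimal standalone sketch of that existence probe, as a hypothetical helper against the pre-2.0 client API:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;

public class TableExistenceSketch {
    public static boolean tableExists(Configuration conf, String tableName) throws IOException {
        HConnection connection = HConnectionManager.createConnection(conf);
        // getTable() does not contact the cluster, so it cannot fail here.
        HTableInterface table = connection.getTable(Bytes.toBytes(tableName));
        try {
            table.getTableDescriptor(); // forces an RPC
            return true;
        } catch (TableNotFoundException e) {
            return false;
        } finally {
            table.close();
            connection.close();
        }
    }
}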

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 122 usages
Result (org.apache.hadoop.hbase.client.Result): 43 usages
Put (org.apache.hadoop.hbase.client.Put): 42 usages
IOException (java.io.IOException): 38 usages
ArrayList (java.util.ArrayList): 27 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23 usages
Get (org.apache.hadoop.hbase.client.Get): 21 usages
Scan (org.apache.hadoop.hbase.client.Scan): 21 usages
Test (org.junit.Test): 20 usages
SQLException (java.sql.SQLException): 19 usages
HashMap (java.util.HashMap): 17 usages
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17 usages
Connection (java.sql.Connection): 15 usages
Delete (org.apache.hadoop.hbase.client.Delete): 12 usages
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 12 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 12 usages
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11 usages
ResultSet (java.sql.ResultSet): 10 usages
Map (java.util.Map): 9 usages
Configuration (org.apache.hadoop.conf.Configuration): 9 usages