Example 86 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ParameterizedTransactionIT, method testCreateTableToBeTransactional.

@Test
public void testCreateTableToBeTransactional() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String t1 = generateUniqueName();
    String t2 = generateUniqueName();
    String ddl = "CREATE TABLE " + t1 + " (k varchar primary key) " + tableDDLOptions;
    conn.createStatement().execute(ddl);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(null, t1));
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
    assertTrue(table.isTransactional());
    assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    try {
        ddl = "ALTER TABLE " + t1 + " SET transactional=false";
        conn.createStatement().execute(ddl);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
    }
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(t2));
    desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
    admin.createTable(desc);
    ddl = "CREATE TABLE " + t2 + " (k varchar primary key) transactional=true";
    conn.createStatement().execute(ddl);
    assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf(t2)).getValue(TxConstants.READ_NON_TX_DATA));
    // Without transactional=true, this would implicitly switch the existing transactional table to non-transactional, so it must fail.
    ddl = "CREATE TABLE IF NOT EXISTS " + t1 + " (k varchar primary key)";
    try {
        conn.createStatement().execute(ddl);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
    }
    ddl += " transactional=true";
    conn.createStatement().execute(ddl);
    table = pconn.getTable(new PTableKey(null, t1));
    htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
    assertTrue(table.isTransactional());
    assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
}
Also used : HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Properties(java.util.Properties) PTableKey(org.apache.phoenix.schema.PTableKey) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTable(org.apache.phoenix.schema.PTable) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
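The test above never closes the HTableInterface handles it obtains. A minimal sketch of the coprocessor assertion with explicit cleanup; the helper name is hypothetical, not part of the test:

// Hypothetical helper: same check as in the test, but try-with-resources releases
// the handle even if the assertion throws.
private static void assertHasTransactionalCoprocessor(PhoenixConnection pconn, String tableName)
        throws Exception {
    try (HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
        assertTrue(htable.getTableDescriptor().getCoprocessors()
                .contains(PhoenixTransactionalProcessor.class.getName()));
    }
}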

Example 87 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class FromCompiler, method getResolverForCreation.

public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection) throws SQLException {
    TableName baseTable = statement.getBaseTableName();
    String schemaName;
    if (baseTable == null) {
        if (SchemaUtil.isSchemaCheckRequired(statement.getTableType(), connection.getQueryServices().getProps())) {
            schemaName = statement.getTableName().getSchemaName();
            if (schemaName != null) {
                // Constructing the resolver validates that the schema exists
                new SchemaResolver(connection, statement.getTableName().getSchemaName(), true);
            } else if (connection.getSchema() != null) {
                // To ensure schema set through properties or connection string exists before creating table
                new SchemaResolver(connection, connection.getSchema(), true);
            }
        }
        return EMPTY_TABLE_RESOLVER;
    }
    NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
    // Always use non-tenant-specific connection here
    try {
        // We need to always get the latest meta data for the parent table of a create view call to ensure
        // that we're copying the current table meta data as of when the view is created. Once we no longer
        // copy the parent meta data, but store only the local diffs (PHOENIX-3534), we will no longer need
        // to do this.
        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true, true);
        return visitor;
    } catch (TableNotFoundException e) {
        // A tenant-specific connection may not create a mapped VIEW.
        if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
            ConnectionQueryServices services = connection.getQueryServices();
            byte[] fullTableName = SchemaUtil.getPhysicalName(SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName()), connection.getQueryServices().getProps()).getName();
            HTableInterface htable = null;
            try {
                htable = services.getTable(fullTableName);
            } catch (UnsupportedOperationException ignore) {
                // For Connectionless
                throw e;
            } finally {
                if (htable != null)
                    Closeables.closeQuietly(htable);
            }
            tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
            return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), false);
        }
        throw e;
    }
}
Also used : TableName(org.apache.phoenix.parse.TableName) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) HashMap(java.util.HashMap) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
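The catch block opens the HTableInterface purely as a reachability probe for the physical HBase table, then releases it quietly. As a reusable sketch (the helper name is hypothetical, not Phoenix API):

// Hypothetical helper: opening the handle is the whole point of the probe;
// close() errors are swallowed because the open already answered the question.
private static void probePhysicalTable(ConnectionQueryServices services, byte[] fullTableName)
        throws SQLException {
    HTableInterface htable = null;
    try {
        htable = services.getTable(fullTableName);
    } finally {
        if (htable != null) {
            Closeables.closeQuietly(htable);
        }
    }
}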

Example 88 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ServerCacheClient, method removeServerCache.

/**
     * Remove the cached table from all region servers
     * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
     * @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
     * @throws SQLException
     * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
     */
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    Throwable lastThrowable = null;
    TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
    final PTable cacheUsingTable = cacheUsingTableRef.getTable();
    byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
    HTableInterface iterateOverTable = services.getTable(tableName);
    try {
        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
        /*
         * Allow for the possibility that the region we used to decide where to send the cache
         * has split and been relocated to another region server *after* we sent it, but before
         * we removed it. To accommodate this, we iterate through the current metadata boundaries
         * and remove the cache once for each server that we originally sent to.
         */
        if (LOG.isDebugEnabled()) {
            LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
        }
        for (HRegionLocation entry : locations) {
            if (remainingOnServers.contains(entry)) {
                // Call once per server
                try {
                    byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
                    iterateOverTable.coprocessorService(ServerCachingService.class, key, key, new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {

                        @Override
                        public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                            ServerRpcController controller = new ServerRpcController();
                            BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback = new BlockingRpcCallback<RemoveServerCacheResponse>();
                            RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                            final byte[] tenantIdBytes;
                            if (cacheUsingTable.isMultiTenant()) {
                                try {
                                    tenantIdBytes = connection.getTenantId() == null ? null : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(), cacheUsingTable.getBucketNum() != null, connection.getTenantId(), cacheUsingTable.getViewIndexId() != null);
                                } catch (SQLException e) {
                                    throw new IOException(e);
                                }
                            } else {
                                tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                            }
                            if (tenantIdBytes != null) {
                                builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                            }
                            builder.setCacheId(ByteStringer.wrap(cacheId));
                            instance.removeServerCache(controller, builder.build(), rpcCallback);
                            if (controller.getFailedOn() != null) {
                                throw controller.getFailedOn();
                            }
                            return rpcCallback.get();
                        }
                    });
                    remainingOnServers.remove(entry);
                } catch (Throwable t) {
                    lastThrowable = t;
                    LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
                }
            }
        }
        if (!remainingOnServers.isEmpty()) {
            LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
        }
    } finally {
        Closeables.closeQuietly(iterateOverTable);
    }
}
Also used : RemoveServerCacheResponse(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.RemoveServerCacheResponse) SQLException(java.sql.SQLException) ServerCachingService(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachingService) IOException(java.io.IOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) PTable(org.apache.phoenix.schema.PTable) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) HashSet(java.util.HashSet)
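Two details of the loop above carry the technique: passing the same key as both start and stop row restricts coprocessorService to the single region containing that key, and catching Throwable per region lets the loop try every remaining server before warning about leftovers. A pared-down sketch of the per-region call, with the tenant handling removed (it would run inside the same try block as above):

// Sketch: invoke the endpoint on exactly one region by using the region's start
// key as both the start and stop row of the coprocessor call.
byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
        new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {

            @Override
            public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<RemoveServerCacheResponse> done = new BlockingRpcCallback<RemoveServerCacheResponse>();
                // send the request; the callback blocks until this region answers
                instance.removeServerCache(controller, RemoveServerCacheRequest.newBuilder().setCacheId(ByteStringer.wrap(cacheId)).build(), done);
                if (controller.getFailedOn() != null) {
                    // surface the server-side exception to the retry loop
                    throw controller.getFailedOn();
                }
                return done.get();
            }
        });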

Example 89 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class IndexIT, method assertNoIndexDeletes.

private void assertNoIndexDeletes(Connection conn, long minTimestamp, String fullIndexName) throws IOException, SQLException {
    if (!this.mutable) {
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable index = pconn.getTable(new PTableKey(null, fullIndexName));
        byte[] physicalIndexTable = index.getPhysicalName().getBytes();
        try (HTableInterface hIndex = pconn.getQueryServices().getTable(physicalIndexTable)) {
            Scan scan = new Scan();
            scan.setRaw(true);
            if (this.transactional) {
                minTimestamp = TransactionUtil.convertToNanoseconds(minTimestamp);
            }
            scan.setTimeRange(minTimestamp, HConstants.LATEST_TIMESTAMP);
            ResultScanner scanner = hIndex.getScanner(scan);
            Result result;
            while ((result = scanner.next()) != null) {
                CellScanner cellScanner = result.cellScanner();
                while (cellScanner.advance()) {
                    Cell current = cellScanner.current();
                    assertEquals(KeyValue.Type.Put.getCode(), current.getTypeByte());
                }
            }
        }
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) PTableKey(org.apache.phoenix.schema.PTableKey) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) CellScanner(org.apache.hadoop.hbase.CellScanner) Cell(org.apache.hadoop.hbase.Cell) PTable(org.apache.phoenix.schema.PTable) Result(org.apache.hadoop.hbase.client.Result)
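The raw scan (scan.setRaw(true)) is what makes delete markers visible to the client; the assertion then verifies every surviving cell is a Put. A minimal sketch of the same inspection as a reusable predicate, which also closes the ResultScanner that the test above leaks (the helper name is hypothetical):

// Hypothetical helper: true if a raw scan over [minTs, LATEST_TIMESTAMP) sees any
// cell that is not a Put, i.e. a delete marker.
private static boolean hasDeleteMarkers(HTableInterface htable, long minTs) throws IOException {
    Scan scan = new Scan();
    scan.setRaw(true); // keep delete markers and all versions visible
    scan.setTimeRange(minTs, HConstants.LATEST_TIMESTAMP);
    try (ResultScanner scanner = htable.getScanner(scan)) {
        for (Result result : scanner) {
            CellScanner cells = result.cellScanner();
            while (cells.advance()) {
                if (cells.current().getTypeByte() != KeyValue.Type.Put.getCode()) {
                    return true;
                }
            }
        }
    }
    return false;
}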

Example 90 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class StatisticsWriter, method newWriter.

/**
     * @param env
     *            region coprocessor environment used to open the SYSTEM.STATS table
     * @param tableName
     *            name of the primary table the statistics are written for
     * @param clientTimeStamp
     *            client timestamp for the writes; LATEST_TIMESTAMP is replaced with the
     *            current server time
     * @return the {@link StatisticsWriter} for the given primary table.
     * @throws IOException
     *             if the table cannot be created due to an underlying HTable creation error
     */
public static StatisticsWriter newWriter(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
    }
    HTableInterface statsWriterTable = env.getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, env.getConfiguration()));
    HTableInterface statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable);
    StatisticsWriter statsTable = new StatisticsWriter(statsReaderTable, statsWriterTable, tableName, clientTimeStamp);
    return statsTable;
}
Also used : HTableInterface(org.apache.hadoop.hbase.client.HTableInterface)
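A brief usage sketch (the wrapper name is illustrative, and the returned StatisticsWriter is presumed to be closed by its caller): passing HConstants.LATEST_TIMESTAMP tells newWriter to substitute the current server time.

// Illustrative wrapper, not Phoenix API: obtain a StatisticsWriter inside a
// coprocessor hook with a defaulted client timestamp.
public static StatisticsWriter writerFor(RegionCoprocessorEnvironment env, String physicalTableName)
        throws IOException {
    return StatisticsWriter.newWriter(env, physicalTableName, HConstants.LATEST_TIMESTAMP);
}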

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)122 Result (org.apache.hadoop.hbase.client.Result)43 Put (org.apache.hadoop.hbase.client.Put)42 IOException (java.io.IOException)38 ArrayList (java.util.ArrayList)27 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)23 Get (org.apache.hadoop.hbase.client.Get)21 Scan (org.apache.hadoop.hbase.client.Scan)21 Test (org.junit.Test)20 SQLException (java.sql.SQLException)19 HashMap (java.util.HashMap)17 ResultScanner (org.apache.hadoop.hbase.client.ResultScanner)17 Connection (java.sql.Connection)15 Delete (org.apache.hadoop.hbase.client.Delete)12 HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)12 Mutation (org.apache.hadoop.hbase.client.Mutation)12 PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException)11 ResultSet (java.sql.ResultSet)10 Map (java.util.Map)9 Configuration (org.apache.hadoop.conf.Configuration)9