Example 1 with ConnectionQueryServices

use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class AppendOnlySchemaIT, the method testTableWithSameSchema.

private void testTableWithSameSchema(boolean notExists, boolean sameClient) throws Exception {
    // use a spied ConnectionQueryServices so we can verify calls to getTable
    ConnectionQueryServices connectionQueryServices = Mockito.spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
    Properties props = new Properties();
    props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
    try (Connection conn1 = connectionQueryServices.connect(getUrl(), props);
        Connection conn2 = sameClient ? conn1 : connectionQueryServices.connect(getUrl(), props)) {
        String metricTableName = generateUniqueName();
        String viewName = generateUniqueName();
        String metricIdSeqTableName = generateUniqueName();
        // create sequence for auto partition
        conn1.createStatement().execute("CREATE SEQUENCE " + metricIdSeqTableName + " CACHE 1");
        // create base table
        conn1.createStatement().execute("CREATE TABLE " + metricTableName + "(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT PK PRIMARY KEY(metricId))" + " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, AUTO_PARTITION_SEQ=" + metricIdSeqTableName);
        // create view
        String ddl = "CREATE VIEW " + (notExists ? "IF NOT EXISTS " : "") + viewName + " ( hostName varchar NOT NULL, tagName varChar" + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (hostName))" + " AS SELECT * FROM " + metricTableName + " UPDATE_CACHE_FREQUENCY=300000";
        conn1.createStatement().execute(ddl);
        conn1.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal) VALUES('host1', 1.0)");
        conn1.commit();
        reset(connectionQueryServices);
        // execute same create ddl
        try {
            conn2.createStatement().execute(ddl);
            if (!notExists) {
                fail("Create Table should fail");
            }
        } catch (TableAlreadyExistsException e) {
            if (notExists) {
                fail("Create Table should not fail");
            }
        }
        // verify getTable rpcs
        verify(connectionQueryServices, sameClient ? never() : times(1)).getTable((PName) isNull(), eq(new byte[0]), eq(Bytes.toBytes(viewName)), anyLong(), anyLong());
        // verify no create table rpcs
        verify(connectionQueryServices, never()).createTable(anyListOf(Mutation.class), any(byte[].class), any(PTableType.class), anyMap(), anyList(), any(byte[][].class), eq(false), eq(false));
        reset(connectionQueryServices);
        // execute alter table ddl that adds the same column
        ddl = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF NOT EXISTS" : "") + " tagName varchar";
        try {
            conn2.createStatement().execute(ddl);
            if (!notExists) {
                fail("Alter Table should fail");
            }
        } catch (ColumnAlreadyExistsException e) {
            if (notExists) {
                fail("Alter Table should not fail");
            }
        }
        // if notExists is true, verify one addColumn call with an empty mutation list (which does not make an RPC);
        // otherwise verify no addColumn calls
        verify(connectionQueryServices, notExists ? times(1) : never()).addColumn(eq(Collections.<Mutation>emptyList()), any(PTable.class), anyMap(), anySetOf(String.class), anyListOf(PColumn.class));
        // upsert one row
        conn2.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal) VALUES('host2', 2.0)");
        conn2.commit();
        // verify data in base table
        ResultSet rs = conn2.createStatement().executeQuery("SELECT * from " + metricTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(2.0, rs.getDouble(2), 1e-6);
        assertFalse(rs.next());
        // verify data in view
        rs = conn2.createStatement().executeQuery("SELECT * from " + viewName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertEquals("host1", rs.getString(3));
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(2.0, rs.getDouble(2), 1e-6);
        assertEquals("host2", rs.getString(3));
        assertFalse(rs.next());
    }
}
Also used: PColumn (org.apache.phoenix.schema.PColumn), TableAlreadyExistsException (org.apache.phoenix.schema.TableAlreadyExistsException), PTableType (org.apache.phoenix.schema.PTableType), ColumnAlreadyExistsException (org.apache.phoenix.schema.ColumnAlreadyExistsException), Connection (java.sql.Connection), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ResultSet (java.sql.ResultSet), Mutation (org.apache.hadoop.hbase.client.Mutation), Properties (java.util.Properties), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), PTable (org.apache.phoenix.schema.PTable)
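
The essential move in this test is the spy in the first line of the method: because every metadata RPC funnels through ConnectionQueryServices, wrapping it in a Mockito spy lets the test assert exactly which RPCs a statement did or did not trigger. Below is a minimal, hedged sketch of that pattern on its own, assuming the Phoenix IT harness helpers (driver, getUrl(), TEST_PROPERTIES) and a hypothetical, previously created table T.

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import java.sql.Connection;
import java.util.Properties;

import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.util.PropertiesUtil;

// wrap the real services object so every RPC made through it is recorded
ConnectionQueryServices spyServices =
        spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
try (Connection conn = spyServices.connect(getUrl(), new Properties())) {
    // resolving the hypothetical table T should go through the five-argument
    // getTable overload, unless the client-side metadata cache suppresses the RPC
    conn.createStatement().executeQuery("SELECT * FROM T LIMIT 1");
    verify(spyServices, atLeastOnce()).getTable(
            (PName) isNull(), any(byte[].class), any(byte[].class), anyLong(), anyLong());
}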

Example 2 with ConnectionQueryServices

use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class NativeHBaseTypesIT, the method initTableValues.

@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] family = Bytes.toBytes("1");
        byte[] uintCol = Bytes.toBytes("UINT_COL");
        byte[] ulongCol = Bytes.toBytes("ULONG_COL");
        byte[] key, bKey;
        Put put;
        key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
        put = new Put(key);
        put.add(family, uintCol, ts - 2, Bytes.toBytes(5));
        put.add(family, ulongCol, ts - 2, Bytes.toBytes(50L));
        mutations.add(put);
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(10));
        put.add(family, ulongCol, ts, Bytes.toBytes(100L));
        mutations.add(put);
        bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
        put = new Put(key);
        put.add(family, uintCol, ts - 4, Bytes.toBytes(5000));
        put.add(family, ulongCol, ts - 4, Bytes.toBytes(50000L));
        mutations.add(put);
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        Delete del = new Delete(key, ts - 2);
        mutations.add(del);
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(2000));
        put.add(family, ulongCol, ts, Bytes.toBytes(20000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(3000));
        put.add(family, ulongCol, ts, Bytes.toBytes(30000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(4000));
        put.add(family, ulongCol, ts, Bytes.toBytes(40000L));
        mutations.add(put);
        hTable.batch(mutations);
        Result r = hTable.get(new Get(bKey));
        assertFalse(r.isEmpty());
    } finally {
        hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_NATIVE, HBASE_NATIVE, null, ts + 1, null);
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Get (org.apache.hadoop.hbase.client.Get), ArrayList (java.util.ArrayList), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result)
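
The try/finally around the HTableInterface above is pre-Java-7 style; since the handle is Closeable (Example 3 below uses it in try-with-resources), the same raw-HBase write pattern can be sketched more compactly. The schema, table, family, and qualifier names here are hypothetical, and the deprecated Put.add overload matches the one used throughout these tests.

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.SchemaUtil;

ConnectionQueryServices services =
        driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
// getTable hands back a raw HBase handle; closing it releases it to the services layer
try (HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes("S", "T"))) {
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("1"), Bytes.toBytes("UINT_COL"), Bytes.toBytes(42));
    hTable.put(put);
}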

Example 3 with ConnectionQueryServices

use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class DynamicColumnIT, the method initTable.

@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (HBaseAdmin admin = services.getAdmin()) {
            HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
            htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
            htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
            admin.createTable(htd);
        }
        try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");
            Put put = new Put(key);
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.add(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.add(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.add(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.add(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);
            hTable.batch(mutations);
            // Create Phoenix table after HBase table was created through the native APIs
            // The timestamp of the table creation must be later than the timestamp of the data
            pconn.createStatement().execute("create table " + tableName + "   (entry varchar not null," + "    F varchar," + "    A.F1v1 varchar," + "    A.F1v2 varchar," + "    B.F2v1 varchar" + "    CONSTRAINT pk PRIMARY KEY (entry))");
        }
    }
}
Also used: HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), ArrayList (java.util.ArrayList), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Before (org.junit.Before)
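
The point of writing B.F2v2 at the HBase level while leaving it out of the CREATE TABLE is that Phoenix can still project it as a dynamic column. The query below is not part of the test above, just a hedged sketch of what reading that undeclared cell back could look like, reusing the test's tableName field and getUrl().

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

// declare the undeclared column inline; Phoenix then decodes the raw cell as VARCHAR
try (Connection conn = DriverManager.getConnection(getUrl());
     ResultSet rs = conn.createStatement().executeQuery(
             "SELECT entry, F, B.F2v1, B.F2v2 FROM " + tableName + " (B.F2v2 VARCHAR)")) {
    while (rs.next()) {
        // the row written above should come back as "entry1: f2value2"
        System.out.println(rs.getString("ENTRY") + ": " + rs.getString("F2V2"));
    }
}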

Example 4 with ConnectionQueryServices

use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class DynamicFamilyIT, the method initTableValues.

@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME, WEB_STATS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        Put put;
        List<Row> mutations = new ArrayList<Row>();
        put = new Put(Bytes.toBytes("entry1"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry2"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry3"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        hTable.batch(mutations);
    } finally {
        hTable.close();
    }
}
Also used: ArrayList (java.util.ArrayList), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Put (org.apache.hadoop.hbase.client.Put)
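
Each dynamic qualifier above is the concatenation of a fixed prefix and a user id, so reading a cell back means rebuilding exactly the same byte sequence. A short sketch, reusing the hTable handle and the constants of the method above, of decoding one of the PTime-encoded cells:

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;

// inside the same try block as the writes above
Result r = hTable.get(new Get(Bytes.toBytes("entry1")));
// rebuild the qualifier exactly as it was written: prefix + user id
byte[] qualifier = ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES);
java.sql.Time loginTime = (java.sql.Time) PTime.INSTANCE.toObject(r.getValue(B_CF, qualifier));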

Example 5 with ConnectionQueryServices

use of org.apache.phoenix.query.ConnectionQueryServices in project phoenix by apache.

From the class RoundRobinResultIteratorIT, the method testRoundRobinAfterTableSplit.

@Test
public void testRoundRobinAfterTableSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    int numRows = setupTableForSplit(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nRegionsBeforeSplit = nRegions;
    HBaseAdmin admin = services.getAdmin();
    try {
        // Split is an async operation, so poll for up to 20 seconds (10 tries x 2s each).
        // If the test tends to flap, increase the wait time.
        admin.split(tableName);
        CountDownLatch latch = new CountDownLatch(1);
        int nTries = 0;
        long waitTimeMillis = 2000;
        while (nRegions == nRegionsBeforeSplit && nTries < 10) {
            latch.await(waitTimeMillis, TimeUnit.MILLISECONDS);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        String query = "SELECT * FROM " + tableName;
        Statement stmt = conn.createStatement();
        // a small fetch size causes scanner caches to be replenished in parallel
        stmt.setFetchSize(10);
        ResultSet rs = stmt.executeQuery(query);
        int numRowsRead = 0;
        while (rs.next()) {
            numRowsRead++;
        }
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nRegionsBeforeSplit);
        assertEquals(numRows, numRowsRead);
    } finally {
        admin.close();
    }
}
Also used: HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet), CountDownLatch (java.util.concurrent.CountDownLatch), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Test (org.junit.Test)
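
Round-robin iteration is only legal when Phoenix is not forced to return rows in row-key order, which is what the test's getConnection() helper arranges on the client side. A hedged sketch of what such a connection setup could look like:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;

Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
// with forced row-key ordering off, scanners over different regions can be
// consumed round-robin instead of strictly in key order
props.setProperty(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false));
Connection conn = DriverManager.getConnection(getUrl(), props);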

Aggregations

ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 38 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23 usages
Connection (java.sql.Connection): 14 usages
SQLException (java.sql.SQLException): 12 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 9 usages
PTable (org.apache.phoenix.schema.PTable): 9 usages
Test (org.junit.Test): 9 usages
ResultSet (java.sql.ResultSet): 8 usages
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 8 usages
ArrayList (java.util.ArrayList): 7 usages
Properties (java.util.Properties): 7 usages
PreparedStatement (java.sql.PreparedStatement): 5 usages
Put (org.apache.hadoop.hbase.client.Put): 5 usages
Hint (org.apache.phoenix.parse.HintNode.Hint): 5 usages
Scan (org.apache.hadoop.hbase.client.Scan): 4 usages
MutationState (org.apache.phoenix.execute.MutationState): 4 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4 usages
PhoenixResultSet (org.apache.phoenix.jdbc.PhoenixResultSet): 4 usages
DelegateConnectionQueryServices (org.apache.phoenix.query.DelegateConnectionQueryServices): 4 usages
PColumn (org.apache.phoenix.schema.PColumn): 4 usages