
Example 61 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

From the class CSVCommonsLoaderIT, method testCSVUpsertWithColumns.

@Test
public void testCSVUpsertWithColumns() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        String stockTableName = generateUniqueName();
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS " + stockTableName + "(SYMBOL VARCHAR NOT NULL PRIMARY KEY, COMPANY VARCHAR);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);
        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, stockTableName, Arrays.<String>asList(STOCK_COLUMNS), true);
        // the CSV data has no header row; the column list was supplied to the loader above
        csvUtil.upsert(new StringReader(STOCK_CSV_VALUES));
        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement("SELECT SYMBOL, COMPANY FROM " + stockTableName);
        ResultSet phoenixResultSet = statement.executeQuery();
        parser = new CSVParser(new StringReader(STOCK_CSV_VALUES), csvUtil.getFormat());
        for (CSVRecord record : parser) {
            assertTrue(phoenixResultSet.next());
            int i = 0;
            for (String value : record) {
                assertEquals(value, phoenixResultSet.getString(i + 1));
                i++;
            }
        }
        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null)
            parser.close();
        if (conn != null)
            conn.close();
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) CSVParser(org.apache.commons.csv.CSVParser) StringReader(java.io.StringReader) CSVCommonsLoader(org.apache.phoenix.util.CSVCommonsLoader) ResultSet(java.sql.ResultSet) PreparedStatement(java.sql.PreparedStatement) CSVRecord(org.apache.commons.csv.CSVRecord) Test(org.junit.Test)
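
The pattern this example exercises is unwrapping a plain JDBC connection into a PhoenixConnection and handing it to CSVCommonsLoader. A minimal standalone sketch of that flow is shown below; the JDBC URL, table name, and CSV data are illustrative placeholders rather than values from the test.

import java.io.StringReader;
import java.sql.DriverManager;
import java.util.Arrays;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.CSVCommonsLoader;
import org.apache.phoenix.util.PhoenixRuntime;

public class CsvUpsertSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum; point this at your cluster's ZooKeeper.
        String url = "jdbc:phoenix:localhost:2181";
        try (PhoenixConnection conn =
                DriverManager.getConnection(url).unwrap(PhoenixConnection.class)) {
            // Create the target table if it is not already there.
            PhoenixRuntime.executeStatements(conn, new StringReader(
                "CREATE TABLE IF NOT EXISTS STOCKS (SYMBOL VARCHAR NOT NULL PRIMARY KEY, COMPANY VARCHAR);"), null);
            // Same constructor shape as in the test: explicit column list, strict mode enabled.
            CSVCommonsLoader loader = new CSVCommonsLoader(conn, "STOCKS",
                    Arrays.asList("SYMBOL", "COMPANY"), true);
            // The CSV data carries no header row because the columns were given above.
            loader.upsert(new StringReader("AAPL,Apple Inc.\nGOOG,Google\n"));
        }
    }
}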

Example 62 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

From the class ImmutableTablePropertiesIT, method testImmutableProperty.

@Test
public void testImmutableProperty() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
    String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        Statement stmt = conn.createStatement();
        // create table with immutable table property set to true
        String ddl = "CREATE TABLE  " + immutableDataTableFullName + "  (a_string varchar not null, col1 integer" + "  CONSTRAINT pk PRIMARY KEY (a_string)) IMMUTABLE_ROWS=true";
        stmt.execute(ddl);
        // create table with immutable table property set to false
        ddl = "CREATE TABLE  " + mutableDataTableFullName + "  (a_string varchar not null, col1 integer" + "  CONSTRAINT pk PRIMARY KEY (a_string))  IMMUTABLE_ROWS=false";
        stmt.execute(ddl);
        PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
        PTable immutableTable = phxConn.getTable(new PTableKey(null, immutableDataTableFullName));
        assertTrue("IMMUTABLE_ROWS should be set to true", immutableTable.isImmutableRows());
        PTable mutableTable = phxConn.getTable(new PTableKey(null, mutableDataTableFullName));
        assertFalse("IMMUTABLE_ROWS should be set to false", mutableTable.isImmutableRows());
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Statement(java.sql.Statement) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Properties(java.util.Properties) PTableKey(org.apache.phoenix.schema.PTableKey) PTable(org.apache.phoenix.schema.PTable) Test(org.junit.Test)
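
What the unwrap buys here is access to Phoenix's client-side metadata: PhoenixConnection.getTable(new PTableKey(tenantId, name)) returns the PTable, whose isImmutableRows() reflects the IMMUTABLE_ROWS table property. A minimal sketch of the same check, using a placeholder URL and table name:

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;

public class ImmutableRowsCheckSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:phoenix:localhost:2181"; // placeholder quorum
        try (Connection conn = DriverManager.getConnection(url)) {
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS DEMO_IMMUTABLE "
                + "(A_STRING VARCHAR NOT NULL PRIMARY KEY, COL1 INTEGER) IMMUTABLE_ROWS=true");
            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
            // Null tenant id: resolve the table against the global metadata cache.
            PTable table = phxConn.getTable(new PTableKey(null, "DEMO_IMMUTABLE"));
            System.out.println("IMMUTABLE_ROWS = " + table.isImmutableRows());
        }
    }
}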

Example 63 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

From the class QueryMoreIT, method testMaxMutationSize.

@Test
public void testMaxMutationSize() throws Exception {
    Properties connectionProperties = new Properties();
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "1000000");
    PhoenixConnection connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
    String fullTableName = generateUniqueName();
    try (Statement stmt = connection.createStatement()) {
        stmt.execute("CREATE TABLE " + fullTableName + "(\n" + "    ORGANIZATION_ID CHAR(15) NOT NULL,\n" + "    SCORE DOUBLE NOT NULL,\n" + "    ENTITY_ID CHAR(15) NOT NULL\n" + "    CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" + "        ORGANIZATION_ID,\n" + "        SCORE DESC,\n" + "        ENTITY_ID DESC\n" + "    )\n" + ") MULTI_TENANT=TRUE");
    }
    try {
        upsertRows(connection, fullTableName);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(), e.getErrorCode());
    }
    // raise the row limit but set the max mutation size in bytes to a low value
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "1000");
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "4");
    connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
    try {
        upsertRows(connection, fullTableName);
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCode(), e.getErrorCode());
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) PreparedStatement(java.sql.PreparedStatement) Statement(java.sql.Statement) Properties(java.util.Properties) Test(org.junit.Test)
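
The two properties control different limits: MAX_MUTATION_SIZE_ATTRIB bounds the number of rows a connection may buffer before commit, while MAX_MUTATION_SIZE_BYTES_ATTRIB bounds their total byte size; tripping either surfaces as a SQLException carrying the matching SQLExceptionCode. A minimal sketch of setting a row limit and reacting to the error, with a placeholder URL and table:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.query.QueryServices;

public class MutationSizeLimitSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Allow at most 3 buffered rows per commit; keep the byte limit generous.
        props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
        props.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "1000000");
        String url = "jdbc:phoenix:localhost:2181"; // placeholder quorum
        try (Connection conn = DriverManager.getConnection(url, props)) {
            conn.setAutoCommit(false);
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS DEMO_LIMIT (PK INTEGER PRIMARY KEY, V VARCHAR)");
            try {
                for (int i = 0; i < 10; i++) {
                    conn.createStatement().executeUpdate(
                        "UPSERT INTO DEMO_LIMIT VALUES (" + i + ", 'v')");
                }
                conn.commit();
            } catch (SQLException e) {
                if (e.getErrorCode() == SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode()) {
                    System.out.println("Row limit hit; commit more often or raise the limit.");
                } else {
                    throw e;
                }
            }
        }
    }
}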

Example 64 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

From the class FlappingTransactionIT, method testExternalTxContext.

@Test
public void testExternalTxContext() throws Exception {
    ResultSet rs;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String fullTableName = generateUniqueName();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    TransactionSystemClient txServiceClient = pconn.getQueryServices().getTransactionSystemClient();
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
    stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
    conn.commit();
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use HBase level Tephra APIs to start a new transaction
    TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
    TransactionContext txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    Put put = new Put(Bytes.toBytes("z"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
    txAware.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
    // New connection should not see data as it hasn't been committed yet
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use new connection to create a row with a conflict
    Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Use Tephra APIs directly to finish (i.e. commit) the transaction
    txContext.finish();
    // Confirm that attempt to commit row with conflict fails
    try {
        connWithConflict.commit();
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
    } finally {
        connWithConflict.close();
    }
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(3, rs.getInt(1));
    }
    // Repeat the same as above, but this time abort the transaction
    txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    put = new Put(Bytes.toBytes("j"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
    txAware.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(5, rs.getInt(1));
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
    rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    // Use Tephra APIs directly to abort (i.e. rollback) the transaction
    txContext.abort();
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Should succeed since conflicting row was aborted
    connWithConflict.commit();
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1));
    }
    // Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
    // written to hide it.
    Result result = htable.get(new Get(Bytes.toBytes("j")));
    assertTrue(result.isEmpty());
}
Also used : TransactionAwareHTable(org.apache.tephra.hbase.TransactionAwareHTable) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) PreparedStatement(java.sql.PreparedStatement) Statement(java.sql.Statement) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Properties(java.util.Properties) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) TransactionContext(org.apache.tephra.TransactionContext) Get(org.apache.hadoop.hbase.client.Get) ResultSet(java.sql.ResultSet) Test(org.junit.Test)
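
The pivotal call is pconn.setTransactionContext(txContext): it makes Phoenix upserts participate in a Tephra transaction that was started through the HBase-level TransactionAwareHTable API, so both write paths are committed by txContext.finish() or rolled back by txContext.abort() together. A condensed sketch of just that hand-off; the URL is a placeholder and the table is assumed to already exist as (K VARCHAR PRIMARY KEY, V1 VARCHAR) with TRANSACTIONAL=true.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.TxConstants;
import org.apache.tephra.hbase.TransactionAwareHTable;

public class SharedTxContextSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:phoenix:localhost:2181"; // placeholder quorum
        String tableName = "DEMO_TX";               // assumed transactional table
        try (Connection conn = DriverManager.getConnection(url)) {
            conn.setAutoCommit(false);
            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
            TransactionSystemClient txClient =
                    pconn.getQueryServices().getTransactionSystemClient();
            HTableInterface htable =
                    pconn.getQueryServices().getTable(Bytes.toBytes(tableName));
            TransactionAwareHTable txAware =
                    new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
            TransactionContext txContext = new TransactionContext(txClient, txAware);

            txContext.start();
            // Phoenix writes issued from here on join the externally started transaction.
            pconn.setTransactionContext(txContext);
            conn.createStatement().executeUpdate(
                    "UPSERT INTO " + tableName + " VALUES ('k1', 'v1')");
            // finish() commits the shared transaction; abort() would roll it back.
            txContext.finish();
        }
    }
}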

Example 65 with PhoenixConnection

Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.

From the class ScannerLeaseRenewalIT, method setUp.

@BeforeClass
public static void setUp() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    hbaseTestUtil = new HBaseTestingUtility(conf);
    setUpConfigForMiniCluster(conf);
    conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, LEASE_TIMEOUT_PERIOD_MILLIS);
    hbaseTestUtil.startMiniCluster();
    // establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
    zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
    url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
    Properties driverProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    driverProps.put(RENEW_LEASE_THREAD_POOL_SIZE, Long.toString(4));
    // if this property is false, tests will fail with UnknownScannerException errors. 
    driverProps.put(RENEW_LEASE_ENABLED, Boolean.toString(true));
    driverProps.put(RENEW_LEASE_THRESHOLD_MILLISECONDS, Long.toString(LEASE_TIMEOUT_PERIOD_MILLIS / 2));
    driverProps.put(RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS, Long.toString(LEASE_TIMEOUT_PERIOD_MILLIS / 4));
    driverProps.put(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, Long.toString(LEASE_TIMEOUT_PERIOD_MILLIS));
    // use round robin iterator
    driverProps.put(FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false));
    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
    try (PhoenixConnection phxConn = DriverManager.getConnection(url, driverProps).unwrap(PhoenixConnection.class)) {
        // run test methods only if we are at the hbase version that supports lease renewal.
        Assume.assumeTrue(phxConn.getQueryServices().supportsFeature(Feature.RENEW_LEASE));
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) Properties(java.util.Properties) BeforeClass(org.junit.BeforeClass)
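
Outside a mini-cluster test, the same setup amounts to building the JDBC URL from a ZooKeeper quorum, registering the production PhoenixDriver (rather than PhoenixTestDriver), and probing the lease-renewal feature through the connection's query services. A minimal sketch with a placeholder quorum:

import java.sql.DriverManager;
import java.util.Properties;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDriver;
import org.apache.phoenix.query.ConnectionQueryServices.Feature;
import org.apache.phoenix.util.PhoenixRuntime;

public class RenewLeaseFeatureSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum; the test derives it from the mini cluster's ZK client port.
        String zkQuorum = "localhost:2181";
        String url = PhoenixRuntime.JDBC_PROTOCOL
                + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
        // Register the production driver, as the test does.
        DriverManager.registerDriver(PhoenixDriver.INSTANCE);
        try (PhoenixConnection phxConn = DriverManager.getConnection(url, new Properties())
                .unwrap(PhoenixConnection.class)) {
            boolean renewLease =
                    phxConn.getQueryServices().supportsFeature(Feature.RENEW_LEASE);
            System.out.println("RENEW_LEASE supported: " + renewLease);
        }
    }
}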

Aggregations

PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 216
Test (org.junit.Test): 111
Connection (java.sql.Connection): 57
Properties (java.util.Properties): 52
PTable (org.apache.phoenix.schema.PTable): 52
Scan (org.apache.hadoop.hbase.client.Scan): 51
PhoenixPreparedStatement (org.apache.phoenix.jdbc.PhoenixPreparedStatement): 48
BaseConnectionlessQueryTest (org.apache.phoenix.query.BaseConnectionlessQueryTest): 47
PTableKey (org.apache.phoenix.schema.PTableKey): 43
ResultSet (java.sql.ResultSet): 41
PreparedStatement (java.sql.PreparedStatement): 40
SQLException (java.sql.SQLException): 40
Filter (org.apache.hadoop.hbase.filter.Filter): 29
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 29
RowKeyComparisonFilter (org.apache.phoenix.filter.RowKeyComparisonFilter): 28
TestUtil.multiEncodedKVFilter (org.apache.phoenix.util.TestUtil.multiEncodedKVFilter): 28
TestUtil.singleKVFilter (org.apache.phoenix.util.TestUtil.singleKVFilter): 28
Statement (java.sql.Statement): 19
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17
PColumn (org.apache.phoenix.schema.PColumn): 17