Example 81 with HTableInterface

use of org.apache.hadoop.hbase.client.HTableInterface in project Solbase by Photobucket.

the class IndexWriter method deleteDocKeyIdMap.

public void deleteDocKeyIdMap(Put mappingPut) {
    // For remote-server updates via the Solr update handler we would use
    // getDocTable(), but for now map/reduce jobs can use the local HTable.
    HTableInterface mappingTable = SolbaseUtil.getDocKeyIdMapTable();
    // Delete the doc key/id mapping row addressed by this Put's row key
    try {
        Delete delete = new Delete(mappingPut.getRow());
        mappingTable.delete(delete);
    } catch (IOException e) {
        throw new SolbaseException(SolbaseException.ErrorCode.SERVER_ERROR, e.getMessage());
    } finally {
        SolbaseUtil.releaseTable(mappingTable);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) SolbaseException(org.solbase.common.SolbaseException) IOException(java.io.IOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface)
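
The pattern here, building a Delete from the incoming Put's row key and releasing the table in a finally block, is worth keeping in generic form. A minimal sketch assuming an HTablePool rather than Solbase's SolbaseUtil helpers:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;

public class RowDeleter {

    private final HTablePool pool;

    public RowDeleter(HTablePool pool) {
        this.pool = pool;
    }

    /** Deletes the row addressed by the given Put's row key. */
    public void deleteRowOf(String tableName, Put put) throws IOException {
        // getTable hands back a pooled instance; close() returns it to the pool
        HTableInterface table = pool.getTable(tableName);
        try {
            table.delete(new Delete(put.getRow()));
        } finally {
            table.close();
        }
    }
}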

Example 82 with HTableInterface

use of org.apache.hadoop.hbase.client.HTableInterface in project honeycomb by altamiracorp.

the class HBaseMetadataTest method testRenameExistingTableNoAutoFlush.

@Test(expected = TableNotFoundException.class)
public void testRenameExistingTableNoAutoFlush() throws Exception {
    String originalName = "OriginalName";
    String newName = "NewName";
    TableSchema origSchema = TABLE_SCHEMA_GEN.next();
    // Configure the table to disable auto flush
    HTableInterface hTableSpy = PowerMockito.spy(MockHTable.create());
    Mockito.when(hTableSpy.isAutoFlush()).thenReturn(false);
    hbaseMetadata.createTable(originalName, origSchema);
    long origId = hbaseMetadata.getTableId(originalName);
    hbaseMetadata.renameExistingTable(originalName, newName);
    long newId = hbaseMetadata.getTableId(newName);
    assertEquals(origId, newId);
    Collection<ColumnSchema> origSchemaColumns = origSchema.getColumns();
    TableSchema newSchema = hbaseMetadata.getSchema(newId);
    for (ColumnSchema columnSchema : newSchema.getColumns()) {
        assertTrue(origSchemaColumns.contains(columnSchema));
    }
    // Trying to access the id of the old table name will result in an exception
    hbaseMetadata.getTableId(originalName);
    hTableSpy.close();
}
Also used : TableSchema(com.nearinfinity.honeycomb.mysql.schema.TableSchema) ColumnSchema(com.nearinfinity.honeycomb.mysql.schema.ColumnSchema) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Test(org.junit.Test)
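
Stubbing isAutoFlush() is the key move in this test. A self-contained variant using a plain Mockito mock instead of the PowerMockito spy over MockHTable (the spy is needed only when other calls must hit a real in-memory table):

import static org.junit.Assert.assertFalse;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.junit.Test;
import org.mockito.Mockito;

public class AutoFlushStubTest {

    @Test
    public void stubbedTableReportsAutoFlushDisabled() throws Exception {
        // A plain mock suffices when only the flush flag matters
        HTableInterface table = Mockito.mock(HTableInterface.class);
        Mockito.when(table.isAutoFlush()).thenReturn(false);
        assertFalse(table.isAutoFlush());
        // close() on a mock is a harmless no-op
        table.close();
    }
}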

Example 83 with HTableInterface

use of org.apache.hadoop.hbase.client.HTableInterface in project Cloud9 by lintool.

the class HBaseWordCountFetch method run.

/**
   * Runs this tool.
   */
@SuppressWarnings({ "static-access" })
public int run(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OptionBuilder.withArgName("table").hasArg().withDescription("HBase table name").create(TABLE));
    options.addOption(OptionBuilder.withArgName("word").hasArg().withDescription("word to look up").create(WORD));
    CommandLine cmdline;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        return -1;
    }
    if (!cmdline.hasOption(TABLE) || !cmdline.hasOption(WORD)) {
        System.out.println("args: " + Arrays.toString(args));
        HelpFormatter formatter = new HelpFormatter();
        formatter.setWidth(120);
        formatter.printHelp(this.getClass().getName(), options);
        ToolRunner.printGenericCommandUsage(System.out);
        return -1;
    }
    String tableName = cmdline.getOptionValue(TABLE);
    String word = cmdline.getOptionValue(WORD);
    Configuration conf = getConf();
    conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
    Configuration hbaseConfig = HBaseConfiguration.create(conf);
    HConnection hbaseConnection = HConnectionManager.createConnection(hbaseConfig);
    HTableInterface table = hbaseConnection.getTable(tableName);
    Get get = new Get(Bytes.toBytes(word));
    Result result = table.get(get);
    byte[] value = result.getValue(HBaseWordCount.CF, HBaseWordCount.COUNT);
    if (value == null) {
        LOG.info("word: " + word + " not found in table " + tableName);
    } else {
        LOG.info("word: " + word + ", count: " + Bytes.toInt(value));
    }
    // Release the table and the shared connection
    table.close();
    hbaseConnection.close();
    return 0;
}
Also used : Path(org.apache.hadoop.fs.Path) Options(org.apache.commons.cli.Options) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) GnuParser(org.apache.commons.cli.GnuParser) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) HConnection(org.apache.hadoop.hbase.client.HConnection) Result(org.apache.hadoop.hbase.client.Result) HelpFormatter(org.apache.commons.cli.HelpFormatter) CommandLine(org.apache.commons.cli.CommandLine) Get(org.apache.hadoop.hbase.client.Get) CommandLineParser(org.apache.commons.cli.CommandLineParser) ParseException(org.apache.commons.cli.ParseException)
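
The fetch reduces to four calls: build a Get from the row key, read the Result, pull the cell, decode with Bytes. A condensed sketch with cleanup; the column family c and qualifier count are stand-ins for HBaseWordCount.CF and HBaseWordCount.COUNT:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class WordCountFetcher {

    // Hypothetical family and qualifier for this sketch
    private static final byte[] CF = Bytes.toBytes("c");
    private static final byte[] COUNT = Bytes.toBytes("count");

    public static int fetchCount(String tableName, String word) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HConnection connection = HConnectionManager.createConnection(conf);
        try {
            HTableInterface table = connection.getTable(tableName);
            try {
                Get get = new Get(Bytes.toBytes(word));
                // Restrict the Get to the one cell we need
                get.addColumn(CF, COUNT);
                Result result = table.get(get);
                byte[] value = result.getValue(CF, COUNT);
                return value == null ? 0 : Bytes.toInt(value);
            } finally {
                table.close();
            }
        } finally {
            connection.close();
        }
    }
}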

Example 84 with HTableInterface

use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

the class AlterTableWithViewsIT method testMakeBaseTableTransactional.

@Test
public void testMakeBaseTableTransactional() throws Exception {
    try (Connection conn = DriverManager.getConnection(getUrl());
        Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn) {
        String baseTableName = "NONTXNTBL_" + generateUniqueName() + (isMultiTenant ? "0" : "1");
        String viewOfTable = baseTableName + "_VIEW";
        String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTableName + " (" + " %s ID char(1) NOT NULL," + " COL1 integer NOT NULL," + " COL2 bigint NOT NULL," + " CONSTRAINT NAME_PK PRIMARY KEY (%s ID, COL1, COL2)" + " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        assertTableDefinition(conn, baseTableName, PTableType.TABLE, null, 0, 3, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2");
        viewConn.createStatement().execute("CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + baseTableName);
        assertTableDefinition(conn, viewOfTable, PTableType.VIEW, baseTableName, 0, 5, 3, "ID", "COL1", "COL2", "VIEW_COL1", "VIEW_COL2");
        PName tenantId = isMultiTenant ? PNameFactory.newName("tenant1") : null;
        PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
        HTableInterface htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
        assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
        assertFalse(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
        assertFalse(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
        // make the base table transactional
        conn.createStatement().execute("ALTER TABLE " + baseTableName + " SET TRANSACTIONAL=true");
        // query the view to force the table cache to be updated
        viewConn.createStatement().execute("SELECT * FROM " + viewOfTable);
        htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
        assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
        assertTrue(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
        assertTrue(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PName(org.apache.phoenix.schema.PName) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTableKey(org.apache.phoenix.schema.PTableKey) Test(org.junit.Test)
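
The coprocessor assertion generalizes: getTableDescriptor() fetches the table's current descriptor from the cluster, and getCoprocessors() lists the attached class names. A minimal sketch taking the class name as a parameter instead of hard-coding PhoenixTransactionalProcessor:

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTableInterface;

public final class CoprocessorCheck {

    private CoprocessorCheck() {
    }

    /**
     * Returns true if the named coprocessor class is attached to the table.
     * getTableDescriptor() reads the descriptor from the cluster, so this
     * reflects the table's current state, not a cached view.
     */
    public static boolean hasCoprocessor(HTableInterface table, String className)
            throws IOException {
        return table.getTableDescriptor().getCoprocessors().contains(className);
    }
}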

Example 85 with HTableInterface

use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

the class ConnectionQueryServicesImpl method returnSequences.

@SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
    for (SequenceKey key : keys) {
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, attempt to return the unused sequence values
        List<Append> mutations = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toReturnList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                Append append = sequence.newReturn(timestamp);
                toReturnList.add(sequence);
                mutations.add(append);
                // Record the caller's index so a failure below can be
                // reported in the right slot of the exceptions array.
                indexes[toReturnList.size() - 1] = i;
            } catch (EmptySequenceCacheException ignore) {
                // Nothing to return for this sequence, so skip it
            }
        }
        if (toReturnList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(mutations);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toReturnList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                sequence.returnValue(result);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used : EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException) SQLException(java.sql.SQLException) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result) Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
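
The heart of the method is hTable.batch(mutations), which applies all Appends in one call and returns one result slot per submitted mutation; the indexes array maps each slot back to the caller's original position when some inputs produce no mutation. A stripped-down sketch of that bookkeeping, with hypothetical row keys and a hypothetical marker column in place of Phoenix's sequence machinery:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;

public final class BatchWithIndexMap {

    private BatchWithIndexMap() {
    }

    /**
     * Appends a marker cell to every non-null row key and reports failures
     * against the caller's original index, mirroring the indexes
     * bookkeeping in returnSequences above.
     */
    public static void appendMarkers(HTableInterface table, byte[][] rowKeys,
            Exception[] failures) throws IOException, InterruptedException {
        List<Append> mutations = new ArrayList<Append>();
        int[] indexes = new int[rowKeys.length];
        for (int i = 0; i < rowKeys.length; i++) {
            if (rowKeys[i] == null) {
                // Skipped inputs get no batch slot, like the
                // EmptySequenceCacheException case above
                continue;
            }
            Append append = new Append(rowKeys[i]);
            append.add(Bytes.toBytes("f"), Bytes.toBytes("marker"), Bytes.toBytes(1L));
            indexes[mutations.size()] = i;
            mutations.add(append);
        }
        // One batched round trip; results line up with the mutations list
        Object[] results = table.batch(mutations);
        for (int k = 0; k < results.length; k++) {
            if (results[k] == null) {
                // A null slot marks a failed operation; map it back to
                // the caller's original input position
                failures[indexes[k]] = new IOException("append failed for slot " + k);
            }
        }
    }
}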

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 117 uses
Result (org.apache.hadoop.hbase.client.Result): 43 uses
Put (org.apache.hadoop.hbase.client.Put): 41 uses
IOException (java.io.IOException): 36 uses
ArrayList (java.util.ArrayList): 26 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23 uses
Get (org.apache.hadoop.hbase.client.Get): 21 uses
Scan (org.apache.hadoop.hbase.client.Scan): 21 uses
Test (org.junit.Test): 20 uses
SQLException (java.sql.SQLException): 19 uses
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17 uses
Connection (java.sql.Connection): 15 uses
HashMap (java.util.HashMap): 15 uses
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 13 uses
Delete (org.apache.hadoop.hbase.client.Delete): 12 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 12 uses
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11 uses
ResultSet (java.sql.ResultSet): 10 uses
Configuration (org.apache.hadoop.conf.Configuration): 9 uses
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 9 uses