
Example 11 with Row

Use of org.apache.hadoop.hbase.client.Row in project bagheera by mozilla-metrics.

In class FlushResult, method flush:

public void flush() throws IOException {
    IOException lastException = null;
    this.currentTimeMillis = System.currentTimeMillis();
    int i;
    for (i = 0; i < getRetryCount(); i++) {
        HTableInterface table = hbasePool.getTable(tableName);
        try {
            table.setAutoFlush(false);
            final TimerContext flushTimerContext = flushTimer.time();
            try {
                List<Row> rows = new ArrayList<Row>(batchSize);
                // drain up to batchSize rows from the queue without blocking
                while (!rowQueue.isEmpty() && rows.size() < batchSize) {
                    Row row = rowQueue.poll();
                    if (row != null) {
                        rows.add(row);
                        rowQueueSize.decrementAndGet();
                    }
                }
                try {
                    FlushResult result = flushTable(table, rows);
                    stored.mark(result.successfulPutCount);
                    storeFailed.mark(result.failedPutCount);
                    deleted.mark(result.successfulDeleteCount);
                    deleteFailed.mark(result.failedDeleteCount);
                } catch (InterruptedException e) {
                    LOG.error("Error flushing batch of " + batchSize + " messages", e);
                }
            } finally {
                flushTimerContext.stop();
                if (table != null) {
                    table.close();
                }
            }
            break;
        } catch (IOException e) {
            LOG.warn(String.format("Error in flush attempt %d of %d, clearing Region cache", (i + 1), getRetryCount()), e);
            lastException = e;
            // connection.clearRegionCache();
            try {
                Thread.sleep(getRetrySleepSeconds() * 1000);
            } catch (InterruptedException e1) {
                // interrupted while sleeping between retries; continue with the next attempt
                LOG.info("woke up by interruption", e1);
            }
        }
    }
    if (i >= getRetryCount() && lastException != null) {
        LOG.error("Error in final flush attempt, giving up.");
        throw lastException;
    }
    LOG.debug("Flush finished");
}
Also used: TimerContext (com.yammer.metrics.core.TimerContext), ArrayList (java.util.ArrayList), IOException (java.io.IOException), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
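
The retry scaffolding in this example is reusable on its own. Below is a minimal standalone sketch of the same bounded-retry pattern; doFlush() is a hypothetical stand-in for the real HBase work, and the retry/sleep settings are passed in as plain parameters rather than read from configuration:

public void flushWithRetries(int retryCount, long sleepMillis) throws IOException {
    IOException lastException = null;
    for (int attempt = 0; attempt < retryCount; attempt++) {
        try {
            doFlush(); // the actual work; throws IOException on failure
            return;    // success, stop retrying
        } catch (IOException e) {
            lastException = e; // remember the most recent failure
            try {
                Thread.sleep(sleepMillis);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // preserve interrupt status
            }
        }
    }
    if (lastException != null) {
        throw lastException; // every attempt failed
    }
}

Note that, unlike the original, this sketch restores the thread's interrupt status instead of only logging the interruption.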

Example 12 with Row

Use of org.apache.hadoop.hbase.client.Row in project hbase by apache.

In class RequestConverter, method buildNoDataRegionAction:

/**
   * Create a protocol buffer multi request with NO data for a list of actions (the data is
   * carried separately, outside of protobuf). The request just notes attributes, whether to
   * write the WAL, etc., and its presence in protobuf serves as a placeholder for the data,
   * which comes along separately. Note that Get is different: it contains no 'data' and is
   * always carried fully by protobuf. We return references to the data by adding them to the
   * passed-in <code>cells</code> param.
   *
   * <p>Propagates the Actions' original index.
   *
   * @param regionName the region the actions apply to
   * @param actions the actions to convert
   * @param cells place to stuff references to the actual data
   * @return a multi request that does not carry any data
   * @throws IOException if an action cannot be converted
   */
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName, final Iterable<Action> actions, final List<CellScannable> cells, final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException {
    RegionAction.Builder builder = getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            Put p = (Put) row;
            cells.add(p);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, p, mutationBuilder)));
        } else if (row instanceof Delete) {
            Delete d = (Delete) row;
            int size = d.size();
            // if the Delete carries cells, put only metadata in the pb and send the kvs along the side in cells
            if (size > 0) {
                cells.add(d);
                builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, d, mutationBuilder)));
            } else {
                builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, d, mutationBuilder)));
            }
        } else if (row instanceof Append) {
            Append a = (Append) row;
            cells.add(a);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.APPEND, a, mutationBuilder, action.getNonce())));
        } else if (row instanceof Increment) {
            Increment i = (Increment) row;
            cells.add(i);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.INCREMENT, i, mutationBuilder, action.getNonce())));
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value =
                org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations
                    .unsafeWrap(exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            builder.addAction(actionBuilder.setServiceCall(cpBuilder
                .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
                .setServiceName(exec.getMethod().getService().getFullName())
                .setMethodName(exec.getMethod().getName())
                .setRequest(value)));
        } else if (row instanceof RowMutations) {
            throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    return builder;
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Action (org.apache.hadoop.hbase.client.Action), RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), Put (org.apache.hadoop.hbase.client.Put), RegionCoprocessorServiceExec (org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec), RowMutations (org.apache.hadoop.hbase.client.RowMutations), Append (org.apache.hadoop.hbase.client.Append), Get (org.apache.hadoop.hbase.client.Get), Increment (org.apache.hadoop.hbase.client.Increment), Row (org.apache.hadoop.hbase.client.Row)
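
Stripped of the protobuf plumbing, the method above is essentially a dispatch on the concrete Row subtype. A reduced sketch of that shape follows; the handle* methods are hypothetical stand-ins, the coprocessor branch and the special case for empty Deletes are glossed over, and it relies on the fact that Put, Delete, Append and Increment all extend Mutation (which implements CellScannable) in the HBase client API:

static void dispatch(Row row, List<CellScannable> cells) throws IOException {
    if (row instanceof Get) {
        handleGet((Get) row); // Gets carry no data; always fully inside protobuf
    } else if (row instanceof Mutation) {
        Mutation m = (Mutation) row;
        cells.add(m);      // the data travels alongside, outside protobuf
        handleMutation(m); // only metadata goes into the pb
    } else if (row instanceof RowMutations) {
        throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
    } else {
        throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
    }
}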

Example 13 with Row

Use of org.apache.hadoop.hbase.client.Row in project hbase by apache.

In class ReplicationSink, method batch:

/**
   * Apply the given row operations in batches against the table, acquiring the table from the
   * connection and closing it when done.
   * @param tableName table to insert into
   * @param allRows list of lists of actions; each inner list is applied as one batch call
   * @throws IOException if obtaining the table or applying a batch fails
   */
protected void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
    if (allRows.isEmpty()) {
        return;
    }
    Table table = null;
    try {
        Connection connection = getConnection();
        table = connection.getTable(tableName);
        for (List<Row> rows : allRows) {
            table.batch(rows, null);
        }
    } catch (InterruptedException ix) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: InterruptedIOException (java.io.InterruptedIOException), Table (org.apache.hadoop.hbase.client.Table), Connection (org.apache.hadoop.hbase.client.Connection), Row (org.apache.hadoop.hbase.client.Row)
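
Since Table extends java.io.Closeable, the explicit finally block can be replaced with try-with-resources on Java 7+. A sketch of the same method in that style, assuming the same getConnection() helper as above:

protected void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
    if (allRows.isEmpty()) {
        return;
    }
    // the table is closed automatically, even if batch() throws
    try (Table table = getConnection().getTable(tableName)) {
        for (List<Row> rows : allRows) {
            table.batch(rows, null); // null: per-operation results are not needed here
        }
    } catch (InterruptedException ix) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
    }
}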

Example 14 with Row

Use of org.apache.hadoop.hbase.client.Row in project cdap by caskdata.

In class HBaseQueueConsumer, method undoState:

@Override
protected void undoState(Set<byte[]> rowKeys, byte[] stateColumnName) throws IOException, InterruptedException {
    if (rowKeys.isEmpty()) {
        return;
    }
    List<Row> ops = Lists.newArrayListWithCapacity(rowKeys.size());
    for (byte[] rowKey : rowKeys) {
        // remove only the state column for this entry; the queue entry row itself is left intact
        Delete delete = new Delete(queueStrategy.getActualRowKey(getConfig(), rowKey));
        delete.deleteColumns(QueueEntryRow.COLUMN_FAMILY, stateColumnName);
        ops.add(delete);
    }
    hTable.batch(ops);
    hTable.flushCommits();
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), QueueEntryRow (co.cask.cdap.data2.transaction.queue.QueueEntryRow), Row (org.apache.hadoop.hbase.client.Row)
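
The ops list above is typed List<Row> rather than List<Delete> because batch() accepts any mix of Row subtypes in a single call. A small hypothetical sketch (table, family, qualifier and value are assumed to exist, and the HBase 1.x-style addColumn is used):

List<Row> ops = new ArrayList<Row>();
// Puts and Deletes can share one batch because both implement Row
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(family, qualifier, value);
ops.add(put);
ops.add(new Delete(Bytes.toBytes("row2")));
Object[] results = new Object[ops.size()];
table.batch(ops, results); // per-operation outcomes land in the results array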

Example 15 with Row

Use of org.apache.hadoop.hbase.client.Row in project phoenix by apache.

In class NativeHBaseTypesIT, method testNegativeCompareNegativeValue:

@SuppressWarnings("deprecation")
@Test
public void testNegativeCompareNegativeValue() throws Exception {
    String tableName = initTableValues();
    String query = "SELECT string_key FROM " + tableName + " WHERE uint_key > 100000";
    PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
    HTableInterface hTable = conn.getQueryServices().getTable(tableName.getBytes());
    List<Row> mutations = new ArrayList<Row>();
    byte[] family = Bytes.toBytes("1");
    byte[] uintCol = Bytes.toBytes("UINT_COL");
    byte[] ulongCol = Bytes.toBytes("ULONG_COL");
    byte[] key;
    Put put;
    // Need to use native APIs because the Phoenix APIs wouldn't let you insert a
    // negative number for an unsigned type
    key = ByteUtil.concat(Bytes.toBytes(-10), Bytes.toBytes(100L), Bytes.toBytes("e"));
    put = new Put(key);
    put.add(family, uintCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(10));
    put.add(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
    put.add(family, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY);
    mutations.add(put);
    hTable.batch(mutations);
    // Demonstrates weakness of HBase Bytes serialization. Negative numbers
    // show up as bigger than positive numbers
    PreparedStatement statement = conn.prepareStatement(query);
    ResultSet rs = statement.executeQuery();
    assertTrue(rs.next());
    assertEquals("e", rs.getString(1));
    assertFalse(rs.next());
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ArrayList (java.util.ArrayList), ResultSet (java.sql.ResultSet), PreparedStatement (java.sql.PreparedStatement), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
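
The weakness the comment points at is easy to show directly: Bytes.toBytes(int) writes big-endian two's-complement bytes, and HBase compares row keys as unsigned lexicographic byte arrays, so every negative int sorts after every positive one. A minimal demonstration (assertTrue as in the JUnit imports above):

byte[] neg = Bytes.toBytes(-10);     // 0xFF 0xFF 0xFF 0xF6
byte[] pos = Bytes.toBytes(100000);  // 0x00 0x01 0x86 0xA0
// unsigned lexicographic comparison, as HBase uses for row keys:
// the leading 0xFF byte makes the negative value compare greater
assertTrue(Bytes.compareTo(neg, pos) > 0);

This is why the query's "uint_key > 100000" predicate matches the row inserted with key -10.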

Aggregations

Row (org.apache.hadoop.hbase.client.Row): 20 usages
Put (org.apache.hadoop.hbase.client.Put): 16 usages
ArrayList (java.util.ArrayList): 14 usages
Delete (org.apache.hadoop.hbase.client.Delete): 12 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 7 usages
IOException (java.io.IOException): 5 usages
Append (org.apache.hadoop.hbase.client.Append): 4 usages
Get (org.apache.hadoop.hbase.client.Get): 4 usages
Increment (org.apache.hadoop.hbase.client.Increment): 4 usages
Pair (org.apache.hadoop.hbase.util.Pair): 4 usages
List (java.util.List): 3 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3 usages
Action (org.apache.hadoop.hbase.client.Action): 3 usages
RegionCoprocessorServiceExec (org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec): 3 usages
Result (org.apache.hadoop.hbase.client.Result): 3 usages
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 3 usages
Table (org.apache.hadoop.hbase.client.Table): 3 usages
RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction): 3 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 3 usages
ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices): 3 usages