Search in sources :

Example 1 with CellBuilder

Use of org.apache.hadoop.hbase.CellBuilder in project hbase by apache.

From the class TestMutationGetCellBuilder, method testMutationGetCellBuilder.

@Test
public void testMutationGetCellBuilder() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[] rowKey = Bytes.toBytes("12345678");
    final byte[] uselessRowKey = Bytes.toBytes("123");
    final byte[] family = Bytes.toBytes("cf");
    final byte[] qualifier = Bytes.toBytes("foo");
    final long now = EnvironmentEdgeManager.currentTime();
    try (Table table = TEST_UTIL.createTable(tableName, family)) {
        TEST_UTIL.waitTableAvailable(tableName.getName(), 5000);
        // put one row
        Put put = new Put(rowKey);
        CellBuilder cellBuilder = put.getCellBuilder()
            .setQualifier(qualifier)
            .setFamily(family)
            .setValue(Bytes.toBytes("bar"))
            .setTimestamp(now);
        // setRow is useless
        cellBuilder.setRow(uselessRowKey);
        put.add(cellBuilder.build());
        byte[] cloneRow = CellUtil.cloneRow(cellBuilder.build());
        assertTrue("setRow must be useless", !Arrays.equals(cloneRow, uselessRowKey));
        table.put(put);
        // get the row back and assert the values
        Get get = new Get(rowKey);
        get.setTimestamp(now);
        Result result = table.get(get);
        assertTrue("row key must be same", Arrays.equals(result.getRow(), rowKey));
        assertTrue("Column foo value should be bar", Bytes.toString(result.getValue(family, qualifier)).equals("bar"));
        // Delete that row
        Delete delete = new Delete(rowKey);
        cellBuilder = delete.getCellBuilder().setQualifier(qualifier).setFamily(family);
        // if this row still gets deleted, that confirms setType is ignored (useless) here
        cellBuilder.setType(Cell.Type.Put);
        delete.add(cellBuilder.build());
        table.delete(delete);
        // check whether this row still exists
        get = new Get(rowKey);
        get.setTimestamp(now);
        result = table.get(get);
        assertTrue("Column foo should not exist", result.getValue(family, qualifier) == null);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) CellBuilder(org.apache.hadoop.hbase.CellBuilder) Test(org.junit.Test)
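
The test above shows that setRow and setType are effectively ignored on the builder returned by Mutation#getCellBuilder, because the mutation already fixes the row key and the cell type. Below is a minimal sketch of the intended usage that keeps only the calls that matter; the table, row key, and column names are placeholders, and the fragment assumes an open Table and an enclosing method that handles IOException.

// Sketch: building a cell through Put#getCellBuilder. The row and cell type come
// from the Put itself, so only family, qualifier, value, and (optionally) the
// timestamp need to be set on the builder.
Put put = new Put(Bytes.toBytes("row-1")); // placeholder row key
put.add(put.getCellBuilder()
    .setFamily(Bytes.toBytes("cf"))        // placeholder family
    .setQualifier(Bytes.toBytes("foo"))    // placeholder qualifier
    .setValue(Bytes.toBytes("bar"))        // placeholder value
    .setTimestamp(EnvironmentEdgeManager.currentTime())
    .build());
table.put(put);                            // assumes an open Table instance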

Example 2 with CellBuilder

Use of org.apache.hadoop.hbase.CellBuilder in project hbase by apache.

From the class TestWALEntrySinkFilter, method testWALEntryFilter.

/**
 * Test filter. The filter will filter out any entry whose write time is <= 5 (BOUNDARY). We count
 * how many items we filter out, and we count how many cells make it through, way down below in
 * the Table#batch implementation. Puts in place a custom DevNullConnection so we can insert our
 * counting Table.
 * @throws IOException
 */
@Test
public void testWALEntryFilter() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Make it so our filter is instantiated on construction of ReplicationSink.
    conf.setClass(DummyConnectionRegistry.REGISTRY_IMPL_CONF_KEY, DevNullConnectionRegistry.class, DummyConnectionRegistry.class);
    conf.setClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
    conf.setClass(ClusterConnectionFactory.HBASE_SERVER_CLUSTER_CONNECTION_IMPL, DevNullAsyncClusterConnection.class, AsyncClusterConnection.class);
    ReplicationSink sink = new ReplicationSink(conf);
    // Create some dumb walentries.
    List<AdminProtos.WALEntry> entries = new ArrayList<>();
    AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
    // Need a tablename.
    ByteString tableName = ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
    // Add WALEdit Cells to Cells List. The way edits arrive at the sink is with protos
    // describing the edit with all Cells from all edits aggregated in a single CellScanner.
    final List<Cell> cells = new ArrayList<>();
    int count = BOUNDARY * 2;
    for (int i = 0; i < count; i++) {
        byte[] bytes = Bytes.toBytes(i);
        // Create a wal entry. Everything is set to the current index as bytes or int/long.
        entryBuilder.clear();
        entryBuilder.setKey(entryBuilder.getKeyBuilder()
            .setLogSequenceNumber(i)
            .setEncodedRegionName(ByteString.copyFrom(bytes))
            .setWriteTime(i)
            .setTableName(tableName)
            .build());
        // Lets have one Cell associated with each WALEdit.
        entryBuilder.setAssociatedCellCount(1);
        entries.add(entryBuilder.build());
        // We need to add a Cell per WALEdit to the cells array.
        CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
        // Make cells whose row, family, qualifier, value, and ts are all derived from 'i'.
        Cell cell = cellBuilder.setRow(bytes)
            .setFamily(bytes)
            .setQualifier(bytes)
            .setType(Cell.Type.Put)
            .setTimestamp(i)
            .setValue(bytes)
            .build();
        cells.add(cell);
    }
    // Now wrap our cells array in a CellScanner that we can pass in to replicateEntries. It has
    // all Cells from all the WALEntries made above.
    CellScanner cellScanner = new CellScanner() {

        // Set to -1 because advance gets called before current.
        int index = -1;

        @Override
        public Cell current() {
            return cells.get(index);
        }

        @Override
        public boolean advance() throws IOException {
            index++;
            return index < cells.size();
        }
    };
    // Call our sink.
    sink.replicateEntries(entries, cellScanner, null, null, null);
    // Check what made it through and what was filtered.
    assertTrue(FILTERED.get() > 0);
    assertTrue(UNFILTERED.get() > 0);
    assertEquals(count, FILTERED.get() + UNFILTERED.get());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) CellScanner(org.apache.hadoop.hbase.CellScanner) CellBuilder(org.apache.hadoop.hbase.CellBuilder) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
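
For context, the IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl referenced above is not shown here; all it has to do is drop entries whose write time is at or below BOUNDARY and bump the FILTERED counter, while the cells that survive are counted later in the counting Table#batch implementation. The following is a rough sketch only: the boolean filter(TableName, long writeTime) callback (returning true to drop an entry) and the init hook are assumptions about the WALEntrySinkFilter interface, not something shown in the snippet above.

// Hedged sketch of the test's filter. The interface methods below are assumed,
// not confirmed by the code above.
public static class IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl implements WALEntrySinkFilter {

    @Override
    public void init(Connection connection) {
        // Nothing to set up for this test filter (assumed no-op).
    }

    @Override
    public boolean filter(TableName table, long writeTime) {
        // Drop anything at or below the boundary; surviving cells are counted
        // further down, in the counting Table#batch implementation.
        if (writeTime <= BOUNDARY) { // BOUNDARY == 5 in this test
            FILTERED.incrementAndGet();
            return true;
        }
        return false;
    }
}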

Example 3 with CellBuilder

Use of org.apache.hadoop.hbase.CellBuilder in project hbase by apache.

From the class TestBulkLoadReplication, method createHFileForFamilies.

private String createHFileForFamilies(byte[] row, byte[] value, Configuration clusterConfig) throws IOException {
    CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
    cellBuilder.setRow(row).setFamily(TestReplicationBase.famName).setQualifier(Bytes.toBytes("1")).setValue(value).setType(Cell.Type.Put);
    HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(clusterConfig);
    // TODO We need a way to do this without creating files
    File hFileLocation = testFolder.newFile();
    FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
    try {
        hFileFactory.withOutputStream(out);
        hFileFactory.withFileContext(new HFileContextBuilder().build());
        HFile.Writer writer = hFileFactory.create();
        try {
            writer.append(new KeyValue(cellBuilder.build()));
        } finally {
            writer.close();
        }
    } finally {
        out.close();
    }
    return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) CellBuilder(org.apache.hadoop.hbase.CellBuilder) FileOutputStream(java.io.FileOutputStream) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HFile(org.apache.hadoop.hbase.io.hfile.HFile) HFile(org.apache.hadoop.hbase.io.hfile.HFile) File(java.io.File)
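
This createHFileForFamilies helper (and the near-identical one in the next example) creates its builder with CellBuilderType.DEEP_COPY, while the Thrift handler in Example 5 uses SHALLOW_COPY. As a rough guide (my summary, not stated in the snippets): DEEP_COPY copies the byte arrays into the built Cell, so the caller may reuse or mutate its arrays afterwards; SHALLOW_COPY only keeps references, which is cheaper but requires the arrays to stay untouched while the Cell is in use.

// Sketch of the difference; row/family/qualifier/value are placeholder byte arrays.
Cell copied = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
    .setRow(row).setFamily(family).setQualifier(qualifier)
    .setType(Cell.Type.Put).setValue(value).build();
// 'copied' owns its own copies of the arrays above.

Cell referenced = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
    .setRow(row).setFamily(family).setQualifier(qualifier)
    .setType(Cell.Type.Put).setValue(value).build();
// 'referenced' still points at the caller's arrays, so they must not be
// modified while this Cell is live.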

Example 4 with CellBuilder

Use of org.apache.hadoop.hbase.CellBuilder in project hbase by apache.

From the class TestBulkLoadReplicationHFileRefs, method createHFileForFamilies.

private String createHFileForFamilies(byte[] family) throws IOException {
    CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
    cellBuilder.setRow(row).setFamily(family).setQualifier(qualifier).setValue(value).setType(Cell.Type.Put);
    HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(UTIL1.getConfiguration());
    File hFileLocation = testFolder.newFile();
    FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
    try {
        hFileFactory.withOutputStream(out);
        hFileFactory.withFileContext(new HFileContextBuilder().build());
        HFile.Writer writer = hFileFactory.create();
        try {
            writer.append(new KeyValue(cellBuilder.build()));
        } finally {
            writer.close();
        }
    } finally {
        out.close();
    }
    return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) CellBuilder(org.apache.hadoop.hbase.CellBuilder) FileOutputStream(java.io.FileOutputStream) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HFile(org.apache.hadoop.hbase.io.hfile.HFile) HFile(org.apache.hadoop.hbase.io.hfile.HFile) File(java.io.File)

Example 5 with CellBuilder

Use of org.apache.hadoop.hbase.CellBuilder in project hbase by apache.

From the class ThriftHBaseServiceHandler, method mutateRowTs.

@Override
public void mutateRowTs(ByteBuffer tableName, ByteBuffer row, List<Mutation> mutations, long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError, IllegalArgument {
    Table table = null;
    try {
        table = getTable(tableName);
        Put put = new Put(getBytes(row), timestamp);
        addAttributes(put, attributes);
        Delete delete = new Delete(getBytes(row));
        addAttributes(delete, attributes);
        if (metrics != null) {
            metrics.incNumRowKeysInBatchMutate(mutations.size());
        }
        // I apologize for all this mess :)
        CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
        for (Mutation m : mutations) {
            byte[][] famAndQf = CellUtil.parseColumn(getBytes(m.column));
            if (m.isDelete) {
                if (famAndQf.length == 1) {
                    delete.addFamily(famAndQf[0], timestamp);
                } else {
                    delete.addColumns(famAndQf[0], famAndQf[1], timestamp);
                }
                delete.setDurability(m.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
            } else {
                if (famAndQf.length == 1) {
                    LOG.warn("No column qualifier specified. Delete is the only mutation supported " + "over the whole column family.");
                } else {
                    put.add(builder.clear()
                        .setRow(put.getRow())
                        .setFamily(famAndQf[0])
                        .setQualifier(famAndQf[1])
                        .setTimestamp(put.getTimestamp())
                        .setType(Cell.Type.Put)
                        .setValue(m.value != null ? getBytes(m.value) : HConstants.EMPTY_BYTE_ARRAY)
                        .build());
                }
                put.setDurability(m.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
            }
        }
        if (!delete.isEmpty()) {
            table.delete(delete);
        }
        if (!put.isEmpty()) {
            table.put(put);
        }
    } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw getIOError(e);
    } catch (IllegalArgumentException e) {
        LOG.warn(e.getMessage(), e);
        throw new IllegalArgument(Throwables.getStackTraceAsString(e));
    } finally {
        closeTable(table);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) CellBuilder(org.apache.hadoop.hbase.CellBuilder) BatchMutation(org.apache.hadoop.hbase.thrift.generated.BatchMutation) Mutation(org.apache.hadoop.hbase.thrift.generated.Mutation) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) IllegalArgument(org.apache.hadoop.hbase.thrift.generated.IllegalArgument) Put(org.apache.hadoop.hbase.client.Put)
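
One detail worth calling out in the handler above: CellUtil.parseColumn splits the Thrift-style column name on the ':' family delimiter and, as I understand its contract, returns a one-element array when no qualifier is present, which is why famAndQf.length == 1 is treated as "operate on the whole column family". A small illustration, with placeholder column names:

// parseColumn splits "family:qualifier" byte arrays on ':'.
byte[][] famOnly = CellUtil.parseColumn(Bytes.toBytes("cf"));      // length 1: { "cf" }
byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes("cf:foo")); // length 2: { "cf", "foo" }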

Aggregations

CellBuilder (org.apache.hadoop.hbase.CellBuilder) 9
IOException (java.io.IOException) 3
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 3
Put (org.apache.hadoop.hbase.client.Put) 3
Test (org.junit.Test) 3
File (java.io.File) 2
FileOutputStream (java.io.FileOutputStream) 2
ArrayList (java.util.ArrayList) 2
Cell (org.apache.hadoop.hbase.Cell) 2
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 2
KeyValue (org.apache.hadoop.hbase.KeyValue) 2
Delete (org.apache.hadoop.hbase.client.Delete) 2
Table (org.apache.hadoop.hbase.client.Table) 2
HFile (org.apache.hadoop.hbase.io.hfile.HFile) 2
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) 2
BatchMutation (org.apache.hadoop.hbase.thrift.generated.BatchMutation) 2
IllegalArgument (org.apache.hadoop.hbase.thrift.generated.IllegalArgument) 2
Mutation (org.apache.hadoop.hbase.thrift.generated.Mutation) 2
Configuration (org.apache.hadoop.conf.Configuration) 1
Path (org.apache.hadoop.fs.Path) 1