
Example 71 with Put

Use of org.apache.hadoop.hbase.client.Put in project pinpoint by naver.

From class HbaseTraceDao, method insert:

@Override
public void insert(final SpanBo spanBo) {
    if (spanBo == null) {
        throw new NullPointerException("span must not be null");
    }
    long acceptedTime = spanBo.getCollectorAcceptTime();
    TransactionId transactionId = spanBo.getTransactionId();
    final byte[] rowKey = rowKeyEncoder.encodeRowKey(transactionId);
    // Stamp every cell in this row with the collector-accept time.
    final Put put = new Put(rowKey, acceptedTime);
    this.spanSerializer.serialize(spanBo, put, null);
    this.annotationSerializer.serialize(spanBo, put, null);
    addNestedSpanEvent(put, spanBo);
    // Try the buffered asynchronous write first; if it is not accepted,
    // fall back to a synchronous put so the span is not lost.
    boolean success = hbaseTemplate.asyncPut(TRACES, put);
    if (!success) {
        hbaseTemplate.put(TRACES, put);
    }
}
Also used : Put(org.apache.hadoop.hbase.client.Put) TransactionId(com.navercorp.pinpoint.common.util.TransactionId)
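
For context, a minimal, hypothetical sketch of the Put lifecycle the DAO above relies on; the table name, column family, and payload are illustrative placeholders, not pinpoint's actual schema.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

static void writeSpanRow(Connection connection) throws java.io.IOException {
    try (Table table = connection.getTable(TableName.valueOf("Traces"))) {
        byte[] rowKey = Bytes.toBytes("example-transaction-id");
        long acceptedTime = System.currentTimeMillis();
        // The (rowKey, timestamp) constructor stamps every cell added to this
        // Put with acceptedTime, which is how the DAO versions the span row.
        Put put = new Put(rowKey, acceptedTime);
        put.addColumn(Bytes.toBytes("S"), Bytes.toBytes("span"), Bytes.toBytes("payload"));
        table.put(put);
    }
}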

Example 72 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

From class TestHBaseFsckReplicas, method testHbckWithExcessReplica:

@Test(timeout = 180000)
public void testHbckWithExcessReplica() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        setupTableWithRegionReplica(tableName, 2);
        admin.flush(tableName);
        assertNoErrors(doFsck(conf, false));
        assertEquals(ROWKEYS.length, countRows());
        // The next few lines inject a location in meta for a replica and then
        // ask the master to assign the replica. The meta entry must be injected
        // first so the master treats the assignment request as valid; the master
        // checks that the region is valid either from its memory or from meta.
        Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
        List<HRegionInfo> regions = admin.getTableRegions(tableName);
        byte[] startKey = Bytes.toBytes("B");
        byte[] endKey = Bytes.toBytes("C");
        byte[] metaKey = null;
        HRegionInfo newHri = null;
        for (HRegionInfo h : regions) {
            if (Bytes.compareTo(h.getStartKey(), startKey) == 0 && Bytes.compareTo(h.getEndKey(), endKey) == 0 && h.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
                metaKey = h.getRegionName();
                // create an HRegionInfo with replicaId 2 (replicas 0 and 1 already exist)
                newHri = RegionReplicaUtil.getRegionInfoForReplica(h, 2);
                break;
            }
        }
        Put put = new Put(metaKey);
        Collection<ServerName> servers = admin.getClusterStatus().getServers();
        ServerName sn = servers.toArray(new ServerName[servers.size()])[0];
        // add a location with replicaId 2 (replicas 0 and 1 already exist)
        MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2);
        meta.put(put);
        // assign the new replica
        HBaseFsckRepair.fixUnassigned(admin, newHri);
        HBaseFsckRepair.waitUntilAssigned(admin, newHri);
        // now reset the meta row to its original value
        Delete delete = new Delete(metaKey);
        delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(2));
        delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(2));
        delete.addColumns(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(2));
        meta.delete(delete);
        meta.close();
        // check that problem exists
        HBaseFsck hbck = doFsck(conf, false);
        assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META });
        // fix the problem
        hbck = doFsck(conf, true);
        // run hbck again to make sure we don't see any errors
        hbck = doFsck(conf, false);
        assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {});
    } finally {
        cleanupTable(tableName);
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ServerName(org.apache.hadoop.hbase.ServerName) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
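
For reference, a hedged sketch of roughly what the MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, 2) call above writes into the Put; the replica-suffixed qualifier names are an assumption about this HBase version's hbase:meta layout.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

static Put locationPutForReplica2(byte[] metaKey, ServerName sn) {
    Put put = new Put(metaKey);
    // host:port hosting replica 2 (the "_0002" qualifier suffix is an assumption)
    put.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("server_0002"),
            Bytes.toBytes(sn.getHostAndPort()));
    // start code distinguishes restarts of the same region server
    put.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("serverstartcode_0002"),
            Bytes.toBytes(sn.getStartcode()));
    // -1: no known open sequence id yet, matching the -1 the test passes
    put.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("seqnumDuringOpen_0002"),
            Bytes.toBytes(-1L));
    return put;
}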

Example 73 with Put

Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

From class TestHBaseFsckOneRS, method testMissingRegionInfoQualifier:

/**
   * Test missing REGIONINFO_QUALIFIER in hbase:meta
   */
@Test(timeout = 180000)
public void testMissingRegionInfoQualifier() throws Exception {
    Connection connection = ConnectionFactory.createConnection(conf);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        setupTable(tableName);
        // Mess it up by removing the RegionInfo for one region.
        final List<Delete> deletes = new LinkedList<>();
        Table meta = connection.getTable(TableName.META_TABLE_NAME, hbfsckExecutorService);
        MetaTableAccessor.fullScanRegions(connection, new MetaTableAccessor.Visitor() {

            @Override
            public boolean visit(Result rowResult) throws IOException {
                HRegionInfo hri = MetaTableAccessor.getHRegionInfo(rowResult);
                if (hri != null && !hri.getTable().isSystemTable()) {
                    Delete delete = new Delete(rowResult.getRow());
                    delete.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
                    deletes.add(delete);
                }
                return true;
            }
        });
        meta.delete(deletes);
        // Mess it up by creating a fake hbase:meta entry with no associated RegionInfo
        meta.put(new Put(Bytes.toBytes(tableName + ",,1361911384013.810e28f59a57da91c66")).addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes("node1:60020")));
        meta.put(new Put(Bytes.toBytes(tableName + ",,1361911384013.810e28f59a57da91c66")).addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(1362150791183L)));
        meta.close();
        HBaseFsck hbck = doFsck(conf, false);
        assertTrue(hbck.getErrors().getErrorList().contains(HBaseFsck.ErrorReporter.ERROR_CODE.EMPTY_META_CELL));
        // fix the empty meta cells
        hbck = doFsck(conf, true);
        // check that the empty meta cells are gone
        assertFalse(hbck.getErrors().getErrorList().contains(HBaseFsck.ErrorReporter.ERROR_CODE.EMPTY_META_CELL));
    } finally {
        cleanupTable(tableName);
    }
    connection.close();
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) IOException(java.io.IOException) LinkedList(java.util.LinkedList) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)
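
As an aside, the fake row keys above imitate the hbase:meta row key anatomy, <table>,<startKey>,<regionId>.<encodedName>; a hypothetical sketch with the test's illustrative values (real meta keys also end with a trailing dot).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

static byte[] fakeMetaRowKey(TableName tableName) {
    String startKey = "";                        // empty start key: first region
    String regionId = "1361911384013";           // creation-timestamp component
    String encodedName = "810e28f59a57da91c66";  // truncated encoded region name
    return Bytes.toBytes(tableName + "," + startKey + "," + regionId + "." + encodedName);
}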

Example 74 with Put

Use of org.apache.hadoop.hbase.client.Put in project hive by apache.

From class MockUtils, method init:

static HBaseStore init(Configuration conf, HTableInterface htable, final SortedMap<String, Cell> rows) throws IOException {
    ((HiveConf) conf).setVar(ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, NOOPProxy.class.getName());
    Mockito.when(htable.get(Mockito.any(Get.class))).thenAnswer(new Answer<Result>() {

        @Override
        public Result answer(InvocationOnMock invocation) throws Throwable {
            Get get = (Get) invocation.getArguments()[0];
            Cell cell = rows.get(new String(get.getRow()));
            if (cell == null) {
                return new Result();
            } else {
                return Result.create(new Cell[] { cell });
            }
        }
    });
    Mockito.when(htable.get(Mockito.anyListOf(Get.class))).thenAnswer(new Answer<Result[]>() {

        @Override
        public Result[] answer(InvocationOnMock invocation) throws Throwable {
            @SuppressWarnings("unchecked") List<Get> gets = (List<Get>) invocation.getArguments()[0];
            Result[] results = new Result[gets.size()];
            for (int i = 0; i < gets.size(); i++) {
                Cell cell = rows.get(new String(gets.get(i).getRow()));
                Result result;
                if (cell == null) {
                    result = new Result();
                } else {
                    result = Result.create(new Cell[] { cell });
                }
                results[i] = result;
            }
            return results;
        }
    });
    Mockito.when(htable.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer<ResultScanner>() {

        @Override
        public ResultScanner answer(InvocationOnMock invocation) throws Throwable {
            Scan scan = (Scan) invocation.getArguments()[0];
            List<Result> results = new ArrayList<Result>();
            String start = new String(scan.getStartRow());
            String stop = new String(scan.getStopRow());
            SortedMap<String, Cell> sub = rows.subMap(start, stop);
            for (Map.Entry<String, Cell> e : sub.entrySet()) {
                results.add(Result.create(new Cell[] { e.getValue() }));
            }
            final Iterator<Result> iter = results.iterator();
            // Only iterator() is backed by real data; next() and next(n) are inert
            // stubs, so callers must consume this scanner through its iterator.
            return new ResultScanner() {

                @Override
                public Result next() throws IOException {
                    return null;
                }

                @Override
                public Result[] next(int nbRows) throws IOException {
                    return new Result[0];
                }

                @Override
                public void close() {
                }

                @Override
                public Iterator<Result> iterator() {
                    return iter;
                }
            };
        }
    });
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            Put put = (Put) invocation.getArguments()[0];
            rows.put(new String(put.getRow()), put.getFamilyCellMap().firstEntry().getValue().get(0));
            return null;
        }
    }).when(htable).put(Mockito.any(Put.class));
    Mockito.when(htable.checkAndPut(Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(Put.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            // Always say it succeeded and overwrite
            Put put = (Put) invocation.getArguments()[4];
            rows.put(new String(put.getRow()), put.getFamilyCellMap().firstEntry().getValue().get(0));
            return true;
        }
    });
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            Delete del = (Delete) invocation.getArguments()[0];
            rows.remove(new String(del.getRow()));
            return null;
        }
    }).when(htable).delete(Mockito.any(Delete.class));
    Mockito.when(htable.checkAndDelete(Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(byte[].class), Mockito.any(Delete.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            // Always say it succeeded
            Delete del = (Delete) invocation.getArguments()[4];
            rows.remove(new String(del.getRow()));
            return true;
        }
    });
    // Mock connection
    HBaseConnection hconn = Mockito.mock(HBaseConnection.class);
    Mockito.when(hconn.getHBaseTable(Mockito.anyString())).thenReturn(htable);
    HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS, HBaseReadWrite.TEST_CONN);
    HBaseReadWrite.setTestConnection(hconn);
    HBaseReadWrite.setConf(conf);
    HBaseStore store = new HBaseStore();
    store.setConf(conf);
    return store;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Result(org.apache.hadoop.hbase.client.Result) Iterator(java.util.Iterator) HiveConf(org.apache.hadoop.hive.conf.HiveConf) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Get(org.apache.hadoop.hbase.client.Get) SortedMap(java.util.SortedMap) Scan(org.apache.hadoop.hbase.client.Scan)
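
A hedged usage sketch, based only on the signature above: the SortedMap doubles as the table's storage, so puts, gets, deletes, and scans round-trip in memory with no cluster. The method name is illustrative.

import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hive.conf.HiveConf;
import org.mockito.Mockito;

static HBaseStore newInMemoryStore() throws java.io.IOException {
    SortedMap<String, Cell> rows = new TreeMap<>();   // backing "table"
    HTableInterface htable = Mockito.mock(HTableInterface.class);
    // init() wires the mock's get/put/delete/scanner answers to 'rows'
    return MockUtils.init(new HiveConf(), htable, rows);
}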

Example 75 with Put

Use of org.apache.hadoop.hbase.client.Put in project tdi-studio-se by Talend.

From class HBaseStore, method run:

public static void run(String zookeeperHost, String zookeeperPort, String table, final String columns, Map<String, String> properties, TalendRDD<List<Object>> rdd, final List<Integer> keyList) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", zookeeperHost);
    conf.set("hbase.zookeeper.property.clientPort", zookeeperPort);
    conf.set("hbase.mapred.tablecolumns", columns);
    for (Entry<String, String> e : properties.entrySet()) {
        conf.set(e.getKey(), e.getValue());
    }
    TalendPairRDD<ImmutableBytesWritable, Put> hbaseRdd = rdd.mapToPair(new PairFunction<List<Object>, ImmutableBytesWritable, Put>() {

        private static final long serialVersionUID = 1L;

        public Tuple2<ImmutableBytesWritable, Put> call(List<Object> t) throws Exception {
            String key = "";
            for (int i : keyList) {
                key = key + t.get(i);
            }
            // Row key: MD5 of the concatenated key fields, or of the whole record
            // when no key columns are configured.
            org.apache.hadoop.hbase.client.Put put =
                    new org.apache.hadoop.hbase.client.Put(DigestUtils.md5("".equals(key) ? t.toString() : key));
            String[] cols = columns.split(" ");
            int i = 0;
            for (Object o : t) {
                if (cols.length > i) {
                    // Each column spec is "family:qualifier"; null values are stored as null payloads.
                    String[] fq = cols[i].split(":");
                    put.add(org.apache.hadoop.hbase.util.Bytes.toBytes(fq[0]),
                            org.apache.hadoop.hbase.util.Bytes.toBytes(fq[1]),
                            o != null ? org.apache.hadoop.hbase.util.Bytes.toBytes(o.toString()) : null);
                }
                i++;
            }
            return new Tuple2<ImmutableBytesWritable, Put>(new ImmutableBytesWritable(), put);
        }
    });
    JobConf config = new JobConf(conf);
    config.set(TableOutputFormat.OUTPUT_TABLE, table);
    config.setOutputFormat(TableOutputFormat.class);
    hbaseRdd.saveAsHadoopDataset(config);
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) Put(org.apache.hadoop.hbase.client.Put) IOException(java.io.IOException) Tuple2(scala.Tuple2) List(java.util.List) JobConf(org.apache.hadoop.mapred.JobConf)
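
The mapper above assumes a space-separated "family:qualifier" column spec matched positionally to each record's values; a minimal standalone sketch of that convention follows (the helper name is illustrative, and put.add is the pre-1.0 HBase API the example itself uses).

import java.util.List;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

static void addCells(Put put, String columns, List<Object> record) {
    String[] cols = columns.split(" ");            // e.g. "cf:name cf:age"
    for (int i = 0; i < record.size() && i < cols.length; i++) {
        String[] fq = cols[i].split(":");          // [family, qualifier]
        Object value = record.get(i);
        // null values become null cell payloads, mirroring the job above
        put.add(Bytes.toBytes(fq[0]), Bytes.toBytes(fq[1]),
                value != null ? Bytes.toBytes(value.toString()) : null);
    }
}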

Aggregations

Put (org.apache.hadoop.hbase.client.Put): 1416
Test (org.junit.Test): 672
Table (org.apache.hadoop.hbase.client.Table): 489
ArrayList (java.util.ArrayList): 317
Result (org.apache.hadoop.hbase.client.Result): 279
TableName (org.apache.hadoop.hbase.TableName): 257
IOException (java.io.IOException): 241
Delete (org.apache.hadoop.hbase.client.Delete): 225
Scan (org.apache.hadoop.hbase.client.Scan): 222
Cell (org.apache.hadoop.hbase.Cell): 200
Get (org.apache.hadoop.hbase.client.Get): 196
Configuration (org.apache.hadoop.conf.Configuration): 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 139
Connection (org.apache.hadoop.hbase.client.Connection): 122
KeyValue (org.apache.hadoop.hbase.KeyValue): 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 110
Admin (org.apache.hadoop.hbase.client.Admin): 89
List (java.util.List): 83
Mutation (org.apache.hadoop.hbase.client.Mutation): 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 80