Example 11 with NavigableMap

use of java.util.NavigableMap in project hbase by apache.

the class Increment method getFamilyMapOfLongs.

/**
   * Before 0.95, when you called Increment#getFamilyMap(), you got back
   * a map of families to a list of Longs.  Now, {@link #getFamilyCellMap()} returns
   * families by list of Cells.  This method has been added so you can have the
   * old behavior.
   * @return Map of families to a Map of qualifiers and their Long increments.
   * @since 0.95.0
   */
public Map<byte[], NavigableMap<byte[], Long>> getFamilyMapOfLongs() {
    NavigableMap<byte[], List<Cell>> map = super.getFamilyCellMap();
    Map<byte[], NavigableMap<byte[], Long>> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], List<Cell>> entry : map.entrySet()) {
        NavigableMap<byte[], Long> longs = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Cell cell : entry.getValue()) {
            longs.put(CellUtil.cloneQualifier(cell), Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
        }
        results.put(entry.getKey(), longs);
    }
    return results;
}
Also used : NavigableMap(java.util.NavigableMap) List(java.util.List) TreeMap(java.util.TreeMap) Map(java.util.Map) Cell(org.apache.hadoop.hbase.Cell)
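
For reference, a minimal, hypothetical sketch of how this compatibility method might be consumed; the row key, family, and qualifier bytes below are placeholders, not part of the HBase source above.

import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementDump {
    // Build an Increment locally, then read it back through the pre-0.95-style view.
    public static void main(String[] args) {
        byte[] family = Bytes.toBytes("f");      // placeholder family
        byte[] qualifier = Bytes.toBytes("ctr"); // placeholder qualifier
        Increment inc = new Increment(Bytes.toBytes("row1"));
        inc.addColumn(family, qualifier, 5L);
        for (Map.Entry<byte[], NavigableMap<byte[], Long>> fam : inc.getFamilyMapOfLongs().entrySet()) {
            for (Map.Entry<byte[], Long> qual : fam.getValue().entrySet()) {
                System.out.println(Bytes.toString(fam.getKey()) + ":"
                    + Bytes.toString(qual.getKey()) + " += " + qual.getValue());
            }
        }
    }
}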

Example 12 with NavigableMap

use of java.util.NavigableMap in project hbase by apache.

the class TestFromClientSide method testClientPoolRoundRobin.

@Test
public void testClientPoolRoundRobin() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    int poolSize = 3;
    int numVersions = poolSize * 2;
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "round-robin");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);
    Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, Integer.MAX_VALUE);
    final long ts = EnvironmentEdgeManager.currentTime();
    Get get = new Get(ROW);
    get.addColumn(FAMILY, QUALIFIER);
    get.setMaxVersions();
    for (int versions = 1; versions <= numVersions; versions++) {
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, ts + versions, VALUE);
        table.put(put);
        Result result = table.get(get);
        NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);
        assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " + versions, versions, navigableMap.size());
        for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
            assertTrue("The value at time " + entry.getKey() + " did not match what was put", Bytes.equals(VALUE, entry.getValue()));
        }
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Configuration(org.apache.hadoop.conf.Configuration) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) Test(org.junit.Test)
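
Outside the test harness, the same two settings drive the round-robin IPC pool on any client configuration; a minimal sketch (table and connection setup elided):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class PoolConfig {
    // Cycle client RPCs across a pool of three connections per server.
    public static Configuration roundRobinConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "round-robin");
        conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 3);
        return conf;
    }
}

A Connection built from this Configuration will spread its calls across the pool instead of funneling everything through one socket, which is what the test above exercises by writing and reading more versions than there are pool slots.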

Example 13 with NavigableMap

use of java.util.NavigableMap in project hbase by apache.

the class TestHBaseFsckReplicas method testNotInHdfsWithReplicas.

/**
   * This creates and fixes a bad table with a region that is in meta but has
   * no deployment or data hdfs. The table has region_replication set to 2.
   */
@Test(timeout = 180000)
public void testNotInHdfsWithReplicas() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HRegionInfo[] oldHris = new HRegionInfo[2];
        setupTableWithRegionReplica(tableName, 2);
        assertEquals(ROWKEYS.length, countRows());
        NavigableMap<HRegionInfo, ServerName> map = MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(), tbl.getName());
        int i = 0;
        // store the HRIs of the regions we will mess up
        for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) {
            if (m.getKey().getStartKey().length > 0 && m.getKey().getStartKey()[0] == Bytes.toBytes("B")[0]) {
                LOG.debug("Initially server hosting " + m.getKey() + " is " + m.getValue());
                oldHris[i++] = m.getKey();
            }
        }
        // make sure data in regions
        admin.flush(tableName);
        // Mess it up by leaving a hole in the hdfs data
        deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"),
            false, false, // don't rm meta
            true);
        HBaseFsck hbck = doFsck(conf, false);
        assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_HDFS });
        // fix hole
        doFsck(conf, true);
        // check that hole fixed
        assertNoErrors(doFsck(conf, false));
        assertEquals(ROWKEYS.length - 2, countRows());
        // the following code checks whether the old primary/secondary has
        // been unassigned and the new primary/secondary has been assigned
        i = 0;
        HRegionInfo[] newHris = new HRegionInfo[2];
        // get all table's regions from meta
        map = MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(), tbl.getName());
        // get the HRIs of the new regions (hbck created new regions for fixing the hdfs mess-up)
        for (Map.Entry<HRegionInfo, ServerName> m : map.entrySet()) {
            if (m.getKey().getStartKey().length > 0 && m.getKey().getStartKey()[0] == Bytes.toBytes("B")[0]) {
                newHris[i++] = m.getKey();
            }
        }
        // get all the online regions in the regionservers
        Collection<ServerName> servers = admin.getClusterStatus().getServers();
        Set<HRegionInfo> onlineRegions = new HashSet<>();
        for (ServerName s : servers) {
            List<HRegionInfo> list = admin.getOnlineRegions(s);
            onlineRegions.addAll(list);
        }
        // the new HRIs must be a subset of the online regions
        assertTrue(onlineRegions.containsAll(Arrays.asList(newHris)));
        // the old HRIs must not be part of the set (removeAll would return false if
        // the set didn't change)
        assertFalse(onlineRegions.removeAll(Arrays.asList(oldHris)));
    } finally {
        cleanupTable(tableName);
        admin.close();
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashSet(java.util.HashSet) Test(org.junit.Test)
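
The NavigableMap returned by MetaTableAccessor.allTableRegions is the backbone of the test above; a hypothetical standalone sketch of the same lookup, assuming an existing Connection and TableName (note this accessor is from the HBase 1.x era):

import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;

public class RegionLocations {
    // Print which server meta says is hosting each region of a table.
    static void dump(Connection connection, TableName tableName) throws IOException {
        NavigableMap<HRegionInfo, ServerName> regions =
            MetaTableAccessor.allTableRegions(connection, tableName);
        for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
            System.out.println(e.getKey().getRegionNameAsString() + " -> " + e.getValue());
        }
    }
}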

Example 14 with NavigableMap

use of java.util.NavigableMap in project hive by apache.

the class HBaseReadWrite method printOneTable.

private String printOneTable(Result result) throws IOException, TException {
    byte[] key = result.getRow();
    HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializeTable(key, result.getValue(CATALOG_CF, CATALOG_COL));
    StringBuilder builder = new StringBuilder();
    builder.append(dumpThriftObject(sdParts.containingTable)).append(" sdHash: ").append(Base64.encodeBase64URLSafeString(sdParts.sdHash)).append(" stats:");
    NavigableMap<byte[], byte[]> statsCols = result.getFamilyMap(STATS_CF);
    for (Map.Entry<byte[], byte[]> statsCol : statsCols.entrySet()) {
        builder.append(" column ").append(new String(statsCol.getKey(), HBaseUtils.ENCODING)).append(": ");
        ColumnStatistics pcs = buildColStats(key, true);
        ColumnStatisticsObj cso = HBaseUtils.deserializeStatsForOneColumn(pcs, statsCol.getValue());
        builder.append(dumpThriftObject(cso));
    }
    // Add the primary key
    List<SQLPrimaryKey> pk = getPrimaryKey(sdParts.containingTable.getDbName(), sdParts.containingTable.getTableName());
    if (pk != null && pk.size() > 0) {
        builder.append(" primary key: ");
        for (SQLPrimaryKey pkcol : pk) builder.append(dumpThriftObject(pkcol));
    }
    // Add any foreign keys
    List<SQLForeignKey> fks = getForeignKeys(sdParts.containingTable.getDbName(), sdParts.containingTable.getTableName());
    if (fks != null && fks.size() > 0) {
        builder.append(" foreign keys: ");
        for (SQLForeignKey fkcol : fks) builder.append(dumpThriftObject(fkcol));
    }
    return builder.toString();
}
Also used : ColumnStatistics(org.apache.hadoop.hive.metastore.api.ColumnStatistics) SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey) SQLForeignKey(org.apache.hadoop.hive.metastore.api.SQLForeignKey) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) Map(java.util.Map) NavigableMap(java.util.NavigableMap) HashMap(java.util.HashMap)
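

The getFamilyMap call above is the general pattern for walking one column family of a Result; a minimal, hypothetical sketch isolating just that step:

import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyDump {
    // Print every qualifier/value pair stored under one column family of a row.
    static void dump(Result result, byte[] family) {
        NavigableMap<byte[], byte[]> cols = result.getFamilyMap(family);
        if (cols == null) {
            return; // the family is absent from this row
        }
        for (Map.Entry<byte[], byte[]> col : cols.entrySet()) {
            System.out.println(Bytes.toString(col.getKey()) + " = " + Bytes.toString(col.getValue()));
        }
    }
}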

Example 15 with NavigableMap

use of java.util.NavigableMap in project Solbase by Photobucket.

the class DocumentLoader method loadObject.

public CachedObjectWrapper<Document, Long> loadObject(Integer docNum, int start, int end, LayeredCache<Integer, Document, Long, ParsedDoc> cache) throws IOException {
    Document document = new Document();
    Get documentGet = new Get(SolbaseUtil.randomize(docNum));
    if (fieldNames == null || fieldNames.size() == 0) {
        // get all columns ( except this skips meta info )
        documentGet.addFamily(Bytes.toBytes("field"));
    } else {
        for (byte[] fieldName : fieldNames) {
            documentGet.addColumn(Bytes.toBytes("field"), fieldName);
        }
    }
    Result documentResult = null;
    // if docTable is set up, reuse instance, otherwise create brand new one and close after done
    if (this.docTable == null) {
        HTableInterface docTable = null;
        try {
            docTable = SolbaseUtil.getDocTable();
            documentResult = docTable.get(documentGet);
        } finally {
            SolbaseUtil.releaseTable(docTable);
        }
    } else {
        documentResult = this.docTable.get(documentGet);
    }
    if (documentResult == null || documentResult.isEmpty()) {
        return null;
    }
    // TODO, get from result
    Long versionIdentifier = 0L;
    NavigableMap<byte[], byte[]> familyMap = documentResult.getFamilyMap(Bytes.toBytes("field"));
    for (Map.Entry<byte[], byte[]> fieldColumn : familyMap.entrySet()) {
        Field field = null;
        String fieldName = Bytes.toString(fieldColumn.getKey());
        byte[] value;
        ByteBuffer v = ByteBuffer.wrap(fieldColumn.getValue());
        int vlimit = v.limit() + v.arrayOffset();
        if (v.array()[vlimit - 1] != Byte.MAX_VALUE && v.array()[vlimit - 1] != Byte.MIN_VALUE) {
            throw new CorruptIndexException("Solbase field is not properly encoded: " + docNum + "(" + fieldName + ")");
        } else if (v.array()[vlimit - 1] == Byte.MAX_VALUE) {
            // Binary
            value = new byte[vlimit - 1];
            System.arraycopy(v.array(), v.position() + v.arrayOffset(), value, 0, vlimit - 1);
            field = new Field(fieldName, value, Store.YES);
            document.add(field);
        } else if (v.array()[vlimit - 1] == Byte.MIN_VALUE) {
            // String
            value = new byte[vlimit - 1];
            System.arraycopy(v.array(), v.position() + v.arrayOffset(), value, 0, vlimit - 1);
            // Check for multi-fields
            String fieldString = new String(value, "UTF-8");
            if (fieldString.indexOf(Bytes.toString(SolbaseUtil.delimiter)) >= 0) {
                StringTokenizer tok = new StringTokenizer(fieldString, Bytes.toString(SolbaseUtil.delimiter));
                while (tok.hasMoreTokens()) {
                    // update logic
                    if (schema != null) {
                        SchemaField sfield = schema.getFieldOrNull(fieldName);
                        if (sfield.getType() instanceof EmbeddedIndexedIntField) {
                            EmbeddedIndexedIntField eiif = (EmbeddedIndexedIntField) sfield.getType();
                            EmbeddedSortField sf = new EmbeddedSortField(fieldName, tok.nextToken(), Field.Store.YES, Field.Index.NO, eiif.getFieldNumber());
                            document.add(sf);
                        } else {
                            Field f = sfield.createField(tok.nextToken(), 1.0f);
                            if (f != null) {
                                // null fields are not added
                                document.add(f);
                            }
                        }
                    } else {
                        field = new Field(fieldName, tok.nextToken(), Store.YES, Index.ANALYZED);
                        document.add(field);
                    }
                }
            } else {
                // update logic
                if (schema != null) {
                    SchemaField sfield = schema.getFieldOrNull(fieldName);
                    if (sfield.getType() instanceof EmbeddedIndexedIntField) {
                        EmbeddedIndexedIntField eiif = (EmbeddedIndexedIntField) sfield.getType();
                        EmbeddedSortField sf = new EmbeddedSortField(fieldName, fieldString, Field.Store.YES, Field.Index.NO, eiif.getFieldNumber());
                        document.add(sf);
                    } else {
                        Field f = sfield.createField(fieldString, 1.0f);
                        if (f != null) {
                            // null fields are not added
                            document.add(f);
                        }
                    }
                } else {
                    field = new Field(fieldName, fieldString, Store.YES, Index.ANALYZED);
                    document.add(field);
                }
            }
        }
    }
    return new CachedObjectWrapper<Document, Long>(document, versionIdentifier, System.currentTimeMillis());
}
Also used : CachedObjectWrapper(org.solbase.cache.CachedObjectWrapper) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) EmbeddedSortField(org.apache.lucene.document.EmbeddedSortField) Document(org.apache.lucene.document.Document) SolrInputDocument(org.apache.solr.common.SolrInputDocument) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ByteBuffer(java.nio.ByteBuffer) Result(org.apache.hadoop.hbase.client.Result) SchemaField(org.apache.solr.schema.SchemaField) EmbeddedIndexedIntField(org.apache.solr.schema.EmbeddedIndexedIntField) Field(org.apache.lucene.document.Field) StringTokenizer(java.util.StringTokenizer) Get(org.apache.hadoop.hbase.client.Get) Map(java.util.Map) NavigableMap(java.util.NavigableMap)
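
The branching in loadObject hinges on Solbase's trailing sentinel byte (Byte.MAX_VALUE marks a binary payload, Byte.MIN_VALUE a string); a hypothetical helper isolating just that convention, not part of the Solbase source:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class SolbaseFieldCodec {
    // Classify a raw Solbase field value by its trailing sentinel byte.
    static String describe(byte[] raw) {
        byte sentinel = raw[raw.length - 1];
        byte[] payload = Arrays.copyOf(raw, raw.length - 1); // strip the sentinel
        if (sentinel == Byte.MAX_VALUE) {
            return "binary field, " + payload.length + " bytes";
        } else if (sentinel == Byte.MIN_VALUE) {
            return "string field: " + new String(payload, StandardCharsets.UTF_8);
        }
        return "corrupt field: unrecognized sentinel " + sentinel;
    }
}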

Aggregations

NavigableMap (java.util.NavigableMap) 170
Map (java.util.Map) 84
TreeMap (java.util.TreeMap) 61
SortedMap (java.util.SortedMap) 34
ArrayList (java.util.ArrayList) 33
List (java.util.List) 27
Iterator (java.util.Iterator) 21
HashMap (java.util.HashMap) 20
Cell (org.apache.hadoop.hbase.Cell) 20
Result (org.apache.hadoop.hbase.client.Result) 18
Set (java.util.Set) 14
Get (org.apache.hadoop.hbase.client.Get) 13
IOException (java.io.IOException) 12
KeyValue (org.apache.hadoop.hbase.KeyValue) 10
Put (org.apache.hadoop.hbase.client.Put) 10
Test (org.junit.Test) 10
Entry (java.util.Map.Entry) 9
Update (co.cask.cdap.data2.dataset2.lib.table.Update) 7
TestSuite (junit.framework.TestSuite) 7
ImmutableMap (com.google.common.collect.ImmutableMap) 6