Example 56 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class ReplicationIT method singleTableWithSingleTarget.

@Test
public void singleTableWithSingleTarget() throws Exception {
    // We want to kill the GC so it doesn't come along and close Status records and mess up the comparisons
    // against expected Status messages.
    getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
    Connector conn = getConnector();
    String table1 = "table1";
    // replication shouldn't be online when we begin
    Assert.assertFalse(ReplicationTable.isOnline(conn));
    // Create a table
    conn.tableOperations().create(table1);
    int attempts = 10;
    // setProperty may fail if the new table isn't visible everywhere yet, so retry
    while (attempts > 0) {
        try {
            // Enable replication on table1
            conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
            // Replicate table1 to cluster1 in the table with id of '4'
            conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
            // Sleep for 100 seconds before saying something is replicated
            conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "100000"));
            break;
        } catch (Exception e) {
            attempts--;
            if (attempts <= 0) {
                throw e;
            }
            sleepUninterruptibly(2, TimeUnit.SECONDS);
        }
    }
    // Write some data to table1
    writeSomeData(conn, table1, 2000, 50);
    // Make sure the replication table is online at this point
    while (!ReplicationTable.isOnline(conn)) {
        sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
    }
    Assert.assertTrue("Replication table was never created", ReplicationTable.isOnline(conn));
    // ACCUMULO-2743: the observer in the tserver has to be made aware of the change (made by the master) before the combiner appears
    for (int i = 0; i < 10 && !conn.tableOperations().listIterators(ReplicationTable.NAME).keySet().contains(ReplicationTable.COMBINER_NAME); i++) {
        sleepUninterruptibly(2, TimeUnit.SECONDS);
    }
    Assert.assertTrue("Combiner was never set on replication table", conn.tableOperations().listIterators(ReplicationTable.NAME).keySet().contains(ReplicationTable.COMBINER_NAME));
    // Trigger the minor compaction, waiting for it to finish.
    // This should write the entry to metadata that the file has data
    conn.tableOperations().flush(table1, null, null, true);
    // Make sure that we have one status element, should be a new file
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        StatusSection.limit(s);
        Entry<Key, Value> entry = null;
        Status expectedStatus = StatusUtil.openWithUnknownLength();
        attempts = 10;
        // The minor compaction (flush) should move this record from 'new' to 'new with an infinite end'
        while (null == entry && attempts > 0) {
            try {
                entry = Iterables.getOnlyElement(s);
                Status actual = Status.parseFrom(entry.getValue().get());
                if (actual.getInfiniteEnd() != expectedStatus.getInfiniteEnd()) {
                    entry = null;
                    // The master process hasn't fired and written the new mutation yet;
                    // wait for it to do so and try to read it again
                    Thread.sleep(1000);
                }
            } catch (NoSuchElementException e) {
                entry = null;
                Thread.sleep(500);
            } catch (IllegalArgumentException e) {
                // Iterables.getOnlyElement saw two elements once; dump the table contents and rethrow
                try (Scanner s2 = ReplicationTable.getScanner(conn)) {
                    StatusSection.limit(s2);
                    for (Entry<Key, Value> content : s2) {
                        log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
                    }
                    throw e;
                }
            } finally {
                attempts--;
            }
        }
        Assert.assertNotNull("Could not find expected entry in replication table", entry);
        Status actual = Status.parseFrom(entry.getValue().get());
        Assert.assertTrue("Expected to find a replication entry that is open with infinite length: " + ProtobufUtil.toString(actual), !actual.getClosed() && actual.getInfiniteEnd());
        // Try a couple of times to watch for the work record to be created
        boolean notFound = true;
        for (int i = 0; i < 10 && notFound; i++) {
            try (Scanner s2 = ReplicationTable.getScanner(conn)) {
                WorkSection.limit(s2);
                int elementsFound = Iterables.size(s2);
                if (0 < elementsFound) {
                    Assert.assertEquals(1, elementsFound);
                    notFound = false;
                }
                Thread.sleep(500);
            }
        }
        // If we didn't find the work record, print the contents of the table
        if (notFound) {
            try (Scanner s2 = ReplicationTable.getScanner(conn)) {
                for (Entry<Key, Value> content : s2) {
                    log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
                }
                Assert.assertFalse("Did not find the work entry for the status entry", notFound);
            }
        }
        // Write some more data so that we over-run the single WAL
        writeSomeData(conn, table1, 3000, 50);
        log.info("Issued compaction for table");
        conn.tableOperations().compact(table1, null, null, true, true);
        log.info("Compaction completed");
        // Master is creating entries in the replication table from the metadata table every second.
        // Compaction should trigger the record to be written to metadata. Wait a bit to ensure
        // that the master has time to work.
        Thread.sleep(5000);
        try (Scanner s2 = ReplicationTable.getScanner(conn)) {
            StatusSection.limit(s2);
            int numRecords = 0;
            for (Entry<Key, Value> e : s2) {
                numRecords++;
                log.info("Found status record {}\t{}", e.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(e.getValue().get())));
            }
            Assert.assertEquals(2, numRecords);
        }
        // We should eventually get 2 work records, but we need to account for a potential delay;
        // we might see: status1 -> work1 -> status2 -> (our scans) -> work2
        notFound = true;
        for (int i = 0; i < 10 && notFound; i++) {
            try (Scanner s2 = ReplicationTable.getScanner(conn)) {
                WorkSection.limit(s2);
                int elementsFound = Iterables.size(s2);
                if (2 == elementsFound) {
                    notFound = false;
                }
                Thread.sleep(500);
            }
        }
        // If we didn't find both work records, print the contents of the table
        if (notFound) {
            try (Scanner s2 = ReplicationTable.getScanner(conn)) {
                for (Entry<Key, Value> content : s2) {
                    log.info("{} => {}", content.getKey().toStringNoTruncate(), content.getValue());
                }
                Assert.assertFalse("Did not find the work entries for the status entries", notFound);
            }
        }
    }
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) URISyntaxException(java.net.URISyntaxException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) NoSuchElementException(java.util.NoSuchElementException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Entry(java.util.Map.Entry) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) NoSuchElementException(java.util.NoSuchElementException) Test(org.junit.Test)
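The polling logic above boils down to one reusable read pattern: scan the replication table restricted to its status section and decode each value as a Replication.Status protobuf. A minimal sketch, assuming a live Connector named conn and the same imports as the test above:

try (Scanner s = ReplicationTable.getScanner(conn)) {
    // Restrict the scan to status records
    StatusSection.limit(s);
    for (Entry<Key, Value> entry : s) {
        // Each value is a serialized Replication.Status protobuf
        Status status = Status.parseFrom(entry.getValue().get());
        log.info("{} closed={} infiniteEnd={}", entry.getKey().toStringNoTruncate(), status.getClosed(), status.getInfiniteEnd());
    }
}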

Example 57 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class ReplicationIT method noRecordsWithoutReplication.

@Test
public void noRecordsWithoutReplication() throws Exception {
    Connector conn = getConnector();
    List<String> tables = new ArrayList<>();
    // replication shouldn't be online when we begin
    Assert.assertFalse(ReplicationTable.isOnline(conn));
    for (int i = 0; i < 5; i++) {
        String name = "table" + i;
        tables.add(name);
        conn.tableOperations().create(name);
    }
    // nor after we create some tables (that aren't being replicated)
    Assert.assertFalse(ReplicationTable.isOnline(conn));
    for (String table : tables) {
        writeSomeData(conn, table, 5, 5);
    }
    // After writing data, still no replication table
    Assert.assertFalse(ReplicationTable.isOnline(conn));
    for (String table : tables) {
        conn.tableOperations().compact(table, null, null, true, true);
    }
    // After compacting data, still no replication table
    Assert.assertFalse(ReplicationTable.isOnline(conn));
    for (String table : tables) {
        conn.tableOperations().delete(table);
    }
    // After deleting tables, still no replication table
    Assert.assertFalse(ReplicationTable.isOnline(conn));
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ArrayList(java.util.ArrayList) Test(org.junit.Test)
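writeSomeData is a helper on the test class whose body is not shown on this page; a plausible minimal equivalent, sketched from the BatchWriter usage in the other examples (the exact row/column layout is an assumption):

private void writeSomeData(Connector conn, String table, int rows, int cols) throws Exception {
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    for (int row = 0; row < rows; row++) {
        Mutation m = new Mutation(Integer.toString(row));
        for (int col = 0; col < cols; col++) {
            // The column layout is hypothetical; the real helper's schema is not shown here
            m.put(Integer.toString(col), "", "value");
        }
        bw.addMutation(m);
    }
    bw.close();
}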

Example 58 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class ReplicationIT method correctRecordsCompleteFile.

@Test
public void correctRecordsCompleteFile() throws Exception {
    Connector conn = getConnector();
    String table = "table1";
    conn.tableOperations().create(table);
    // If we have more than one tserver, this is subject to a race condition.
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation(Integer.toString(i));
        m.put(new byte[0], new byte[0], new byte[0]);
        bw.addMutation(m);
    }
    bw.close();
    // After writing data, we'll get a replication table online
    while (!ReplicationTable.isOnline(conn)) {
        sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
    }
    Assert.assertTrue("Replication table did not exist", ReplicationTable.isOnline(conn));
    for (int i = 0; i < 5; i++) {
        if (conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ)) {
            break;
        }
        log.info("Could not read replication table, waiting and will retry");
        Thread.sleep(2000);
    }
    Assert.assertTrue("'root' user could not read the replication table", conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
    Set<String> replRows = new HashSet<>();
    int attempts = 5;
    while (replRows.isEmpty() && attempts > 0) {
        try (Scanner scanner = ReplicationTable.getScanner(conn)) {
            StatusSection.limit(scanner);
            for (Entry<Key, Value> entry : scanner) {
                Key k = entry.getKey();
                String fileUri = k.getRow().toString();
                try {
                    new URI(fileUri);
                } catch (URISyntaxException e) {
                    Assert.fail("Expected a valid URI: " + fileUri);
                }
                replRows.add(fileUri);
            }
        }
        // Decrement so the loop can't spin forever if no status records appear
        attempts--;
    }
    Set<String> wals = new HashSet<>();
    attempts = 5;
    Instance i = conn.getInstance();
    ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
    while (wals.isEmpty() && attempts > 0) {
        WalStateManager markers = new WalStateManager(i, zk);
        for (Entry<Path, WalState> entry : markers.getAllState().entrySet()) {
            wals.add(entry.getKey().toString());
        }
        attempts--;
    }
    // We only have one file that should need replication (no trace table)
    // We should find one entry referenced in the tablet metadata and one in the replication row
    Assert.assertEquals("Rows found: " + replRows, 1, replRows.size());
    // There should only be one extra WALog that replication doesn't know about
    replRows.removeAll(wals);
    Assert.assertEquals(2, wals.size());
    Assert.assertEquals(0, replRows.size());
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Instance(org.apache.accumulo.core.client.Instance) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) ZooReaderWriter(org.apache.accumulo.server.zookeeper.ZooReaderWriter) URISyntaxException(java.net.URISyntaxException) URI(java.net.URI) WalStateManager(org.apache.accumulo.server.log.WalStateManager) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) WalState(org.apache.accumulo.server.log.WalStateManager.WalState) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet) Test(org.junit.Test)
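The WAL marker lookup in this example is a self-contained pattern worth noting: WalStateManager reads the WAL state markers kept in ZooKeeper. A minimal sketch, assuming a live Connector named conn:

Instance inst = conn.getInstance();
ZooReaderWriter zk = new ZooReaderWriter(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut(), "");
WalStateManager markers = new WalStateManager(inst, zk);
// Each entry maps a WAL path to its current state (open, closed, or unreferenced)
for (Entry<Path, WalState> entry : markers.getAllState().entrySet()) {
    log.info("{} -> {}", entry.getKey(), entry.getValue());
}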

Example 59 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class ReplicationIT method replicationEntriesPrecludeWalDeletion.

@Test
public void replicationEntriesPrecludeWalDeletion() throws Exception {
    final Connector conn = getConnector();
    String table1 = "table1", table2 = "table2", table3 = "table3";
    final Multimap<String, Table.ID> logs = HashMultimap.create();
    final AtomicBoolean keepRunning = new AtomicBoolean(true);
    Thread t = new Thread(new Runnable() {

        @Override
        public void run() {
            // Continuously collect the WAL references from the metadata table while the test runs
            while (keepRunning.get()) {
                try {
                    logs.putAll(getAllLogs(conn));
                } catch (Exception e) {
                    log.error("Error getting logs", e);
                }
            }
        }
    });
    t.start();
    conn.tableOperations().create(table1);
    conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    Thread.sleep(2000);
    // Write some data to table1
    writeSomeData(conn, table1, 200, 500);
    conn.tableOperations().create(table2);
    conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    Thread.sleep(2000);
    writeSomeData(conn, table2, 200, 500);
    conn.tableOperations().create(table3);
    conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    Thread.sleep(2000);
    writeSomeData(conn, table3, 200, 500);
    // Force a write to metadata for the data written
    for (String table : Arrays.asList(table1, table2, table3)) {
        conn.tableOperations().flush(table, null, null, true);
    }
    keepRunning.set(false);
    t.join(5000);
    // The master only runs every second to create records in the replication table from the metadata table.
    // Sleep long enough to catch any straggling WALs that were created near the end.
    Thread.sleep(5000);
    Set<String> replFiles = getReferencesToFilesToBeReplicated(conn);
    // We might have a WAL that was used solely for the replication table.
    // Remove it from our list, as it should not appear in the replication table.
    String replicationTableId = conn.tableOperations().tableIdMap().get(ReplicationTable.NAME);
    Iterator<Entry<String, Table.ID>> observedLogs = logs.entries().iterator();
    while (observedLogs.hasNext()) {
        Entry<String, Table.ID> observedLog = observedLogs.next();
        if (replicationTableId.equals(observedLog.getValue().canonicalID())) {
            log.info("Removing {} because its tableId is for the replication table", observedLog);
            observedLogs.remove();
        }
    }
    // We should have *some* reference to each log that was seen in the metadata table
    // They might not all be closed yet, though (some may still be new files)
    Assert.assertTrue("Metadata log distribution: " + logs + " replFiles " + replFiles, logs.keySet().containsAll(replFiles));
    Assert.assertTrue("Difference between replication entries and current logs is bigger than one", logs.keySet().size() - replFiles.size() <= 1);
    final Configuration conf = new Configuration();
    for (String replFile : replFiles) {
        Path p = new Path(replFile);
        FileSystem fs = p.getFileSystem(conf);
        if (!fs.exists(p)) {
            // double-check: the garbage collector can be fast
            Set<String> currentSet = getReferencesToFilesToBeReplicated(conn);
            log.info("Current references {}", currentSet);
            log.info("Looking for reference to {}", replFile);
            log.info("Contains? {}", currentSet.contains(replFile));
            Assert.assertTrue("File does not exist anymore, it was likely incorrectly garbage collected: " + p, !currentSet.contains(replFile));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Configuration(org.apache.hadoop.conf.Configuration) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) URISyntaxException(java.net.URISyntaxException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) NoSuchElementException(java.util.NoSuchElementException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Entry(java.util.Map.Entry) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) FileSystem(org.apache.hadoop.fs.FileSystem) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) UUID(java.util.UUID) Test(org.junit.Test)
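getReferencesToFilesToBeReplicated is another helper on the test class that is not shown here. Judging from the status scans in Example 58, it plausibly collects the file names (row values) of all status records; a hypothetical sketch:

private Set<String> getReferencesToFilesToBeReplicated(Connector conn) throws Exception {
    Set<String> files = new HashSet<>();
    try (Scanner s = ReplicationTable.getScanner(conn)) {
        StatusSection.limit(s);
        for (Entry<Key, Value> entry : s) {
            // Status records are keyed by the file they describe
            files.add(entry.getKey().getRow().toString());
        }
    }
    return files;
}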

Example 60 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class ReplicationIT method filesClosedAfterUnused.

@Test
public void filesClosedAfterUnused() throws Exception {
    Connector conn = getConnector();
    String table = "table";
    conn.tableOperations().create(table);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(table));
    Assert.assertNotNull(tableId);
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
    conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
    // Configure the peer so the mock replica system just sleeps for 50 seconds
    conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
    // Write a mutation to make a log file
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation m = new Mutation("one");
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    // Write another mutation to encourage the logger to roll the WAL
    bw = conn.createBatchWriter(table, new BatchWriterConfig());
    m = new Mutation("three");
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
        s.setRange(TabletsSection.getRange(tableId));
        Set<String> wals = new HashSet<>();
        for (Entry<Key, Value> entry : s) {
            LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
            wals.add(new Path(logEntry.filename).toString());
        }
        log.warn("Found wals {}", wals);
        bw = conn.createBatchWriter(table, new BatchWriterConfig());
        m = new Mutation("three");
        byte[] bytes = new byte[1024 * 1024];
        m.put("1".getBytes(), new byte[0], bytes);
        m.put("2".getBytes(), new byte[0], bytes);
        m.put("3".getBytes(), new byte[0], bytes);
        m.put("4".getBytes(), new byte[0], bytes);
        m.put("5".getBytes(), new byte[0], bytes);
        bw.addMutation(m);
        bw.close();
        conn.tableOperations().flush(table, null, null, true);
        while (!ReplicationTable.isOnline(conn)) {
            sleepUninterruptibly(MILLIS_BETWEEN_REPLICATION_TABLE_ONLINE_CHECKS, TimeUnit.MILLISECONDS);
        }
        for (int i = 0; i < 10; i++) {
            try (Scanner s2 = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                s2.fetchColumnFamily(LogColumnFamily.NAME);
                s2.setRange(TabletsSection.getRange(tableId));
                for (Entry<Key, Value> entry : s2) {
                    log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
                }
            }
            try (Scanner s3 = ReplicationTable.getScanner(conn)) {
                StatusSection.limit(s3);
                Text buff = new Text();
                boolean allReferencedLogsClosed = true;
                int recordsFound = 0;
                for (Entry<Key, Value> e : s3) {
                    recordsFound++;
                    StatusSection.getFile(e.getKey(), buff);
                    String file = buff.toString();
                    if (wals.contains(file)) {
                        Status stat = Status.parseFrom(e.getValue().get());
                        if (!stat.getClosed()) {
                            log.info("{} wasn't closed", file);
                            allReferencedLogsClosed = false;
                        }
                    }
                }
                if (recordsFound > 0 && allReferencedLogsClosed) {
                    return;
                }
                Thread.sleep(2000);
            } catch (RuntimeException e) {
                Throwable cause = e.getCause();
                if (cause instanceof AccumuloSecurityException) {
                    AccumuloSecurityException ase = (AccumuloSecurityException) cause;
                    switch (ase.getSecurityErrorCode()) {
                        case PERMISSION_DENIED:
                            // We tried to read the replication table before the GRANT went through
                            Thread.sleep(2000);
                            break;
                        default:
                            throw e;
                    }
                }
            }
        }
        Assert.fail("We had a file that was referenced but didn't get closed");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Status(org.apache.accumulo.server.replication.proto.Replication.Status) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) HashSet(java.util.HashSet) Test(org.junit.Test)
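The PERMISSION_DENIED branch above waits for a GRANT on the replication table to propagate; in setup code that grant is typically issued explicitly. A one-line sketch, assuming the standard "root" test principal:

// Grant "root" read access to the replication table (the grant the catch block waits for)
conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.READ);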

Aggregations

Connector (org.apache.accumulo.core.client.Connector) 622
Test (org.junit.Test) 415
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 171
Value (org.apache.accumulo.core.data.Value) 162
Text (org.apache.hadoop.io.Text) 160
Scanner (org.apache.accumulo.core.client.Scanner) 158
Mutation (org.apache.accumulo.core.data.Mutation) 152
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 143
Key (org.apache.accumulo.core.data.Key) 139
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken) 101
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 87
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 83
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting) 75
Range (org.apache.accumulo.core.data.Range) 74
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 65
Authorizations (org.apache.accumulo.core.security.Authorizations) 60
HashSet (java.util.HashSet) 57
Instance (org.apache.accumulo.core.client.Instance) 55
ArrayList (java.util.ArrayList) 53
Entry (java.util.Map.Entry) 53
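For context, the aggregation list includes ZooKeeperInstance and PasswordToken, which together form the usual route to a Connector in this Accumulo line. A minimal sketch; the instance name, ZooKeeper quorum, and credentials are placeholders:

// Placeholders only: substitute a real instance name, ZooKeeper quorum, and credentials
Instance instance = new ZooKeeperInstance("accumulo", "zkhost1:2181,zkhost2:2181");
Connector conn = instance.getConnector("root", new PasswordToken("secret"));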