Example 66 with CompletionException

Use of java.util.concurrent.CompletionException in project alluxio by Alluxio.

From the class JournalStateMachine, method query:

@Override
public CompletableFuture<Message> query(Message request) {
    CompletableFuture<Message> future = new CompletableFuture<>();
    try {
        JournalQueryRequest queryRequest = JournalQueryRequest.parseFrom(request.getContent().asReadOnlyByteBuffer());
        LOG.debug("Received query request: {}", queryRequest);
        // Give the snapshot manager a chance to handle snapshot-related requests
        Message reply = mSnapshotManager.handleRequest(queryRequest);
        if (reply != null) {
            future.complete(reply);
            return future;
        }
        // Handle other types of requests.
        if (queryRequest.hasAddQuorumServerRequest()) {
            AddQuorumServerRequest addRequest = queryRequest.getAddQuorumServerRequest();
            return CompletableFuture.supplyAsync(() -> {
                try {
                    mJournalSystem.addQuorumServer(addRequest.getServerAddress());
                } catch (IOException e) {
                    throw new CompletionException(e);
                }
                return Message.EMPTY;
            });
        }
    } catch (Exception e) {
        LOG.error("failed processing request {}", request, e);
        future.completeExceptionally(e);
        return future;
    }
    return super.query(request);
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) Message(org.apache.ratis.protocol.Message) CompletionException(java.util.concurrent.CompletionException) AddQuorumServerRequest(alluxio.grpc.AddQuorumServerRequest) JournalQueryRequest(alluxio.grpc.JournalQueryRequest) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) UnavailableException(alluxio.exception.status.UnavailableException)
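
The key idea in this example is the wrapping step inside supplyAsync: a checked IOException cannot escape a Supplier, so it is rethrown as an unchecked CompletionException and the original cause stays available to callers. A minimal, self-contained sketch of the same pattern follows; doWork and its failure message are hypothetical and not part of the Alluxio code.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class WrapCheckedExceptionSketch {
    // Hypothetical blocking call that may fail with a checked exception.
    static String doWork() throws IOException {
        throw new IOException("backend unavailable");
    }

    public static void main(String[] args) {
        CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> {
            try {
                return doWork();
            } catch (IOException e) {
                // A Supplier cannot throw checked exceptions, so wrap the cause.
                throw new CompletionException(e);
            }
        });

        // Downstream stages can recover the original cause and fall back.
        future.exceptionally(t -> {
            Throwable cause = (t instanceof CompletionException) ? t.getCause() : t;
            System.err.println("Async work failed: " + cause);
            return "fallback";
        }).thenAccept(System.out::println).join();
    }
}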

Example 67 with CompletionException

Use of java.util.concurrent.CompletionException in project hbase by Apache.

From the class TestAsyncTableAdminApi, method testCloneTableSchemaWithExistentDestinationTable:

@Test
public void testCloneTableSchemaWithExistentDestinationTable() throws Exception {
    final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
    byte[] FAMILY_0 = Bytes.toBytes("cf0");
    TEST_UTIL.createTable(tableName, FAMILY_0);
    TEST_UTIL.createTable(newTableName, FAMILY_0);
    // Cloning onto an existing destination table should fail
    try {
        admin.cloneTableSchema(tableName, newTableName, false).join();
        fail("Should have failed when destination table exists.");
    } catch (CompletionException e) {
        assertTrue(e.getCause() instanceof TableExistsException);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) CompletionException(java.util.concurrent.CompletionException) TableExistsException(org.apache.hadoop.hbase.TableExistsException) Test(org.junit.Test)
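
The assertion relies on how join() reports failures: join() throws an unchecked CompletionException wrapping the real cause (here a TableExistsException), whereas get() would throw a checked ExecutionException instead. A small stand-alone sketch of that difference follows; the failing future is constructed by hand and is not an HBase API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class JoinVersusGetSketch {
    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<Void> failed = new CompletableFuture<>();
        failed.completeExceptionally(new IllegalStateException("destination table already exists"));

        try {
            failed.join(); // unchecked wrapper
        } catch (CompletionException e) {
            System.out.println("join() cause: " + e.getCause());
        }

        try {
            failed.get(); // checked wrapper
        } catch (ExecutionException e) {
            System.out.println("get() cause: " + e.getCause());
        }
    }
}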

Example 68 with CompletionException

Use of java.util.concurrent.CompletionException in project hbase by Apache.

From the class TestAsyncTableAdminApi, method testCreateTableWithRegions:

@Test
public void testCreateTableWithRegions() throws Exception {
    byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } };
    int expectedRegions = splitKeys.length + 1;
    createTableWithDefaultConf(tableName, splitKeys);
    boolean tableAvailable = admin.isTableAvailable(tableName).get();
    assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable);
    AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
    List<HRegionLocation> regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
    Iterator<HRegionLocation> hris = regions.iterator();
    assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
    System.err.println("Found " + regions.size() + " regions");
    RegionInfo hri;
    hris = regions.iterator();
    hri = hris.next().getRegion();
    assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[0]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[0]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[1]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[1]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[2]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[2]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[3]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[3]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[4]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[4]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[5]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[5]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[6]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[6]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[7]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[7]));
    assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[8]));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[8]));
    assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
    // Now test using start/end with a number of regions
    // Use 80 bit numbers to make sure we aren't limited
    byte[] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
    byte[] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
    // Splitting into 10 regions, we expect (null,1) ... (9, null)
    // with (1,2) (2,3) (3,4) (4,5) (5,6) (6,7) (7,8) (8,9) in the middle
    expectedRegions = 10;
    final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName2);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
    admin.createTable(builder.build(), startKey, endKey, expectedRegions).join();
    regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
    assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
    System.err.println("Found " + regions.size() + " regions");
    hris = regions.iterator();
    hri = hris.next().getRegion();
    assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }));
    assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
    hri = hris.next().getRegion();
    assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
    assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
    // Try once more with a key range that does not divide evenly into the requested regions
    startKey = new byte[] { 0, 0, 0, 0, 0, 0 };
    endKey = new byte[] { 1, 0, 0, 0, 0, 0 };
    expectedRegions = 5;
    final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");
    builder = TableDescriptorBuilder.newBuilder(tableName3);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
    admin.createTable(builder.build(), startKey, endKey, expectedRegions).join();
    regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3).get();
    assertEquals("Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(), expectedRegions, regions.size());
    System.err.println("Found " + regions.size() + " regions");
    // Try an invalid case where there are duplicate split keys
    splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } };
    final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");
    try {
        createTableWithDefaultConf(tableName4, splitKeys);
        fail("Should not be able to create this table because of " + "duplicate split keys");
    } catch (CompletionException e) {
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) CompletionException(java.util.concurrent.CompletionException) Test(org.junit.Test)
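
The long run of start/end key assertions above walks the region boundaries by hand. The same checks could be folded into a loop; the helper below is a hypothetical sketch written against the HBase client types used in the test, not an existing HBase test utility.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionBoundaryAssertions {
    // Hypothetical helper: verify that regions are split exactly at the given keys.
    static void assertRegionBoundaries(List<HRegionLocation> regions, byte[][] splitKeys) {
        assertEquals(splitKeys.length + 1, regions.size());
        Iterator<HRegionLocation> it = regions.iterator();
        RegionInfo first = it.next().getRegion();
        assertTrue(first.getStartKey() == null || first.getStartKey().length == 0);
        assertTrue(Bytes.equals(first.getEndKey(), splitKeys[0]));
        for (int i = 1; i < splitKeys.length; i++) {
            RegionInfo hri = it.next().getRegion();
            assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[i - 1]));
            assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[i]));
        }
        RegionInfo last = it.next().getRegion();
        assertTrue(Bytes.equals(last.getStartKey(), splitKeys[splitKeys.length - 1]));
        assertTrue(last.getEndKey() == null || last.getEndKey().length == 0);
    }
}

With such a helper, the first block of checks would reduce to a single assertRegionBoundaries(regions, splitKeys) call, and the ten-region case to a call with the expected boundary keys.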

Example 69 with CompletionException

Use of java.util.concurrent.CompletionException in project hbase by Apache.

From the class TestAsyncReplicationAdminApi, method testAddRemovePeer:

@Test
public void testAddRemovePeer() throws Exception {
    ReplicationPeerConfig rpc1 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build();
    ReplicationPeerConfig rpc2 = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_TWO).build();
    // Add a valid peer
    admin.addReplicationPeer(ID_ONE, rpc1).join();
    // Try adding the same peer again (should fail)
    try {
        admin.addReplicationPeer(ID_ONE, rpc1).join();
        fail("Should fail when adding the same peer twice.");
    } catch (CompletionException e) {
        // expected: duplicate peer
    }
    assertEquals(1, admin.listReplicationPeers().get().size());
    // Try to remove a nonexistent peer
    try {
        admin.removeReplicationPeer(ID_TWO).join();
        fail("Should fail when removing a nonexistent peer.");
    } catch (CompletionException e) {
        // expected: no such peer
    }
    assertEquals(1, admin.listReplicationPeers().get().size());
    // Add a second since multi-slave is supported
    admin.addReplicationPeer(ID_TWO, rpc2).join();
    assertEquals(2, admin.listReplicationPeers().get().size());
    // Remove the first peer we added
    admin.removeReplicationPeer(ID_ONE).join();
    assertEquals(1, admin.listReplicationPeers().get().size());
    admin.removeReplicationPeer(ID_TWO).join();
    assertEquals(0, admin.listReplicationPeers().get().size());
}
Also used : ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) CompletionException(java.util.concurrent.CompletionException) Test(org.junit.Test)
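
The empty catch blocks only record that a CompletionException is expected. With JUnit 4.13 or later, the same expectation can be stated directly with assertThrows; the sketch below uses a placeholder addPeerAsync method rather than the real HBase admin API.

import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import org.junit.Test;

public class ExpectedAsyncFailureSketch {
    // Placeholder for an async admin call that rejects a duplicate peer id.
    static CompletableFuture<Void> addPeerAsync(String peerId) {
        CompletableFuture<Void> f = new CompletableFuture<>();
        f.completeExceptionally(new IllegalArgumentException("peer already exists: " + peerId));
        return f;
    }

    @Test
    public void duplicatePeerIsRejected() {
        CompletionException e =
            assertThrows(CompletionException.class, () -> addPeerAsync("1").join());
        assertTrue(e.getCause() instanceof IllegalArgumentException);
    }
}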

Example 70 with CompletionException

Use of java.util.concurrent.CompletionException in project hbase by Apache.

From the class TestAsyncReplicationAdminApi, method testNamespacesAndTableCfsConfigConflict:

@Test
public void testNamespacesAndTableCfsConfigConflict() throws Exception {
    String ns1 = "ns1";
    String ns2 = "ns2";
    final TableName tableName1 = TableName.valueOf(ns1 + ":" + tableName.getNameAsString() + "1");
    final TableName tableName2 = TableName.valueOf(ns2 + ":" + tableName.getNameAsString() + "2");
    ReplicationPeerConfigBuilder rpcBuilder = ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE);
    admin.addReplicationPeer(ID_ONE, rpcBuilder.build()).join();
    rpcBuilder.setReplicateAllUserTables(false);
    admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join();
    Set<String> namespaces = new HashSet<String>();
    namespaces.add(ns1);
    rpcBuilder.setNamespaces(namespaces);
    admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).get();
    Map<TableName, List<String>> tableCfs = new HashMap<>();
    tableCfs.put(tableName1, new ArrayList<>());
    rpcBuilder.setTableCFsMap(tableCfs);
    try {
        admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join();
        fail("Test case should fail, because table " + tableName1 + " conflict with namespace " + ns1);
    } catch (CompletionException e) {
        // expected: table conflicts with the replicated namespace
    }
    tableCfs.clear();
    tableCfs.put(tableName2, new ArrayList<>());
    rpcBuilder.setTableCFsMap(tableCfs);
    admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).get();
    namespaces.clear();
    namespaces.add(ns2);
    rpcBuilder.setNamespaces(namespaces);
    try {
        admin.updateReplicationPeerConfig(ID_ONE, rpcBuilder.build()).join();
        fail("Test case should fail, because namespace " + ns2 + " conflict with table " + tableName2);
    } catch (CompletionException e) {
        // expected: namespace conflicts with the replicated table
    }
    admin.removeReplicationPeer(ID_ONE).join();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfigBuilder(org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder) HashMap(java.util.HashMap) CompletionException(java.util.concurrent.CompletionException) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet) Test(org.junit.Test)
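
Several of these tests only care about the cause buried inside the CompletionException. A small unwrapping helper keeps the assertions focused on the underlying failure; this is a hypothetical utility, not something provided by java.util.concurrent or HBase.

import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class AsyncFailures {
    // Hypothetical helper: strip async wrappers and return the underlying failure.
    static Throwable unwrapAsyncFailure(Throwable t) {
        while ((t instanceof CompletionException || t instanceof ExecutionException)
                && t.getCause() != null) {
            t = t.getCause();
        }
        return t;
    }
}

A catch block can then assert on unwrapAsyncFailure(e) instead of staying empty, making the expected failure type explicit.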

Aggregations

CompletionException (java.util.concurrent.CompletionException): 199
Test (org.junit.Test): 80
CompletableFuture (java.util.concurrent.CompletableFuture): 62
List (java.util.List): 52
ArrayList (java.util.ArrayList): 51
IOException (java.io.IOException): 45
Map (java.util.Map): 39
Collection (java.util.Collection): 31
ExecutionException (java.util.concurrent.ExecutionException): 31
HashMap (java.util.HashMap): 30
Collections (java.util.Collections): 24
TimeUnit (java.util.concurrent.TimeUnit): 22
Collectors (java.util.stream.Collectors): 22
FlinkException (org.apache.flink.util.FlinkException): 22
Before (org.junit.Before): 21
Duration (java.time.Duration): 19
Arrays (java.util.Arrays): 19
BeforeClass (org.junit.BeforeClass): 19
ExecutorService (java.util.concurrent.ExecutorService): 18
Nonnull (javax.annotation.Nonnull): 17