Use of herddb.client.HDBClient in project herddb by diennea.
From the class BackupRestoreTest, method test_backup_restore_with_deletes:
/**
 * Check that restoring a dirty delete doesn't revive a record (that is: a phantom deleted record on a dirty page).
 */
@Test
public void test_backup_restore_with_deletes() throws Exception {
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ENFORCE_LEADERSHIP, false);
    /* Disable page compaction (avoid compaction of dirty pages) */
    serverconfig_1.set(ServerConfiguration.PROPERTY_FILL_PAGE_THRESHOLD, 0.0D);
    ServerConfiguration serverconfig_2 = serverconfig_1.copy().set(ServerConfiguration.PROPERTY_NODEID, "server2").set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    ClientConfiguration client_configuration = new ClientConfiguration(folder.newFolder().toPath());
    client_configuration.set(ClientConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    client_configuration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    client_configuration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    client_configuration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        Table table = Table.builder().name("t1").column("c", ColumnTypes.INTEGER).column("d", ColumnTypes.INTEGER).primaryKey("c").build();
        DBManager manager = server_1.getManager();
        manager.executeStatement(new CreateTableStatement(table), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.executeUpdate(new InsertStatement(TableSpace.DEFAULT, table.name, RecordSerializer.makeRecord(table, "c", 1, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.executeUpdate(new InsertStatement(TableSpace.DEFAULT, table.name, RecordSerializer.makeRecord(table, "c", 2, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.executeUpdate(new InsertStatement(TableSpace.DEFAULT, table.name, RecordSerializer.makeRecord(table, "c", 3, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.executeUpdate(new InsertStatement(TableSpace.DEFAULT, table.name, RecordSerializer.makeRecord(table, "c", 4, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.checkpoint();
        /* Check that the new data isn't in a dirty page */
        assertEquals(0, manager.getTableSpaceManager(TableSpace.DEFAULT).getTableManager(table.name).getStats().getDirtypages());
        manager.executeUpdate(new DeleteStatement(TableSpace.DEFAULT, table.name, Bytes.from_int(1), null), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        manager.executeUpdate(new InsertStatement(TableSpace.DEFAULT, table.name, RecordSerializer.makeRecord(table, "c", 5, "d", 2)), StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), TransactionContext.NO_TRANSACTION);
        /* Now we have a dirty page in the checkpoint */
        manager.checkpoint();
        /* Check that a dirty page exists */
        assertEquals(1, manager.getTableSpaceManager(TableSpace.DEFAULT).getTableManager(table.name).getStats().getDirtypages());
        try (Server server_2 = new Server(serverconfig_2)) {
            server_2.start();
            try (HDBClient client = new HDBClient(client_configuration);
                 HDBConnection connection = client.openConnection()) {
                assertEquals(4, connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM t1", true, Collections.emptyList(), 0, 0, 10, true).consume().size());
                ByteArrayOutputStream oo = new ByteArrayOutputStream();
                BackupUtils.dumpTableSpace(TableSpace.DEFAULT, 64 * 1024, connection, oo, new ProgressListener() {
                });
                byte[] backupData = oo.toByteArray();
                connection.executeUpdate(TableSpace.DEFAULT, "DELETE FROM t1", 0, false, true, Collections.emptyList());
                assertEquals(0, connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM t1", true, Collections.emptyList(), 0, 0, 10, true).consume().size());
                BackupUtils.restoreTableSpace("newts", server_2.getNodeId(), connection, new ByteArrayInputStream(backupData), new ProgressListener() {
                });
                /* The restore must reflect both the insert of c=5 and the delete of c=1: a count of 5 would mean it saw the new insert but revived the deleted record */
                assertEquals(4, connection.executeScan("newts", "SELECT * FROM newts.t1", true, Collections.emptyList(), 0, 0, 10, true).consume().size());
            }
        }
    }
}
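Stripped of the test scaffolding above, the client-side backup and restore pattern boils down to two BackupUtils calls over an open HDBConnection. The following is a minimal sketch, not code from the project: the class and method names, the target tablespace name "newts" and the herddb.backup import locations are assumptions; it presumes an already configured cluster-mode ClientConfiguration and a valid target node id.

// Minimal sketch (not from the project): dump a tablespace into memory and restore it
// under a new name on a chosen node. The herddb.backup import locations are assumed.
import herddb.backup.BackupUtils;       // assumed package
import herddb.backup.ProgressListener;  // assumed package
import herddb.client.ClientConfiguration;
import herddb.client.HDBClient;
import herddb.client.HDBConnection;
import herddb.model.TableSpace;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

public class BackupRestoreSketch {

    public static void backupAndRestore(ClientConfiguration clientConfiguration, String targetNodeId) throws Exception {
        try (HDBClient client = new HDBClient(clientConfiguration);
             HDBConnection connection = client.openConnection()) {
            // dump the default tablespace, 64 KB at a time, into an in-memory buffer
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            BackupUtils.dumpTableSpace(TableSpace.DEFAULT, 64 * 1024, connection, out, new ProgressListener() {
            });
            // restore the dump as a brand new tablespace ("newts") hosted on targetNodeId
            BackupUtils.restoreTableSpace("newts", targetNodeId, connection, new ByteArrayInputStream(out.toByteArray()), new ProgressListener() {
            });
        }
    }
}

In the test above the same connection is used for dump and restore, but any cluster-mode connection works: the dump is just a stream, so it can equally be written to a file instead of a byte array.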
Use of herddb.client.HDBClient in project herddb by diennea.
From the class ServerWithDynamicPortTest, method test:
@Test
public void test() throws Exception {
    TestStatsProvider statsProvider = new TestStatsProvider();
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_PORT, 7867);
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.set(ClientConfiguration.PROPERTY_MODE, ClientConfiguration.PROPERTY_MODE_CLUSTER);
    clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    StatsLogger logger = statsProvider.getStatsLogger("ds");
    try (HDBClient client1 = new HDBClient(clientConfiguration, logger)) {
        try (HDBConnection connection = client1.openConnection()) {
            try (Server server_1 = new Server(serverconfig_1)) {
                server_1.start();
                server_1.waitForStandaloneBoot();
                // create table and insert data
                connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE t1(k1 int primary key, n1 int)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO t1(k1,n1) values(1,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
            }
            // change port
            serverconfig_1.set(ServerConfiguration.PROPERTY_PORT, 7868);
            try (Server server_1 = new Server(serverconfig_1)) {
                server_1.start();
                server_1.waitForStandaloneBoot();
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO t1(k1,n1) values(2,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO t1(k1,n1) values(3,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
            }
        }
    }
}
Use of herddb.client.HDBClient in project herddb by diennea.
From the class RetryOnLeaderChangedTest, method test:
@Test
public void test() throws Exception {
    TestStatsProvider statsProvider = new TestStatsProvider();
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_CLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    ServerConfiguration serverconfig_2 = serverconfig_1.copy().set(ServerConfiguration.PROPERTY_NODEID, "server2").set(ServerConfiguration.PROPERTY_BASEDIR, folder.newFolder().toPath().toAbsolutePath());
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        try (Server server_2 = new Server(serverconfig_2)) {
            server_2.start();
            TestUtils.execute(server_1.getManager(), "CREATE TABLESPACE 'ttt','leader:" + server_2.getNodeId() + "','expectedreplicacount:2'", Collections.emptyList());
            // wait for server_2 to wake up
            for (int i = 0; i < 40; i++) {
                TableSpaceManager tableSpaceManager2 = server_2.getManager().getTableSpaceManager("ttt");
                if (tableSpaceManager2 != null && tableSpaceManager2.isLeader()) {
                    break;
                }
                Thread.sleep(500);
            }
            assertTrue(server_2.getManager().getTableSpaceManager("ttt") != null && server_2.getManager().getTableSpaceManager("ttt").isLeader());
            // wait for server_1 to announce itself as a follower
            waitClusterStatus(server_1.getManager(), server_1.getNodeId(), "follower");
            ClientConfiguration clientConfiguration = new ClientConfiguration();
            clientConfiguration.set(ClientConfiguration.PROPERTY_MODE, ClientConfiguration.PROPERTY_MODE_CLUSTER);
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
            clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
            StatsLogger logger = statsProvider.getStatsLogger("ds");
            try (HDBClient client1 = new HDBClient(clientConfiguration, logger)) {
                try (HDBConnection connection = client1.openConnection()) {
                    // create table and insert data
                    connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE ttt.t1(k1 int primary key, n1 int)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(1,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(2,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(3,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                    // run a scan, so that the connection is opened towards the current leader
                    try (ScanResultSet scan = connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM ttt.t1", false, Collections.emptyList(), TransactionContext.NOTRANSACTION_ID, 0, 0, true)) {
                        assertEquals(3, scan.consume().size());
                    }
                    // change leader
                    switchLeader(server_1.getNodeId(), server_2.getNodeId(), server_1.getManager());
                    // perform operation
                    try (ScanResultSet scan = connection.executeScan(TableSpace.DEFAULT, "SELECT * FROM ttt.t1", false, Collections.emptyList(), TransactionContext.NOTRANSACTION_ID, 0, 0, true)) {
                        assertEquals(3, scan.consume().size());
                    }
                    // check that the client handled the "not leader" error
                    assertEquals(1, logger.scope("hdbclient").getCounter("leaderChangedErrors").get().intValue());
                    // change leader
                    switchLeader(server_2.getNodeId(), server_1.getNodeId(), server_1.getManager());
                    // perform operation
                    GetResult get = connection.executeGet(TableSpace.DEFAULT, "SELECT * FROM ttt.t1 where k1=1", TransactionContext.NOTRANSACTION_ID, false, Collections.emptyList());
                    assertTrue(get.isFound());
                    // check that the client handled the "not leader" error
                    assertEquals(2, logger.scope("hdbclient").getCounter("leaderChangedErrors").get().intValue());
                    // change leader
                    switchLeader(server_1.getNodeId(), server_2.getNodeId(), server_1.getManager());
                    // perform operation
                    assertEquals(1, connection.executeUpdate(TableSpace.DEFAULT, "UPDATE ttt.t1 set n1=3 where k1=1", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList()).updateCount);
                    // check that the client handled the "not leader" error
                    assertEquals(3, logger.scope("hdbclient").getCounter("leaderChangedErrors").get().intValue());
                    // change leader
                    switchLeader(server_2.getNodeId(), server_1.getNodeId(), server_1.getManager());
                    // perform operation
                    assertEquals(1, connection.executeUpdateAsync(TableSpace.DEFAULT, "UPDATE ttt.t1 set n1=3 where k1=1", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList()).get().updateCount);
                    // check that the client handled the "not leader" error
                    assertEquals(4, logger.scope("hdbclient").getCounter("leaderChangedErrors").get().intValue());
                    // change leader
                    switchLeader(server_1.getNodeId(), server_2.getNodeId(), server_1.getManager());
                    // perform operation
                    assertEquals(1, connection.executeUpdates(TableSpace.DEFAULT, "UPDATE ttt.t1 set n1=3 where k1=1", TransactionContext.NOTRANSACTION_ID, false, false, Arrays.asList(Collections.emptyList())).get(0).updateCount);
                    // check that the client handled the "not leader" error
                    assertEquals(5, logger.scope("hdbclient").getCounter("leaderChangedErrors").get().intValue());
                    // change leader
                    switchLeader(server_2.getNodeId(), server_1.getNodeId(), server_1.getManager());
                    // perform operation
                    assertEquals(1, connection.executeUpdatesAsync(TableSpace.DEFAULT, "UPDATE ttt.t1 set n1=3 where k1=1", TransactionContext.NOTRANSACTION_ID, false, false, Arrays.asList(Collections.emptyList())).get().get(0).updateCount);
                    // check that the client handled the "not leader" error
                    assertEquals(6, logger.scope("hdbclient").getCounter("leaderChangedErrors").get().intValue());
                }
            }
        }
    }
}
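The key point of the test above is that the same HDBConnection keeps working across leader changes: the client registers its metrics under the "hdbclient" scope of the StatsLogger passed to the HDBClient constructor, and every "not leader" error it absorbs transparently increments the "leaderChangedErrors" counter. The following is a minimal sketch of that pattern, not code from the project; the method name is illustrative, it assumes the same imports as the test and an existing ttt.t1 table reachable through the given cluster-mode ClientConfiguration.

// Minimal sketch (not from the project): issue a statement after a leader switch and read
// back how many "not leader" errors the client handled transparently.
static long countHandledLeaderChanges(ClientConfiguration clientConfiguration, StatsLogger logger) throws Exception {
    try (HDBClient client = new HDBClient(clientConfiguration, logger);
         HDBConnection connection = client.openConnection()) {
        // a statement issued after a leader switch is retried against the new leader
        connection.executeUpdate(TableSpace.DEFAULT, "UPDATE ttt.t1 set n1=3 where k1=1",
                TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
        // the retry is invisible to the caller, but it is counted here
        return logger.scope("hdbclient").getCounter("leaderChangedErrors").get();
    }
}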
Use of herddb.client.HDBClient in project herddb by diennea.
From the class ExpectedReplicaCountTest, method testDisklessClusterReplication:
@Test
public void testDisklessClusterReplication() throws Exception {
    TestStatsProvider statsProvider = new TestStatsProvider();
    ServerConfiguration serverconfig_1 = newServerConfigurationWithAutoPort(folder.newFolder().toPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_NODEID, "server1");
    serverconfig_1.set(ServerConfiguration.PROPERTY_MODE, ServerConfiguration.PROPERTY_MODE_DISKLESSCLUSTER);
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
    serverconfig_1.set(ServerConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
    try (Server server_1 = new Server(serverconfig_1)) {
        server_1.start();
        server_1.waitForStandaloneBoot();
        TestUtils.execute(server_1.getManager(), "CREATE TABLESPACE 'ttt','leader:" + server_1.getNodeId() + "','expectedreplicacount:2'", Collections.emptyList());
        // perform some writes
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.set(ClientConfiguration.PROPERTY_MODE, ClientConfiguration.PROPERTY_MODE_CLUSTER);
        clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_ADDRESS, testEnv.getAddress());
        clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_PATH, testEnv.getPath());
        clientConfiguration.set(ClientConfiguration.PROPERTY_ZOOKEEPER_SESSIONTIMEOUT, testEnv.getTimeout());
        StatsLogger logger = statsProvider.getStatsLogger("ds");
        try (HDBClient client1 = new HDBClient(clientConfiguration, logger)) {
            try (HDBConnection connection = client1.openConnection()) {
                // create table and insert data
                connection.executeUpdate(TableSpace.DEFAULT, "CREATE TABLE ttt.t1(k1 int primary key, n1 int)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(1,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(2,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(3,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                // flush data pages to BK
                server_1.getManager().checkpoint();
                Set<Long> initialLedgers = new HashSet<>();
                // verify that every ledger has ensemble size 2
                try (BookKeeper bk = createBookKeeper()) {
                    BookKeeperAdmin admin = new BookKeeperAdmin(bk);
                    for (long lId : admin.listLedgers()) {
                        LedgerMetadata md = bk.getLedgerManager().readLedgerMetadata(lId).get().getValue();
                        if ("ttt".equals(new String(md.getCustomMetadata().get("tablespaceuuid"), StandardCharsets.UTF_8))) {
                            assertEquals(2, md.getEnsembleSize());
                            assertEquals(2, md.getWriteQuorumSize());
                            assertEquals(2, md.getAckQuorumSize());
                            initialLedgers.add(lId);
                        }
                    }
                }
                BookkeeperCommitLog log = (BookkeeperCommitLog) server_1.getManager().getTableSpaceManager("ttt").getLog();
                final long currentLedgerId = log.getWriter().getLedgerId();
                // downsize to expectedreplicacount = 1
                TestUtils.execute(server_1.getManager(), "ALTER TABLESPACE 'ttt','leader:" + server_1.getNodeId() + "','expectedreplicacount:1'", Collections.emptyList());
                // the TableSpaceManager will roll a new ledger
                herddb.utils.TestUtils.waitForCondition(() -> {
                    if (log.getWriter() == null) {
                        return false;
                    }
                    long newLedgerId = log.getWriter().getLedgerId();
                    return newLedgerId != currentLedgerId;
                }, herddb.utils.TestUtils.NOOP, 100);
                // write some other record
                connection.executeUpdate(TableSpace.DEFAULT, "INSERT INTO ttt.t1(k1,n1) values(4,1)", TransactionContext.NOTRANSACTION_ID, false, false, Collections.emptyList());
                // flush data pages
                server_1.getManager().checkpoint();
                // verify that every ledger has ensemble size 2 or 1
                try (BookKeeper bk = createBookKeeper()) {
                    BookKeeperAdmin admin = new BookKeeperAdmin(bk);
                    for (long lId : admin.listLedgers()) {
                        LedgerMetadata md = bk.getLedgerManager().readLedgerMetadata(lId).get().getValue();
                        if ("ttt".equals(new String(md.getCustomMetadata().get("tablespaceuuid"), StandardCharsets.UTF_8))) {
                            if (initialLedgers.contains(lId)) {
                                assertEquals(2, md.getEnsembleSize());
                                assertEquals(2, md.getWriteQuorumSize());
                                assertEquals(2, md.getAckQuorumSize());
                            } else {
                                assertEquals(1, md.getEnsembleSize());
                                assertEquals(1, md.getWriteQuorumSize());
                                assertEquals(1, md.getAckQuorumSize());
                            }
                        }
                    }
                }
            }
        }
    }
}
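The first verification loop above can be factored into a small helper. The following is a sketch only, not code from the project: it assumes the same createBookKeeper() helper and BookKeeper APIs used in the test, and it adds a null check on the custom metadata entry, whereas the test assumes every listed ledger carries a "tablespaceuuid" entry.

// Sketch of a helper equivalent to the first inline check above (assumes createBookKeeper()
// from the test class). Returns the ids of the ledgers tagged with the given tablespace uuid.
private Set<Long> assertLedgerReplication(String tablespaceUuid, int expectedReplicaCount) throws Exception {
    Set<Long> matching = new HashSet<>();
    try (BookKeeper bk = createBookKeeper()) {
        BookKeeperAdmin admin = new BookKeeperAdmin(bk);
        for (long ledgerId : admin.listLedgers()) {
            LedgerMetadata md = bk.getLedgerManager().readLedgerMetadata(ledgerId).get().getValue();
            byte[] uuid = md.getCustomMetadata().get("tablespaceuuid");
            // skip ledgers not tagged by HerdDB (null check added in this sketch)
            if (uuid == null || !tablespaceUuid.equals(new String(uuid, StandardCharsets.UTF_8))) {
                continue;
            }
            assertEquals(expectedReplicaCount, md.getEnsembleSize());
            assertEquals(expectedReplicaCount, md.getWriteQuorumSize());
            assertEquals(expectedReplicaCount, md.getAckQuorumSize());
            matching.add(ledgerId);
        }
    }
    return matching;
}

The second verification in the test cannot use a single expected count, because ledgers created before the ALTER TABLESPACE keep their original ensemble of 2 while newly rolled ledgers use 1; that is why the test remembers initialLedgers and branches on membership.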
Use of herddb.client.HDBClient in project herddb by diennea.
From the class TableSpaceManager, method downloadTableSpaceData:
private void downloadTableSpaceData() throws MetadataStorageManagerException, DataStorageManagerException, LogNotAvailableException {
    TableSpace tableSpaceData = metadataStorageManager.describeTableSpace(tableSpaceName);
    String leaderId = tableSpaceData.leaderId;
    if (this.nodeId.equals(leaderId)) {
        throw new DataStorageManagerException("cannot download data of tableSpace " + tableSpaceName + " from myself");
    }
    Optional<NodeMetadata> leaderAddress = metadataStorageManager.listNodes().stream().filter(n -> n.nodeId.equals(leaderId)).findAny();
    if (!leaderAddress.isPresent()) {
        throw new DataStorageManagerException("cannot download data of tableSpace " + tableSpaceName + " from leader " + leaderId + ", no metadata found");
    }
    // ensure we do not have any data on disk and in memory
    actualLogSequenceNumber = LogSequenceNumber.START_OF_TIME;
    newTransactionId.set(0);
    LOGGER.log(Level.INFO, "tablespace " + tableSpaceName + " at downloadTableSpaceData " + tables + ", " + indexes + ", " + transactions);
    for (AbstractTableManager manager : tables.values()) {
        // and all indexes
        if (!manager.isSystemTable()) {
            manager.dropTableData();
        }
        manager.close();
    }
    tables.clear();
    // this map should be empty
    for (AbstractIndexManager manager : indexes.values()) {
        manager.dropIndexData();
        manager.close();
    }
    indexes.clear();
    transactions.clear();
    dataStorageManager.eraseTablespaceData(tableSpaceUUID);
    NodeMetadata nodeData = leaderAddress.get();
    ClientConfiguration clientConfiguration = new ClientConfiguration(dbmanager.getTmpDirectory());
    clientConfiguration.set(ClientConfiguration.PROPERTY_CLIENT_USERNAME, dbmanager.getServerToServerUsername());
    clientConfiguration.set(ClientConfiguration.PROPERTY_CLIENT_PASSWORD, dbmanager.getServerToServerPassword());
    // always use network, we want to run tests with this case
    clientConfiguration.set(ClientConfiguration.PROPERTY_CLIENT_CONNECT_LOCALVM_SERVER, false);
    try (HDBClient client = new HDBClient(clientConfiguration)) {
        client.setClientSideMetadataProvider(new ClientSideMetadataProvider() {

            @Override
            public String getTableSpaceLeader(String tableSpace) throws ClientSideMetadataProviderException {
                return leaderId;
            }

            @Override
            public ServerHostData getServerHostData(String nodeId) throws ClientSideMetadataProviderException {
                return new ServerHostData(nodeData.host, nodeData.port, "?", nodeData.ssl, Collections.emptyMap());
            }
        });
        try (HDBConnection con = client.openConnection()) {
            ReplicaFullTableDataDumpReceiver receiver = new ReplicaFullTableDataDumpReceiver(this);
            int fetchSize = 10000;
            con.dumpTableSpace(tableSpaceName, receiver, fetchSize, false);
            receiver.getLatch().get(1, TimeUnit.HOURS);
            this.actualLogSequenceNumber = receiver.logSequenceNumber;
            LOGGER.log(Level.INFO, tableSpaceName + " After download local actualLogSequenceNumber is " + actualLogSequenceNumber);
        } catch (ClientSideMetadataProviderException | HDBException | InterruptedException | ExecutionException | TimeoutException internalError) {
            LOGGER.log(Level.SEVERE, tableSpaceName + " error downloading snapshot", internalError);
            throw new DataStorageManagerException(internalError);
        }
    }
}
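The HDBClient detail worth noting in this snippet is the ClientSideMetadataProvider: instead of discovering the tablespace leader through ZooKeeper, the client is pinned to a node whose address is already known. The following is a minimal sketch of that pattern, not code from the project; the class name, temporary directory, placeholder node id and host/port parameters are all illustrative values.

// Minimal sketch (not from the project): pin an HDBClient to a known server instead of
// relying on ZooKeeper-based discovery. Host, port and the temp directory are placeholders.
import herddb.client.ClientConfiguration;
import herddb.client.ClientSideMetadataProvider;
import herddb.client.HDBClient;
import herddb.network.ServerHostData;
import java.nio.file.Paths;
import java.util.Collections;

public class PinnedClientSketch {

    public static HDBClient openPinnedClient(String host, int port, boolean ssl) {
        ClientConfiguration conf = new ClientConfiguration(Paths.get("/tmp/herddb-client"));
        HDBClient client = new HDBClient(conf);
        client.setClientSideMetadataProvider(new ClientSideMetadataProvider() {

            @Override
            public String getTableSpaceLeader(String tableSpace) {
                // every tablespace is assumed to be led by the pinned node
                return "pinned-node";
            }

            @Override
            public ServerHostData getServerHostData(String nodeId) {
                // the node id is ignored: always connect to the given host and port
                return new ServerHostData(host, port, "?", ssl, Collections.emptyMap());
            }
        });
        return client;
    }
}

This is the same approach the server uses above for server-to-server snapshot downloads: the leader's address is read from the metadata storage once, and the provider simply replays it to the client.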