Usage example of org.apache.accumulo.manager.Manager in the Apache Accumulo project:
the testSingleShutdown method of the ShutdownTServerTest class.
/**
 * Verifies the two-phase behavior of {@link ShutdownTServer}: while the target tserver still
 * hosts tablets, {@code isReady} reports a positive wait; once its table map is empty,
 * {@code isReady} returns 0, the server is halted, and {@code call} returns no follow-on step.
 */
@Test
public void testSingleShutdown() throws Exception {
  HostAndPort hap = HostAndPort.fromParts("localhost", 1234);
  final TServerInstance tserver = new TServerInstance(hap, "fake");
  final boolean force = false;

  final ShutdownTServer op = new ShutdownTServer(tserver, force);

  final Manager manager = EasyMock.createMock(Manager.class);
  final long tid = 1L;

  final TServerConnection tserverCnxn = EasyMock.createMock(TServerConnection.class);
  final TabletServerStatus status = new TabletServerStatus();
  status.tableMap = new HashMap<>();
  // Put in a table info record, don't care what
  status.tableMap.put("a_table", new TableInfo());

  manager.shutdownTServer(tserver);
  EasyMock.expectLastCall().once();
  EasyMock.expect(manager.onlineTabletServers()).andReturn(Collections.singleton(tserver));
  EasyMock.expect(manager.getConnection(tserver)).andReturn(tserverCnxn);
  EasyMock.expect(tserverCnxn.getTableMap(false)).andReturn(status);

  EasyMock.replay(tserverCnxn, manager);

  // tablets are still hosted, so the FATE op is not ready yet
  long wait = op.isReady(tid, manager);
  assertTrue("Expected wait to be greater than 0", wait > 0);

  EasyMock.verify(tserverCnxn, manager);

  // Reset the mocks
  EasyMock.reset(tserverCnxn, manager);

  // reset the table map to the empty set to simulate all tablets unloaded
  status.tableMap = new HashMap<>();

  manager.shutdownTServer(tserver);
  EasyMock.expectLastCall().once();
  EasyMock.expect(manager.onlineTabletServers()).andReturn(Collections.singleton(tserver));
  EasyMock.expect(manager.getConnection(tserver)).andReturn(tserverCnxn);
  EasyMock.expect(tserverCnxn.getTableMap(false)).andReturn(status);
  EasyMock.expect(manager.getManagerLock()).andReturn(null);
  // with no tablets left, isReady is expected to halt the server
  tserverCnxn.halt(null);
  EasyMock.expectLastCall().once();

  EasyMock.replay(tserverCnxn, manager);

  // all tablets unloaded: the FATE op is now ready
  wait = op.isReady(tid, manager);
  assertEquals("Expected wait to be 0", 0, wait);

  Repo<Manager> op2 = op.call(tid, manager);
  assertNull("Expected no follow on step", op2);

  EasyMock.verify(tserverCnxn, manager);
}
Usage example of org.apache.accumulo.manager.Manager in the Apache Accumulo project:
the testCreateImportDir method of the ImportTableTest class.
/**
 * Verifies that {@link CreateImportDir#create} maps each export directory onto a bulk-import
 * directory residing on the matching filesystem volume, using a unique name from the
 * {@link UniqueNameAllocator}.
 */
@Test
public void testCreateImportDir() throws Exception {
  Manager manager = EasyMock.createMock(Manager.class);
  ServerContext context = EasyMock.createMock(ServerContext.class);
  VolumeManager volumeManager = EasyMock.createMock(VolumeManager.class);
  UniqueNameAllocator uniqueNameAllocator = EasyMock.createMock(UniqueNameAllocator.class);

  // three export dirs, each on a different namenode, with a table volume on each
  String[] expDirs = {"hdfs://nn1:8020/import-dir-nn1", "hdfs://nn2:8020/import-dir-nn2",
      "hdfs://nn3:8020/import-dir-nn3"};
  String[] tableDirs = {"hdfs://nn1:8020/apps/accumulo1/tables",
      "hdfs://nn2:8020/applications/accumulo/tables",
      "hdfs://nn3:8020/applications/accumulo/tables"};
  Set<String> tableDirSet = Set.of(tableDirs);

  String dirName = "abcd";

  EasyMock.expect(manager.getContext()).andReturn(context);
  EasyMock.expect(manager.getVolumeManager()).andReturn(volumeManager).times(3);
  EasyMock.expect(context.getUniqueNameAllocator()).andReturn(uniqueNameAllocator);
  // each export dir resolves to the table dir on the same filesystem
  for (int i = 0; i < expDirs.length; i++) {
    EasyMock.expect(volumeManager.matchingFileSystem(EasyMock.eq(new Path(expDirs[i])),
        EasyMock.eq(tableDirSet))).andReturn(new Path(tableDirs[i]));
  }
  EasyMock.expect(uniqueNameAllocator.getNextName()).andReturn(dirName).times(3);

  ImportedTableInfo ti = new ImportedTableInfo();
  ti.tableId = TableId.of("5b");
  ti.directories = ImportTable.parseExportDir(Set.of(expDirs));
  assertEquals(3, ti.directories.size());

  EasyMock.replay(manager, context, volumeManager, uniqueNameAllocator);

  CreateImportDir ci = new CreateImportDir(ti);
  ci.create(tableDirSet, manager);
  assertEquals(3, ti.directories.size());

  // every mapping must land under HDFS_TABLES_DIR on the matching filesystem,
  // in a bulk dir named from the allocator
  for (ImportedTableInfo.DirectoryMapping dm : ti.directories) {
    assertNotNull(dm.exportDir);
    assertNotNull(dm.importDir);
    assertTrue(dm.importDir.contains(Constants.HDFS_TABLES_DIR));
    assertMatchingFilesystem(dm.exportDir, dm.importDir);
    assertTrue(
        dm.importDir.contains(ti.tableId.canonical() + "/" + Constants.BULK_PREFIX + dirName));
  }

  EasyMock.verify(manager, context, volumeManager, uniqueNameAllocator);
}
Usage example of org.apache.accumulo.manager.Manager in the Apache Accumulo project:
the checkVersions method of the ImportTable class.
/**
 * Reads the export metadata file from the export directories and validates that both the export
 * format version and the data version are compatible with this instance.
 *
 * @param env the manager environment used to locate and open the export file
 * @throws AcceptableThriftTableOperationException if the metadata cannot be read, or either
 *         version is missing or newer than this instance supports
 */
@SuppressFBWarnings(value = "OS_OPEN_STREAM", justification = "closing intermediate readers would close the ZipInputStream")
public void checkVersions(Manager env) throws AcceptableThriftTableOperationException {
  Set<String> exportDirs = tableInfo.directories.stream().map(dm -> dm.exportDir).collect(Collectors.toSet());
  log.debug("Searching for export file in {}", exportDirs);
  Integer exportVersion = null;
  Integer dataVersion = null;
  try {
    Path exportFilePath = TableOperationsImpl.findExportFile(env.getContext(), exportDirs);
    tableInfo.exportFile = exportFilePath.toString();
    log.info("Export file is {}", tableInfo.exportFile);
    // try-with-resources so the zip stream is closed even if parsing throws;
    // the intermediate reader is intentionally left open (see @SuppressFBWarnings)
    try (ZipInputStream zis = new ZipInputStream(env.getVolumeManager().open(exportFilePath))) {
      ZipEntry zipEntry;
      while ((zipEntry = zis.getNextEntry()) != null) {
        if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
          // metadata entry is "key:value" lines; only the two version keys matter here
          BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8));
          String line;
          while ((line = in.readLine()) != null) {
            String[] sa = line.split(":", 2);
            if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
              exportVersion = Integer.parseInt(sa[1]);
            } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
              dataVersion = Integer.parseInt(sa[1]);
            }
          }
          break;
        }
      }
    }
  } catch (IOException | AccumuloException e) {
    log.warn("{}", e.getMessage(), e);
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName,
        TableOperation.IMPORT, TableOperationExceptionType.OTHER,
        "Failed to read export metadata " + e.getMessage());
  }
  // a missing version or one newer than this build means the export is incompatible
  if (exportVersion == null || exportVersion > ExportTable.VERSION)
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName,
        TableOperation.IMPORT, TableOperationExceptionType.OTHER,
        "Incompatible export version " + exportVersion);
  if (dataVersion == null || dataVersion > AccumuloDataVersion.get())
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName,
        TableOperation.IMPORT, TableOperationExceptionType.OTHER,
        "Incompatible data version " + dataVersion);
}
Usage example of org.apache.accumulo.manager.Manager in the Apache Accumulo project:
the testCancelId method of the CompactionDriverTest class.
/**
 * Verifies that {@link CompactionDriver#isReady} throws the expected
 * {@link AcceptableThriftTableOperationException} when the cancellation id stored in ZooKeeper
 * is greater than the compaction id (i.e. the compaction was canceled).
 */
@Test
public void testCancelId() throws Exception {

  final InstanceId instance = InstanceId.of(UUID.randomUUID());
  final long compactId = 123;
  // cancel id greater than compact id means the compaction was canceled
  final long cancelId = 124;
  final NamespaceId namespaceId = NamespaceId.of("13");
  final TableId tableId = TableId.of("42");
  final byte[] startRow = new byte[0];
  final byte[] endRow = new byte[0];

  Manager manager = EasyMock.createNiceMock(Manager.class);
  ServerContext ctx = EasyMock.createNiceMock(ServerContext.class);
  ZooReaderWriter zrw = EasyMock.createNiceMock(ZooReaderWriter.class);
  EasyMock.expect(manager.getInstanceID()).andReturn(instance).anyTimes();
  EasyMock.expect(manager.getContext()).andReturn(ctx);
  EasyMock.expect(ctx.getZooReaderWriter()).andReturn(zrw);

  final String zCancelID = CompactionDriver.createCompactionCancellationPath(instance, tableId);
  // explicit charset: getBytes() with no argument uses the platform default
  EasyMock.expect(zrw.getData(zCancelID))
      .andReturn(Long.toString(cancelId).getBytes(java.nio.charset.StandardCharsets.UTF_8));

  EasyMock.replay(manager, ctx, zrw);

  final CompactionDriver driver =
      new CompactionDriver(compactId, namespaceId, tableId, startRow, endRow);
  final long tableIdLong = Long.parseLong(tableId.toString());

  var e = assertThrows(AcceptableThriftTableOperationException.class,
      () -> driver.isReady(tableIdLong, manager));
  // assertEquals gives an expected-vs-actual diff on failure, unlike assertTrue(equals)
  assertEquals(tableId.toString(), e.getTableId());
  assertEquals(TableOperation.COMPACT, e.getOp());
  assertEquals(TableOperationExceptionType.OTHER, e.getType());
  assertEquals(TableOperationsImpl.COMPACTION_CANCELED_MSG, e.getDescription());

  EasyMock.verify(manager, ctx, zrw);
}
Usage example of org.apache.accumulo.manager.Manager in the Apache Accumulo project:
the call method of the MoveExportedFiles class.
/**
 * Renames exported files into their mapped import directories in bulk.
 *
 * <p>
 * For each directory mapping, the export-to-import file-name mapping is read, files already
 * present in the import directory are skipped (supports re-running after partial failure), and
 * any mapped file missing from both source and target causes the operation to fail. All
 * remaining renames are then executed in one bulk operation.
 *
 * @param tid the FATE transaction id, used for log correlation
 * @param manager the manager environment providing configuration and the volume manager
 * @return the next FATE step, {@link FinishImportTable}
 * @throws AcceptableThriftTableOperationException if mapped source files are missing or the bulk
 *         rename fails
 */
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
  String fmtTid = FateTxId.formatTid(tid);

  int workerCount = manager.getConfiguration().getCount(Property.MANAGER_RENAME_THREADS);
  VolumeManager fs = manager.getVolumeManager();
  Map<Path,Path> oldToNewPaths = new HashMap<>();

  for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
    Map<String,String> fileNameMappings = new HashMap<>();
    PopulateMetadataTable.readMappingFile(fs, tableInfo, dm.importDir, fileNameMappings);

    FileStatus[] exportedFiles = fs.listStatus(new Path(dm.exportDir));
    FileStatus[] importedFiles = fs.listStatus(new Path(dm.importDir));

    Function<FileStatus,String> fileStatusName = fstat -> fstat.getPath().getName();

    // "importing" = mapped names still waiting in the export dir;
    // "imported" = names already present in the import dir (e.g. from a prior attempt)
    Set<String> importing = Arrays.stream(exportedFiles).map(fileStatusName)
        .map(fileNameMappings::get).collect(Collectors.toSet());

    Set<String> imported =
        Arrays.stream(importedFiles).map(fileStatusName).collect(Collectors.toSet());

    if (log.isDebugEnabled()) {
      log.debug("{} files already present in imported (target) directory: {}", fmtTid,
          String.join(",", imported));
    }

    // any mapped target name found in neither place means a source file is missing
    Set<String> missingFiles = Sets.difference(new HashSet<>(fileNameMappings.values()),
        new HashSet<>(Sets.union(importing, imported)));

    if (!missingFiles.isEmpty()) {
      throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(),
          tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
          "Missing source files corresponding to files " + String.join(",", missingFiles));
    }

    for (FileStatus fileStatus : exportedFiles) {
      Path originalPath = fileStatus.getPath();
      String newName = fileNameMappings.get(originalPath.getName());
      // Need to exclude any other files which may be present in the exported directory
      if (newName != null) {
        Path newPath = new Path(dm.importDir, newName);

        // No try-catch here, as we do not expect any "benign" exceptions. Prior code already
        // accounts for files which were already moved. So anything returned by the rename
        // operation would be truly unexpected
        oldToNewPaths.put(originalPath, newPath);
      } else {
        log.debug("{} not moving (unmapped) file {}", fmtTid, originalPath);
      }
    }
  }
  try {
    fs.bulkRename(oldToNewPaths, workerCount, "importtable rename", fmtTid);
  } catch (IOException ioe) {
    // the cause may be absent; fall back to the outer exception rather than NPE-ing here
    Throwable cause = ioe.getCause() != null ? ioe.getCause() : ioe;
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), null,
        TableOperation.IMPORT, TableOperationExceptionType.OTHER, cause.getMessage());
  }

  return new FinishImportTable(tableInfo);
}
End of aggregated usage examples.