Search in sources :

Example 6 with Manager

Use of org.apache.accumulo.manager.Manager in the Apache Accumulo project.

From the class ShutdownTServerTest, method testSingleShutdown.

@Test
public void testSingleShutdown() throws Exception {
    // Verifies the ShutdownTServer FATE op: it must wait while the target
    // tserver still hosts tablets, and proceed (halt the tserver) once the
    // tserver reports an empty table map.
    HostAndPort hap = HostAndPort.fromParts("localhost", 1234);
    final TServerInstance tserver = new TServerInstance(hap, "fake");
    final boolean force = false;
    final ShutdownTServer op = new ShutdownTServer(tserver, force);
    final Manager manager = EasyMock.createMock(Manager.class);
    final long tid = 1L;
    final TServerConnection tserverCnxn = EasyMock.createMock(TServerConnection.class);
    final TabletServerStatus status = new TabletServerStatus();
    status.tableMap = new HashMap<>();
    // Put in a table info record; the contents don't matter — its mere
    // presence means the tserver still hosts tablets.
    status.tableMap.put("a_table", new TableInfo());
    manager.shutdownTServer(tserver);
    EasyMock.expectLastCall().once();
    EasyMock.expect(manager.onlineTabletServers()).andReturn(Collections.singleton(tserver));
    EasyMock.expect(manager.getConnection(tserver)).andReturn(tserverCnxn);
    EasyMock.expect(tserverCnxn.getTableMap(false)).andReturn(status);
    EasyMock.replay(tserverCnxn, manager);
    // FATE op is not ready: tablets are still loaded, so isReady must
    // return a positive wait time
    long wait = op.isReady(tid, manager);
    assertTrue("Expected wait to be greater than 0", wait > 0);
    EasyMock.verify(tserverCnxn, manager);
    // Reset the mocks
    EasyMock.reset(tserverCnxn, manager);
    // reset the table map to the empty set to simulate all tablets unloaded
    status.tableMap = new HashMap<>();
    manager.shutdownTServer(tserver);
    EasyMock.expectLastCall().once();
    EasyMock.expect(manager.onlineTabletServers()).andReturn(Collections.singleton(tserver));
    EasyMock.expect(manager.getConnection(tserver)).andReturn(tserverCnxn);
    EasyMock.expect(tserverCnxn.getTableMap(false)).andReturn(status);
    EasyMock.expect(manager.getManagerLock()).andReturn(null);
    tserverCnxn.halt(null);
    EasyMock.expectLastCall().once();
    EasyMock.replay(tserverCnxn, manager);
    // FATE op IS now ready: no tablets remain, so isReady returns 0
    // (the original comment here wrongly said "not ready")
    wait = op.isReady(tid, manager);
    // assertEquals gives the actual value on failure, unlike assertTrue(wait == 0)
    assertEquals("Expected wait to be 0", 0, wait);
    Repo<Manager> op2 = op.call(tid, manager);
    assertNull("Expected no follow on step", op2);
    EasyMock.verify(tserverCnxn, manager);
}
Also used : TServerConnection(org.apache.accumulo.server.manager.LiveTServerSet.TServerConnection) HostAndPort(org.apache.accumulo.core.util.HostAndPort) TableInfo(org.apache.accumulo.core.master.thrift.TableInfo) Manager(org.apache.accumulo.manager.Manager) TServerInstance(org.apache.accumulo.core.metadata.TServerInstance) ShutdownTServer(org.apache.accumulo.manager.tserverOps.ShutdownTServer) TabletServerStatus(org.apache.accumulo.core.master.thrift.TabletServerStatus) Test(org.junit.Test)

Example 7 with Manager

Use of org.apache.accumulo.manager.Manager in the Apache Accumulo project.

From the class ImportTableTest, method testCreateImportDir.

@Test
public void testCreateImportDir() throws Exception {
    // Exercises CreateImportDir: for each export directory it must allocate
    // an import directory on the matching filesystem, under the tables dir,
    // using the next unique name from the allocator.
    Manager manager = EasyMock.createMock(Manager.class);
    ServerContext serverContext = EasyMock.createMock(ServerContext.class);
    VolumeManager volMgr = EasyMock.createMock(VolumeManager.class);
    UniqueNameAllocator nameAllocator = EasyMock.createMock(UniqueNameAllocator.class);
    String[] exportDirs = { "hdfs://nn1:8020/import-dir-nn1", "hdfs://nn2:8020/import-dir-nn2", "hdfs://nn3:8020/import-dir-nn3" };
    String[] tableDirArray = { "hdfs://nn1:8020/apps/accumulo1/tables", "hdfs://nn2:8020/applications/accumulo/tables", "hdfs://nn3:8020/applications/accumulo/tables" };
    Set<String> tableDirSet = Set.of(tableDirArray);
    String uniqueDirName = "abcd";
    EasyMock.expect(manager.getContext()).andReturn(serverContext);
    EasyMock.expect(manager.getVolumeManager()).andReturn(volMgr).times(3);
    EasyMock.expect(serverContext.getUniqueNameAllocator()).andReturn(nameAllocator);
    // Each export dir resolves to the table dir on the same filesystem.
    for (int i = 0; i < exportDirs.length; i++) {
        EasyMock.expect(volMgr.matchingFileSystem(EasyMock.eq(new Path(exportDirs[i])), EasyMock.eq(tableDirSet))).andReturn(new Path(tableDirArray[i]));
    }
    EasyMock.expect(nameAllocator.getNextName()).andReturn(uniqueDirName).times(3);
    ImportedTableInfo tableInfo = new ImportedTableInfo();
    tableInfo.tableId = TableId.of("5b");
    tableInfo.directories = ImportTable.parseExportDir(Set.of(exportDirs));
    assertEquals(3, tableInfo.directories.size());
    EasyMock.replay(manager, serverContext, volMgr, nameAllocator);
    CreateImportDir createStep = new CreateImportDir(tableInfo);
    createStep.create(tableDirSet, manager);
    assertEquals(3, tableInfo.directories.size());
    for (ImportedTableInfo.DirectoryMapping mapping : tableInfo.directories) {
        assertNotNull(mapping.exportDir);
        assertNotNull(mapping.importDir);
        assertTrue(mapping.importDir.contains(Constants.HDFS_TABLES_DIR));
        assertMatchingFilesystem(mapping.exportDir, mapping.importDir);
        assertTrue(mapping.importDir.contains(tableInfo.tableId.canonical() + "/" + Constants.BULK_PREFIX + uniqueDirName));
    }
    EasyMock.verify(manager, serverContext, volMgr, nameAllocator);
}
Also used : Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) ServerContext(org.apache.accumulo.server.ServerContext) UniqueNameAllocator(org.apache.accumulo.server.tablets.UniqueNameAllocator) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Manager(org.apache.accumulo.manager.Manager) Test(org.junit.Test)

Example 8 with Manager

Use of org.apache.accumulo.manager.Manager in the Apache Accumulo project.

From the class ImportTable, method checkVersions.

@SuppressFBWarnings(value = "OS_OPEN_STREAM", justification = "closing intermediate readers would close the ZipInputStream")
public void checkVersions(Manager env) throws AcceptableThriftTableOperationException {
    // Locates the export metadata file among the export directories, parses
    // the export and data version properties out of it, and rejects the
    // import if either version is missing or newer than this installation
    // supports.
    Set<String> exportDirs = tableInfo.directories.stream().map(dm -> dm.exportDir).collect(Collectors.toSet());
    log.debug("Searching for export file in {}", exportDirs);
    Integer exportVersion = null;
    Integer dataVersion = null;
    try {
        Path exportFilePath = TableOperationsImpl.findExportFile(env.getContext(), exportDirs);
        tableInfo.exportFile = exportFilePath.toString();
        log.info("Export file is {}", tableInfo.exportFile);
        // try-with-resources: the original never closed the ZipInputStream,
        // leaking it (and the underlying volume stream) on every exit path.
        try (ZipInputStream zis = new ZipInputStream(env.getVolumeManager().open(exportFilePath))) {
            ZipEntry zipEntry;
            while ((zipEntry = zis.getNextEntry()) != null) {
                if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
                    // Deliberately not closed: closing this reader would close zis
                    // (see the @SuppressFBWarnings justification above).
                    BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8));
                    String line;
                    while ((line = in.readLine()) != null) {
                        String[] sa = line.split(":", 2);
                        // Skip malformed lines with no ':' separator; the original
                        // threw ArrayIndexOutOfBoundsException on sa[1] for these.
                        if (sa.length < 2) {
                            continue;
                        }
                        if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
                            exportVersion = Integer.parseInt(sa[1]);
                        } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
                            dataVersion = Integer.parseInt(sa[1]);
                        }
                    }
                    break;
                }
            }
        }
    } catch (IOException | AccumuloException e) {
        log.warn("{}", e.getMessage(), e);
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Failed to read export metadata " + e.getMessage());
    }
    // Null checks short-circuit before unboxing, so these comparisons are NPE-safe.
    if (exportVersion == null || exportVersion > ExportTable.VERSION)
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible export version " + exportVersion);
    if (dataVersion == null || dataVersion > AccumuloDataVersion.get())
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible data version " + dataVersion);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ExportTable(org.apache.accumulo.manager.tableOps.tableExport.ExportTable) ZipInputStream(java.util.zip.ZipInputStream) LoggerFactory(org.slf4j.LoggerFactory) Repo(org.apache.accumulo.fate.Repo) Manager(org.apache.accumulo.manager.Manager) TableOperationsImpl(org.apache.accumulo.core.clientImpl.TableOperationsImpl) Path(org.apache.hadoop.fs.Path) TableOperationExceptionType(org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType) ZipEntry(java.util.zip.ZipEntry) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) Logger(org.slf4j.Logger) UTF_8(java.nio.charset.StandardCharsets.UTF_8) Set(java.util.Set) IOException(java.io.IOException) Constants(org.apache.accumulo.core.Constants) InputStreamReader(java.io.InputStreamReader) Collectors(java.util.stream.Collectors) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AccumuloDataVersion(org.apache.accumulo.server.AccumuloDataVersion) TableOperation(org.apache.accumulo.core.clientImpl.thrift.TableOperation) List(java.util.List) Utils(org.apache.accumulo.manager.tableOps.Utils) NamespaceId(org.apache.accumulo.core.data.NamespaceId) BufferedReader(java.io.BufferedReader) Predicate.not(java.util.function.Predicate.not) Collections(java.util.Collections) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) ManagerRepo(org.apache.accumulo.manager.tableOps.ManagerRepo) Path(org.apache.hadoop.fs.Path) AccumuloException(org.apache.accumulo.core.client.AccumuloException) InputStreamReader(java.io.InputStreamReader) ZipEntry(java.util.zip.ZipEntry) IOException(java.io.IOException) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) ZipInputStream(java.util.zip.ZipInputStream) BufferedReader(java.io.BufferedReader) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)

Example 9 with Manager

Use of org.apache.accumulo.manager.Manager in the Apache Accumulo project.

From the class CompactionDriverTest, method testCancelId.

@Test
public void testCancelId() throws Exception {
    // When the cancellation id stored in ZooKeeper is greater than the
    // compaction id, CompactionDriver.isReady must abort with an
    // AcceptableThriftTableOperationException describing the cancellation.
    final InstanceId instance = InstanceId.of(UUID.randomUUID());
    final long compactId = 123;
    // cancelId > compactId triggers the cancellation path
    final long cancelId = 124;
    final NamespaceId namespaceId = NamespaceId.of("13");
    final TableId tableId = TableId.of("42");
    final byte[] startRow = new byte[0];
    final byte[] endRow = new byte[0];
    Manager manager = EasyMock.createNiceMock(Manager.class);
    ServerContext ctx = EasyMock.createNiceMock(ServerContext.class);
    ZooReaderWriter zrw = EasyMock.createNiceMock(ZooReaderWriter.class);
    EasyMock.expect(manager.getInstanceID()).andReturn(instance).anyTimes();
    EasyMock.expect(manager.getContext()).andReturn(ctx);
    EasyMock.expect(ctx.getZooReaderWriter()).andReturn(zrw);
    final String zCancelID = CompactionDriver.createCompactionCancellationPath(instance, tableId);
    EasyMock.expect(zrw.getData(zCancelID)).andReturn(Long.toString(cancelId).getBytes());
    EasyMock.replay(manager, ctx, zrw);
    final CompactionDriver driver = new CompactionDriver(compactId, namespaceId, tableId, startRow, endRow);
    final long tableIdLong = Long.parseLong(tableId.toString());
    var e = assertThrows(AcceptableThriftTableOperationException.class, () -> driver.isReady(tableIdLong, manager));
    // assertEquals instead of assertTrue(x.equals(y)): failures now report
    // the expected and actual values instead of just "false"
    assertEquals(tableId.toString(), e.getTableId());
    assertEquals(TableOperation.COMPACT, e.getOp());
    assertEquals(TableOperationExceptionType.OTHER, e.getType());
    assertEquals(TableOperationsImpl.COMPACTION_CANCELED_MSG, e.getDescription());
    EasyMock.verify(manager, ctx, zrw);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ServerContext(org.apache.accumulo.server.ServerContext) InstanceId(org.apache.accumulo.core.data.InstanceId) ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) NamespaceId(org.apache.accumulo.core.data.NamespaceId) Manager(org.apache.accumulo.manager.Manager) Test(org.junit.Test)

Example 10 with Manager

Use of org.apache.accumulo.manager.Manager in the Apache Accumulo project.

From the class MoveExportedFiles, method call.

@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
    // Renames the exported bulk files into their import directories using the
    // previously-written filename mappings, then hands off to FinishImportTable.
    String fmtTid = FateTxId.formatTid(tid);
    int workerCount = manager.getConfiguration().getCount(Property.MANAGER_RENAME_THREADS);
    VolumeManager fs = manager.getVolumeManager();
    Map<Path, Path> oldToNewPaths = new HashMap<>();
    for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
        Map<String, String> fileNameMappings = new HashMap<>();
        PopulateMetadataTable.readMappingFile(fs, tableInfo, dm.importDir, fileNameMappings);
        FileStatus[] exportedFiles = fs.listStatus(new Path(dm.exportDir));
        FileStatus[] importedFiles = fs.listStatus(new Path(dm.importDir));
        Function<FileStatus, String> fileStatusName = fstat -> fstat.getPath().getName();
        // Mapped names still sitting in the export dir vs. names already moved.
        Set<String> importing = Arrays.stream(exportedFiles).map(fileStatusName).map(fileNameMappings::get).collect(Collectors.toSet());
        Set<String> imported = Arrays.stream(importedFiles).map(fileStatusName).collect(Collectors.toSet());
        if (log.isDebugEnabled()) {
            log.debug("{} files already present in imported (target) directory: {}", fmtTid, String.join(",", imported));
        }
        // Every mapped target name must be accounted for either as pending or done.
        Set<String> missingFiles = Sets.difference(new HashSet<>(fileNameMappings.values()), new HashSet<>(Sets.union(importing, imported)));
        if (!missingFiles.isEmpty()) {
            throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Missing source files corresponding to files " + String.join(",", missingFiles));
        }
        for (FileStatus fileStatus : exportedFiles) {
            Path originalPath = fileStatus.getPath();
            String newName = fileNameMappings.get(originalPath.getName());
            // Need to exclude any other files which may be present in the exported directory
            if (newName != null) {
                Path newPath = new Path(dm.importDir, newName);
                // No try-catch here, as we do not expect any "benign" exceptions. Prior code already
                // accounts for files which were already moved. So anything returned by the rename
                // operation would be truly unexpected
                oldToNewPaths.put(originalPath, newPath);
            } else {
                log.debug("{} not moving (unmapped) file {}", fmtTid, originalPath);
            }
        }
    }
    try {
        fs.bulkRename(oldToNewPaths, workerCount, "importtable rename", fmtTid);
    } catch (IOException ioe) {
        // ioe.getCause() may be null; the original dereferenced it blindly,
        // turning a rename failure into a NullPointerException that masked
        // the real error. Fall back to the IOException's own message.
        Throwable cause = ioe.getCause() != null ? ioe.getCause() : ioe;
        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), null, TableOperation.IMPORT, TableOperationExceptionType.OTHER, cause.getMessage());
    }
    return new FinishImportTable(tableInfo);
}
Also used : Path(org.apache.hadoop.fs.Path) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) Arrays(java.util.Arrays) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) IOException(java.io.IOException) HashMap(java.util.HashMap) FileStatus(org.apache.hadoop.fs.FileStatus) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) Repo(org.apache.accumulo.fate.Repo) HashSet(java.util.HashSet) TableOperation(org.apache.accumulo.core.clientImpl.thrift.TableOperation) Manager(org.apache.accumulo.manager.Manager) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) TableOperationExceptionType(org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType) FateTxId(org.apache.accumulo.fate.FateTxId) Property(org.apache.accumulo.core.conf.Property) ManagerRepo(org.apache.accumulo.manager.tableOps.ManagerRepo) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) IOException(java.io.IOException) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)

Aggregations

Manager (org.apache.accumulo.manager.Manager)15 ServerContext (org.apache.accumulo.server.ServerContext)10 Test (org.junit.Test)9 TableId (org.apache.accumulo.core.data.TableId)5 ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter)5 Path (org.apache.hadoop.fs.Path)5 AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)4 TableOperation (org.apache.accumulo.core.clientImpl.thrift.TableOperation)4 TableOperationExceptionType (org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType)4 NamespaceId (org.apache.accumulo.core.data.NamespaceId)4 Repo (org.apache.accumulo.fate.Repo)4 ManagerRepo (org.apache.accumulo.manager.tableOps.ManagerRepo)4 VolumeManager (org.apache.accumulo.server.fs.VolumeManager)4 Logger (org.slf4j.Logger)4 LoggerFactory (org.slf4j.LoggerFactory)4 IOException (java.io.IOException)3 Set (java.util.Set)3 Constants (org.apache.accumulo.core.Constants)3 TServerInstance (org.apache.accumulo.core.metadata.TServerInstance)3 UTF_8 (java.nio.charset.StandardCharsets.UTF_8)2