Search in sources :

Example 6 with AcceptableThriftTableOperationException

use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.

The `call` method of the PopulateMetadataTable class.

// Populates the metadata table for an imported table: streams the exported
// metadata out of the export zip file, remaps data-file references to their
// copied locations in the import directories, and writes rewritten tablet
// entries keyed under the new table id.
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
    Path path = new Path(tableInfo.exportFile);
    VolumeManager fs = manager.getVolumeManager();
    // Both the BatchWriter and the ZipInputStream are closed by try-with-resources.
    try (BatchWriter mbw = manager.getContext().createBatchWriter(MetadataTable.NAME);
        ZipInputStream zis = new ZipInputStream(fs.open(path))) {
        // Maps original data-file name -> new fully-qualified path in the import dir.
        Map<String, String> fileNameMappings = new HashMap<>();
        for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
            log.info("importDir is " + dm.importDir);
            // mappings are prefixed with the proper volume information, e.g:
            // hdfs://localhost:8020/path/to/accumulo/tables/...
            readMappingFile(fs, tableInfo, dm.importDir, fileNameMappings);
        }
        ZipEntry zipEntry;
        while ((zipEntry = zis.getNextEntry()) != null) {
            if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
                DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
                Key key = new Key();
                Value val = new Value();
                // Accumulates the mutation for the current tablet row; flushed
                // whenever the row changes and once more at the final entry.
                Mutation m = null;
                Text currentRow = null;
                int dirCount = 0;
                // Read serialized key/value pairs until the terminating break below
                // (the PREV_ROW column of the last tablet, whose endRow is null).
                while (true) {
                    key.readFields(in);
                    val.readFields(in);
                    // Re-key the entry under the imported table's id, keeping the
                    // original end row so the tablet boundaries are preserved.
                    Text endRow = KeyExtent.fromMetaRow(key.getRow()).endRow();
                    Text metadataRow = new KeyExtent(tableInfo.tableId, endRow, null).toMetaRow();
                    Text cq;
                    if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                        // Data-file entries must be rewritten to point at the files
                        // that were copied into the import directories.
                        String oldName = new Path(key.getColumnQualifier().toString()).getName();
                        String newName = fileNameMappings.get(oldName);
                        if (newName == null) {
                            throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "File " + oldName + " does not exist in import dir");
                        }
                        cq = new Text(newName);
                    } else {
                        cq = key.getColumnQualifier();
                    }
                    // Crossed into a new tablet row: flush the finished mutation and
                    // start a new one, assigning the tablet a directory entry.
                    if (m == null || !currentRow.equals(metadataRow)) {
                        if (m != null) {
                            mbw.addMutation(m);
                        }
                        // Make a unique directory inside the table's dir. Cannot import multiple tables
                        // into one table, so don't need to use unique allocator
                        String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
                        m = new Mutation(metadataRow);
                        ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(tabletDir));
                        currentRow = metadataRow;
                    }
                    m.put(key.getColumnFamily(), cq, val);
                    if (endRow == null && TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
                        mbw.addMutation(m);
                        // its the last column in the last row
                        break;
                    }
                }
                break;
            }
        }
        return new MoveExportedFiles(tableInfo);
    } catch (IOException ioe) {
        log.warn("{}", ioe.getMessage(), ioe);
        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error reading " + path + " " + ioe.getMessage());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) HashMap(java.util.HashMap) ZipEntry(java.util.zip.ZipEntry) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) ZipInputStream(java.util.zip.ZipInputStream) BufferedInputStream(java.io.BufferedInputStream) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key)

Example 7 with AcceptableThriftTableOperationException

use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.

The `call` method of the ImportPopulateZookeeper class.

// Registers the imported table in ZooKeeper (reserving its name and id) and
// then applies every table property carried over from the export.
@Override
public Repo<Manager> call(long tid, Manager env) throws Exception {
    // Reserve the table name in ZooKeeper, failing if it is already taken.
    Utils.getTableNameLock().lock();
    try {
        // Record the table name and id in ZooKeeper.
        Utils.checkTableDoesNotExist(env.getContext(), tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
        String namespaceName = TableNameUtil.qualify(tableInfo.tableName).getFirst();
        NamespaceId nsId = Namespaces.getNamespaceId(env.getContext(), namespaceName);
        env.getTableManager().addTable(tableInfo.tableId, nsId, tableInfo.tableName);
        env.getContext().clearTableListCache();
    } finally {
        Utils.getTableNameLock().unlock();
    }
    // Copy each exported property onto the new table; reject any that the
    // current system considers invalid.
    VolumeManager volumeManager = env.getVolumeManager();
    for (Entry<String, String> prop : getExportedProps(volumeManager).entrySet()) {
        boolean applied = TablePropUtil.setTableProperty(env.getContext(), tableInfo.tableId, prop.getKey(), prop.getValue());
        if (!applied) {
            throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Invalid table property " + prop.getKey());
        }
    }
    return new CreateImportDir(tableInfo);
}
Also used : VolumeManager(org.apache.accumulo.server.fs.VolumeManager) NamespaceId(org.apache.accumulo.core.data.NamespaceId) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)

Example 8 with AcceptableThriftTableOperationException

use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.

The `checkVersions` method of the ImportTable class.

// Locates the export metadata file and verifies that its export version and
// data version are compatible with this Accumulo instance before importing.
//
// Fixes: the ZipInputStream was previously opened but never closed (resource
// leak) — it is now managed by try-with-resources. A malformed version value
// previously escaped as an unchecked NumberFormatException; it is now reported
// as an AcceptableThriftTableOperationException like other read failures.
@SuppressFBWarnings(value = "OS_OPEN_STREAM", justification = "closing intermediate readers would close the ZipInputStream")
public void checkVersions(Manager env) throws AcceptableThriftTableOperationException {
    Set<String> exportDirs = tableInfo.directories.stream().map(dm -> dm.exportDir).collect(Collectors.toSet());
    log.debug("Searching for export file in {}", exportDirs);
    Integer exportVersion = null;
    Integer dataVersion = null;
    try {
        Path exportFilePath = TableOperationsImpl.findExportFile(env.getContext(), exportDirs);
        tableInfo.exportFile = exportFilePath.toString();
        log.info("Export file is {}", tableInfo.exportFile);
        // try-with-resources ensures the zip stream is closed on every path.
        try (ZipInputStream zis = new ZipInputStream(env.getVolumeManager().open(exportFilePath))) {
            ZipEntry zipEntry;
            while ((zipEntry = zis.getNextEntry()) != null) {
                if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
                    // Intermediate reader is deliberately not closed (see FB suppression);
                    // closing it would close the enclosing ZipInputStream early.
                    BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8));
                    String line;
                    while ((line = in.readLine()) != null) {
                        // Lines are "property:value"; split only on the first colon.
                        String[] sa = line.split(":", 2);
                        if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
                            exportVersion = Integer.parseInt(sa[1]);
                        } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
                            dataVersion = Integer.parseInt(sa[1]);
                        }
                    }
                    break;
                }
            }
        }
    } catch (IOException | NumberFormatException | AccumuloException e) {
        log.warn("{}", e.getMessage(), e);
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Failed to read export metadata " + e.getMessage());
    }
    if (exportVersion == null || exportVersion > ExportTable.VERSION)
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible export version " + exportVersion);
    if (dataVersion == null || dataVersion > AccumuloDataVersion.get())
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible data version " + dataVersion);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ExportTable(org.apache.accumulo.manager.tableOps.tableExport.ExportTable) ZipInputStream(java.util.zip.ZipInputStream) LoggerFactory(org.slf4j.LoggerFactory) Repo(org.apache.accumulo.fate.Repo) Manager(org.apache.accumulo.manager.Manager) TableOperationsImpl(org.apache.accumulo.core.clientImpl.TableOperationsImpl) Path(org.apache.hadoop.fs.Path) TableOperationExceptionType(org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType) ZipEntry(java.util.zip.ZipEntry) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) Logger(org.slf4j.Logger) UTF_8(java.nio.charset.StandardCharsets.UTF_8) Set(java.util.Set) IOException(java.io.IOException) Constants(org.apache.accumulo.core.Constants) InputStreamReader(java.io.InputStreamReader) Collectors(java.util.stream.Collectors) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AccumuloDataVersion(org.apache.accumulo.server.AccumuloDataVersion) TableOperation(org.apache.accumulo.core.clientImpl.thrift.TableOperation) List(java.util.List) Utils(org.apache.accumulo.manager.tableOps.Utils) NamespaceId(org.apache.accumulo.core.data.NamespaceId) BufferedReader(java.io.BufferedReader) Predicate.not(java.util.function.Predicate.not) Collections(java.util.Collections) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) ManagerRepo(org.apache.accumulo.manager.tableOps.ManagerRepo) Path(org.apache.hadoop.fs.Path) AccumuloException(org.apache.accumulo.core.client.AccumuloException) InputStreamReader(java.io.InputStreamReader) ZipEntry(java.util.zip.ZipEntry) IOException(java.io.IOException) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) ZipInputStream(java.util.zip.ZipInputStream) BufferedReader(java.io.BufferedReader) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)

Example 9 with AcceptableThriftTableOperationException

use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.

The `call` method of the RenameTable class.

// Renames the table's name node in ZooKeeper. The rename must stay within the
// table's current namespace; the operation is idempotent so a re-run that finds
// the new name already in place is treated as success.
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
    Pair<String, String> oldQualified = TableNameUtil.qualify(oldTableName);
    Pair<String, String> newQualified = TableNameUtil.qualify(newTableName);
    // Reject any attempt to move the table into a different namespace.
    if (newTableName.contains(".")
        && !namespaceId.equals(Namespaces.getNamespaceId(manager.getContext(), newQualified.getFirst()))) {
        throw new AcceptableThriftTableOperationException(tableId.canonical(), oldTableName, TableOperation.RENAME, TableOperationExceptionType.INVALID_NAME, "Namespace in new table name does not match the old table name");
    }
    ZooReaderWriter zrw = manager.getContext().getZooReaderWriter();
    Utils.getTableNameLock().lock();
    try {
        Utils.checkTableDoesNotExist(manager.getContext(), newTableName, tableId, TableOperation.RENAME);
        final String targetName = newQualified.getSecond();
        final String expectedName = oldQualified.getSecond();
        final String namePath = manager.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_NAME;
        // Atomically swap the stored name, tolerating a repeated run.
        zrw.mutateExisting(namePath, current -> {
            final String storedName = new String(current, UTF_8);
            if (storedName.equals(targetName)) {
                // Already renamed — assume the operation is running again, so we are done.
                return null;
            }
            if (!storedName.equals(expectedName)) {
                throw new AcceptableThriftTableOperationException(null, oldTableName, TableOperation.RENAME, TableOperationExceptionType.NOTFOUND, "Name changed while processing");
            }
            return targetName.getBytes(UTF_8);
        });
        manager.getContext().clearTableListCache();
    } finally {
        // Release the name lock first, then the fate reservations taken earlier.
        Utils.getTableNameLock().unlock();
        Utils.unreserveTable(manager, tableId, tid, true);
        Utils.unreserveNamespace(manager, namespaceId, tid, false);
    }
    LoggerFactory.getLogger(RenameTable.class).debug("Renamed table {} {} {}", tableId, oldTableName, newTableName);
    return null;
}
Also used : ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)

Example 10 with AcceptableThriftTableOperationException

use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.

The `getNextId` method of the Utils class.

// Atomically increments the shared id counter stored in ZooKeeper and converts
// the resulting value (rendered in radix 36) into a new id via newIdFunction.
// Any failure is logged in full and reported as a table-operation exception.
public static <T extends AbstractId<T>> T getNextId(String name, ServerContext context, Function<String, T> newIdFunction) throws AcceptableThriftTableOperationException {
    try {
        ZooReaderWriter zrw = context.getZooReaderWriter();
        final String tablesPath = context.getZooKeeperRoot() + Constants.ZTABLES;
        byte[] idBytes = zrw.mutateOrCreate(tablesPath, ZERO_BYTE, current -> {
            // Parse the stored counter (radix 36), bump it, and write it back.
            BigInteger next = new BigInteger(new String(current, UTF_8), Character.MAX_RADIX).add(BigInteger.ONE);
            return next.toString(Character.MAX_RADIX).getBytes(UTF_8);
        });
        return newIdFunction.apply(new String(idBytes, UTF_8));
    } catch (Exception e) {
        // Full stack trace is logged here; the thrown exception carries only the message.
        log.error("Failed to assign id to " + name, e);
        throw new AcceptableThriftTableOperationException(null, name, TableOperation.CREATE, TableOperationExceptionType.OTHER, e.getMessage());
    }
}
Also used : ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) BigInteger(java.math.BigInteger) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) AcceptableThriftTableOperationException(org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)

Aggregations

AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)19 Path (org.apache.hadoop.fs.Path)10 IOException (java.io.IOException)9 VolumeManager (org.apache.accumulo.server.fs.VolumeManager)7 ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter)5 HashMap (java.util.HashMap)4 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)4 FileStatus (org.apache.hadoop.fs.FileStatus)4 TableOperation (org.apache.accumulo.core.clientImpl.thrift.TableOperation)3 TableOperationExceptionType (org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType)3 NamespaceId (org.apache.accumulo.core.data.NamespaceId)3 BufferedReader (java.io.BufferedReader)2 BufferedWriter (java.io.BufferedWriter)2 InputStreamReader (java.io.InputStreamReader)2 OutputStreamWriter (java.io.OutputStreamWriter)2 UTF_8 (java.nio.charset.StandardCharsets.UTF_8)2 List (java.util.List)2 Map (java.util.Map)2 Set (java.util.Set)2 Collectors (java.util.stream.Collectors)2