Example 16 with AcceptableThriftTableOperationException

Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.

The call method of the class WriteExportFiles:

@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
    try {
        exportTable(manager.getVolumeManager(), manager.getContext(), tableInfo.tableName, tableInfo.tableID, tableInfo.exportDir);
    } catch (IOException ioe) {
        // Report the failure back to the client as a table operation error
        // instead of letting FATE retry the step.
        throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonical(), tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER, "Failed to create export files " + ioe.getMessage());
    }
    // Release the namespace, table, and export directory reservations;
    // returning null marks this FATE operation as complete.
    Utils.unreserveNamespace(manager, tableInfo.namespaceID, tid, false);
    Utils.unreserveTable(manager, tableInfo.tableID, tid, false);
    Utils.unreserveHdfsDirectory(manager, new Path(tableInfo.exportDir).toString(), tid);
    return null;
}
Also used: Path (org.apache.hadoop.fs.Path), IOException (java.io.IOException), AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)
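
For reference, a minimal sketch of the wrapping pattern above: a checked IOException becomes the exception type that FATE reports to the client rather than retrying. The class and the wrapIo helper are hypothetical illustrations, not part of Accumulo:

import java.io.IOException;

import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException;
import org.apache.accumulo.core.clientImpl.thrift.TableOperation;
import org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType;
import org.apache.accumulo.core.data.TableId;

public class ExportErrorSketch {
    // Hypothetical helper: build the "acceptable" exception the same way the
    // call method above does, so the failure reaches the client as a table
    // operation error instead of an internal retry loop.
    static AcceptableThriftTableOperationException wrapIo(TableId tableId, String tableName, IOException ioe) {
        return new AcceptableThriftTableOperationException(tableId.canonical(), tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER, "Failed to create export files " + ioe.getMessage());
    }
}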

Example 17 with AcceptableThriftTableOperationException

Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.

The isReady method of the class WriteExportFiles:

@Override
public long isReady(long tid, Manager manager) throws Exception {
    // A positive return value asks FATE to wait that many milliseconds and
    // call isReady again; zero means call() may proceed.
    long reserved = Utils.reserveNamespace(manager, tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT) + Utils.reserveTable(manager, tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
    if (reserved > 0)
        return reserved;
    AccumuloClient client = manager.getContext();
    checkOffline(manager.getContext());
    Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    metaScanner.setRange(new KeyExtent(tableInfo.tableID, null, null).toMetaRange());
    // scan for current and future tablet locations
    metaScanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
    metaScanner.fetchColumnFamily(FutureLocationColumnFamily.NAME);
    if (metaScanner.iterator().hasNext()) {
        // tablets are still assigned; check again in 500 ms
        return 500;
    }
    // Reuse the same range to check for write-ahead logs that was used to
    // check for hosted (or future hosted) tablets. This is a separate scan
    // after the location check, because walogs are okay only if there is no
    // location.
    metaScanner.clearColumns();
    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
    if (metaScanner.iterator().hasNext()) {
        throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonical(), tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER, "Write ahead logs found for table");
    }
    return 0;
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), Scanner (org.apache.accumulo.core.client.Scanner), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)
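
The return value convention is the point of this example. A minimal sketch of the isReady contract as used above; the method and its boolean parameters are hypothetical stand-ins for the reservation and metadata checks:

public class IsReadySketch {
    // FATE polls isReady until it returns 0: a positive value is a delay in
    // milliseconds before the next poll, and an exception fails the operation.
    static long isReady(boolean reservationsHeld, boolean tabletsHosted) {
        if (!reservationsHeld)
            return 100; // hypothetical delay; could not reserve the table yet
        if (tabletsHosted)
            return 500; // tablets still hosted; mirrors the 500 ms retry above
        return 0; // ready: call() may run
    }
}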

Example 18 with AcceptableThriftTableOperationException

Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.

The call method of the class TableRangeOp:

@Override
public Repo<Manager> call(long tid, Manager env) throws Exception {
    if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
        log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.", RootTable.NAME);
    }
    // An empty byte array means an unbounded start or end row.
    Text start = startRow.length == 0 ? null : new Text(startRow);
    Text end = endRow.length == 0 ? null : new Text(endRow);
    if (start != null && end != null && start.compareTo(end) >= 0)
        throw new AcceptableThriftTableOperationException(tableId.canonical(), null, TableOperation.MERGE, TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
    env.mustBeOnline(tableId);
    MergeInfo info = env.getMergeInfo(tableId);
    if (info.getState() == MergeState.NONE) {
        // KeyExtent takes (tableId, endRow, prevEndRow)
        KeyExtent range = new KeyExtent(tableId, end, start);
        env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
    }
    return new TableRangeOpWait(namespaceId, tableId);
}
Also used: MergeInfo (org.apache.accumulo.server.manager.state.MergeInfo), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException)
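
A minimal, self-contained sketch of the range check above; IllegalArgumentException stands in for AcceptableThriftTableOperationException so the snippet needs nothing beyond Hadoop's Text:

import org.apache.hadoop.io.Text;

public class RangeCheckSketch {
    // An empty byte array means an unbounded start or end row; a bounded
    // start row must sort strictly before a bounded end row.
    static void validateRange(byte[] startRow, byte[] endRow) {
        Text start = startRow.length == 0 ? null : new Text(startRow);
        Text end = endRow.length == 0 ? null : new Text(endRow);
        if (start != null && end != null && start.compareTo(end) >= 0)
            throw new IllegalArgumentException("start row must be less than end row");
    }
}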

Example 19 with AcceptableThriftTableOperationException

Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.

The call method of the class MapImportFileNames:

@Override
public Repo<Manager> call(long tid, Manager environment) throws Exception {
    for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
        Path path = new Path(dm.importDir, "mappings.txt");
        BufferedWriter mappingsWriter = null;
        try {
            VolumeManager fs = environment.getVolumeManager();
            fs.mkdirs(new Path(dm.importDir));
            FileStatus[] files = fs.listStatus(new Path(dm.exportDir));
            UniqueNameAllocator namer = environment.getContext().getUniqueNameAllocator();
            mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
            for (FileStatus fileStatus : files) {
                String fileName = fileStatus.getPath().getName();
                log.info("filename {}", fileStatus.getPath());
                String[] sa = fileName.split("\\.");
                String extension = "";
                if (sa.length > 1) {
                    extension = sa[sa.length - 1];
                    // skip files whose extension is not a known data file type
                    if (!FileOperations.getValidExtensions().contains(extension)) {
                        continue;
                    }
                } else {
                    // no extension; assume it is a map file
                    extension = Constants.MAPFILE_EXTENSION;
                }
                // allocate a cluster-unique name and record one
                // originalName:newName pair per line
                String newName = "I" + namer.getNextName() + "." + extension;
                mappingsWriter.append(fileName);
                mappingsWriter.append(':');
                mappingsWriter.append(newName);
                mappingsWriter.newLine();
            }
            mappingsWriter.close();
            // null out the writer so the finally block does not close it twice
            mappingsWriter = null;
        } catch (IOException ioe) {
            log.warn("{}", ioe.getMessage(), ioe);
            throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error writing mapping file " + path + " " + ioe.getMessage());
        } finally {
            if (mappingsWriter != null)
                try {
                    mappingsWriter.close();
                } catch (IOException ioe) {
                    log.warn("Failed to close " + path, ioe);
                }
        }
    }
    return new PopulateMetadataTable(tableInfo);
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), FileStatus (org.apache.hadoop.fs.FileStatus), IOException (java.io.IOException), BufferedWriter (java.io.BufferedWriter), AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), OutputStreamWriter (java.io.OutputStreamWriter)
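
A minimal sketch (hypothetical helper, plain java.io) of reading back the mappings.txt format written above, one originalName:newName pair per line:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;

public class MappingsReaderSketch {
    static Map<String, String> readMappings(Reader in) throws IOException {
        Map<String, String> mappings = new HashMap<>();
        try (BufferedReader reader = new BufferedReader(in)) {
            String line;
            while ((line = reader.readLine()) != null) {
                // each line is originalName:newName
                int sep = line.lastIndexOf(':');
                mappings.put(line.substring(0, sep), line.substring(sep + 1));
            }
        }
        return mappings;
    }
}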

Aggregations

AcceptableThriftTableOperationException (org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException): 19
Path (org.apache.hadoop.fs.Path): 10
IOException (java.io.IOException): 9
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 7
ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter): 5
HashMap (java.util.HashMap): 4
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 4
FileStatus (org.apache.hadoop.fs.FileStatus): 4
TableOperation (org.apache.accumulo.core.clientImpl.thrift.TableOperation): 3
TableOperationExceptionType (org.apache.accumulo.core.clientImpl.thrift.TableOperationExceptionType): 3
NamespaceId (org.apache.accumulo.core.data.NamespaceId): 3
BufferedReader (java.io.BufferedReader): 2
BufferedWriter (java.io.BufferedWriter): 2
InputStreamReader (java.io.InputStreamReader): 2
OutputStreamWriter (java.io.OutputStreamWriter): 2
UTF_8 (java.nio.charset.StandardCharsets.UTF_8): 2
List (java.util.List): 2
Map (java.util.Map): 2
Set (java.util.Set): 2
Collectors (java.util.stream.Collectors): 2