Example 1 with UniqueNameAllocator

Use of org.apache.accumulo.server.tablets.UniqueNameAllocator in the apache/accumulo project.

From the class Tablet, the method createTabletDirectory:

private static String createTabletDirectory(VolumeManager fs, Table.ID tableId, Text endRow) {
    String lowDirectory;
    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
    String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
    // retry until a directory is created; the default tablet uses a fixed name,
    // every other tablet gets a fresh name from the allocator
    while (true) {
        try {
            if (endRow == null) {
                lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) {
                    FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory()).toString();
                }
                log.warn("Failed to create {} for unknown reason", lowDirectoryPath);
            } else {
                lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath))
                    throw new IllegalStateException("Dir exist when it should not " + lowDirectoryPath);
                if (fs.mkdirs(lowDirectoryPath)) {
                    FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory()).toString();
                }
            }
        } catch (IOException e) {
            log.warn("{}", e.getMessage(), e);
        }
        log.warn("Failed to create dir for tablet in table {} in volume {} will retry ...", tableId, volume);
        sleepUninterruptibly(3, TimeUnit.SECONDS);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException)
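
All five examples on this page reduce to the same core pattern: fetch the server's singleton name allocator and append its next name to a prefix or parent path. A minimal sketch of that pattern, using only the calls shown in the examples (getInstance() and getNextName()); the class and helper names below are illustrative, not part of Accumulo:

import org.apache.accumulo.server.tablets.UniqueNameAllocator;
import org.apache.hadoop.fs.Path;

class UniqueNameSketch {

    // Names come from a shared allocator, so directories created concurrently
    // by different callers cannot collide on name.
    static Path newUniqueDir(Path parent, String prefix) {
        UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
        return new Path(parent, prefix + namer.getNextName());
    }
}

Example 1 applies this to generated tablet directories, Examples 2 and 5 to bulk-import directories, and Examples 3 and 4 to the new names of imported files.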

Example 2 with UniqueNameAllocator

Use of org.apache.accumulo.server.tablets.UniqueNameAllocator in the apache/accumulo project.

From the class CreateImportDir, the method call:

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    Path exportDir = new Path(tableInfo.exportDir);
    String[] tableDirs = ServerConstants.getTablesDirs();
    log.info("Looking for matching filesystem for " + exportDir + " from options " + Arrays.toString(tableDirs));
    Path base = master.getFileSystem().matchingFileSystem(exportDir, tableDirs);
    if (base == null) {
        throw new IOException(tableInfo.exportDir + " is not in a volume configured for Accumulo");
    }
    log.info("Chose base table directory of " + base);
    Path directory = new Path(base, tableInfo.tableId.canonicalID());
    Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
    tableInfo.importDir = newBulkDir.toString();
    log.info("Using import dir: " + tableInfo.importDir);
    return new MapImportFileNames(tableInfo);
}
Also used: Path (org.apache.hadoop.fs.Path), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), IOException (java.io.IOException)

Example 3 with UniqueNameAllocator

Use of org.apache.accumulo.server.tablets.UniqueNameAllocator in the apache/accumulo project.

From the class MapImportFileNames, the method call:

@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
    Path path = new Path(tableInfo.importDir, "mappings.txt");
    BufferedWriter mappingsWriter = null;
    try {
        VolumeManager fs = environment.getFileSystem();
        fs.mkdirs(new Path(tableInfo.importDir));
        FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
        UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
        mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
        for (FileStatus fileStatus : files) {
            String fileName = fileStatus.getPath().getName();
            log.info("filename " + fileStatus.getPath().toString());
            String[] sa = fileName.split("\\.");
            String extension = "";
            if (sa.length > 1) {
                extension = sa[sa.length - 1];
                if (!FileOperations.getValidExtensions().contains(extension)) {
                    continue;
                }
            } else {
                // assume it is a map file
                extension = Constants.MAPFILE_EXTENSION;
            }
            String newName = "I" + namer.getNextName() + "." + extension;
            mappingsWriter.append(fileName);
            mappingsWriter.append(':');
            mappingsWriter.append(newName);
            mappingsWriter.newLine();
        }
        mappingsWriter.close();
        mappingsWriter = null;
        return new PopulateMetadataTable(tableInfo);
    } catch (IOException ioe) {
        log.warn("{}", ioe.getMessage(), ioe);
        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error writing mapping file " + path + " " + ioe.getMessage());
    } finally {
        if (mappingsWriter != null)
            try {
                mappingsWriter.close();
            } catch (IOException ioe) {
                log.warn("Failed to close " + path, ioe);
            }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), FileStatus (org.apache.hadoop.fs.FileStatus), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), OutputStreamWriter (java.io.OutputStreamWriter), IOException (java.io.IOException), BufferedWriter (java.io.BufferedWriter), AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException)
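
The mappings.txt written above pairs each original file name with its newly allocated name, one "oldName:newName" entry per line, for the PopulateMetadataTable step returned at the end to translate file references during the import. A hedged sketch of reading that format back, assuming a plain Hadoop FileSystem handle; the MappingsReaderSketch class and readMappings helper are illustrative, not Accumulo code:

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class MappingsReaderSketch {

    // Each line has the form "originalName:newName"; the new name never
    // contains ':', so splitting on the last colon is safe.
    static Map<String, String> readMappings(FileSystem fs, Path mappingsFile) throws IOException {
        Map<String, String> renames = new HashMap<>();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(mappingsFile), UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                int sep = line.lastIndexOf(':');
                renames.put(line.substring(0, sep), line.substring(sep + 1));
            }
        }
        return renames;
    }
}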

Example 4 with UniqueNameAllocator

Use of org.apache.accumulo.server.tablets.UniqueNameAllocator in the apache/accumulo project.

From the class BulkImport, the method prepareBulkImport:

private String prepareBulkImport(Master master, final VolumeManager fs, String dir, Table.ID tableId) throws Exception {
    final Path bulkDir = createNewBulkDir(fs, tableId);
    MetadataTableUtil.addBulkLoadInProgressFlag(master, "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
    Path dirPath = new Path(dir);
    FileStatus[] mapFiles = fs.listStatus(dirPath);
    final UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
    SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move");
    List<Future<Exception>> results = new ArrayList<>();
    for (FileStatus file : mapFiles) {
        final FileStatus fileStatus = file;
        results.add(workers.submit(new Callable<Exception>() {

            @Override
            public Exception call() throws Exception {
                try {
                    String[] sa = fileStatus.getPath().getName().split("\\.");
                    String extension = "";
                    if (sa.length > 1) {
                        extension = sa[sa.length - 1];
                        if (!FileOperations.getValidExtensions().contains(extension)) {
                            log.warn("{} does not have a valid extension, ignoring", fileStatus.getPath());
                            return null;
                        }
                    } else {
                        // assume it is a map file
                        extension = Constants.MAPFILE_EXTENSION;
                    }
                    if (extension.equals(Constants.MAPFILE_EXTENSION)) {
                        if (!fileStatus.isDirectory()) {
                            log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                            return null;
                        }
                        if (fileStatus.getPath().getName().equals("_logs")) {
                            log.info("{} is probably a log directory from a map/reduce task, skipping", fileStatus.getPath());
                            return null;
                        }
                        try {
                            FileStatus dataStatus = fs.getFileStatus(new Path(fileStatus.getPath(), MapFile.DATA_FILE_NAME));
                            if (dataStatus.isDirectory()) {
                                log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                                return null;
                            }
                        } catch (FileNotFoundException fnfe) {
                            log.warn("{} is not a map file, ignoring", fileStatus.getPath());
                            return null;
                        }
                    }
                    String newName = "I" + namer.getNextName() + "." + extension;
                    Path newPath = new Path(bulkDir, newName);
                    try {
                        fs.rename(fileStatus.getPath(), newPath);
                        log.debug("Moved {} to {}", fileStatus.getPath(), newPath);
                    } catch (IOException E1) {
                        log.error("Could not move: {} {}", fileStatus.getPath().toString(), E1.getMessage());
                    }
                } catch (Exception ex) {
                    return ex;
                }
                return null;
            }
        }));
    }
    workers.shutdown();
    while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {
        // wait for all rename tasks to finish
    }
    for (Future<Exception> ex : results) {
        if (ex.get() != null) {
            throw ex.get();
        }
    }
    return bulkDir.toString();
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), ArrayList (java.util.ArrayList), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), Callable (java.util.concurrent.Callable), AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), Future (java.util.concurrent.Future), SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool)
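
The rename loop above fans the work out over a small pool and has each Callable return its exception rather than throw it, then rethrows the first recorded failure once the pool has drained. A stripped-down sketch of the same pattern with plain java.util.concurrent (SimpleThreadPool is essentially Accumulo's ThreadPoolExecutor wrapper); the ParallelWorkSketch class is illustrative only:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

class ParallelWorkSketch {

    // Each task catches its own failure and returns it; the caller rethrows the
    // first non-null result, mirroring prepareBulkImport above.
    static void runAll(List<Callable<Exception>> tasks, int threads) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(threads);
        List<Future<Exception>> results = new ArrayList<>();
        for (Callable<Exception> task : tasks) {
            results.add(pool.submit(task));
        }
        pool.shutdown();
        while (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
            // keep waiting; every submitted task runs to completion
        }
        for (Future<Exception> result : results) {
            if (result.get() != null) {
                throw result.get();
            }
        }
    }
}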

Example 5 with UniqueNameAllocator

Use of org.apache.accumulo.server.tablets.UniqueNameAllocator in the apache/accumulo project.

From the class BulkImport, the method createNewBulkDir:

private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOException {
    Path tempPath = fs.matchingFileSystem(new Path(sourceDir), ServerConstants.getTablesDirs());
    if (tempPath == null)
        throw new IOException(sourceDir + " is not in a volume configured for Accumulo");
    String tableDir = tempPath.toString();
    if (tableDir == null)
        throw new IOException(sourceDir + " is not in a volume configured for Accumulo");
    Path directory = new Path(tableDir + "/" + tableId);
    fs.mkdirs(directory);
    // only one should be able to create the lock file
    // the purpose of the lock file is to avoid a race
    // condition between the call to fs.exists() and
    // fs.mkdirs()... if only hadoop had a mkdir() function
    // that failed when the dir existed
    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    while (true) {
        Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
        if (fs.exists(newBulkDir)) // sanity check
            throw new IOException("Dir exist when it should not " + newBulkDir);
        if (fs.mkdirs(newBulkDir))
            return newBulkDir;
        log.warn("Failed to create {} for unknown reason", newBulkDir);
        sleepUninterruptibly(3, TimeUnit.SECONDS);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), IOException (java.io.IOException)

Aggregations

IOException (java.io.IOException): 5 examples
UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator): 5 examples
Path (org.apache.hadoop.fs.Path): 5 examples
AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException): 2 examples
FileStatus (org.apache.hadoop.fs.FileStatus): 2 examples
BufferedWriter (java.io.BufferedWriter): 1 example
FileNotFoundException (java.io.FileNotFoundException): 1 example
OutputStreamWriter (java.io.OutputStreamWriter): 1 example
ArrayList (java.util.ArrayList): 1 example
Callable (java.util.concurrent.Callable): 1 example
Future (java.util.concurrent.Future): 1 example
SimpleThreadPool (org.apache.accumulo.core.util.SimpleThreadPool): 1 example
VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment): 1 example
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 1 example
FileSystem (org.apache.hadoop.fs.FileSystem): 1 example