
Example 1 with VolumeChooserEnvironment

Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.

From class Master, method moveRootTabletToRootTable:

private void moveRootTabletToRootTable(IZooReaderWriter zoo) throws Exception {
    String dirZPath = ZooUtil.getRoot(getInstance()) + RootTable.ZROOT_TABLET_PATH;
    if (!zoo.exists(dirZPath)) {
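        // The root tablet used to live under the metadata table's directory; move it to
        // the root table's own directory, on a volume picked by the volume chooser.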
        Path oldPath = fs.getFullPath(FileType.TABLE, "/" + MetadataTable.ID + "/root_tablet");
        if (fs.exists(oldPath)) {
            VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(RootTable.ID);
            String newPath = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID;
            fs.mkdirs(new Path(newPath));
            if (!fs.rename(oldPath, new Path(newPath))) {
                throw new IOException("Failed to move root tablet from " + oldPath + " to " + newPath);
            }
            log.info("Upgrade renamed {} to {}", oldPath, newPath);
        }
        Path location = null;
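        // The root tablet must now be present in exactly one of the configured tables directories.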
        for (String basePath : ServerConstants.getTablesDirs()) {
            Path path = new Path(basePath + "/" + RootTable.ID + RootTable.ROOT_TABLET_LOCATION);
            if (fs.exists(path)) {
                if (location != null) {
                    throw new IllegalStateException("Root table at multiple locations " + location + " " + path);
                }
                location = path;
            }
        }
        if (location == null)
            throw new IllegalStateException("Failed to find root tablet");
        log.info("Upgrade setting root table location in zookeeper {}", location);
        zoo.putPersistentData(dirZPath, location.toString().getBytes(), NodeExistsPolicy.FAIL);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment), IOException (java.io.IOException)
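The pattern above recurs throughout these examples: construct a VolumeChooserEnvironment, let the VolumeManager pick a base URI from the configured volumes, then append the standard tables-directory layout. A minimal sketch of that pattern (chooseTableDir is a hypothetical helper; a VolumeManager fs and a Table.ID tableId are assumed to be available):

private static String chooseTableDir(VolumeManager fs, Table.ID tableId) {
    // Table-scoped environment: the configured VolumeChooser may use the table id
    // to route different tables to different volumes.
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
    String baseUri = fs.choose(chooserEnv, ServerConstants.getBaseUris());
    // Table data lives under <baseUri>/tables/<tableId>.
    return baseUri + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId;
}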

Example 2 with VolumeChooserEnvironment

Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.

From class DfsLogger, method open:

/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the file. The file is ready to be used for ingest if this method
 * returns successfully. If an exception is thrown from this method, it is the caller's responsibility to ensure that {@link #close()} is called to prevent
 * leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL
 */
public synchronized void open(String address) throws IOException {
    String filename = UUID.randomUUID().toString();
    log.debug("Address is {}", address);
    String logger = Joiner.on("+").join(address.split(":"));
    log.debug("DfsLogger.open() begin");
    VolumeManager fs = conf.getFileSystem();
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.LOGGER);
    logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
    metaReference = toString();
    LoggerOperation op = null;
    try {
        short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
        if (replication == 0)
            replication = fs.getDefaultReplication(new Path(logPath));
        long blockSize = getWalBlockSize(conf.getConfiguration());
        if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
            logFile = fs.createSyncable(new Path(logPath), 0, replication, blockSize);
        else
            logFile = fs.create(new Path(logPath), true, 0, replication, blockSize);
        sync = logFile.getClass().getMethod("hsync");
        flush = logFile.getClass().getMethod("hflush");
        // Initialize the crypto operations.
        org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory.getCryptoModule(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
        // Initialize the log file with a header and the crypto params used to set up this log file.
        logFile.write(LOG_FILE_HEADER_V3.getBytes(UTF_8));
        CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());
        // Immediately update to the correct cipher. Doing this here keeps the CryptoModule independent of the writers using it
        if (params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()) != null && !params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()).equals("")) {
            params.setCipherSuite(params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()));
        }
        NoFlushOutputStream nfos = new NoFlushOutputStream(logFile);
        params.setPlaintextOutputStream(nfos);
        // In order to bootstrap the reading of this file later, we record here the CryptoModule that was used to encipher it,
        // so that the crypto module can re-read its own parameters.
        logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
        params = cryptoModule.getEncryptingOutputStream(params);
        OutputStream encipheringOutputStream = params.getEncryptedOutputStream();
        // If the crypto module just returned our original stream, use it directly instead of wrapping it in another data OutputStream.
        if (encipheringOutputStream == nfos) {
            log.debug("No enciphering, using raw output stream");
            encryptingLogFile = nfos;
        } else {
            log.debug("Enciphering found, wrapping in DataOutputStream");
            encryptingLogFile = new DataOutputStream(encipheringOutputStream);
        }
        LogFileKey key = new LogFileKey();
        key.event = OPEN;
        key.tserverSession = filename;
        key.filename = filename;
        op = logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), Durability.SYNC);
    } catch (Exception ex) {
        if (logFile != null)
            logFile.close();
        logFile = null;
        encryptingLogFile = null;
        throw new IOException(ex);
    }
    syncThread = new Daemon(new LoggingRunnable(log, new LogSyncingTask()));
    syncThread.setName("Accumulo WALog thread " + toString());
    syncThread.start();
    op.await();
    log.debug("Got new write-ahead log: {}", this);
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), DataOutputStream (java.io.DataOutputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), NoFlushOutputStream (org.apache.accumulo.core.security.crypto.NoFlushOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), OutputStream (java.io.OutputStream), LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey), IOException (java.io.IOException), EOFException (java.io.EOFException), ClosedChannelException (java.nio.channels.ClosedChannelException), CryptoModule (org.apache.accumulo.core.security.crypto.CryptoModule), LoggingRunnable (org.apache.accumulo.fate.util.LoggingRunnable), CryptoModuleParameters (org.apache.accumulo.core.security.crypto.CryptoModuleParameters), Daemon (org.apache.accumulo.core.util.Daemon), VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment)
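Example 2 uses the other constructor, which takes a ChooserScope rather than a table ID, for files that do not belong to any table (here, write-ahead logs). A hedged sketch of that variant (chooseWalPath is a hypothetical helper; the WAL layout follows the code above):

private static String chooseWalPath(VolumeManager fs, String serverName, String filename) {
    // Scope-based environment: WALs are chosen under the LOGGER scope, not per table.
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.LOGGER);
    return fs.choose(chooserEnv, ServerConstants.getBaseUris())
        + Path.SEPARATOR + ServerConstants.WAL_DIR
        + Path.SEPARATOR + serverName
        + Path.SEPARATOR + filename;
}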

Example 3 with VolumeChooserEnvironment

Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.

From class Initialize, method initFileSystem:

private void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, String rootTabletDir) throws IOException {
    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()), false);
    // initialize the system tables' initial config in zookeeper
    initSystemTablesConfig();
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
    String tableMetadataTabletDir = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + TABLE_TABLETS_TABLET_DIR;
    String replicationTableDefaultTabletDir = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
    String defaultMetadataTabletDir = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + Constants.DEFAULT_TABLET_LOCATION;
    // create table and default tablets directories
    createDirectories(fs, rootTabletDir, tableMetadataTabletDir, defaultMetadataTabletDir, replicationTableDefaultTabletDir);
    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
    // populate the metadata table's tablet with info about the replication table's one initial tablet
    String metadataFileName = tableMetadataTabletDir + Path.SEPARATOR + "0_1." + ext;
    Tablet replicationTablet = new Tablet(ReplicationTable.ID, replicationTableDefaultTabletDir, null, null);
    createMetadataFile(fs, metadataFileName, replicationTablet);
    // populate the root tablet with info about the metadata table's two initial tablets
    String rootTabletFileName = rootTabletDir + Path.SEPARATOR + "00000_00000." + ext;
    Text splitPoint = TabletsSection.getRange().getEndKey().getRow();
    Tablet tablesTablet = new Tablet(MetadataTable.ID, tableMetadataTabletDir, null, splitPoint, metadataFileName);
    Tablet defaultTablet = new Tablet(MetadataTable.ID, defaultMetadataTabletDir, splitPoint, null);
    createMetadataFile(fs, rootTabletFileName, tablesTablet, defaultTablet);
}
Also used: VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment), Text (org.apache.hadoop.io.Text)
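One detail worth calling out: a single chooser environment is reused for all three choose() calls above, and each call may return a different base URI depending on the configured chooser, which is what spreads the initial tablet directories across volumes. A minimal sketch of that behavior (fs is an assumed VolumeManager):

VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
// Each call consults the chooser independently, so the results may differ.
String dirA = fs.choose(chooserEnv, ServerConstants.getBaseUris());
String dirB = fs.choose(chooserEnv, ServerConstants.getBaseUris());
// dirA and dirB are not guaranteed to be equal.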

Example 4 with VolumeChooserEnvironment

Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.

From class Tablet, method createTabletDirectory:

private static String createTabletDirectory(VolumeManager fs, Table.ID tableId, Text endRow) {
    String lowDirectory;
    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
    String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
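    // Keep retrying until a directory is created; transient filesystem errors are
    // logged and retried after a short sleep.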
    while (true) {
        try {
            if (endRow == null) {
                lowDirectory = Constants.DEFAULT_TABLET_LOCATION;
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath) || fs.mkdirs(lowDirectoryPath)) {
                    FileSystem pathFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory()).toString();
                }
                log.warn("Failed to create {} for unknown reason", lowDirectoryPath);
            } else {
                lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
                Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
                if (fs.exists(lowDirectoryPath))
                    throw new IllegalStateException("Dir exist when it should not " + lowDirectoryPath);
                if (fs.mkdirs(lowDirectoryPath)) {
                    FileSystem lowDirectoryFs = fs.getVolumeByPath(lowDirectoryPath).getFileSystem();
                    return lowDirectoryPath.makeQualified(lowDirectoryFs.getUri(), lowDirectoryFs.getWorkingDirectory()).toString();
                }
            }
        } catch (IOException e) {
            log.warn("{}", e.getMessage(), e);
        }
        log.warn("Failed to create dir for tablet in table {} in volume {} will retry ...", tableId, volume);
        sleepUninterruptibly(3, TimeUnit.SECONDS);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment), UniqueNameAllocator (org.apache.accumulo.server.tablets.UniqueNameAllocator), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException)
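Besides choosing the volume, Example 4 qualifies the new directory against the Hadoop FileSystem that backs it before returning it, so the stored path carries an explicit scheme and authority. That step in isolation (fs is a VolumeManager and dirPath an assumed Path under one of its volumes):

FileSystem pathFs = fs.getVolumeByPath(dirPath).getFileSystem();
// makeQualified resolves a possibly scheme-less path against this FileSystem's
// URI and working directory, yielding a fully qualified path.
Path qualified = dirPath.makeQualified(pathFs.getUri(), pathFs.getWorkingDirectory());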

Example 5 with VolumeChooserEnvironment

Use of org.apache.accumulo.server.fs.VolumeChooserEnvironment in project accumulo by apache.

From class TabletGroupWatcher, method deleteTablets:

private void deleteTablets(MergeInfo info) throws AccumuloException {
    KeyExtent extent = info.getExtent();
    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    Master.log.debug("Deleting tablets for {}", extent);
    char timeType = '\0';
    KeyExtent followingTablet = null;
    if (extent.getEndRow() != null) {
        Key nextExtent = new Key(extent.getEndRow()).followingKey(PartialKey.ROW);
        followingTablet = getHighTablet(new KeyExtent(extent.getTableId(), nextExtent.getRow(), extent.getEndRow()));
        Master.log.debug("Found following tablet {}", followingTablet);
    }
    try {
        Connector conn = this.master.getConnector();
        Text start = extent.getPrevEndRow();
        if (start == null) {
            start = new Text();
        }
        Master.log.debug("Making file deletion entries for {}", extent);
        Range deleteRange = new Range(KeyExtent.getMetadataEntry(extent.getTableId(), start), false, KeyExtent.getMetadataEntry(extent.getTableId(), extent.getEndRow()), true);
        Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(deleteRange);
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        Set<FileRef> datafiles = new TreeSet<>();
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                datafiles.add(new FileRef(this.master.fs, key));
                if (datafiles.size() > 1000) {
                    MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
                    datafiles.clear();
                }
            } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                timeType = entry.getValue().toString().charAt(0);
            } else if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
                throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
            } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                // ACCUMULO-2974 Need to include the TableID when converting a relative path to an absolute path.
                // The value has the leading path separator already included so it doesn't need it included.
                String path = entry.getValue().toString();
                if (path.contains(":")) {
                    datafiles.add(new FileRef(path));
                } else {
                    datafiles.add(new FileRef(path, this.master.fs.getFullPath(FileType.TABLE, Path.SEPARATOR + extent.getTableId() + path)));
                }
                if (datafiles.size() > 1000) {
                    MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
                    datafiles.clear();
                }
            }
        }
        MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
        BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
        try {
            deleteTablets(info, deleteRange, bw, conn);
        } finally {
            bw.close();
        }
        if (followingTablet != null) {
            Master.log.debug("Updating prevRow of {} to {}", followingTablet, extent.getPrevEndRow());
            bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
            try {
                Mutation m = new Mutation(followingTablet.getMetadataEntry());
                TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
                ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
                bw.addMutation(m);
                bw.flush();
            } finally {
                bw.close();
            }
        } else {
            // Recreate the default tablet to hold the end of the table
            Master.log.debug("Recreating the last tablet to point to {}", extent.getPrevEndRow());
            VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(extent.getTableId());
            String tdir = master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
            MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master, timeType, this.master.masterLock);
        }
    } catch (RuntimeException | IOException | TableNotFoundException | AccumuloSecurityException ex) {
        throw new AccumuloException(ex);
    }
}
Also used: Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), AccumuloException (org.apache.accumulo.core.client.AccumuloException), Text (org.apache.hadoop.io.Text), IOException (java.io.IOException), Range (org.apache.accumulo.core.data.Range), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), FileRef (org.apache.accumulo.server.fs.FileRef), VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment), TreeSet (java.util.TreeSet), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey)
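A secondary pattern in Example 5 is the bounded batching of delete entries: candidates are flushed every 1000 entries so a large merge never holds every FileRef in memory at once. A minimal sketch of the batching alone (candidates stands in for the metadata scan above):

Set<FileRef> datafiles = new TreeSet<>();
for (FileRef ref : candidates) {
    datafiles.add(ref);
    if (datafiles.size() > 1000) {
        // Write delete entries for this chunk, then start a new one.
        MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
        datafiles.clear();
    }
}
// Flush the final partial batch.
MetadataTableUtil.addDeleteEntries(extent, datafiles, master);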

Aggregations

VolumeChooserEnvironment (org.apache.accumulo.server.fs.VolumeChooserEnvironment): 12 usages
IOException (java.io.IOException): 6 usages
Path (org.apache.hadoop.fs.Path): 5 usages
Mutation (org.apache.accumulo.core.data.Mutation): 4 usages
Value (org.apache.accumulo.core.data.Value): 4 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 3 usages
Scanner (org.apache.accumulo.core.client.Scanner): 3 usages
Key (org.apache.accumulo.core.data.Key): 3 usages
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 3 usages
Text (org.apache.hadoop.io.Text): 3 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 2 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 2 usages
Connector (org.apache.accumulo.core.client.Connector): 2 usages
PartialKey (org.apache.accumulo.core.data.PartialKey): 2 usages
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 2 usages
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 2 usages
DataOutputStream (java.io.DataOutputStream): 1 usage
EOFException (java.io.EOFException): 1 usage
FileNotFoundException (java.io.FileNotFoundException): 1 usage
OutputStream (java.io.OutputStream): 1 usage