Search in sources :

Example 6 with BlobContainer

use of org.elasticsearch.common.blobstore.BlobContainer in project elasticsearch by elastic.

In class BlobStoreRepository, the method startVerification:

@Override
public String startVerification() {
    try {
        // A readonly repository cannot be written to, so there is nothing to verify.
        if (isReadOnly()) {
            return null;
        }
        // Write a randomly seeded marker blob, then move it into place to prove
        // the repository path is both writable and supports the move operation.
        final String seed = UUIDs.randomBase64UUID();
        final byte[] seedBytes = Strings.toUTF8Bytes(seed);
        final BlobContainer verificationContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
        final String blobName = "master.dat";
        final BytesArray payload = new BytesArray(seedBytes);
        try (InputStream in = payload.streamInput()) {
            verificationContainer.writeBlob(blobName + "-temp", in, payload.length());
        }
        // Make sure that move is supported
        verificationContainer.move(blobName + "-temp", blobName);
        return seed;
    } catch (IOException exp) {
        throw new RepositoryVerificationException(metadata.name(), "path " + basePath() + " is not accessible on master node", exp);
    }
}
Also used : BytesArray(org.elasticsearch.common.bytes.BytesArray) RateLimitingInputStream(org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream) FilterInputStream(java.io.FilterInputStream) SlicedInputStream(org.elasticsearch.index.snapshots.blobstore.SlicedInputStream) InputStream(java.io.InputStream) RepositoryVerificationException(org.elasticsearch.repositories.RepositoryVerificationException) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) IOException(java.io.IOException)

Example 7 with BlobContainer

use of org.elasticsearch.common.blobstore.BlobContainer in project elasticsearch by elastic.

In class BlobStoreRepository, the method deleteSnapshot:

// Deletes a snapshot and all data exclusively owned by it from the repository.
// The ordering below matters: the repository index generation (the source of truth
// for which snapshots exist) is rewritten first, then the individual blobs are
// cleaned up best-effort — a crash mid-way leaves stale blobs, not a corrupt index.
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
    if (isReadOnly()) {
        throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository");
    }
    final RepositoryData repositoryData = getRepositoryData();
    List<String> indices = Collections.emptyList();
    SnapshotInfo snapshot = null;
    try {
        snapshot = getSnapshotInfo(snapshotId);
        indices = snapshot.indices();
    } catch (SnapshotMissingException ex) {
        // A missing snapshot is fatal: there is nothing to delete.
        throw ex;
    } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
        // A corrupt/unreadable snapshot file is tolerated: proceed so the delete
        // can still remove whatever blobs remain.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
    }
    MetaData metaData = null;
    try {
        // Snapshot-level metadata is needed to discover per-index shard counts below.
        // When the snapshot file was unreadable, pass a null version so the reader
        // detects the format itself (best effort; errors are ignored here).
        if (snapshot != null) {
            metaData = readSnapshotMetaData(snapshotId, snapshot.version(), repositoryData.resolveIndices(indices), true);
        } else {
            metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true);
        }
    } catch (IOException | SnapshotException ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
    }
    try {
        // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots
        final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId);
        writeIndexGen(updatedRepositoryData, repositoryStateId);
        // delete the snapshot file
        safeSnapshotBlobDelete(snapshot, snapshotId.getUUID());
        // delete the global metadata file
        safeGlobalMetaDataBlobDelete(snapshot, snapshotId.getUUID());
        // Now delete all indices
        for (String index : indices) {
            final IndexId indexId = repositoryData.resolveIndexId(index);
            BlobPath indexPath = basePath().add("indices").add(indexId.getId());
            BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
            try {
                indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID());
            } catch (IOException ex) {
                // Best effort: a failed index-metadata delete leaves a stale blob but
                // must not abort the rest of the cleanup.
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
            }
            // Shard-level data can only be deleted when the snapshot metadata was
            // readable (it supplies the shard count per index).
            if (metaData != null) {
                IndexMetaData indexMetaData = metaData.index(index);
                if (indexMetaData != null) {
                    for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
                        try {
                            delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
                        } catch (SnapshotException ex) {
                            // Capture the loop variable for the lazy log message.
                            final int finalShardId = shardId;
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
                        }
                    }
                }
            }
        }
        // cleanup indices that are no longer part of the repository
        final Collection<IndexId> indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values());
        indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values());
        final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
        for (final IndexId indexId : indicesToCleanUp) {
            try {
                indicesBlobContainer.deleteBlob(indexId.getId());
            } catch (DirectoryNotEmptyException dnee) {
                // if the directory isn't empty for some reason, it will fail to clean up;
                // we'll ignore that and accept that cleanup didn't fully succeed.
                // since we are using UUIDs for path names, this won't be an issue for
                // snapshotting indices of the same name
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
            } catch (IOException ioe) {
                // a different IOException occurred while trying to delete - will just log the issue for now
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe);
            }
        }
    } catch (IOException ex) {
        // Failing to rewrite the index generation (or other I/O during cleanup) means
        // the repository state could not be updated — surface it to the caller.
        throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex);
    }
}
Also used : IndexId(org.elasticsearch.repositories.IndexId) BlobPath(org.elasticsearch.common.blobstore.BlobPath) RepositoryException(org.elasticsearch.repositories.RepositoryException) DirectoryNotEmptyException(java.nio.file.DirectoryNotEmptyException) IOException(java.io.IOException) SnapshotException(org.elasticsearch.snapshots.SnapshotException) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) RepositoryData(org.elasticsearch.repositories.RepositoryData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) ShardId(org.elasticsearch.index.shard.ShardId) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) StoreFileMetaData(org.elasticsearch.index.store.StoreFileMetaData) BlobMetaData(org.elasticsearch.common.blobstore.BlobMetaData) RepositoryMetaData(org.elasticsearch.cluster.metadata.RepositoryMetaData) ElasticsearchParseException(org.elasticsearch.ElasticsearchParseException) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)

Example 8 with BlobContainer

use of org.elasticsearch.common.blobstore.BlobContainer in project elasticsearch by elastic.

In class BlobStoreRepository, the method readSnapshotMetaData:

private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVersion, List<IndexId> indices, boolean ignoreIndexErrors) throws IOException {
    if (snapshotVersion == null) {
        // Unknown version: probe the metadata file format to at least confirm the
        // snapshot exists (callers passing null must also be tolerant of index errors).
        assert ignoreIndexErrors;
        if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID()) == false) {
            throw new SnapshotMissingException(metadata.name(), snapshotId);
        }
        snapshotVersion = Version.CURRENT;
    }
    // Read the cluster-wide (global) metadata for the snapshot.
    final MetaData globalMetaData;
    try {
        globalMetaData = globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
    } catch (NoSuchFileException ex) {
        throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
    } catch (IOException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex);
    }
    // Layer each index's metadata on top of the global metadata.
    final MetaData.Builder builder = MetaData.builder(globalMetaData);
    for (IndexId index : indices) {
        final BlobContainer indexContainer = blobStore().blobContainer(basePath().add("indices").add(index.getId()));
        try {
            builder.put(indexMetaDataFormat.read(indexContainer, snapshotId.getUUID()), false);
        } catch (ElasticsearchParseException | IOException ex) {
            if (ignoreIndexErrors == false) {
                throw ex;
            }
            // Tolerated failure: log and continue with the remaining indices.
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex);
        }
    }
    return builder.build();
}
Also used : IndexId(org.elasticsearch.repositories.IndexId) BlobPath(org.elasticsearch.common.blobstore.BlobPath) NoSuchFileException(java.nio.file.NoSuchFileException) IOException(java.io.IOException) SnapshotException(org.elasticsearch.snapshots.SnapshotException) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) MetaData(org.elasticsearch.cluster.metadata.MetaData) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) StoreFileMetaData(org.elasticsearch.index.store.StoreFileMetaData) BlobMetaData(org.elasticsearch.common.blobstore.BlobMetaData) RepositoryMetaData(org.elasticsearch.cluster.metadata.RepositoryMetaData) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) ElasticsearchParseException(org.elasticsearch.ElasticsearchParseException) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)

Example 9 with BlobContainer

use of org.elasticsearch.common.blobstore.BlobContainer in project elasticsearch by elastic.

In class ESBlobStoreContainerTestCase, the method testMoveAndList:

public void testMoveAndList() throws IOException {
    try (BlobStore store = newBlobStore()) {
        final BlobContainer container = store.blobContainer(new BlobPath());
        assertThat(container.listBlobs().size(), equalTo(0));
        final int fooCount = randomIntBetween(0, 10);
        final int barCount = randomIntBetween(3, 20);
        final Map<String, Long> expectedSizes = new HashMap<>();
        // Write fooCount blobs with the "foo-" prefix.
        for (int i = 0; i < fooCount; i++) {
            final int blobLength = randomIntBetween(10, 100);
            final String blobName = "foo-" + i + "-";
            expectedSizes.put(blobName, (long) blobLength);
            writeRandomBlob(container, blobName, blobLength);
        }
        // Write barCount - 1 blobs with the "bar-" prefix; "bar-0-" is written
        // separately below so its content can be captured for the move check.
        for (int i = 1; i < barCount; i++) {
            final int blobLength = randomIntBetween(10, 100);
            final String blobName = "bar-" + i + "-";
            expectedSizes.put(blobName, (long) blobLength);
            writeRandomBlob(container, blobName, blobLength);
        }
        final int length = randomIntBetween(10, 100);
        final String name = "bar-0-";
        expectedSizes.put(name, (long) length);
        final byte[] data = writeRandomBlob(container, name, length);
        // Full listing must report every blob with its exact name and size.
        Map<String, BlobMetaData> listing = container.listBlobs();
        assertThat(listing.size(), equalTo(fooCount + barCount));
        for (Map.Entry<String, Long> expected : expectedSizes.entrySet()) {
            final BlobMetaData meta = listing.get(expected.getKey());
            assertThat(expected.getKey(), meta, notNullValue());
            assertThat(meta.name(), equalTo(expected.getKey()));
            assertThat(meta.length(), equalTo(expected.getValue()));
        }
        // Prefix listing must partition the blobs correctly.
        assertThat(container.listBlobsByPrefix("foo-").size(), equalTo(fooCount));
        assertThat(container.listBlobsByPrefix("bar-").size(), equalTo(barCount));
        assertThat(container.listBlobsByPrefix("baz-").size(), equalTo(0));
        // Move to a new location and verify the old name is gone, the new name
        // exists with the same size, and the content round-trips intact.
        final String newName = "bar-new";
        container.move(name, newName);
        assertThat(container.listBlobsByPrefix(name).size(), equalTo(0));
        listing = container.listBlobsByPrefix(newName);
        assertThat(listing.size(), equalTo(1));
        assertThat(listing.get(newName).length(), equalTo(expectedSizes.get(name)));
        assertThat(data, equalTo(readBlobFully(container, newName, length)));
    }
}
Also used : BlobPath(org.elasticsearch.common.blobstore.BlobPath) HashMap(java.util.HashMap) BlobMetaData(org.elasticsearch.common.blobstore.BlobMetaData) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) HashMap(java.util.HashMap) Map(java.util.Map) BlobStore(org.elasticsearch.common.blobstore.BlobStore)

Example 10 with BlobContainer

use of org.elasticsearch.common.blobstore.BlobContainer in project elasticsearch by elastic.

In class ESBlobStoreContainerTestCase, the method testWriteRead:

public void testWriteRead() throws IOException {
    try (BlobStore store = newBlobStore()) {
        final BlobContainer container = store.blobContainer(new BlobPath());
        final byte[] payload = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
        writeBlob(container, "foobar", new BytesArray(payload));
        try (InputStream in = container.readBlob("foobar")) {
            // Read back in randomly sized chunks at random buffer offsets to
            // exercise partial reads, then compare against the written payload.
            final BytesRefBuilder collected = new BytesRefBuilder();
            while (collected.length() < payload.length) {
                final byte[] chunk = new byte[scaledRandomIntBetween(1, payload.length - collected.length())];
                final int offset = scaledRandomIntBetween(0, chunk.length - 1);
                final int bytesRead = in.read(chunk, offset, chunk.length - offset);
                collected.append(new BytesRef(chunk, offset, bytesRead));
            }
            assertEquals(payload.length, collected.length());
            assertArrayEquals(payload, Arrays.copyOfRange(collected.bytes(), 0, collected.length()));
        }
    }
}
Also used : BlobPath(org.elasticsearch.common.blobstore.BlobPath) BytesArray(org.elasticsearch.common.bytes.BytesArray) BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder) InputStream(java.io.InputStream) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) BlobStore(org.elasticsearch.common.blobstore.BlobStore) BytesRef(org.apache.lucene.util.BytesRef)

Aggregations

BlobContainer (org.elasticsearch.common.blobstore.BlobContainer)16 BlobStore (org.elasticsearch.common.blobstore.BlobStore)9 BlobPath (org.elasticsearch.common.blobstore.BlobPath)8 IOException (java.io.IOException)5 BytesArray (org.elasticsearch.common.bytes.BytesArray)5 InputStream (java.io.InputStream)4 BlobMetaData (org.elasticsearch.common.blobstore.BlobMetaData)4 FsBlobStore (org.elasticsearch.common.blobstore.fs.FsBlobStore)4 IndexId (org.elasticsearch.repositories.IndexId)4 ChecksumBlobStoreFormat (org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat)4 ElasticsearchParseException (org.elasticsearch.ElasticsearchParseException)3 IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData)3 RepositoryData (org.elasticsearch.repositories.RepositoryData)3 Matchers.containsString (org.hamcrest.Matchers.containsString)3 EOFException (java.io.EOFException)2 NoSuchFileException (java.nio.file.NoSuchFileException)2 ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage)2 Supplier (org.apache.logging.log4j.util.Supplier)2 ElasticsearchCorruptionException (org.elasticsearch.ElasticsearchCorruptionException)2 MetaData (org.elasticsearch.cluster.metadata.MetaData)2