Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class Netty4BadRequestIT, method testBadRequest.
public void testBadRequest() throws IOException {
    final Response response = client().performRequest("GET", "/_nodes/settings", Collections.emptyMap());
    final ObjectPath objectPath = ObjectPath.createFromResponse(response);
    final Map<String, Object> map = objectPath.evaluate("nodes");
    int maxMaxInitialLineLength = Integer.MIN_VALUE;
    final Setting<ByteSizeValue> httpMaxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
    final String key = httpMaxInitialLineLength.getKey().substring("http.".length());
    for (Map.Entry<String, Object> entry : map.entrySet()) {
        @SuppressWarnings("unchecked")
        final Map<String, Object> settings = (Map<String, Object>) ((Map<String, Object>) entry.getValue()).get("settings");
        final int maxInitialLineLength;
        if (settings.containsKey("http")) {
            @SuppressWarnings("unchecked")
            final Map<String, Object> httpSettings = (Map<String, Object>) settings.get("http");
            if (httpSettings.containsKey(key)) {
                maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String) httpSettings.get(key), key).bytesAsInt();
            } else {
                maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt();
            }
        } else {
            maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt();
        }
        maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength);
    }
    final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a');
    final ResponseException e = expectThrows(ResponseException.class,
        () -> client().performRequest(randomFrom("GET", "POST", "PUT"), path, Collections.emptyMap()));
    assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));
    assertThat(e, hasToString(containsString("too_long_frame_exception")));
    assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes")));
}
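The loop above hinges on two ByteSizeValue entry points: parseBytesSizeValue for the string a node reports, and the setting's default otherwise. A minimal sketch of that parse, assuming the same 5.x API the test uses; the "4kb" input is illustrative (it happens to match the documented default for this setting):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeValueParsingSketch {
    public static void main(String[] args) {
        // parse a human-readable size string, as the loop above does for each
        // node's reported max_initial_line_length value
        ByteSizeValue parsed = ByteSizeValue.parseBytesSizeValue("4kb", "http.max_initial_line_length");
        System.out.println(parsed.bytesAsInt()); // 4096

        // the equivalent value built directly from a unit
        ByteSizeValue direct = new ByteSizeValue(4, ByteSizeUnit.KB);
        System.out.println(direct.bytesAsInt()); // 4096
    }
}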
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class CorruptedFileIT, method testCorruptFileThenSnapshotAndRestore.
/**
* Tests that restoring of a corrupted shard fails and we get a partial snapshot.
* TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several
* parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard.
*/
@TestLogging("org.elasticsearch.monitor.fs:DEBUG")
public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
        .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
        .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
        // no translog based flush - it might change the .liv / segments.N files
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // we have to flush at least once here since we don't corrupt the translog
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    // we don't corrupt segments.gen since S/R doesn't snapshot this file
    // the other problem here why we can't corrupt segments.X files is that the snapshot flushes again before
    // it snapshots and that will write a new segments.X+1 file
    ShardRouting shardRouting = corruptRandomPrimaryFile(false);
    logger.info("--> creating repository");
    assertAcked(client().admin().cluster().preparePutRepository("test-repo")
        .setType("fs")
        .setSettings(Settings.builder()
            .put("location", randomRepoPath().toAbsolutePath())
            .put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
        .prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .setIndices("test")
        .get();
    assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL));
    logger.info("failed during snapshot -- maybe SI file got corrupted");
    final List<Path> files = listShardFiles(shardRouting);
    Path corruptedFile = null;
    for (Path file : files) {
        if (file.getFileName().toString().startsWith("corrupted_")) {
            corruptedFile = file;
            break;
        }
    }
    assertThat(corruptedFile, notNullValue());
}
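Two details above lean on ByteSizeValue semantics: the 1 PB flush threshold is effectively infinite, so no size-based flush rewrites the .liv / segments.N files mid-test, and the repository chunk_size is denominated in raw bytes. A small sketch of the underlying unit arithmetic, assuming the same 5.x ByteSizeUnit API (the printed values follow from the 1024-based units):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class UnitArithmeticSketch {
    public static void main(String[] args) {
        // 1 PB in bytes is 1024^5: far larger than any test translog, so the
        // threshold above can never trigger a size-based flush
        System.out.println(ByteSizeUnit.PB.toBytes(1)); // 1125899906842624

        // the repository chunk_size is given in plain bytes
        ByteSizeValue chunk = new ByteSizeValue(500, ByteSizeUnit.BYTES);
        System.out.println(chunk); // prints "500b"
    }
}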
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class CorruptedTranslogIT, method enableTranslogFlush.
/** Enables translog flushing for the specified index */
private static void enableTranslogFlush(String index) {
    Settings settings = Settings.builder()
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB))
        .build();
    client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}
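A helper like this usually has an inverse that pushes the threshold out of reach so no size-based flush can occur while the translog is being corrupted. A hypothetical sketch of that counterpart for the same test class, reusing the exact update-settings pattern above and the 1 PB trick from CorruptedFileIT:

/** Effectively disables size-based translog flushing for the specified index (sketch, not the project's own helper). */
private static void disableTranslogFlush(String index) {
    // 1 PB is far beyond any test translog, so the threshold never fires
    Settings settings = Settings.builder()
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
        .build();
    client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}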
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class TranslogTests, method getTranslogConfig.
private TranslogConfig getTranslogConfig(Path path) {
    Settings build = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
        .build();
    ByteSizeValue bufferSize = randomBoolean()
        ? TranslogConfig.DEFAULT_BUFFER_SIZE
        : new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES);
    return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.getIndex(), build), BigArrays.NON_RECYCLING_INSTANCE, bufferSize);
}
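Half the time the config keeps the production default buffer; otherwise it uses a small randomized buffer between 10 bytes and 10 bytes + 128 KiB, which exercises translog writes that straddle buffer boundaries. A plain-Java sketch of that range, with java.util.Random standing in for the test framework's randomInt (assumed inclusive of its bound):

import java.util.Random;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class BufferSizeRangeSketch {
    public static void main(String[] args) {
        Random random = new Random();
        // randomInt(128 * 1024) picks from 0..131072 inclusive; nextInt's bound
        // is exclusive, hence the + 1
        int bytes = 10 + random.nextInt(128 * 1024 + 1);
        ByteSizeValue bufferSize = new ByteSizeValue(bytes, ByteSizeUnit.BYTES);
        System.out.println(bufferSize); // anywhere from "10b" up to roughly "128kb"
    }
}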
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class FileInfoTests, method testGetPartSize.
public void testGetPartSize() {
    BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666"), new ByteSizeValue(6));
    int numBytes = 0;
    for (int i = 0; i < info.numberOfParts(); i++) {
        numBytes += info.partBytes(i);
    }
    assertEquals(numBytes, 36);
    info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6));
    numBytes = 0;
    for (int i = 0; i < info.numberOfParts(); i++) {
        numBytes += info.partBytes(i);
    }
    assertEquals(numBytes, 35);
    final int numIters = randomIntBetween(10, 100);
    for (int j = 0; j < numIters; j++) {
        StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666");
        info = new BlobStoreIndexShardSnapshot.FileInfo("foo", metaData, new ByteSizeValue(randomIntBetween(1, 1000)));
        numBytes = 0;
        for (int i = 0; i < info.numberOfParts(); i++) {
            numBytes += info.partBytes(i);
        }
        assertEquals(numBytes, metaData.length());
    }
}
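The invariant under test: a file of length L split into parts of size P yields ceil(L / P) parts whose sizes sum back to L, with only the last part short. A minimal sketch for the 35-byte case, using the same constructors the test does (numberOfParts and partBytes are the only API calls assumed):

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
import org.elasticsearch.index.store.StoreFileMetaData;

public class PartSizeSketch {
    public static void main(String[] args) {
        // a 35-byte file split into 6-byte parts: five full parts plus a 5-byte tail
        BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo(
            "foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6));
        System.out.println(info.numberOfParts()); // expected: 6
        for (int i = 0; i < info.numberOfParts(); i++) {
            System.out.println("part " + i + ": " + info.partBytes(i) + " bytes"); // 6,6,6,6,6,5
        }
    }
}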