Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
The class ClusterStateResponse, method readFrom.
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    clusterName = new ClusterName(in);
    clusterState = ClusterState.readFrom(in, null);
    if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
        totalCompressedSize = new ByteSizeValue(in);
    } else {
        // in a mixed cluster, if a pre-6.0 node processes the get cluster state
        // request, then a compressed size won't be returned, so just return 0;
        // it's a temporary situation until all nodes in the cluster have been upgraded,
        // at which point the correct cluster state size will always be reported
        totalCompressedSize = new ByteSizeValue(0L);
    }
}
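For context, here is a hedged sketch of the matching writeTo side of this version-gated field; it assumes the same fields and version constant as the snippet above and illustrates the pattern rather than the exact method body in ClusterStateResponse.

@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    clusterName.writeTo(out);
    clusterState.writeTo(out);
    if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
        // only send the compressed size to nodes that know how to read it
        totalCompressedSize.writeTo(out);
    }
}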
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
The class TcpTransport, method validateMessageHeader.
/**
 * Validates the first N bytes of the message header and returns <code>false</code> if the message is
 * a ping message and has no payload, i.e. isn't a real user-level message.
 *
 * @throws IllegalStateException if the message is too short: less than the header, or less than the header plus the message size
 * @throws HttpOnTransportException if the message has no valid header and appears to be an HTTP message
 * @throws IllegalArgumentException if the message is greater than the maximum allowed frame size. This is dependent on the available
 *                                  memory.
 */
public static boolean validateMessageHeader(BytesReference buffer) throws IOException {
    final int sizeHeaderLength = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;
    if (buffer.length() < sizeHeaderLength) {
        throw new IllegalStateException("message size must be >= to the header size");
    }
    int offset = 0;
    if (buffer.get(offset) != 'E' || buffer.get(offset + 1) != 'S') {
        // special handling for what is probably HTTP
        if (bufferStartsWith(buffer, offset, "GET ") ||
            bufferStartsWith(buffer, offset, "POST ") ||
            bufferStartsWith(buffer, offset, "PUT ") ||
            bufferStartsWith(buffer, offset, "HEAD ") ||
            bufferStartsWith(buffer, offset, "DELETE ") ||
            bufferStartsWith(buffer, offset, "OPTIONS ") ||
            bufferStartsWith(buffer, offset, "PATCH ") ||
            bufferStartsWith(buffer, offset, "TRACE ")) {
            throw new HttpOnTransportException("This is not a HTTP port");
        }
        // we have 6 readable bytes, show 4 (should be enough)
        throw new StreamCorruptedException("invalid internal transport message format, got ("
            + Integer.toHexString(buffer.get(offset) & 0xFF) + ","
            + Integer.toHexString(buffer.get(offset + 1) & 0xFF) + ","
            + Integer.toHexString(buffer.get(offset + 2) & 0xFF) + ","
            + Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")");
    }
    final int dataLen;
    try (StreamInput input = buffer.streamInput()) {
        input.skip(TcpHeader.MARKER_BYTES_SIZE);
        dataLen = input.readInt();
        if (dataLen == PING_DATA_SIZE) {
            // a ping frame carries no payload; signal this to the caller by returning false
            return false;
        }
    }
    if (dataLen <= 0) {
        throw new StreamCorruptedException("invalid data length: " + dataLen);
    }
    // safety against too large frames being sent
    if (dataLen > NINETY_PER_HEAP_SIZE) {
        throw new IllegalArgumentException("transport content length received ["
            + new ByteSizeValue(dataLen) + "] exceeded [" + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]");
    }
    if (buffer.length() < dataLen + sizeHeaderLength) {
        throw new IllegalStateException("buffer must be >= to the message size but wasn't");
    }
    return true;
}
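The HTTP detection above relies on a bufferStartsWith helper that is not shown in this snippet. A minimal sketch of such a helper follows; the bounds check is a defensive addition, and the actual implementation in TcpTransport may differ.

private static boolean bufferStartsWith(BytesReference buffer, int offset, String prefix) {
    char[] chars = prefix.toCharArray();
    // defensive: never read past the end of the buffer
    if (buffer.length() < offset + chars.length) {
        return false;
    }
    for (int i = 0; i < chars.length; i++) {
        if (buffer.get(offset + i) != chars[i]) {
            return false;
        }
    }
    return true;
}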
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
The class MemorySizeSettingsTests, method testCircuitBreakerSettings.
public void testCircuitBreakerSettings() {
    assertMemorySizeSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.total.limit",
        new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.7)));
    assertMemorySizeSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.fielddata.limit",
        new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)));
    assertMemorySizeSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "indices.breaker.request.limit",
        new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.6)));
    assertMemorySizeSetting(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING, "network.breaker.inflight_requests.limit",
        new ByteSizeValue(JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()));
}
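The expected defaults asserted above are fixed fractions of the maximum JVM heap. As a short illustration of how such a heap-relative limit can be computed with ByteSizeValue (the variable names here are illustrative only):

// 70% of the maximum heap, mirroring the indices.breaker.total.limit default asserted above
long heapMaxBytes = JvmInfo.jvmInfo().getMem().getHeapMax().getBytes();
ByteSizeValue totalBreakerLimit = new ByteSizeValue((long) (heapMaxBytes * 0.7));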
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
The class SettingTests, method testByteSize.
public void testByteSize() {
    Setting<ByteSizeValue> byteSizeValueSetting =
        Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope);
    assertFalse(byteSizeValueSetting.isGroupSetting());
    ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
    assertEquals(byteSizeValue.getBytes(), 1024);
    byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope);
    byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
    assertEquals(byteSizeValue.getBytes(), 2048);
    AtomicReference<ByteSizeValue> value = new AtomicReference<>(null);
    ClusterSettings.SettingUpdater<ByteSizeValue> settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger);
    try {
        settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY);
        fail("no unit");
    } catch (IllegalArgumentException ex) {
        assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized",
            ex.getMessage());
    }
    assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY));
    assertEquals(new ByteSizeValue(12), value.get());
}
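The unit requirement that the try/catch exercises also applies when parsing size strings directly. A short sketch using the static parseBytesSizeValue helper; the setting name in the second argument only labels error messages.

// "2048b" parses to 2048 bytes; a bare "12" with no unit is rejected
ByteSizeValue parsed = ByteSizeValue.parseBytesSizeValue("2048b", "a.byte.size");
assert parsed.getBytes() == 2048L;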
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
The class BulkProcessorIT, method testBulkProcessorWaitOnClose.
public void testBulkProcessorWaitOnClose() throws Exception {
    BulkProcessorTestListener listener = new BulkProcessorTestListener();
    int numDocs = randomIntBetween(10, 100);
    BulkProcessor processor = BulkProcessor.builder(client(), listener)
        .setName("foo")
        .setConcurrentRequests(randomIntBetween(0, 1))
        .setBulkActions(numDocs)
        .setFlushInterval(TimeValue.timeValueHours(24))
        .setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
        .build();
    MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
    assertThat(processor.isOpen(), is(true));
    assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true));
    if (randomBoolean()) {
        // check if we can call it multiple times
        if (randomBoolean()) {
            assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true));
        } else {
            processor.close();
        }
    }
    assertThat(processor.isOpen(), is(false));
    assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1));
    assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1));
    assertThat(listener.bulkFailures.size(), equalTo(0));
    assertResponseItems(listener.bulkItems, numDocs);
    assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
}
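Outside of a randomized test, the same builder is typically configured with fixed thresholds. A minimal, hedged sketch follows; client and listener are placeholders for an existing Client and BulkProcessor.Listener.

// flush after 1000 actions, 5mb of request data, or 5 seconds, whichever comes first
BulkProcessor processor = BulkProcessor.builder(client, listener)
    .setBulkActions(1000)
    .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB))
    .setFlushInterval(TimeValue.timeValueSeconds(5))
    .setConcurrentRequests(1)
    .build();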