Use of org.elasticsearch.Version in project elasticsearch by elastic.
The class MetaDataIndexStateService, method openIndex.
public void openIndex(final OpenIndexClusterStateUpdateRequest request,
                      final ActionListener<ClusterStateUpdateResponse> listener) {
    if (request.indices() == null || request.indices().length == 0) {
        throw new IllegalArgumentException("Index name is required");
    }
    final String indicesAsString = Arrays.toString(request.indices());
    clusterService.submitStateUpdateTask("open-indices " + indicesAsString,
            new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {

        @Override
        protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
            return new ClusterStateUpdateResponse(acknowledged);
        }

        @Override
        public ClusterState execute(ClusterState currentState) {
            List<IndexMetaData> indicesToOpen = new ArrayList<>();
            for (Index index : request.indices()) {
                final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
                if (indexMetaData.getState() != IndexMetaData.State.OPEN) {
                    indicesToOpen.add(indexMetaData);
                }
            }
            if (indicesToOpen.isEmpty()) {
                return currentState;
            }
            logger.info("opening indices [{}]", indicesAsString);
            MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
            ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
            final Version minIndexCompatibilityVersion =
                    currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion();
            for (IndexMetaData closedMetaData : indicesToOpen) {
                final String indexName = closedMetaData.getIndex().getName();
                IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build();
                // The index might be closed because we couldn't import it due to an old incompatible version,
                // so we need to check that this index can be upgraded to the current version.
                indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData, minIndexCompatibilityVersion);
                try {
                    indicesService.verifyIndexMetadata(indexMetaData, indexMetaData);
                } catch (Exception e) {
                    throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e);
                }
                mdBuilder.put(indexMetaData, true);
                blocksBuilder.removeIndexBlock(indexName, INDEX_CLOSED_BLOCK);
            }
            ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
            RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable());
            for (IndexMetaData index : indicesToOpen) {
                rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex()));
            }
            // No explicit wait for other nodes is needed as we use AckedClusterStateUpdateTask.
            return allocationService.reroute(
                    ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
                    "indices opened [" + indicesAsString + "]");
        }
    });
}
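The open path above relies on minimumIndexCompatibilityVersion() to decide whether a closed index can still be imported and upgraded by the cluster's newest node. Below is a minimal sketch of that check in isolation, assuming the 5.x org.elasticsearch.Version API; the class and method names (IndexCompatibilityCheck, canOpen) are illustrative and not part of Elasticsearch.

    import org.elasticsearch.Version;

    public class IndexCompatibilityCheck {

        // Mirrors the idea behind upgradeIndexMetaData: an index created on or after the
        // node's minimum index compatibility version can be upgraded and opened.
        static boolean canOpen(Version indexCreatedVersion, Version nodeVersion) {
            return indexCreatedVersion.onOrAfter(nodeVersion.minimumIndexCompatibilityVersion());
        }

        public static void main(String[] args) {
            Version node = Version.CURRENT;
            System.out.println("node " + node + " can only open indices created on or after "
                    + node.minimumIndexCompatibilityVersion());
        }
    }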
Use of org.elasticsearch.Version in project elasticsearch by elastic.
The class TransportUpgradeAction, method newResponse.
@Override
protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards,
                                      List<ShardUpgradeResult> shardUpgradeResults,
                                      List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
    Map<String, Integer> successfulPrimaryShards = new HashMap<>();
    Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
    for (ShardUpgradeResult result : shardUpgradeResults) {
        successfulShards++;
        String index = result.getShardId().getIndex().getName();
        if (result.primary()) {
            Integer count = successfulPrimaryShards.get(index);
            successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
        }
        Tuple<Version, org.apache.lucene.util.Version> versionTuple = versions.get(index);
        if (versionTuple == null) {
            versions.put(index, new Tuple<>(result.upgradeVersion(), result.oldestLuceneSegment()));
        } else {
            // We already have versions for this index - see if we need to update them based on the current shard
            Version version = versionTuple.v1();
            org.apache.lucene.util.Version luceneVersion = versionTuple.v2();
            // Since we rewrite the mapping during upgrade, the metadata is always rewritten by the latest version
            if (result.upgradeVersion().after(versionTuple.v1())) {
                version = result.upgradeVersion();
            }
            // Keep the oldest Lucene segment version that we still need to support
            if (result.oldestLuceneSegment().onOrAfter(versionTuple.v2()) == false) {
                luceneVersion = result.oldestLuceneSegment();
            }
            versions.put(index, new Tuple<>(version, luceneVersion));
        }
    }
    Map<String, Tuple<org.elasticsearch.Version, String>> updatedVersions = new HashMap<>();
    MetaData metaData = clusterState.metaData();
    for (Map.Entry<String, Tuple<Version, org.apache.lucene.util.Version>> versionEntry : versions.entrySet()) {
        String index = versionEntry.getKey();
        Integer primaryCount = successfulPrimaryShards.get(index);
        int expectedPrimaryCount = metaData.index(index).getNumberOfShards();
        if (primaryCount != null && primaryCount == expectedPrimaryCount) {
            updatedVersions.put(index, new Tuple<>(versionEntry.getValue().v1(), versionEntry.getValue().v2().toString()));
        } else {
            logger.warn("Not updating settings for the index [{}] because upgrade of some primary shards failed - " +
                    "expected[{}], received[{}]", index, expectedPrimaryCount, primaryCount == null ? 0 : primaryCount);
        }
    }
    return new UpgradeResponse(updatedVersions, totalShards, successfulShards, failedShards, shardFailures);
}
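The per-index bookkeeping above keeps the newest upgrade Version and the oldest Lucene segment version seen across all shards of an index. A minimal sketch of just that merge step, assuming the Tuple helper from org.elasticsearch.common.collect; the class name VersionMerge is illustrative and not part of Elasticsearch.

    import org.elasticsearch.Version;
    import org.elasticsearch.common.collect.Tuple;

    final class VersionMerge {

        // Merge a shard's (upgrade version, oldest Lucene segment) pair into the index-level pair,
        // keeping the newest upgrade version and the oldest segment version, as newResponse does.
        static Tuple<Version, org.apache.lucene.util.Version> merge(
                Tuple<Version, org.apache.lucene.util.Version> current,
                Version shardUpgradeVersion,
                org.apache.lucene.util.Version shardOldestSegment) {
            Version upgrade = shardUpgradeVersion.after(current.v1()) ? shardUpgradeVersion : current.v1();
            org.apache.lucene.util.Version oldest =
                    shardOldestSegment.onOrAfter(current.v2()) ? current.v2() : shardOldestSegment;
            return new Tuple<>(upgrade, oldest);
        }
    }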
Use of org.elasticsearch.Version in project elasticsearch by elastic.
The class UpgradeSettingsRequest, method readFrom.
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    int size = in.readVInt();
    versions = new HashMap<>();
    for (int i = 0; i < size; i++) {
        String index = in.readString();
        Version upgradeVersion = Version.readVersion(in);
        String oldestLuceneSegment = in.readString();
        versions.put(index, new Tuple<>(upgradeVersion, oldestLuceneSegment));
    }
    readTimeout(in);
}
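For completeness, a sketch of the matching writeTo counterpart, which serializes each entry with Version.writeVersion(). This is an assumption about the symmetric wire format implied by readFrom above (including the assumed writeTimeout counterpart to readTimeout), not a verbatim copy of the Elasticsearch source.

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(versions.size());
        for (Map.Entry<String, Tuple<Version, String>> entry : versions.entrySet()) {
            out.writeString(entry.getKey());                  // index name
            Version.writeVersion(entry.getValue().v1(), out); // upgrade version
            out.writeString(entry.getValue().v2());           // oldest Lucene segment, as a string
        }
        writeTimeout(out); // assumed symmetric to readTimeout(in)
    }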
Use of org.elasticsearch.Version in project elasticsearch by elastic.
The class TcpTransport, method openConnection.
@Override
public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException {
    if (node == null) {
        throw new ConnectTransportException(null, "can't open connection to a null node");
    }
    boolean success = false;
    NodeChannels nodeChannels = null;
    connectionProfile = resolveConnectionProfile(connectionProfile, defaultConnectionProfile);
    // ensure we don't open connections while we are closing
    globalLock.readLock().lock();
    try {
        ensureOpen();
        try {
            nodeChannels = connectToChannels(node, connectionProfile);
            // one channel is guaranteed by the connection profile
            final Channel channel = nodeChannels.getChannels().get(0);
            final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ?
                    defaultConnectionProfile.getConnectTimeout() : connectionProfile.getConnectTimeout();
            final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ?
                    connectTimeout : connectionProfile.getHandshakeTimeout();
            final Version version = executeHandshake(node, channel, handshakeTimeout);
            transportServiceAdapter.onConnectionOpened(node);
            // clone the channels - we now have the correct version
            nodeChannels = new NodeChannels(nodeChannels, version);
            success = true;
            return nodeChannels;
        } catch (ConnectTransportException e) {
            throw e;
        } catch (Exception e) {
            // only relevant exceptions are logged on the caller end; this is the same as in connectToNode
            throw new ConnectTransportException(node, "general node connection failure", e);
        } finally {
            if (success == false) {
                IOUtils.closeWhileHandlingException(nodeChannels);
            }
        }
    } finally {
        globalLock.readLock().unlock();
    }
}
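executeHandshake() is what turns the raw channel into a connection tagged with the remote node's actual Version, and the NodeChannels are then rebuilt with that version. A sketch of the compatibility decision that tagging implies, using Version.isCompatible() as seen in messageReceived below; the class HandshakeVersionCheck and method accept are illustrative, not Elasticsearch APIs.

    import org.elasticsearch.Version;

    final class HandshakeVersionCheck {

        // Only a wire-compatible remote version should be attached to the connection's channels.
        static Version accept(Version localVersion, Version remoteVersion) {
            if (localVersion.isCompatible(remoteVersion) == false) {
                throw new IllegalStateException("remote version [" + remoteVersion
                        + "] is not compatible with local version [" + localVersion + "]");
            }
            return remoteVersion;
        }
    }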
Use of org.elasticsearch.Version in project elasticsearch by elastic.
The class TcpTransport, method messageReceived.
/**
 * This method handles the message receive part for both requests and responses.
 */
public final void messageReceived(BytesReference reference, Channel channel, String profileName,
                                  InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException {
    final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;
    transportServiceAdapter.addBytesReceived(totalMessageSize);
    // we have additional bytes to read, outside of the header
    boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0;
    StreamInput streamIn = reference.streamInput();
    boolean success = false;
    try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) {
        long requestId = streamIn.readLong();
        byte status = streamIn.readByte();
        Version version = Version.fromId(streamIn.readInt());
        if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) {
            Compressor compressor;
            try {
                final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE;
                compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed));
            } catch (NotCompressedException ex) {
                int maxToRead = Math.min(reference.length(), 10);
                StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [")
                        .append(maxToRead).append("] content bytes out of [").append(reference.length())
                        .append("] readable bytes with message size [").append(messageLengthBytes).append("] are [");
                for (int i = 0; i < maxToRead; i++) {
                    sb.append(reference.get(i)).append(",");
                }
                sb.append("]");
                throw new IllegalStateException(sb.toString());
            }
            streamIn = compressor.streamInput(streamIn);
        }
        if (version.isCompatible(getCurrentVersion()) == false) {
            throw new IllegalStateException("Received message from unsupported version: [" + version +
                    "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]");
        }
        streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry);
        streamIn.setVersion(version);
        threadPool.getThreadContext().readHeaders(streamIn);
        if (TransportStatus.isRequest(status)) {
            handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status);
        } else {
            final TransportResponseHandler<?> handler;
            if (TransportStatus.isHandshake(status)) {
                handler = pendingHandshakes.remove(requestId);
            } else {
                TransportResponseHandler theHandler = transportServiceAdapter.onResponseReceived(requestId);
                if (theHandler == null && TransportStatus.isError(status)) {
                    handler = pendingHandshakes.remove(requestId);
                } else {
                    handler = theHandler;
                }
            }
            // ignore if it's null, the adapter logs it
            if (handler != null) {
                if (TransportStatus.isError(status)) {
                    handlerResponseError(streamIn, handler);
                } else {
                    handleResponse(remoteAddress, streamIn, handler);
                }
                // Check that the entire message has been read; calling read() makes sure the message is
                // fully consumed, even if there is an EOS marker
                final int nextByte = streamIn.read();
                if (nextByte != -1) {
                    throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId +
                            "], handler [" + handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
                }
            }
        }
        success = true;
    } finally {
        if (success) {
            IOUtils.close(streamIn);
        } else {
            IOUtils.closeWhileHandlingException(streamIn);
        }
    }
}
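The header parsing above round-trips the version as a plain numeric id: the receiver rebuilds it with Version.fromId(streamIn.readInt()) and then checks wire compatibility. A small self-contained sketch of that round trip, assuming the 5.x Version API; the class name WireVersionExample is illustrative.

    import org.elasticsearch.Version;

    public class WireVersionExample {

        public static void main(String[] args) {
            Version current = Version.CURRENT;
            int onTheWire = current.id;              // the numeric id carried in the TCP header
            Version decoded = Version.fromId(onTheWire);
            System.out.println(decoded + " compatible with " + current + ": "
                    + decoded.isCompatible(current));
        }
    }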