Use of org.infinispan.commons.util.Version in project infinispan by infinispan.
The class ContainerInfinispanServerDriver, method start().
@Override
protected void start(String name, File rootDir, File configurationFile) {
this.name = name;
String jGroupsStack = System.getProperty(Server.INFINISPAN_CLUSTER_STACK);
// Build a skeleton server layout
createServerHierarchy(rootDir);
// Build the command-line that launches the server
List<String> args = new ArrayList<>();
args.add("bin/server.sh");
args.add("-c");
args.add(configurationFile.getName());
args.add("-b");
args.add("SITE_LOCAL");
args.add("-Djgroups.bind.address=SITE_LOCAL");
if (jGroupsStack != null) {
args.add("-j");
args.add(jGroupsStack);
}
args.add("-Dinfinispan.cluster.name=" + name);
args.add("-D" + TEST_HOST_ADDRESS + "=" + testHostAddress.getHostAddress());
if (configuration.isJMXEnabled()) {
args.add("-Dcom.sun.management.jmxremote.port=" + JMX_PORT);
args.add("-Dcom.sun.management.jmxremote.authenticate=false");
args.add("-Dcom.sun.management.jmxremote.ssl=false");
}
String logFile = System.getProperty(INFINISPAN_TEST_SERVER_LOG_FILE);
if (logFile != null) {
Path logPath = Paths.get(logFile);
String logFileName = logPath.getFileName().toString();
if (logPath.isAbsolute()) {
try {
// copy the log file to the conf dir because withFileFromPath("test", ...) would otherwise overwrite everything in it
Files.copy(logPath, new File(getConfDir(), logFileName).toPath(), StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
throw new IllegalStateException("Cannot copy the log file", e);
}
}
args.add("-l");
args.add(logFileName);
}
Properties properties = new Properties();
properties.setProperty(Server.INFINISPAN_SERVER_CONFIG_PATH, Paths.get(INFINISPAN_SERVER_HOME, DEFAULT_SERVER_CONFIG).toString());
properties.setProperty(Server.INFINISPAN_CLUSTER_NAME, name);
properties.setProperty(TEST_HOST_ADDRESS, testHostAddress.getHostName());
configuration.properties().forEach((k, v) -> args.add("-D" + k + "=" + StringPropertyReplacer.replaceProperties((String) v, properties)));
configureSite(args);
boolean preserveImageAfterTest = Boolean.parseBoolean(configuration.properties().getProperty(TestSystemPropertyNames.INFINISPAN_TEST_SERVER_PRESERVE_IMAGE, "false"));
Path tmp = Paths.get(CommonsTestingUtil.tmpDirectory(this.getClass()));
File libDir = new File(rootDir, "lib");
libDir.mkdirs();
copyArtifactsToUserLibDir(libDir);
image = new ImageFromDockerfile("localhost/testcontainers/" + Base58.randomString(16).toLowerCase(), !preserveImageAfterTest)
      .withFileFromPath("test", rootDir.toPath())
      .withFileFromPath("tmp", tmp)
      .withFileFromPath("lib", libDir.toPath());
final boolean prebuiltImage;
final String imageName;
String baseImageName = configuration.properties().getProperty(TestSystemPropertyNames.INFINISPAN_TEST_SERVER_BASE_IMAGE_NAME);
if (baseImageName == null) {
String serverOutputDir = configuration.properties().getProperty(TestSystemPropertyNames.INFINISPAN_TEST_SERVER_DIR);
if (serverOutputDir == null) {
// We try to use the latest public image for this major.minor version
imageName = "quay.io/infinispan/server:" + Version.getMajorMinor();
prebuiltImage = true;
log.infof("Using prebuilt image '%s'", imageName);
} else {
// We build our local image based on the supplied server
Path serverOutputPath = Paths.get(serverOutputDir).normalize();
imageName = JDK_BASE_IMAGE_NAME;
image.withFileFromPath("target", serverOutputPath.getParent()).withFileFromPath("src", serverOutputPath.getParent().getParent().resolve("src")).withFileFromPath("build", cleanServerDirectory(serverOutputPath));
prebuiltImage = false;
log.infof("Using local image from server built at '%s'", serverOutputPath);
}
} else {
imageName = baseImageName;
prebuiltImage = true;
log.infof("Using prebuilt image '%s'", imageName);
}
image.withDockerfileFromBuilder(builder -> {
builder.from(imageName)
       .env("INFINISPAN_SERVER_HOME", INFINISPAN_SERVER_HOME)
       .env("INFINISPAN_VERSION", Version.getVersion())
       .label("name", "Infinispan Server")
       .label("version", Version.getVersion())
       .label("release", Version.getVersion())
       .label("architecture", "x86_64");
if (!prebuiltImage) {
builder.copy("build", INFINISPAN_SERVER_HOME);
}
// Copy the resources to a location from where they can be added to the image
try {
URL resource = ContainerInfinispanServerDriver.class.getResource("/overlay");
if (resource != null) {
URI overlayUri = resource.toURI();
if ("jar".equals(overlayUri.getScheme())) {
try (FileSystem fileSystem = FileSystems.newFileSystem(overlayUri, Collections.emptyMap())) {
Files.walkFileTree(fileSystem.getPath("/overlay"), new CommonsTestingUtil.CopyFileVisitor(tmp, true, f -> {
f.setExecutable(true, false);
}));
}
} else {
Files.walkFileTree(Paths.get(overlayUri), new CommonsTestingUtil.CopyFileVisitor(tmp, true, f -> {
f.setExecutable(true, false);
}));
}
}
} catch (Exception e) {
throw new RuntimeException(e);
}
builder.copy("test", INFINISPAN_SERVER_HOME + "/server").copy("tmp", INFINISPAN_SERVER_HOME).workDir(INFINISPAN_SERVER_HOME).entryPoint(args.toArray(new String[] {})).expose(// JMX Remoting
EXPOSED_PORTS);
builder.copy("lib", serverPathFrom("lib")).user("root").run("chown", "-R", IMAGE_USER, INFINISPAN_SERVER_HOME).run("chmod", "-R", "g+rw", INFINISPAN_SERVER_HOME).user(IMAGE_USER);
});
int numServers = configuration.numServers();
CountdownLatchLoggingConsumer clusterLatch = new CountdownLatchLoggingConsumer(numServers, String.format(CLUSTER_VIEW_REGEX, numServers));
if (configuration.isParallelStartup()) {
CountdownLatchLoggingConsumer startupLatch = new CountdownLatchLoggingConsumer(numServers, STARTUP_MESSAGE_REGEX);
IntStream.range(0, configuration.numServers()).forEach(i -> createContainer(i, startupLatch, clusterLatch));
Exceptions.unchecked(() -> startupLatch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
} else {
for (int i = 0; i < configuration.numServers(); i++) {
CountdownLatchLoggingConsumer startupLatch = new CountdownLatchLoggingConsumer(1, STARTUP_MESSAGE_REGEX);
createContainer(i, startupLatch, clusterLatch);
Exceptions.unchecked(() -> startupLatch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
}
}
// Ensure that a cluster of numServers has actually formed before proceeding
Exceptions.unchecked(() -> clusterLatch.await(TIMEOUT_SECONDS, TimeUnit.SECONDS));
}
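A quick illustration of the two Version calls used above: Version.getMajorMinor() supplies only the major.minor part that names the public image tag, while Version.getVersion() is the full version string placed in the INFINISPAN_VERSION environment variable and the image labels. A minimal sketch; the commented values are made up for illustration:
// Hypothetical output values; the real strings depend on the build being tested.
String fullVersion = Version.getVersion();    // e.g. "14.0.1.Final"
String majorMinor = Version.getMajorMinor();  // e.g. "14.0"
String publicImage = "quay.io/infinispan/server:" + majorMinor;
System.out.printf("image=%s, version=%s%n", publicImage, fullVersion);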
Use of org.infinispan.commons.util.Version in project infinispan by infinispan.
The class PatchTool, method createSinglePatch().
private void createSinglePatch(String qualifier, Path source, Path target, Version targetVersion, Map<Path, ServerFile> targetFiles, FileSystem zipfs) throws IOException {
Version sourceVersion = getVersion(source);
// Ensure that the brand name coincides
String sourceBrand = sourceVersion.brandName();
String targetBrand = targetVersion.brandName();
if (!sourceBrand.equals(targetBrand)) {
throw MSG.patchIncompatibleProduct(sourceBrand, targetBrand);
}
if (sourceVersion.brandVersion().equals(targetVersion.brandVersion())) {
throw MSG.patchServerAndTargetMustBeDifferent(sourceVersion.brandVersion());
}
PatchInfo patchInfo = new PatchInfo(sourceBrand, sourceVersion.brandVersion(), targetVersion.brandVersion(), qualifier);
// Build a list of files in the old version
Map<Path, ServerFile> v1Files = getServerFiles(source);
// Compare the two file lists, generating a list of upgrade instructions
List<PatchOperation> operations = patchInfo.getOperations();
v1Files.forEach((k1, v1File) -> {
if (!targetFiles.containsKey(k1)) {
operations.add(PatchOperation.remove(v1File.getVersionedPath(), v1File.getDigest(), v1File.getPermissions()));
} else {
ServerFile targetFile = targetFiles.get(k1);
if (!v1File.getFilename().equals(targetFile.getFilename())) {
// Different filename means upgrade
operations.add(PatchOperation.upgrade(v1File.getVersionedPath(), v1File.getDigest(), v1File.getPermissions(), targetFile.getVersionedPath(), targetFile.getDigest(), targetFile.getPermissions()));
addFileToZip(zipfs, target, targetFile);
} else if (!v1File.getDigest().equals(targetFile.getDigest())) {
// Check contents
operations.add(PatchOperation.replace(targetFile.isSoft(), targetFile.getVersionedPath(), v1File.getDigest(), v1File.getPermissions(), targetFile.getDigest(), targetFile.getPermissions()));
addFileToZip(zipfs, target, targetFile);
}
}
});
targetFiles.forEach((k2, targetFile) -> {
if (!v1Files.containsKey(k2)) {
operations.add(PatchOperation.add(targetFile.getVersionedPath(), targetFile.getDigest(), targetFile.getPermissions()));
addFileToZip(zipfs, target, targetFile);
}
});
// Write out the JSON patch file
Path patchPath = zipfs.getPath("patch-" + patchInfo.getSourceVersion() + "_" + patchInfo.getTargetVersion() + ".json");
try (OutputStream os = Files.newOutputStream(patchPath, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
String json = patchInfo.toJson().toPrettyString();
os.write(json.getBytes(UTF_8));
}
}
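For context, the descriptor written at the end of this method is named from the two brand versions it connects; with made-up versions the zip entry would look like this:
// Illustrative only; the version strings are hypothetical.
// sourceVersion.brandVersion() = "14.0.1.Final", targetVersion.brandVersion() = "14.0.3.Final"
// -> zip entry: patch-14.0.1.Final_14.0.3.Final.json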
Use of org.infinispan.commons.util.Version in project infinispan by infinispan.
The class PatchTool, method installPatch().
public void installPatch(Path patch, Path target, boolean dryRun) throws IOException {
// Obtain the target version
Version targetVersion = getVersion(target);
String version = targetVersion.brandVersion();
String brandName = targetVersion.brandName();
List<PatchInfo> installedPatches = getInstalledPatches(target);
// Open the patch file
try (FileSystem zipfs = getPatchFile(patch)) {
// Iterate the patch json files to find one that matches our version as a source
PatchInfo patchInfo = getPatchInfos(zipfs).stream().filter(info -> brandName.equals(info.getBrandName()) && version.equals(info.getSourceVersion())).findFirst().orElseThrow(() -> {
throw MSG.patchCannotApply(brandName, version);
});
// Validate the SHAs of the existing files against the source ones in the patch
List<PatchOperation> operations = patchInfo.getOperations();
// Collect all errors
List<String> errors = new ArrayList<>();
// Scan the patch to ensure its contents match with the list of operations
for (PatchOperation operation : operations) {
switch(operation.getAction()) {
case ADD:
case SOFT_REPLACE:
case HARD_REPLACE:
case UPGRADE:
String sha256 = sha256(zipfs.getPath(operation.getNewPath().toString()));
if (sha256 == null || !sha256.equals(operation.getNewDigest())) {
errors.add(MSG.patchCorruptArchive(operation));
}
}
}
if (errors.size() > 0) {
throw MSG.patchValidationErrors(String.join("\n", errors));
}
// Scan the server files to ensure that the patch can be installed
for (PatchOperation operation : operations) {
switch(operation.getAction()) {
case ADD:
case SOFT_REPLACE:
// Ignore adds and soft replaces
break;
case REMOVE:
case HARD_REPLACE:
case UPGRADE:
String sha256 = sha256(target.resolve(operation.getPath()));
if (sha256 == null || !sha256.equals(operation.getDigest())) {
errors.add(MSG.patchShaMismatch(operation.getPath(), operation.getDigest(), sha256));
}
break;
}
}
if (errors.size() > 0) {
throw MSG.patchValidationErrors(String.join("\n", errors));
}
// We're good to go, backup the files being removed / replaced
Path backup = getBackupPath(target, patchInfo);
Files.createDirectories(backup);
for (PatchOperation operation : operations) {
switch(operation.getAction()) {
case ADD:
// Ignore adds
break;
case SOFT_REPLACE:
// We backup only if the checksum matches (which means we will be replacing a distribution file)
String sha256 = sha256(target.resolve(operation.getPath()));
if (sha256 == null || !sha256.equals(operation.getDigest())) {
break;
}
case REMOVE:
case HARD_REPLACE:
case UPGRADE:
Path file = backup.resolve(operation.getPath());
println(dryRun, MSG.patchBackup(target.resolve(operation.getPath()), file));
if (!dryRun) {
Files.createDirectories(file.getParent());
Files.move(target.resolve(operation.getPath()), file);
}
break;
}
}
// Now perform the actual operations
for (PatchOperation operation : operations) {
switch(operation.getAction()) {
case REMOVE:
// Do nothing, the file has already been removed as part of the backup
break;
case SOFT_REPLACE:
String sha256 = sha256(target.resolve(operation.getPath()));
if (sha256 == null || sha256.equals(operation.getDigest())) {
if (!dryRun) {
Path file = Files.copy(zipfs.getPath(operation.getNewPath().toString()), target.resolve(operation.getNewPath()));
Files.setPosixFilePermissions(file, PosixFilePermissions.fromString(operation.getNewPermissions()));
}
} else {
// We create a new file by appending the target version to the filename
if (!dryRun) {
Path file = target.resolve(operation.getNewPath());
file = file.getParent().resolve(file.getFileName().toString() + "-" + patchInfo.getTargetVersion());
Files.copy(zipfs.getPath(operation.getNewPath().toString()), file);
Files.setPosixFilePermissions(file, PosixFilePermissions.fromString(operation.getNewPermissions()));
}
}
break;
case ADD:
case HARD_REPLACE:
case UPGRADE:
if (!dryRun) {
Path file = target.resolve(operation.getNewPath());
if (file.getParent() != null) {
Files.createDirectories(file.getParent());
}
Files.copy(zipfs.getPath(operation.getNewPath().toString()), file, StandardCopyOption.REPLACE_EXISTING);
Files.setPosixFilePermissions(file, PosixFilePermissions.fromString(operation.getNewPermissions()));
}
break;
}
}
patchInfo.setInstallationDate(new Date());
if (!dryRun) {
installedPatches.add(patchInfo);
writeInstalledPatches(target, installedPatches);
}
println(dryRun, MSG.patchInfo(patchInfo));
}
}
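A minimal usage sketch for installPatch, assuming an already constructed PatchTool instance named tool; the paths below are illustrative and not part of the original source:
// Dry run: validates the archive and server digests and prints the planned backup/copy operations without touching the server.
Path patchZip = Paths.get("/tmp/infinispan-patch.zip");
Path serverRoot = Paths.get("/opt/infinispan-server");
tool.installPatch(patchZip, serverRoot, true);
// Real run: files are backed up, replaced or added, and the patch is recorded in the installed-patches list.
tool.installPatch(patchZip, serverRoot, false);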
Use of org.infinispan.commons.util.Version in project infinispan by infinispan.
The class PatchTool, method createPatch().
public void createPatch(String qualifier, Path patch, Path target, Path... sources) throws IOException {
// Obtain version information
Version targetVersion = getVersion(target);
// Build a list of files in the target
Map<Path, ServerFile> targetFiles = getServerFiles(target);
// Create the patch zip file
try (FileSystem zipfs = getPatchFile(patch, true)) {
for (Path source : sources) {
createSinglePatch(qualifier, source, target, targetVersion, targetFiles, zipfs);
}
}
}
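A similar hedged sketch for createPatch, again assuming an existing PatchTool instance named tool; the qualifier and paths are illustrative. One descriptor is produced per source distribution, since the method delegates to createSinglePatch for each one:
// Build a single patch zip that can upgrade either of two older distributions to the target layout.
Path patchZip = Paths.get("/tmp/infinispan-patch.zip");
Path newServer = Paths.get("/builds/server-14.0.3");
Path oldServerA = Paths.get("/builds/server-14.0.1");
Path oldServerB = Paths.get("/builds/server-14.0.2");
tool.createPatch("CR1", patchZip, newServer, oldServerA, oldServerB);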
Use of org.infinispan.commons.util.Version in project infinispan by infinispan.
The class Resp3Handler, method handleRequest().
@Override
public RespRequestHandler handleRequest(ChannelHandlerContext ctx, String type, List<byte[]> arguments) {
switch(type) {
case "HELLO":
byte[] respProtocolBytes = arguments.get(0);
String version = new String(respProtocolBytes, CharsetUtil.UTF_8);
if (!version.equals("3")) {
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("-NOPROTO sorry this protocol version is not supported\r\n", ctx.alloc()));
break;
}
if (arguments.size() == 4) {
performAuth(arguments.get(2), arguments.get(3)).whenComplete((subject, t) -> {
if (t == null) {
cache = cache.withSubject(subject);
helloResponse(ctx);
} else {
handleThrowable(ctx, t);
}
});
} else {
helloResponse(ctx);
}
break;
case "AUTH":
performAuth(arguments.get(0), arguments.get(1)).whenComplete((subject, t) -> {
if (t == null) {
cache = cache.withSubject(subject);
ctx.writeAndFlush(statusOK());
} else {
handleThrowable(ctx, t);
}
});
break;
case "PING":
if (arguments.size() == 0) {
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("$4\r\nPONG\r\n", ctx.alloc()));
break;
}
// falls-through
case "ECHO":
byte[] argument = arguments.get(0);
ByteBuf bufferToWrite = RespRequestHandler.stringToByteBufWithExtra("$" + argument.length + "\r\n", ctx.alloc(), argument.length + 2);
bufferToWrite.writeBytes(argument);
bufferToWrite.writeByte('\r').writeByte('\n');
ctx.writeAndFlush(bufferToWrite);
break;
case "SET":
performSet(ctx, cache, arguments.get(0), arguments.get(1), -1, type, statusOK());
break;
case "GET":
byte[] keyBytes = arguments.get(0);
cache.getAsync(keyBytes).whenComplete((innerValueBytes, t) -> {
if (t != null) {
log.trace("Exception encountered while performing GET", t);
handleThrowable(ctx, t);
} else if (innerValueBytes != null) {
int length = innerValueBytes.length;
ByteBuf buf = RespRequestHandler.stringToByteBufWithExtra("$" + length + "\r\n", ctx.alloc(), length + 2);
buf.writeBytes(innerValueBytes);
buf.writeByte('\r').writeByte('\n');
ctx.writeAndFlush(buf);
} else {
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("_\r\n", ctx.alloc()));
}
});
break;
case "DEL":
int keysToRemove = arguments.size();
if (keysToRemove == 1) {
keyBytes = arguments.get(0);
cache.removeAsync(keyBytes).whenComplete((prev, t) -> {
if (t != null) {
log.trace("Exception encountered while performing DEL", t);
handleThrowable(ctx, t);
return;
}
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf(":" + (prev == null ? "0" : "1") + "\r\n", ctx.alloc()));
});
} else if (keysToRemove == 0) {
// TODO: is this an error?
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf(":0\r\n", ctx.alloc()));
} else {
AtomicInteger removes = new AtomicInteger();
AggregateCompletionStage<AtomicInteger> deleteStages = CompletionStages.aggregateCompletionStage(removes);
for (byte[] keyBytesLoop : arguments) {
deleteStages.dependsOn(cache.removeAsync(keyBytesLoop).thenAccept(prev -> {
if (prev != null) {
removes.incrementAndGet();
}
}));
}
deleteStages.freeze().whenComplete((removals, t) -> {
if (t != null) {
log.trace("Exception encountered while performing multiple DEL", t);
handleThrowable(ctx, t);
return;
}
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf(":" + removals.get() + "\r\n", ctx.alloc()));
});
}
break;
case "MGET":
int keysToRetrieve = arguments.size();
if (keysToRetrieve == 0) {
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("*0\r\n", ctx.alloc()));
break;
}
List<byte[]> results = Collections.synchronizedList(Arrays.asList(new byte[keysToRetrieve][]));
AtomicInteger resultBytesSize = new AtomicInteger();
AggregateCompletionStage<Void> getStage = CompletionStages.aggregateCompletionStage();
for (int i = 0; i < keysToRetrieve; ++i) {
int innerCount = i;
keyBytes = arguments.get(i);
getStage.dependsOn(cache.getAsync(keyBytes).whenComplete((returnValue, t) -> {
if (returnValue != null) {
results.set(innerCount, returnValue);
int length = returnValue.length;
if (length > 0) {
// byte length + digit length (log10 + 1) + $
resultBytesSize.addAndGet(returnValue.length + (int) Math.log10(length) + 1 + 1);
} else {
// $0
resultBytesSize.addAndGet(2);
}
} else {
// _
resultBytesSize.addAndGet(1);
}
// \r\n
resultBytesSize.addAndGet(2);
}));
}
getStage.freeze().whenComplete((ignore, t) -> {
if (t != null) {
log.trace("Exception encountered while performing multiple DEL", t);
handleThrowable(ctx, t);
return;
}
int elements = results.size();
// * + digit length (log10 + 1) + \r\n
ByteBuf byteBuf = ctx.alloc().buffer(resultBytesSize.addAndGet(1 + (int) Math.log10(elements) + 1 + 2));
byteBuf.writeCharSequence("*" + results.size(), CharsetUtil.UTF_8);
byteBuf.writeByte('\r');
byteBuf.writeByte('\n');
for (byte[] value : results) {
if (value == null) {
byteBuf.writeCharSequence("_", CharsetUtil.UTF_8);
} else {
byteBuf.writeCharSequence("$" + value.length, CharsetUtil.UTF_8);
byteBuf.writeByte('\r');
byteBuf.writeByte('\n');
byteBuf.writeBytes(value);
}
byteBuf.writeByte('\r');
byteBuf.writeByte('\n');
}
ctx.writeAndFlush(byteBuf);
});
break;
case "MSET":
int keyValuePairCount = arguments.size();
if ((keyValuePairCount & 1) == 1) {
log.tracef("Received: %s count for keys and values combined, should be even for MSET", keyValuePairCount);
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("-ERR Missing a value for a key" + "\r\n", ctx.alloc()));
break;
}
AggregateCompletionStage<Void> setStage = CompletionStages.aggregateCompletionStage();
for (int i = 0; i < keyValuePairCount; i += 2) {
keyBytes = arguments.get(i);
byte[] valueBytes = arguments.get(i + 1);
setStage.dependsOn(cache.putAsync(keyBytes, valueBytes));
}
setStage.freeze().whenComplete((ignore, t) -> {
if (t != null) {
log.trace("Exception encountered while performing MSET", t);
handleThrowable(ctx, t);
} else {
ctx.writeAndFlush(statusOK());
}
});
break;
case "INCR":
counterIncOrDec(cache, arguments.get(0), true).thenAccept(longValue -> handleLongResult(ctx, longValue));
break;
case "DECR":
counterIncOrDec(cache, arguments.get(0), false).thenAccept(longValue -> handleLongResult(ctx, longValue));
break;
case "INFO":
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("-ERR not implemented yet\r\n", ctx.alloc()));
break;
case "PUBLISH":
// TODO: should we return the # of subscribers on this node?
// We use expiration to remove the event values eventually, while preventing their removal during periods of high update activity
performSet(ctx, cache, SubscriberHandler.keyToChannel(arguments.get(0)), arguments.get(1), 3, type, RespRequestHandler.stringToByteBuf(":0\r\n", ctx.alloc()));
break;
case "SUBSCRIBE":
SubscriberHandler subscriberHandler = new SubscriberHandler(respServer, this);
return subscriberHandler.handleRequest(ctx, type, arguments);
case "SELECT":
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("-ERR Select not supported in cluster mode\r\n", ctx.alloc()));
break;
case "READWRITE":
case "READONLY":
// We are always in read write allowing read from backups
ctx.writeAndFlush(statusOK());
break;
case "RESET":
// TODO: do we need to reset anything in this case?
ctx.writeAndFlush(RespRequestHandler.stringToByteBuf("+RESET\r\n", ctx.alloc()));
break;
case "QUIT":
// TODO: need to close connection
ctx.flush();
break;
default:
return RespRequestHandler.super.handleRequest(ctx, type, arguments);
}
return this;
}
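To make the byte-level replies in the GET branch easier to follow, here is a sketch of the frames involved; the key and value are made-up examples, and the framing mirrors what the handler writes ("$" + length, CRLF, payload, CRLF, or "_" plus CRLF for a RESP3 null):
// Client request (RESP array of bulk strings):
//   *2\r\n$3\r\nGET\r\n$8\r\ngreeting\r\n
// Reply when the key holds the 5-byte value "hello":
//   $5\r\nhello\r\n
// Reply when the key is absent (RESP3 null):
//   _\r\n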