Use of java.io.UncheckedIOException in project gocd by gocd.
The class GoDashboardPipelineGroup, method etag().
public String etag() {
    try {
        MessageDigest digest = DigestUtils.getSha256Digest();
        OutputStreamWriter outputStreamWriter = new OutputStreamWriter(new DigestOutputStream(new NullOutputStream(), digest));
        outputStreamWriter.write(name);
        outputStreamWriter.write("/");
        outputStreamWriter.write(Integer.toString(permissions.hashCode()));
        outputStreamWriter.write("[");
        for (Map.Entry<String, GoDashboardPipeline> entry : pipelines.entrySet()) {
            long lastUpdatedTimeStamp = entry.getValue().getLastUpdatedTimeStamp();
            outputStreamWriter.write(entry.getKey());
            outputStreamWriter.write(":");
            outputStreamWriter.write(Long.toString(lastUpdatedTimeStamp));
        }
        outputStreamWriter.write("]");
        outputStreamWriter.flush();
        return Hex.encodeHexString(digest.digest());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
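The method streams the pipeline group's name, its permissions hash, and each pipeline's last-updated timestamp through a DigestOutputStream backed by a null sink, then hex-encodes the SHA-256 digest as the ETag. Because every write goes to an in-memory sink, the checked IOException declared by the writer cannot realistically occur, so it is rethrown as UncheckedIOException. Below is a minimal, self-contained sketch of the same pattern using only JDK classes (Java 17+ for HexFormat); the EtagSketch class, its parameters, and the field layout are illustrative stand-ins, not GoCD's API.

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;
import java.util.Map;

public class EtagSketch {

    // Hypothetical stand-in for GoDashboardPipelineGroup#etag(): digest a few fields
    // without ever materializing the concatenated string.
    public static String etag(String name, int permissionsHash, Map<String, Long> pipelineTimestamps) {
        try {
            MessageDigest digest = MessageDigest.getInstance("SHA-256");
            // Every byte written through the writer updates the digest; the bytes themselves are discarded.
            OutputStreamWriter writer = new OutputStreamWriter(
                    new DigestOutputStream(OutputStream.nullOutputStream(), digest),
                    StandardCharsets.UTF_8);
            writer.write(name);
            writer.write("/");
            writer.write(Integer.toString(permissionsHash));
            writer.write("[");
            for (Map.Entry<String, Long> entry : pipelineTimestamps.entrySet()) {
                writer.write(entry.getKey());
                writer.write(":");
                writer.write(Long.toString(entry.getValue()));
            }
            writer.write("]");
            writer.flush();
            return HexFormat.of().formatHex(digest.digest());
        } catch (IOException e) {
            // Writing to an in-memory null sink should never fail; surface it unchecked.
            throw new UncheckedIOException(e);
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException("SHA-256 is required by the JDK spec", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(etag("first-group", 42, Map.of("build-pipeline", 1700000000000L)));
    }
}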
Use of java.io.UncheckedIOException in project pravega by pravega.
The class ContainerReadIndexTests, method testReadDirect().
/**
* Tests the readDirect() method on the ReadIndex.
*/
@Test
public void testReadDirect() throws Exception {
    final int randomAppendLength = 1024;
    @Cleanup
    TestContext context = new TestContext();
    ArrayList<Long> segmentIds = new ArrayList<>();
    final long segmentId = createSegment(0, context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentIds.add(segmentId);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, 1, context);
    final long mergedTxId = transactionsBySegment.get(segmentId).get(0);

    // Add data to all segments.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    transactionsBySegment.values().forEach(segmentIds::addAll);
    appendData(segmentIds, segmentContents, context);

    // Mark everything so far (minus a few bytes) as being written to storage.
    segmentMetadata.setStorageLength(segmentMetadata.getLength() - 100);

    // Now partially merge a second transaction.
    final long mergedTxOffset = beginMergeTransaction(mergedTxId, segmentMetadata, segmentContents, context);

    // Add one more append after all of this.
    final long endOfMergedDataOffset = segmentMetadata.getLength();
    byte[] appendData = new byte[randomAppendLength];
    new Random(0).nextBytes(appendData);
    appendSingleWrite(segmentId, appendData, context);
    recordAppend(segmentId, appendData, segmentContents);

    // Verify we are not allowed to read from the range which has already been committed to Storage (invalid arguments).
    for (AtomicLong offset = new AtomicLong(0); offset.get() < segmentMetadata.getStorageLength(); offset.incrementAndGet()) {
        AssertExtensions.assertThrows(
                String.format("readDirect allowed reading from an illegal offset (%s).", offset),
                () -> context.readIndex.readDirect(segmentId, offset.get(), 1),
                ex -> ex instanceof IllegalArgumentException);
    }

    // Verify that any reads overlapping a merged transaction return null (that is, we cannot retrieve the requested data).
    for (long offset = mergedTxOffset - 1; offset < endOfMergedDataOffset; offset++) {
        InputStream resultStream = context.readIndex.readDirect(segmentId, offset, 2);
        Assert.assertNull("readDirect() returned data overlapping a partially merged transaction", resultStream);
    }

    // Verify that we can read from any other offset.
    final byte[] expectedData = segmentContents.get(segmentId).toByteArray();
    BiConsumer<Long, Long> verifyReadResult = (startOffset, endOffset) -> {
        int readLength = (int) (endOffset - startOffset);
        while (readLength > 0) {
            InputStream actualDataStream;
            try {
                actualDataStream = context.readIndex.readDirect(segmentId, startOffset, readLength);
            } catch (StreamSegmentNotExistsException ex) {
                throw new CompletionException(ex);
            }
            Assert.assertNotNull(String.format("Unexpected result when data is readily available for Offset = %s, Length = %s.", startOffset, readLength), actualDataStream);
            byte[] actualData = new byte[readLength];
            try {
                int bytesCopied = StreamHelpers.readAll(actualDataStream, actualData, 0, readLength);
                Assert.assertEquals(String.format("Unexpected number of bytes read for Offset = %s, Length = %s (pre-partial-merge).", startOffset, readLength), readLength, bytesCopied);
            } catch (IOException ex) {
                // Technically not possible.
                throw new UncheckedIOException(ex);
            }
            AssertExtensions.assertArrayEquals("Unexpected data read from the segment at offset " + startOffset, expectedData, startOffset.intValue(), actualData, 0, actualData.length);

            // Setup the read for the next test (where we read 1 less byte than now).
            readLength--;
            if (readLength % 2 == 0) {
                // For every 2 bytes of decreased read length, increase the start offset by 1. This allows for a greater
                // number of combinations to be tested.
                startOffset++;
            }
        }
    };

    // Verify that we can read the cached data just after the StorageLength but before the merged transaction.
    verifyReadResult.accept(segmentMetadata.getStorageLength(), mergedTxOffset);

    // Verify that we can read the cached data just after the merged transaction but before the end of the segment.
    verifyReadResult.accept(endOfMergedDataOffset, segmentMetadata.getLength());
}
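The UncheckedIOException here appears inside a BiConsumer lambda: java.util.function interfaces cannot declare checked exceptions, so the IOException from StreamHelpers.readAll (which the inline comment notes is "technically not possible" for readily available data) is rethrown unchecked rather than swallowed. A minimal sketch of that pattern in isolation, using only the JDK; the class and constant names are illustrative, not Pravega's.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.util.function.Function;

public class LambdaReadSketch {

    // A functional interface cannot throw IOException, so the read wraps it unchecked.
    static final Function<InputStream, byte[]> READ_ALL = stream -> {
        try {
            return stream.readAllBytes();
        } catch (IOException e) {
            // For in-memory streams this cannot happen; wrap rather than silently ignore.
            throw new UncheckedIOException(e);
        }
    };

    public static void main(String[] args) {
        byte[] data = READ_ALL.apply(new ByteArrayInputStream(new byte[] { 1, 2, 3 }));
        System.out.println(data.length); // prints 3
    }
}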
Use of java.io.UncheckedIOException in project janusgraph by JanusGraph.
The class ElasticSearchIndex, method query().
@Override
public Stream<String> query(IndexQuery query, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    final ElasticSearchRequest sr = new ElasticSearchRequest();
    final Map<String, Object> esQuery = getFilter(query.getCondition(), informations.get(query.getStore()));
    sr.setQuery(compat.prepareQuery(esQuery));
    if (!query.getOrder().isEmpty()) {
        final List<IndexQuery.OrderEntry> orders = query.getOrder();
        for (final IndexQuery.OrderEntry orderEntry : orders) {
            final String order = orderEntry.getOrder().name();
            final KeyInformation information = informations.get(query.getStore()).get(orderEntry.getKey());
            final Mapping mapping = Mapping.getMapping(information);
            final Class<?> datatype = orderEntry.getDatatype();
            sr.addSort(orderEntry.getKey(), order.toLowerCase(), convertToEsDataType(datatype, mapping));
        }
    }
    sr.setFrom(0);
    if (query.hasLimit()) {
        sr.setSize(Math.min(query.getLimit(), batchSize));
    } else {
        sr.setSize(batchSize);
    }
    ElasticSearchResponse response;
    try {
        final String indexStoreName = getIndexStoreName(query.getStore());
        final String indexType = useMultitypeIndex ? query.getStore() : null;
        response = client.search(indexStoreName, indexType, compat.createRequestBody(sr, NULL_PARAMETERS), sr.getSize() >= batchSize);
        log.debug("First Executed query [{}] in {} ms", query.getCondition(), response.getTook());
        final ElasticSearchScroll resultIterator = new ElasticSearchScroll(client, response, sr.getSize());
        final Stream<RawQuery.Result<String>> toReturn = StreamSupport.stream(Spliterators.spliteratorUnknownSize(resultIterator, Spliterator.ORDERED), false);
        return (query.hasLimit() ? toReturn.limit(query.getLimit()) : toReturn).map(RawQuery.Result::getResult);
    } catch (final IOException | UncheckedIOException e) {
        throw new PermanentBackendException(e);
    }
}
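Here UncheckedIOException is caught rather than thrown: the search client and the scroll iterator may surface I/O failures either as a checked IOException or as an UncheckedIOException escaping from stream machinery, and both are translated into JanusGraph's PermanentBackendException at the storage-adapter boundary. A minimal sketch of that translation pattern; SearchClient and the nested BackendException are hypothetical stand-ins, not JanusGraph types.

import java.io.IOException;
import java.io.UncheckedIOException;

public class BoundaryTranslationSketch {

    // Hypothetical domain exception, standing in for PermanentBackendException.
    static class BackendException extends Exception {
        BackendException(Throwable cause) {
            super(cause);
        }
    }

    // Hypothetical client whose calls may fail with a checked IOException.
    interface SearchClient {
        String search(String query) throws IOException;
    }

    static String query(SearchClient client, String queryString) throws BackendException {
        try {
            return client.search(queryString);
        } catch (IOException | UncheckedIOException e) {
            // Both the checked and the unchecked flavor collapse into one domain exception.
            throw new BackendException(e);
        }
    }
}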
Use of java.io.UncheckedIOException in project ArachneCentralAPI by OHDSI.
The class EstimationPreprocessor, method attachEstimationAnalysisCode().
private void attachEstimationAnalysisCode(Analysis analysis) {
    Resource resource = new ClassPathResource(ESTIMATION_ANALYSIS_SOURCE);
    try (final InputStream in = resource.getInputStream()) {
        final MultipartFile analysisFile = new MockMultipartFile(ANALYSIS_BUNDLE_FILENAME, ANALYSIS_BUNDLE_FILENAME, null, in);
        analysisService.saveFile(analysisFile, analysis.getAuthor(), analysis, analysisFile.getName(), false, null);
    } catch (IOException e) {
        LOGGER.error("Failed to add file", e);
        throw new UncheckedIOException(e);
    }
}
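The preprocessor loads a bundled classpath resource that is expected to exist in every deployment, so a read failure is treated as fatal: it is logged and then rethrown as UncheckedIOException instead of forcing callers, which cannot meaningfully recover, to handle a checked exception. A minimal JDK-only sketch of that load-or-fail-fast idiom; the class and path names are illustrative.

import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;

public class ResourceLoadSketch {

    static byte[] loadBundledResource(String path) {
        try (InputStream in = ResourceLoadSketch.class.getResourceAsStream(path)) {
            if (in == null) {
                // A missing bundle is a packaging problem, not an I/O problem.
                throw new IllegalStateException("Missing bundled resource: " + path);
            }
            return in.readAllBytes();
        } catch (IOException e) {
            // Callers cannot recover from a broken bundle; fail fast, unchecked.
            throw new UncheckedIOException("Failed to read " + path, e);
        }
    }
}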
Use of java.io.UncheckedIOException in project ArachneCentralAPI by OHDSI.
The class AnalysisController, method convertToMultipartFile().
private MultipartFile convertToMultipartFile(Resource resource) {
    try {
        String rootPath = ((ClassPathResource) resource).getPath();
        String name = convertToUnixPath(rootPath.substring(rootPath.indexOf(CC_SQLS_DIR) + CC_SQLS_DIR.length() + 1));
        return new MockMultipartFile(name, name, null, readResource(CC_SQLS_DIR + "/" + name));
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
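A caller of helpers like convertToMultipartFile can still handle the underlying I/O failure if it wants checked semantics: UncheckedIOException overrides getCause() to return the original IOException, so it can be unwrapped at a boundary. A minimal sketch of that unwrap, with a hypothetical handle() helper.

import java.io.IOException;
import java.io.UncheckedIOException;

public class UnwrapSketch {

    // Runs work that may throw UncheckedIOException and re-exposes the original IOException.
    static void handle(Runnable ioWork) throws IOException {
        try {
            ioWork.run();
        } catch (UncheckedIOException e) {
            // getCause() is covariantly typed as IOException on UncheckedIOException.
            throw e.getCause();
        }
    }
}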