Use of java.io.DataOutputStream in project hbase by apache.
Class TestPrefixTreeEncoding, method testSeekWithRandomData:
@Test
public void testSeekWithRandomData() throws Exception {
    PrefixTreeCodec encoder = new PrefixTreeCodec();
    ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
    DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
    int batchId = numBatchesWritten++;
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withIncludesMvcc(false)
        .withIncludesTags(includesTag)
        .withCompression(Algorithm.NONE)
        .build();
    HFileBlockEncodingContext blkEncodingCtx =
        new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
    generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
    EncodedSeeker seeker =
        encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
    byte[] onDiskBytes = baosInMemory.toByteArray();
    ByteBuffer readBuffer =
        ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, onDiskBytes.length - DataBlockEncoding.ID_SIZE);
    verifySeeking(seeker, readBuffer, batchId);
}
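The DataOutputStream detail worth noting here is the framing: the encoder writes a short encoding ID ahead of the block, and the seeker is then handed a ByteBuffer view that skips those DataBlockEncoding.ID_SIZE bytes. A minimal, self-contained sketch of that write-then-skip pattern (HeaderSkipSketch and its values are hypothetical, not HBase code):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class HeaderSkipSketch {
    // Hypothetical 2-byte header, mirroring the role of DataBlockEncoding.ID_SIZE.
    private static final int ID_SIZE = 2;

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        out.writeShort(42);                // encoding id (the header)
        out.writeUTF("encoded payload");   // stand-in for the encoded block
        byte[] onDisk = baos.toByteArray();
        // Hand downstream readers a view that starts after the header.
        ByteBuffer payload = ByteBuffer.wrap(onDisk, ID_SIZE, onDisk.length - ID_SIZE);
        System.out.println("payload bytes: " + payload.remaining());
    }
}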
Use of java.io.DataOutputStream in project buck by facebook.
Class DirArtifactCache, method store:
@Override
public ListenableFuture<Void> store(ArtifactInfo info, BorrowablePath output) {
    if (!doStore) {
        return Futures.immediateFuture(null);
    }
    try {
        Optional<Path> borrowedAndStoredArtifactPath = Optional.empty();
        for (RuleKey ruleKey : info.getRuleKeys()) {
            Path artifactPath = getPathForRuleKey(ruleKey, Optional.empty());
            Path metadataPath = getPathForRuleKey(ruleKey, Optional.of(".metadata"));
            if (filesystem.exists(artifactPath) && filesystem.exists(metadataPath)) {
                continue;
            }
            filesystem.mkdirs(getParentDirForRuleKey(ruleKey));
            if (!output.canBorrow()) {
                storeArtifactOutput(output.getPath(), artifactPath);
            } else {
                // Move the artifact without copying; this significantly reduces disk I/O.
                if (!borrowedAndStoredArtifactPath.isPresent()) {
                    borrowedAndStoredArtifactPath = Optional.of(artifactPath);
                    filesystem.move(output.getPath(), artifactPath, StandardCopyOption.REPLACE_EXISTING);
                } else {
                    storeArtifactOutput(borrowedAndStoredArtifactPath.get(), artifactPath);
                }
            }
            bytesSinceLastDeleteOldFiles += filesystem.getFileSize(artifactPath);
            // Now, write the metadata artifact.
            Path tmp = filesystem.createTempFile(getPreparedTempFolder(), "metadata", TMP_EXTENSION);
            try {
                try (DataOutputStream out = new DataOutputStream(filesystem.newFileOutputStream(tmp))) {
                    out.writeInt(info.getMetadata().size());
                    for (Map.Entry<String, String> ent : info.getMetadata().entrySet()) {
                        out.writeUTF(ent.getKey());
                        byte[] val = ent.getValue().getBytes(Charsets.UTF_8);
                        out.writeInt(val.length);
                        out.write(val);
                    }
                }
                filesystem.move(tmp, metadataPath, StandardCopyOption.REPLACE_EXISTING);
                bytesSinceLastDeleteOldFiles += filesystem.getFileSize(metadataPath);
            } finally {
                filesystem.deleteFileAtPathIfExists(tmp);
            }
        }
    } catch (IOException e) {
        LOG.warn(e, "Artifact store(%s, %s) error", info.getRuleKeys(), output);
    }
    if (maxCacheSizeBytes.isPresent()
        && bytesSinceLastDeleteOldFiles > (maxCacheSizeBytes.get() * STORED_TO_MAX_BYTES_RATIO_TRIM_TRIGGER)) {
        bytesSinceLastDeleteOldFiles = 0L;
        deleteOldFiles();
    }
    return Futures.immediateFuture(null);
}
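The metadata file written here has a simple frame: an entry count, then per entry a writeUTF key followed by a length-prefixed UTF-8 value. A hypothetical reader that inverts that layout (MetadataReaderSketch is illustrative only, not Buck's actual fetch path):

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

final class MetadataReaderSketch {
    // Reads the layout store() writes: entry count, then per entry a
    // writeUTF key, an int value length, and the raw UTF-8 value bytes.
    static Map<String, String> readMetadata(InputStream in) throws IOException {
        DataInputStream data = new DataInputStream(in);
        int size = data.readInt();
        Map<String, String> metadata = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            String key = data.readUTF();
            byte[] value = new byte[data.readInt()];
            data.readFully(value);
            metadata.put(key, new String(value, StandardCharsets.UTF_8));
        }
        return metadata;
    }
}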
Use of java.io.DataOutputStream in project buck by facebook.
Class BlockingHttpEndpoint, method send:
@VisibleForTesting
HttpResponse send(final HttpURLConnection connection, final String content) throws IOException {
    try (DataOutputStream out = new DataOutputStream(connection.getOutputStream())) {
        out.writeBytes(content);
        // Flush and close explicitly so the request body is fully written before the
        // response is read; the later try-with-resources close is then a no-op.
        out.flush();
        out.close();
        InputStream inputStream = connection.getInputStream();
        String response = CharStreams.toString(new InputStreamReader(inputStream, Charsets.UTF_8));
        return new HttpResponse(response);
    } finally {
        connection.disconnect();
    }
}
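One caveat in this snippet: DataOutputStream.writeBytes(String) writes only the low eight bits of each char, so any content outside Latin-1 is silently corrupted. If the payload can contain arbitrary Unicode, a variant that encodes explicitly would avoid that; a sketch under that assumption (SafeSendSketch is hypothetical, not the Buck implementation):

import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.nio.charset.StandardCharsets;

final class SafeSendSketch {
    static void writeBody(HttpURLConnection connection, String content) throws IOException {
        byte[] body = content.getBytes(StandardCharsets.UTF_8);
        // Declaring the exact length up front lets the connection send a
        // Content-Length header instead of chunked transfer encoding.
        connection.setFixedLengthStreamingMode(body.length);
        try (OutputStream out = connection.getOutputStream()) {
            out.write(body);  // write the already-encoded bytes as-is
        }
    }
}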
Use of java.io.DataOutputStream in project pinot by linkedin.
Class DataSchema, method toBytes:
@Nonnull
public byte[] toBytes() throws IOException {
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
    int length = _columnNames.length;
    // Write the number of columns.
    dataOutputStream.writeInt(length);
    // Write the column names.
    for (String columnName : _columnNames) {
        byte[] bytes = columnName.getBytes(UTF_8);
        dataOutputStream.writeInt(bytes.length);
        dataOutputStream.write(bytes);
    }
    // Write the column types.
    for (FieldSpec.DataType columnType : _columnTypes) {
        // We don't want to use the ordinal of the enum, since adding a new data type would
        // break things if the server and broker use different versions of the DataType class.
        byte[] bytes = columnType.name().getBytes(UTF_8);
        dataOutputStream.writeInt(bytes.length);
        dataOutputStream.write(bytes);
    }
    return byteArrayOutputStream.toByteArray();
}
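Because every string is length-prefixed, the byte layout is straightforward to invert with a DataInputStream. A hypothetical decoder sketch (illustrative only; this is not Pinot's own deserializer):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

final class DataSchemaDecodeSketch {
    static void decode(byte[] bytes) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
        int numColumns = in.readInt();
        String[] columnNames = new String[numColumns];
        String[] columnTypeNames = new String[numColumns];
        for (int i = 0; i < numColumns; i++) {
            columnNames[i] = readString(in);
        }
        for (int i = 0; i < numColumns; i++) {
            // The writer stored enum names, so a real reader would resolve
            // these via FieldSpec.DataType.valueOf(columnTypeNames[i]).
            columnTypeNames[i] = readString(in);
        }
        System.out.println(Arrays.toString(columnNames));
        System.out.println(Arrays.toString(columnTypeNames));
    }

    private static String readString(DataInputStream in) throws IOException {
        byte[] buffer = new byte[in.readInt()];
        in.readFully(buffer);
        return new String(buffer, StandardCharsets.UTF_8);
    }
}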
Use of java.io.DataOutputStream in project pinot by linkedin.
Class DataTableImplV2, method serializeDictionaryMap:
private byte[] serializeDictionaryMap() throws IOException {
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
    dataOutputStream.writeInt(_dictionaryMap.size());
    for (Entry<String, Map<Integer, String>> dictionaryMapEntry : _dictionaryMap.entrySet()) {
        String columnName = dictionaryMapEntry.getKey();
        Map<Integer, String> dictionary = dictionaryMapEntry.getValue();
        byte[] bytes = columnName.getBytes(UTF_8);
        dataOutputStream.writeInt(bytes.length);
        dataOutputStream.write(bytes);
        dataOutputStream.writeInt(dictionary.size());
        for (Entry<Integer, String> dictionaryEntry : dictionary.entrySet()) {
            dataOutputStream.writeInt(dictionaryEntry.getKey());
            byte[] valueBytes = dictionaryEntry.getValue().getBytes(UTF_8);
            dataOutputStream.writeInt(valueBytes.length);
            dataOutputStream.write(valueBytes);
        }
    }
    return byteArrayOutputStream.toByteArray();
}
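The writer nests the same length-prefixed convention: a dictionary count, then per column a name, an entry count, and (int id, length-prefixed value) pairs. A hypothetical decoder, assuming the same UTF-8 charset on the reading side (DictionaryMapDecodeSketch is illustrative, not Pinot code):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

final class DictionaryMapDecodeSketch {
    static Map<String, Map<Integer, String>> decode(byte[] bytes) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
        Map<String, Map<Integer, String>> dictionaryMap = new HashMap<>();
        int numDictionaries = in.readInt();
        for (int i = 0; i < numDictionaries; i++) {
            String columnName = readString(in);
            int numEntries = in.readInt();
            Map<Integer, String> dictionary = new HashMap<>(numEntries);
            for (int j = 0; j < numEntries; j++) {
                int id = in.readInt();
                dictionary.put(id, readString(in));
            }
            dictionaryMap.put(columnName, dictionary);
        }
        return dictionaryMap;
    }

    private static String readString(DataInputStream in) throws IOException {
        byte[] buffer = new byte[in.readInt()];
        in.readFully(buffer);
        return new String(buffer, StandardCharsets.UTF_8);
    }
}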