Use of com.google.common.io.ByteArrayDataOutput in project hadoop by apache.
The class BlockReaderFactory, method tryToCreateExternalBlockReader.
private BlockReader tryToCreateExternalBlockReader() {
  List<Class<? extends ReplicaAccessorBuilder>> clses =
      conf.getReplicaAccessorBuilderClasses();
  for (Class<? extends ReplicaAccessorBuilder> cls : clses) {
    try {
      // Serialize the block access token into a byte[] for the builder API
      ByteArrayDataOutput bado = ByteStreams.newDataOutput();
      token.write(bado);
      byte[] tokenBytes = bado.toByteArray();
      Constructor<? extends ReplicaAccessorBuilder> ctor = cls.getConstructor();
      ReplicaAccessorBuilder builder = ctor.newInstance();
      long visibleLength = startOffset + length;
      ReplicaAccessor accessor = builder
          .setAllowShortCircuitReads(allowShortCircuitLocalReads)
          .setBlock(block.getBlockId(), block.getBlockPoolId())
          .setGenerationStamp(block.getGenerationStamp())
          .setBlockAccessToken(tokenBytes)
          .setClientName(clientName)
          .setConfiguration(configuration)
          .setFileName(fileName)
          .setVerifyChecksum(verifyChecksum)
          .setVisibleLength(visibleLength)
          .build();
      if (accessor == null) {
        LOG.trace("{}: No ReplicaAccessor created by {}", this, cls.getName());
      } else {
        return new ExternalBlockReader(accessor, visibleLength, startOffset);
      }
    } catch (Throwable t) {
      LOG.warn("Failed to construct new object of type " + cls.getName(), t);
    }
  }
  return null;
}
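Here ByteArrayDataOutput turns the block access token into a plain byte[] so it can cross the pluggable ReplicaAccessorBuilder interface. For illustration only (this is not part of the Hadoop source), a minimal sketch of the reverse step, assuming the token follows Hadoop's standard Writable contract that pairs write(DataOutput) with readFields(DataInput):

// Sketch: ByteArrayDataInput (com.google.common.io) is the reading
// counterpart of ByteArrayDataOutput.
Token<?> tokenFromBytes(byte[] tokenBytes) throws IOException {
  ByteArrayDataInput in = ByteStreams.newDataInput(tokenBytes);
  Token<?> token = new Token<>();
  token.readFields(in); // inverse of token.write(bado) above
  return token;
}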
Use of com.google.common.io.ByteArrayDataOutput in project druid by druid-io.
The class StringArrayWritable, method toBytes.
public static final byte[] toBytes(final InputRow row, AggregatorFactory[] aggs, boolean reportParseExceptions) {
  try {
    ByteArrayDataOutput out = ByteStreams.newDataOutput();
    // write timestamp
    out.writeLong(row.getTimestampFromEpoch());
    // write all dimensions; the null check must come before the size write,
    // or a null dimension list would throw an NPE
    List<String> dimList = row.getDimensions();
    if (dimList != null) {
      WritableUtils.writeVInt(out, dimList.size());
      for (String dim : dimList) {
        List<String> dimValues = row.getDimension(dim);
        writeString(dim, out);
        writeStringArray(dimValues, out);
      }
    } else {
      WritableUtils.writeVInt(out, 0);
    }
    // write all metrics
    Supplier<InputRow> supplier = new Supplier<InputRow>() {
      @Override
      public InputRow get() {
        return row;
      }
    };
    WritableUtils.writeVInt(out, aggs.length);
    for (AggregatorFactory aggFactory : aggs) {
      String k = aggFactory.getName();
      writeString(k, out);
      Aggregator agg = aggFactory.factorize(
          IncrementalIndex.makeColumnSelectorFactory(VirtualColumns.EMPTY, aggFactory, supplier, true));
      try {
        agg.aggregate();
      } catch (ParseException e) {
        // "aggregate" can throw ParseExceptions if a selector expects something but gets something else.
        if (reportParseExceptions) {
          throw new ParseException(e, "Encountered parse error for aggregator[%s]", k);
        }
        log.debug(e, "Encountered parse error, skipping aggregator[%s].", k);
      }
      String t = aggFactory.getTypeName();
      if (t.equals("float")) {
        out.writeFloat(agg.getFloat());
      } else if (t.equals("long")) {
        WritableUtils.writeVLong(out, agg.getLong());
      } else {
        // it's a complex metric
        Object val = agg.get();
        ComplexMetricSerde serde = getComplexMetricSerde(t);
        writeBytes(serde.toBytes(val), out);
      }
    }
    return out.toByteArray();
  } catch (IOException ex) {
    throw Throwables.propagate(ex);
  }
}
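The serialized layout is purely positional: timestamp, then dimensions, then metrics. Because ByteArrayDataInput implements DataInput, Hadoop's WritableUtils can decode the varints on the way back in. A hypothetical decoding sketch for the header (not part of the Druid source):

static void readHeader(byte[] bytes) throws IOException {
  ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
  long timestamp = in.readLong();            // matches out.writeLong(...)
  int dimCount = WritableUtils.readVInt(in); // matches WritableUtils.writeVInt(...)
  System.out.printf("timestamp=%d, dims=%d%n", timestamp, dimCount);
}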
Use of com.google.common.io.ByteArrayDataOutput in project MinecraftForge by MinecraftForge.
The class GenDiffSet, method main.
public static void main(String[] args) throws IOException {
  //Clean Vanilla jar minecraft.jar or minecraft_server.jar
  String sourceJar = args[0];
  //Directory containing obfed output classes, typically mcp/reobf/minecraft
  String targetDir = args[1];
  //Path to FML's deobfuscation_data.lzma
  String deobfData = args[2];
  //Path to place generated .binpatch
  String outputDir = args[3];
  //"true" if we should destroy the target file if it generated a successful .binpatch
  String killTarget = args[4];
  LogManager.getLogger("GENDIFF").log(Level.INFO,
      String.format("Creating patches at %s for %s from %s", outputDir, sourceJar, targetDir));
  Delta delta = new Delta();
  FMLDeobfuscatingRemapper remapper = FMLDeobfuscatingRemapper.INSTANCE;
  remapper.setupLoadOnly(deobfData, false);
  JarFile sourceZip = new JarFile(sourceJar);
  boolean kill = killTarget.equalsIgnoreCase("true");
  File f = new File(outputDir);
  f.mkdirs();
  for (String name : remapper.getObfedClasses()) {
    // Logger.getLogger("GENDIFF").info(String.format("Evaluating path for data :%s", name));
    String fileName = name;
    String jarName = name;
    if (RESERVED_NAMES.contains(name.toUpperCase(Locale.ENGLISH))) {
      fileName = "_" + name;
    }
    File targetFile = new File(targetDir, fileName.replace('/', File.separatorChar) + ".class");
    jarName = jarName + ".class";
    if (targetFile.exists()) {
      String sourceClassName = name.replace('/', '.');
      String targetClassName = remapper.map(name).replace('/', '.');
      JarEntry entry = sourceZip.getJarEntry(jarName);
      byte[] vanillaBytes = toByteArray(sourceZip, entry);
      byte[] patchedBytes = Files.toByteArray(targetFile);
      byte[] diff = delta.compute(vanillaBytes, patchedBytes);
      // Pre-size the buffer: the patch itself plus a little room for the header
      ByteArrayDataOutput diffOut = ByteStreams.newDataOutput(diff.length + 50);
      // Original name
      diffOut.writeUTF(name);
      // Source name
      diffOut.writeUTF(sourceClassName);
      // Target name
      diffOut.writeUTF(targetClassName);
      // exists at original
      diffOut.writeBoolean(entry != null);
      if (entry != null) {
        diffOut.writeInt(Hashing.adler32().hashBytes(vanillaBytes).asInt());
      }
      // length of patch
      diffOut.writeInt(diff.length);
      // patch
      diffOut.write(diff);
      File target = new File(outputDir, targetClassName + ".binpatch");
      target.getParentFile().mkdirs();
      Files.write(diffOut.toByteArray(), target);
      Logger.getLogger("GENDIFF").info(
          String.format("Wrote patch for %s (%s) at %s", name, targetClassName, target.getAbsolutePath()));
      if (kill) {
        targetFile.delete();
        Logger.getLogger("GENDIFF").info(String.format("  Deleted target: %s", targetFile.toString()));
      }
    }
  }
  sourceZip.close();
}
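Because every header field is written through the DataOutput interface, a .binpatch can be read back with Guava's ByteArrayDataInput in exactly the order it was written. A hypothetical reader sketch, not part of the Forge source:

static void readPatchHeader(byte[] patch) {
  ByteArrayDataInput in = ByteStreams.newDataInput(patch);
  String obfName = in.readUTF();       // original (obfuscated) name
  String sourceName = in.readUTF();    // source class name
  String targetName = in.readUTF();    // target class name
  boolean existsAtOriginal = in.readBoolean();
  int checksum = existsAtOriginal ? in.readInt() : 0; // adler32 of vanilla bytes
  byte[] diff = new byte[in.readInt()];
  in.readFully(diff);                  // the raw delta payload
}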
Use of com.google.common.io.ByteArrayDataOutput in project cdap by caskdata.
The class AbstractStreamFileConsumer, method claimEntry.
/**
* Try to claim a stream event offset.
*
* @return The row key for writing to the state table if successfully claimed or {@code null} if not claimed.
*/
private byte[] claimEntry(StreamFileOffset offset, byte[] claimedStateContent) throws IOException {
  ByteArrayDataOutput out = ByteStreams.newDataOutput(50);
  out.writeLong(consumerConfig.getGroupId());
  StreamUtils.encodeOffset(out, offset);
  byte[] row = out.toByteArray();
  SortedMap<byte[], byte[]> rowStates = getInitRowStates(row);
  // See if the entry should be ignored. If it is in the rowStates with null value, then it should be ignored.
  byte[] rowState = rowStates.get(row);
  if (rowStates.containsKey(row) && rowState == null) {
    return null;
  }
  // Only need to claim entry if FIFO and group size > 1
  if (consumerConfig.getDequeueStrategy() == DequeueStrategy.FIFO && consumerConfig.getGroupSize() > 1) {
    return claimFifoEntry(row, claimedStateContent, rowState) ? row : null;
  }
  // For other dequeue strategies no explicit claim is needed,
  // as it's already handled by the readFilter
  return row;
}
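The argument to ByteStreams.newDataOutput(50) is only an initial capacity hint for the backing buffer; the stream still grows if the encoded offset needs more room. Since the row key always begins with the 8-byte group id, that prefix can be read back directly. A minimal sketch (an illustration, not CDAP code):

long groupIdOf(byte[] row) {
  // The first 8 bytes are the group id written by out.writeLong(...) above
  return ByteStreams.newDataInput(row).readLong();
}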
Use of com.google.common.io.ByteArrayDataOutput in project hbase by apache.
The class Mutation, method setClusterIds.
/**
* Marks that the clusters with the given clusterIds have consumed the mutation
* @param clusterIds of the clusters that have consumed the mutation
*/
public Mutation setClusterIds(List<UUID> clusterIds) {
  ByteArrayDataOutput out = ByteStreams.newDataOutput();
  out.writeInt(clusterIds.size());
  for (UUID clusterId : clusterIds) {
    out.writeLong(clusterId.getMostSignificantBits());
    out.writeLong(clusterId.getLeastSignificantBits());
  }
  setAttribute(CONSUMED_CLUSTER_IDS, out.toByteArray());
  return this;
}
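The attribute value has a fixed layout: a 4-byte count followed by 16 bytes per UUID. A standalone sketch of the reverse decoding (an illustration; HBase reads this attribute through its own accessor):

static List<UUID> clusterIdsFromBytes(byte[] bytes) {
  ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
  int count = in.readInt();
  List<UUID> ids = new ArrayList<>(count);
  for (int i = 0; i < count; i++) {
    // Java evaluates arguments left to right, so the most-significant
    // bits are read first, matching the write order above.
    ids.add(new UUID(in.readLong(), in.readLong()));
  }
  return ids;
}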