Example usage of gnu.trove.map.hash.TIntIntHashMap in the Terasology project by MovingBlocks: the load method of the GLTFAnimationFormat class.
/**
 * Loads every animation in a glTF asset and bundles them as {@link MeshAnimationData},
 * keyed by {@code urn#animationName} (or {@code urn#anim_<index>} for unnamed animations).
 *
 * @param urn    the urn identifying the asset being loaded
 * @param inputs the asset files; the glTF JSON is read from the first entry
 * @return a bundle containing every animation found in the asset
 * @throws IOException if the glTF version is unsupported, the asset has no skin,
 *                     or the underlying data cannot be read
 */
@Override
public MeshAnimationBundleData load(ResourceUrn urn, List<AssetDataFile> inputs) throws IOException {
    // The glTF 2.0 specification mandates UTF-8 for the JSON chunk; previously the
    // platform-default charset was used, which made parsing platform-dependent.
    try (Reader in = new InputStreamReader(inputs.get(0).openStream(), java.nio.charset.StandardCharsets.UTF_8)) {
        GLTF gltf = gson.fromJson(in, GLTF.class);
        checkVersionSupported(urn, gltf);
        List<byte[]> loadedBuffers = loadBinaryBuffers(urn, gltf);
        if (gltf.getSkins().isEmpty()) {
            throw new IOException("Skeletal mesh '" + urn + "' missing skin");
        }
        GLTFSkin skin = gltf.getSkins().get(0);
        List<String> boneNames = Lists.newArrayList();
        TIntList boneParents = new TIntArrayList();
        // glTF addresses joints by scene-node index; animation channels need the
        // joint (bone) index instead, so build the node -> joint translation table.
        TIntIntMap nodeToJoint = new TIntIntHashMap();
        for (int i = 0; i < skin.getJoints().size(); i++) {
            nodeToJoint.put(skin.getJoints().get(i), i);
        }
        List<Bone> bones = loadBones(gltf, skin, loadedBuffers);
        bones.forEach(x -> boneNames.add(x.getName()));
        bones.forEach(x -> {
            if (x.getParentIndex() != -1) {
                boneParents.add(x.getParentIndex());
            } else {
                boneParents.add(MeshAnimationData.NO_PARENT);
            }
        });
        Map<ResourceUrn, MeshAnimationData> animations = new HashMap<>();
        for (int index = 0; index < gltf.getAnimations().size(); ++index) {
            GLTFAnimation gltfAnimation = gltf.getAnimations().get(index);
            String name = gltfAnimation.getName();
            if (Strings.isNullOrEmpty(name)) {
                // Unnamed animations still need a unique, stable urn fragment.
                name = "anim_" + index;
            }
            animations.put(new ResourceUrn(urn, name), loadAnimation(gltf, gltfAnimation, loadedBuffers, nodeToJoint, boneNames, boneParents, bones));
        }
        return new MeshAnimationBundleData(animations);
    }
}
Example usage of gnu.trove.map.hash.TIntIntHashMap in the Terasology project by MovingBlocks: the loadBones method of the GLTFCommonFormat class.
/**
 * Builds the bone hierarchy described by a glTF skin.
 *
 * <p>Runs in two passes: first a {@link Bone} is created per joint, capturing its local
 * translation/rotation/scale transform and inverse bind matrix; then, once every bone
 * exists, parent/child links are wired up from the scene-node children lists.
 *
 * @param gltf          the parsed glTF document providing the scene nodes
 * @param skin          the skin whose joints define the skeleton
 * @param loadedBuffers the binary buffers backing the inverse bind matrices
 * @return the bones in joint order, linked into a hierarchy
 */
protected List<Bone> loadBones(GLTF gltf, GLTFSkin skin, List<byte[]> loadedBuffers) {
    int jointCount = skin.getJoints().size();
    List<Matrix4f> inverseBindMats = loadInverseMats(skin.getInverseBindMatrices(), jointCount, gltf, loadedBuffers);
    List<Bone> result = new ArrayList<>();
    // Translation table from scene-node index to joint (bone) index, filled in pass one.
    TIntIntMap nodeToBoneIndex = new TIntIntHashMap();
    // Pass one: create a bone for each joint.
    for (int jointIndex = 0; jointIndex < jointCount; jointIndex++) {
        int nodeIndex = skin.getJoints().get(jointIndex);
        GLTFNode node = gltf.getNodes().get(nodeIndex);
        Vector3f translation = new Vector3f();
        if (node.getTranslation() != null) {
            translation.set(node.getTranslation());
        }
        Quaternionf rotation = new Quaternionf();
        if (node.getRotation() != null) {
            rotation.set(node.getRotation());
        }
        // glTF omits identity components, so default the scale to (1, 1, 1).
        Vector3f scale = new Vector3f(1, 1, 1);
        if (node.getScale() != null) {
            scale.set(node.getScale());
        }
        String name = node.getName();
        if (Strings.isNullOrEmpty(name)) {
            name = "bone_" + jointIndex;
        }
        Bone bone = new Bone(jointIndex, name, new Matrix4f().translationRotateScale(translation, rotation, scale));
        bone.setInverseBindMatrix(inverseBindMats.get(jointIndex));
        result.add(bone);
        nodeToBoneIndex.put(nodeIndex, jointIndex);
    }
    // Pass two: connect each bone to its children now that every bone exists.
    for (int jointIndex = 0; jointIndex < jointCount; jointIndex++) {
        GLTFNode node = gltf.getNodes().get(skin.getJoints().get(jointIndex));
        Bone parent = result.get(jointIndex);
        for (TIntIterator children = node.getChildren().iterator(); children.hasNext(); ) {
            parent.addChild(result.get(nodeToBoneIndex.get(children.next())));
        }
    }
    return result;
}
Example usage of gnu.trove.map.hash.TIntIntHashMap in the GregTech project by GregTechCE: the deserialize method of the MetaBlockIdRemapCache class.
/**
 * Rebuilds a remap cache from its serialized NBT form.
 *
 * <p>Each array entry packs a 26-bit block ID in the upper bits and a 6-bit meta block
 * index in the lower bits; both lookup directions are reconstructed from the same array.
 * The maps use a no-entry value of -1 and a load factor above 1 so they never rehash.
 *
 * @param newNamePrefix the name prefix to apply to the rebuilt cache
 * @param tag           the NBT compound holding the packed ID mapping
 * @return the reconstructed remap cache
 */
public static MetaBlockIdRemapCache deserialize(String newNamePrefix, NBTTagCompound tag) {
    int[] packedEntries = tag.getIntArray(KEY_ID_MAPPING);
    TIntIntMap idToIndex = new TIntIntHashMap(packedEntries.length, 1.1F, -1, -1);
    TIntIntMap indexToId = new TIntIntHashMap(packedEntries.length, 1.1F, -1, -1);
    for (int packed : packedEntries) {
        // 26-bit block ID, 6-bit meta block index
        int blockId = (packed & SER_MASK_ID) >>> 6;
        int metaIndex = packed & SER_MASK_INDEX;
        idToIndex.put(blockId, metaIndex);
        indexToId.put(metaIndex, blockId);
    }
    return new MetaBlockIdRemapCache(newNamePrefix, idToIndex, indexToId);
}
Example usage of gnu.trove.map.hash.TIntIntHashMap in the cogcomp-nlp project by CogComp: the findSentenceEndTokenIndexes method of the MascReader class.
/**
 * Maps each gold sentence to the index of its final token by matching sentence end
 * character offsets against token end character offsets.
 *
 * @param goldTokCharOffsets  character offsets of each gold token
 * @param goldSentCharOffsets character offsets of each gold sentence
 * @return for each sentence, the index of the token whose end offset matches the sentence end
 * @throws IllegalArgumentException if a sentence ends at an offset where no token ends
 */
private static int[] findSentenceEndTokenIndexes(IntPair[] goldTokCharOffsets, IntPair[] goldSentCharOffsets) {
    // Index every token by its end character offset for O(1) sentence-end lookups.
    TIntIntHashMap endCharToTokIndex = new TIntIntHashMap();
    for (int tokIndex = 0; tokIndex < goldTokCharOffsets.length; ++tokIndex) {
        endCharToTokIndex.put(goldTokCharOffsets[tokIndex].getSecond(), tokIndex);
    }
    int[] sentEndTokIndexes = new int[goldSentCharOffsets.length];
    for (int sentIndex = 0; sentIndex < goldSentCharOffsets.length; ++sentIndex) {
        int endChar = goldSentCharOffsets[sentIndex].getSecond();
        if (!endCharToTokIndex.containsKey(endChar)) {
            throw new IllegalArgumentException("saw gold sent end char offset '" + endChar + "' but there was no corresponding gold tok end offset.");
        }
        sentEndTokIndexes[sentIndex] = endCharToTokIndex.get(endChar);
    }
    return sentEndTokIndexes;
}
Example usage of gnu.trove.map.hash.TIntIntHashMap in the cogcomp-nlp project by CogComp: the buildTextAnnotation method of the MascReader class.
/**
 * Builds a single {@link TextAnnotation} for one MASC document from its raw text file
 * plus stand-off sentence and token annotation files.
 *
 * <p>Token and sentence spans are given as character offsets; sentence boundaries are
 * translated into one-past-the-end token indexes via a map from token end offsets.
 * The final boundary is then forced to cover all tokens, as required by TextAnnotation.
 *
 * @param textPath path to the raw document text
 * @param sentPath path to the sentence stand-off annotation file
 * @param tokPath  path to the token stand-off annotation file
 * @param pennPath path to the Penn POS/lemma annotation file (used only when readPenn is set)
 * @return a singleton list holding the built TextAnnotation
 * @throws FileNotFoundException if an input file is missing
 * @throws XMLStreamException    if a stand-off XML file cannot be parsed
 * @throws IllegalStateException if a sentence's end token precedes the previous sentence's end
 */
private List<TextAnnotation> buildTextAnnotation(Path textPath, Path sentPath, Path tokPath, Path pennPath) throws FileNotFoundException, XMLStreamException {
    String fileStem = IOUtils.getFileStem(textPath.toFile().getName());
    String text = LineIO.slurp(textPath.toFile().getAbsolutePath());
    // Each entry pairs a token label with its (start, end) character offsets.
    List<Pair<String, IntPair>> tokenInfo = tokenParser.parseFile(tokPath.toFile().getAbsolutePath());
    Pair<List<SentenceStaxParser.MascSentence>, List<SentenceStaxParser.MascSentenceGroup>> sentenceInfo = sentenceParser.parseFile(sentPath.toFile().getAbsolutePath());
    String[] tokens = new String[tokenInfo.size()];
    int[] sentEndTokOffsets = new int[sentenceInfo.getFirst().size()];
    IntPair[] tokOffsets = new IntPair[tokens.length];
    // Maps a token's end character offset to its index, so sentence ends (given as
    // character offsets) can be translated into token indexes below.
    TIntIntHashMap tokEndTotokIndex = new TIntIntHashMap();
    int index = 0;
    for (Pair<String, IntPair> tok : tokenInfo) {
        // TODO: check indexing is one-past-the-end
        // NOTE(review): substring(start, end) implies the stored end offset is exclusive
        // for slicing; the TODO above suggests this was never fully confirmed.
        tokens[index] = text.substring(tok.getSecond().getFirst(), tok.getSecond().getSecond());
        tokOffsets[index] = tok.getSecond();
        tokEndTotokIndex.put(tok.getSecond().getSecond(), index);
        index++;
    }
    List<SentenceStaxParser.MascSentence> sentences = sentenceInfo.getFirst();
    if (DEBUG) {
        printSentences(System.err, text, sentences);
    }
    removeOverlappingSentences(sentences);
    int lastIndex = -1;
    for (int i = 0; i < sentences.size(); ++i) {
        // +1 turns the index of the sentence's last token into a one-past-the-end boundary.
        int newIndex = tokEndTotokIndex.get(sentences.get(i).end) + 1;
        if (newIndex < lastIndex)
            throw new IllegalStateException("sentence end before beginning -- doc '" + sentPath.toString() + "', sent id '" + sentences.get(i).regionId + "', start " + sentences.get(i).start + ", end " + sentences.get(i).end);
        // one-past-the-end index.
        sentEndTokOffsets[i] = newIndex;
        lastIndex = newIndex;
    }
    // The two fix-ups below force the final sentence boundary to equal tokens.length:
    // first nudge an off-by-one boundary, then (if still short) append an artificial
    // sentence covering any trailing tokens.
    if (sentEndTokOffsets[sentences.size() - 1] == tokens.length - 1)
        sentEndTokOffsets[sentences.size() - 1] = tokens.length;
    // needed due to constraints imposed by TextAnnotation
    if (sentEndTokOffsets[sentences.size() - 1] != tokens.length) {
        int[] modSentEndTokOffsets = new int[sentences.size() + 1];
        for (int i = 0; i < sentEndTokOffsets.length; ++i) modSentEndTokOffsets[i] = sentEndTokOffsets[i];
        modSentEndTokOffsets[modSentEndTokOffsets.length - 1] = tokens.length;
        sentEndTokOffsets = modSentEndTokOffsets;
    }
    TextAnnotation ta = new TextAnnotation(super.corpusName, fileStem, text, tokOffsets, tokens, sentEndTokOffsets);
    if (readPenn) {
        addLemmaAndPos(ta, tokenInfo, pennPath);
    }
    return Collections.singletonList(ta);
}
Aggregations