use of edu.uci.ics.textdb.api.schema.Attribute in project textdb by TextDB.
the class CatalogConstants method getSchemaCatalogTuples.
/**
 * Gets the tuples to be inserted into the schema catalog.
 *
 * @param tableName   the name of the table
 * @param tableSchema the schema of the table
 * @return the list of schema catalog tuples, one per attribute
 */
public static List<Tuple> getSchemaCatalogTuples(String tableName, Schema tableSchema) {
    List<Tuple> schemaCatalogTuples = new ArrayList<>();
    for (int i = 0; i < tableSchema.getAttributes().size(); i++) {
        Attribute attr = tableSchema.getAttributes().get(i);
        Tuple schemaTuple = new Tuple(SCHEMA_CATALOG_SCHEMA,
                new StringField(tableName),
                new StringField(attr.getAttributeName()),
                new StringField(attr.getAttributeType().toString().toLowerCase()),
                new IntegerField(i));
        schemaCatalogTuples.add(schemaTuple);
    }
    return schemaCatalogTuples;
}
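A minimal usage sketch, not taken from the project source: build a small Schema and pass it to getSchemaCatalogTuples. The table and attribute names are invented, and a varargs Schema constructor is assumed based on the single-attribute and array forms used in the other snippets on this page.

    // Hypothetical usage -- "person_table" and the attribute names are placeholders.
    Schema personSchema = new Schema(
            new Attribute("name", AttributeType.TEXT),
            new Attribute("description", AttributeType.TEXT));
    List<Tuple> catalogTuples =
            CatalogConstants.getSchemaCatalogTuples("person_table", personSchema);
    // Expect one catalog tuple per attribute, carrying
    // (tableName, attributeName, attributeType, position).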
use of edu.uci.ics.textdb.api.schema.Attribute in project textdb by TextDB.
the class KeywordMatcherPerformanceTest method match.
/*
 * Runs keyword matching for each query in the list, recording the match time
 * for each query and accumulating the total number of matched spans.
 */
public static void match(ArrayList<String> queryList, KeywordMatchingType opType,
        String luceneAnalyzerStr, String tableName) throws TextDBException, IOException {
    Attribute[] attributeList = new Attribute[] { MedlineIndexWriter.ABSTRACT_ATTR };
    for (String query : queryList) {
        KeywordSourcePredicate predicate = new KeywordSourcePredicate(query,
                Utils.getAttributeNames(attributeList), luceneAnalyzerStr, opType, tableName, null);
        KeywordMatcherSourceOperator keywordSource = new KeywordMatcherSourceOperator(predicate);
        long startMatchTime = System.currentTimeMillis();
        keywordSource.open();
        int counter = 0;
        Tuple nextTuple = null;
        while ((nextTuple = keywordSource.getNextTuple()) != null) {
            ListField<Span> spanListField = nextTuple.getField(SchemaConstants.SPAN_LIST);
            List<Span> spanList = spanListField.getValue();
            counter += spanList.size();
        }
        keywordSource.close();
        long endMatchTime = System.currentTimeMillis();
        double matchTime = (endMatchTime - startMatchTime) / 1000.0;
        timeResults.add(Double.parseDouble(String.format("%.4f", matchTime)));
        totalResultCount += counter;
    }
}
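A hedged driver sketch for this helper, not part of the test class: the query strings, analyzer string, and table name below are placeholders, and the KeywordMatchingType is left as a parameter rather than guessing an enum constant.

    // Hypothetical driver -- "standard" and "medline_table" stand in for the
    // analyzer string and table name the index was actually built with.
    public static void runSampleQueries(KeywordMatchingType opType) throws Exception {
        ArrayList<String> queries = new ArrayList<>();
        queries.add("cancer");
        queries.add("diabetes");
        match(queries, opType, "standard", "medline_table");
    }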
use of edu.uci.ics.textdb.api.schema.Attribute in project textdb by TextDB.
the class MedlineIndexWriter method recordToTuple.
/**
 * Converts a Medline JSON record string into a Tuple conforming to SCHEMA_MEDLINE.
 */
public static Tuple recordToTuple(String record) throws IOException, ParseException {
    JsonNode jsonNode = new ObjectMapper().readValue(record, JsonNode.class);
    ArrayList<IField> fieldList = new ArrayList<IField>();
    for (Attribute attr : ATTRIBUTES_MEDLINE) {
        fieldList.add(StorageUtils.getField(attr.getAttributeType(),
                jsonNode.get(attr.getAttributeName()).toString()));
    }
    IField[] fieldArray = new IField[fieldList.size()];
    Tuple tuple = new Tuple(SCHEMA_MEDLINE, fieldList.toArray(fieldArray));
    return tuple;
}
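A hedged usage sketch: feed a single JSON record to recordToTuple. The JSON keys shown here are placeholders; the real keys must match the attribute names declared in ATTRIBUTES_MEDLINE, otherwise jsonNode.get(...) returns null and the conversion fails.

    // Hypothetical record -- "pmid" and "abstract" are placeholder keys.
    String record = "{\"pmid\": 1, \"abstract\": \"sample abstract text\"}";
    Tuple medlineTuple = MedlineIndexWriter.recordToTuple(record);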
use of edu.uci.ics.textdb.api.schema.Attribute in project textdb by TextDB.
the class ExcelSinkTest method attributeTypeTest.
@Test
public void attributeTypeTest() throws Exception {
    // writes 10000 tuples
    ArrayList<String> attributeNames = new ArrayList<>();
    attributeNames.add(TestConstants.FIRST_NAME);
    attributeNames.add(TestConstants.LAST_NAME);
    attributeNames.add(TestConstants.AGE);
    attributeNames.add(TestConstants.HEIGHT);
    attributeNames.add(TestConstants.DATE_OF_BIRTH);
    attributeNames.add(TestConstants.DESCRIPTION);
    // Prepare the schema
    Attribute[] schemaAttributes = new Attribute[TestConstants.ATTRIBUTES_PEOPLE.length];
    for (int count = 0; count < schemaAttributes.length; count++) {
        schemaAttributes[count] = TestConstants.ATTRIBUTES_PEOPLE[count];
    }
    // Prepare 10000 tuples as a tupleList
    int testSize = 10000;
    Random rand = new Random();
    List<Tuple> tupleList = new ArrayList<Tuple>();
    for (int i = 0; i < testSize; i++) {
        IField[] fields = { new StringField(getRandomString()), new StringField(getRandomString()),
                new IntegerField(rand.nextInt()), new DoubleField(rand.nextDouble() * rand.nextInt()),
                new DateField(getRandomDate()), new TextField(getRandomString()) };
        tupleList.add(new Tuple(new Schema(schemaAttributes), fields));
    }
    assert (tupleList.size() == testSize);
    IOperator inputOperator = Mockito.mock(IOperator.class);
    Mockito.when(inputOperator.getOutputSchema()).thenReturn(new Schema(schemaAttributes));
    OngoingStubbing<Tuple> stubbing = Mockito.when(inputOperator.getNextTuple());
    for (Tuple t : tupleList) {
        stubbing = stubbing.thenReturn(t);
    }
    stubbing = stubbing.thenReturn(null);
    // Excel writing test
    excelSink = new ExcelSink(new ExcelSinkPredicate());
    excelSink.setInputOperator(inputOperator);
    excelSink.open();
    excelSink.collectAllTuples();
    excelSink.close();
    Files.deleteIfExists(Paths.get(excelSink.getFilePath()));
}
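The Mockito stubbing above can be factored into a small helper; this is a sketch rather than project code, and it relies only on the IOperator methods the test already exercises (getOutputSchema and getNextTuple).

    // Sketch: mock an IOperator that emits the given tuples in order, then null.
    private static IOperator mockSource(Schema schema, List<Tuple> tuples) {
        IOperator source = Mockito.mock(IOperator.class);
        Mockito.when(source.getOutputSchema()).thenReturn(schema);
        OngoingStubbing<Tuple> stubbing = Mockito.when(source.getNextTuple());
        for (Tuple t : tuples) {
            stubbing = stubbing.thenReturn(t);
        }
        stubbing.thenReturn(null);
        return source;
    }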
use of edu.uci.ics.textdb.api.schema.Attribute in project textdb by TextDB.
the class FileSourceOperatorTest method test2.
/*
 * Tests FileSourceOperator on a directory with all optional parameters left at
 * their defaults, so only files directly under the directory are read.
 *
 * Expected results: the contents of test1.txt and test2.txt are included.
 */
@Test
public void test2() throws Exception {
    String attrName = "content";
    Schema schema = new Schema(new Attribute(attrName, AttributeType.TEXT));
    FileSourcePredicate predicate = new FileSourcePredicate(tempFolderPath.toString(), attrName);
    FileSourceOperator fileSource = new FileSourceOperator(predicate);
    Tuple tuple;
    ArrayList<Tuple> exactResults = new ArrayList<>();
    fileSource.open();
    while ((tuple = fileSource.getNextTuple()) != null) {
        exactResults.add(tuple);
    }
    fileSource.close();
    List<Tuple> expectedResults = Arrays.asList(
            new Tuple(schema, new TextField(tempFile1String)),
            new Tuple(schema, new TextField(tempFile2String)));
    Assert.assertTrue(TestUtils.equals(expectedResults, exactResults));
}
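The same drain-the-operator pattern works outside a test; in this sketch the directory path is a placeholder, and only the FileSourcePredicate and FileSourceOperator constructors shown above are relied on.

    // Hypothetical usage -- "/path/to/docs" is a placeholder directory.
    FileSourcePredicate filePredicate = new FileSourcePredicate("/path/to/docs", "content");
    FileSourceOperator fileSourceOp = new FileSourceOperator(filePredicate);
    List<Tuple> fileTuples = new ArrayList<>();
    fileSourceOp.open();
    Tuple fileTuple;
    while ((fileTuple = fileSourceOp.getNextTuple()) != null) {
        fileTuples.add(fileTuple);   // one tuple per file, text in the "content" attribute
    }
    fileSourceOp.close();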