Usage of com.airbnb.spinaltap.mysql.mutation.schema.Table in the SpinalTap project by Airbnb.
Example from the class RowTest, method testNoPrimaryKey:
/** Verifies that a row on a table with no primary key reports a null primary-key value. */
@Test
public void testNoPrimaryKey() throws Exception {
  // Single non-primary-key column; the table declares an empty primary-key column list.
  ColumnMetadata idMetadata = new ColumnMetadata(ID_COLUMN, ColumnDataType.LONGLONG, false, 0);
  Table table =
      new Table(TABLE_ID, TABLE_NAME, DB_NAME, ImmutableList.of(idMetadata), ImmutableList.of());

  Column idColumn = new Column(table.getColumns().get(ID_COLUMN), 1);
  Row row = new Row(table, ImmutableMap.of(ID_COLUMN, idColumn));

  // No primary key on the table, so the row cannot produce a primary-key value.
  assertNull(row.getPrimaryKeyValue());
}
Usage of com.airbnb.spinaltap.mysql.mutation.schema.Table in the SpinalTap project by Airbnb.
Example from the class KafkaDestinationTest, method createMutation:
/**
 * Builds a thrift-mapped MySQL mutation of the given type over a one-column test table.
 *
 * <p>The table has a single primary-key column {@code id} of type LONGLONG, and the row
 * carries the value {@code 1L} for that column.
 *
 * @param type the mutation type to create (INSERT, UPDATE, or DELETE)
 * @return the thrift representation of the constructed mutation
 * @throws IllegalArgumentException if {@code type} is not INSERT, UPDATE, or DELETE
 */
private Mutation createMutation(MutationType type) {
  Mapper<com.airbnb.spinaltap.Mutation<?>, ? extends TBase<?, ?>> thriftMutationMapper =
      ThriftMutationMapper.create("spinaltap");
  Table table =
      new Table(
          0L,
          TABLE,
          DATABASE,
          ImmutableList.of(new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0)),
          ImmutableList.of("id"));
  MysqlMutationMetadata metadata =
      new MysqlMutationMetadata(
          new DataSource(HOSTNAME, 0, "service"),
          new BinlogFilePos(),
          table,
          0L, 0L, 0L, null, null, 0L, 0);
  Row row =
      new Row(
          table,
          ImmutableMap.of(
              "id", new Column(new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0), 1L)));
  MysqlMutation mutation;
  switch (type) {
    case INSERT:
      mutation = new MysqlInsertMutation(metadata, row);
      break;
    case UPDATE:
      mutation = new MysqlUpdateMutation(metadata, row, row);
      break;
    case DELETE:
      mutation = new MysqlDeleteMutation(metadata, row);
      break;
    default:
      // Previously fell through with mutation == null, which would only surface later as an
      // NPE inside thriftMutationMapper.map(). Fail fast with a descriptive exception instead.
      throw new IllegalArgumentException("Unsupported mutation type: " + type);
  }
  return (Mutation) thriftMutationMapper.map(mutation);
}
Usage of com.airbnb.spinaltap.mysql.mutation.schema.Table in the SpinalTap project by Airbnb.
Example from the class TableCache, method addOrUpdate:
/**
 * Ensures the cache holds an up-to-date {@link Table} entry for the given table id.
 *
 * <p>If the entry is missing, or no longer matches the observed name/database/column types,
 * a fresh schema is fetched and the cache entry is replaced.
 *
 * @param tableId binlog table id used as the cache key
 * @param tableName table name observed in the binlog event
 * @param database database the table belongs to
 * @param binlogFilePos binlog position to query the schema store at
 * @param columnTypes column types observed in the binlog event
 * @throws Exception if the schema store lookup fails
 */
public void addOrUpdate(long tableId, String tableName, String database, BinlogFilePos binlogFilePos, List<ColumnDataType> columnTypes) throws Exception {
  Table cached = tableCache.getIfPresent(tableId);
  boolean stale = (cached == null) || !validTable(cached, tableName, database, columnTypes);
  if (stale) {
    // Cache miss or schema drift: re-fetch from the schema store and overwrite the entry.
    tableCache.put(tableId, fetchTable(tableId, database, tableName, binlogFilePos, columnTypes));
  }
}
Usage of com.airbnb.spinaltap.mysql.mutation.schema.Table in the SpinalTap project by Airbnb.
Example from the class TableCache, method fetchTable:
/**
 * Builds a {@link Table} by pairing schema-store column info with binlog column types.
 *
 * <p>Column metadata is assembled positionally; if the schema-store column count and the
 * binlog column-type count disagree, a mismatch is logged and only the common prefix
 * (the shorter of the two) is used.
 *
 * @param tableId binlog table id for the resulting {@link Table}
 * @param databaseName database to query the schema store for
 * @param tableName table to query the schema store for
 * @param binlogFilePos binlog position at which to resolve the schema
 * @param columnTypes column types observed in the binlog event, in column order
 * @return the assembled table, including its primary-key column names
 * @throws Exception if the schema store lookup fails
 */
private Table fetchTable(long tableId, String databaseName, String tableName, BinlogFilePos binlogFilePos, List<ColumnDataType> columnTypes) throws Exception {
  List<ColumnInfo> tableSchema =
      schemaStore.query(databaseName, tableName, binlogFilePos).getColumnInfo();

  if (tableSchema.size() != columnTypes.size()) {
    // Logged but not fatal: we proceed with the overlapping columns below.
    log.error("Schema length {} and Column length {} don't match",
        tableSchema.size(), columnTypes.size());
  }

  int columnCount = Math.min(tableSchema.size(), columnTypes.size());
  List<ColumnMetadata> columnMetadata = new ArrayList<>(columnCount);
  for (int position = 0; position < columnCount; position++) {
    ColumnInfo info = tableSchema.get(position);
    columnMetadata.add(
        new ColumnMetadata(info.getName(), columnTypes.get(position), info.isPrimaryKey(), position));
  }

  // Primary-key names come from the full schema-store listing, in schema order.
  List<String> primaryColumns = new ArrayList<>();
  for (ColumnInfo info : tableSchema) {
    if (info.isPrimaryKey()) {
      primaryColumns.add(info.getName());
    }
  }

  return new Table(tableId, tableName, databaseName, columnMetadata, primaryColumns);
}
Usage of com.airbnb.spinaltap.mysql.mutation.schema.Table in the SpinalTap project by Airbnb.
Example from the class TableCacheTest, method test:
// Exercises TableCache's fetch-on-miss and refresh-on-schema-change behavior.
// The verify(times(n)) counts are cumulative across the whole method, so statement
// order matters; each addOrUpdate either hits the cache (count unchanged) or
// re-queries the schema reader (count incremented).
@Test
public void test() throws Exception {
TableCache tableCache = new TableCache(schemaReader);
List<ColumnDataType> columnTypes = Arrays.asList(ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT, ColumnDataType.LONG);
when(schemaReader.query(DATABASE_NAME, TABLE_NAME, binlogFilePos)).thenReturn(TABLE_SCHEMA);
// Empty cache: no entry yet for TABLE_ID.
assertNull(tableCache.get(TABLE_ID));
// First add: cache miss, triggers a schema-store query.
tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, binlogFilePos, columnTypes);
Table table = tableCache.get(TABLE_ID);
assertEquals(TABLE, table);
verify(schemaReader, times(1)).query(DATABASE_NAME, TABLE_NAME, binlogFilePos);
// Same table/columns again: cache hit, no additional query (count stays at 1).
tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, binlogFilePos, columnTypes);
table = tableCache.get(TABLE_ID);
assertEquals(TABLE, table);
verify(schemaReader, times(1)).query(DATABASE_NAME, TABLE_NAME, binlogFilePos);
// Column types change (4 -> 3 columns): entry is stale, forcing a re-fetch of the
// updated schema (count goes to 2).
columnTypes = Arrays.asList(ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT);
when(schemaReader.query(DATABASE_NAME, TABLE_NAME, binlogFilePos)).thenReturn(TABLE_SCHEMA_UPDATED);
tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, binlogFilePos, columnTypes);
table = tableCache.get(TABLE_ID);
assertEquals(TABLE_UPDATED, table);
verify(schemaReader, times(2)).query(DATABASE_NAME, TABLE_NAME, binlogFilePos);
// Unchanged again: cache hit, count stays at 2.
tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, binlogFilePos, columnTypes);
table = tableCache.get(TABLE_ID);
assertEquals(TABLE_UPDATED, table);
verify(schemaReader, times(2)).query(DATABASE_NAME, TABLE_NAME, binlogFilePos);
// Schema reader now returns schema with 5 columns, but columnTypes has size 4
// fetchTable only uses the overlapping prefix, so the result equals TABLE
// (the 4-column table) despite the oversized stub; count goes to 3.
columnTypes = Arrays.asList(ColumnDataType.TINY, ColumnDataType.STRING, ColumnDataType.FLOAT, ColumnDataType.LONG);
when(schemaReader.query(DATABASE_NAME, TABLE_NAME, binlogFilePos)).thenReturn(TABLE_SCHEMA_LARGE_STUB);
tableCache.addOrUpdate(TABLE_ID, TABLE_NAME, DATABASE_NAME, binlogFilePos, columnTypes);
table = tableCache.get(TABLE_ID);
assertEquals(TABLE, table);
verify(schemaReader, times(3)).query(DATABASE_NAME, TABLE_NAME, binlogFilePos);
}
Aggregations