Skip to content

Commit

Permalink
fix #1205, use data node instead of actual table name on table metadata
Browse files Browse the repository at this point in the history
  • Loading branch information
terrymanu committed Sep 1, 2018
1 parent 78ea549 commit 44cbca7
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import io.shardingsphere.core.metadata.datasource.ShardingDataSourceMetaData;
import io.shardingsphere.core.metadata.table.ColumnMetaData;
import io.shardingsphere.core.metadata.table.TableMetaData;
import io.shardingsphere.core.rule.DataNode;
import io.shardingsphere.core.rule.ShardingDataSourceNames;
import io.shardingsphere.core.rule.ShardingRule;
import lombok.RequiredArgsConstructor;
Expand Down Expand Up @@ -71,33 +72,33 @@ public TableMetaData load(final String logicTableName, final ShardingRule shardi
return actualTableMetaDataList.iterator().next();
}

private List<TableMetaData> load(final Map<String, List<String>> dataNodeGroups, final ShardingDataSourceNames shardingDataSourceNames) throws SQLException {
return executeEngine.groupExecute(partitionDataNodeGroups(dataNodeGroups), new ShardingGroupExecuteCallback<String, TableMetaData>() {
private List<TableMetaData> load(final Map<String, List<DataNode>> dataNodeGroups, final ShardingDataSourceNames shardingDataSourceNames) throws SQLException {
return executeEngine.groupExecute(partitionDataNodeGroups(dataNodeGroups), new ShardingGroupExecuteCallback<DataNode, TableMetaData>() {

@Override
public Collection<TableMetaData> execute(final String dataSourceName, final Collection<String> actualTableNames) throws SQLException {
public Collection<TableMetaData> execute(final String dataSourceName, final Collection<DataNode> dataNodes) throws SQLException {
DataSourceMetaData dataSourceMetaData = shardingDataSourceMetaData.getActualDataSourceMetaData(dataSourceName);
String catalog = null == dataSourceMetaData ? null : dataSourceMetaData.getSchemeName();
return load(shardingDataSourceNames.getRawMasterDataSourceName(dataSourceName), catalog, actualTableNames);
return load(shardingDataSourceNames.getRawMasterDataSourceName(dataSourceName), catalog, dataNodes);
}
});
}

private Collection<TableMetaData> load(final String dataSourceName, final String catalog, final Collection<String> actualTableNames) throws SQLException {
private Collection<TableMetaData> load(final String dataSourceName, final String catalog, final Collection<DataNode> dataNodes) throws SQLException {
Collection<TableMetaData> result = new LinkedList<>();
try (Connection connection = connectionManager.getConnection(dataSourceName)) {
for (String each : actualTableNames) {
result.add(new TableMetaData(isTableExist(connection, catalog, each) ? getColumnMetaDataList(connection, catalog, each) : Collections.<ColumnMetaData>emptyList()));
for (DataNode each : dataNodes) {
result.add(new TableMetaData(isTableExist(connection, catalog, each.getTableName()) ? getColumnMetaDataList(connection, catalog, each.getTableName()) : Collections.<ColumnMetaData>emptyList()));
}
}
return result;
}

private Map<String, List<List<String>>> partitionDataNodeGroups(final Map<String, List<String>> dataNodeGroups) {
Map<String, List<List<String>>> result = new HashMap<>(dataNodeGroups.size(), 1);
for (Entry<String, List<String>> entry : dataNodeGroups.entrySet()) {
int desiredPartitionSize = entry.getValue().size() / maxConnectionsSizePerQuery;
result.put(entry.getKey(), Lists.partition(entry.getValue(), 0 == desiredPartitionSize ? 1 : desiredPartitionSize));
private Map<String, List<List<DataNode>>> partitionDataNodeGroups(final Map<String, List<DataNode>> dataNodeGroups) {
Map<String, List<List<DataNode>>> result = new HashMap<>(dataNodeGroups.size(), 1);
for (Entry<String, List<DataNode>> entry : dataNodeGroups.entrySet()) {
int desiredPartitionSize = Math.max(entry.getValue().size() / maxConnectionsSizePerQuery, 1);
result.put(entry.getKey(), Lists.partition(entry.getValue(), desiredPartitionSize));
}
return result;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -112,14 +112,14 @@ private List<DataNode> generateDataNodes(final List<String> actualDataNodes, fin
*
 * @return data node groups, key is data source name, value is data nodes belonging to this data source
*/
public Map<String, List<String>> getDataNodeGroups() {
Map<String, List<String>> result = new LinkedHashMap<>(actualDataNodes.size(), 1);
public Map<String, List<DataNode>> getDataNodeGroups() {
Map<String, List<DataNode>> result = new LinkedHashMap<>(actualDataNodes.size(), 1);
for (DataNode each : actualDataNodes) {
String dataSourceName = each.getDataSourceName();
if (!result.containsKey(dataSourceName)) {
result.put(dataSourceName, new LinkedList<String>());
result.put(dataSourceName, new LinkedList<DataNode>());
}
result.get(dataSourceName).add(each.getTableName());
result.get(dataSourceName).add(each);
}
return result;
}
Expand Down

0 comments on commit 44cbca7

Please sign in to comment.