diff --git a/changelog.d/10761.bugfix b/changelog.d/10761.bugfix
new file mode 100644
index 000000000000..54a4fc1c8e0d
--- /dev/null
+++ b/changelog.d/10761.bugfix
@@ -0,0 +1 @@
+Fix leaking per-room nicknames and avatars to the user directory for local users when the user directory is rebuilt.
diff --git a/docs/user_directory.md b/docs/user_directory.md
index 5ff14e334c4b..07fe95489133 100644
--- a/docs/user_directory.md
+++ b/docs/user_directory.md
@@ -14,7 +14,15 @@ flush the current tables and regenerate the directory.
 Data model
 ----------

-There are five relevant tables:
+There are five relevant tables that collectively form the "user directory".
+Three of them track a master list of all the users we could search for.
+The last two (collectively called the "search tables") track who can
+see who.
+
+From all of these tables we exclude three types of local user:
+  - support users
+  - appservice users
+  - deactivated users

 * `user_directory`. This contains the user_id, display name and avatar we'll
   return when you search the directory.
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 0817a9ad5ad2..38769c3d1615 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -114,13 +114,8 @@ async def handle_local_profile_change(
         """
         # FIXME(#3714): We should probably do this in the same worker as all
         # the other changes.
-
-        # Support users are for diagnostics and should not appear in the user directory.
-        is_support = await self.store.is_support_user(user_id)
-        # When change profile information of deactivated user it should not appear in the user directory.
-        is_deactivated = await self.store.get_user_deactivated_status(user_id)
-
-        if not (is_support or is_deactivated):
+        excluded = await self.store.is_excluded_from_user_dir(user_id)
+        if not excluded:
             await self.store.update_profile_in_user_dir(
                 user_id, profile.display_name, profile.avatar_url
             )
@@ -201,7 +196,7 @@ async def _handle_delta(self, delta: Dict[str, Any]) -> None:
                 room_id, prev_event_id, event_id, typ
             )
         elif typ == EventTypes.Member:
-            if await self._user_omitted_from_directory(state_key):
+            if await self.store.is_excluded_from_user_dir(state_key):
                 return

             joined = await self._get_key_change(
@@ -258,16 +253,6 @@ async def _handle_delta(self, delta: Dict[str, Any]) -> None:
         else:
             logger.debug("Ignoring irrelevant type: %r", typ)

-    async def _user_omitted_from_directory(self, user_id: str) -> bool:
-        """We want to ignore events from "hidden" users who shouldn't be exposed
-        to real users."""
-        if await self.store.is_support_user(user_id):
-            return True
-        if self.store.get_if_app_services_interested_in_user(user_id):
-            return True
-
-        return False
-
     async def _handle_room_publicity_change(
         self,
         room_id: str,
@@ -340,18 +325,24 @@ async def _handle_room_publicity_change(

     async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
         """Someone's just joined a room. Add to `users_in_public_rooms` and
-        `users_who_share_private_rooms` if necessary."""
+        `users_who_share_private_rooms` if necessary.
+
+        The caller is responsible for ensuring that the given user may be
+        included in the user directory.
+ (See UserDirectoryStore.exclude_from_user_dir) + """ is_public = await self.store.is_room_world_readable_or_publicly_joinable( room_id ) if is_public: await self.store.add_users_in_public_rooms(room_id, (user_id,)) else: - other_users_in_room = await self.store.get_users_in_room(room_id) + users_in_room = await self.store.get_users_in_room(room_id) other_users_in_room = [ - other_user - for other_user in other_users_in_room - if not await self._user_omitted_from_directory(other_user) + user + for user in users_in_room + if user != user_id + and not await self.store.is_excluded_from_user_dir(user) ] to_insert = set() @@ -359,15 +350,10 @@ async def _track_user_joined_room(self, room_id: str, user_id: str) -> None: # First, if they're our user then we need to update for every user if self.is_mine_id(user_id): for other_user_id in other_users_in_room: - if user_id == other_user_id: - continue - to_insert.add((user_id, other_user_id)) # Next we need to update for every local user in the room for other_user_id in other_users_in_room: - if user_id == other_user_id: - continue if self.is_mine_id(other_user_id): to_insert.add((other_user_id, user_id)) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 21b516423120..3f2781e33d3f 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -14,30 +14,24 @@ import logging import re -from typing import Any, Dict, Iterable, Optional, Sequence, Set, Tuple - -from typing_extensions import TypedDict +from typing import Any, Dict, Iterable, Optional, Sequence, Set, Tuple, cast from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.storage.databases.main.state import StateFilter from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import get_domain_from_id, get_localpart_from_id +from synapse.types import JsonDict, get_domain_from_id, get_localpart_from_id from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) - TEMP_TABLE = "_temp_populate_user_directory" -class ProgressDict(TypedDict): - remaining: int - - class UserDirectoryBackgroundUpdateStore(StateDeltasStore): - # How many records do we calculate before sending it to # add_users_who_share_private_rooms? SHARE_PRIVATE_WORKING_SET = 500 @@ -64,7 +58,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): ) async def _populate_user_directory_createtables( - self, progress: Dict, batch_size: str + self, progress: JsonDict, batch_size: str ) -> int: # Get all the rooms that we want to process. @@ -119,7 +113,7 @@ def _make_staging_area(txn): return 1 async def _populate_user_directory_cleanup( - self, progress: Dict, batch_size: str + self, progress: JsonDict, batch_size: str ) -> int: """ Update the user directory stream position, then clean up the old tables. 
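For orientation: the handler changes above stop doing their own support/deactivated/appservice checks and route every exclusion decision through a single store-level predicate, `is_excluded_from_user_dir`, whose definition is added further down in this file's diff. The standalone sketch below (not part of the patch; `LocalUserFlags` and `excluded_from_user_dir` are invented names for illustration) shows the rule that predicate implements: only local users are ever filtered, and the three excluded classes are exactly those listed in docs/user_directory.md.

from dataclasses import dataclass

@dataclass
class LocalUserFlags:
    """Minimal stand-in for the store lookups used by is_excluded_from_user_dir."""
    is_local: bool               # hs.is_mine_id(user_id)
    is_support: bool             # store.is_support_user(user_id)
    is_deactivated: bool         # store.get_user_deactivated_status(user_id)
    appservice_interested: bool  # store.get_if_app_services_interested_in_user(user_id)

def excluded_from_user_dir(flags: LocalUserFlags) -> bool:
    # Remote users are never excluded here; we only filter certain local users.
    if not flags.is_local:
        return False
    # Support, deactivated and appservice users are hidden from the directory.
    return flags.is_support or flags.is_deactivated or flags.appservice_interested

# e.g. a deactivated local user is excluded, but a remote user never is:
assert excluded_from_user_dir(LocalUserFlags(True, False, True, False))
assert not excluded_from_user_dir(LocalUserFlags(False, True, True, True))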
@@ -144,7 +138,7 @@ def _delete_staging_area(txn): return 1 async def _populate_user_directory_process_rooms( - self, progress: ProgressDict, batch_size: int + self, progress: JsonDict, batch_size: int ) -> int: """ Rescan the state of all rooms so we can track @@ -168,7 +162,7 @@ async def _populate_user_directory_process_rooms( def _get_next_batch( txn: LoggingTransaction, - ) -> Optional[Sequence[Tuple[str, str]]]: + ) -> Optional[Sequence[Tuple[str, int]]]: # Only fetch 250 rooms, so we don't fetch too many at once, even # if those 250 rooms have less than batch_size state events. sql = """ @@ -179,7 +173,7 @@ def _get_next_batch( TEMP_TABLE + "_rooms", ) txn.execute(sql) - rooms_to_work_on = txn.fetchall() + rooms_to_work_on = cast(Sequence[Tuple[str, int]], txn.fetchall()) if not rooms_to_work_on: return None @@ -187,7 +181,8 @@ def _get_next_batch( # Get how many are left to process, so we can give status on how # far we are in processing txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") - progress["remaining"] = txn.fetchone()[0] + result = cast(Tuple[int], txn.fetchone()) + progress["remaining"] = result[0] return rooms_to_work_on @@ -249,13 +244,16 @@ async def _populate_user_directory_process_single_room(self, room_id: str) -> No if not is_in_room: return - is_public = await self.is_room_world_readable_or_publicly_joinable(room_id) - # TODO: this will leak per-room profiles to the user directory. + # TODO: get_users_in_room_with_profiles returns per-room profiles. Leak! users_with_profile = await self.get_users_in_room_with_profiles(room_id) - - # Update each remote user in the user directory. - # (Entries for local users are managed by the UserDirectoryHandler - # and do not require us to peek at room state/events.) + users_with_profile = { + user_id: profile + for user_id, profile in users_with_profile.items() + if not await self.is_excluded_from_user_dir(user_id) + } + + # Upsert a user_directory record for each remote user we see. + # (Local users are processed in `_populate_user_directory_users`.) for user_id, profile in users_with_profile.items(): if self.hs.is_mine_id(user_id): continue @@ -263,26 +261,17 @@ async def _populate_user_directory_process_single_room(self, room_id: str) -> No user_id, profile.display_name, profile.avatar_url ) - to_insert = set() - + # Now update the room sharing tables to include this room. 
+ is_public = await self.is_room_world_readable_or_publicly_joinable(room_id) if is_public: - for user_id in users_with_profile: - if self.get_if_app_services_interested_in_user(user_id): - continue - - to_insert.add(user_id) - - if to_insert: - await self.add_users_in_public_rooms(room_id, to_insert) - to_insert.clear() + if users_with_profile: + await self.add_users_in_public_rooms(room_id, users_with_profile.keys()) else: + to_insert = set() for user_id in users_with_profile: if not self.hs.is_mine_id(user_id): continue - if self.get_if_app_services_interested_in_user(user_id): - continue - for other_user_id in users_with_profile: if user_id == other_user_id: continue @@ -298,10 +287,9 @@ async def _populate_user_directory_process_single_room(self, room_id: str) -> No if to_insert: await self.add_users_who_share_private_room(room_id, to_insert) - to_insert.clear() async def _populate_user_directory_process_users( - self, progress: ProgressDict, batch_size: int + self, progress: JsonDict, batch_size: int ) -> int: """Upsert a user_directory entry for each local user.""" @@ -343,10 +331,13 @@ def _get_next_batch(txn): ) for user_id in users_to_work_on: - profile = await self.get_profileinfo(get_localpart_from_id(user_id)) - await self.update_profile_in_user_dir( - user_id, profile.display_name, profile.avatar_url - ) + if not await self.is_excluded_from_user_dir(user_id): + # Populate this local user's user directory entry with their + # profile information + profile = await self.get_profileinfo(get_localpart_from_id(user_id)) + await self.update_profile_in_user_dir( + user_id, profile.display_name, profile.avatar_url + ) # We've finished processing a user. Delete it from the table. await self.db_pool.simple_delete_one( @@ -521,7 +512,7 @@ async def get_user_in_directory(self, user_id: str) -> Optional[Dict[str, Any]]: desc="get_user_in_directory", ) - async def update_user_directory_stream_pos(self, stream_id: int) -> None: + async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> None: await self.db_pool.simple_update_one( table="user_directory_stream_pos", keyvalues={}, @@ -529,13 +520,31 @@ async def update_user_directory_stream_pos(self, stream_id: int) -> None: desc="update_user_directory_stream_pos", ) + async def is_excluded_from_user_dir(self, user_id: str) -> bool: + """Certain classes of local user are omitted from the user directory. + Is this user one of them? + """ + if self.hs.is_mine_id(user_id): + # Support users are for diagnostics and should not appear in the user directory. + if await self.is_support_user(user_id): + return True -class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): + # Deactivated users aren't contactable, so should not appear in the user directory. + if await self.get_user_deactivated_status(user_id): + return True + + # App service users aren't usually contactable, so exclude them. + if self.get_if_app_services_interested_in_user(user_id): + return True + + return False - # How many records do we calculate before sending it to - # add_users_who_share_private_rooms? - SHARE_PRIVATE_WORKING_SET = 500 +class UserDirectoryStore( + UserDirectoryBackgroundUpdateStore, + ApplicationServiceWorkerStore, + RegistrationWorkerStore, +): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) @@ -744,7 +753,7 @@ async def search_user_dir(self, user_id, search_term, limit): # We allow manipulating the ranking algorithm by injecting statements # based on config options. 
additional_ordering_statements = [] - ordering_arguments = () + ordering_arguments: Tuple[str, ...] = () if isinstance(self.database_engine, PostgresEngine): full_query, exact_query, prefix_query = _parse_query_postgres(search_term) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index a251f77f9c2e..71e0b0a7c4b2 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Tuple from unittest.mock import Mock from twisted.internet import defer @@ -23,12 +22,19 @@ from synapse.storage.roommember import ProfileInfo from tests import unittest -from tests.unittest import override_config +from tests.storage.test_user_directory import GetUserDirectoryTables +from tests.unittest import HomeserverTestCase, override_config -class UserDirectoryTestCase(unittest.HomeserverTestCase): +class UserDirectoryTestCase(GetUserDirectoryTables, HomeserverTestCase): """ Tests the UserDirectoryHandler. + + We're broadly testing two kinds of things here. + + 1. Check that we correctly update the user directory in response + to events (e.g. join a room, leave a room, change name, make public) + 2. Check that the search logic behaves as expected. """ servlets = [ @@ -371,134 +377,7 @@ def test_legacy_spam_checker(self): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) - def _compress_shared(self, shared): - """ - Compress a list of users who share rooms dicts to a list of tuples. - """ - r = set() - for i in shared: - r.add((i["user_id"], i["other_user_id"], i["room_id"])) - return r - - def get_users_in_public_rooms(self) -> List[Tuple[str, str]]: - r = self.get_success( - self.store.db_pool.simple_select_list( - "users_in_public_rooms", None, ("user_id", "room_id") - ) - ) - retval = [] - for i in r: - retval.append((i["user_id"], i["room_id"])) - return retval - - def get_users_who_share_private_rooms(self) -> List[Tuple[str, str, str]]: - return self.get_success( - self.store.db_pool.simple_select_list( - "users_who_share_private_rooms", - None, - ["user_id", "other_user_id", "room_id"], - ) - ) - - def _add_background_updates(self): - """ - Add the background updates we need to run. - """ - # Ugh, have to reset this flag - self.store.db_pool.updates._all_done = False - - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_createtables", - "progress_json": "{}", - }, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_process_rooms", - "progress_json": "{}", - "depends_on": "populate_user_directory_createtables", - }, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_process_users", - "progress_json": "{}", - "depends_on": "populate_user_directory_process_rooms", - }, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_cleanup", - "progress_json": "{}", - "depends_on": "populate_user_directory_process_users", - }, - ) - ) - - def test_initial(self): - """ - The user directory's initial handler correctly updates the search tables. 
- """ - u1 = self.register_user("user1", "pass") - u1_token = self.login(u1, "pass") - u2 = self.register_user("user2", "pass") - u2_token = self.login(u2, "pass") - u3 = self.register_user("user3", "pass") - u3_token = self.login(u3, "pass") - - room = self.helper.create_room_as(u1, is_public=True, tok=u1_token) - self.helper.invite(room, src=u1, targ=u2, tok=u1_token) - self.helper.join(room, user=u2, tok=u2_token) - - private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) - self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token) - self.helper.join(private_room, user=u3, tok=u3_token) - - self.get_success(self.store.update_user_directory_stream_pos(None)) - self.get_success(self.store.delete_all_from_user_dir()) - - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() - - # Nothing updated yet - self.assertEqual(shares_private, []) - self.assertEqual(public_users, []) - - # Do the initial population of the user directory via the background update - self._add_background_updates() - - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) - - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() - - # User 1 and User 2 are in the same public room - self.assertEqual(set(public_users), {(u1, room), (u2, room)}) - - # User 1 and User 3 share private rooms - self.assertEqual( - self._compress_shared(shares_private), - {(u1, u3, private_room), (u3, u1, private_room)}, - ) - - def test_initial_share_all_users(self): + def test_search_all_users(self): """ Search all users = True means that a user does not have to share a private room with the searching user or be in a public room to be search @@ -511,20 +390,6 @@ def test_initial_share_all_users(self): self.register_user("user2", "pass") u3 = self.register_user("user3", "pass") - # Wipe the user dir - self.get_success(self.store.update_user_directory_stream_pos(None)) - self.get_success(self.store.delete_all_from_user_dir()) - - # Do the initial population of the user directory via the background update - self._add_background_updates() - - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) - shares_private = self.get_users_who_share_private_rooms() public_users = self.get_users_in_public_rooms() @@ -589,15 +454,6 @@ def test_prefer_local_users(self): local_users = [local_user_1, local_user_2, local_user_3] remote_users = [remote_user_1, remote_user_2, remote_user_3] - # Populate the user directory via background update - self._add_background_updates() - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) - # The local searching user searches for the term "user", which other users have # in their user id results = self.get_success( @@ -702,8 +558,6 @@ def test_making_room_public_doesnt_alter_directory_entry(self): class TestUserDirSearchDisabled(unittest.HomeserverTestCase): - user_id = "@test:test" - servlets = [ user_directory.register_servlets, room.register_servlets, @@ -722,17 +576,22 @@ def make_homeserver(self, reactor, clock): def test_disabling_room_list(self): 
self.config.user_directory_search_enabled = True - - # First we create a room with another user so that user dir is non-empty - # for our user - self.helper.create_room_as(self.user_id) + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") u2 = self.register_user("user2", "pass") - room = self.helper.create_room_as(self.user_id) - self.helper.join(room, user=u2) + u2_token = self.login(u2, "pass") + + # First we create a room with another user u2, so that user dir is + # non-empty for our user u1 + room = self.helper.create_room_as(u1, is_public=True, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) # Assert user directory is not empty channel = self.make_request( - "POST", b"user_directory/search", b'{"search_term":"user2"}' + "POST", + b"user_directory/search", + b'{"search_term":"user2"}', + access_token=u1_token, ) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) > 0) @@ -740,7 +599,10 @@ def test_disabling_room_list(self): # Disable user directory and check search returns nothing self.config.user_directory_search_enabled = False channel = self.make_request( - "POST", b"user_directory/search", b'{"search_term":"user2"}' + "POST", + b"user_directory/search", + b'{"search_term":"user2"}', + access_token=u1_token, ) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 222e5d129d73..a2da30572659 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -11,9 +11,320 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, List, Tuple +from unittest.mock import Mock, patch +import synapse +from synapse.api.constants import UserTypes +from synapse.appservice import ApplicationService +from synapse.rest.client import login, room +from synapse.storage import DataStore +from synapse.types import UserID, create_requester + +from tests.test_utils.event_injection import inject_member_event from tests.unittest import HomeserverTestCase, override_config + +class GetUserDirectoryTables(HomeserverTestCase): + """These helpers aren't present on the store itself. We want to use them + here and in the handler's tests too. + """ + + store: DataStore + + def get_users_in_public_rooms(self) -> List[Tuple[str, str]]: + r = self.get_success( + self.store.db_pool.simple_select_list( + "users_in_public_rooms", None, ("user_id", "room_id") + ) + ) + retval = [] + for i in r: + retval.append((i["user_id"], i["room_id"])) + return retval + + def get_users_who_share_private_rooms(self) -> List[Tuple[str, str, str]]: + return self.get_success( + self.store.db_pool.simple_select_list( + "users_who_share_private_rooms", + None, + ["user_id", "other_user_id", "room_id"], + ) + ) + + def get_users_in_user_directory(self) -> Dict[str, str]: + # Just the set of usernames for now + r = self.get_success( + self.store.db_pool.simple_select_list( + "user_directory", None, ("user_id", "display_name") + ) + ) + return {entry["user_id"]: entry["display_name"] for entry in r} + + def _compress_shared(self, shared): + """ + Compress a list of users who share rooms dicts to a list of tuples. 
+ """ + r = set() + for i in shared: + r.add((i["user_id"], i["other_user_id"], i["room_id"])) + return r + + +class UserDirectoryInitialPopulationTestcase( + GetUserDirectoryTables, HomeserverTestCase +): + """Ensure that the initial background process creates the user directory data + as intended. + + See also tests/handlers/test_user_directory.py for similar checks. They + test the incremental updates, rather than the big batch of updates. + """ + + servlets = [ + login.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + def _purge_and_rebuild_user_dir(self): + """Nuke the user directory tables, start the background process to + repopulate them, and wait for the process to complete. This allows us + to inspect the outcome of the background process alone, without any of + the other incremental updates. + """ + self.get_success(self.store.update_user_directory_stream_pos(None)) + self.get_success(self.store.delete_all_from_user_dir()) + + shares_private = self.get_users_who_share_private_rooms() + public_users = self.get_users_in_public_rooms() + + # Nothing updated yet + self.assertEqual(shares_private, []) + self.assertEqual(public_users, []) + + # Ugh, have to reset this flag + self.store.db_pool.updates._all_done = False + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_createtables", + "progress_json": "{}", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_process_rooms", + "progress_json": "{}", + "depends_on": "populate_user_directory_createtables", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_process_users", + "progress_json": "{}", + "depends_on": "populate_user_directory_process_rooms", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_cleanup", + "progress_json": "{}", + "depends_on": "populate_user_directory_process_users", + }, + ) + ) + + while not self.get_success( + self.store.db_pool.updates.has_completed_background_updates() + ): + self.get_success( + self.store.db_pool.updates.do_next_background_update(100), by=0.1 + ) + + def test_populates_local_users(self): + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + u2_token = self.login(u2, "pass") + u3 = self.register_user("user3", "pass") + u3_token = self.login(u3, "pass") + + room = self.helper.create_room_as(u1, is_public=True, tok=u1_token) + self.helper.invite(room, src=u1, targ=u2, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) + + private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token) + self.helper.join(private_room, user=u3, tok=u3_token) + + self._purge_and_rebuild_user_dir() + + shares_private = self.get_users_who_share_private_rooms() + public_users = self.get_users_in_public_rooms() + + # User 1 and User 2 are in the same public room + self.assertEqual(set(public_users), {(u1, room), (u2, room)}) + + # User 1 and User 3 share private rooms + self.assertEqual( + self._compress_shared(shares_private), + {(u1, u3, private_room), (u3, u1, private_room)}, + ) 
+ + # All three should have entries in the directory + self.assertEqual(set(self.get_users_in_user_directory().keys()), {u1, u2, u3}) + + def test_populates_users_in_zero_rooms(self): + billy_no_mates = self.register_user("user2", "pass") + self._purge_and_rebuild_user_dir() + self.assertEqual(self.get_users_in_user_directory().keys(), {billy_no_mates}) + self.assertEqual(self.get_users_in_public_rooms(), []) + self.assertEqual(self.get_users_who_share_private_rooms(), []) + + def test_population_excludes_support_user(self): + support = "@support1:test" + self.get_success( + self.store.register_user( + user_id=support, password_hash=None, user_type=UserTypes.SUPPORT + ) + ) + + self._purge_and_rebuild_user_dir() + # TODO add support user to a public and private room. Check that + # users_in_public_rooms and users_who_share_private_rooms is empty. + self.assertEqual(self.get_users_in_user_directory(), {}) + + def test_population_excludes_appservice_user(self): + as_token = "i_am_an_app_service" + appservice = ApplicationService( + as_token, + self.hs.config.server_name, + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + ) + self.store.services_cache.append(appservice) + + as_user = "@as_user_potato:test" + self.get_success(self.store.register_user(user_id=as_user, password_hash=None)) + + # TODO can we configure the app service up front somehow? This is a hack. + mock_regex = Mock() + mock_regex.match = lambda user_id: user_id == as_user + with patch.object(self.store, "exclusive_user_regex", mock_regex): + self._purge_and_rebuild_user_dir() + + # TODO add AS user to a public and private room. Check that + # users_in_public_rooms and users_who_share_private_rooms is empty. + self.assertEqual(self.get_users_in_user_directory(), {}) + + def test_population_excludes_deactivated_user(self): + user = self.register_user("rip", "pass") + user_token = self.login(user, "pass") + self.helper.create_room_as(user, is_public=True, tok=user_token) + self.helper.create_room_as(user, is_public=False, tok=user_token) + self.get_success(self.store.set_user_deactivated_status(user, True)) + + self._purge_and_rebuild_user_dir() + + self.assertEqual(self.get_users_in_public_rooms(), []) + self.assertEqual(self.get_users_who_share_private_rooms(), []) + self.assertEqual(self.get_users_in_user_directory(), {}) + + def test_populates_remote_and_local_users(self): + """All local users and remote users have entries in the user_directory table. + + Test normal local user in a room is in the user directory. + Test remote user in a public room is in the user directory. + Test remote user in a private room is in the user directory. + """ + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + + remote1 = "@c:other.server" + remote2 = "@d:other.server" + + public_room = self.helper.create_room_as(u1, is_public=True, tok=u1_token) + self.get_success( + inject_member_event( + self.hs, + public_room, + remote1, + "join", + ) + ) + + private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.get_success( + inject_member_event(self.hs, public_room, u1, "invite", remote2) + ) + self.get_success( + inject_member_event( + self.hs, + private_room, + remote2, + "join", + ) + ) + + self._purge_and_rebuild_user_dir() + + users_in_directory = set(self.get_users_in_user_directory().keys()) + # No assertions about displaynames or avatars here. 
+ # TODO extend this case to do so, or add a new test case to cover it + self.assertEqual(users_in_directory, {u1, u2, remote1, remote2}) + + def test_population_of_local_users_ignores_per_room_nicknames(self): + user = self.register_user("user", "pass") + token = self.login(user, "pass") + + # Explictly set a profile name for our userAlice + self.get_success( + self.hs.get_profile_handler().set_displayname( + UserID.from_string(user), + create_requester(user, token), + "Alice Cooper", + ) + ) + + # Alice makes a private room and sets a nickname there + private_room = self.helper.create_room_as(user, is_public=False, tok=token) + self.helper.send_state( + private_room, + "m.room.member", + { + "displayname": "Freddie Mercury", + "membership": "join", + }, + token, + state_key=user, + ) + + # Rebuild the directory + self._purge_and_rebuild_user_dir() + + # Check we only see Alice's profile name in the directory + self.assertEqual( + self.get_users_in_user_directory(), {"@user:test": "Alice Cooper"} + ) + + ALICE = "@alice:a" BOB = "@bob:b" BOBBY = "@bobby:a"
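The TODOs in the exclusion tests above ask for the excluded user to also be placed in a public and a private room before the rebuild, so we can check that the search tables stay empty. A rough sketch of what that extension might look like for the support-user case is below; it is illustrative only and not part of this patch, reusing the helpers and setup already defined in this diff (`_purge_and_rebuild_user_dir`, `inject_member_event`, the `GetUserDirectoryTables` accessors).

    def test_population_excludes_support_user_even_in_rooms(self) -> None:
        # Sketch only: an ordinary user plus a support user who shares rooms with them.
        alice = self.register_user("alice", "pass")
        alice_token = self.login(alice, "pass")
        support = "@support1:test"
        self.get_success(
            self.store.register_user(
                user_id=support, password_hash=None, user_type=UserTypes.SUPPORT
            )
        )

        # Put the support user into both a public and a private room with Alice.
        public_room = self.helper.create_room_as(alice, is_public=True, tok=alice_token)
        private_room = self.helper.create_room_as(alice, is_public=False, tok=alice_token)
        for room_id in (public_room, private_room):
            self.get_success(
                inject_member_event(self.hs, room_id, support, "join")
            )

        self._purge_and_rebuild_user_dir()

        # The support user must not leak into any of the directory tables.
        self.assertEqual(set(self.get_users_in_user_directory().keys()), {alice})
        self.assertEqual(self.get_users_in_public_rooms(), [(alice, public_room)])
        self.assertEqual(self.get_users_who_share_private_rooms(), [])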