diff --git a/src/tests/system/tests/test_cache.py b/src/tests/system/tests/test_cache.py
new file mode 100644
index 00000000000..a21e804e8e8
--- /dev/null
+++ b/src/tests/system/tests/test_cache.py
@@ -0,0 +1,196 @@
+"""
+SSSD Cache Tests.
+
+Tests pertaining to SSSD caches. The following types are tested here; some are covered in other Python files.
+
+* Local cache (LDB)
+* Negative cache (ncache)
+* In-memory cache (memcache): test_memcache.py
+
+:requirement: Cache
+"""
+
+from __future__ import annotations
+
+import time
+
+import pytest
+from sssd_test_framework.roles.client import Client
+from sssd_test_framework.roles.generic import GenericProvider
+from sssd_test_framework.topology import KnownTopologyGroup
+
+
+@pytest.mark.importance("critical")
+@pytest.mark.topology(KnownTopologyGroup.AnyProvider)
+def test_cache__is_refreshed_as_configured(client: Client, provider: GenericProvider):
+    """
+    :title: Ensuring LDB cache refreshes at configured intervals
+    :setup:
+        1. Create user
+        2. Create group
+        3. Create netgroup
+        4. Configure SSSD and set 'entry_cache_timeout' to '1' and 'refresh_expired_interval' to '2'
+        5. Restart SSSD
+        6. Lookup user, group and netgroup
+    :steps:
+        1. Search for the objects' 'lastUpdate' and 'dataExpireTimestamp' attributes in the ldb database
+        2. Wait 5 seconds and repeat the search
+    :expectedresults:
+        1. The 'dataExpireTimestamp' value equals the 'lastUpdate + entry_cache_timeout' value
+        2. The objects' 'lastUpdate' timestamp value has been refreshed
+    :customerscenario: False
+    """
+    user = provider.user("test_user").add()
+    provider.group("test_group").add().add_member(user)
+    provider.netgroup("test_netgroup").add().add_member(user=user)
+
+    domain = client.sssd.default_domain
+    entry_cache_timeout = 1
+    refresh_expired_interval = 2
+
+    client.sssd.domain["entry_cache_timeout"] = str(entry_cache_timeout)
+    client.sssd.domain["refresh_expired_interval"] = str(refresh_expired_interval)
+
+    client.sssd.restart()
+    client.tools.getent.passwd(f"test_user@{domain}")
+    client.tools.getent.group(f"test_group@{domain}")
+    client.tools.getent.netgroup(f"test_netgroup@{domain}")
+
+    ldb_cache = f"/var/lib/sss/db/cache_{domain}.ldb"
+    ldb_suffix = f"cn={domain},cn=sysdb"
+
+    last_update: list[int] = []
+    expire_time: list[int] = []
+
+    for i in [f"test_user@{domain}", f"test_group@{domain}", "test_netgroup"]:
+        result = client.ldb.search(ldb_cache, ldb_suffix, filter=f"name={i}")
+        for attrs in result.values():
+            for attr, values in attrs.items():
+                if attr == "lastUpdate":
+                    last_update.append(int(values[0]))
+                if attr == "dataExpireTimestamp":
+                    expire_time.append(int(values[0]))
+
+    for m, last in enumerate(last_update):
+        assert (
+            last + entry_cache_timeout == expire_time[m]
+        ), f"{expire_time[m]} != {last} + {entry_cache_timeout}"
+
+    time.sleep(5)
+
+    for s, name in enumerate([f"test_user@{domain}", f"test_group@{domain}", "test_netgroup"]):
+        result = client.ldb.search(ldb_cache, ldb_suffix, filter=f"name={name}")
+        for attrs in result.values():
+            for attr, values in attrs.items():
+                if attr == "lastUpdate":
+                    assert last_update[s] <= int(values[0]), f"'{name}' lastUpdate value was not refreshed!"
+
+
+@pytest.mark.topology(KnownTopologyGroup.AnyProvider)
+def test_cache__search_for_user_in_ldb_databases(client: Client, provider: GenericProvider):
+    """
+    :title: Search for user in the following ldb databases, cache_*.ldb and timestamps_*.ldb
+    :setup:
+        1. Create user
+        2. Start SSSD
+    :steps:
+        1. Lookup user
+        2. Check that the timestamps cache file exists
+        3. Lookup user in the cache ldb database
+        4. Lookup user in the timestamps ldb database
+    :expectedresults:
+        1. User is found
+        2. Timestamps cache file exists
+        3. User is found
+        4. User is found
+    :customerscenario: False
+    """
+    provider.user("user1").add()
+    client.sssd.start()
+    client.tools.getent.passwd("user1")
+    cache = "/var/lib/sss/db/cache_test.ldb"
+    timestamps = "/var/lib/sss/db/timestamps_test.ldb"
+    assert client.fs.exists(timestamps), f"Timestamp file '{timestamps}' does not exist"
+
+    ldb1 = client.ldb.search(cache, "name=user1@test,cn=users,cn=test,cn=sysdb")
+    ldb2 = client.ldb.search(timestamps, "name=user1@test,cn=users,cn=test,cn=sysdb")
+    assert ldb1 != {}, f"ldbsearch failed to find user1 in {cache}"
+    assert ldb2 != {}, f"ldbsearch failed to find user1 in {timestamps}"
+
+
+@pytest.mark.topology(KnownTopologyGroup.AnyProvider)
+def test_cache__search_for_user_using_fully_qualified_name_in_ldb_databases(client: Client, provider: GenericProvider):
+    """
+    :title: Search for user using fully qualified name in the following ldb databases, cache_*.ldb and timestamps_*.ldb
+    :setup:
+        1. Create user
+        2. Start SSSD
+    :steps:
+        1. Lookup user
+        2. Lookup user in the cache ldb database
+        3. Lookup user in the timestamps ldb database
+    :expectedresults:
+        1. User is found
+        2. User is found
+        3. User is found
+    :customerscenario: False
+    """
+    provider.user("user1").add()
+    client.sssd.domain["use_fully_qualified_names"] = "True"
+    client.sssd.start()
+    client.tools.getent.passwd("user1@test")
+
+    cache = "/var/lib/sss/db/cache_test.ldb"
+    timestamps = "/var/lib/sss/db/timestamps_test.ldb"
+    user_basedn = "name=user1@test,cn=users,cn=test,cn=sysdb"
+    ldb1 = client.ldb.search(cache, user_basedn)
+    ldb2 = client.ldb.search(timestamps, user_basedn)
+
+    assert ldb1 != {}, f"ldbsearch failed to find user1@test in {cache}"
+    assert ldb2 != {}, f"ldbsearch failed to find user1@test in {timestamps}"
+
+
+@pytest.mark.topology(KnownTopologyGroup.AnyProvider)
+def test_cache__check_ldb_database_for_latest_user_changes_when_modified_and_deleted(
+    client: Client, provider: GenericProvider
+):
+    """
+    :title: Check ldb database for latest user changes when modified and deleted
+    :setup:
+        1. Add users 'user-modify' and 'user-delete'
+        2. Start SSSD
+        3. Lookup users
+    :steps:
+        1. Login as both users
+        2. Modify the shell of 'user-modify', delete 'user-delete' and clear the cache
+        3. Attempt to login as both users
+        4. Lookup user 'user-delete'
+        5. Lookup user 'user-modify'
+    :expectedresults:
+        1. Users logged in
+        2. User 'user-modify' is modified and user 'user-delete' is deleted
+        3. User 'user-modify' logged in, user 'user-delete' could not log in
+        4. User 'user-delete' is not found
+        5. User 'user-modify' is found and the shell was updated
+    :customerscenario: False
+    """
+    provider.user("user-modify").add(shell="/bin/bash")
+    provider.user("user-delete").add(shell="/bin/bash")
+    client.sssd.start()
+    client.tools.getent.passwd("user-modify")
+    client.tools.getent.passwd("user-delete")
+
+    assert client.auth.ssh.password("user-modify", "Secret123"), "Login failed!"
+    assert client.auth.ssh.password("user-delete", "Secret123"), "Login failed!"
+
+    provider.user("user-delete").delete()
+    provider.user("user-modify").modify(shell="/bin/sh")
+
+    client.sssctl.cache_expire(everything=True)
+
+    assert client.auth.ssh.password("user-modify", "Secret123"), "Login failed!"
+    assert not client.auth.ssh.password("user-delete", "Secret123"), "Deleted user 'user-delete' was able to log in!"
+
+    result = client.tools.getent.passwd("user-modify")
+    assert result is not None, "User not found!"
+    assert result.shell == "/bin/sh", "User shell did not update!"
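Note on the timestamp checks above: both search loops walk the same nested structure returned by client.ldb.search() (a dict of entries, each mapping attribute names to lists of values). A minimal sketch of how that walk could be factored into a shared helper — get_cache_timestamps is a hypothetical name, not part of this patch, and it assumes the same return shape relied on by the assertions above:

def get_cache_timestamps(client, ldb_cache: str, ldb_suffix: str, name: str) -> tuple[int, int]:
    """Hypothetical helper: return (lastUpdate, dataExpireTimestamp) for one cached object."""
    last_update, expire_time = 0, 0
    result = client.ldb.search(ldb_cache, ldb_suffix, filter=f"name={name}")
    for attrs in result.values():
        for attr, values in attrs.items():
            if attr == "lastUpdate":
                last_update = int(values[0])
            elif attr == "dataExpireTimestamp":
                expire_time = int(values[0])
    return last_update, expire_time

With such a helper, the first assertion loop reduces to comparing last_update + entry_cache_timeout against expire_time for each looked-up name.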
diff --git a/src/tests/system/tests/test_memory_cache.py b/src/tests/system/tests/test_memcache.py similarity index 96% rename from src/tests/system/tests/test_memory_cache.py rename to src/tests/system/tests/test_memcache.py index 7c743e35511..ab476552463 100644 --- a/src/tests/system/tests/test_memory_cache.py +++ b/src/tests/system/tests/test_memcache.py @@ -1,5 +1,5 @@ """ -SSSD Memory cache-related Test Cases +SSSD In-Memory Cache (memcache) Test Cases. :requirement: IDM-SSSD-REQ: Client side performance improvements """ @@ -16,7 +16,7 @@ @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_users(client: Client, provider: GenericProvider): +def test_memcache__lookup_users(client: Client, provider: GenericProvider): """ :title: Lookup user by name uses memory cache when SSSD is stopped :setup: @@ -57,7 +57,7 @@ def check(users): @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_groups(client: Client, provider: GenericProvider): +def test_memcache__lookup_groups(client: Client, provider: GenericProvider): """ :title: Lookup group by groupname uses memory cache when SSSD is stopped :setup: @@ -98,7 +98,7 @@ def check(groups): @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__user_cache_is_disabled_and_lookup_groups(client: Client, provider: GenericProvider): +def test_memcache__user_cache_is_disabled_and_lookup_groups(client: Client, provider: GenericProvider): """ :title: Lookup group by groupname uses memory cache when SSSD is stopped and 'memcache_size_passwd' = 0 :setup: @@ -141,7 +141,7 @@ def check(groups): @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__user_cache_is_disabled_and_lookup_users(client: Client, provider: GenericProvider): +def test_memcache__user_cache_is_disabled_and_lookup_users(client: Client, provider: GenericProvider): """ :title: Lookup user by name when SSSD is stopped and 'memcache_size_passwd' = 0 uses memory cache therefore user is not found @@ -188,7 +188,7 @@ def test_memory_cache__user_cache_is_disabled_and_lookup_users(client: Client, p @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__initgroup_cache_is_disabled_and_lookup_groups(client: Client, provider: GenericProvider): +def test_memcache__initgroup_cache_is_disabled_and_lookup_groups(client: Client, provider: GenericProvider): """ :title: Lookup group by groupname when SSSD is stopped and 'memcache_size_initgroups' = 0 uses memory cache :setup: @@ -231,7 +231,7 @@ def check(groups): @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__initgroup_cache_is_disabled_and_lookup_users(client: Client, provider: GenericProvider): +def test_memcache__initgroup_cache_is_disabled_and_lookup_users(client: Client, provider: GenericProvider): """ :title: Lookup user by name and id when SSSD is stopped and 'memcache_size_initgroups' = 0 uses memory cache :setup: @@ -286,7 +286,7 @@ def check(ids): @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__group_cache_disabled_and_lookup_groups(client: Client, provider: GenericProvider): +def 
test_memcache__group_cache_disabled_and_lookup_groups(client: Client, provider: GenericProvider): """ :title: Lookup user by name and id when SSSD is stopped and 'memcache_size_group' = 0 uses memory cache, but lookup groups is not possible @@ -360,7 +360,7 @@ def check(users): @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__all_caches_disabled_and_all_lookups_fails(client: Client, provider: GenericProvider): +def test_memcache__all_caches_disabled_and_all_lookups_fails(client: Client, provider: GenericProvider): """ :title: Lookup user and group when SSSD is stopped and whole cache disabled uses memory cache and therefore it is not possible @@ -437,7 +437,7 @@ def test_memory_cache__all_caches_disabled_and_all_lookups_fails(client: Client, @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_users_check_group_memberships(client: Client, provider: GenericProvider): +def test_memcache__lookup_users_check_group_memberships(client: Client, provider: GenericProvider): """ :title: Lookup user by name and test membership by name use memory cache when SSSD is stopped :setup: @@ -490,7 +490,7 @@ def check(): @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_users_and_check_membership_by_gid(client: Client, provider: GenericProvider): +def test_memcache__lookup_users_and_check_membership_by_gid(client: Client, provider: GenericProvider): """ :title: Lookup user by name and test membership by gid use memory cache when SSSD is stopped :setup: @@ -546,7 +546,7 @@ def check(): @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_uids_and_check_membership_by_gid(client: Client, provider: GenericProvider): +def test_memcache__lookup_uids_and_check_membership_by_gid(client: Client, provider: GenericProvider): """ :title: Lookup user by id and test membership by gid use memory cache when SSSD is stopped :setup: @@ -602,7 +602,7 @@ def check(): @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_users_by_fully_qualified_names(client: Client, provider: GenericProvider): +def test_memcache__lookup_users_by_fully_qualified_names(client: Client, provider: GenericProvider): """ :title: Lookup user by full name when 'use_fully_qualified_names' is 'true' uses memory cache when sssd is stopped @@ -655,7 +655,7 @@ def check(): @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_users_when_case_insensitive_is_false(client: Client, provider: GenericProvider): +def test_memcache__lookup_users_when_case_insensitive_is_false(client: Client, provider: GenericProvider): """ :title: Lookup user by case insensitive name when 'case_sensitive' is 'false' uses memory cache when SSSD is stopped @@ -718,7 +718,7 @@ def test_memory_cache__lookup_users_when_case_insensitive_is_false(client: Clien @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_users_when_fully_qualified_name_is_true_and_case_ins_is_false( +def test_memcache__lookup_users_when_fully_qualified_name_is_true_and_case_ins_is_false( client: Client, provider: 
GenericProvider ): """ @@ -780,7 +780,7 @@ def test_memory_cache__lookup_users_when_fully_qualified_name_is_true_and_case_i @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidation_of_gids_after_initgroups(client: Client, provider: GenericProvider): +def test_memcache__invalidation_of_gids_after_initgroups(client: Client, provider: GenericProvider): """ :title: Invalidate groups after initgroups call when SSSD is stopped :setup: @@ -860,7 +860,7 @@ def check_group(name, gid): @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__lookup_initgroups_without_change_in_membership(client: Client, provider: GenericProvider): +def test_memcache__lookup_initgroups_without_change_in_membership(client: Client, provider: GenericProvider): """ :title: Invalidated cache, after refresh and stopped SSSD, has everything loaded in memory :setup: @@ -949,7 +949,7 @@ def check(): @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_user_cache_before_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_user_cache_before_stop(client: Client, provider: GenericProvider): """ :title: Invalidate user cache before SSSD is stopped :setup: @@ -1005,7 +1005,7 @@ def test_memory_cache__invalidate_user_cache_before_stop(client: Client, provide @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_user_cache_after_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_user_cache_after_stop(client: Client, provider: GenericProvider): """ :title: Invalidate user cache after SSSD is stopped :setup: @@ -1061,7 +1061,7 @@ def test_memory_cache__invalidate_user_cache_after_stop(client: Client, provider @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_users_cache_before_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_users_cache_before_stop(client: Client, provider: GenericProvider): """ :title: Invalidate users cache before SSSD is stopped :setup: @@ -1125,7 +1125,7 @@ def test_memory_cache__invalidate_users_cache_before_stop(client: Client, provid @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_users_cache_after_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_users_cache_after_stop(client: Client, provider: GenericProvider): """ :title: Invalidate users cache after SSSD is stopped :setup: @@ -1189,7 +1189,7 @@ def test_memory_cache__invalidate_users_cache_after_stop(client: Client, provide @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_group_cache_before_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_group_cache_before_stop(client: Client, provider: GenericProvider): """ :title: Invalidate group cache before SSSD is stopped :setup: @@ -1232,7 +1232,7 @@ def test_memory_cache__invalidate_group_cache_before_stop(client: Client, provid @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def 
test_memory_cache__invalidate_group_cache_after_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_group_cache_after_stop(client: Client, provider: GenericProvider): """ :title: Invalidate group cache after SSSD is stopped :setup: @@ -1275,7 +1275,7 @@ def test_memory_cache__invalidate_group_cache_after_stop(client: Client, provide @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_groups_cache_before_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_groups_cache_before_stop(client: Client, provider: GenericProvider): """ :title: Invalidate groups cache before SSSD is stopped :setup: @@ -1322,7 +1322,7 @@ def test_memory_cache__invalidate_groups_cache_before_stop(client: Client, provi @pytest.mark.importance("high") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_groups_cache_after_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_groups_cache_after_stop(client: Client, provider: GenericProvider): """ :title: Invalidate groups cache after SSSD is stopped :setup: @@ -1369,7 +1369,7 @@ def test_memory_cache__invalidate_groups_cache_after_stop(client: Client, provid @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_everything_before_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_everything_before_stop(client: Client, provider: GenericProvider): """ :title: Invalidate all parts of cache before SSSD is stopped :setup: @@ -1440,7 +1440,7 @@ def test_memory_cache__invalidate_everything_before_stop(client: Client, provide @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__invalidate_everything_after_stop(client: Client, provider: GenericProvider): +def test_memcache__invalidate_everything_after_stop(client: Client, provider: GenericProvider): """ :title: Invalidate all parts of cache after SSSD is stopped :setup: @@ -1511,7 +1511,7 @@ def test_memory_cache__invalidate_everything_after_stop(client: Client, provider @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__memcache_timeout_zero(client: Client, provider: GenericProvider): +def test_memcache__memcache_timeout_zero(client: Client, provider: GenericProvider): """ :title: Cache is not created at all when 'memcache_timeout' set to '0' :setup: @@ -1571,7 +1571,7 @@ def test_memory_cache__memcache_timeout_zero(client: Client, provider: GenericPr @pytest.mark.importance("critical") @pytest.mark.cache @pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_memory_cache__removed_cache_without_invalidation(client: Client, provider: GenericProvider): +def test_memcache__removed_cache_without_invalidation(client: Client, provider: GenericProvider): """ :title: SSSD is stopped, cache removed then users and groups cannot be lookedup :setup: @@ -1629,7 +1629,7 @@ def test_memory_cache__removed_cache_without_invalidation(client: Client, provid @pytest.mark.topology(KnownTopology.LDAP) @pytest.mark.ticket(bz=2226021) -def test_memory_cache__truncate_in_memory_cache_no_sigbus(client: Client, ldap: LDAP): +def test_memcache__truncate_in_memory_cache_no_sigbus(client: Client, ldap: LDAP): """ :title: Accessing truncated in-memory 
cache file does not cause SIGBUS :setup: diff --git a/src/tests/system/tests/test_sss_cache.py b/src/tests/system/tests/test_sss_cache.py deleted file mode 100644 index ba20fa858a4..00000000000 --- a/src/tests/system/tests/test_sss_cache.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -sss_cache tests. - -:requirement: IDM-SSSD-REQ: Status utility -""" - -from __future__ import annotations - -import time - -import pytest -from pytest_mh.ssh import SSHProcessError -from sssd_test_framework.roles.client import Client -from sssd_test_framework.roles.generic import GenericProvider -from sssd_test_framework.topology import KnownTopology, KnownTopologyGroup - - -@pytest.mark.ticket(bz=1661182) -@pytest.mark.topology(KnownTopology.Client) -def test_sss_cache__cache_expire_message(client: Client): - """ - :title: sss_cache do not print fake error messages - :setup: - 1. Configure SSSD without any domain - 2. Set to sssd section "enable_files_domain" to "false" - 3. Create local user - :steps: - 1. Restart SSSD - 2. Modify existing local user - 3. Expire cache with specific options - :expectedresults: - 1. Error is raised, SSSD is not running - 2. Modified successfully - 3. Output did not contain wrong messages - :customerscenario: True - """ - client.sssd.sssd["enable_files_domain"] = "false" - client.local.user("user1").add() - - with pytest.raises(SSHProcessError): - client.sssd.restart() - - res = client.host.ssh.run("usermod -a -G wheel user1") - assert "No domains configured, fatal error!" not in res.stdout - - for cmd in ("sss_cache -U", "sss_cache -G", "sss_cache -E", "sss_cache --user=nonexisting"): - res = client.host.ssh.run(cmd) - assert "No domains configured, fatal error!" not in res.stdout - - -@pytest.mark.importance("critical") -@pytest.mark.cache -@pytest.mark.topology(KnownTopologyGroup.AnyProvider) -def test_sss_cache__ldb_database_is_refreshed_as_configured(client: Client, provider: GenericProvider): - """ - :title: Ensuring ldb cache data is refreshed correctly - :setup: - 1. Create provider user - 2. Create provider group - 3. Create provider netgroup - 4. Configure SSSD and set 'entry_cache_timeout' to 1 and 'refresh_expired_interval' to 2 - 5. Restart SSSD - 6. Populate the cache by performing 'getent' on the user, group and netgroup - :steps: - 1. Search for user, group and netgroup lastUpdate and dataExpireTimestamp in the ldb database - 2. Wait 5 seconds and search for all timestamp in the cache again - :expectedresults: - 1. The 'dataExpireTimestamp' value equals the 'lastUpdate + entry_cache_timeout' value - 2. 
User, group and netgroup 'lastUpdate' timestamp value has been refreshed - :customerscenario: False - """ - user = provider.user("test_user").add() - provider.group("test_group").add().add_member(user) - provider.netgroup("test_netgroup").add().add_member(user=user) - - domain = client.sssd.default_domain - entry_cache_timeout = 1 - refresh_expired_interval = 2 - - client.sssd.domain["entry_cache_timeout"] = str(entry_cache_timeout) - client.sssd.domain["refresh_expired_interval"] = str(refresh_expired_interval) - - client.sssd.restart() - client.tools.getent.passwd(f"test_user@{domain}") - client.tools.getent.group(f"test_group@{domain}") - client.tools.getent.netgroup(f"test_netgroup@{domain}") - - ldb_cache = f"/var/lib/sss/db/cache_{domain}.ldb" - ldb_suffix = f"cn={domain},cn=sysdb" - - last_update: list[int] = [] - expire_time: list[int] = [] - - for i in [f"test_user@{domain}", f"test_group@{domain}", "test_netgroup"]: - result = client.ldb.search(ldb_cache, ldb_suffix, filter=f"name={i}") - for k, v in result.items(): - for y in v.items(): - if y[0] == "lastUpdate": - last_update = last_update + [(int(y[1][0]))] - if y[0] == "dataExpireTimestamp": - expire_time = expire_time + [(int(y[1][0]))] - - for m, n in enumerate(last_update): - assert last_update[m] + entry_cache_timeout == expire_time[m] - - time.sleep(5) - - for s, t in enumerate([f"test_user@{domain}", f"test_group@{domain}", "test_netgroup"]): - result = client.ldb.search(ldb_cache, ldb_suffix, filter=f"name={t}") - for k, v in result.items(): - for y in v.items(): - if y[0] == "lastUpdate": - assert last_update[s] <= (int(y[1][0])) diff --git a/src/tests/system/tests/test_tools.py b/src/tests/system/tests/test_tools.py new file mode 100644 index 00000000000..bf69d14f2a7 --- /dev/null +++ b/src/tests/system/tests/test_tools.py @@ -0,0 +1,61 @@ +""" +Tools Tests. + +Tests pertaining to command line tools, some tools will have their own file. + +* sssctl: test_sssctl.py +* sss_cache +* sss_obfuscate +* sss_seed +* sss_debuglevel +* sss_override: sss_override.py +* sss_ssh_authorizedkeys +* sss_ssh_knownhostsproxy + +:requirement: Tools +""" + +from __future__ import annotations + +import pytest +from pytest_mh.ssh import SSHProcessError +from sssd_test_framework.roles.client import Client +from sssd_test_framework.topology import KnownTopology + + +@pytest.mark.importance("medium") +@pytest.mark.ticket(bz=1661182) +@pytest.mark.topology(KnownTopology.Client) +def test_tools__sss_cache_expired_does_not_print_unrelated_message(client: Client): + """ + :title: Usermod command does not print unrelated sss_cache messages + :setup: + 1. Configure SSSD without any domain + 2. Set to sssd section "enable_files_domain" to "false" + 3. Create local user + :steps: + 1. Restart SSSD + 2. Modify existing local user + 3. Expire cache with specific options + :expectedresults: + 1. Error is raised, SSSD is not running + 2. Modified successfully + 3. Output did not contain wrong messages + :customerscenario: True + """ + client.sssd.sssd["enable_files_domain"] = "false" + client.local.user("user1").add() + + with pytest.raises(SSHProcessError): + client.sssd.restart() + + res = client.host.ssh.run("usermod -a -G wheel user1") + assert ( + "No domains configured, fatal error!" not in res.stdout + ), "'No domains configured, fatal error!' printed to stdout!" + + for cmd in ("sss_cache -U", "sss_cache -G", "sss_cache -E", "sss_cache --user=nonexisting"): + res = client.host.ssh.run(cmd) + assert ( + "No domains configured, fatal error!" 
not in res.stdout + ), "'No domains configured, fatal error!' printed to stdout!"
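
The usermod/sss_cache checks in test_tools.py run the sss_cache commands in a plain loop. One possible alternative, sketched here under the assumption that the same client fixture, SSHProcessError import, and client.host.ssh.run() call are available, is to parametrize the sss_cache invocations so each command reports as its own test case (the test name below is hypothetical, not part of this patch):

@pytest.mark.importance("medium")
@pytest.mark.topology(KnownTopology.Client)
@pytest.mark.parametrize("cmd", ["sss_cache -U", "sss_cache -G", "sss_cache -E", "sss_cache --user=nonexisting"])
def test_tools__sss_cache_does_not_print_fatal_error(client: Client, cmd: str):
    # Hypothetical parametrized variant of the loop above; repeats the same
    # 'enable_files_domain = false' setup performed by the original test.
    client.sssd.sssd["enable_files_domain"] = "false"
    with pytest.raises(SSHProcessError):
        client.sssd.restart()
    res = client.host.ssh.run(cmd)
    assert "No domains configured, fatal error!" not in res.stdout, "Fake fatal error printed to stdout!"

Whether the per-command setup cost is worth the clearer reporting is a judgment call; the looped version in the patch keeps a single SSSD restart.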