From 757010905ea85333672289a0ac124d41bd923bb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 11:14:14 +0000 Subject: [PATCH 01/75] Bump twisted from 22.10.0 to 23.8.0 (#16235) * Bump twisted from 22.10.0 to 23.8.0 Bumps [twisted](https://github.com/twisted/twisted) from 22.10.0 to 23.8.0. - [Release notes](https://github.com/twisted/twisted/releases) - [Changelog](https://github.com/twisted/twisted/blob/trunk/NEWS.rst) - [Commits](https://github.com/twisted/twisted/compare/twisted-22.10.0...twisted-23.8.0) --- updated-dependencies: - dependency-name: twisted dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Fix types * Fix lint * Newsfile --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Erik Johnston --- changelog.d/16235.misc | 1 + poetry.lock | 37 ++++++++++++++++---------------- synapse/handlers/initial_sync.py | 8 ++----- synapse/logging/context.py | 4 ++-- synapse/util/gai_resolver.py | 2 +- 5 files changed, 24 insertions(+), 28 deletions(-) create mode 100644 changelog.d/16235.misc diff --git a/changelog.d/16235.misc b/changelog.d/16235.misc new file mode 100644 index 000000000000..b1533f93b608 --- /dev/null +++ b/changelog.d/16235.misc @@ -0,0 +1 @@ +Fix type checking when using the new version of Twisted. diff --git a/poetry.lock b/poetry.lock index 1cefabb358df..872a863edc95 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2866,44 +2866,43 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "22.10.0" +version = "23.8.0" description = "An asynchronous networking framework written in Python" optional = false python-versions = ">=3.7.1" files = [ - {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"}, - {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"}, + {file = "twisted-23.8.0-py3-none-any.whl", hash = "sha256:b8bdba145de120ffb36c20e6e071cce984e89fba798611ed0704216fb7f884cd"}, + {file = "twisted-23.8.0.tar.gz", hash = "sha256:3c73360add17336a622c0d811c2a2ce29866b6e59b1125fd6509b17252098a24"}, ] [package.dependencies] -attrs = ">=19.2.0" -Automat = ">=0.8.0" +attrs = ">=21.3.0" +automat = ">=0.8.0" constantly = ">=15.1" hyperlink = ">=17.1.1" idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""} -incremental = ">=21.3.0" +incremental = ">=22.10.0" pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""} service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""} twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""} -typing-extensions = ">=3.6.5" -"zope.interface" = ">=4.4.2" +typing-extensions = ">=3.10.0" +zope-interface = ">=5" [package.extras] -all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] -conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] -conch-nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] 
+all-non-platform = ["twisted[conch,contextvars,http2,serial,test,tls]", "twisted[conch,contextvars,http2,serial,test,tls]"] +conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"] contextvars = ["contextvars (>=2.4,<3)"] -dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"] -dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"] -gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"] +dev-release = ["pydoctor (>=23.4.0,<23.5.0)", "pydoctor (>=23.4.0,<23.5.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "sphinx (>=5,<7)", "sphinx (>=5,<7)", "sphinx-rtd-theme (>=1.2,<2.0)", "sphinx-rtd-theme (>=1.2,<2.0)", "towncrier (>=22.12,<23.0)", "towncrier (>=22.12,<23.0)", "urllib3 (<2)", "urllib3 (<2)"] +gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] -macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] -mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"] -osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"] +mypy = ["mypy 
(==0.981)", "mypy-extensions (==0.4.3)", "mypy-zope (==0.3.11)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] +osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] -test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"] +test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"] tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"] -windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +windows-platform = ["pywin32 (!=226)", "pywin32 (!=226)", "twisted[all-non-platform]", "twisted[all-non-platform]"] [[package]] name = "twisted-iocpsupport" diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index b3be7a86f0df..5dc76ef588f7 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.constants import ( AccountDataTypes, @@ -23,7 +23,6 @@ Membership, ) from synapse.api.errors import SynapseError -from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state @@ -35,7 +34,6 @@ JsonDict, Requester, RoomStreamToken, - StateMap, StreamKeyType, StreamToken, UserID, @@ -199,9 +197,7 @@ async def handle_room(event: RoomsForUser) -> None: deferred_room_state = run_in_background( self._state_storage_controller.get_state_for_events, [event.event_id], - ).addCallback( - lambda states: cast(StateMap[EventBase], states[event.event_id]) - ) + ).addCallback(lambda states: states[event.event_id]) (messages, token), current_state = await make_deferred_yieldable( gather_results( diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 64c6ae451208..bf7e311026e0 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -728,7 +728,7 @@ async def _unwrap_awaitable(awaitable: Awaitable[R]) -> R: @overload -def preserve_fn( # type: ignore[misc] +def preserve_fn( f: Callable[P, Awaitable[R]], ) -> Callable[P, "defer.Deferred[R]"]: # The `type: ignore[misc]` above suppresses @@ -756,7 +756,7 @@ def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]": @overload -def run_in_background( # type: ignore[misc] +def run_in_background( f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": # The `type: ignore[misc]` above suppresses diff --git a/synapse/util/gai_resolver.py b/synapse/util/gai_resolver.py index 214eb17fbccb..fecf829ade5f 100644 --- a/synapse/util/gai_resolver.py +++ b/synapse/util/gai_resolver.py @@ -136,7 +136,7 @@ def __init__( # The types on IHostnameResolver is incorrect in Twisted, see # https://twistedmatrix.com/trac/ticket/10276 - def resolveHostName( # type: ignore[override] + def resolveHostName( self, resolutionReceiver: IResolutionReceiver, hostName: str, From 
dfcfa9f0eda11b339d51d73cd12167ed6e3c01f0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Sep 2023 13:12:50 +0100 Subject: [PATCH 02/75] Bump minimum supported Rust version to 1.61.0 (#16248) --- .github/workflows/tests.yml | 18 +++++++++--------- changelog.d/16248.misc | 1 + docs/upgrade.md | 8 ++++++++ rust/Cargo.toml | 9 +++++++-- 4 files changed, 25 insertions(+), 11 deletions(-) create mode 100644 changelog.d/16248.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 0a01e8298468..fb117380d028 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -35,7 +35,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -93,7 +93,7 @@ jobs: uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - name: Setup Poetry @@ -150,7 +150,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -167,7 +167,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 with: components: clippy - uses: Swatinem/rust-cache@v2 @@ -268,7 +268,7 @@ jobs: postgres:${{ matrix.job.postgres-version }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 @@ -308,7 +308,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 # There aren't wheels for some of the older deps, so we need to install @@ -416,7 +416,7 @@ jobs: run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - name: Run SyTest @@ -556,7 +556,7 @@ jobs: path: synapse - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: actions/setup-go@v4 @@ -584,7 +584,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - run: cargo test diff --git a/changelog.d/16248.misc b/changelog.d/16248.misc new file mode 100644 index 000000000000..0a5ed6dccb17 --- /dev/null +++ b/changelog.d/16248.misc @@ -0,0 +1 @@ +Bump minimum supported Rust version to 1.61.0. diff --git a/docs/upgrade.md b/docs/upgrade.md index f50a279e985a..2f888b6f1270 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,14 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.93.0 + +## Minimum supported Rust version +The minimum supported Rust version has been increased from v1.60.0 to v1.61.0. +Users building from source will need to ensure their `rustc` version is up to +date. 
+
+
 # Upgrading to v1.90.0
 
 ## App service query parameter authorization is now a configuration option
 
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 3ead01c0521e..16917136db52 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -7,7 +7,7 @@ name = "synapse"
 version = "0.1.0"
 edition = "2021"
 
-rust-version = "1.60.0"
+rust-version = "1.61.0"
 
 [lib]
 name = "synapse"
@@ -23,7 +23,12 @@ name = "synapse.synapse_rust"
 anyhow = "1.0.63"
 lazy_static = "1.4.0"
 log = "0.4.17"
-pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] }
+pyo3 = { version = "0.17.1", features = [
+    "macros",
+    "anyhow",
+    "abi3",
+    "abi3-py37",
+] }
 pyo3-log = "0.8.1"
 pythonize = "0.17.0"
 regex = "1.6.0"

From 36ae8611fe98977153387308311f7d38b67e39c8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Sep 2023 13:14:00 +0000
Subject: [PATCH 03/75] Bump regex from 1.9.4 to 1.9.5 (#16233)

Bumps [regex](https://github.com/rust-lang/regex) from 1.9.4 to 1.9.5.
- [Release notes](https://github.com/rust-lang/regex/releases)
- [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/regex/compare/1.9.4...1.9.5)

---
updated-dependencies:
- dependency-name: regex
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 4d60f8dcb62a..95a713e437a7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -138,9 +138,9 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
 name = "memchr"
-version = "2.5.0"
+version = "2.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c"
 
 [[package]]
 name = "memoffset"
@@ -291,9 +291,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.9.4"
+version = "1.9.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29"
+checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47"
 dependencies = [
 "aho-corasick",
 "memchr",
@@ -303,9 +303,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.3.7"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629"
+checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795"
 dependencies = [
 "aho-corasick",
 "memchr",

From ea75346f6af8c182a42d1ca29119a10361693a7b Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 5 Sep 2023 09:58:51 -0400
Subject: [PATCH 04/75] Track presence state per-device and combine to a user
 state. (#16066)

Tracks presence on an individual per-device basis and combines the
per-device state into a per-user state. This should help in situations
where a user has multiple devices with conflicting status (e.g. one is
syncing with unavailable and one is syncing with online).
The tie-breaking is done by priority: BUSY > ONLINE > UNAVAILABLE > OFFLINE --- changelog.d/16066.bugfix | 1 + changelog.d/16170.bugfix | 1 + changelog.d/16170.misc | 1 - changelog.d/16171.bugfix | 1 + changelog.d/16171.misc | 1 - changelog.d/16172.bugfix | 1 + changelog.d/16172.misc | 1 - synapse/api/presence.py | 43 ++- synapse/handlers/presence.py | 279 +++++++++++++++--- tests/handlers/test_presence.py | 500 +++++++++++++++++++++++++++++++- 10 files changed, 765 insertions(+), 64 deletions(-) create mode 100644 changelog.d/16066.bugfix create mode 100644 changelog.d/16170.bugfix delete mode 100644 changelog.d/16170.misc create mode 100644 changelog.d/16171.bugfix delete mode 100644 changelog.d/16171.misc create mode 100644 changelog.d/16172.bugfix delete mode 100644 changelog.d/16172.misc diff --git a/changelog.d/16066.bugfix b/changelog.d/16066.bugfix new file mode 100644 index 000000000000..83649cf42a4a --- /dev/null +++ b/changelog.d/16066.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16170.bugfix b/changelog.d/16170.bugfix new file mode 100644 index 000000000000..83649cf42a4a --- /dev/null +++ b/changelog.d/16170.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16170.misc b/changelog.d/16170.misc deleted file mode 100644 index c950b5436705..000000000000 --- a/changelog.d/16170.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify presence code when using workers. diff --git a/changelog.d/16171.bugfix b/changelog.d/16171.bugfix new file mode 100644 index 000000000000..83649cf42a4a --- /dev/null +++ b/changelog.d/16171.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16171.misc b/changelog.d/16171.misc deleted file mode 100644 index 4d709cb56e19..000000000000 --- a/changelog.d/16171.misc +++ /dev/null @@ -1 +0,0 @@ -Track per-device information in the presence code. diff --git a/changelog.d/16172.bugfix b/changelog.d/16172.bugfix new file mode 100644 index 000000000000..83649cf42a4a --- /dev/null +++ b/changelog.d/16172.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16172.misc b/changelog.d/16172.misc deleted file mode 100644 index 4d709cb56e19..000000000000 --- a/changelog.d/16172.misc +++ /dev/null @@ -1 +0,0 @@ -Track per-device information in the presence code. diff --git a/synapse/api/presence.py b/synapse/api/presence.py index b80aa83cb3d6..b78f41999456 100644 --- a/synapse/api/presence.py +++ b/synapse/api/presence.py @@ -20,18 +20,53 @@ from synapse.types import JsonDict +@attr.s(slots=True, auto_attribs=True) +class UserDevicePresenceState: + """ + Represents the current presence state of a user's device. + + user_id: The user ID. + device_id: The user's device ID. + state: The presence state, see PresenceState. + last_active_ts: Time in msec that the device last interacted with server. + last_sync_ts: Time in msec that the device last *completed* a sync + (or event stream). 
+    """
+
+    user_id: str
+    device_id: Optional[str]
+    state: str
+    last_active_ts: int
+    last_sync_ts: int
+
+    @classmethod
+    def default(
+        cls, user_id: str, device_id: Optional[str]
+    ) -> "UserDevicePresenceState":
+        """Returns a default presence state."""
+        return cls(
+            user_id=user_id,
+            device_id=device_id,
+            state=PresenceState.OFFLINE,
+            last_active_ts=0,
+            last_sync_ts=0,
+        )
+
+
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class UserPresenceState:
     """Represents the current presence state of the user.
 
-    user_id
-    last_active: Time in msec that the user last interacted with server.
-    last_federation_update: Time in msec since either a) we sent a presence
+    user_id: The user ID.
+    state: The presence state, see PresenceState.
+    last_active_ts: Time in msec that the user last interacted with server.
+    last_federation_update_ts: Time in msec since either a) we sent a presence
        update to other servers or b) we received a presence update, depending
        on if is a local user or not.
-    last_user_sync: Time in msec that the user last *completed* a sync
+    last_user_sync_ts: Time in msec that the user last *completed* a sync
        (or event stream).
     status_msg: User set status message.
+    currently_active: True if the user is currently syncing.
     """
 
     user_id: str
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index f31e18328bcc..80190838b799 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -13,13 +13,56 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This module is responsible for keeping track of presence status of local
+"""
+This module is responsible for keeping track of presence status of local
 and remote users.
 
 The methods that define policy are:
     - PresenceHandler._update_states
     - PresenceHandler._handle_timeouts
     - should_notify
+
+# Tracking local presence
+
+For local users, presence is tracked on a per-device basis. When a user has multiple
+devices the user presence state is derived by coalescing the presence from each
+device:
+
+    BUSY > ONLINE > UNAVAILABLE > OFFLINE
+
+The time that each device was last active and last synced is tracked in order to
+automatically downgrade a device's presence state:
+
+    A device may move from ONLINE -> UNAVAILABLE, if it has not been active for
+    a period of time.
+
+    A device may go from any state -> OFFLINE, if it is not active and has not
+    synced for a period of time.
+
+The timeouts are handled using a wheel timer, which has coarse buckets. Timings
+do not need to be exact.
+
+Generally a device's presence state is updated whenever a user syncs (via the
+set_presence parameter), when the presence API is called, or if "pro-active"
+events occur, including:
+
+* Sending an event, receipt, read marker.
+* Updating typing status.
+
+The busy state has special status in that it is not downgraded by a call to
+sync with a lower priority state *and* it takes a long period of time to transition
+to offline.
+
+# Persisting (and restoring) presence
+
+For all users, presence is persisted on a per-user basis. Data is kept in-memory
+and persisted periodically. When Synapse starts each worker loads the current
+presence state and then tracks the presence stream to keep itself up-to-date.
+
+When restoring presence for local users a pseudo-device is created to match the
+user state; this device follows the normal timeout logic (see above) and will
+automatically be replaced with any information from currently available devices.
+"""
 
 import abc
 import contextlib
@@ -30,6 +73,7 @@
 from types import TracebackType
 from typing import (
     TYPE_CHECKING,
+    AbstractSet,
     Any,
     Callable,
     Collection,
@@ -49,7 +93,7 @@
 import synapse.metrics
 from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
 from synapse.api.errors import SynapseError
-from synapse.api.presence import UserPresenceState
+from synapse.api.presence import UserDevicePresenceState, UserPresenceState
 from synapse.appservice import ApplicationService
 from synapse.events.presence_router import PresenceRouter
 from synapse.logging.context import run_in_background
@@ -162,6 +206,7 @@ def __init__(self, hs: "HomeServer"):
             self.VALID_PRESENCE += (PresenceState.BUSY,)
 
         active_presence = self.store.take_presence_startup_info()
+        # The combined status across all user devices.
         self.user_to_current_state = {
             state.user_id: state for state in active_presence
         }
 
     @abc.abstractmethod
@@ -708,9 +753,27 @@ def __init__(self, hs: "HomeServer"):
             lambda: len(self.user_to_current_state),
         )
 
+        # The per-device presence state, maps user to devices to per-device presence state.
+        self._user_to_device_to_current_state: Dict[
+            str, Dict[Optional[str], UserDevicePresenceState]
+        ] = {}
+
         now = self.clock.time_msec()
         if self._presence_enabled:
             for state in self.user_to_current_state.values():
+                # Create a pseudo-device to properly handle timeouts. This will
+                # be overridden by any "real" devices within SYNC_ONLINE_TIMEOUT.
+                pseudo_device_id = None
+                self._user_to_device_to_current_state[state.user_id] = {
+                    pseudo_device_id: UserDevicePresenceState(
+                        user_id=state.user_id,
+                        device_id=pseudo_device_id,
+                        state=state.state,
+                        last_active_ts=state.last_active_ts,
+                        last_sync_ts=state.last_user_sync_ts,
+                    )
+                }
+
                 self.wheel_timer.insert(
                     now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER
                 )
@@ -752,7 +815,7 @@ def __init__(self, hs: "HomeServer"):
         # Keeps track of the number of *ongoing* syncs on other processes.
         #
-        # While any sync is ongoing on another process the user will never
+        # While any sync is ongoing on another process the user's device will never
         # go offline.
         #
         # Each process has a unique identifier and an update frequency. If
@@ -981,22 +1044,21 @@ async def _handle_timeouts(self) -> None:
 
         timers_fired_counter.inc(len(states))
 
-        syncing_user_ids = {
-            user_id
-            for (user_id, _), count in self._user_device_to_num_current_syncs.items()
+        # Set of user ID & device IDs which are currently syncing.
+        syncing_user_devices = {
+            user_id_device_id
+            for user_id_device_id, count in self._user_device_to_num_current_syncs.items()
             if count
         }
-        syncing_user_ids.update(
-            user_id
-            for user_id, _ in itertools.chain(
-                *self.external_process_to_current_syncs.values()
-            )
+        syncing_user_devices.update(
+            itertools.chain(*self.external_process_to_current_syncs.values())
         )
 
         changes = handle_timeouts(
             states,
             is_mine_fn=self.is_mine_id,
-            syncing_user_ids=syncing_user_ids,
+            syncing_user_devices=syncing_user_devices,
+            user_to_devices=self._user_to_device_to_current_state,
             now=now,
         )
 
@@ -1016,11 +1078,26 @@ async def bump_presence_active_time(
 
         bump_active_time_counter.inc()
 
-        prev_state = await self.current_state_for_user(user_id)
+        now = self.clock.time_msec()
+
+        # Update the device information & mark the device as online if it was
+        # unavailable.
+ devices = self._user_to_device_to_current_state.setdefault(user_id, {}) + device_state = devices.setdefault( + device_id, + UserDevicePresenceState.default(user_id, device_id), + ) + device_state.last_active_ts = now + if device_state.state == PresenceState.UNAVAILABLE: + device_state.state = PresenceState.ONLINE - new_fields: Dict[str, Any] = {"last_active_ts": self.clock.time_msec()} - if prev_state.state == PresenceState.UNAVAILABLE: - new_fields["state"] = PresenceState.ONLINE + # Update the user state, this will always update last_active_ts and + # might update the presence state. + prev_state = await self.current_state_for_user(user_id) + new_fields: Dict[str, Any] = { + "last_active_ts": now, + "state": _combine_device_states(devices.values()), + } await self._update_states([prev_state.copy_and_replace(**new_fields)]) @@ -1132,6 +1209,12 @@ async def update_external_syncs_row( if is_syncing and (user_id, device_id) not in process_presence: process_presence.add((user_id, device_id)) elif not is_syncing and (user_id, device_id) in process_presence: + devices = self._user_to_device_to_current_state.setdefault(user_id, {}) + device_state = devices.setdefault( + device_id, UserDevicePresenceState.default(user_id, device_id) + ) + device_state.last_sync_ts = sync_time_msec + new_state = prev_state.copy_and_replace( last_user_sync_ts=sync_time_msec ) @@ -1151,11 +1234,24 @@ async def update_external_syncs_clear(self, process_id: str) -> None: process_presence = self.external_process_to_current_syncs.pop( process_id, set() ) - prev_states = await self.current_state_for_users( - {user_id for user_id, device_id in process_presence} - ) + time_now_ms = self.clock.time_msec() + # Mark each device as having a last sync time. + updated_users = set() + for user_id, device_id in process_presence: + device_state = self._user_to_device_to_current_state.setdefault( + user_id, {} + ).setdefault( + device_id, UserDevicePresenceState.default(user_id, device_id) + ) + + device_state.last_sync_ts = time_now_ms + updated_users.add(user_id) + + # Update each user (and insert into the appropriate timers to check if + # they've gone offline). + prev_states = await self.current_state_for_users(updated_users) await self._update_states( [ prev_state.copy_and_replace(last_user_sync_ts=time_now_ms) @@ -1277,6 +1373,20 @@ async def set_state( if prev_state.state == PresenceState.BUSY and is_sync: presence = PresenceState.BUSY + # Update the device specific information. + devices = self._user_to_device_to_current_state.setdefault(user_id, {}) + device_state = devices.setdefault( + device_id, + UserDevicePresenceState.default(user_id, device_id), + ) + device_state.state = presence + device_state.last_active_ts = now + if is_sync: + device_state.last_sync_ts = now + + # Based on the state of each user's device calculate the new presence state. 
+        presence = _combine_device_states(devices.values())
+
         new_fields = {"state": presence}
 
         if presence == PresenceState.ONLINE or presence == PresenceState.BUSY:
@@ -1873,7 +1983,8 @@ def get_current_key(self) -> int:
 
 def handle_timeouts(
     user_states: List[UserPresenceState],
     is_mine_fn: Callable[[str], bool],
-    syncing_user_ids: Set[str],
+    syncing_user_devices: AbstractSet[Tuple[str, Optional[str]]],
+    user_to_devices: Dict[str, Dict[Optional[str], UserDevicePresenceState]],
     now: int,
 ) -> List[UserPresenceState]:
     """Checks the presence of users that have timed out and updates as
@@ -1882,7 +1993,8 @@ def handle_timeouts(
     Args:
         user_states: List of UserPresenceState's to check.
         is_mine_fn: Function that returns if a user_id is ours
-        syncing_user_ids: Set of user_ids with active syncs.
+        syncing_user_devices: A set of (user ID, device ID) tuples with active syncs.
+        user_to_devices: A map of user ID to device ID to UserDevicePresenceState.
         now: Current time in ms.
 
     Returns:
@@ -1891,9 +2003,16 @@
     changes = {}  # Actual changes we need to notify people about
 
     for state in user_states:
-        is_mine = is_mine_fn(state.user_id)
-
-        new_state = handle_timeout(state, is_mine, syncing_user_ids, now)
+        user_id = state.user_id
+        is_mine = is_mine_fn(user_id)
+
+        new_state = handle_timeout(
+            state,
+            is_mine,
+            syncing_user_devices,
+            user_to_devices.get(user_id, {}),
+            now,
+        )
         if new_state:
             changes[state.user_id] = new_state
 
@@ -1901,14 +2020,19 @@
 
 def handle_timeout(
-    state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int
+    state: UserPresenceState,
+    is_mine: bool,
+    syncing_device_ids: AbstractSet[Tuple[str, Optional[str]]],
+    user_devices: Dict[Optional[str], UserDevicePresenceState],
+    now: int,
 ) -> Optional[UserPresenceState]:
     """Checks the presence of the user to see if any of the timers have elapsed
 
     Args:
-        state
+        state: UserPresenceState to check.
         is_mine: Whether the user is ours
-        syncing_user_ids: Set of user_ids with active syncs.
+        syncing_device_ids: A set of (user ID, device ID) tuples with active syncs.
+        user_devices: A map of device ID to UserDevicePresenceState.
         now: Current time in ms.
 
     Returns:
@@ -1919,34 +2043,55 @@
         return None
 
     changed = False
-    user_id = state.user_id
 
     if is_mine:
-        if state.state == PresenceState.ONLINE:
-            if now - state.last_active_ts > IDLE_TIMER:
-                # Currently online, but last activity ages ago so auto
-                # idle
-                state = state.copy_and_replace(state=PresenceState.UNAVAILABLE)
-                changed = True
-            elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY:
-                # So that we send down a notification that we've
-                # stopped updating.
+        # Check per-device whether the device should be considered idle or offline
+        # due to timeouts.
+        device_changed = False
+        offline_devices = []
+        for device_id, device_state in user_devices.items():
+            if device_state.state == PresenceState.ONLINE:
+                if now - device_state.last_active_ts > IDLE_TIMER:
+                    # Currently online, but last activity ages ago so auto
+                    # idle
+                    device_state.state = PresenceState.UNAVAILABLE
+                    device_changed = True
+
+            # If there have been no syncs for a while (and none ongoing),
+            # set presence to offline.
+            if (state.user_id, device_id) not in syncing_device_ids:
+                # If the user has done something recently but hasn't synced,
+                # don't set them as offline.
+ sync_or_active = max( + device_state.last_sync_ts, device_state.last_active_ts + ) + + if now - sync_or_active > SYNC_ONLINE_TIMEOUT: + # Mark the device as going offline. + offline_devices.append(device_id) + device_changed = True + + # Offline devices are not needed and do not add information. + for device_id in offline_devices: + user_devices.pop(device_id) + + # If the presence state of the devices changed, then (maybe) update + # the user's overall presence state. + if device_changed: + new_presence = _combine_device_states(user_devices.values()) + if new_presence != state.state: + state = state.copy_and_replace(state=new_presence) changed = True + if now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: + # So that we send down a notification that we've + # stopped updating. + changed = True + if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL: # Need to send ping to other servers to ensure they don't # timeout and set us to offline changed = True - - # If there are have been no sync for a while (and none ongoing), - # set presence to offline - if user_id not in syncing_user_ids: - # If the user has done something recently but hasn't synced, - # don't set them as offline. - sync_or_active = max(state.last_user_sync_ts, state.last_active_ts) - if now - sync_or_active > SYNC_ONLINE_TIMEOUT: - state = state.copy_and_replace(state=PresenceState.OFFLINE) - changed = True else: # We expect to be poked occasionally by the other side. # This is to protect against forgetful/buggy servers, so that @@ -2036,6 +2181,46 @@ def handle_update( return new_state, persist_and_notify, federation_ping +PRESENCE_BY_PRIORITY = { + PresenceState.BUSY: 4, + PresenceState.ONLINE: 3, + PresenceState.UNAVAILABLE: 2, + PresenceState.OFFLINE: 1, +} + + +def _combine_device_states( + device_states: Iterable[UserDevicePresenceState], +) -> str: + """ + Find the device to use presence information from. + + Orders devices by priority, then last_active_ts. + + Args: + device_states: An iterable of device presence states + + Return: + The combined presence state. + """ + + # Based on (all) the user's devices calculate the new presence state. + presence = PresenceState.OFFLINE + last_active_ts = -1 + + # Find the device to use the presence state of based on the presence priority, + # but tie-break with how recently the device has been seen. 
+ for device_state in device_states: + if (PRESENCE_BY_PRIORITY[device_state.state], device_state.last_active_ts) > ( + PRESENCE_BY_PRIORITY[presence], + last_active_ts, + ): + presence = device_state.state + last_active_ts = device_state.last_active_ts + + return presence + + async def get_interested_parties( store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState] ) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]: diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 88a16193a3b6..914415740a74 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -21,7 +21,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, Membership, PresenceState -from synapse.api.presence import UserPresenceState +from synapse.api.presence import UserDevicePresenceState, UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events.builder import EventBuilder from synapse.federation.sender import FederationSender @@ -352,6 +352,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): def test_idle_timer(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -362,8 +363,21 @@ def test_idle_timer(self) -> None: last_user_sync_ts=now, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) assert new_state is not None @@ -376,6 +390,7 @@ def test_busy_no_idle(self) -> None: presence state into unavailable. """ user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -386,8 +401,21 @@ def test_busy_no_idle(self) -> None: last_user_sync_ts=now, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) assert new_state is not None @@ -396,6 +424,7 @@ def test_busy_no_idle(self) -> None: def test_sync_timeout(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" 
now = 5000000 @@ -406,8 +435,21 @@ def test_sync_timeout(self) -> None: last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) assert new_state is not None @@ -416,6 +458,7 @@ def test_sync_timeout(self) -> None: def test_sync_online(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -426,9 +469,20 @@ def test_sync_online(self) -> None: last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) new_state = handle_timeout( - state, is_mine=True, syncing_user_ids={user_id}, now=now + state, + is_mine=True, + syncing_device_ids={(user_id, device_id)}, + user_devices={device_id: device_state}, + now=now, ) self.assertIsNotNone(new_state) @@ -438,6 +492,7 @@ def test_sync_online(self) -> None: def test_federation_ping(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -449,14 +504,28 @@ def test_federation_ping(self) -> None: last_federation_update_ts=now - FEDERATION_PING_INTERVAL - 1, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) self.assertEqual(state, new_state) def test_no_timeout(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" now = 5000000 state = UserPresenceState.default(user_id) @@ -466,8 +535,21 @@ def test_no_timeout(self) -> None: last_user_sync_ts=now, last_federation_update_ts=now, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNone(new_state) @@ -485,8 +567,9 @@ def test_federation_timeout(self) -> None: status_msg=status_msg, ) + # Note that this is a remote user so we do not have their device information. new_state = handle_timeout( - state, is_mine=False, syncing_user_ids=set(), now=now + state, is_mine=False, syncing_device_ids=set(), user_devices={}, now=now ) self.assertIsNotNone(new_state) @@ -496,6 +579,7 @@ def test_federation_timeout(self) -> None: def test_last_active(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" 
now = 5000000 @@ -507,8 +591,21 @@ def test_last_active(self) -> None: last_federation_update_ts=now, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) self.assertEqual(state, new_state) @@ -579,7 +676,7 @@ def test_restored_presence_idles(self) -> None: [ (PresenceState.BUSY, PresenceState.BUSY), (PresenceState.ONLINE, PresenceState.ONLINE), - (PresenceState.UNAVAILABLE, PresenceState.UNAVAILABLE), + (PresenceState.UNAVAILABLE, PresenceState.ONLINE), # Offline syncs don't update the state. (PresenceState.OFFLINE, PresenceState.ONLINE), ] @@ -800,6 +897,389 @@ def test_set_presence_from_syncing_is_set(self) -> None: # we should now be online self.assertEqual(state.state, PresenceState.ONLINE) + @parameterized.expand( + # A list of tuples of 4 strings: + # + # * The presence state of device 1. + # * The presence state of device 2. + # * The expected user presence state after both devices have synced. + # * The expected user presence state after device 1 has idled. + # * The expected user presence state after device 2 has idled. + # * True to use workers, False a monolith. + [ + (*cases, workers) + for workers in (False, True) + for cases in [ + # If both devices have the same state, online should eventually idle. + # Otherwise, the state doesn't change. + ( + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + ), + # If the second device has a "lower" state it should fallback to it. + ( + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.ONLINE, + PresenceState.OFFLINE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + # If the second device has a "higher" state it should override. 
+                (
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.UNAVAILABLE,
+                ),
+                (
+                    PresenceState.OFFLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.UNAVAILABLE,
+                ),
+                (
+                    PresenceState.OFFLINE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                ),
+            ]
+        ],
+        name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}",
+    )
+    @unittest.override_config({"experimental_features": {"msc3026_enabled": True}})
+    def test_set_presence_from_syncing_multi_device(
+        self,
+        dev_1_state: str,
+        dev_2_state: str,
+        expected_state_1: str,
+        expected_state_2: str,
+        expected_state_3: str,
+        test_with_workers: bool,
+    ) -> None:
+        """
+        Test the behaviour of multiple devices syncing at the same time.
+
+        Roughly the user's presence state should be set to the "highest" priority
+        of all the devices. When a device then goes offline its state should be
+        discarded and the next highest should win.
+
+        Note that these tests use the idle timer (and don't close the syncs); it
+        is unlikely that a *single* sync would last this long, but it is close enough
+        to continually syncing with that current state.
+        """
+        user_id = f"@test:{self.hs.config.server.server_name}"
+
+        # By default, we call /sync against the main process.
+        worker_presence_handler = self.presence_handler
+        if test_with_workers:
+            # Create a worker and use it to handle /sync traffic instead.
+            # This is used to test that presence changes get replicated from workers
+            # to the main process correctly.
+            worker_to_sync_against = self.make_worker_hs(
+                "synapse.app.generic_worker", {"worker_name": "synchrotron"}
+            )
+            worker_presence_handler = worker_to_sync_against.get_presence_handler()
+
+        # 1. Sync with the first device.
+        self.get_success(
+            worker_presence_handler.user_syncing(
+                user_id,
+                "dev-1",
+                affect_presence=dev_1_state != PresenceState.OFFLINE,
+                presence_state=dev_1_state,
+            ),
+            by=0.01,
+        )
+
+        # 2. Wait half the idle timer.
+        self.reactor.advance(IDLE_TIMER / 1000 / 2)
+        self.reactor.pump([0.1])
+
+        # 3. Sync with the second device.
+        self.get_success(
+            worker_presence_handler.user_syncing(
+                user_id,
+                "dev-2",
+                affect_presence=dev_2_state != PresenceState.OFFLINE,
+                presence_state=dev_2_state,
+            ),
+            by=0.01,
+        )
+
+        # 4. Assert the expected presence state.
+        state = self.get_success(
+            self.presence_handler.get_state(UserID.from_string(user_id))
+        )
+        self.assertEqual(state.state, expected_state_1)
+        if test_with_workers:
+            state = self.get_success(
+                worker_presence_handler.get_state(UserID.from_string(user_id))
+            )
+            self.assertEqual(state.state, expected_state_1)
+
+        # When testing with workers, make another random sync (with any *different*
+        # user) to keep the process information from expiring.
+        #
+        # This is due to EXTERNAL_PROCESS_EXPIRY being equivalent to IDLE_TIMER.
+        if test_with_workers:
+            with self.get_success(
+                worker_presence_handler.user_syncing(
+                    f"@other-user:{self.hs.config.server.server_name}",
+                    "dev-3",
+                    affect_presence=True,
+                    presence_state=PresenceState.ONLINE,
+                ),
+                by=0.01,
+            ):
+                pass
+
+        # 5. Advance such that the first device should be discarded (the idle timer),
+        # then pump so that the _handle_timeouts function is called.
+        self.reactor.advance(IDLE_TIMER / 1000 / 2)
+        self.reactor.pump([0.01])
+
+        # 6. Assert the expected presence state.
+        state = self.get_success(
+            self.presence_handler.get_state(UserID.from_string(user_id))
+        )
+        self.assertEqual(state.state, expected_state_2)
+        if test_with_workers:
+            state = self.get_success(
+                worker_presence_handler.get_state(UserID.from_string(user_id))
+            )
+            self.assertEqual(state.state, expected_state_2)
+
+        # 7. Advance such that the second device should be discarded (half the idle timer),
+        # then pump so that the _handle_timeouts function is called.
+        self.reactor.advance(IDLE_TIMER / 1000 / 2)
+        self.reactor.pump([0.1])
+
+        # 8. The devices are still "syncing" (the sync context managers were never
+        # closed), so might idle.
+        state = self.get_success(
+            self.presence_handler.get_state(UserID.from_string(user_id))
+        )
+        self.assertEqual(state.state, expected_state_3)
+        if test_with_workers:
+            state = self.get_success(
+                worker_presence_handler.get_state(UserID.from_string(user_id))
+            )
+            self.assertEqual(state.state, expected_state_3)
+
+    @parameterized.expand(
+        # A list of tuples of 4 strings:
+        #
+        # * The presence state of device 1.
+        # * The presence state of device 2.
+        # * The expected user presence state after both devices have synced.
+        # * The expected user presence state after device 1 has stopped syncing.
+        # * True to use workers, False a monolith.
+        [
+            (*cases, workers)
+            for workers in (False, True)
+            for cases in [
+                # If both devices have the same state, nothing exciting should happen.
+                (
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                ),
+                (
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                ),
+                (
+                    PresenceState.OFFLINE,
+                    PresenceState.OFFLINE,
+                    PresenceState.OFFLINE,
+                    PresenceState.OFFLINE,
+                ),
+                # If the second device has a "lower" state it should fallback to it.
+                (
+                    PresenceState.ONLINE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.ONLINE,
+                    PresenceState.UNAVAILABLE,
+                ),
+                (
+                    PresenceState.ONLINE,
+                    PresenceState.OFFLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.OFFLINE,
+                ),
+                (
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.OFFLINE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.OFFLINE,
+                ),
+                # If the second device has a "higher" state it should override.
+                (
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                ),
+                (
+                    PresenceState.OFFLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                    PresenceState.ONLINE,
+                ),
+                (
+                    PresenceState.OFFLINE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                    PresenceState.UNAVAILABLE,
+                ),
+            ]
+        ],
+        name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}",
+    )
+    @unittest.override_config({"experimental_features": {"msc3026_enabled": True}})
+    def test_set_presence_from_non_syncing_multi_device(
+        self,
+        dev_1_state: str,
+        dev_2_state: str,
+        expected_state_1: str,
+        expected_state_2: str,
+        test_with_workers: bool,
+    ) -> None:
+        """
+        Test the behaviour of multiple devices syncing at the same time.
+
+        Roughly the user's presence state should be set to the "highest" priority
+        of all the devices. When a device then goes offline its state should be
+        discarded and the next highest should win.
+
+        Note that these tests use the idle timer (and don't close the syncs); it
+        is unlikely that a *single* sync would last this long, but it is close enough
+        to continually syncing with that current state.
+        """
+        user_id = f"@test:{self.hs.config.server.server_name}"
+
+        # By default, we call /sync against the main process.
+        worker_presence_handler = self.presence_handler
+        if test_with_workers:
+            # Create a worker and use it to handle /sync traffic instead.
+            # This is used to test that presence changes get replicated from workers
+            # to the main process correctly.
+            worker_to_sync_against = self.make_worker_hs(
+                "synapse.app.generic_worker", {"worker_name": "synchrotron"}
+            )
+            worker_presence_handler = worker_to_sync_against.get_presence_handler()
+
+        # 1. Sync with the first device.
+        sync_1 = self.get_success(
+            worker_presence_handler.user_syncing(
+                user_id,
+                "dev-1",
+                affect_presence=dev_1_state != PresenceState.OFFLINE,
+                presence_state=dev_1_state,
+            ),
+            by=0.1,
+        )
+
+        # 2. Sync with the second device.
+        sync_2 = self.get_success(
+            worker_presence_handler.user_syncing(
+                user_id,
+                "dev-2",
+                affect_presence=dev_2_state != PresenceState.OFFLINE,
+                presence_state=dev_2_state,
+            ),
+            by=0.1,
+        )
+
+        # 3. Assert the expected presence state.
+        state = self.get_success(
+            self.presence_handler.get_state(UserID.from_string(user_id))
+        )
+        self.assertEqual(state.state, expected_state_1)
+        if test_with_workers:
+            state = self.get_success(
+                worker_presence_handler.get_state(UserID.from_string(user_id))
+            )
+            self.assertEqual(state.state, expected_state_1)
+
+        # 4. Disconnect the first device.
+        with sync_1:
+            pass
+
+        # 5. Advance such that the first device should be discarded (the sync timeout),
+        # then pump so that the _handle_timeouts function is called.
+        self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000)
+        self.reactor.pump([5])
+
+        # 6. Assert the expected presence state.
+        state = self.get_success(
+            self.presence_handler.get_state(UserID.from_string(user_id))
+        )
+        self.assertEqual(state.state, expected_state_2)
+        if test_with_workers:
+            state = self.get_success(
+                worker_presence_handler.get_state(UserID.from_string(user_id))
+            )
+            self.assertEqual(state.state, expected_state_2)
+
+        # 7. Disconnect the second device.
+        with sync_2:
+            pass
+
+        # 8. Advance such that the second device should be discarded (the sync timeout),
+        # then pump so that the _handle_timeouts function is called.
+        self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000)
+        self.reactor.pump([5])
+
+        # 9. There are no more devices, should be offline.
+        state = self.get_success(
+            self.presence_handler.get_state(UserID.from_string(user_id))
+        )
+        self.assertEqual(state.state, PresenceState.OFFLINE)
+        if test_with_workers:
+            state = self.get_success(
+                worker_presence_handler.get_state(UserID.from_string(user_id))
+            )
+            self.assertEqual(state.state, PresenceState.OFFLINE)
+
     def test_set_presence_from_syncing_keeps_status(self) -> None:
         """Test that presence set by syncing retains status message"""
         status_msg = "I'm here!"

From 8b5013dcbc5db16f0f771898da493e812be6fc8a Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 5 Sep 2023 10:39:38 -0400
Subject: [PATCH 05/75] Time out busy presence status & test multi-device busy
 (#16174)

Add a (long) timeout after which a "busy" device is considered not online.
This does *not* match MSC3026, but is a reasonable thing for an
implementation to do.

Expands tests for the (unstable) busy presence with multiple devices.
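To make the combined behaviour concrete, the following is a minimal editorial sketch of the coalescing-plus-timeout rule described above. It assumes a simplified DeviceState stand-in for UserDevicePresenceState and lowercase strings in place of the PresenceState constants; the timeout values mirror synapse/handlers/presence.py, but this is an illustration of the rule, not the handler's actual code path:

from dataclasses import dataclass
from typing import Iterable, Optional

# Constants mirror synapse/handlers/presence.py (values in msec).
SYNC_ONLINE_TIMEOUT = 30 * 1000
BUSY_ONLINE_TIMEOUT = 60 * 60 * 1000

# Higher value wins when coalescing per-device states into a user state.
PRESENCE_BY_PRIORITY = {"busy": 4, "online": 3, "unavailable": 2, "offline": 1}


@dataclass
class DeviceState:
    # Simplified stand-in for UserDevicePresenceState.
    state: str
    last_active_ts: int
    last_sync_ts: int


def _online_timeout(device: DeviceState) -> int:
    # A "busy" device is kept online much longer before being timed out.
    return BUSY_ONLINE_TIMEOUT if device.state == "busy" else SYNC_ONLINE_TIMEOUT


def combine_device_states(devices: Iterable[DeviceState], now: int) -> str:
    """Coalesce per-device states, discarding devices that have timed out."""
    live = [
        d
        for d in devices
        if now - max(d.last_sync_ts, d.last_active_ts) <= _online_timeout(d)
    ]
    # Highest priority wins; ties are broken by the most recent activity.
    best: Optional[DeviceState] = max(
        live,
        key=lambda d: (PRESENCE_BY_PRIORITY[d.state], d.last_active_ts),
        default=None,
    )
    return best.state if best is not None else "offline"


if __name__ == "__main__":
    now = 10_000_000
    devices = [
        DeviceState("unavailable", last_active_ts=now - 1_000, last_sync_ts=now),
        DeviceState("busy", last_active_ts=now - 60_000, last_sync_ts=now - 60_000),
    ]
    print(combine_device_states(devices, now))  # "busy": highest priority wins
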
--- changelog.d/16174.bugfix | 1 + synapse/handlers/presence.py | 19 +++++- tests/handlers/test_presence.py | 104 +++++++++++++++++++++++++++++++- 3 files changed, 120 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16174.bugfix diff --git a/changelog.d/16174.bugfix b/changelog.d/16174.bugfix new file mode 100644 index 000000000000..83649cf42a4a --- /dev/null +++ b/changelog.d/16174.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 80190838b799..a4b05b72e7cb 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -155,6 +155,8 @@ # How long to wait until a new /events or /sync request before assuming # the client has gone. SYNC_ONLINE_TIMEOUT = 30 * 1000 +# Busy status waits longer, but does eventually go offline. +BUSY_ONLINE_TIMEOUT = 60 * 60 * 1000 # How long to wait before marking the user as idle. Compared against last active IDLE_TIMER = 5 * 60 * 1000 @@ -2066,7 +2068,15 @@ def handle_timeout( device_state.last_sync_ts, device_state.last_active_ts ) - if now - sync_or_active > SYNC_ONLINE_TIMEOUT: + # Implementations aren't meant to timeout a device with a busy + # state, but it needs to timeout *eventually* or else the user + # will be stuck in that state. + online_timeout = ( + BUSY_ONLINE_TIMEOUT + if device_state.state == PresenceState.BUSY + else SYNC_ONLINE_TIMEOUT + ) + if now - sync_or_active > online_timeout: # Mark the device as going offline. offline_devices.append(device_id) device_changed = True @@ -2166,6 +2176,13 @@ def handle_update( new_state = new_state.copy_and_replace(last_federation_update_ts=now) federation_ping = True + if new_state.state == PresenceState.BUSY: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + BUSY_ONLINE_TIMEOUT, + ) + else: wheel_timer.insert( now=now, diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 914415740a74..638787b029b8 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -26,6 +26,7 @@ from synapse.events.builder import EventBuilder from synapse.federation.sender import FederationSender from synapse.handlers.presence import ( + BUSY_ONLINE_TIMEOUT, EXTERNAL_PROCESS_EXPIRY, FEDERATION_PING_INTERVAL, FEDERATION_TIMEOUT, @@ -912,6 +913,13 @@ def test_set_presence_from_syncing_is_set(self) -> None: for cases in [ # If both devices have the same state, online should eventually idle. # Otherwise, the state doesn't change. + ( + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.ONLINE, PresenceState.ONLINE, @@ -933,7 +941,29 @@ def test_set_presence_from_syncing_is_set(self) -> None: PresenceState.OFFLINE, PresenceState.OFFLINE, ), - # If the second device has a "lower" state it should fallback to it. + # If the second device has a "lower" state it should fallback to it, + # except for "busy" which overrides. 
+            (
+                PresenceState.BUSY,
+                PresenceState.ONLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.BUSY,
+                PresenceState.UNAVAILABLE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.BUSY,
+                PresenceState.OFFLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
             (
                 PresenceState.ONLINE,
                 PresenceState.UNAVAILABLE,
@@ -956,6 +986,27 @@ def test_set_presence_from_syncing_is_set(self) -> None:
                 PresenceState.UNAVAILABLE,
             ),
             # If the second device has a "higher" state it should override.
+            (
+                PresenceState.ONLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.UNAVAILABLE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.OFFLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
             (
                 PresenceState.UNAVAILABLE,
                 PresenceState.ONLINE,
@@ -1114,6 +1165,12 @@ def test_set_presence_from_syncing_multi_device(
         for workers in (False, True)
         for cases in [
             # If both devices have the same state, nothing exciting should happen.
+            (
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
             (
                 PresenceState.ONLINE,
                 PresenceState.ONLINE,
@@ -1132,7 +1189,26 @@ def test_set_presence_from_syncing_multi_device(
                 PresenceState.OFFLINE,
                 PresenceState.OFFLINE,
             ),
-            # If the second device has a "lower" state it should fallback to it.
+            # If the second device has a "lower" state it should fallback to it,
+            # except for "busy" which overrides.
+            (
+                PresenceState.BUSY,
+                PresenceState.ONLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.BUSY,
+                PresenceState.UNAVAILABLE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.BUSY,
+                PresenceState.OFFLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
             (
                 PresenceState.ONLINE,
                 PresenceState.UNAVAILABLE,
@@ -1152,6 +1228,24 @@ def test_set_presence_from_syncing_multi_device(
                 PresenceState.OFFLINE,
             ),
             # If the second device has a "higher" state it should override.
+            (
+                PresenceState.ONLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.UNAVAILABLE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
+            (
+                PresenceState.OFFLINE,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+                PresenceState.BUSY,
+            ),
             (
                 PresenceState.UNAVAILABLE,
                 PresenceState.ONLINE,
@@ -1266,7 +1360,11 @@ def test_set_presence_from_non_syncing_multi_device(
         # 8. Advance such that the second device should be discarded (the sync timeout),
         # then pump so that the _handle_timeouts function gets called.
-        self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000)
+        if dev_1_state == PresenceState.BUSY or dev_2_state == PresenceState.BUSY:
+            timeout = BUSY_ONLINE_TIMEOUT
+        else:
+            timeout = SYNC_ONLINE_TIMEOUT
+        self.reactor.advance(timeout / 1000)
         self.reactor.pump([5])

         # 9. There are no more devices; the user should now be offline.

From c9cec2daed00406b5337a8ce7064e3394ceaf656 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Sep 2023 20:27:41 +0100
Subject: [PATCH 06/75] Fix bug where we kept re-requesting a remote server's key repeatedly.
(#16257) * Correctly handle multiple rows per server/key * Newsfile --- changelog.d/16257.bugfix | 1 + synapse/storage/databases/main/keys.py | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16257.bugfix diff --git a/changelog.d/16257.bugfix b/changelog.d/16257.bugfix new file mode 100644 index 000000000000..28a53197493c --- /dev/null +++ b/changelog.d/16257.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index a3b4744855d7..57aa4921e150 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -221,12 +221,17 @@ def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: """Processes a batch of keys to fetch, and adds the result to `keys`.""" # batch_iter always returns tuples so it's safe to do len(batch) - sql = """ - SELECT server_name, key_id, key_json, ts_valid_until_ms - FROM server_keys_json WHERE 1=0 - """ + " OR (server_name=? AND key_id=?)" * len( - batch - ) + where_clause = " OR (server_name=? AND key_id=?)" * len(batch) + + # `server_keys_json` can have multiple entries per server (one per + # remote server we fetched from, if using perspectives). Order by + # `ts_added_ms` so the most recently fetched one always wins. + sql = f""" + SELECT server_name, key_id, key_json, ts_valid_until_ms + FROM server_keys_json WHERE 1=0 + {where_clause} + ORDER BY ts_added_ms + """ txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) From b1d71c687ae55ce67e4cfc82c475e61f959dfeb0 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 5 Sep 2023 13:45:39 -0600 Subject: [PATCH 07/75] Add MSC4040 `matrix-fed` service lookups (#16137) --- changelog.d/16137.feature | 1 + scripts-dev/federation_client.py | 12 + .../federation/matrix_federation_agent.py | 29 +- .../test_matrix_federation_agent.py | 323 ++++++++++++++++-- 4 files changed, 331 insertions(+), 34 deletions(-) create mode 100644 changelog.d/16137.feature diff --git a/changelog.d/16137.feature b/changelog.d/16137.feature new file mode 100644 index 000000000000..bba6f161cdcf --- /dev/null +++ b/changelog.d/16137.feature @@ -0,0 +1 @@ +Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). 
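For orientation before the diffs that follow: the resolution order this
patch implements tries the new MSC4040 service name first and only falls
back to the legacy name when the new record is absent. A rough sketch of
that order (illustrative only; lookup_srv is a hypothetical placeholder
for the real asynchronous SRV resolver, and the actual agent also handles
.well-known delegation before any SRV lookup):

    from typing import List, Tuple

    def lookup_srv(service: str) -> List[Tuple[str, int]]:
        # Placeholder for a real SRV resolver; returns (host, port) targets,
        # or [] when no record exists.
        return []

    def resolve_federation_server(server_name: str) -> List[Tuple[str, int]]:
        targets = lookup_srv(f"_matrix-fed._tcp.{server_name}")  # MSC4040, Matrix 1.8
        if targets:
            return targets
        targets = lookup_srv(f"_matrix._tcp.{server_name}")  # deprecated service name
        if targets:
            return targets
        return [(server_name, 8448)]  # final fallback

Note that the legacy name is only consulted when the _matrix-fed lookup
returns no records: as test_srv_no_fallback_to_legacy below exercises, a
failed connection to a _matrix-fed target does not cause the legacy
record to be retried.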
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 5ad334b4d800..e8baeac5e2cc 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -329,6 +329,17 @@ def _lookup(server_name: str) -> Tuple[str, int, str]: raise ValueError("Invalid host:port '%s'" % (server_name,)) return out[0], port, out[0] + # Look up SRV for Matrix 1.8 `matrix-fed` service first + try: + srv = srvlookup.lookup("matrix-fed", "tcp", server_name)[0] + print( + f"SRV lookup on _matrix-fed._tcp.{server_name} gave {srv}", + file=sys.stderr, + ) + return srv.host, srv.port, server_name + except Exception: + pass + # Fall back to deprecated `matrix` service try: srv = srvlookup.lookup("matrix", "tcp", server_name)[0] print( @@ -337,6 +348,7 @@ def _lookup(server_name: str) -> Tuple[str, int, str]: ) return srv.host, srv.port, server_name except Exception: + # Fall even further back to just port 8448 return server_name, 8448, server_name @staticmethod diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 91a24efcd019..a3a396bb373e 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -399,15 +399,34 @@ async def _resolve_server(self) -> List[Server]: if port or _is_ip_literal(host): return [Server(host, port or 8448)] + # Check _matrix-fed._tcp SRV record. logger.debug("Looking up SRV record for %s", host.decode(errors="replace")) + server_list = await self._srv_resolver.resolve_service( + b"_matrix-fed._tcp." + host + ) + + if server_list: + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + "Got %s from SRV lookup for %s", + ", ".join(map(str, server_list)), + host.decode(errors="replace"), + ) + return server_list + + # No _matrix-fed._tcp SRV record, fallback to legacy _matrix._tcp SRV record. + logger.debug( + "Looking up deprecated SRV record for %s", host.decode(errors="replace") + ) server_list = await self._srv_resolver.resolve_service(b"_matrix._tcp." + host) if server_list: - logger.debug( - "Got %s from SRV lookup for %s", - ", ".join(map(str, server_list)), - host.decode(errors="replace"), - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + "Got %s from deprecated SRV lookup for %s", + ", ".join(map(str, server_list)), + host.decode(errors="replace"), + ) return server_list # No SRV records, so we fallback to host and 8448 diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 0d17f2fe5be4..9f63fa6fa815 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -15,7 +15,7 @@ import logging import os from typing import Generator, List, Optional, cast -from unittest.mock import AsyncMock, patch +from unittest.mock import AsyncMock, call, patch import treq from netaddr import IPSet @@ -651,9 +651,9 @@ def test_get_hostname_bad_cert(self) -> None: # .well-known request fails. self.reactor.pump((0.4,)) - # now there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv1" + # now there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv1"), call(b"_matrix._tcp.testserv1")] ) # we should fall back to a direct connection @@ -737,9 +737,9 @@ def test_get_no_srv_no_well_known(self) -> None: # .well-known request fails. 
self.reactor.pump((0.4,)) - # now there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + # now there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) # we should fall back to a direct connection @@ -788,9 +788,12 @@ def test_get_well_known(self) -> None: content=b'{ "m.server": "target-server" }', ) - # there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server" + # there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.target-server"), + call(b"_matrix._tcp.target-server"), + ] ) # now we should get a connection to the target server @@ -878,9 +881,12 @@ def test_get_well_known_redirect(self) -> None: self.reactor.pump((0.1,)) - # there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server" + # there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.target-server"), + call(b"_matrix._tcp.target-server"), + ] ) # now we should get a connection to the target server @@ -942,9 +948,9 @@ def test_get_invalid_well_known(self) -> None: client_factory, expected_sni=b"testserv", content=b"NOT JSON" ) - # now there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + # now there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) # we should fall back to a direct connection @@ -1016,14 +1022,14 @@ def test_get_well_known_unsigned_cert(self) -> None: # there should be no requests self.assertEqual(len(http_proto.requests), 0) - # and there should be a SRV lookup instead - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + # and there should be two SRV lookups instead + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) def test_get_hostname_srv(self) -> None: """ - Test the behaviour when there is a single SRV record + Test the behaviour when there is a single SRV record for _matrix-fed. """ self.agent = self._make_agent() @@ -1039,7 +1045,51 @@ def test_get_hostname_srv(self) -> None: # the request for a .well-known will have failed with a DNS lookup error. 
self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + b"_matrix-fed._tcp.testserv" + ) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=b"testserv") + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"]) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_get_hostname_srv_legacy(self) -> None: + """ + Test the behaviour when there is a single SRV record for _matrix. + """ + self.agent = self._make_agent() + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. + self.mock_resolver.resolve_service.side_effect = [ + [], + [Server(host=b"srvtarget", port=8443)], + ] + self.reactor.lookups["srvtarget"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # the request for a .well-known will have failed with a DNS lookup error. + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) # Make sure treq is trying to connect @@ -1065,7 +1115,7 @@ def test_get_hostname_srv(self) -> None: def test_get_well_known_srv(self) -> None: """Test the behaviour when the .well-known redirects to a place where there - is a SRV. + is a _matrix-fed SRV record. """ self.agent = self._make_agent() @@ -1096,7 +1146,72 @@ def test_get_well_known_srv(self) -> None: # there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server" + b"_matrix-fed._tcp.target-server" + ) + + # now we should get a connection to the target of the SRV record + self.assertEqual(len(clients), 2) + (host, port, client_factory, _timeout, _bindAddress) = clients[1] + self.assertEqual(host, "5.6.7.8") + self.assertEqual(port, 8443) + + # make a test server, and wire up the client + http_server = self._make_connection( + client_factory, expected_sni=b"target-server" + ) + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual( + request.requestHeaders.getRawHeaders(b"host"), [b"target-server"] + ) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_get_well_known_srv_legacy(self) -> None: + """Test the behaviour when the .well-known redirects to a place where there + is a _matrix SRV record. 
+ """ + self.agent = self._make_agent() + + self.reactor.lookups["testserv"] = "1.2.3.4" + self.reactor.lookups["srvtarget"] = "5.6.7.8" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # there should be an attempt to connect on port 443 for the .well-known + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 443) + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. + self.mock_resolver.resolve_service.side_effect = [ + [], + [Server(host=b"srvtarget", port=8443)], + ] + + self._handle_well_known_connection( + client_factory, + expected_sni=b"testserv", + content=b'{ "m.server": "target-server" }', + ) + + # there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.target-server"), + call(b"_matrix._tcp.target-server"), + ] ) # now we should get a connection to the target of the SRV record @@ -1158,8 +1273,11 @@ def test_idna_servername(self) -> None: self.reactor.pump((0.4,)) # now there should have been a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.xn--bcher-kva.com" + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.xn--bcher-kva.com"), + call(b"_matrix._tcp.xn--bcher-kva.com"), + ] ) # We should fall back to port 8448 @@ -1188,7 +1306,7 @@ def test_idna_servername(self) -> None: self.successResultOf(test_d) def test_idna_srv_target(self) -> None: - """test the behaviour when the target of a SRV record has idna chars""" + """test the behaviour when the target of a _matrix-fed SRV record has idna chars""" self.agent = self._make_agent() self.mock_resolver.resolve_service.return_value = [ @@ -1204,7 +1322,57 @@ def test_idna_srv_target(self) -> None: self.assertNoResult(test_d) self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.xn--bcher-kva.com" + b"_matrix-fed._tcp.xn--bcher-kva.com" + ) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # make a test server, and wire up the client + http_server = self._make_connection( + client_factory, expected_sni=b"xn--bcher-kva.com" + ) + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual( + request.requestHeaders.getRawHeaders(b"host"), [b"xn--bcher-kva.com"] + ) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_idna_srv_target_legacy(self) -> None: + """test the behaviour when the target of a _matrix SRV record has idna chars""" + self.agent = self._make_agent() + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. 
+        self.mock_resolver.resolve_service.side_effect = [
+            [],
+            [Server(host=b"xn--trget-3qa.com", port=8443)],
+        ]  # târget.com
+        self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4"
+
+        test_d = self._make_get_request(
+            b"matrix-federation://xn--bcher-kva.com/foo/bar"
+        )
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        self.mock_resolver.resolve_service.assert_has_calls(
+            [
+                call(b"_matrix-fed._tcp.xn--bcher-kva.com"),
+                call(b"_matrix._tcp.xn--bcher-kva.com"),
+            ]
         )

         # Make sure treq is trying to connect
@@ -1394,7 +1562,7 @@ def test_well_known_too_large(self) -> None:
         self.assertIsNone(r.delegated_server)

     def test_srv_fallbacks(self) -> None:
-        """Test that other SRV results are tried if the first one fails."""
+        """Test that other SRV results are tried if the first one fails for _matrix-fed SRV."""
         self.agent = self._make_agent()

         self.mock_resolver.resolve_service.return_value = [
@@ -1409,7 +1577,67 @@ def test_srv_fallbacks(self) -> None:
         self.assertNoResult(test_d)

         self.mock_resolver.resolve_service.assert_called_once_with(
-            b"_matrix._tcp.testserv"
+            b"_matrix-fed._tcp.testserv"
+        )
+
+        # We should see an attempt to connect to the first server
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 8443)
+
+        # Fonx the connection
+        client_factory.clientConnectionFailed(None, Exception("nope"))
+
+        # There's a 300ms delay in HostnameEndpoint
+        self.reactor.pump((0.4,))
+
+        # Hasn't failed yet
+        self.assertNoResult(test_d)
+
+        # We should now see an attempt to connect to the second server
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 8444)
+
+        # make a test server, and wire up the client
+        http_server = self._make_connection(client_factory, expected_sni=b"testserv")
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(request.path, b"/foo/bar")
+        self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
+
+        # finish the request
+        request.finish()
+        self.reactor.pump((0.1,))
+        self.successResultOf(test_d)
+
+    def test_srv_fallbacks_legacy(self) -> None:
+        """Test that other SRV results are tried if the first one fails for _matrix SRV."""
+        self.agent = self._make_agent()
+
+        # Return no entries for the _matrix-fed lookup, and a response for _matrix.
+        self.mock_resolver.resolve_service.side_effect = [
+            [],
+            [
+                Server(host=b"target.com", port=8443),
+                Server(host=b"target.com", port=8444),
+            ],
+        ]
+        self.reactor.lookups["target.com"] = "1.2.3.4"
+
+        test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
+
+        # Nothing happened yet
+        self.assertNoResult(test_d)
+
+        self.mock_resolver.resolve_service.assert_has_calls(
+            [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")]
         )

         # We should see an attempt to connect to the first server
@@ -1449,6 +1677,43 @@ def test_srv_fallbacks(self) -> None:
         self.reactor.pump((0.1,))
         self.successResultOf(test_d)

+    def test_srv_no_fallback_to_legacy(self) -> None:
+        """Test that _matrix SRV results are not tried if the _matrix-fed one fails."""
+        self.agent = self._make_agent()
+
+        # Return a failing entry for _matrix-fed.
+ self.mock_resolver.resolve_service.side_effect = [ + [Server(host=b"target.com", port=8443)], + [], + ] + self.reactor.lookups["target.com"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # Only the _matrix-fed is checked, _matrix is ignored. + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix-fed._tcp.testserv" + ) + + # We should see an attempt to connect to the first server + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # Fonx the connection + client_factory.clientConnectionFailed(None, Exception("nope")) + + # There's a 300ms delay in HostnameEndpoint + self.reactor.pump((0.4,)) + + # Failed to resolve a server. + self.assertFailure(test_d, Exception) + class TestCachePeriodFromHeaders(unittest.TestCase): def test_cache_control(self) -> None: From 1e571cd66437ea2455c203dafb94c20ba48cdcc1 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 5 Sep 2023 20:46:57 +0100 Subject: [PATCH 08/75] Fix appservices being unable to handle to_device messages for multiple users (#16251) --- changelog.d/16251.bugfix | 1 + synapse/storage/databases/main/deviceinbox.py | 2 +- tests/handlers/test_appservice.py | 125 ++++++++++++++++++ 3 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16251.bugfix diff --git a/changelog.d/16251.bugfix b/changelog.d/16251.bugfix new file mode 100644 index 000000000000..6d3157c7aa31 --- /dev/null +++ b/changelog.d/16251.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user. \ No newline at end of file diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index b471fcb064a2..271cdf923cf5 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -349,7 +349,7 @@ def get_device_messages_txn( table="devices", column="user_id", iterable=user_ids_to_query, - keyvalues={"user_id": user_id, "hidden": False}, + keyvalues={"hidden": False}, retcols=("device_id",), ) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 46d022092e82..a7e6cdd66a35 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -422,6 +422,18 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: "exclusive_as_user", "password", self.exclusive_as_user_device_id ) + self.exclusive_as_user_2_device_id = "exclusive_as_device_2" + self.exclusive_as_user_2 = self.register_user("exclusive_as_user_2", "password") + self.exclusive_as_user_2_token = self.login( + "exclusive_as_user_2", "password", self.exclusive_as_user_2_device_id + ) + + self.exclusive_as_user_3_device_id = "exclusive_as_device_3" + self.exclusive_as_user_3 = self.register_user("exclusive_as_user_3", "password") + self.exclusive_as_user_3_token = self.login( + "exclusive_as_user_3", "password", self.exclusive_as_user_3_device_id + ) + def _notify_interested_services(self) -> None: # This is normally set in `notify_interested_services` but we need to call the # internal async version so the reactor gets pushed to completion. 
@@ -849,6 +861,119 @@ def test_application_services_receive_bursts_of_to_device(self) -> None: for count in service_id_to_message_count.values(): self.assertEqual(count, number_of_messages) + @unittest.override_config( + {"experimental_features": {"msc2409_to_device_messages_enabled": True}} + ) + def test_application_services_receive_local_to_device_for_many_users(self) -> None: + """ + Test that when a user sends a to-device message to many users + in an application service's user namespace, the + application service will receive all of them. + """ + interested_appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": "@exclusive_as_user:.+", + "exclusive": True, + }, + { + "regex": "@exclusive_as_user_2:.+", + "exclusive": True, + }, + { + "regex": "@exclusive_as_user_3:.+", + "exclusive": True, + }, + ], + }, + ) + + # Have local_user send a to-device message to exclusive_as_users + message_content = {"some_key": "some really interesting value"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/3", + content={ + "messages": { + self.exclusive_as_user: { + self.exclusive_as_user_device_id: message_content + }, + self.exclusive_as_user_2: { + self.exclusive_as_user_2_device_id: message_content + }, + self.exclusive_as_user_3: { + self.exclusive_as_user_3_device_id: message_content + }, + } + }, + access_token=self.local_user_token, + ) + self.assertEqual(chan.code, 200, chan.result) + + # Have exclusive_as_user send a to-device message to local_user + for user_token in [ + self.exclusive_as_user_token, + self.exclusive_as_user_2_token, + self.exclusive_as_user_3_token, + ]: + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/4", + content={ + "messages": { + self.local_user: {self.local_user_device_id: message_content} + } + }, + access_token=user_token, + ) + self.assertEqual(chan.code, 200, chan.result) + + # Check if our application service - that is interested in exclusive_as_user - received + # the to-device message as part of an AS transaction. + # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS. + # + # The uninterested application service should not have been notified at all. 
+ self.send_mock.assert_called_once() + ( + service, + _events, + _ephemeral, + to_device_messages, + _otks, + _fbks, + _device_list_summary, + ) = self.send_mock.call_args[0] + + # Assert that this was the same to-device message that local_user sent + self.assertEqual(service, interested_appservice) + + # Assert expected number of messages + self.assertEqual(len(to_device_messages), 3) + + for device_msg in to_device_messages: + self.assertEqual(device_msg["type"], "m.room_key_request") + self.assertEqual(device_msg["sender"], self.local_user) + self.assertEqual(device_msg["content"], message_content) + + self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user) + self.assertEqual( + to_device_messages[0]["to_device_id"], + self.exclusive_as_user_device_id, + ) + + self.assertEqual(to_device_messages[1]["to_user_id"], self.exclusive_as_user_2) + self.assertEqual( + to_device_messages[1]["to_device_id"], + self.exclusive_as_user_2_device_id, + ) + + self.assertEqual(to_device_messages[2]["to_user_id"], self.exclusive_as_user_3) + self.assertEqual( + to_device_messages[2]["to_device_id"], + self.exclusive_as_user_3_device_id, + ) + def _register_application_service( self, namespaces: Optional[Dict[str, Iterable[Dict]]] = None, From 4f1840a88ad3a93244fc23149c56245704eab824 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 6 Sep 2023 09:30:53 +0200 Subject: [PATCH 09/75] Delete device messages asynchronously and in staged batches (#16240) --- changelog.d/16240.misc | 1 + synapse/handlers/device.py | 48 +++++++++++++++++++ synapse/handlers/presence.py | 4 +- synapse/handlers/sync.py | 16 +++++-- synapse/storage/databases/main/deviceinbox.py | 26 +++++++--- synapse/storage/databases/main/devices.py | 8 ---- synapse/storage/databases/main/receipts.py | 6 +-- synapse/storage/engines/_base.py | 6 +++ synapse/storage/engines/postgres.py | 4 ++ synapse/storage/engines/sqlite.py | 4 ++ .../main/delta/48/group_unique_indexes.py | 4 +- synapse/util/task_scheduler.py | 17 +++---- tests/handlers/test_device.py | 47 ++++++++++++++++++ 13 files changed, 154 insertions(+), 37 deletions(-) create mode 100644 changelog.d/16240.misc diff --git a/changelog.d/16240.misc b/changelog.d/16240.misc new file mode 100644 index 000000000000..4f266c1fb029 --- /dev/null +++ b/changelog.d/16240.misc @@ -0,0 +1 @@ +Delete device messages asynchronously and in staged batches using the task scheduler. 
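The core pattern in the patch below is "delete at most N rows per run,
and keep the task alive while full batches are still being deleted". A
minimal illustrative sketch of that idea (delete_batch is a hypothetical
placeholder for an async operation that deletes at most `limit` rows and
returns how many it removed):

    BATCH_LIMIT = 100  # mirrors DEVICE_MSGS_DELETE_BATCH_LIMIT in the patch

    async def delete_in_batches(delete_batch) -> None:
        while True:
            deleted = await delete_batch(limit=BATCH_LIMIT)
            if deleted < BATCH_LIMIT:
                # A short batch means nothing is left to delete.
                return
            # A full batch may mean more rows remain. The real patch returns
            # TaskStatus.ACTIVE here so the task scheduler re-runs the task
            # on a later pass, instead of looping inline like this sketch.

Since neither PostgreSQL nor (by default) SQLite supports DELETE ... LIMIT
directly, the patch expresses the batch limit with a subquery over the
engine's row id (ctid on PostgreSQL, rowid on SQLite), which is also why a
row_id_name property is introduced on the database engines below.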
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 763f56dfc16a..9e52af5f13ab 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -43,9 +43,12 @@ ) from synapse.types import ( JsonDict, + JsonMapping, + ScheduledTask, StrCollection, StreamKeyType, StreamToken, + TaskStatus, UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -62,6 +65,7 @@ logger = logging.getLogger(__name__) +DELETE_DEVICE_MSGS_TASK_NAME = "delete_device_messages" MAX_DEVICE_DISPLAY_NAME_LEN = 100 DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000 @@ -78,6 +82,7 @@ def __init__(self, hs: "HomeServer"): self._appservice_handler = hs.get_application_service_handler() self._state_storage = hs.get_storage_controllers().state self._auth_handler = hs.get_auth_handler() + self._event_sources = hs.get_event_sources() self.server_name = hs.hostname self._msc3852_enabled = hs.config.experimental.msc3852_enabled self._query_appservices_for_keys = ( @@ -386,6 +391,7 @@ def __init__(self, hs: "HomeServer"): self._account_data_handler = hs.get_account_data_handler() self._storage_controllers = hs.get_storage_controllers() self.db_pool = hs.get_datastores().main.db_pool + self._task_scheduler = hs.get_task_scheduler() self.device_list_updater = DeviceListUpdater(hs, self) @@ -419,6 +425,10 @@ def __init__(self, hs: "HomeServer"): self._delete_stale_devices, ) + self._task_scheduler.register_action( + self._delete_device_messages, DELETE_DEVICE_MSGS_TASK_NAME + ) + def _check_device_name_length(self, name: Optional[str]) -> None: """ Checks whether a device name is longer than the maximum allowed length. @@ -530,6 +540,7 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: user_id: The user to delete devices from. device_ids: The list of device IDs to delete """ + to_device_stream_id = self._event_sources.get_current_token().to_device_key try: await self.store.delete_devices(user_id, device_ids) @@ -559,12 +570,49 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: f"org.matrix.msc3890.local_notification_settings.{device_id}", ) + # Delete device messages asynchronously and in batches using the task scheduler + await self._task_scheduler.schedule_task( + DELETE_DEVICE_MSGS_TASK_NAME, + resource_id=device_id, + params={ + "user_id": user_id, + "device_id": device_id, + "up_to_stream_id": to_device_stream_id, + }, + ) + # Pushers are deleted after `delete_access_tokens_for_user` is called so that # modules using `on_logged_out` hook can use them if needed. 
await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids) await self.notify_device_update(user_id, device_ids) + DEVICE_MSGS_DELETE_BATCH_LIMIT = 100 + + async def _delete_device_messages( + self, + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" + assert task.params is not None + user_id = task.params["user_id"] + device_id = task.params["device_id"] + up_to_stream_id = task.params["up_to_stream_id"] + + res = await self.store.delete_messages_for_device( + user_id=user_id, + device_id=device_id, + up_to_stream_id=up_to_stream_id, + limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT, + ) + + if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT: + return TaskStatus.COMPLETE, None, None + else: + # There is probably still device messages to be deleted, let's keep the task active and it will be run + # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running). + return TaskStatus.ACTIVE, None, None + async def update_device(self, user_id: str, device_id: str, content: dict) -> None: """Update the given device diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index a4b05b72e7cb..375c7d0901d5 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -183,6 +183,7 @@ class BasePresenceHandler(abc.ABC): writer""" def __init__(self, hs: "HomeServer"): + self.hs = hs self.clock = hs.get_clock() self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() @@ -473,8 +474,6 @@ def __exit__( class WorkerPresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.hs = hs - self._presence_writer_instance = hs.config.worker.writers.presence[0] # Route presence EDUs to the right worker @@ -738,7 +737,6 @@ async def bump_presence_active_time( class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.hs = hs self.wheel_timer: WheelTimer[str] = WheelTimer() self.notifier = hs.get_notifier() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 60a9f341b51d..0ccd7d250c4b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -40,6 +40,7 @@ from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase +from synapse.handlers.device import DELETE_DEVICE_MSGS_TASK_NAME from synapse.handlers.relations import BundledAggregations from synapse.logging import issue9533_logger from synapse.logging.context import current_context @@ -268,6 +269,7 @@ def __init__(self, hs: "HomeServer"): self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state self._device_handler = hs.get_device_handler() + self._task_scheduler = hs.get_task_scheduler() self.should_calculate_push_rules = hs.config.push.enable_push @@ -360,11 +362,19 @@ async def _wait_for_sync_for_user( # (since we now know that the device has received them) if since_token is not None: since_stream_id = since_token.to_device_key - deleted = await self.store.delete_messages_for_device( - sync_config.user.to_string(), sync_config.device_id, since_stream_id + # Delete device messages asynchronously and in batches using the task scheduler + await self._task_scheduler.schedule_task( + DELETE_DEVICE_MSGS_TASK_NAME, + 
resource_id=sync_config.device_id, + params={ + "user_id": sync_config.user.to_string(), + "device_id": sync_config.device_id, + "up_to_stream_id": since_stream_id, + }, ) logger.debug( - "Deleted %d to-device messages up to %d", deleted, since_stream_id + "Deletion of to-device messages up to %d scheduled", + since_stream_id, ) if timeout == 0 or since_token is None or full_state: diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 271cdf923cf5..744e98c6d0d1 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -445,13 +445,18 @@ def get_device_messages_txn( @trace async def delete_messages_for_device( - self, user_id: str, device_id: Optional[str], up_to_stream_id: int + self, + user_id: str, + device_id: Optional[str], + up_to_stream_id: int, + limit: int, ) -> int: """ Args: user_id: The recipient user_id. device_id: The recipient device_id. up_to_stream_id: Where to delete messages up to. + limit: maximum number of messages to delete Returns: The number of messages deleted. @@ -472,12 +477,16 @@ async def delete_messages_for_device( log_kv({"message": "No changes in cache since last check"}) return 0 + ROW_ID_NAME = self.database_engine.row_id_name + def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: - sql = ( - "DELETE FROM device_inbox" - " WHERE user_id = ? AND device_id = ?" - " AND stream_id <= ?" - ) + sql = f""" + DELETE FROM device_inbox WHERE {ROW_ID_NAME} IN ( + SELECT {ROW_ID_NAME} FROM device_inbox + WHERE user_id = ? AND device_id = ? AND stream_id <= ? + LIMIT {limit} + ) + """ txn.execute(sql, (user_id, device_id, up_to_stream_id)) return txn.rowcount @@ -487,6 +496,11 @@ def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: log_kv({"message": f"deleted {count} messages for device", "count": count}) + # In this case we don't know if we hit the limit or the delete is complete + # so let's not update the cache. + if count == limit: + return count + # Update the cache, ensuring that we only ever increase the value updated_last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), 0 diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fa69a4a29852..7208fc8b33fc 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1766,14 +1766,6 @@ def _delete_devices_txn(txn: LoggingTransaction) -> None: keyvalues={"user_id": user_id, "hidden": False}, ) - self.db_pool.simple_delete_many_txn( - txn, - table="device_inbox", - column="device_id", - values=device_ids, - keyvalues={"user_id": user_id}, - ) - self.db_pool.simple_delete_many_txn( txn, table="device_auth_providers", diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 5ee5c7ad9f14..e4d10ff250d1 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -939,11 +939,7 @@ async def _background_receipts_linearized_unique_index( receipts.""" def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: - if isinstance(self.database_engine, PostgresEngine): - ROW_ID_NAME = "ctid" - else: - ROW_ID_NAME = "rowid" - + ROW_ID_NAME = self.database_engine.row_id_name # Identify any duplicate receipts arising from # https://github.com/matrix-org/synapse/issues/14406. # The following query takes less than a minute on matrix.org. 
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 0b5b3bf03e1a..b1a2418cbdea 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -100,6 +100,12 @@ def server_version(self) -> str:
         """Gets a string giving the server version. For example: '3.22.0'"""
         ...

+    @property
+    @abc.abstractmethod
+    def row_id_name(self) -> str:
+        """Gets the literal name representing a row id for this engine."""
+        ...
+
     @abc.abstractmethod
     def in_transaction(self, conn: ConnectionType) -> bool:
         """Whether the connection is currently in a transaction."""
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 05a72dc55435..6309363217a8 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -211,6 +211,10 @@ def server_version(self) -> str:
         else:
             return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100)

+    @property
+    def row_id_name(self) -> str:
+        return "ctid"
+
     def in_transaction(self, conn: psycopg2.extensions.connection) -> bool:
         return conn.status != psycopg2.extensions.STATUS_READY

diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index ca8c59297c42..802069e1e1cb 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -123,6 +123,10 @@ def server_version(self) -> str:
         """Gets a string giving the server version. For example: '3.22.0'."""
         return "%i.%i.%i" % sqlite3.sqlite_version_info

+    @property
+    def row_id_name(self) -> str:
+        return "rowid"
+
     def in_transaction(self, conn: sqlite3.Connection) -> bool:
         return conn.in_transaction

diff --git a/synapse/storage/schema/main/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py
index ad2da4c8af84..622686d28f11 100644
--- a/synapse/storage/schema/main/delta/48/group_unique_indexes.py
+++ b/synapse/storage/schema/main/delta/48/group_unique_indexes.py
@@ -14,7 +14,7 @@

 from synapse.storage.database import LoggingTransaction
-from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+from synapse.storage.engines import BaseDatabaseEngine
 from synapse.storage.prepare_database import get_statements

 FIX_INDEXES = """
@@ -37,7 +37,7 @@


 def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
-    rowid = "ctid" if isinstance(database_engine, PostgresEngine) else "rowid"
+    rowid = database_engine.row_id_name

     # remove duplicates from group_users & group_invites tables
     cur.execute(
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
index 9e89aeb74891..9b2581e51a15 100644
--- a/synapse/util/task_scheduler.py
+++ b/synapse/util/task_scheduler.py
@@ -77,6 +77,7 @@ class TaskScheduler:
     LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000  # 24hrs

     def __init__(self, hs: "HomeServer"):
+        self._hs = hs
         self._store = hs.get_datastores().main
         self._clock = hs.get_clock()
         self._running_tasks: Set[str] = set()
@@ -97,8 +98,6 @@ def __init__(self, hs: "HomeServer"):
                 "handle_scheduled_tasks",
                 self._handle_scheduled_tasks,
             )
-        else:
-            self.replication_client = hs.get_replication_command_handler()

     def register_action(
         self,
@@ -133,7 +132,7 @@ async def schedule_task(
         params: Optional[JsonMapping] = None,
     ) -> str:
         """Schedule a new potentially resumable task. A function matching the specified
-        `action` should have been previously registered with `register_action`.
+        `action` should have been registered with `register_action` before the task is run.
Args: action: the name of a previously registered action @@ -149,11 +148,6 @@ async def schedule_task( Returns: The id of the scheduled task """ - if action not in self._actions: - raise Exception( - f"No function associated with action {action} of the scheduled task" - ) - status = TaskStatus.SCHEDULED if timestamp is None or timestamp < self._clock.time_msec(): timestamp = self._clock.time_msec() @@ -175,7 +169,7 @@ async def schedule_task( if self._run_background_tasks: await self._launch_task(task) else: - self.replication_client.send_new_active_task(task.id) + self._hs.get_replication_command_handler().send_new_active_task(task.id) return task.id @@ -315,7 +309,10 @@ async def _launch_task(self, task: ScheduledTask) -> None: """ assert self._run_background_tasks - assert task.action in self._actions + if task.action not in self._actions: + raise Exception( + f"No function associated with action {task.action} of the scheduled task {task.id}" + ) function = self._actions[task.action] async def wrapper() -> None: diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 55a4f95ef32b..9659a4a3553b 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -30,6 +30,7 @@ from synapse.storage.databases.main.appservice import _make_exclusive_regex from synapse.types import JsonDict, create_requester from synapse.util import Clock +from synapse.util.task_scheduler import TaskScheduler from tests import unittest from tests.unittest import override_config @@ -49,6 +50,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: assert isinstance(handler, DeviceHandler) self.handler = handler self.store = hs.get_datastores().main + self.device_message_handler = hs.get_device_message_handler() return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -211,6 +213,51 @@ def test_delete_device_and_device_inbox(self) -> None: ) self.assertIsNone(res) + def test_delete_device_and_big_device_inbox(self) -> None: + """Check that deleting a big device inbox is staged and batched asynchronously.""" + DEVICE_ID = "abc" + sender = "@sender:" + self.hs.hostname + receiver = "@receiver:" + self.hs.hostname + self._record_user(sender, DEVICE_ID, DEVICE_ID) + self._record_user(receiver, DEVICE_ID, DEVICE_ID) + + # queue a bunch of messages in the inbox + requester = create_requester(sender, device_id=DEVICE_ID) + for i in range(0, DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10): + self.get_success( + self.device_message_handler.send_device_message( + requester, "message_type", {receiver: {"*": {"val": i}}} + ) + ) + + # delete the device + self.get_success(self.handler.delete_devices(receiver, [DEVICE_ID])) + + # messages should be deleted up to DEVICE_MSGS_DELETE_BATCH_LIMIT straight away + res = self.get_success( + self.store.db_pool.simple_select_list( + table="device_inbox", + keyvalues={"user_id": receiver}, + retcols=("user_id", "device_id", "stream_id"), + desc="get_device_id_from_device_inbox", + ) + ) + self.assertEqual(10, len(res)) + + # wait for the task scheduler to do a second delete pass + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000) + + # remaining messages should now be deleted + res = self.get_success( + self.store.db_pool.simple_select_list( + table="device_inbox", + keyvalues={"user_id": receiver}, + retcols=("user_id", "device_id", "stream_id"), + desc="get_device_id_from_device_inbox", + ) + ) + self.assertEqual(0, len(res)) + def test_update_device(self) -> None: 
self._record_users() From 698f6fa2508dbff1a4353d57da60be5d13bbd61d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 10:50:07 +0000 Subject: [PATCH 10/75] Allow modules to delete rooms. (#15997) * Allow user_id to be optional for room deletion * Add module API method to delete a room * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Don't worry about the case block=True && requester_user_id is None --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15997.misc | 1 + synapse/handlers/pagination.py | 12 ++++++++++-- synapse/handlers/room.py | 10 +++++++++- synapse/module_api/__init__.py | 13 +++++++++++++ .../callbacks/third_party_event_rules_callbacks.py | 11 ++++++++--- 5 files changed, 41 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15997.misc diff --git a/changelog.d/15997.misc b/changelog.d/15997.misc new file mode 100644 index 000000000000..94768c3cb82d --- /dev/null +++ b/changelog.d/15997.misc @@ -0,0 +1 @@ +Allow modules to delete rooms. \ No newline at end of file diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index e5ac9096ccf6..19cf5a2b4393 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -713,7 +713,7 @@ async def _shutdown_and_purge_room( self, delete_id: str, room_id: str, - requester_user_id: str, + requester_user_id: Optional[str], new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, @@ -732,6 +732,10 @@ async def _shutdown_and_purge_room( requester_user_id: User who requested the action. Will be recorded as putting the room on the blocking list. + If None, the action was not manually requested but instead + triggered automatically, e.g. through a Synapse module + or some other policy. + MUST NOT be None if block=True. new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be @@ -818,7 +822,7 @@ def clear_delete() -> None: def start_shutdown_and_purge_room( self, room_id: str, - requester_user_id: str, + requester_user_id: Optional[str], new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, @@ -833,6 +837,10 @@ def start_shutdown_and_purge_room( requester_user_id: User who requested the action and put the room on the blocking list. + If None, the action was not manually requested but instead + triggered automatically, e.g. through a Synapse module + or some other policy. + MUST NOT be None if block=True. new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0513e28aabe1..7a762c851101 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1787,7 +1787,7 @@ def __init__(self, hs: "HomeServer"): async def shutdown_room( self, room_id: str, - requester_user_id: str, + requester_user_id: Optional[str], new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, @@ -1811,6 +1811,10 @@ async def shutdown_room( requester_user_id: User who requested the action and put the room on the blocking list. + If None, the action was not manually requested but instead + triggered automatically, e.g. through a Synapse module + or some other policy. + MUST NOT be None if block=True. 
new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be @@ -1863,6 +1867,10 @@ async def shutdown_room( # Action the block first (even if the room doesn't exist yet) if block: + if requester_user_id is None: + raise ValueError( + "shutdown_room: block=True not allowed when requester_user_id is None." + ) # This will work even if the room is already blocked, but that is # desirable in case the first attempt at blocking the room failed below. await self.store.block_room(room_id, requester_user_id) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 2f00a7ba2039..d6efe10a28ba 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1730,6 +1730,19 @@ async def create_room( room_alias_str = room_alias.to_string() if room_alias else None return room_id, room_alias_str + async def delete_room(self, room_id: str) -> None: + """ + Schedules the deletion of a room from Synapse's database. + + If the room is already being deleted, this method does nothing. + This method does not wait for the room to be deleted. + + Added in Synapse v1.89.0. + """ + # Future extensions to this method might want to e.g. allow use of `force_purge`. + # TODO In the future we should make sure this is persistent. + self._hs.get_pagination_handler().start_shutdown_and_purge_room(room_id, None) + async def set_displayname( self, user_id: UserID, diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py index 911f37ba4253..ecaeef35118c 100644 --- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -40,7 +40,7 @@ [str, StateMap[EventBase], str], Awaitable[bool] ] ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable] -CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[Optional[str], str], Awaitable[bool]] CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]] ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] @@ -429,12 +429,17 @@ async def on_new_event(self, event_id: str) -> None: "Failed to run module API callback %s: %s", callback, e ) - async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool: + async def check_can_shutdown_room( + self, user_id: Optional[str], room_id: str + ) -> bool: """Intercept requests to shutdown a room. If `False` is returned, the room must not be shut down. Args: - requester: The ID of the user requesting the shutdown. + user_id: The ID of the user requesting the shutdown. + If no user ID is supplied, then the room is being shut down through + some mechanism other than a user's request, e.g. through a module's + request. room_id: The ID of the room. """ for callback in self._check_can_shutdown_room_callbacks: From e937e2111a45d0cb3ecc973f95dafafecb6e9c36 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 13:01:10 +0000 Subject: [PATCH 11/75] Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. 
(#16219) * Add more suffixes to `parse_size` * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16219.feature | 1 + docs/usage/configuration/config_documentation.md | 4 +++- synapse/config/_base.py | 7 ++++--- 3 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16219.feature diff --git a/changelog.d/16219.feature b/changelog.d/16219.feature new file mode 100644 index 000000000000..c789f2abb761 --- /dev/null +++ b/changelog.d/16219.feature @@ -0,0 +1 @@ +Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 0b1725816e3d..97fd1beb39af 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -25,8 +25,10 @@ messages from the database after 5 minutes, rather than 5 months. In addition, configuration options referring to size use the following suffixes: -* `M` = MiB, or 1,048,576 bytes * `K` = KiB, or 1024 bytes +* `M` = MiB, or 1,048,576 bytes +* `G` = GiB, or 1,073,741,824 bytes +* `T` = TiB, or 1,099,511,627,776 bytes For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes for a user avatar. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 69a831812759..58856839e16f 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -179,8 +179,9 @@ def parse_size(value: Union[str, int]) -> int: If an integer is provided it is treated as bytes and is unchanged. - String byte sizes can have a suffix of 'K' or `M`, representing kibibytes and - mebibytes respectively. No suffix is understood as a plain byte count. + String byte sizes can have a suffix of 'K', `M`, `G` or `T`, + representing kibibytes, mebibytes, gibibytes and tebibytes respectively. + No suffix is understood as a plain byte count. Raises: TypeError, if given something other than an integer or a string @@ -189,7 +190,7 @@ def parse_size(value: Union[str, int]) -> int: if type(value) is int: # noqa: E721 return value elif isinstance(value, str): - sizes = {"K": 1024, "M": 1024 * 1024} + sizes = {"K": 1024, "M": 1024 * 1024, "G": 1024**3, "T": 1024**4} size = 1 suffix = value[-1] if suffix in sizes: From ffe4ea130279d10bdb988f60ebee6669ceeddbe7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 6 Sep 2023 14:34:01 +0100 Subject: [PATCH 12/75] Update rust in flake.nix: 1.70.0 -> 1.71.1 to address CVE-2023-38497 (#16260) --- changelog.d/16260.misc | 1 + flake.lock | 6 +++--- flake.nix | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16260.misc diff --git a/changelog.d/16260.misc b/changelog.d/16260.misc new file mode 100644 index 000000000000..9f3289d7d4a5 --- /dev/null +++ b/changelog.d/16260.misc @@ -0,0 +1 @@ +Update rust to version 1.71.1 in the nix development environment. 
\ No newline at end of file diff --git a/flake.lock b/flake.lock index d53be767a78a..9b360fa33eed 100644 --- a/flake.lock +++ b/flake.lock @@ -258,11 +258,11 @@ "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1690510705, - "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=", + "lastModified": 1693966243, + "narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea", + "rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index b89b6d9218ee..dc7ab5b3fe4e 100644 --- a/flake.nix +++ b/flake.nix @@ -82,7 +82,7 @@ # # NOTE: We currently need to set the Rust version unnecessarily high # in order to work around https://github.com/matrix-org/synapse/issues/15939 - (rust-bin.stable."1.70.0".default.override { + (rust-bin.stable."1.71.1".default.override { # Additionally install the "rust-src" extension to allow diving into the # Rust source code in an IDE (rust-analyzer will also make use of it). extensions = [ "rust-src" ]; From 35934b02a98cbb44ba310707a72e55bc4a5c7f0a Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 13:35:02 +0000 Subject: [PATCH 13/75] Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. (#16090) * Add gcc and GNU make to the Nix flake * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * unset LD_LIBRARY_PATH --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16090.misc | 1 + flake.nix | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/16090.misc diff --git a/changelog.d/16090.misc b/changelog.d/16090.misc new file mode 100644 index 000000000000..d54ef936c7d2 --- /dev/null +++ b/changelog.d/16090.misc @@ -0,0 +1 @@ +Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. \ No newline at end of file diff --git a/flake.nix b/flake.nix index dc7ab5b3fe4e..69c9c19f8982 100644 --- a/flake.nix +++ b/flake.nix @@ -89,6 +89,10 @@ }) # The rust-analyzer language server implementation. rust-analyzer + # GCC includes a linker; needed for building `ruff` + gcc + # Needed for building `ruff` + gnumake # Native dependencies for running Synapse. icu @@ -236,6 +240,19 @@ URI YAMLLibYAML ]}"; + + # Clear the LD_LIBRARY_PATH environment variable on shell init. + # + # By default, devenv will set LD_LIBRARY_PATH to point to .devenv/profile/lib. This causes + # issues when we include `gcc` as a dependency to build C libraries, as the version of glibc + # that the development environment's cc compiler uses may differ from that of the system. + # + # When LD_LIBRARY_PATH is set, system tools will attempt to use the development environment's + # libraries. Which, when built against an different glibc version lead, to "version 'GLIBC_X.YY' not + # found" errors. + enterShell = '' + unset LD_LIBRARY_PATH + ''; } ]; }; From 51303035f2366d60772473f42c64ae6cad6684d0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 15:15:56 +0000 Subject: [PATCH 14/75] Apply missed suggestions from the review of #16090. 
(#16263) * Suggestions from PR * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16263.misc | 1 + flake.nix | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16263.misc diff --git a/changelog.d/16263.misc b/changelog.d/16263.misc new file mode 100644 index 000000000000..d54ef936c7d2 --- /dev/null +++ b/changelog.d/16263.misc @@ -0,0 +1 @@ +Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. \ No newline at end of file diff --git a/flake.nix b/flake.nix index 69c9c19f8982..31f283293971 100644 --- a/flake.nix +++ b/flake.nix @@ -89,6 +89,7 @@ }) # The rust-analyzer language server implementation. rust-analyzer + # GCC includes a linker; needed for building `ruff` gcc # Needed for building `ruff` @@ -248,8 +249,8 @@ # that the development environment's cc compiler uses may differ from that of the system. # # When LD_LIBRARY_PATH is set, system tools will attempt to use the development environment's - # libraries. Which, when built against an different glibc version lead, to "version 'GLIBC_X.YY' not - # found" errors. + # libraries, which, when built against a different glibc version, lead to "version 'GLIBC_X.YY' + # not found" errors. enterShell = '' unset LD_LIBRARY_PATH ''; From fe69e7f617199f51eb97f510a0a934fdcf02fbad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Grimpard?= Date: Wed, 6 Sep 2023 20:32:24 +0200 Subject: [PATCH 15/75] Handle "registration_enabled" parameter for CAS (#16262) Similar to OIDC, CAS providers can now disable registration such that only existing users are able to login via SSO. --- changelog.d/16262.feature | 1 + .../usage/configuration/config_documentation.md | 7 +++++++ synapse/config/cas.py | 3 +++ synapse/handlers/cas.py | 2 ++ tests/handlers/test_cas.py | 17 +++++++++++++++++ 5 files changed, 30 insertions(+) create mode 100644 changelog.d/16262.feature diff --git a/changelog.d/16262.feature b/changelog.d/16262.feature new file mode 100644 index 000000000000..7c8e7e349bca --- /dev/null +++ b/changelog.d/16262.feature @@ -0,0 +1 @@ +Add the ability to enable/disable registration in the CAS flow. Contributed by Aurélien Grimpard. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 97fd1beb39af..42df53d52bb0 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3430,6 +3430,12 @@ Has the following sub-options: and the values must match the given value. Alternately if the given value is `None` then any value is allowed (the attribute just must exist). All of the listed attributes must match for the login to be permitted. +* `enable_registration`: set to `false` to disable automatic registration of new + users. This allows the CAS SSO flow to be limited to sign in only, rather than + automatically registering users that have a valid SSO login but do not have + a pre-registered account. Defaults to `true`.
+ + *Added in Synapse 1.93.0.* Example configuration: ```yaml cas_config: @@ -3441,6 +3447,7 @@ cas_config: required_attributes: userGroup: "staff" department: None + enable_registration: true ``` --- ### `sso` diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 6e2d9addbf4c..bbc8f4307350 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -57,6 +57,8 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: required_attributes ) + self.cas_enable_registration = cas_config.get("enable_registration", True) + self.idp_name = cas_config.get("idp_name", "CAS") self.idp_icon = cas_config.get("idp_icon") self.idp_brand = cas_config.get("idp_brand") @@ -67,6 +69,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.cas_protocol_version = None self.cas_displayname_attribute = None self.cas_required_attributes = [] + self.cas_enable_registration = False # CAS uses a legacy required attributes mapping, not the one provided by diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py index a85054545356..b5b8b9bd35cf 100644 --- a/synapse/handlers/cas.py +++ b/synapse/handlers/cas.py @@ -70,6 +70,7 @@ def __init__(self, hs: "HomeServer"): self._cas_protocol_version = hs.config.cas.cas_protocol_version self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute self._cas_required_attributes = hs.config.cas.cas_required_attributes + self._cas_enable_registration = hs.config.cas.cas_enable_registration self._http_client = hs.get_proxied_http_client() @@ -395,4 +396,5 @@ async def grandfather_existing_users() -> Optional[str]: client_redirect_url, cas_response_to_user_attributes, grandfather_existing_users, + registration_enabled=self._cas_enable_registration, ) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 8582b1cd1e9e..13e2cd153a08 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -197,6 +197,23 @@ def test_required_attributes(self) -> None: auth_provider_session_id=None, ) + @override_config({"cas_config": {"enable_registration": False}}) + def test_map_cas_user_does_not_register_new_user(self) -> None: + """Ensures new users are not registered when the `enable_registration` flag is disabled.""" + + # stub out the auth handler + auth_handler = self.hs.get_auth_handler() + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] + + cas_response = CasResponse("test_user", {}) + request = _mock_request() + self.get_success( + self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") + ) + + # check that the auth handler was not called + auth_handler.complete_sso_login.assert_not_called() + def _mock_request() -> Mock: """Returns a mock which will stand in as a SynapseRequest""" From 13e9cad537a16108b0cb544ccdc24e7dc2ca33ae Mon Sep 17 00:00:00 2001 From: Marcel Date: Wed, 6 Sep 2023 21:19:17 +0200 Subject: [PATCH 16/75] Send the opentracing span information to appservices (#16227) --- changelog.d/16227.feature | 1 + synapse/appservice/api.py | 32 ++++++++++++++++++++++++-------- tests/appservice/test_api.py | 18 ++++++++++++------ 3 files changed, 37 insertions(+), 14 deletions(-) create mode 100644 changelog.d/16227.feature diff --git a/changelog.d/16227.feature b/changelog.d/16227.feature new file mode 100644 index 000000000000..510062b622c0 --- /dev/null +++ b/changelog.d/16227.feature @@ -0,0 +1 @@ +Add span information to requests sent to appservices. Contributed by MTRNord.
\ No newline at end of file diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index de7a94bf2643..b1523be208e9 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -40,6 +40,7 @@ from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig, serialize_event from synapse.http.client import SimpleHttpClient, is_unknown_endpoint +from synapse.logging import opentracing from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID from synapse.util.caches.response_cache import ResponseCache @@ -125,6 +126,17 @@ def __init__(self, hs: "HomeServer"): hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS ) + def _get_headers(self, service: "ApplicationService") -> Dict[bytes, List[bytes]]: + """This makes sure the auth header and opentracing headers are always set.""" + + # This is also ensured earlier in the calling functions. However it is needed + # here to satisfy the type checker. + assert service.hs_token is not None + + headers = {b"Authorization": [b"Bearer " + service.hs_token.encode("ascii")]} + opentracing.inject_header_dict(headers, check_destination=False) + return headers + async def query_user(self, service: "ApplicationService", user_id: str) -> bool: if service.url is None: return False @@ -136,10 +148,11 @@ async def query_user(self, service: "ApplicationService", user_id: str) -> bool: args = None if self.config.use_appservice_legacy_authorization: args = {"access_token": service.hs_token} + response = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}", args, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) if response is not None: # just an empty json object return True @@ -162,10 +175,11 @@ async def query_alias(self, service: "ApplicationService", alias: str) -> bool: args = None if self.config.use_appservice_legacy_authorization: args = {"access_token": service.hs_token} + response = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}", args, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) if response is not None: # just an empty json object return True @@ -203,10 +217,11 @@ async def query_3pe( **fields, b"access_token": service.hs_token, } + response = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}", args=args, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) if not isinstance(response, list): logger.warning( @@ -243,10 +258,11 @@ async def _get() -> Optional[JsonDict]: args = None if self.config.use_appservice_legacy_authorization: args = {"access_token": service.hs_token} + info = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}", args, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) if not _is_valid_3pe_metadata(info): @@ -283,7 +299,7 @@ async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> No await self.post_json_get_json( uri=f"{service.url}{APP_SERVICE_PREFIX}/ping", post_json={"transaction_id": txn_id}, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) async def push_bulk( @@ -364,7 +380,7 @@ async def push_bulk( f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}", json_body=body,
args=args, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) if logger.isEnabledFor(logging.DEBUG): logger.debug( @@ -437,7 +453,7 @@ async def claim_client_keys( response = await self.post_json_get_json( uri, body, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) except HttpResponseException as e: # The appservice doesn't support this endpoint. @@ -498,7 +514,7 @@ async def query_keys( response = await self.post_json_get_json( uri, query, - headers={"Authorization": [f"Bearer {service.hs_token}"]}, + headers=self._get_headers(service), ) except HttpResponseException as e: # The appservice doesn't support this endpoint. diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 75fb5fae6b92..366b6fd5f00d 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -76,7 +76,7 @@ async def get_json( headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], ) -> List[JsonDict]: # Ensure the access token is passed as a header. - if not headers or not headers.get("Authorization"): + if not headers or not headers.get(b"Authorization"): raise RuntimeError("Access token not provided") # ... and not as a query param if b"access_token" in args: raise RuntimeError( "Access token should not be passed as a query param." ) - self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) + self.assertEqual( + headers.get(b"Authorization"), [f"Bearer {TOKEN}".encode()] + ) self.request_url = url if url == URL_USER: return SUCCESS_RESULT_USER @@ -152,11 +154,13 @@ async def get_json( # Ensure the access token is passed as both a query param and in the headers. if not args.get(b"access_token"): raise RuntimeError("Access token should be provided in query params.") - if not headers or not headers.get("Authorization"): + if not headers or not headers.get(b"Authorization"): raise RuntimeError("Access token should be provided in auth headers.") self.assertEqual(args.get(b"access_token"), TOKEN) - self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) + self.assertEqual( + headers.get(b"Authorization"), [f"Bearer {TOKEN}".encode()] + ) self.request_url = url if url == URL_USER: return SUCCESS_RESULT_USER @@ -208,10 +212,12 @@ async def post_json_get_json( headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], ) -> JsonDict: # Ensure the access token is passed as both a header and query arg. - if not headers.get("Authorization"): + if not headers.get(b"Authorization"): raise RuntimeError("Access token not provided") - self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) + self.assertEqual( + headers.get(b"Authorization"), [f"Bearer {TOKEN}".encode()] + ) return RESPONSE # We assign to a method, which mypy doesn't like.
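The helper introduced in PATCH 16 above funnels every outbound appservice request through a single place that sets both the `Authorization` bearer token and the opentracing headers, so the two can no longer drift apart across call sites. Below is a minimal, self-contained sketch of that pattern, not Synapse's actual module: `inject_tracing_headers` is an illustrative stand-in for the real `opentracing.inject_header_dict(headers, check_destination=False)` call shown in the diff, and the header value it writes is a placeholder.

```python
from typing import Dict, List, Optional


def inject_tracing_headers(headers: Dict[bytes, List[bytes]]) -> None:
    # Illustrative stand-in for Synapse's opentracing.inject_header_dict():
    # it writes the active span's context into the outgoing header dict so
    # the appservice can continue the trace. The value below is a placeholder,
    # not a real span context.
    headers[b"traceparent"] = [b"00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"]


def get_headers(hs_token: Optional[str]) -> Dict[bytes, List[bytes]]:
    # Mirrors the shape of _get_headers in the diff: always set the auth
    # header first, then let the tracing layer add its headers to the same dict.
    assert hs_token is not None  # callers verify the token before sending
    headers = {b"Authorization": [b"Bearer " + hs_token.encode("ascii")]}
    inject_tracing_headers(headers)
    return headers


if __name__ == "__main__":
    # Prints the combined header dict, e.g.
    # {b'Authorization': [b'Bearer example-token'], b'traceparent': [...]}
    print(get_headers("example-token"))
```

Centralising header construction is also what makes the accompanying test churn mechanical: the helper keys headers by bytes throughout, so the assertions switch uniformly from `headers.get("Authorization")` to `headers.get(b"Authorization")`.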
From a83f75a37dba765df78319c57c296a3a1ca27e05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 15:19:56 -0400 Subject: [PATCH 17/75] Bump gitpython from 3.1.32 to 3.1.34 (#16267) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 872a863edc95..d7bbfbd358e4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -586,13 +586,13 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.32" +version = "3.1.34" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"}, - {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"}, + {file = "GitPython-3.1.34-py3-none-any.whl", hash = "sha256:5d3802b98a3bae1c2b8ae0e1ff2e4aa16bcdf02c145da34d092324f599f01395"}, + {file = "GitPython-3.1.34.tar.gz", hash = "sha256:85f7d365d1f6bf677ae51039c1ef67ca59091c7ebd5a3509aa399d4eda02d6dd"}, ] [package.dependencies] From 8940d1b28ecbaf9185459e2af62169ecf39a96f5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Sep 2023 10:26:07 +0100 Subject: [PATCH 18/75] Add `/notifications` endpoint to workers (#16265) --- changelog.d/16265.feature | 1 + docker/configure_workers_and_start.py | 1 + docs/workers.md | 1 + synapse/rest/__init__.py | 2 +- synapse/rest/client/notifications.py | 2 + .../databases/main/event_push_actions.py | 72 +++++++++---------- 6 files changed, 42 insertions(+), 37 deletions(-) create mode 100644 changelog.d/16265.feature diff --git a/changelog.d/16265.feature b/changelog.d/16265.feature new file mode 100644 index 000000000000..3ffa16dbcb64 --- /dev/null +++ b/changelog.d/16265.feature @@ -0,0 +1 @@ +Allow `/notifications` endpoint to be routed to workers. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 400a7515aa16..62952e6b2635 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -183,6 +183,7 @@ "^/_matrix/client/(r0|v3|unstable)/password_policy$", "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$", "^/_matrix/client/(r0|v3|unstable)/capabilities$", + "^/_matrix/client/(r0|v3|unstable)/notifications$", ], "shared_extra_conf": {}, "worker_extra_conf": "", diff --git a/docs/workers.md b/docs/workers.md index 24bd22724e04..dc76b073de8d 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -246,6 +246,7 @@ information. 
^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$) ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$ ^/_matrix/client/(r0|v3|unstable)/capabilities$ + ^/_matrix/client/(r0|v3|unstable)/notifications$ # Encryption requests ^/_matrix/client/(r0|v3|unstable)/keys/query$ diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index df0845edb209..1be9c47c6121 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -123,7 +123,7 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: if is_main_process: report_event.register_servlets(hs, client_resource) openid.register_servlets(hs, client_resource) - notifications.register_servlets(hs, client_resource) + notifications.register_servlets(hs, client_resource) devices.register_servlets(hs, client_resource) if is_main_process: thirdparty.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index ea1004256918..e7fe1332e776 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -36,6 +36,8 @@ class NotificationsServlet(RestServlet): PATTERNS = client_patterns("/notifications$") + CATEGORY = "Client API requests" + def __init__(self, hs: "HomeServer"): super().__init__() self.store = hs.get_datastores().main diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 07bda7d6be21..b958a39aebb1 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1740,42 +1740,6 @@ def _clear_old_push_actions_staging_txn(txn: LoggingTransaction) -> bool: # We sleep to ensure that we don't overwhelm the DB. await self._clock.sleep(1.0) - -class EventPushActionsStore(EventPushActionsWorkerStore): - EPA_HIGHLIGHT_INDEX = "epa_highlight_index" - - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - - self.db_pool.updates.register_background_index_update( - self.EPA_HIGHLIGHT_INDEX, - index_name="event_push_actions_u_highlight", - table="event_push_actions", - columns=["user_id", "stream_ordering"], - ) - - self.db_pool.updates.register_background_index_update( - "event_push_actions_highlights_index", - index_name="event_push_actions_highlights_index", - table="event_push_actions", - columns=["user_id", "room_id", "topological_ordering", "stream_ordering"], - where_clause="highlight=1", - ) - - # Add index to make deleting old push actions faster. 
- self.db_pool.updates.register_background_index_update( - "event_push_actions_stream_highlight_index", - index_name="event_push_actions_stream_highlight_index", - table="event_push_actions", - columns=["highlight", "stream_ordering"], - where_clause="highlight=0", - ) - async def get_push_actions_for_user( self, user_id: str, @@ -1834,6 +1798,42 @@ def f( ] +class EventPushActionsStore(EventPushActionsWorkerStore): + EPA_HIGHLIGHT_INDEX = "epa_highlight_index" + + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_index_update( + self.EPA_HIGHLIGHT_INDEX, + index_name="event_push_actions_u_highlight", + table="event_push_actions", + columns=["user_id", "stream_ordering"], + ) + + self.db_pool.updates.register_background_index_update( + "event_push_actions_highlights_index", + index_name="event_push_actions_highlights_index", + table="event_push_actions", + columns=["user_id", "room_id", "topological_ordering", "stream_ordering"], + where_clause="highlight=1", + ) + + # Add index to make deleting old push actions faster. + self.db_pool.updates.register_background_index_update( + "event_push_actions_stream_highlight_index", + index_name="event_push_actions_stream_highlight_index", + table="event_push_actions", + columns=["highlight", "stream_ordering"], + where_clause="highlight=0", + ) + + def _action_has_highlight(actions: Collection[Union[Mapping, str]]) -> bool: for action in actions: if not isinstance(action, dict): From 1cd410a7833984ef69a7dcecf8997f4c45d609cd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Sep 2023 13:45:43 +0100 Subject: [PATCH 19/75] Recheck if remote device is cached before requesting it (#16252) This fixes a bug where we could get stuck re-requesting the device over replication again and again. --- changelog.d/16252.bugfix | 1 + synapse/handlers/device.py | 21 ++++++++++++------ synapse/replication/http/devices.py | 4 ++-- synapse/storage/databases/main/devices.py | 26 +++++++++++++++-------- 4 files changed, 35 insertions(+), 17 deletions(-) create mode 100644 changelog.d/16252.bugfix diff --git a/changelog.d/16252.bugfix b/changelog.d/16252.bugfix new file mode 100644 index 000000000000..881bc00e6153 --- /dev/null +++ b/changelog.d/16252.bugfix @@ -0,0 +1 @@ +Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9e52af5f13ab..9356ae998e5e 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -1030,7 +1030,7 @@ def __init__(self, hs: "HomeServer"): async def multi_user_device_resync( self, user_ids: List[str], mark_failed_as_stale: bool = True - ) -> Dict[str, Optional[JsonDict]]: + ) -> Dict[str, Optional[JsonMapping]]: """ Like `user_device_resync` but operates on multiple users **from the same origin** at once. @@ -1059,6 +1059,7 @@ def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): self._notifier = hs.get_notifier() self._remote_edu_linearizer = Linearizer(name="remote_device_list") + self._resync_linearizer = Linearizer(name="remote_device_resync") # user_id -> list of updates waiting to be handled. 
self._pending_updates: Dict[ @@ -1301,7 +1302,7 @@ async def _maybe_retry_device_resync(self) -> None: async def multi_user_device_resync( self, user_ids: List[str], mark_failed_as_stale: bool = True - ) -> Dict[str, Optional[JsonDict]]: + ) -> Dict[str, Optional[JsonMapping]]: """ Like `user_device_resync` but operates on multiple users **from the same origin** at once. @@ -1321,9 +1322,11 @@ async def multi_user_device_resync( failed = set() # TODO(Perf): Actually batch these up for user_id in user_ids: - user_result, user_failed = await self._user_device_resync_returning_failed( - user_id - ) + async with self._resync_linearizer.queue(user_id): + ( + user_result, + user_failed, + ) = await self._user_device_resync_returning_failed(user_id) result[user_id] = user_result if user_failed: failed.add(user_id) @@ -1335,7 +1338,7 @@ async def multi_user_device_resync( async def _user_device_resync_returning_failed( self, user_id: str - ) -> Tuple[Optional[JsonDict], bool]: + ) -> Tuple[Optional[JsonMapping], bool]: """Fetches all devices for a user and updates the device cache with them. Args: @@ -1348,6 +1351,12 @@ async def _user_device_resync_returning_failed( e.g. due to a connection problem. - True iff the resync failed and the device list should be marked as stale. """ + # Check that we haven't gone and fetched the devices since we last + # checked if we needed to resync these device lists. + if await self.store.get_users_whose_devices_are_cached([user_id]): + cached = await self.store.get_cached_devices_for_user(user_id) + return cached, False + logger.debug("Attempting to resync the device list for %s", user_id) log_kv({"message": "Doing resync to update device list."}) # Fetch all devices for the user. diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 209833d28753..b8198e059c97 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -20,7 +20,7 @@ from synapse.http.server import HttpServer from synapse.logging.opentracing import active_span from synapse.replication.http._base import ReplicationEndpoint -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping if TYPE_CHECKING: from synapse.server import HomeServer @@ -82,7 +82,7 @@ async def _serialize_payload(user_ids: List[str]) -> JsonDict: # type: ignore[o async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict - ) -> Tuple[int, Dict[str, Optional[JsonDict]]]: + ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]: user_ids: List[str] = content["user_ids"] logger.info("Resync for %r", user_ids) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 324fdfa892bd..70faf4b1ecca 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -759,18 +759,10 @@ async def get_user_devices_from_cache( mapping of user_id -> device_id -> device_info. """ unique_user_ids = user_ids | {user_id for user_id, _ in user_and_device_ids} - user_map = await self.get_device_list_last_stream_id_for_remotes( - list(unique_user_ids) - ) - # We go and check if any of the users need to have their device lists - # resynced. If they do then we remove them from the cached list. 
- users_needing_resync = await self.get_user_ids_requiring_device_list_resync( + user_ids_in_cache = await self.get_users_whose_devices_are_cached( unique_user_ids ) - user_ids_in_cache = { - user_id for user_id, stream_id in user_map.items() if stream_id - } - users_needing_resync user_ids_not_in_cache = unique_user_ids - user_ids_in_cache # First fetch all the users which all devices are to be returned. @@ -792,6 +784,22 @@ async def get_user_devices_from_cache( return user_ids_not_in_cache, results + async def get_users_whose_devices_are_cached( + self, user_ids: StrCollection + ) -> Set[str]: + """Checks which of the given users we have cached the devices for.""" + user_map = await self.get_device_list_last_stream_id_for_remotes(user_ids) + + # We go and check if any of the users need to have their device lists + # resynced. If they do then we remove them from the cached list. + users_needing_resync = await self.get_user_ids_requiring_device_list_resync( + user_ids + ) + user_ids_in_cache = { + user_id for user_id, stream_id in user_map.items() if stream_id + } - users_needing_resync + return user_ids_in_cache + @cached(num_args=2, tree=True) async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict: content = await self.db_pool.simple_select_one_onecol( From d23c394669660a7226c818f222a76ec0905e126e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 Sep 2023 13:06:00 +0100 Subject: [PATCH 20/75] Reduce CPU overhead of change password endpoint (#16264) --- changelog.d/16264.misc | 1 + synapse/rest/client/account.py | 112 ++++++++++++++++----------------- 2 files changed, 55 insertions(+), 58 deletions(-) create mode 100644 changelog.d/16264.misc diff --git a/changelog.d/16264.misc b/changelog.d/16264.misc new file mode 100644 index 000000000000..a744434bef06 --- /dev/null +++ b/changelog.d/16264.misc @@ -0,0 +1 @@ +Reduce CPU overhead of change password endpoint. diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 679ab9f266c7..196b292890dc 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -179,85 +179,81 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # # In the second case, we require a password to confirm their identity. - requester = None - if self.auth.has_access_token(request): - requester = await self.auth.get_user_by_req(request) - try: + try: + requester = None + if self.auth.has_access_token(request): + requester = await self.auth.get_user_by_req(request) params, session_id = await self.auth_handler.validate_user_via_ui_auth( requester, request, body.dict(exclude_unset=True), "modify your account password", ) - except InteractiveAuthIncompleteError as e: - # The user needs to provide more steps to complete auth, but - # they're not required to provide the password again. - # - # If a password is available now, hash the provided password and - # store it for later. 
- if new_password: - new_password_hash = await self.auth_handler.hash(new_password) - await self.auth_handler.set_session_data( - e.session_id, - UIAuthSessionDataConstants.PASSWORD_HASH, - new_password_hash, - ) - raise - user_id = requester.user.to_string() - else: - try: + user_id = requester.user.to_string() + else: result, params, session_id = await self.auth_handler.check_ui_auth( [[LoginType.EMAIL_IDENTITY]], request, body.dict(exclude_unset=True), "modify your account password", ) - except InteractiveAuthIncompleteError as e: - # The user needs to provide more steps to complete auth, but - # they're not required to provide the password again. - # - # If a password is available now, hash the provided password and - # store it for later. - if new_password: - new_password_hash = await self.auth_handler.hash(new_password) - await self.auth_handler.set_session_data( - e.session_id, - UIAuthSessionDataConstants.PASSWORD_HASH, - new_password_hash, + + if LoginType.EMAIL_IDENTITY in result: + threepid = result[LoginType.EMAIL_IDENTITY] + if "medium" not in threepid or "address" not in threepid: + raise SynapseError(500, "Malformed threepid") + if threepid["medium"] == "email": + # For emails, canonicalise the address. + # We store all email addresses canonicalised in the DB. + # (See add_threepid in synapse/handlers/auth.py) + try: + threepid["address"] = validate_email(threepid["address"]) + except ValueError as e: + raise SynapseError(400, str(e)) + # if using email, we must know about the email they're authing with! + threepid_user_id = await self.datastore.get_user_id_by_threepid( + threepid["medium"], threepid["address"] ) + if not threepid_user_id: + raise SynapseError( + 404, "Email address not found", Codes.NOT_FOUND + ) + user_id = threepid_user_id + else: + logger.error("Auth succeeded but no known type! %r", result.keys()) + raise SynapseError(500, "", Codes.UNKNOWN) + + except InteractiveAuthIncompleteError as e: + # The user needs to provide more steps to complete auth, but + # they're not required to provide the password again. + # + # If a password is available now, hash the provided password and + # store it for later. We only do this if we don't already have the + # password hash stored, to avoid repeatedly hashing the password. + + if not new_password: raise - if LoginType.EMAIL_IDENTITY in result: - threepid = result[LoginType.EMAIL_IDENTITY] - if "medium" not in threepid or "address" not in threepid: - raise SynapseError(500, "Malformed threepid") - if threepid["medium"] == "email": - # For emails, canonicalise the address. - # We store all email addresses canonicalised in the DB. - # (See add_threepid in synapse/handlers/auth.py) - try: - threepid["address"] = validate_email(threepid["address"]) - except ValueError as e: - raise SynapseError(400, str(e)) - # if using email, we must know about the email they're authing with! - threepid_user_id = await self.datastore.get_user_id_by_threepid( - threepid["medium"], threepid["address"] - ) - if not threepid_user_id: - raise SynapseError(404, "Email address not found", Codes.NOT_FOUND) - user_id = threepid_user_id - else: - logger.error("Auth succeeded but no known type! 
%r", result.keys()) - raise SynapseError(500, "", Codes.UNKNOWN) + existing_session_password_hash = await self.auth_handler.get_session_data( + e.session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None + ) + if existing_session_password_hash: + raise + + new_password_hash = await self.auth_handler.hash(new_password) + await self.auth_handler.set_session_data( + e.session_id, + UIAuthSessionDataConstants.PASSWORD_HASH, + new_password_hash, + ) + raise # If we have a password in this request, prefer it. Otherwise, use the # password hash from an earlier request. if new_password: password_hash: Optional[str] = await self.auth_handler.hash(new_password) elif session_id is not None: - password_hash = await self.auth_handler.get_session_data( - session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None - ) + password_hash = existing_session_password_hash else: # UI validation was skipped, but the request did not include a new # password. From 583d5963e6179689ed7d01be5eec36733be0444f Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 8 Sep 2023 14:10:26 +0200 Subject: [PATCH 21/75] Raise setuptools_rust version cap to 1.7.0 (#16277) --- changelog.d/16277.misc | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16277.misc diff --git a/changelog.d/16277.misc b/changelog.d/16277.misc new file mode 100644 index 000000000000..c131a46ec32b --- /dev/null +++ b/changelog.d/16277.misc @@ -0,0 +1 @@ +Raise setuptools_rust version cap to 1.7.0. diff --git a/pyproject.toml b/pyproject.toml index c17f4da72d4d..5b43abe9071c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -370,7 +370,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.6.0"] +requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.7.0"] build-backend = "poetry.core.masonry.api" From 69b74d9330e42fc91a9c7423d00a06cd6d3732bf Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 8 Sep 2023 08:57:56 -0400 Subject: [PATCH 22/75] Avoid temporary storage of sensitive information. (#16272) During the UI auth process, avoid storing sensitive information into the database. --- changelog.d/16272.bugfix | 1 + synapse/rest/client/account.py | 4 ++-- tests/rest/client/test_account.py | 13 +++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16272.bugfix diff --git a/changelog.d/16272.bugfix b/changelog.d/16272.bugfix new file mode 100644 index 000000000000..afb22a999f90 --- /dev/null +++ b/changelog.d/16272.bugfix @@ -0,0 +1 @@ +Avoid temporary storage of sensitive information. 
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 196b292890dc..49cd0805fdee 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -186,7 +186,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: params, session_id = await self.auth_handler.validate_user_via_ui_auth( requester, request, - body.dict(exclude_unset=True), + body.dict(exclude_unset=True, exclude={"new_password"}), "modify your account password", ) user_id = requester.user.to_string() @@ -194,7 +194,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: result, params, session_id = await self.auth_handler.check_ui_auth( [[LoginType.EMAIL_IDENTITY]], request, - body.dict(exclude_unset=True), + body.dict(exclude_unset=True, exclude={"new_password"}), "modify your account password", ) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index e9f495e20671..4a0eca5b3073 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -31,6 +31,7 @@ from synapse.rest.client import account, login, register, room from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource from synapse.server import HomeServer +from synapse.storage._base import db_to_json from synapse.types import JsonDict, UserID from synapse.util import Clock @@ -134,6 +135,18 @@ def test_basic_password_reset(self) -> None: # Assert we can't log in with the old password self.attempt_wrong_password_login("kermit", old_password) + # Check that the UI Auth information doesn't store the password in the database. + # + # Note that we don't have the UI Auth session ID, so just pull out the single + # row. + ui_auth_data = self.get_success( + self.store.db_pool.simple_select_one( + "ui_auth_sessions", keyvalues={}, retcols=("clientdict",) + ) + ) + client_dict = db_to_json(ui_auth_data["clientdict"]) + self.assertNotIn("new_password", client_dict) + @override_config({"rc_3pid_validation": {"burst_count": 3}}) def test_ratelimit_by_email(self) -> None: """Test that we ratelimit /requestToken for the same email.""" From 9084429a6c6287f37eb6743a9cdc1731f4ea8ed4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Sep 2023 08:59:23 -0400 Subject: [PATCH 23/75] Bump gitpython from 3.1.34 to 3.1.35 (#16279) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index d7bbfbd358e4..b577ae4f18df 100644 --- a/poetry.lock +++ b/poetry.lock @@ -586,13 +586,13 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.34" +version = "3.1.35" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.34-py3-none-any.whl", hash = "sha256:5d3802b98a3bae1c2b8ae0e1ff2e4aa16bcdf02c145da34d092324f599f01395"}, - {file = "GitPython-3.1.34.tar.gz", hash = "sha256:85f7d365d1f6bf677ae51039c1ef67ca59091c7ebd5a3509aa399d4eda02d6dd"}, + {file = "GitPython-3.1.35-py3-none-any.whl", hash = "sha256:c19b4292d7a1d3c0f653858db273ff8a6614100d1eb1528b014ec97286193c09"}, + {file = "GitPython-3.1.35.tar.gz", hash = "sha256:9cbefbd1789a5fe9bcf621bb34d3f441f3a90c8461d377f84eda73e721d9b06b"}, ] [package.dependencies] From f43d99462413b0b572da2e52037db8b1135f5ea6 Mon Sep 17 00:00:00 2001 From: Erik Johnston 
Date: Fri, 8 Sep 2023 14:43:01 +0100 Subject: [PATCH 24/75] Fix bug with new task scheduler using lots of CPU. (#16278) Using the new `TaskScheduler` meant that we'd create lots of new metrics (due to adding the task ID to the description of the background process), resulting in requests for metrics taking an increasing amount of CPU. --- changelog.d/16278.misc | 1 + synapse/util/task_scheduler.py | 43 +++++++++++++++++----------------- 2 files changed, 23 insertions(+), 21 deletions(-) create mode 100644 changelog.d/16278.misc diff --git a/changelog.d/16278.misc b/changelog.d/16278.misc new file mode 100644 index 000000000000..e82a470c45b6 --- /dev/null +++ b/changelog.d/16278.misc @@ -0,0 +1 @@ +Fix the new task scheduler using lots of CPU. diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 9b2581e51a15..b7de201bdeda 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -19,6 +19,7 @@ from twisted.python.failure import Failure +from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import JsonMapping, ScheduledTask, TaskStatus from synapse.util.stringutils import random_string @@ -316,26 +317,27 @@ async def _launch_task(self, task: ScheduledTask) -> None: function = self._actions[task.action] async def wrapper() -> None: - try: - (status, result, error) = await function(task) - except Exception: - f = Failure() - logger.error( - f"scheduled task {task.id} failed", - exc_info=(f.type, f.value, f.getTracebackObject()), + with nested_logging_context(task.id): + try: + (status, result, error) = await function(task) + except Exception: + f = Failure() + logger.error( + f"scheduled task {task.id} failed", + exc_info=(f.type, f.value, f.getTracebackObject()), + ) + status = TaskStatus.FAILED + result = None + error = f.getErrorMessage() + + await self._store.update_scheduled_task( + task.id, + self._clock.time_msec(), + status=status, + result=result, + error=error, ) - status = TaskStatus.FAILED - result = None - error = f.getErrorMessage() - - await self._store.update_scheduled_task( - task.id, - self._clock.time_msec(), - status=status, - result=result, - error=error, - ) - self._running_tasks.remove(task.id) + self._running_tasks.remove(task.id) if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return @@ -353,5 +355,4 @@ async def wrapper() -> None: self._running_tasks.add(task.id) await self.update_task(task.id, status=TaskStatus.ACTIVE) - description = f"{task.id}-{task.action}" - run_as_background_process(description, wrapper) + run_as_background_process(task.action, wrapper) From 5c8870cb28cc98d0b7a310b5dd2fade7ff45743d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 8 Sep 2023 09:47:36 -0400 Subject: [PATCH 25/75] Fix-up incorrect spellings in docs.
(#16282) --- changelog.d/16282.doc | 1 + docs/ancient_architecture_notes.md | 2 +- docs/changelogs/CHANGES-2019.md | 12 +- docs/changelogs/CHANGES-2020.md | 18 +-- docs/changelogs/CHANGES-2021.md | 4 +- docs/changelogs/CHANGES-2022.md | 18 +-- docs/changelogs/CHANGES-pre-1.0.md | 150 +++++++++--------- docs/consent_tracking.md | 4 +- docs/development/contributing_guide.md | 2 +- .../synapse_architecture/faster_joins.md | 2 +- docs/log_contexts.md | 2 +- docs/postgres.md | 2 +- docs/setup/installation.md | 4 +- docs/tcp_replication.md | 4 +- docs/turn-howto.md | 2 +- docs/upgrade.md | 6 +- .../administration/admin_api/federation.md | 2 +- .../configuration/config_documentation.md | 4 +- 18 files changed, 120 insertions(+), 119 deletions(-) create mode 100644 changelog.d/16282.doc diff --git a/changelog.d/16282.doc b/changelog.d/16282.doc new file mode 100644 index 000000000000..b249ea4f9fb4 --- /dev/null +++ b/changelog.d/16282.doc @@ -0,0 +1 @@ +Fix typos in the documentation. diff --git a/docs/ancient_architecture_notes.md b/docs/ancient_architecture_notes.md index 07bb199d7afb..005b434ab88f 100644 --- a/docs/ancient_architecture_notes.md +++ b/docs/ancient_architecture_notes.md @@ -24,7 +24,7 @@ Server with a domain specific API. 1. **Messaging Layer** This is what the rest of the homeserver hits to send messages, join rooms, - etc. It also allows you to register callbacks for when it get's notified by + etc. It also allows you to register callbacks for when it gets notified by lower levels that e.g. a new message has been received. It is responsible for serializing requests to send to the data diff --git a/docs/changelogs/CHANGES-2019.md b/docs/changelogs/CHANGES-2019.md index a356cc49a315..1d01c83f0788 100644 --- a/docs/changelogs/CHANGES-2019.md +++ b/docs/changelogs/CHANGES-2019.md @@ -164,7 +164,7 @@ Synapse 1.6.0rc2 (2019-11-25) Bugfixes -------- -- Fix a bug which could cause the background database update hander for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407)) +- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407)) Synapse 1.6.0rc1 (2019-11-20) @@ -191,7 +191,7 @@ Bugfixes - Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306)) - Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307)) - Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313)) -- Fix bug which casued rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320)) +- Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320)) - Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335)) - Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338)) - Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. 
([\#6359](https://github.com/matrix-org/synapse/issues/6359)) @@ -232,7 +232,7 @@ Internal Changes - Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305)) - Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308)) - Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312)) -- Add optional python dependencies and dependant binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317)) +- Add optional python dependencies and dependent binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317)) - Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336)) - Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319)) - Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330)) @@ -653,7 +653,7 @@ Internal Changes - Return 502 not 500 when failing to reach any remote server. ([\#5810](https://github.com/matrix-org/synapse/issues/5810)) - Reduce global pauses in the events stream caused by expensive state resolution during persistence. ([\#5826](https://github.com/matrix-org/synapse/issues/5826)) - Add a lower bound to well-known lookup cache time to avoid repeated lookups. ([\#5836](https://github.com/matrix-org/synapse/issues/5836)) -- Whitelist history visbility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843)) +- Whitelist history visibility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843)) Synapse 1.2.1 (2019-07-26) @@ -817,7 +817,7 @@ See the [upgrade notes](docs/upgrade.md#upgrading-to-v110) for more details. Features -------- -- Added possibilty to disable local password authentication. Contributed by Daniel Hoffend. ([\#5092](https://github.com/matrix-org/synapse/issues/5092)) +- Added possibility to disable local password authentication. Contributed by Daniel Hoffend. ([\#5092](https://github.com/matrix-org/synapse/issues/5092)) - Add monthly active users to phonehome stats. ([\#5252](https://github.com/matrix-org/synapse/issues/5252)) - Allow expired user to trigger renewal email sending manually. ([\#5363](https://github.com/matrix-org/synapse/issues/5363)) - Statistics on forward extremities per room are now exposed via Prometheus. ([\#5384](https://github.com/matrix-org/synapse/issues/5384), [\#5458](https://github.com/matrix-org/synapse/issues/5458), [\#5461](https://github.com/matrix-org/synapse/issues/5461)) @@ -850,7 +850,7 @@ Bugfixes - Fix bug where clients could tight loop calling `/sync` for a period. ([\#5507](https://github.com/matrix-org/synapse/issues/5507)) - Fix bug with `jinja2` preventing Synapse from starting. Users who had this problem should now simply need to run `pip install matrix-synapse`. ([\#5514](https://github.com/matrix-org/synapse/issues/5514)) - Fix a regression where homeservers on private IP addresses were incorrectly blacklisted. 
([\#5523](https://github.com/matrix-org/synapse/issues/5523)) -- Fixed m.login.jwt using unregistred user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill. ([\#5555](https://github.com/matrix-org/synapse/issues/5555), [\#5586](https://github.com/matrix-org/synapse/issues/5586)) +- Fixed m.login.jwt using unregistered user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill. ([\#5555](https://github.com/matrix-org/synapse/issues/5555), [\#5586](https://github.com/matrix-org/synapse/issues/5586)) - Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited. ([\#5576](https://github.com/matrix-org/synapse/issues/5576)) diff --git a/docs/changelogs/CHANGES-2020.md b/docs/changelogs/CHANGES-2020.md index 6b87022251a2..c3739796fb38 100644 --- a/docs/changelogs/CHANGES-2020.md +++ b/docs/changelogs/CHANGES-2020.md @@ -251,7 +251,7 @@ Internal Changes - Optimise `/createRoom` with multiple invited users. ([\#8559](https://github.com/matrix-org/synapse/issues/8559)) - Implement and use an `@lru_cache` decorator. ([\#8595](https://github.com/matrix-org/synapse/issues/8595)) -- Don't instansiate Requester directly. ([\#8614](https://github.com/matrix-org/synapse/issues/8614)) +- Don't instantiate Requester directly. ([\#8614](https://github.com/matrix-org/synapse/issues/8614)) - Type hints for `RegistrationStore`. ([\#8615](https://github.com/matrix-org/synapse/issues/8615)) - Change schema to support access tokens belonging to one user but granting access to another. ([\#8616](https://github.com/matrix-org/synapse/issues/8616)) - Remove unused OPTIONS handlers. ([\#8621](https://github.com/matrix-org/synapse/issues/8621)) @@ -518,7 +518,7 @@ Bugfixes - Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. ([\#8278](https://github.com/matrix-org/synapse/issues/8278)) - Fix edge case where push could get delayed for a user until a later event was pushed. ([\#8287](https://github.com/matrix-org/synapse/issues/8287)) - Fix fetching malformed events from remote servers. ([\#8324](https://github.com/matrix-org/synapse/issues/8324)) -- Fix `UnboundLocalError` from occuring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329)) +- Fix `UnboundLocalError` from occurring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329)) - Don't send push notifications to expired user accounts. ([\#8353](https://github.com/matrix-org/synapse/issues/8353)) - Fix a regression in v1.19.0 with reactivating users through the admin API. ([\#8362](https://github.com/matrix-org/synapse/issues/8362)) - Fix a bug where during device registration the length of the device name wasn't limited. ([\#8364](https://github.com/matrix-org/synapse/issues/8364)) @@ -815,7 +815,7 @@ Bugfixes - Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory. ([\#7977](https://github.com/matrix-org/synapse/issues/7977)) - Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured. ([\#7978](https://github.com/matrix-org/synapse/issues/7978)) - Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0. 
([\#7980](https://github.com/matrix-org/synapse/issues/7980)) -- Fix various comments and minor discrepencies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996)) +- Fix various comments and minor discrepancies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996)) - Fix a long standing bug where HTTP HEAD requests resulted in a 400 error. ([\#7999](https://github.com/matrix-org/synapse/issues/7999)) - Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger. ([\#8011](https://github.com/matrix-org/synapse/issues/8011), [\#8012](https://github.com/matrix-org/synapse/issues/8012)) @@ -1460,7 +1460,7 @@ Bugfixes - Transfer alias mappings on room upgrade. ([\#6946](https://github.com/matrix-org/synapse/issues/6946)) - Ensure that a user interactive authentication session is tied to a single request. ([\#7068](https://github.com/matrix-org/synapse/issues/7068), [\#7455](https://github.com/matrix-org/synapse/issues/7455)) - Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors. ([\#7089](https://github.com/matrix-org/synapse/issues/7089)) -- Return the proper error (`M_BAD_ALIAS`) when a non-existant canonical alias is provided. ([\#7109](https://github.com/matrix-org/synapse/issues/7109)) +- Return the proper error (`M_BAD_ALIAS`) when a non-existent canonical alias is provided. ([\#7109](https://github.com/matrix-org/synapse/issues/7109)) - Fix a bug which meant that groups updates were not correctly replicated between workers. ([\#7117](https://github.com/matrix-org/synapse/issues/7117)) - Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133)) - Ensure `is_verified` is a boolean in responses to `GET /_matrix/client/r0/room_keys/keys`. Also warn the user if they forgot the `version` query param. ([\#7150](https://github.com/matrix-org/synapse/issues/7150)) @@ -1482,7 +1482,7 @@ Bugfixes - Fix bad error handling that would cause Synapse to crash if it's provided with a YAML configuration file that's either empty or doesn't parse into a key-value map. ([\#7341](https://github.com/matrix-org/synapse/issues/7341)) - Fix incorrect metrics reporting for `renew_attestations` background task. ([\#7344](https://github.com/matrix-org/synapse/issues/7344)) - Prevent non-federating rooms from appearing in responses to federated `POST /publicRoom` requests when a filter was included. ([\#7367](https://github.com/matrix-org/synapse/issues/7367)) -- Fix a bug which would cause the room durectory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](docs/upgrade.md#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387)) +- Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](docs/upgrade.md#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387)) - Fix bug in `EventContext.deserialize`. 
([\#7393](https://github.com/matrix-org/synapse/issues/7393)) @@ -1638,7 +1638,7 @@ Security advisory ----------------- Synapse may be vulnerable to request-smuggling attacks when it is used with a -reverse-proxy. The vulnerabilties are fixed in Twisted 20.3.0, and are +reverse-proxy. The vulnerabilities are fixed in Twisted 20.3.0, and are described in [CVE-2020-10108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10108) and @@ -1748,7 +1748,7 @@ Internal Changes - Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003)) - Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095)) - Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953)) -- Minor perfermance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954)) +- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954)) - Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956)) - Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957)) - Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104)) @@ -1809,7 +1809,7 @@ Bugfixes - Allow URL-encoded User IDs on `/_synapse/admin/v2/users/[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825)) - Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849)) - Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945)) -- Return a 404 instead of 200 for querying information of a non-existant user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901)) +- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901)) Updates to the Docker image @@ -1889,7 +1889,7 @@ Bugfixes Synapse 1.10.0rc4 (2020-02-11) ============================== -This release candidate was built incorrectly and is superceded by 1.10.0rc5. +This release candidate was built incorrectly and is superseded by 1.10.0rc5. Synapse 1.10.0rc3 (2020-02-10) ============================== diff --git a/docs/changelogs/CHANGES-2021.md b/docs/changelogs/CHANGES-2021.md index 8e349504d595..2247d6333c9b 100644 --- a/docs/changelogs/CHANGES-2021.md +++ b/docs/changelogs/CHANGES-2021.md @@ -2270,7 +2270,7 @@ Features Bugfixes -------- -- Fix spurious errors in logs when deleting a non-existant pusher. ([\#9121](https://github.com/matrix-org/synapse/issues/9121)) +- Fix spurious errors in logs when deleting a non-existent pusher. 
([\#9121](https://github.com/matrix-org/synapse/issues/9121)) - Fix a long-standing bug where Synapse would return a 500 error when a thumbnail did not exist (and auto-generation of thumbnails was not enabled). ([\#9163](https://github.com/matrix-org/synapse/issues/9163)) - Fix a long-standing bug where an internal server error was raised when attempting to preview an HTML document in an unknown character encoding. ([\#9164](https://github.com/matrix-org/synapse/issues/9164)) - Fix a long-standing bug where invalid data could cause errors when calculating the presentable room name for push. ([\#9165](https://github.com/matrix-org/synapse/issues/9165)) @@ -2522,7 +2522,7 @@ Bugfixes - Fix a long-standing bug where a `m.image` event without a `url` would cause errors on push. ([\#8965](https://github.com/matrix-org/synapse/issues/8965)) - Fix a small bug in v2 state resolution algorithm, which could also cause performance issues for rooms with large numbers of power levels. ([\#8971](https://github.com/matrix-org/synapse/issues/8971)) - Add validation to the `sendToDevice` API to raise a missing parameters error instead of a 500 error. ([\#8975](https://github.com/matrix-org/synapse/issues/8975)) -- Add validation of group IDs to raise a 400 error instead of a 500 eror. ([\#8977](https://github.com/matrix-org/synapse/issues/8977)) +- Add validation of group IDs to raise a 400 error instead of a 500 error. ([\#8977](https://github.com/matrix-org/synapse/issues/8977)) Improved Documentation diff --git a/docs/changelogs/CHANGES-2022.md b/docs/changelogs/CHANGES-2022.md index 81e284951620..a9eced7c9e3d 100644 --- a/docs/changelogs/CHANGES-2022.md +++ b/docs/changelogs/CHANGES-2022.md @@ -208,7 +208,7 @@ Improved Documentation ---------------------- - Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370)) -- Add addtional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293)) +- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293)) - Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297)) - Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414)) @@ -490,7 +490,7 @@ Internal Changes - When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214)) - Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224)) - Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217)) -- Rename the `cache_memory` extra to `cache-memory`, for compatability with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. 
([\#14221](https://github.com/matrix-org/synapse/issues/14221)) +- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221)) - Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227)) @@ -534,7 +534,7 @@ Bugfixes Internal Changes ---------------- -- Rename the `url_preview` extra to `url-preview`, for compatability with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085)) +- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085)) Synapse 1.69.0rc2 (2022-10-06) @@ -719,7 +719,7 @@ Improved Documentation - Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480)) - Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726)) - Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727)) -- Define Synapse's compatability policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728)) +- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728)) - Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785)) - Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794)) @@ -2035,7 +2035,7 @@ Internal Changes - Add opentracing spans to calls to external cache. ([\#12380](https://github.com/matrix-org/synapse/issues/12380)) - Lay groundwork for using `poetry` to manage Synapse's dependencies. ([\#12381](https://github.com/matrix-org/synapse/issues/12381), [\#12407](https://github.com/matrix-org/synapse/issues/12407), [\#12412](https://github.com/matrix-org/synapse/issues/12412), [\#12418](https://github.com/matrix-org/synapse/issues/12418)) - Make missing `importlib_metadata` dependency explicit. ([\#12384](https://github.com/matrix-org/synapse/issues/12384), [\#12400](https://github.com/matrix-org/synapse/issues/12400)) -- Update type annotations for compatiblity with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389)) +- Update type annotations for compatibility with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389)) - Remove support for the unstable identifiers specified in [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288). ([\#12398](https://github.com/matrix-org/synapse/issues/12398)) - Add missing type hints to configuration classes. 
([\#12402](https://github.com/matrix-org/synapse/issues/12402)) - Add files used to build the Docker image used for complement testing into the Synapse repository. ([\#12404](https://github.com/matrix-org/synapse/issues/12404)) @@ -2207,7 +2207,7 @@ Deprecations and Removals - **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** - **`synctl` has been moved into into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) -- Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) +- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) @@ -2586,10 +2586,10 @@ Bugfixes Improved Documentation ---------------------- -- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contibuted by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686)) +- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686)) - Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715)) - Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725)) -- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735)) +- Fix typo in demo docs: different. ([\#11735](https://github.com/matrix-org/synapse/issues/11735)) - Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739)) - Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740)) - Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755)) @@ -2707,7 +2707,7 @@ Improved Documentation - Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267)) - Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427)) -- Add details for how to configure a TURN server when behind a NAT. Contibuted by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553)) +- Add details for how to configure a TURN server when behind a NAT. Contributed by @AndrewFerr. 
([\#11553](https://github.com/matrix-org/synapse/issues/11553)) - Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640)) - Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678)) - Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680)) diff --git a/docs/changelogs/CHANGES-pre-1.0.md b/docs/changelogs/CHANGES-pre-1.0.md index bcd33d2256f1..e414dbb3b189 100644 --- a/docs/changelogs/CHANGES-pre-1.0.md +++ b/docs/changelogs/CHANGES-pre-1.0.md @@ -823,7 +823,7 @@ Bugfixes - Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960)) - Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961)) - Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968)) -- Replaced all occurences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970)) +- Replaced all occurrences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970)) - Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986)) - Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990)) @@ -841,7 +841,7 @@ Internal Changes - Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911)) - Add a regression test for logging failed HTTP requests on Python 3. 
([\#3912](https://github.com/matrix-org/synapse/issues/3912)) - Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924)) -- Fix spurious exceptions when remote http client closes conncetion ([\#3925](https://github.com/matrix-org/synapse/issues/3925)) +- Fix spurious exceptions when remote http client closes connection ([\#3925](https://github.com/matrix-org/synapse/issues/3925)) - Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927)) - Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991)) - Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946)) @@ -1057,7 +1057,7 @@ Bugfixes - Make the tests pass on Twisted < 18.7.0 ([\#3676](https://github.com/matrix-org/synapse/issues/3676)) - Don’t ship recaptcha_ajax.js, use it directly from Google ([\#3677](https://github.com/matrix-org/synapse/issues/3677)) - Fixes test_reap_monthly_active_users so it passes under postgres ([\#3681](https://github.com/matrix-org/synapse/issues/3681)) -- Fix mau blocking calulation bug on login ([\#3689](https://github.com/matrix-org/synapse/issues/3689)) +- Fix mau blocking calculation bug on login ([\#3689](https://github.com/matrix-org/synapse/issues/3689)) - Fix missing yield in synapse.storage.monthly_active_users.initialise_reserved_users ([\#3692](https://github.com/matrix-org/synapse/issues/3692)) - Improve HTTP request logging to include all requests ([\#3700](https://github.com/matrix-org/synapse/issues/3700)) - Avoid timing out requests while we are streaming back the response ([\#3701](https://github.com/matrix-org/synapse/issues/3701)) @@ -1314,10 +1314,10 @@ Changes: - Remove users from user directory on deactivate (PR #3277) - Avoid sending consent notice to guest users (PR #3288) - disable CPUMetrics if no /proc/self/stat (PR #3299) -- Consistently use six\'s iteritems and wrap lazy keys/values in list() if they\'re not meant to be lazy (PR #3307) +- Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (PR #3307) - Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat! - Reduce stuck read-receipts: ignore depth when updating (PR #3318) -- Put python\'s logs into Trial when running unit tests (PR #3319) +- Put python's logs into Trial when running unit tests (PR #3319) Changes, python 3 migration: @@ -1344,13 +1344,13 @@ Changes in synapse v0.30.0 (2018-05-24) \'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. -They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as \"Message of the day\". +They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as "Message of the day". This feature is specific to Synapse, but uses standard Matrix communication mechanisms, so should work with any Matrix client. 
For more details see `docs/server_notices.md` Further Server Notices/Consent Tracking Support: -- Allow overriding the server\_notices user\'s avatar (PR #3273) +- Allow overriding the server\_notices user's avatar (PR #3273) - Use the localpart in the consent uri (PR #3272) - Support for putting %(consent\_uri)s in messages (PR #3271) - Block attempts to send server notices to remote users (PR #3270) @@ -1380,7 +1380,7 @@ Changes: - Remove unused update\_external\_syncs (PR #3233) - Use stream rather depth ordering for push actions (PR #3212) - Make purge\_history operate on tokens (PR #3221) -- Don\'t support limitless pagination (PR #3265) +- Don't support limitless pagination (PR #3265) Bug Fixes: @@ -1454,7 +1454,7 @@ Changes - Python 3 migration: - Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile! - Use six.moves.urlparse (PR #3108) Thanks to @NotAFile! - Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile! -- Don\'t yield in list comprehensions (PR #3150) Thanks to @NotAFile! +- Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile! - Move more xrange to six (PR #3151) Thanks to @NotAFile! - make imports local (PR #3152) Thanks to @NotAFile! - move httplib import to six (PR #3153) Thanks to @NotAFile! @@ -1556,7 +1556,7 @@ v0.27.3-rc1 used a stale version of the develop branch so the changelog overstat Changes in synapse v0.27.3-rc1 (2018-04-09) =========================================== -Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overal growth of the Matrix ecosystem. It is defined as:- +Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synapse to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:- Counts the number of native 30 day retained users, defined as:- \* Users who have created their accounts more than 30 days @@ -1654,7 +1654,7 @@ Changes: - No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
- Remove `verbosity`/`log_file` from generated config (PR #2755) - Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838) -- When using synctl with workers, don\'t start the main synapse automatically (PR #2774) +- When using synctl with workers, don't start the main synapse automatically (PR #2774) - Minor performance improvements (PR #2773, #2792) - Use a connection pool for non-federation outbound connections (PR #2817) - Make it possible to run unit tests against postgres (PR #2829) @@ -1848,7 +1848,7 @@ Changes: Bug fixes: - Fix caching error in the push evaluator (PR #2332) -- Fix bug where pusherpool didn\'t start and broke some rooms (PR #2342) +- Fix bug where pusherpool didn't start and broke some rooms (PR #2342) - Fix port script for user directory tables (PR #2375) - Fix device lists notifications when user rejoins a room (PR #2443, #2449) - Fix sync to always send down current state events in timeline (PR #2451) @@ -1860,7 +1860,7 @@ Changes in synapse v0.22.1 (2017-07-06) Bug fixes: -- Fix bug where pusher pool didn\'t start and caused issues when interacting with some rooms (PR #2342) +- Fix bug where pusher pool didn't start and caused issues when interacting with some rooms (PR #2342) Changes in synapse v0.22.0 (2017-07-06) ======================================= @@ -1933,7 +1933,7 @@ Changes: - Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229) - Update username availability checker API (PR #2209, #2213) -- When purging, don\'t de-delta state groups we\'re about to delete (PR #2214) +- When purging, don't de-delta state groups we're about to delete (PR #2214) - Documentation to check synapse version (PR #2215) Thanks to @hamber-dick! - Add an index to event\_search to speed up purge history API (PR #2218) @@ -1982,7 +1982,7 @@ Bug fixes: - Fix invite state to always include all events (PR #2163) - Fix bug where synapse would always fetch state for any missing event (PR #2170) - Fix a leak with timed out HTTP connections (PR #2180) -- Fix bug where we didn\'t time out HTTP requests to ASes (PR #2192) +- Fix bug where we didn't time out HTTP requests to ASes (PR #2192) Docs: @@ -2016,7 +2016,7 @@ Changes: - Minor `/sync` performance improvements. (PR #2002, #2013, #2022) - Add some debug to help diagnose weird federation issue (PR #2035) - Correctly limit retries for all federation requests (PR #2050, #2061) -- Don\'t lock table when persisting new one time keys (PR #2053) +- Don't lock table when persisting new one time keys (PR #2053) - Reduce some CPU work on DB threads (PR #2054) - Cache hosts in room (PR #2060) - Batch sending of device list pokes (PR #2063) @@ -2033,7 +2033,7 @@ Bug fixes: - Fix bug when federation received a PDU while a room join is in progress (PR #2016) - Fix resetting state on rejected events (PR #2025) - Fix installation issues in readme. Thanks @ricco386 (PR #2037) -- Fix caching of remote servers\' signature keys (PR #2042) +- Fix caching of remote servers' signature keys (PR #2042) - Fix some leaking log context (PR #2048, #2049, #2057, #2058) - Fix rejection of invites not reaching sync (PR #2056) @@ -2060,7 +2060,7 @@ Changes: - Reduce database table sizes (PR #1873, #1916, #1923, #1963) - Update contrib/ to not use syutil. Thanks to andrewshadura!
(PR #1907) -- Don\'t fetch current state when sending an event in common case (PR #1955) +- Don't fetch current state when sending an event in common case (PR #1955) Bug fixes: @@ -2068,7 +2068,7 @@ Bug fixes: - Fix caching to not cache error responses (PR #1913) - Fix APIs to make kick & ban reasons work (PR #1917) - Fix bugs in the /keys/changes api (PR #1921) -- Fix bug where users couldn\'t forget rooms they were banned from (PR #1922) +- Fix bug where users couldn't forget rooms they were banned from (PR #1922) - Fix issue with long language values in pushers API (PR #1925) - Fix a race in transaction queue (PR #1930) - Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR #1945) @@ -2129,7 +2129,7 @@ Changes: - Measure size of some caches by sum of the size of cached values (PR #1815) - Measure metrics of string\_cache (PR #1821) - Reduce logging verbosity (PR #1822, #1823, #1824) -- Don\'t clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852) +- Don't clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852) - Better handle 401/404 response for federation /send/ (PR #1866, #1871) Fixes: @@ -2141,7 +2141,7 @@ Fixes: Performance: -- Don\'t block messages sending on bumping presence (PR #1789) +- Don't block messages sending on bumping presence (PR #1789) - Change device\_inbox stream index to include user (PR #1793) - Optimise state resolution (PR #1818) - Use DB cache of joined users for presence (PR #1862) @@ -2157,7 +2157,7 @@ Changes in synapse v0.18.7-rc2 (2017-01-07) Bug fixes: -- Fix error in rc1\'s discarding invalid inbound traffic logic that was incorrectly discarding missing events +- Fix error in rc1's discarding invalid inbound traffic logic that was incorrectly discarding missing events Changes in synapse v0.18.7-rc1 (2017-01-06) =========================================== @@ -2181,7 +2181,7 @@ Changes in synapse v0.18.6-rc3 (2017-01-05) Bug fixes: - Fix bug where we failed to send ban events to the banned server (PR #1758) -- Fix bug where we sent event that didn\'t originate on this server to other servers (PR #1764) +- Fix bug where we sent event that didn't originate on this server to other servers (PR #1764) - Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests (PR #1765, PR #1744) Changes: @@ -2208,7 +2208,7 @@ Changes in synapse v0.18.5 (2016-12-16) Bug fixes: -- Fix federation /backfill returning events it shouldn\'t (PR #1700) +- Fix federation /backfill returning events it shouldn't (PR #1700) - Fix crash in url preview (PR #1701) Changes in synapse v0.18.5-rc3 (2016-12-13) @@ -2231,11 +2231,11 @@ Changes: Bug fixes: -- Fix handling of 500 and 429\'s over federation (PR #1650) +- Fix handling of 500 and 429's over federation (PR #1650) - Fix Content-Type header parsing (PR #1660) - Fix error when previewing sites that include unicode, thanks to kyrias (PR #1664) - Fix some cases where we drop read receipts (PR #1678) -- Fix bug where calls to `/sync` didn\'t correctly timeout (PR #1683) +- Fix bug where calls to `/sync` didn't correctly timeout (PR #1683) - Fix bug where E2E key query would fail if a single remote host failed (PR #1686) Changes in synapse v0.18.5-rc2 (2016-11-24) @@ -2243,7 +2243,7 @@ Changes in synapse v0.18.5-rc2 (2016-11-24) Bug fixes: -- Don\'t send old events over federation, fixes bug in -rc1. +- Don't send old events over federation, fixes bug in -rc1. 
Changes in synapse v0.18.5-rc1 (2016-11-24) =========================================== Features: @@ -2254,7 +2254,7 @@ Changes: -- Use external ldap auth pacakge (PR #1628) +- Use external ldap auth package (PR #1628) - Split out federation transaction sending to a worker (PR #1635) - Fail with a coherent error message if /sync?filter= is invalid (PR #1636) - More efficient notif count queries (PR #1644) @@ -2289,7 +2289,7 @@ SECURITY UPDATE Explicitly require authentication when using LDAP3. This is the default on versions of `ldap3` above 1.0, but some distributions will package an older version. -If you are using LDAP3 login and have a version of `ldap3` older than 1.0 it is **CRITICAL to updgrade**. +If you are using LDAP3 login and have a version of `ldap3` older than 1.0 it is **CRITICAL to upgrade**. Changes in synapse v0.18.2 (2016-11-01) ======================================= @@ -2440,7 +2440,7 @@ Features: Changes: - Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068) -- Don\'t notify for online to online presence transitions. (PR #1054) +- Don't notify for online to online presence transitions. (PR #1054) - Occasionally persist unpersisted presence updates (PR #1055) - Allow application services to have an optional \'url\' (PR #1056) - Clean up old sent transactions from DB (PR #1059) @@ -2472,7 +2472,7 @@ Features: Changes: -- Don\'t print stack traces when failing to get remote keys (PR #996) +- Don't print stack traces when failing to get remote keys (PR #996) - Various federation /event/ perf improvements (PR #998) - Only process one local membership event per room at a time (PR #1005) - Move default display name push rule (PR #1011, #1023) @@ -2488,7 +2488,7 @@ Bug fixes: - Fix /sync to not clobber status\_msg (PR #997) - Fix redacted state events to include prev\_content (PR #1003) - Fix some bugs in the auth/ldap handler (PR #1007) -- Fix backfill request to limit URI length, so that remotes don\'t reject the requests due to path length limits (PR #1012) +- Fix backfill request to limit URI length, so that remotes don't reject the requests due to path length limits (PR #1012) - Fix AS push code to not send duplicate events (PR #1025) Changes in synapse v0.17.0 (2016-08-08) ======================================= @@ -2577,8 +2577,8 @@ Changes: - Send the correct host header when fetching keys (PR #941) - Log the hostname the reCAPTCHA was completed on (PR #946) - Make the device id on e2e key upload optional (PR #956) -- Add r0.2.0 to the \"supported versions\" list (PR #960) -- Don\'t include name of room for invites in push (PR #961) +- Add r0.2.0 to the "supported versions" list (PR #960) +- Don't include name of room for invites in push (PR #961) Bug fixes: @@ -2596,7 +2596,7 @@ Changes in synapse v0.16.1-r1 (2016-07-08) THIS IS A CRITICAL SECURITY UPDATE. -This fixes a bug which allowed users\' accounts to be accessed by unauthorised users. +This fixes a bug which allowed users' accounts to be accessed by unauthorised users. Changes in synapse v0.16.1 (2016-06-20) ======================================= @@ -2619,7 +2619,7 @@ Features: None Changes: - Log requester for `/publicRoom` endpoints when possible (PR #856) -- 502 on `/thumbnail` when can\'t connect to remote server (PR #862) +- 502 on `/thumbnail` when can't connect to remote server (PR #862) - Linearize fetching of gaps on incoming events (PR #871) Bugs fixes: @@ -2640,7 +2640,7 @@ NB: As of v0.14 all AS config files must have an ID field.
Bug fixes: -- Don\'t make rooms published by default (PR #857) +- Don't make rooms published by default (PR #857) Changes in synapse v0.16.0-rc2 (2016-06-08) =========================================== @@ -2658,7 +2658,7 @@ Bug fixes: - Fix \'From\' header in email notifications (PR #843) - Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842) -- Fix bug where synapse sent malformed transactions to AS\'s when retrying transactions (Commits 310197b, 8437906) +- Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906) Performance improvements: @@ -2685,7 +2685,7 @@ Changes: - Report per request metrics for all of the things using request\_handler (PR #756) - Correctly handle `NULL` password hashes from the database (PR #775) -- Allow receipts for events we haven\'t seen in the db (PR #784) +- Allow receipts for events we haven't seen in the db (PR #784) - Make synctl read a cache factor from config file (PR #785) - Increment badge count per missed convo, not per msg (PR #793) - Special case m.room.third\_party\_invite event auth to match invites (PR #814) @@ -2737,7 +2737,7 @@ Changes: Bug fixes: - Fix bug where disabling all notifications still resulted in push (PR #678) -- Fix bug where users couldn\'t reject remote invites if remote refused (PR #691) +- Fix bug where users couldn't reject remote invites if remote refused (PR #691) - Fix bug where synapse attempted to backfill from itself (PR #693) - Fix bug where profile information was not correctly added when joining remote rooms (PR #703) - Fix bug where register API required incorrect key name for AS registration (PR #727) @@ -2775,7 +2775,7 @@ Features: - Add event\_id to response to state event PUT (PR #581) - Allow guest users access to messages in rooms they have joined (PR #587) - Add config for what state is included in a room invite (PR #598) -- Send the inviter\'s member event in room invite state (PR #607) +- Send the inviter's member event in room invite state (PR #607) - Add error codes for malformed/bad JSON in /login (PR #608) - Add support for changing the actions for default rules (PR #609) - Add environment variable SYNAPSE\_CACHE\_FACTOR, default it to 0.1 (PR #612) @@ -2788,7 +2788,7 @@ Changes: - Make adding push rules idempotent (PR #587) - Improve presence performance (PR #582, #586) - Change presence semantics for `last_active_ago` (PR #582, #586) -- Don\'t allow `m.room.create` to be changed (PR #596) +- Don't allow `m.room.create` to be changed (PR #596) - Add 800x600 to default list of valid thumbnail sizes (PR #616) - Always include kicks and bans in full /sync (PR #625) - Send history visibility on boundary changes (PR #626) @@ -2854,7 +2854,7 @@ Features: Changes: -- Change `/sync` so that guest users only get rooms they\'ve joined (PR #469) +- Change `/sync` so that guest users only get rooms they've joined (PR #469) - Change to require unbanning before other membership changes (PR #501) - Change default push rules to notify for all messages (PR #486) - Change default push rules to not notify on membership changes (PR #514) @@ -2863,12 +2863,12 @@ Changes: - Change server manhole to use SSH rather than telnet (PR #473) - Change server to require AS users to be registered before use (PR #487) - Change server not to start when ASes are invalidly configured (PR #494) -- Change server to require ID and `as_token` to be unique for AS\'s (PR #496) +- Change server to require ID and `as_token` to be unique for AS's (PR 
#496) - Change maximum pagination limit to 1000 (PR #497) Bug fixes: -- Fix bug where `/sync` didn\'t return when something under the leave key changed (PR #461) +- Fix bug where `/sync` didn't return when something under the leave key changed (PR #461) - Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop` (PR #464) - Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail (PR #475) - Fix bug where we occasionally still logged access tokens (PR #477) @@ -2888,7 +2888,7 @@ Changes in synapse v0.12.0-rc3 (2015-12-23) - Allow guest accounts access to `/sync` (PR #455) - Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. (PR #454) - Include urls for room avatars in the response to `/publicRooms` (PR #453) -- Don\'t set a identicon as the avatar for a user when they register (PR #450) +- Don't set an identicon as the avatar for a user when they register (PR #450) - Add a `display_name` to third-party invites (PR #449) - Send more information to the identity server for third-party invites so that it can send richer messages to the invitee (PR #446) - Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before the a response was computed to the first request then the same response is used for both requests (PR #457) @@ -2917,7 +2917,7 @@ Changes in synapse v0.12.0-rc1 (2015-12-10) - Filter JSON objects may now be passed as query parameters to `/sync` (PR #431) - Fix implementation of `/admin/whois` (PR #418) - Only include the rooms that user has left in `/sync` if the client requests them in the filter (PR #423) - - Don\'t push for `m.room.message` by default (PR #411) + - Don't push for `m.room.message` by default (PR #411) - Add API for setting per account user data (PR #392) - Allow users to forget rooms (PR #385) - Performance improvements and monitoring: @@ -2932,8 +2932,8 @@ Changes in synapse v0.11.1 (2015-11-20) ======================================= - Add extra options to search API (PR #394) -- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to ressurected servers, even when they were receiving traffic from them (PR #393) -- Don\'t advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391) +- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them (PR #393) +- Don't advertise login token flow unless CAS is enabled.
This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391) - Change /v2 sync API to rename `private_user_data` to `account_data` (PR #386) - Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object (PR #389) @@ -2973,7 +2973,7 @@ Changes in synapse v0.11.0-rc1 (2015-11-11) - Change retry schedule for application services (PR #320) - Change retry schedule for remote servers (PR #340) - Fix bug where we hosted static content in the incorrect place (PR #329) -- Fix bug where we didn\'t increment retry interval for remote servers (PR #343) +- Fix bug where we didn't increment retry interval for remote servers (PR #343) Changes in synapse v0.10.1-rc1 (2015-10-15) =========================================== @@ -3058,9 +3058,9 @@ General: - Error if a user tries to register with an email already in use. (PR #211) - Add extra and improve existing caches (PR #212, #219, #226, #228) - Batch various storage request (PR #226, #228) -- Fix bug where we didn\'t correctly log the entity that triggered the request if the request came in via an application service (PR #230) +- Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230) - Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232) -- Add support for AS\'s to use v2\_alpha registration API (PR #210) +- Add support for AS's to use v2\_alpha registration API (PR #210) Configuration: @@ -3148,7 +3148,7 @@ Configuration: Federation: - Improve resilience of backfill by ensuring we fetch any missing auth events. -- Improve performance of backfill and joining remote rooms by removing unnecessary computations. This included handling events we\'d previously handled as well as attempting to compute the current state for outliers. +- Improve performance of backfill and joining remote rooms by removing unnecessary computations. This included handling events we'd previously handled as well as attempting to compute the current state for outliers. Changes in synapse v0.9.1 (2015-05-26) ====================================== @@ -3156,7 +3156,7 @@ Changes in synapse v0.9.1 (2015-05-26) General: - Add support for backfilling when a client paginates. This allows servers to request history for a room from remote servers when a client tries to paginate history the server does not have - SYN-36 -- Fix bug where you couldn\'t disable non-default pushrules - SYN-378 +- Fix bug where you couldn't disable non-default pushrules - SYN-378 - Fix `register_new_user` script - SYN-359 - Improve performance of fetching events from the database, this improves both initialSync and sending of events. - Improve performance of event streams, allowing synapse to handle more simultaneous connected clients. @@ -3225,7 +3225,7 @@ General: - Added new default push rules and made them configurable by clients: - Suppress all notice messages. - Notify when invited to a new room. - - Notify for messages that don\'t match any rule. + - Notify for messages that don't match any rule. - Notify on incoming call. Federation: @@ -3268,7 +3268,7 @@ Changes in synapse v0.7.0 (2015-02-12) > - Computing the state of a room at a point in time, used for authorization on federation requests. > - Fetching events from the database. - > - User\'s room membership, used for authorizing presence updates.
- Upgraded JSON library to improve parsing and serialisation speeds. @@ -3298,7 +3298,7 @@ Changes in synapse 0.6.0 (2014-12-16) Changes in synapse 0.5.4a (2014-12-13) ====================================== -- Fix bug while generating the error message when a file path specified in the config doesn\'t exist. +- Fix bug while generating the error message when a file path specified in the config doesn't exist. Changes in synapse 0.5.4 (2014-12-03) ===================================== @@ -3329,7 +3329,7 @@ Changes in synapse 0.5.1 (2014-11-26) See UPGRADES.rst for specific instructions on how to upgrade. - Fix bug where we served up an Event that did not match its signatures. -- Fix regression where we no longer correctly handled the case where a homeserver receives an event for a room it doesn\'t recognise (but is in.) +- Fix regression where we no longer correctly handled the case where a homeserver receives an event for a room it doesn't recognise (but is in.) Changes in synapse 0.5.0 (2014-11-19) ===================================== @@ -3342,7 +3342,7 @@ Homeserver: - Add authentication and authorization to the federation protocol. Events are now signed by their originating homeservers. - Implement the new authorization model for rooms. -- Split out web client into a seperate repository: matrix-angular-sdk. +- Split out web client into a separate repository: matrix-angular-sdk. - Change the structure of PDUs. - Fix bug where user could not join rooms via an alias containing 4-byte UTF-8 characters. - Merge concept of PDUs and Events internally. @@ -3352,7 +3352,7 @@ Homeserver: Webclient: -- The webclient has been moved to a seperate repository. +- The webclient has been moved to a separate repository. Changes in synapse 0.4.2 (2014-10-31) ===================================== @@ -3410,10 +3410,10 @@ Webclient: - Add button to send messages to users from the home page. - Add support for using TURN for VoIP calls. - Show display name change messages. -- Fix bug where the client didn\'t get the state of a newly joined room until after it has been refreshed. +- Fix bug where the client didn't get the state of a newly joined room until after it has been refreshed. - Fix bugs with tab complete. - Fix bug where holding down the down arrow caused chrome to chew 100% CPU. -- Fix bug where desktop notifications occasionally used \"Undefined\" as the display name. +- Fix bug where desktop notifications occasionally used "Undefined" as the display name. - Fix more places where we sometimes saw room IDs incorrectly. - Fix bug which caused lag when entering text in the text box. @@ -3427,21 +3427,21 @@ Homeserver: Webclient: - Add support for video calls with basic UI. -- Fix bug where one to one chats were named after your display name rather than the other person\'s. +- Fix bug where one to one chats were named after your display name rather than the other person's. - Fix bug which caused lag when typing in the textarea. -- Refuse to run on browsers we know won\'t work. +- Refuse to run on browsers we know won't work. - Trigger pagination when joining new rooms. -- Fix bug where we sometimes didn\'t display invitations in recents. +- Fix bug where we sometimes didn't display invitations in recents. - Automatically join room when accepting a VoIP call. -- Disable outgoing and reject incoming calls on browsers we don\'t support VoIP in. -- Don\'t display desktop notifications for messages in the room you are non-idle and speaking in. 
+- Disable outgoing and reject incoming calls on browsers we don't support VoIP in. +- Don't display desktop notifications for messages in the room you are non-idle and speaking in. Changes in synapse 0.3.2 (2014-09-18) ===================================== Webclient: -- Fix bug where an empty \"bing words\" list in old accounts didn\'t send notifications when it should have done. +- Fix bug where an empty "bing words" list in old accounts didn't send notifications when it should have done. Changes in synapse 0.3.1 (2014-09-18) ===================================== This is a release to hotfix v0.3.0 to fix two regressions. Webclient: - Fix a regression where we sometimes displayed duplicate events. -- Fix a regression where we didn\'t immediately remove rooms you were banned in from the recents list. +- Fix a regression where we didn't immediately remove rooms you were banned in from the recents list. Changes in synapse 0.3.0 (2014-09-18) ===================================== Homeserver: - When a user changes their displayname or avatar the server will now update all their join states to reflect this. - The server now adds \"age\" key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else. -- Fix bug where we didn\'t correctly pull in missing PDUs. -- Fix bug where prev\_content key wasn\'t always returned. +- Fix bug where we didn't correctly pull in missing PDUs. +- Fix bug where prev\_content key wasn't always returned. - Add support for password resets. Webclient: @@ -3473,7 +3473,7 @@ Webclient: - Always show room aliases in the UI if one is present. - No longer show user-count in the recents side panel. - Add up & down arrow support to the text box for message sending to step through your sent history. -- Don\'t display notifications for our own messages. +- Don't display notifications for our own messages. - Emotes are now formatted correctly in desktop notifications. - The recents list now differentiates between public & private rooms. - Fix bug where when switching between rooms the pagination flickered before the view jumped to the bottom of the screen. @@ -3503,7 +3503,7 @@ Webclient: - VoIP UI and reliability improvements. - Add glare support for VoIP. - Improvements to initial startup speed. -- Don\'t display duplicate join events. +- Don't display duplicate join events. - Local echo of messages. - Differentiate sending and sent of local echo. - Various minor bug fixes. @@ -3587,7 +3587,7 @@ Homeserver: Changes in synapse 0.1.0 (2014-08-29) ===================================== -Presence has been reenabled in this release. +Presence has been re-enabled in this release. Homeserver: @@ -3629,7 +3629,7 @@ Webclient: - Add profile pages. - Improve CSS layout of room. - Disambiguate identical display names. -- Don\'t get remote users display names and avatars individually. +- Don't get remote users' display names and avatars individually. - Use the new initial sync API to reduce number of round trips to the homeserver. - Change url scheme to use room aliases instead of room ids where known. - Increase longpoll timeout. diff --git a/docs/consent_tracking.md b/docs/consent_tracking.md index fb1fec80fe00..26620a075298 100644 --- a/docs/consent_tracking.md +++ b/docs/consent_tracking.md @@ -8,9 +8,9 @@ to the server until they have.
There are several parts to this functionality; each requires some specific configuration in `homeserver.yaml` to be enabled. -Note that various parts of the configuation and this document refer to the +Note that various parts of the configuration and this document refer to the "privacy policy": agreement with a privacy policy is one particular use of this -feature, but of course adminstrators can specify other terms and conditions +feature, but of course administrators can specify other terms and conditions unrelated to "privacy" per se. Collecting policy agreement from a user diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 698687b91f2f..4ae2fcfee3a5 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -266,7 +266,7 @@ The easiest way to do so is to run Postgres via a docker container. In one terminal: ```shell -docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgress -p 5432:5432 postgres:14 +docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgres -p 5432:5432 postgres:14 ``` If you see an error like diff --git a/docs/development/synapse_architecture/faster_joins.md b/docs/development/synapse_architecture/faster_joins.md index 1e6d585b001a..2256c30239de 100644 --- a/docs/development/synapse_architecture/faster_joins.md +++ b/docs/development/synapse_architecture/faster_joins.md @@ -264,7 +264,7 @@ But don't want to send out sensitive data in other HS's events in this way. Suppose we discover after resync that we shouldn't have sent out one our events (not a prev_event) to a target HS. Not much we can do. What about if we didn't send them an event but shouldn't've? E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them. -Could imagine sending out the "Missed" events after the resync but... painful to work out what they shuld have seen if they joined/left. +Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left. Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?) - Don't do this currently. - If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages diff --git a/docs/log_contexts.md b/docs/log_contexts.md index cb15dbe158c0..9d087d11ef20 100644 --- a/docs/log_contexts.md +++ b/docs/log_contexts.md @@ -86,7 +86,7 @@ So we have stopped processing the request (and will probably go on to start processing the next), without clearing the logcontext. To circumvent this problem, synapse code assumes that, wherever you have -an awaitable, you will want to `await` it. To that end, whereever +an awaitable, you will want to `await` it. To that end, wherever functions return awaitables, we adopt the following conventions: **Rules for functions returning awaitables:** diff --git a/docs/postgres.md b/docs/postgres.md index fba4430f33a2..02d4b9b162f6 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -249,7 +249,7 @@ of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in underneath the database, or if a different version of the locale is used on any replicas. 
-If you have a databse with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with +If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with the correct locale parameter (as shown above). It is also possible to change the parameters on a live database and run a `REINDEX` on the entire database, however extreme care must be taken to avoid database corruption. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 479f7ea5431b..0357d2a0fb82 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -37,7 +37,7 @@ Dockerfile to automate a synapse server in a single Docker image, at Slavi Pantaleev has created an Ansible playbook, -which installs the offical Docker image of Matrix Synapse +which installs the official Docker image of Matrix Synapse along with many other Matrix-related services (Postgres database, Element, coturn, ma1sd, SSL support, etc.). For more details, see @@ -93,7 +93,7 @@ For `bookworm` and `sid`, it can be installed simply with: sudo apt install matrix-synapse ``` -Synapse is also avaliable in `bullseye-backports`. Please +Synapse is also available in `bullseye-backports`. Please see the [Debian documentation](https://backports.debian.org/Instructions/) for information on how to use backports. diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md index 083cda8413d3..c3b8c76609da 100644 --- a/docs/tcp_replication.md +++ b/docs/tcp_replication.md @@ -38,7 +38,7 @@ noted when manually using the protocol: been disabled on the main process. - The server will only time connections out that have sent a `PING` command. If a ping is sent then the connection will be closed if no - further commands are receieved within 15s. Both the client and + further commands are received within 15s. Both the client and server protocol implementations will send an initial PING on connection and ensure at least one command every 5s is sent (not necessarily `PING`). @@ -128,7 +128,7 @@ batching. See `RdataCommand` for more details. ### Example -An example iteraction is shown below. Each line is prefixed with '>' +An example interaction is shown below. Each line is prefixed with '>' or '<' to indicate which side is sending, these are *not* included on the wire: diff --git a/docs/turn-howto.md b/docs/turn-howto.md index 4e9e4117cd10..9c1c6f4777a4 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -18,7 +18,7 @@ This documentation provides two TURN server configuration examples: For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP. -Hosting TURN behind NAT requires port forwaring and for the NAT gateway to have a public IP. +Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP. However, even with appropriate configuration, NAT is known to cause issues and to often not work. Afterwards, the homeserver needs some further configuration. diff --git a/docs/upgrade.md b/docs/upgrade.md index 2f888b6f1270..ba2f7703bc75 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -1352,7 +1352,7 @@ In line with our [deprecation policy](deprecation_policy.md), we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream. -This release of Synapse requires Python 3.6+ and PostgresSQL 9.6+ or +This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+. 
## Removal of old List Accounts Admin API @@ -2312,7 +2312,7 @@ for details. # Upgrading to v0.11.0 This release includes the option to send anonymous usage stats to -matrix.org, and requires that administrators explictly opt in or out by +matrix.org, and requires that administrators explicitly opt in or out by setting the `report_stats` option to either `true` or `false`. We would really appreciate it if you could help our project out by @@ -2416,7 +2416,7 @@ latest module, please run: # Upgrading to v0.5.0 -The webclient has been split out into a seperate repository/pacakage in +The webclient has been split out into a separate repository/package in this release. Before you restart your homeserver you will need to pull in the webclient package by running: diff --git a/docs/usage/administration/admin_api/federation.md b/docs/usage/administration/admin_api/federation.md index 51f3b52da824..ce735793c02d 100644 --- a/docs/usage/administration/admin_api/federation.md +++ b/docs/usage/administration/admin_api/federation.md @@ -77,7 +77,7 @@ The following fields are returned in the JSON response body: remote server, in ms. This is `0` if the last attempt to communicate with the remote server was successful. - `retry_interval` - integer - How long since the last time Synapse tried to reach - the remote server before trying again, in ms. This is `0` if no further retrying occuring. + the remote server before trying again, in ms. This is `0` if no further retrying is occurring. - `failure_ts` - nullable integer - The first time Synapse tried and failed to reach the remote server, in ms. This is `null` if communication with the remote server has never failed. - `last_successful_stream_ordering` - nullable integer - The stream ordering of the most diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 42df53d52bb0..a06b3d8a0680 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -521,7 +521,7 @@ listeners: Example configuration #2: ```yaml listeners: - # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy + # Insecure HTTP listener: for when matrix traffic passes through a reverse proxy # that unwraps TLS. # # If you plan to use a reverse proxy, please see @@ -2945,7 +2945,7 @@ Normally, the connection to the key server is validated via TLS certificates. Additional security can be provided by configuring a `verify key`, which will make synapse check that the response is signed by that key. -This setting supercedes an older setting named `perspectives`. The old format +This setting supersedes an older setting named `perspectives`. The old format is still supported for backwards-compatibility, but it is deprecated. `trusted_key_servers` defaults to matrix.org, but using it will generate a From c1c6c95d72b5c9fc6c0e527eeb6b9d3a59889b16 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Sep 2023 14:50:13 +0100 Subject: [PATCH 26/75] Log values at DEBUG level with execute_values (#16281) --- changelog.d/16281.misc | 1 + synapse/storage/database.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16281.misc diff --git a/changelog.d/16281.misc b/changelog.d/16281.misc new file mode 100644 index 000000000000..de48396aff14 --- /dev/null +++ b/changelog.d/16281.misc @@ -0,0 +1 @@ +Include values in SQL debug when using `execute_values` with Postgres.
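The `database.py` hunk below works by passing `values` through to `_do_execute` as an explicit positional argument instead of capturing it in the lambda's closure, so that the wrapper's DEBUG logging can see it alongside the SQL. A minimal runnable sketch of that pattern, using a hypothetical `do_execute` stand-in (the names and log format here are illustrative, not Synapse's actual implementation):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("sql")


def do_execute(func, *args):
    # Hypothetical stand-in for `_do_execute`: positional arguments are logged
    # at DEBUG before the wrapped callable runs, so anything that only lives
    # inside a closure never reaches the log.
    logger.debug("[SQL] args=%r", args)
    return func(*args)


sql = "INSERT INTO t (a, b) VALUES %s"
values = [(1, 2), (3, 4)]

# Before the patch: `values` is captured by the closure, so only `sql` is logged.
do_execute(lambda the_sql: (the_sql, values), sql)

# After the patch: `values` is passed positionally, so it is logged alongside `sql`.
do_execute(lambda the_sql, the_values: (the_sql, the_values), sql, values)
```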
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 55ac313f33b0..6c5fcdcec37d 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -422,10 +422,11 @@ def execute_values( return self._do_execute( # TODO: is it safe for values to be Iterable[Iterable[Any]] here? # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence] - lambda the_sql: execute_values( - self.txn, the_sql, values, template=template, fetch=fetch + lambda the_sql, the_values: execute_values( + self.txn, the_sql, the_values, template=template, fetch=fetch ), sql, + values, ) def execute(self, sql: str, parameters: SQLQueryParameters = ()) -> None: From aa483cb4c905bbe483ffe8e8a8f439655a57481b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 8 Sep 2023 11:24:36 -0400 Subject: [PATCH 27/75] Update ruff config (#16283) Enable additional checks & clean-up unneeded configuration. --- changelog.d/16283.misc | 1 + contrib/cmdclient/http.py | 2 -- docker/start.py | 2 +- pyproject.toml | 28 +++++++++++-------- scripts-dev/mypy_synapse_plugin.py | 7 +++-- synapse/_scripts/update_synapse_database.py | 1 - synapse/events/snapshot.py | 2 -- synapse/media/url_previewer.py | 4 +-- synapse/storage/background_updates.py | 2 -- synmark/suites/logging.py | 2 +- tests/handlers/test_device.py | 2 +- tests/handlers/test_federation.py | 2 +- tests/logging/test_remote_handler.py | 12 ++++---- .../replication/tcp/streams/test_to_device.py | 2 +- tests/rest/admin/test_federation.py | 6 ++-- tests/rest/client/test_account.py | 2 +- tests/rest/client/test_login.py | 8 +++--- tests/rest/client/test_register.py | 6 ++-- tests/storage/databases/main/test_lock.py | 2 +- tests/storage/test_event_chain.py | 6 ++-- tests/storage/test_event_federation.py | 6 ++-- tests/storage/test_profile.py | 4 +-- tests/storage/test_txn_limit.py | 2 +- tests/storage/test_user_filters.py | 4 +-- tests/test_visibility.py | 8 +++--- tests/util/caches/test_descriptors.py | 4 +-- 26 files changed, 63 insertions(+), 64 deletions(-) create mode 100644 changelog.d/16283.misc diff --git a/changelog.d/16283.misc b/changelog.d/16283.misc new file mode 100644 index 000000000000..4b9d6f76aef9 --- /dev/null +++ b/changelog.d/16283.misc @@ -0,0 +1 @@ +Enable additional linting checks. diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index 1310f078e3ac..508de5dcbd2f 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -37,7 +37,6 @@ def put_json(self, url, data): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. """ - pass def get_json(self, url, args=None): """Gets some json from the given host homeserver and path @@ -53,7 +52,6 @@ def get_json(self, url, args=None): Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. 
""" - pass class TwistedHttpClient(HttpClient): diff --git a/docker/start.py b/docker/start.py index aebc7e4aaa52..12c444da9a39 100755 --- a/docker/start.py +++ b/docker/start.py @@ -239,7 +239,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: log("Could not find %s, will not use" % (jemallocpath,)) # if there are no config files passed to synapse, try adding the default file - if not any(p.startswith("--config-path") or p.startswith("-c") for p in args): + if not any(p.startswith(("--config-path", "-c")) for p in args): config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") config_path = environ.get( "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml" diff --git a/pyproject.toml b/pyproject.toml index 5b43abe9071c..8747782b2922 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,33 +43,39 @@ target-version = ['py38', 'py39', 'py310', 'py311'] [tool.ruff] line-length = 88 -# See https://github.com/charliermarsh/ruff/#pycodestyle +# See https://beta.ruff.rs/docs/rules/#error-e # for error codes. The ones we ignore are: -# E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) +# E731: do not assign a lambda expression, use a def # # flake8-bugbear compatible checks. Its error codes are described at -# https://github.com/charliermarsh/ruff/#flake8-bugbear -# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks +# https://beta.ruff.rs/docs/rules/#flake8-bugbear-b # B023: Functions defined inside a loop must not use variables redefined in the loop -# B024: Abstract base class with no abstract method. ignore = [ - "B019", "B023", - "B024", "E501", "E731", ] select = [ - # pycodestyle checks. + # pycodestyle "E", "W", - # pyflakes checks. + # pyflakes "F", - # flake8-bugbear checks. + # flake8-bugbear "B0", - # flake8-comprehensions checks. + # flake8-comprehensions "C4", + # flake8-2020 + "YTT", + # flake8-slots + "SLOT", + # flake8-debugger + "T10", + # flake8-pie + "PIE", + # flake8-executable + "EXE", ] [tool.isort] diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 8058e9c993b1..a0b3854f1b3f 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -30,9 +30,10 @@ def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[MethodSigContext], CallableType]]: if fullname.startswith( - "synapse.util.caches.descriptors.CachedFunction.__call__" - ) or fullname.startswith( - "synapse.util.caches.descriptors._LruCachedFunction.__call__" + ( + "synapse.util.caches.descriptors.CachedFunction.__call__", + "synapse.util.caches.descriptors._LruCachedFunction.__call__", + ) ): return cached_function_method_signature return None diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index f97aecf8d5cd..992ae4388124 100644 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a9e3d4e55689..5bdfa3a8aced 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -55,7 +55,6 @@ async def persist( A method to convert an UnpersistedEventContext to an EventContext, suitable for sending to the database with the associated event. 
""" - pass @abstractmethod async def get_prev_state_ids( @@ -69,7 +68,6 @@ async def get_prev_state_ids( state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules """ - pass @attr.s(slots=True, auto_attribs=True) diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 70b32cee1794..9b5a3dd5f405 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -846,9 +846,7 @@ def _is_media(content_type: str) -> bool: def _is_html(content_type: str) -> bool: content_type = content_type.lower() - return content_type.startswith("text/html") or content_type.startswith( - "application/xhtml" - ) + return content_type.startswith(("text/html", "application/xhtml")) def _is_json(content_type: str) -> bool: diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 7619f405fa09..99ebd96f8426 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -62,7 +62,6 @@ class Constraint(metaclass=abc.ABCMeta): @abc.abstractmethod def make_check_clause(self, table: str) -> str: """Returns an SQL expression that checks the row passes the constraint.""" - pass @abc.abstractmethod def make_constraint_clause_postgres(self) -> str: @@ -70,7 +69,6 @@ def make_constraint_clause_postgres(self) -> str: Only used on Postgres DBs """ - pass @attr.s(auto_attribs=True) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index 8beb077e0a33..04e5b29dc95d 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -112,7 +112,7 @@ class Config: start = perf_counter() # Send a bunch of useful messages - for i in range(0, loops): + for i in range(loops): logger.info("test message %s", i) if len(handler._buffer) == handler.maximum_buffer: diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 9659a4a3553b..79d327499baa 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -223,7 +223,7 @@ def test_delete_device_and_big_device_inbox(self) -> None: # queue a bunch of messages in the inbox requester = create_requester(sender, device_id=DEVICE_ID) - for i in range(0, DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10): + for i in range(DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10): self.get_success( self.device_message_handler.send_device_message( requester, "message_type", {receiver: {"*": {"val": i}}} diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 21d63ab1f297..4fc074241341 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -262,7 +262,7 @@ def test_backfill_with_many_backward_extremities(self) -> None: if (ev.type, ev.state_key) in {("m.room.create", ""), ("m.room.member", remote_server_user_id)} ] - for _ in range(0, 8): + for _ in range(8): event = make_event_from_dict( self.add_hashes_and_signatures_from_other_server( { diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index 5191e31a8ae8..45eac100bf02 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -78,11 +78,11 @@ def test_log_backpressure_debug(self) -> None: logger = self.get_logger(handler) # Send some debug messages - for i in range(0, 3): + for i in range(3): logger.debug("debug %s" % (i,)) # Send a bunch of useful messages - for i in range(0, 7): + for i in range(7): logger.info("info %s" % (i,)) # The last debug message pushes it past the maximum buffer @@ 
-108,15 +108,15 @@ def test_log_backpressure_info(self) -> None: logger = self.get_logger(handler) # Send some debug messages - for i in range(0, 3): + for i in range(3): logger.debug("debug %s" % (i,)) # Send a bunch of useful messages - for i in range(0, 10): + for i in range(10): logger.warning("warn %s" % (i,)) # Send a bunch of info messages - for i in range(0, 3): + for i in range(3): logger.info("info %s" % (i,)) # The last debug message pushes it past the maximum buffer @@ -144,7 +144,7 @@ def test_log_backpressure_cut_middle(self) -> None: logger = self.get_logger(handler) # Send a bunch of useful messages - for i in range(0, 20): + for i in range(20): logger.warning("warn %s" % (i,)) # Allow the reconnection diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py index fb9eac668f19..ab379e8cf1eb 100644 --- a/tests/replication/tcp/streams/test_to_device.py +++ b/tests/replication/tcp/streams/test_to_device.py @@ -49,7 +49,7 @@ def test_to_device_stream(self) -> None: # add messages to the device inbox for user1 up until the # limit defined for a stream update batch - for i in range(0, _STREAM_UPDATE_TARGET_ROW_COUNT): + for i in range(_STREAM_UPDATE_TARGET_ROW_COUNT): msg["content"] = {"device": {}} messages = {user1: {"device": msg}} diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index 4c7864c629f3..0e2824d1b532 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -510,7 +510,7 @@ def _create_destinations(self, number_destinations: int) -> None: Args: number_destinations: Number of destinations to be created """ - for i in range(0, number_destinations): + for i in range(number_destinations): dest = f"sub{i}.example.com" self._create_destination(dest, 50, 50, 50, 100) @@ -690,7 +690,7 @@ def test_order_direction(self) -> None: self._check_fields(channel_desc.json_body["rooms"]) # test that both lists have different directions - for i in range(0, number_rooms): + for i in range(number_rooms): self.assertEqual( channel_asc.json_body["rooms"][i]["room_id"], channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"], @@ -777,7 +777,7 @@ def _create_destination_rooms(self, number_rooms: int) -> None: Args: number_rooms: Number of rooms to be created """ - for _ in range(0, number_rooms): + for _ in range(number_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 4a0eca5b3073..cffbda9a7de9 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -575,7 +575,7 @@ def test_background_update_deletes_deactivated_users_server_side_backup_keys( # create a bunch of users and add keys for them users = [] - for i in range(0, 20): + for i in range(20): user_id = self.register_user("missPiggy" + str(i), "test") users.append((user_id,)) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index a2a65895647f..768d7ad4c217 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -176,10 +176,10 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: def test_POST_ratelimiting_per_address(self) -> None: # Create different users so we're sure not to be bothered by the per-user # ratelimiter. 
- for i in range(0, 6): + for i in range(6): self.register_user("kermit" + str(i), "monkey") - for i in range(0, 6): + for i in range(6): params = { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "kermit" + str(i)}, @@ -228,7 +228,7 @@ def test_POST_ratelimiting_per_address(self) -> None: def test_POST_ratelimiting_per_account(self) -> None: self.register_user("kermit", "monkey") - for i in range(0, 6): + for i in range(6): params = { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "kermit"}, @@ -277,7 +277,7 @@ def test_POST_ratelimiting_per_account(self) -> None: def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: self.register_user("kermit", "monkey") - for i in range(0, 6): + for i in range(6): params = { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "kermit"}, diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index c33393dc284b..ba4e017a0e80 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -169,7 +169,7 @@ def test_POST_disabled_guest_registration(self) -> None: @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting_guest(self) -> None: - for i in range(0, 6): + for i in range(6): url = self.url + b"?kind=guest" channel = self.make_request(b"POST", url, b"{}") @@ -187,7 +187,7 @@ def test_POST_ratelimiting_guest(self) -> None: @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting(self) -> None: - for i in range(0, 6): + for i in range(6): request_data = { "username": "kermit" + str(i), "password": "monkey", @@ -1223,7 +1223,7 @@ def test_GET_token_invalid(self) -> None: def test_GET_ratelimiting(self) -> None: token = "1234" - for i in range(0, 6): + for i in range(6): channel = self.make_request( b"GET", f"{self.url}?token={token}", diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 650b4941bab6..35f77052a729 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -382,7 +382,7 @@ def test_maintain_lock(self) -> None: self.get_success(lock.__aenter__()) # Wait for ages with the lock, we should not be able to get the lock. - for _ in range(0, 10): + for _ in range(10): self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000)) lock2 = self.get_success( diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index 48ebfadaab31..b55dd07f1496 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -664,7 +664,7 @@ def test_background_update_single_large_room(self) -> None: # Add a bunch of state so that it takes multiple iterations of the # background update to process the room. - for i in range(0, 150): + for i in range(150): self.helper.send_state( room_id, event_type="m.test", body={"index": i}, tok=self.token ) @@ -718,12 +718,12 @@ def test_background_update_multiple_large_room(self) -> None: # Add a bunch of state so that it takes multiple iterations of the # background update to process the room. 
- for i in range(0, 150): + for i in range(150): self.helper.send_state( room_id1, event_type="m.test", body={"index": i}, tok=self.token ) - for i in range(0, 150): + for i in range(150): self.helper.send_state( room_id2, event_type="m.test", body={"index": i}, tok=self.token ) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 7a4ecab2d534..d3e20f44b29a 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -227,7 +227,7 @@ def insert_event(txn: Cursor, i: int) -> None: (room_id, event_id), ) - for i in range(0, 20): + for i in range(20): self.get_success( self.store.db_pool.runInteraction("insert", insert_event, i) ) @@ -235,7 +235,7 @@ def insert_event(txn: Cursor, i: int) -> None: # this should get the last ten r = self.get_success(self.store.get_prev_events_for_room(room_id)) self.assertEqual(10, len(r)) - for i in range(0, 10): + for i in range(10): self.assertEqual("$event_%i:local" % (19 - i), r[i]) def test_get_rooms_with_many_extremities(self) -> None: @@ -277,7 +277,7 @@ def insert_event(txn: LoggingTransaction, i: int, room_id: str) -> None: (room_id, event_id), ) - for i in range(0, 20): + for i in range(20): self.get_success( self.store.db_pool.runInteraction("insert", insert_event, i, room1) ) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index fe5bb7791336..95f99f413011 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -82,7 +82,7 @@ def f(txn: LoggingTransaction) -> None: self.get_success(self.store.db_pool.runInteraction("", f)) - for i in range(0, 70): + for i in range(70): self.get_success( self.store.db_pool.simple_insert( "profiles", @@ -115,7 +115,7 @@ def f(txn: LoggingTransaction) -> None: ) expected_values = [] - for i in range(0, 70): + for i in range(70): expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) res = self.get_success( diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py index 15ea4770bd7e..22f074982f98 100644 --- a/tests/storage/test_txn_limit.py +++ b/tests/storage/test_txn_limit.py @@ -38,5 +38,5 @@ def do_select(txn: Cursor) -> None: db_pool = self.hs.get_datastores().databases[0] # force txn limit to roll over at least once - for _ in range(0, 1001): + for _ in range(1001): self.get_success_or_raise(db_pool.runInteraction("test_select", do_select)) diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py index bab802f56ec6..d4637d9d1ebb 100644 --- a/tests/storage/test_user_filters.py +++ b/tests/storage/test_user_filters.py @@ -45,7 +45,7 @@ def f(txn: LoggingTransaction) -> None: self.get_success(self.store.db_pool.runInteraction("", f)) - for i in range(0, 70): + for i in range(70): self.get_success( self.store.db_pool.simple_insert( "user_filters", @@ -82,7 +82,7 @@ def f(txn: LoggingTransaction) -> None: ) expected_values = [] - for i in range(0, 70): + for i in range(70): expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) res = self.get_success( diff --git a/tests/test_visibility.py b/tests/test_visibility.py index a46c29ddf4e4..434902c3f096 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -51,12 +51,12 @@ def test_filtering(self) -> None: # before we do that, we persist some other events to act as state. 
self._inject_visibility("@admin:hs", "joined") - for i in range(0, 10): + for i in range(10): self._inject_room_member("@resident%i:hs" % i) events_to_filter = [] - for i in range(0, 10): + for i in range(10): user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server") evt = self._inject_room_member(user, extra_content={"a": "b"}) events_to_filter.append(evt) @@ -74,7 +74,7 @@ def test_filtering(self) -> None: ) # the result should be 5 redacted events, and 5 unredacted events. - for i in range(0, 5): + for i in range(5): self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id) self.assertNotIn("a", filtered[i].content) @@ -177,7 +177,7 @@ def test_erased_user(self) -> None: ) ) - for i in range(0, len(events_to_filter)): + for i in range(len(events_to_filter)): self.assertEqual( events_to_filter[i].event_id, filtered[i].event_id, diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 064f4987dfeb..168419f440fb 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -623,14 +623,14 @@ def func(self, key: int) -> int: a = A() - for k in range(0, 12): + for k in range(12): yield a.func(k) self.assertEqual(callcount[0], 12) # There must have been at least 2 evictions, meaning if we calculate # all 12 values again, we must get called at least 2 more times - for k in range(0, 12): + for k in range(12): yield a.func(k) self.assertTrue( From a0ed55ef129285e87d9947ae578ff275958169f7 Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 8 Sep 2023 18:55:43 +0200 Subject: [PATCH 28/75] Upgrade CI run of Python 3.12 from rc1 to rc2 (#16280) --- .ci/scripts/calculate_jobs.py | 2 +- changelog.d/16280.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16280.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 661887e20985..08af332b6d5c 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -47,7 +47,7 @@ def set_output(key: str, value: str): "database": "sqlite", "extras": "all", } - for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1") + for version in ("3.9", "3.10", "3.11", "3.12.0-rc.2") ) trial_postgres_tests = [ diff --git a/changelog.d/16280.misc b/changelog.d/16280.misc new file mode 100644 index 000000000000..2d8b414a3b48 --- /dev/null +++ b/changelog.d/16280.misc @@ -0,0 +1 @@ +Upgrade CI run of Python 3.12 from rc1 to rc2. From edd83f23b710f0caae05d5766b474de3b6f24e9e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Sep 2023 19:29:38 +0100 Subject: [PATCH 29/75] Improve type hints for attrs classes (#16276) --- changelog.d/16276.misc | 1 + synapse/config/oembed.py | 2 +- synapse/storage/controllers/persist_events.py | 8 +++--- synapse/util/async_helpers.py | 25 ++++++++----------- synapse/util/caches/dictionary_cache.py | 10 +++----- synapse/util/caches/expiringcache.py | 20 +++++++++------ synapse/util/caches/ttlcache.py | 10 ++++---- 7 files changed, 37 insertions(+), 39 deletions(-) create mode 100644 changelog.d/16276.misc diff --git a/changelog.d/16276.misc b/changelog.d/16276.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/16276.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index d7959639ee62..59bc0b55f4c8 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -30,7 +30,7 @@ class OEmbedEndpointConfig: # The API endpoint to fetch. 
api_endpoint: str # The patterns to match. - url_patterns: List[Pattern] + url_patterns: List[Pattern[str]] # The supported formats. formats: Optional[List[str]] diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index abd1d149dbc5..6864f9309020 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -154,12 +154,13 @@ def try_merge(self, task: "_EventPersistQueueTask") -> bool: _EventPersistQueueTask = Union[_PersistEventsTask, _UpdateCurrentStateTask] +_PersistResult = TypeVar("_PersistResult") @attr.s(auto_attribs=True, slots=True) -class _EventPersistQueueItem: +class _EventPersistQueueItem(Generic[_PersistResult]): task: _EventPersistQueueTask - deferred: ObservableDeferred + deferred: ObservableDeferred[_PersistResult] parent_opentracing_span_contexts: List = attr.ib(factory=list) """A list of opentracing spans waiting for this batch""" @@ -168,9 +169,6 @@ class _EventPersistQueueItem: """The opentracing span under which the persistence actually happened""" -_PersistResult = TypeVar("_PersistResult") - - class _EventPeristenceQueue(Generic[_PersistResult]): """Queues up tasks so that they can be processed with only one concurrent transaction per room. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 943ad5445604..0cbeb0c365d6 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -19,6 +19,7 @@ import inspect import itertools import logging +import typing from contextlib import asynccontextmanager from typing import ( Any, @@ -29,6 +30,7 @@ Collection, Coroutine, Dict, + Generator, Generic, Hashable, Iterable, @@ -398,7 +400,7 @@ class _LinearizerEntry: # The number of things executing. count: int # Deferreds for the things blocked from executing. - deferreds: collections.OrderedDict + deferreds: typing.OrderedDict["defer.Deferred[None]", Literal[1]] class Linearizer: @@ -717,30 +719,25 @@ def failure_cb(val: Failure) -> None: return new_d -# This class can't be generic because it uses slots with attrs. -# See: https://github.com/python-attrs/attrs/issues/313 @attr.s(slots=True, frozen=True, auto_attribs=True) -class DoneAwaitable: # should be: Generic[R] +class DoneAwaitable(Awaitable[R]): """Simple awaitable that returns the provided value.""" - value: Any # should be: R + value: R - def __await__(self) -> Any: - return self - - def __iter__(self) -> "DoneAwaitable": - return self - - def __next__(self) -> None: - raise StopIteration(self.value) + def __await__(self) -> Generator[Any, None, R]: + yield None + return self.value def maybe_awaitable(value: Union[Awaitable[R], R]) -> Awaitable[R]: """Convert a value to an awaitable if not already an awaitable.""" if inspect.isawaitable(value): - assert isinstance(value, Awaitable) return value + # For some reason mypy doesn't deduce that value is not Awaitable here, even though + # inspect.isawaitable returns a TypeGuard. 
+ assert not isinstance(value, Awaitable) return DoneAwaitable(value) diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 5eaf70c7abba..2fbc7b1e6c32 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -14,7 +14,7 @@ import enum import logging import threading -from typing import Any, Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union +from typing import Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union import attr from typing_extensions import Literal @@ -33,10 +33,8 @@ DV = TypeVar("DV") -# This class can't be generic because it uses slots with attrs. -# See: https://github.com/python-attrs/attrs/issues/313 @attr.s(slots=True, frozen=True, auto_attribs=True) -class DictionaryEntry: # should be: Generic[DKT, DV]. +class DictionaryEntry(Generic[DKT, DV]): """Returned when getting an entry from the cache If `full` is true then `known_absent` will be the empty set. @@ -50,8 +48,8 @@ class DictionaryEntry: # should be: Generic[DKT, DV]. """ full: bool - known_absent: Set[Any] # should be: Set[DKT] - value: Dict[Any, Any] # should be: Dict[DKT, DV] + known_absent: Set[DKT] + value: Dict[DKT, DV] def __len__(self) -> int: return len(self.value) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 01ad02af6703..8e4c34039dac 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -14,7 +14,7 @@ import logging from collections import OrderedDict -from typing import Any, Generic, Optional, TypeVar, Union, overload +from typing import Any, Generic, Iterable, Optional, TypeVar, Union, overload import attr from typing_extensions import Literal @@ -73,7 +73,7 @@ def __init__( self._expiry_ms = expiry_ms self._reset_expiry_on_get = reset_expiry_on_get - self._cache: OrderedDict[KT, _CacheEntry] = OrderedDict() + self._cache: OrderedDict[KT, _CacheEntry[VT]] = OrderedDict() self.iterable = iterable @@ -100,7 +100,10 @@ def evict(self) -> None: while self._max_size and len(self) > self._max_size: _key, value = self._cache.popitem(last=False) if self.iterable: - self.metrics.inc_evictions(EvictionReason.size, len(value.value)) + # type-ignore, here and below: if self.iterable is true, then the value + # type VT should be Sized (i.e. have a __len__ method). We don't enforce + # this via the type system at present. 
+ self.metrics.inc_evictions(EvictionReason.size, len(value.value)) # type: ignore[arg-type] else: self.metrics.inc_evictions(EvictionReason.size) @@ -134,7 +137,7 @@ def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: return default if self.iterable: - self.metrics.inc_evictions(EvictionReason.invalidation, len(value.value)) + self.metrics.inc_evictions(EvictionReason.invalidation, len(value.value)) # type: ignore[arg-type] else: self.metrics.inc_evictions(EvictionReason.invalidation) @@ -182,7 +185,7 @@ async def _prune_cache(self) -> None: for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self.metrics.inc_evictions(EvictionReason.time, len(value.value)) + self.metrics.inc_evictions(EvictionReason.time, len(value.value)) # type: ignore[arg-type] else: self.metrics.inc_evictions(EvictionReason.time) @@ -195,7 +198,8 @@ async def _prune_cache(self) -> None: def __len__(self) -> int: if self.iterable: - return sum(len(entry.value) for entry in self._cache.values()) + g: Iterable[int] = (len(entry.value) for entry in self._cache.values()) # type: ignore[arg-type] + return sum(g) else: return len(self._cache) @@ -218,6 +222,6 @@ def set_cache_factor(self, factor: float) -> bool: @attr.s(slots=True, auto_attribs=True) -class _CacheEntry: +class _CacheEntry(Generic[VT]): time: int - value: Any + value: VT diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index f6b3ee31e4b8..48a6e4a9067f 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -35,10 +35,10 @@ class TTLCache(Generic[KT, VT]): def __init__(self, cache_name: str, timer: Callable[[], float] = time.time): # map from key to _CacheEntry - self._data: Dict[KT, _CacheEntry] = {} + self._data: Dict[KT, _CacheEntry[KT, VT]] = {} # the _CacheEntries, sorted by expiry time - self._expiry_list: SortedList[_CacheEntry] = SortedList() + self._expiry_list: SortedList[_CacheEntry[KT, VT]] = SortedList() self._timer = timer @@ -160,11 +160,11 @@ def expire(self) -> None: @attr.s(frozen=True, slots=True, auto_attribs=True) -class _CacheEntry: # Should be Generic[KT, VT]. See python-attrs/attrs#313 +class _CacheEntry(Generic[KT, VT]): """TTLCache entry""" # expiry_time is the first attribute, so that entries are sorted by expiry. 
expiry_time: float ttl: float - key: Any # should be KT - value: Any # should be VT + key: KT + value: VT From e8ebc730ca76cb37017acc7d6b7ff28230ec3a97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:53:24 +0100 Subject: [PATCH 30/75] Bump serde_json from 1.0.105 to 1.0.106 (#16296) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95a713e437a7..4e233b168396 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "itoa", "ryu", From aafcaf277e9dc4116bafac056d0cf740460ee10f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:53:46 +0100 Subject: [PATCH 31/75] Bump black from 23.7.0 to 23.9.1 (#16295) --- poetry.lock | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index b577ae4f18df..3e9482bdbccb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -148,33 +148,33 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.7.0" +version = "23.9.1" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-23.7.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587"}, - {file = "black-23.7.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f"}, - {file = "black-23.7.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be"}, - {file = "black-23.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc"}, - {file = "black-23.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a"}, - {file = "black-23.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926"}, - {file = "black-23.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6"}, - {file = 
"black-23.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a"}, - {file = "black-23.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087"}, - {file = "black-23.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91"}, - {file = "black-23.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491"}, - {file = "black-23.7.0-py3-none-any.whl", hash = "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96"}, - {file = "black-23.7.0.tar.gz", hash = "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71"}, + {file = "black-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7"}, + {file = "black-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186"}, + {file = "black-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f"}, + {file = "black-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204"}, + {file = "black-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377"}, + {file = "black-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_arm64.whl", hash = 
"sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393"}, + {file = "black-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9"}, + {file = "black-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f"}, + {file = "black-23.9.1-py3-none-any.whl", hash = "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9"}, + {file = "black-23.9.1.tar.gz", hash = "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d"}, ] [package.dependencies] @@ -184,7 +184,7 @@ packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} [package.extras] colorama = ["colorama (>=0.4.3)"] From ee65d8f750cf3e894939a4f5ca9e65f90caf994e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:53:56 +0100 Subject: [PATCH 32/75] Bump mypy-zope from 1.0.0 to 1.0.1 (#16291) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3e9482bdbccb..381a66af4daa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1502,17 +1502,17 @@ files = [ [[package]] name = "mypy-zope" -version = "1.0.0" +version = "1.0.1" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" files = [ - {file = "mypy-zope-1.0.0.tar.gz", hash = "sha256:be815c2fcb5333aa87e8ec682029ad3214142fe2a05ea383f9ff2d77c98008b7"}, - {file = "mypy_zope-1.0.0-py3-none-any.whl", hash = "sha256:9732e9b2198f2aec3343b38a51905ff49d44dc9e39e8e8bc6fc490b232388209"}, + {file = "mypy-zope-1.0.1.tar.gz", hash = "sha256:003953896629d762d7f497135171ad549df42a8ac63c1521a230832dd6f7fc25"}, + {file = "mypy_zope-1.0.1-py3-none-any.whl", hash = "sha256:ffa291a7af9f5904ce9f0e56de44323a4476e28aaf0d68361b62b1b0e997d0b8"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.5.0" +mypy = ">=1.0.0,<1.6.0" "zope.interface" = "*" "zope.schema" = "*" From f93cd6abbb569dc057ded9d77b5e27971469102c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:54:28 +0100 Subject: [PATCH 33/75] Bump types-setuptools from 68.0.0.3 to 68.2.0.0 (#16292) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 381a66af4daa..3705b100af46 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3058,13 +3058,13 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "68.0.0.3" +version = "68.2.0.0" description = "Typing stubs for setuptools" optional = false python-versions = "*" files = [ - {file = "types-setuptools-68.0.0.3.tar.gz", hash = "sha256:d57ae6076100b5704b3cc869fdefc671e1baf4c2cd6643f84265dfc0b955bf05"}, - {file = "types_setuptools-68.0.0.3-py3-none-any.whl", hash = "sha256:fec09e5c18264c5c09351c00be01a34456fb7a88e457abe97401325f84ad9d36"}, + {file = 
"types-setuptools-68.2.0.0.tar.gz", hash = "sha256:a4216f1e2ef29d089877b3af3ab2acf489eb869ccaf905125c69d2dc3932fd85"}, + {file = "types_setuptools-68.2.0.0-py3-none-any.whl", hash = "sha256:77edcc843e53f8fc83bb1a840684841f3dc804ec94562623bfa2ea70d5a2ba1b"}, ] [[package]] From b0e93b63d43f81c05de6c44f387cba6bed26cc70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:54:37 +0100 Subject: [PATCH 34/75] Bump types-pillow from 10.0.0.2 to 10.0.0.3 (#16293) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3705b100af46..e4cea282820a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2997,13 +2997,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.0.0.2" +version = "10.0.0.3" description = "Typing stubs for Pillow" optional = false python-versions = "*" files = [ - {file = "types-Pillow-10.0.0.2.tar.gz", hash = "sha256:fe09380ab22d412ced989a067e9ee4af719fa3a47ba1b53b232b46514a871042"}, - {file = "types_Pillow-10.0.0.2-py3-none-any.whl", hash = "sha256:29d51a3ce6ef51fabf728a504d33b4836187ff14256b2e86996d55c91ab214b1"}, + {file = "types-Pillow-10.0.0.3.tar.gz", hash = "sha256:ae0c877d363da349bbb82c5463c9e78037290cc07d3714cb0ceaf5d2f7f5c825"}, + {file = "types_Pillow-10.0.0.3-py3-none-any.whl", hash = "sha256:54a49f3c6a3f5e95ebeee396d7773dde22ce2515d594f9c0596c0a983558f0d4"}, ] [[package]] From 151e4bbc45dbf7b767b1a6a74ffb4cd7889ccf78 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 Sep 2023 13:11:02 +0100 Subject: [PATCH 35/75] Filter out down hosts when retrying fetching device lists (#16298) --- changelog.d/16298.misc | 1 + synapse/handlers/device.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16298.misc diff --git a/changelog.d/16298.misc b/changelog.d/16298.misc new file mode 100644 index 000000000000..75b546d42414 --- /dev/null +++ b/changelog.d/16298.misc @@ -0,0 +1 @@ +Don't try refetching device lists for users on remote hosts that are marked as "down". diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9356ae998e5e..9d240ad4ee30 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -58,7 +58,10 @@ from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.cancellation import cancellable from synapse.util.metrics import measure_func -from synapse.util.retryutils import NotRetryingDestination +from synapse.util.retryutils import ( + NotRetryingDestination, + filter_destinations_by_retry_limiter, +) if TYPE_CHECKING: from synapse.server import HomeServer @@ -1269,8 +1272,18 @@ async def _maybe_retry_device_resync(self) -> None: self._resync_retry_in_progress = True # Get all of the users that need resyncing. need_resync = await self.store.get_user_ids_requiring_device_list_resync() + + # Filter out users whose host is marked as "down" up front. + hosts = await filter_destinations_by_retry_limiter( + {get_domain_from_id(u) for u in need_resync}, self.clock, self.store + ) + hosts = set(hosts) + # Iterate over the set of user IDs. for user_id in need_resync: + if get_domain_from_id(user_id) not in hosts: + continue + try: # Try to resync the current user's devices list. 
result = (await self.multi_user_device_resync([user_id], False))[ From 9400dc05357b4272425c7be47ceeced26fa3f28c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 11 Sep 2023 09:49:48 -0400 Subject: [PATCH 36/75] Add the List-Unsubscribe header for notification emails. (#16274) Adds both the List-Unsubscribe (RFC2369) and List-Unsubscribe-Post (RFC8058) headers to push notification emails, which together should: * Show an "Unsubscribe" link in the MUA UI when viewing Synapse notification emails. * Enable "one-click" unsubscribe (the user never leaves their MUA, which automatically makes a POST request to the specified endpoint). --- changelog.d/16274.feature | 1 + synapse/handlers/send_email.py | 10 +++- synapse/push/mailer.py | 33 +++++++++++-- synapse/rest/synapse/client/unsubscribe.py | 17 +++++++ tests/push/test_email.py | 55 ++++++++++++++++++++++ 5 files changed, 110 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16274.feature diff --git a/changelog.d/16274.feature b/changelog.d/16274.feature new file mode 100644 index 000000000000..0d9da2bbef75 --- /dev/null +++ b/changelog.d/16274.feature @@ -0,0 +1 @@ +Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header. diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 05e21509deac..4f5fe62fe802 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -17,7 +17,7 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from io import BytesIO -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional from pkg_resources import parse_version @@ -151,6 +151,7 @@ async def send_email( app_name: str, html: str, text: str, + additional_headers: Optional[Dict[str, str]] = None, ) -> None: """Send a multipart email with the given information. @@ -160,6 +161,7 @@ async def send_email( app_name: The app name to include in the From header. html: The HTML content to include in the email. text: The plain text content to include in the email. + additional_headers: A map of additional headers to include. """ try: from_string = self._from % {"app": app_name} @@ -181,6 +183,7 @@ async def send_email( multipart_msg["To"] = email_address multipart_msg["Date"] = email.utils.formatdate() multipart_msg["Message-ID"] = email.utils.make_msgid() + # Discourage automatic responses to Synapse's emails. # Per RFC 3834, automatic responses should not be sent if the "Auto-Submitted" # header is present with any value other than "no". 
See @@ -194,6 +197,11 @@ async def send_email( # https://stackoverflow.com/a/25324691/5252017 # https://stackoverflow.com/a/61646381/5252017 multipart_msg["X-Auto-Response-Suppress"] = "All" + + if additional_headers: + for header, value in additional_headers.items(): + multipart_msg[header] = value + multipart_msg.attach(text_part) multipart_msg.attach(html_part) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 79e0627b6a66..b6cad18c2dfd 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -298,20 +298,26 @@ async def _fetch_room_state(room_id: str) -> None: notifs_by_room, state_by_room, notif_events, reason ) + unsubscribe_link = self._make_unsubscribe_link(user_id, app_id, email_address) + template_vars: TemplateVars = { "user_display_name": user_display_name, - "unsubscribe_link": self._make_unsubscribe_link( - user_id, app_id, email_address - ), + "unsubscribe_link": unsubscribe_link, "summary_text": summary_text, "rooms": rooms, "reason": reason, } - await self.send_email(email_address, summary_text, template_vars) + await self.send_email( + email_address, summary_text, template_vars, unsubscribe_link + ) async def send_email( - self, email_address: str, subject: str, extra_template_vars: TemplateVars + self, + email_address: str, + subject: str, + extra_template_vars: TemplateVars, + unsubscribe_link: Optional[str] = None, ) -> None: """Send an email with the given information and template text""" template_vars: TemplateVars = { @@ -330,6 +336,23 @@ async def send_email( app_name=self.app_name, html=html_text, text=plain_text, + # Include the List-Unsubscribe header which some clients render in the UI. + # Per RFC 2369, this can be a URL or mailto URL. See + # https://www.rfc-editor.org/rfc/rfc2369.html#section-3.2 + # + # It is preferred to use email, but Synapse doesn't support incoming email. + # + # Also include the List-Unsubscribe-Post header from RFC 8058. See + # https://www.rfc-editor.org/rfc/rfc8058.html#section-3.1 + # + # Note that many email clients will not render the unsubscribe link + # unless DKIM, etc. is properly setup. + additional_headers={ + "List-Unsubscribe-Post": "List-Unsubscribe=One-Click", + "List-Unsubscribe": f"<{unsubscribe_link}>", + } + if unsubscribe_link + else None, ) async def _get_room_vars( diff --git a/synapse/rest/synapse/client/unsubscribe.py b/synapse/rest/synapse/client/unsubscribe.py index 60321018f94d..050fd7bba1d6 100644 --- a/synapse/rest/synapse/client/unsubscribe.py +++ b/synapse/rest/synapse/client/unsubscribe.py @@ -38,6 +38,10 @@ def __init__(self, hs: "HomeServer"): self.macaroon_generator = hs.get_macaroon_generator() async def _async_render_GET(self, request: SynapseRequest) -> None: + """ + Handle a user opening an unsubscribe link in the browser, either via an + HTML/Text email or via the List-Unsubscribe header. + """ token = parse_string(request, "access_token", required=True) app_id = parse_string(request, "app_id", required=True) pushkey = parse_string(request, "pushkey", required=True) @@ -62,3 +66,16 @@ async def _async_render_GET(self, request: SynapseRequest) -> None: 200, UnsubscribeResource.SUCCESS_HTML, ) + + async def _async_render_POST(self, request: SynapseRequest) -> None: + """ + Handle a mail user agent POSTing to the unsubscribe URL via the + List-Unsubscribe & List-Unsubscribe-Post headers. + """ + + # TODO Assert that the body has a single field + + # Assert the body has form encoded key/value pair of + # List-Unsubscribe=One-Click. 
+ + await self._async_render_GET(request) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 4b5c96aeaea8..73a430ddc66a 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -13,10 +13,12 @@ # limitations under the License. import email.message import os +from http import HTTPStatus from typing import Any, Dict, List, Sequence, Tuple import attr import pkg_resources +from parameterized import parameterized from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor @@ -25,9 +27,11 @@ from synapse.api.errors import Codes, SynapseError from synapse.push.emailpusher import EmailPusher from synapse.rest.client import login, room +from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource from synapse.server import HomeServer from synapse.util import Clock +from tests.server import FakeSite, make_request from tests.unittest import HomeserverTestCase @@ -175,6 +179,57 @@ def test_simple_sends_email(self) -> None: self._check_for_mail() + @parameterized.expand([(False,), (True,)]) + def test_unsubscribe(self, use_post: bool) -> None: + # Create a simple room with two users + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) + + # The other user sends a single message. + self.helper.send(room, body="Hi!", tok=self.others[0].token) + + # We should get emailed about that message + args, kwargs = self._check_for_mail() + + # That email should contain an unsubscribe link in the body and header. + msg: bytes = args[5] + + # Multipart: plain text, base 64 encoded; html, base 64 encoded + multipart_msg = email.message_from_bytes(msg) + txt = multipart_msg.get_payload()[0].get_payload(decode=True).decode() + html = multipart_msg.get_payload()[1].get_payload(decode=True).decode() + self.assertIn("/_synapse/client/unsubscribe", txt) + self.assertIn("/_synapse/client/unsubscribe", html) + + # The unsubscribe headers should exist. + assert multipart_msg.get("List-Unsubscribe") is not None + self.assertIsNotNone(multipart_msg.get("List-Unsubscribe-Post")) + + # Open the unsubscribe link. + unsubscribe_link = multipart_msg["List-Unsubscribe"].strip("<>") + unsubscribe_resource = UnsubscribeResource(self.hs) + channel = make_request( + self.reactor, + FakeSite(unsubscribe_resource, self.reactor), + "POST" if use_post else "GET", + unsubscribe_link, + shorthand=False, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + # Ensure the pusher was removed. 
+ pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) + ) + self.assertEqual(pushers, []) + def test_invite_sends_email(self) -> None: # Create a room and invite the user to it room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) From 2b35626b6b7aed52a626734a5a85fe77c847251d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 Sep 2023 11:08:04 +0100 Subject: [PATCH 37/75] Refactor storing of server keys (#16261) --- changelog.d/16261.misc | 1 + synapse/crypto/keyring.py | 35 +--- synapse/storage/databases/main/keys.py | 219 ++++++++----------------- tests/crypto/test_keyring.py | 53 ++---- tests/storage/test_keys.py | 137 ---------------- tests/unittest.py | 26 +-- 6 files changed, 106 insertions(+), 365 deletions(-) create mode 100644 changelog.d/16261.misc delete mode 100644 tests/storage/test_keys.py diff --git a/changelog.d/16261.misc b/changelog.d/16261.misc new file mode 100644 index 000000000000..d3ad59ca4a47 --- /dev/null +++ b/changelog.d/16261.misc @@ -0,0 +1 @@ +Simplify server key storage. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 260aab32417a..fe86f54d800e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -23,12 +23,7 @@ get_verify_key, is_signing_algorithm_supported, ) -from signedjson.sign import ( - SignatureVerifyException, - encode_canonical_json, - signature_ids, - verify_signed_json, -) +from signedjson.sign import SignatureVerifyException, signature_ids, verify_signed_json from signedjson.types import VerifyKey from unpaddedbase64 import decode_base64 @@ -596,24 +591,12 @@ async def process_v2_response( verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) - key_json_bytes = encode_canonical_json(response_json) - - await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.store.store_server_keys_json, - server_name=server_name, - key_id=key_id, - from_server=from_server, - ts_now_ms=time_added_ms, - ts_expires_ms=ts_valid_until_ms, - key_json_bytes=key_json_bytes, - ) - for key_id in verify_keys - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) + await self.store.store_server_keys_response( + server_name=server_name, + from_server=from_server, + ts_added_ms=time_added_ms, + verify_keys=verify_keys, + response_json=response_json, ) return verify_keys @@ -775,10 +758,6 @@ async def get_server_verify_key_v2_indirect( keys.setdefault(server_name, {}).update(processed_response) - await self.store.store_server_signature_keys( - perspective_name, time_now_ms, added_keys - ) - return keys def _validate_perspectives_response( diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 57aa4921e150..41563371dcd2 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -16,14 +16,17 @@ import itertools import json import logging -from typing import Dict, Iterable, Mapping, Optional, Tuple +from typing import Dict, Iterable, Optional, Tuple +from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from unpaddedbase64 import decode_base64 +from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.keys import FetchKeyResult, FetchKeyResultForRemote from synapse.storage.types import Cursor +from synapse.types import JsonDict from synapse.util.caches.descriptors 
import cached, cachedList from synapse.util.iterutils import batch_iter @@ -36,162 +39,84 @@ class KeyStore(CacheInvalidationWorkerStore): """Persistence for signature verification keys""" - @cached() - def _get_server_signature_key( - self, server_name_and_key_id: Tuple[str, str] - ) -> FetchKeyResult: - raise NotImplementedError() - - @cachedList( - cached_method_name="_get_server_signature_key", - list_name="server_name_and_key_ids", - ) - async def get_server_signature_keys( - self, server_name_and_key_ids: Iterable[Tuple[str, str]] - ) -> Dict[Tuple[str, str], FetchKeyResult]: - """ - Args: - server_name_and_key_ids: - iterable of (server_name, key-id) tuples to fetch keys for - - Returns: - A map from (server_name, key_id) -> FetchKeyResult, or None if the - key is unknown - """ - keys = {} - - def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: - """Processes a batch of keys to fetch, and adds the result to `keys`.""" - - # batch_iter always returns tuples so it's safe to do len(batch) - sql = """ - SELECT server_name, key_id, verify_key, ts_valid_until_ms - FROM server_signature_keys WHERE 1=0 - """ + " OR (server_name=? AND key_id=?)" * len( - batch - ) - - txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) - - for row in txn: - server_name, key_id, key_bytes, ts_valid_until_ms = row - - if ts_valid_until_ms is None: - # Old keys may be stored with a ts_valid_until_ms of null, - # in which case we treat this as if it was set to `0`, i.e. - # it won't match key requests that define a minimum - # `ts_valid_until_ms`. - ts_valid_until_ms = 0 - - keys[(server_name, key_id)] = FetchKeyResult( - verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), - valid_until_ts=ts_valid_until_ms, - ) - - def _txn(txn: Cursor) -> Dict[Tuple[str, str], FetchKeyResult]: - for batch in batch_iter(server_name_and_key_ids, 50): - _get_keys(txn, batch) - return keys - - return await self.db_pool.runInteraction("get_server_signature_keys", _txn) - - async def store_server_signature_keys( + async def store_server_keys_response( self, + server_name: str, from_server: str, ts_added_ms: int, - verify_keys: Mapping[Tuple[str, str], FetchKeyResult], + verify_keys: Dict[str, FetchKeyResult], + response_json: JsonDict, ) -> None: - """Stores NACL verification keys for remote servers. + """Stores the keys for the given server that we got from `from_server`. + Args: - from_server: Where the verification keys were looked up - ts_added_ms: The time to record that the key was added - verify_keys: - keys to be stored. Each entry is a triplet of - (server_name, key_id, key). + server_name: The owner of the keys + from_server: Which server we got the keys from + ts_added_ms: When we're adding the keys + verify_keys: The decoded keys + response_json: The full *signed* response JSON that contains the keys. """ - key_values = [] - value_values = [] - invalidations = [] - for (server_name, key_id), fetch_result in verify_keys.items(): - key_values.append((server_name, key_id)) - value_values.append( - ( - from_server, - ts_added_ms, - fetch_result.valid_until_ts, - db_binary_type(fetch_result.verify_key.encode()), - ) - ) - # invalidate takes a tuple corresponding to the params of - # _get_server_signature_key. _get_server_signature_key only takes one - # param, which is itself the 2-tuple (server_name, key_id). 
- invalidations.append((server_name, key_id)) - await self.db_pool.simple_upsert_many( - table="server_signature_keys", - key_names=("server_name", "key_id"), - key_values=key_values, - value_names=( - "from_server", - "ts_added_ms", - "ts_valid_until_ms", - "verify_key", - ), - value_values=value_values, - desc="store_server_signature_keys", - ) + key_json_bytes = encode_canonical_json(response_json) + + def store_server_keys_response_txn(txn: LoggingTransaction) -> None: + self.db_pool.simple_upsert_many_txn( + txn, + table="server_signature_keys", + key_names=("server_name", "key_id"), + key_values=[(server_name, key_id) for key_id in verify_keys], + value_names=( + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "verify_key", + ), + value_values=[ + ( + from_server, + ts_added_ms, + fetch_result.valid_until_ts, + db_binary_type(fetch_result.verify_key.encode()), + ) + for fetch_result in verify_keys.values() + ], + ) - invalidate = self._get_server_signature_key.invalidate - for i in invalidations: - invalidate((i,)) + self.db_pool.simple_upsert_many_txn( + txn, + table="server_keys_json", + key_names=("server_name", "key_id", "from_server"), + key_values=[ + (server_name, key_id, from_server) for key_id in verify_keys + ], + value_names=( + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + value_values=[ + ( + ts_added_ms, + fetch_result.valid_until_ts, + db_binary_type(key_json_bytes), + ) + for fetch_result in verify_keys.values() + ], + ) - async def store_server_keys_json( - self, - server_name: str, - key_id: str, - from_server: str, - ts_now_ms: int, - ts_expires_ms: int, - key_json_bytes: bytes, - ) -> None: - """Stores the JSON bytes for a set of keys from a server - The JSON should be signed by the originating server, the intermediate - server, and by this server. Updates the value for the - (server_name, key_id, from_server) triplet if one already existed. - Args: - server_name: The name of the server. - key_id: The identifier of the key this JSON is for. - from_server: The server this JSON was fetched from. - ts_now_ms: The time now in milliseconds. - ts_valid_until_ms: The time when this json stops being valid. - key_json_bytes: The encoded JSON. - """ - await self.db_pool.simple_upsert( - table="server_keys_json", - keyvalues={ - "server_name": server_name, - "key_id": key_id, - "from_server": from_server, - }, - values={ - "server_name": server_name, - "key_id": key_id, - "from_server": from_server, - "ts_added_ms": ts_now_ms, - "ts_valid_until_ms": ts_expires_ms, - "key_json": db_binary_type(key_json_bytes), - }, - desc="store_server_keys_json", - ) + # invalidate takes a tuple corresponding to the params of + # _get_server_keys_json. _get_server_keys_json only takes one + # param, which is itself the 2-tuple (server_name, key_id). + for key_id in verify_keys: + self._invalidate_cache_and_stream( + txn, self._get_server_keys_json, ((server_name, key_id),) + ) + self._invalidate_cache_and_stream( + txn, self.get_server_key_json_for_remote, (server_name, key_id) + ) - # invalidate takes a tuple corresponding to the params of - # _get_server_keys_json. _get_server_keys_json only takes one - # param, which is itself the 2-tuple (server_name, key_id). 
- await self.invalidate_cache_and_stream( - "_get_server_keys_json", ((server_name, key_id),) - ) - await self.invalidate_cache_and_stream( - "get_server_key_json_for_remote", (server_name, key_id) + await self.db_pool.runInteraction( + "store_server_keys_response", store_server_keys_response_txn ) @cached() diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index f93ba5d4cf0c..c5700771b09c 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -13,7 +13,7 @@ # limitations under the License. import time from typing import Any, Dict, List, Optional, cast -from unittest.mock import AsyncMock, Mock +from unittest.mock import Mock import attr import canonicaljson @@ -189,23 +189,24 @@ def test_verify_json_for_server(self) -> None: kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key("1") - r = self.hs.get_datastores().main.store_server_keys_json( + r = self.hs.get_datastores().main.store_server_keys_response( "server9", - get_key_id(key1), from_server="test", - ts_now_ms=int(time.time() * 1000), - ts_expires_ms=1000, + ts_added_ms=int(time.time() * 1000), + verify_keys={ + get_key_id(key1): FetchKeyResult( + verify_key=get_verify_key(key1), valid_until_ts=1000 + ) + }, # The entire response gets signed & stored, just include the bits we # care about. - key_json_bytes=canonicaljson.encode_canonical_json( - { - "verify_keys": { - get_key_id(key1): { - "key": encode_verify_key_base64(get_verify_key(key1)) - } + response_json={ + "verify_keys": { + get_key_id(key1): { + "key": encode_verify_key_base64(get_verify_key(key1)) } } - ), + }, ) self.get_success(r) @@ -285,34 +286,6 @@ async def get_keys( d = kr.verify_json_for_server(self.hs.hostname, json1, 0) self.get_success(d) - def test_verify_json_for_server_with_null_valid_until_ms(self) -> None: - """Tests that we correctly handle key requests for keys we've stored - with a null `ts_valid_until_ms` - """ - mock_fetcher = Mock() - mock_fetcher.get_keys = AsyncMock(return_value={}) - - key1 = signedjson.key.generate_signing_key("1") - r = self.hs.get_datastores().main.store_server_signature_keys( - "server9", - int(time.time() * 1000), - # None is not a valid value in FetchKeyResult, but we're abusing this - # API to insert null values into the database. The nulls get converted - # to 0 when fetched in KeyStore.get_server_signature_keys. - {("server9", get_key_id(key1)): FetchKeyResult(get_verify_key(key1), None)}, # type: ignore[arg-type] - ) - self.get_success(r) - - json1: JsonDict = {} - signedjson.sign.sign_json(json1, "server9", key1) - - # should succeed on a signed object with a 0 minimum_valid_until_ms - d = self.hs.get_datastores().main.get_server_signature_keys( - [("server9", get_key_id(key1))] - ) - result = self.get_success(d) - self.assertEqual(result[("server9", get_key_id(key1))].valid_until_ts, 0) - def test_verify_json_dedupes_key_requests(self) -> None: """Two requests for the same key should be deduped.""" key1 = signedjson.key.generate_signing_key("1") diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py deleted file mode 100644 index 5d7c13e6d04c..000000000000 --- a/tests/storage/test_keys.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import signedjson.key -import signedjson.types -import unpaddedbase64 - -from synapse.storage.keys import FetchKeyResult - -import tests.unittest - - -def decode_verify_key_base64( - key_id: str, key_base64: str -) -> signedjson.types.VerifyKey: - key_bytes = unpaddedbase64.decode_base64(key_base64) - return signedjson.key.decode_verify_key_bytes(key_id, key_bytes) - - -KEY_1 = decode_verify_key_base64( - "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw" -) -KEY_2 = decode_verify_key_base64( - "ed25519:key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" -) - - -class KeyStoreTestCase(tests.unittest.HomeserverTestCase): - def test_get_server_signature_keys(self) -> None: - store = self.hs.get_datastores().main - - key_id_1 = "ed25519:key1" - key_id_2 = "ed25519:KEY_ID_2" - self.get_success( - store.store_server_signature_keys( - "from_server", - 10, - { - ("server1", key_id_1): FetchKeyResult(KEY_1, 100), - ("server1", key_id_2): FetchKeyResult(KEY_2, 200), - }, - ) - ) - - res = self.get_success( - store.get_server_signature_keys( - [ - ("server1", key_id_1), - ("server1", key_id_2), - ("server1", "ed25519:key3"), - ] - ) - ) - - self.assertEqual(len(res.keys()), 3) - res1 = res[("server1", key_id_1)] - self.assertEqual(res1.verify_key, KEY_1) - self.assertEqual(res1.verify_key.version, "key1") - self.assertEqual(res1.valid_until_ts, 100) - - res2 = res[("server1", key_id_2)] - self.assertEqual(res2.verify_key, KEY_2) - # version comes from the ID it was stored with - self.assertEqual(res2.verify_key.version, "KEY_ID_2") - self.assertEqual(res2.valid_until_ts, 200) - - # non-existent result gives None - self.assertIsNone(res[("server1", "ed25519:key3")]) - - def test_cache(self) -> None: - """Check that updates correctly invalidate the cache.""" - - store = self.hs.get_datastores().main - - key_id_1 = "ed25519:key1" - key_id_2 = "ed25519:key2" - - self.get_success( - store.store_server_signature_keys( - "from_server", - 0, - { - ("srv1", key_id_1): FetchKeyResult(KEY_1, 100), - ("srv1", key_id_2): FetchKeyResult(KEY_2, 200), - }, - ) - ) - - res = self.get_success( - store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - ) - self.assertEqual(len(res.keys()), 2) - - res1 = res[("srv1", key_id_1)] - self.assertEqual(res1.verify_key, KEY_1) - self.assertEqual(res1.valid_until_ts, 100) - - res2 = res[("srv1", key_id_2)] - self.assertEqual(res2.verify_key, KEY_2) - self.assertEqual(res2.valid_until_ts, 200) - - # we should be able to look up the same thing again without a db hit - res = self.get_success(store.get_server_signature_keys([("srv1", key_id_1)])) - self.assertEqual(len(res.keys()), 1) - self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) - - new_key_2 = signedjson.key.get_verify_key( - signedjson.key.generate_signing_key("key2") - ) - d = store.store_server_signature_keys( - "from_server", 10, {("srv1", key_id_2): FetchKeyResult(new_key_2, 300)} - ) - self.get_success(d) - - res = self.get_success( - store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - ) - self.assertEqual(len(res.keys()), 2) - - res1 = 
res[("srv1", key_id_1)] - self.assertEqual(res1.verify_key, KEY_1) - self.assertEqual(res1.valid_until_ts, 100) - - res2 = res[("srv1", key_id_2)] - self.assertEqual(res2.verify_key, new_key_2) - self.assertEqual(res2.valid_until_ts, 300) diff --git a/tests/unittest.py b/tests/unittest.py index 5d3640d8ac24..dbaff361b4b7 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -70,6 +70,7 @@ ) from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer +from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree @@ -858,23 +859,22 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version) self.get_success( - hs.get_datastores().main.store_server_keys_json( + hs.get_datastores().main.store_server_keys_response( self.OTHER_SERVER_NAME, - verify_key_id, from_server=self.OTHER_SERVER_NAME, - ts_now_ms=clock.time_msec(), - ts_expires_ms=clock.time_msec() + 10000, - key_json_bytes=canonicaljson.encode_canonical_json( - { - "verify_keys": { - verify_key_id: { - "key": signedjson.key.encode_verify_key_base64( - verify_key - ) - } + ts_added_ms=clock.time_msec(), + verify_keys={ + verify_key_id: FetchKeyResult( + verify_key=verify_key, valid_until_ts=clock.time_msec() + 10000 + ), + }, + response_json={ + "verify_keys": { + verify_key_id: { + "key": signedjson.key.encode_verify_key_base64(verify_key) } } - ), + }, ) ) From 16ef6f1e3c8d0cfe959e4209fd04528658383ab4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Sep 2023 07:12:31 -0400 Subject: [PATCH 38/75] Stop purging tables which are slated for removal. (#16273) --- changelog.d/16273.misc | 1 + synapse/storage/databases/main/purge_events.py | 4 ---- synapse/storage/schema/__init__.py | 6 +++++- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16273.misc diff --git a/changelog.d/16273.misc b/changelog.d/16273.misc new file mode 100644 index 000000000000..19882f6754c6 --- /dev/null +++ b/changelog.d/16273.misc @@ -0,0 +1 @@ +Stop purging from tables slated for removal. diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index b52f48cf0444..dea0e0458c04 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -450,10 +450,6 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "e2e_room_keys", "event_push_summary", "pusher_throttle", - "insertion_events", - "insertion_event_extremities", - "insertion_event_edges", - "batch_events", "room_account_data", "room_tags", # "rooms" happens last, to keep the foreign keys in the other tables diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 422f11f59e9e..5b50bd66bcb3 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-SCHEMA_VERSION = 81 # remember to update the list below when updating +SCHEMA_VERSION = 82 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -117,6 +117,10 @@ Changes in SCHEMA_VERSION = 81 - The event_txn_id is no longer written to for new events. + +Changes in SCHEMA_VERSION = 82 + - The insertion_events, insertion_event_extremities, insertion_event_edges, and + batch_events tables are no longer purged in preparation for their removal. """ From ba48c563c98966400488c8972d2e9964f9510399 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Sep 2023 07:16:09 -0400 Subject: [PATCH 39/75] Bump mypy from 1.4.1 to 1.5.1. (#16300) --- changelog.d/16300.misc | 1 + mypy.ini | 1 - poetry.lock | 68 +++++++++++++++------------------- synapse/logging/opentracing.py | 10 +---- 4 files changed, 32 insertions(+), 48 deletions(-) create mode 100644 changelog.d/16300.misc diff --git a/changelog.d/16300.misc b/changelog.d/16300.misc new file mode 100644 index 000000000000..8cc2e523699b --- /dev/null +++ b/changelog.d/16300.misc @@ -0,0 +1 @@ +Bump mypy from 1.4.1 to 1.5.1. diff --git a/mypy.ini b/mypy.ini index fb5f44c939d8..88aea301b9d1 100644 --- a/mypy.ini +++ b/mypy.ini @@ -23,7 +23,6 @@ warn_unused_ignores = True # warn_return_any = True # no_implicit_reexport = True strict_equality = True -strict_concatenate = True # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). diff --git a/poetry.lock b/poetry.lock index e4cea282820a..c01312579e42 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1445,37 +1445,38 @@ files = [ [[package]] name = "mypy" -version = "1.4.1" +version = "1.5.1" description = "Optional static typing for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"}, - {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"}, - {file = "mypy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd"}, - {file = "mypy-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc"}, - {file = "mypy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1"}, - {file = "mypy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462"}, - {file = "mypy-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258"}, - {file = "mypy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2"}, - {file = "mypy-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7"}, - {file = "mypy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01"}, - {file = "mypy-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b"}, - {file = "mypy-1.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b"}, - {file = "mypy-1.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7"}, - {file = "mypy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9"}, - {file = "mypy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042"}, - {file = "mypy-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3"}, - {file = "mypy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6"}, - {file = "mypy-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f"}, - {file = "mypy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc"}, - {file = "mypy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828"}, - {file = "mypy-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3"}, - {file = "mypy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816"}, - {file = "mypy-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c"}, - {file = "mypy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f"}, - {file = "mypy-1.4.1-py3-none-any.whl", hash = "sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4"}, - {file = "mypy-1.4.1.tar.gz", hash = "sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b"}, + {file = "mypy-1.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f33592ddf9655a4894aef22d134de7393e95fcbdc2d15c1ab65828eee5c66c70"}, + {file = "mypy-1.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:258b22210a4a258ccd077426c7a181d789d1121aca6db73a83f79372f5569ae0"}, + {file = "mypy-1.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9ec1f695f0c25986e6f7f8778e5ce61659063268836a38c951200c57479cc12"}, + {file = "mypy-1.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:abed92d9c8f08643c7d831300b739562b0a6c9fcb028d211134fc9ab20ccad5d"}, + {file = "mypy-1.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:a156e6390944c265eb56afa67c74c0636f10283429171018446b732f1a05af25"}, + {file = "mypy-1.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6ac9c21bfe7bc9f7f1b6fae441746e6a106e48fc9de530dea29e8cd37a2c0cc4"}, + {file = "mypy-1.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51cb1323064b1099e177098cb939eab2da42fea5d818d40113957ec954fc85f4"}, + {file = "mypy-1.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:596fae69f2bfcb7305808c75c00f81fe2829b6236eadda536f00610ac5ec2243"}, + {file = "mypy-1.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:32cb59609b0534f0bd67faebb6e022fe534bdb0e2ecab4290d683d248be1b275"}, + {file = "mypy-1.5.1-cp311-cp311-win_amd64.whl", 
hash = "sha256:159aa9acb16086b79bbb0016145034a1a05360626046a929f84579ce1666b315"}, + {file = "mypy-1.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f6b0e77db9ff4fda74de7df13f30016a0a663928d669c9f2c057048ba44f09bb"}, + {file = "mypy-1.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:26f71b535dfc158a71264e6dc805a9f8d2e60b67215ca0bfa26e2e1aa4d4d373"}, + {file = "mypy-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fc3a600f749b1008cc75e02b6fb3d4db8dbcca2d733030fe7a3b3502902f161"}, + {file = "mypy-1.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:26fb32e4d4afa205b24bf645eddfbb36a1e17e995c5c99d6d00edb24b693406a"}, + {file = "mypy-1.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:82cb6193de9bbb3844bab4c7cf80e6227d5225cc7625b068a06d005d861ad5f1"}, + {file = "mypy-1.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4a465ea2ca12804d5b34bb056be3a29dc47aea5973b892d0417c6a10a40b2d65"}, + {file = "mypy-1.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9fece120dbb041771a63eb95e4896791386fe287fefb2837258925b8326d6160"}, + {file = "mypy-1.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d28ddc3e3dfeab553e743e532fb95b4e6afad51d4706dd22f28e1e5e664828d2"}, + {file = "mypy-1.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:57b10c56016adce71fba6bc6e9fd45d8083f74361f629390c556738565af8eeb"}, + {file = "mypy-1.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff0cedc84184115202475bbb46dd99f8dcb87fe24d5d0ddfc0fe6b8575c88d2f"}, + {file = "mypy-1.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8f772942d372c8cbac575be99f9cc9d9fb3bd95c8bc2de6c01411e2c84ebca8a"}, + {file = "mypy-1.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5d627124700b92b6bbaa99f27cbe615c8ea7b3402960f6372ea7d65faf376c14"}, + {file = "mypy-1.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:361da43c4f5a96173220eb53340ace68cda81845cd88218f8862dfb0adc8cddb"}, + {file = "mypy-1.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:330857f9507c24de5c5724235e66858f8364a0693894342485e543f5b07c8693"}, + {file = "mypy-1.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:c543214ffdd422623e9fedd0869166c2f16affe4ba37463975043ef7d2ea8770"}, + {file = "mypy-1.5.1-py3-none-any.whl", hash = "sha256:f757063a83970d67c444f6e01d9550a7402322af3557ce7630d3c957386fa8f5"}, + {file = "mypy-1.5.1.tar.gz", hash = "sha256:b031b9601f1060bf1281feab89697324726ba0c0bae9d7cd7ab4b690940f0b92"}, ] [package.dependencies] @@ -1486,7 +1487,6 @@ typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] install-types = ["pip"] -python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] [[package]] @@ -2077,7 +2077,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2085,15 +2084,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2110,7 +2102,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2118,7 +2109,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 5c3045e197e9..4454fe29a5db 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -991,11 +991,7 @@ def _decorator(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func - # type-ignore: mypy seems to be confused by the ParamSpecs here. - # I think the problem is https://github.com/python/mypy/issues/12909 - return _custom_sync_async_decorator( - func, _wrapping_logic # type: ignore[arg-type] - ) + return _custom_sync_async_decorator(func, _wrapping_logic) return _decorator @@ -1040,9 +1036,7 @@ def _wrapping_logic( set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) yield - # type-ignore: mypy seems to be confused by the ParamSpecs here. - # I think the problem is https://github.com/python/mypy/issues/12909 - return _custom_sync_async_decorator(func, _wrapping_logic) # type: ignore[arg-type] + return _custom_sync_async_decorator(func, _wrapping_logic) @contextlib.contextmanager From 48387c56f11f2f4173291feaf36375ae68bb5507 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 12 Sep 2023 15:34:10 +0200 Subject: [PATCH 40/75] Update changelog --- CHANGES.md | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 13c53d260679..8513ca47f046 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,18 @@ # Synapse 1.92.1 (2023-09-12) -Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. +This minor release was needed only because of CI-related trouble on [v1.92.0](https://github.com/matrix-org/synapse/releases/tag/v1.92.0), which was never released. + +### Internal Changes + +- Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. # Synapse 1.92.0 (2023-09-12) +This release includes the same [bugfix](https://github.com/matrix-org/synapse/issues/16258) as Synapse 1.91.2. + +This version was never released following a CI build failure, cf [v1.92.1 changelog](https://github.com/matrix-org/synapse/releases/tag/v1.92.1). + ### Bugfixes - Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) @@ -15,6 +23,13 @@ Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. - Update the release script to work on macOS. 
([\#16266](https://github.com/matrix-org/synapse/issues/16266)) +# Synapse 1.91.2 (2023-09-06) + +### Bugfixes + +- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) + + # Synapse 1.92.0rc1 (2023-09-05) ### Features @@ -70,12 +85,6 @@ Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. * Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. ([\#16200](https://github.com/matrix-org/synapse/issues/16200)) * Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199)) -# Synapse 1.91.2 (2023-09-06) - -### Bugfixes - -- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) - # Synapse 1.91.1 (2023-09-04) From ab13fb08bf7c20a992ec2796c72d0fbb2a06545c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2023 10:51:50 +0100 Subject: [PATCH 41/75] Improve logging of replication (#16309) --- changelog.d/16309.misc | 1 + synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/resource.py | 7 ++++++- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16309.misc diff --git a/changelog.d/16309.misc b/changelog.d/16309.misc new file mode 100644 index 000000000000..bef5563ee99d --- /dev/null +++ b/changelog.d/16309.misc @@ -0,0 +1 @@ +Small improvements to logging in replication code. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index d9045d7b73f5..5642666411cc 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -644,7 +644,7 @@ async def _process_position( [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) + logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position( diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 347467d863ec..1d9a29d22ee6 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -191,7 +191,12 @@ async def _run_notifier_loop(self) -> None: if updates: logger.info( - "Streaming: %s -> %s", stream.NAME, updates[-1][0] + "Streaming: %s -> %s (limited: %s, updates: %s, max token: %s)", + stream.NAME, + updates[-1][0], + limited, + len(updates), + current_token, ) stream_updates_counter.labels(stream.NAME).inc(len(updates)) From be3c7b08a3e6888e60497a80ebd143bd4df9a719 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2023 11:54:16 +0100 Subject: [PATCH 42/75] Fix deleting device inbox when using background worker (#16311) Introduced in #16240 The action for the task was only defined on the "master" handler, rather than the base worker one. --- changelog.d/16311.misc | 1 + synapse/handlers/device.py | 62 +++++++++++++++++++------------------- 2 files changed, 32 insertions(+), 31 deletions(-) create mode 100644 changelog.d/16311.misc diff --git a/changelog.d/16311.misc b/changelog.d/16311.misc new file mode 100644 index 000000000000..4f266c1fb029 --- /dev/null +++ b/changelog.d/16311.misc @@ -0,0 +1 @@ +Delete device messages asynchronously and in staged batches using the task scheduler. 
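The commit message above describes the failure mode: the task scheduler dispatches the deletion task by action name, but the action was registered only by the master-only `DeviceHandler` subclass, so a background worker instantiating the base `DeviceWorkerHandler` could not resolve it. A minimal sketch of that bug class, using hypothetical names (`TaskRunner`, `WorkerHandler`, `MasterHandler`) rather than Synapse's real classes:

    from typing import Callable, Dict

    class TaskRunner:
        """Toy stand-in for a task scheduler that looks actions up by name."""

        def __init__(self) -> None:
            self._actions: Dict[str, Callable[[], str]] = {}

        def register_action(self, name: str, func: Callable[[], str]) -> None:
            self._actions[name] = func

        def run(self, name: str) -> str:
            # On a worker, this lookup raised KeyError while the action was
            # only registered by the master-only subclass.
            return self._actions[name]()

    class WorkerHandler:
        def __init__(self, runner: TaskRunner) -> None:
            # Registering in the base class (as this patch does) makes the
            # action resolvable on every process type.
            runner.register_action("delete_device_msgs", self._delete_batch)

        def _delete_batch(self) -> str:
            return "deleted one batch"

    class MasterHandler(WorkerHandler):
        pass  # master-only responsibilities would live here

    runner = TaskRunner()
    WorkerHandler(runner)  # a background worker process
    print(runner.run("delete_device_msgs"))  # -> "deleted one batch"

The diff that follows makes exactly this move: `register_action` and `_delete_device_messages` migrate from `DeviceHandler` up into `DeviceWorkerHandler`.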
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9d240ad4ee30..e2ae3da67ebc 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -91,9 +91,14 @@ def __init__(self, hs: "HomeServer"): self._query_appservices_for_keys = ( hs.config.experimental.msc3984_appservice_key_query ) + self._task_scheduler = hs.get_task_scheduler() self.device_list_updater = DeviceListWorkerUpdater(hs) + self._task_scheduler.register_action( + self._delete_device_messages, DELETE_DEVICE_MSGS_TASK_NAME + ) + @trace async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: """ @@ -383,6 +388,32 @@ async def handle_room_un_partial_stated(self, room_id: str) -> None: "Trying handling device list state for partial join: not supported on workers." ) + DEVICE_MSGS_DELETE_BATCH_LIMIT = 100 + + async def _delete_device_messages( + self, + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" + assert task.params is not None + user_id = task.params["user_id"] + device_id = task.params["device_id"] + up_to_stream_id = task.params["up_to_stream_id"] + + res = await self.store.delete_messages_for_device( + user_id=user_id, + device_id=device_id, + up_to_stream_id=up_to_stream_id, + limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT, + ) + + if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT: + return TaskStatus.COMPLETE, None, None + else: + # There is probably still device messages to be deleted, let's keep the task active and it will be run + # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running). + return TaskStatus.ACTIVE, None, None + class DeviceHandler(DeviceWorkerHandler): device_list_updater: "DeviceListUpdater" @@ -394,7 +425,6 @@ def __init__(self, hs: "HomeServer"): self._account_data_handler = hs.get_account_data_handler() self._storage_controllers = hs.get_storage_controllers() self.db_pool = hs.get_datastores().main.db_pool - self._task_scheduler = hs.get_task_scheduler() self.device_list_updater = DeviceListUpdater(hs, self) @@ -428,10 +458,6 @@ def __init__(self, hs: "HomeServer"): self._delete_stale_devices, ) - self._task_scheduler.register_action( - self._delete_device_messages, DELETE_DEVICE_MSGS_TASK_NAME - ) - def _check_device_name_length(self, name: Optional[str]) -> None: """ Checks whether a device name is longer than the maximum allowed length. 
@@ -590,32 +616,6 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: await self.notify_device_update(user_id, device_ids) - DEVICE_MSGS_DELETE_BATCH_LIMIT = 100 - - async def _delete_device_messages( - self, - task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: - """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" - assert task.params is not None - user_id = task.params["user_id"] - device_id = task.params["device_id"] - up_to_stream_id = task.params["up_to_stream_id"] - - res = await self.store.delete_messages_for_device( - user_id=user_id, - device_id=device_id, - up_to_stream_id=up_to_stream_id, - limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT, - ) - - if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT: - return TaskStatus.COMPLETE, None, None - else: - # There is probably still device messages to be deleted, let's keep the task active and it will be run - # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running). - return TaskStatus.ACTIVE, None, None - async def update_device(self, user_id: str, device_id: str, content: dict) -> None: """Update the given device From e9addf6a01ab173bcf0aeeae35d7052a5bde9454 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2023 11:59:44 +0100 Subject: [PATCH 43/75] Don't schedule an async task on every sync (#16312) --- changelog.d/16312.misc | 1 + synapse/handlers/sync.py | 37 ++++++++++++++++++++++++++----------- 2 files changed, 27 insertions(+), 11 deletions(-) create mode 100644 changelog.d/16312.misc diff --git a/changelog.d/16312.misc b/changelog.d/16312.misc new file mode 100644 index 000000000000..4f266c1fb029 --- /dev/null +++ b/changelog.d/16312.misc @@ -0,0 +1 @@ +Delete device messages asynchronously and in staged batches using the task scheduler. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0ccd7d250c4b..f1f19666d7cf 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -362,21 +362,36 @@ async def _wait_for_sync_for_user( # (since we now know that the device has received them) if since_token is not None: since_stream_id = since_token.to_device_key - # Delete device messages asynchronously and in batches using the task scheduler - await self._task_scheduler.schedule_task( - DELETE_DEVICE_MSGS_TASK_NAME, - resource_id=sync_config.device_id, - params={ - "user_id": sync_config.user.to_string(), - "device_id": sync_config.device_id, - "up_to_stream_id": since_stream_id, - }, + # Fast path: delete a limited number of to-device messages up front. + # We do this to avoid the overhead of scheduling a task for every + # sync. + device_deletion_limit = 100 + deleted = await self.store.delete_messages_for_device( + sync_config.user.to_string(), + sync_config.device_id, + since_stream_id, + limit=device_deletion_limit, ) logger.debug( - "Deletion of to-device messages up to %d scheduled", - since_stream_id, + "Deleted %d to-device messages up to %d", deleted, since_stream_id ) + # If we hit the limit, schedule a background task to delete the rest. 
+ if deleted >= device_deletion_limit: + await self._task_scheduler.schedule_task( + DELETE_DEVICE_MSGS_TASK_NAME, + resource_id=sync_config.device_id, + params={ + "user_id": sync_config.user.to_string(), + "device_id": sync_config.device_id, + "up_to_stream_id": since_stream_id, + }, + ) + logger.debug( + "Deletion of to-device messages up to %d scheduled", + since_stream_id, + ) + if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. From d38d0dffc94b6269ed7ff5163d60958be3e6c304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 13 Sep 2023 07:57:19 -0400 Subject: [PATCH 44/75] Use StrCollection in additional places. (#16301) --- changelog.d/16301.misc | 1 + synapse/app/_base.py | 12 +++---- synapse/config/_base.py | 3 +- synapse/events/__init__.py | 5 ++- synapse/events/builder.py | 8 ++--- synapse/events/validator.py | 6 ++-- synapse/http/client.py | 5 ++- synapse/http/servlet.py | 33 +++++++++---------- synapse/metrics/__init__.py | 8 ++--- synapse/notifier.py | 6 ++-- synapse/rest/client/_base.py | 4 +-- synapse/state/__init__.py | 13 ++++---- synapse/state/v1.py | 5 ++- synapse/state/v2.py | 7 ++-- .../databases/main/event_federation.py | 4 +-- synapse/visibility.py | 6 ++-- 16 files changed, 59 insertions(+), 67 deletions(-) create mode 100644 changelog.d/16301.misc diff --git a/changelog.d/16301.misc b/changelog.d/16301.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/16301.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a94b57a67192..9ac7e4313ec5 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -27,9 +27,7 @@ Any, Awaitable, Callable, - Collection, Dict, - Iterable, List, NoReturn, Optional, @@ -76,7 +74,7 @@ from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( load_legacy_third_party_event_rules, ) -from synapse.types import ISynapseReactor +from synapse.types import ISynapseReactor, StrCollection from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries from synapse.util.daemonize import daemonize_process @@ -278,7 +276,7 @@ async def wrapper() -> None: reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper())) -def listen_metrics(bind_addresses: Iterable[str], port: int) -> None: +def listen_metrics(bind_addresses: StrCollection, port: int) -> None: """ Start Prometheus metrics server. 
""" @@ -315,7 +313,7 @@ def _set_prometheus_client_use_created_metrics(new_value: bool) -> None: def listen_manhole( - bind_addresses: Collection[str], + bind_addresses: StrCollection, port: int, manhole_settings: ManholeConfig, manhole_globals: dict, @@ -339,7 +337,7 @@ def listen_manhole( def listen_tcp( - bind_addresses: Collection[str], + bind_addresses: StrCollection, port: int, factory: ServerFactory, reactor: IReactorTCP = reactor, @@ -448,7 +446,7 @@ def listen_http( def listen_ssl( - bind_addresses: Collection[str], + bind_addresses: StrCollection, port: int, factory: ServerFactory, context_factory: IOpenSSLContextFactory, diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 58856839e16f..c5816105f42c 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -26,7 +26,6 @@ from typing import ( Any, ClassVar, - Collection, Dict, Iterable, Iterator, @@ -384,7 +383,7 @@ class RootConfig: config_classes: List[Type[Config]] = [] - def __init__(self, config_files: Collection[str] = ()): + def __init__(self, config_files: StrSequence = ()): # Capture absolute paths here, so we can reload config after we daemonize. self.config_files = [os.path.abspath(path) for path in config_files] diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 35257a3b1ba0..3c1777b7ec57 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -25,7 +25,6 @@ Iterable, List, Optional, - Sequence, Tuple, Type, TypeVar, @@ -408,7 +407,7 @@ def items(self) -> List[Tuple[str, Optional[Any]]]: def keys(self) -> Iterable[str]: return self._dict.keys() - def prev_event_ids(self) -> Sequence[str]: + def prev_event_ids(self) -> List[str]: """Returns the list of prev event IDs. The order matches the order specified in the event, though there is no meaning to it. @@ -553,7 +552,7 @@ def event_id(self) -> str: self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1]) return self._event_id - def prev_event_ids(self) -> Sequence[str]: + def prev_event_ids(self) -> List[str]: """Returns the list of prev event IDs. The order matches the order specified in the event, though there is no meaning to it. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 14ea0e6640a6..1165c017baa8 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import attr from signedjson.types import SigningKey @@ -28,7 +28,7 @@ from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.state import StateHandler from synapse.storage.databases.main import DataStore -from synapse.types import EventID, JsonDict +from synapse.types import EventID, JsonDict, StrCollection from synapse.types.state import StateFilter from synapse.util import Clock from synapse.util.stringutils import random_string @@ -103,7 +103,7 @@ def is_state(self) -> bool: async def build( self, - prev_event_ids: Collection[str], + prev_event_ids: StrCollection, auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ) -> EventBase: @@ -136,7 +136,7 @@ async def build( format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. 
- prev_events: Union[Collection[str], List[Tuple[str, Dict[str, str]]]] + prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]] auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 34625dd7a185..5da50cb0d20b 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import collections.abc -from typing import Iterable, List, Type, Union, cast +from typing import List, Type, Union, cast import jsonschema from pydantic import Field, StrictBool, StrictStr @@ -36,7 +36,7 @@ from synapse.federation.federation_server import server_matches_acl_event from synapse.http.servlet import validate_json_object from synapse.rest.models import RequestBodyModel -from synapse.types import EventID, JsonDict, RoomID, UserID +from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID class EventValidator: @@ -225,7 +225,7 @@ def validate_builder(self, event: Union[EventBase, EventBuilder]) -> None: self._ensure_state_event(event) - def _ensure_strings(self, d: JsonDict, keys: Iterable[str]) -> None: + def _ensure_strings(self, d: JsonDict, keys: StrCollection) -> None: for s in keys: if s not in d: raise SynapseError(400, "'%s' not in content" % (s,)) diff --git a/synapse/http/client.py b/synapse/http/client.py index ca2cdbc6e243..c750e03b3659 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -78,7 +78,7 @@ from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags -from synapse.types import ISynapseReactor +from synapse.types import ISynapseReactor, StrSequence from synapse.util import json_decoder from synapse.util.async_helpers import timeout_deferred @@ -108,10 +108,9 @@ # the value actually has to be a List, but List is invariant so we can't specify that # the entries can either be Lists or bytes. RawHeaderValue = Union[ - List[str], + StrSequence, List[bytes], List[Union[str, bytes]], - Tuple[str, ...], Tuple[bytes, ...], Tuple[Union[str, bytes], ...], ] diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index fc6279362881..5d79d31579b1 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -18,7 +18,6 @@ from http import HTTPStatus from typing import ( TYPE_CHECKING, - Iterable, List, Mapping, Optional, @@ -38,7 +37,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.http import redact_uri from synapse.http.server import HttpServer -from synapse.types import JsonDict, RoomAlias, RoomID +from synapse.types import JsonDict, RoomAlias, RoomID, StrCollection from synapse.util import json_decoder if TYPE_CHECKING: @@ -340,7 +339,7 @@ def parse_string( name: str, default: str, *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> str: ... @@ -352,7 +351,7 @@ def parse_string( name: str, *, required: Literal[True], - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> str: ... 
@@ -365,7 +364,7 @@ def parse_string( *, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: ... @@ -376,7 +375,7 @@ def parse_string( name: str, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: """ @@ -485,7 +484,7 @@ def parse_enum( def _parse_string_value( value: bytes, - allowed_values: Optional[Iterable[str]], + allowed_values: Optional[StrCollection], name: str, encoding: str, ) -> str: @@ -511,7 +510,7 @@ def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[List[str]]: ... @@ -523,7 +522,7 @@ def parse_strings_from_args( name: str, default: List[str], *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> List[str]: ... @@ -535,7 +534,7 @@ def parse_strings_from_args( name: str, *, required: Literal[True], - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> List[str]: ... @@ -548,7 +547,7 @@ def parse_strings_from_args( default: Optional[List[str]] = None, *, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[List[str]]: ... @@ -559,7 +558,7 @@ def parse_strings_from_args( name: str, default: Optional[List[str]] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[List[str]]: """ @@ -610,7 +609,7 @@ def parse_string_from_args( name: str, default: Optional[str] = None, *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: ... @@ -623,7 +622,7 @@ def parse_string_from_args( default: Optional[str] = None, *, required: Literal[True], - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> str: ... @@ -635,7 +634,7 @@ def parse_string_from_args( name: str, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: ... 
@@ -646,7 +645,7 @@ def parse_string_from_args( name: str, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: """ @@ -821,7 +820,7 @@ def parse_and_validate_json_object_from_request( return validate_json_object(content, model_type) -def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None: +def assert_params_in_dict(body: JsonDict, required: StrCollection) -> None: absent = [] for k in required: if k not in body: diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 39fc629937a8..3cf2fbc3e288 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -25,7 +25,6 @@ Iterable, Mapping, Optional, - Sequence, Set, Tuple, Type, @@ -49,6 +48,7 @@ from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.metrics._twisted_exposition import MetricsResource, generate_latest from synapse.metrics._types import Collector +from synapse.types import StrSequence from synapse.util import SYNAPSE_VERSION logger = logging.getLogger(__name__) @@ -81,7 +81,7 @@ class LaterGauge(Collector): name: str desc: str - labels: Optional[Sequence[str]] = attr.ib(hash=False) + labels: Optional[StrSequence] = attr.ib(hash=False) # callback: should either return a value (if there are no labels for this metric), # or dict mapping from a label tuple to a value caller: Callable[ @@ -143,8 +143,8 @@ def __init__( self, name: str, desc: str, - labels: Sequence[str], - sub_metrics: Sequence[str], + labels: StrSequence, + sub_metrics: StrSequence, ): self.name = name self.desc = desc diff --git a/synapse/notifier.py b/synapse/notifier.py index 68115bca7061..fc39e5c96373 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -104,7 +104,7 @@ class _NotifierUserStream: def __init__( self, user_id: str, - rooms: Collection[str], + rooms: StrCollection, current_token: StreamToken, time_now_ms: int, ): @@ -457,7 +457,7 @@ def on_new_event( stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, - rooms: Optional[Collection[str]] = None, + rooms: Optional[StrCollection] = None, ) -> None: """Used to inform listeners that something has happened event wise. 
@@ -529,7 +529,7 @@ async def wait_for_events( user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], - room_ids: Optional[Collection[str]] = None, + room_ids: Optional[StrCollection] = None, from_token: StreamToken = StreamToken.START, ) -> T: """Wait until the callback returns a non empty response or the diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py index 5c1c19e1f362..73c568ef7572 100644 --- a/synapse/rest/client/_base.py +++ b/synapse/rest/client/_base.py @@ -20,14 +20,14 @@ from synapse.api.errors import InteractiveAuthIncompleteError from synapse.api.urls import CLIENT_API_PREFIX -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection logger = logging.getLogger(__name__) def client_patterns( path_regex: str, - releases: Iterable[str] = ("r0", "v3"), + releases: StrCollection = ("r0", "v3"), unstable: bool = True, v1: bool = False, ) -> Iterable[Pattern]: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 1b91cf5eaa96..e977ed10444a 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -20,7 +20,6 @@ Any, Awaitable, Callable, - Collection, DefaultDict, Dict, FrozenSet, @@ -49,7 +48,7 @@ from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.types import StateMap +from synapse.types import StateMap, StrCollection from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -197,7 +196,7 @@ def __init__(self, hs: "HomeServer"): async def compute_state_after_events( self, room_id: str, - event_ids: Collection[str], + event_ids: StrCollection, state_filter: Optional[StateFilter] = None, await_full_state: bool = True, ) -> StateMap[str]: @@ -231,7 +230,7 @@ async def compute_state_after_events( return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_user_ids_in_room( - self, room_id: str, latest_event_ids: Collection[str] + self, room_id: str, latest_event_ids: StrCollection ) -> Set[str]: """ Get the users IDs who are currently in a room. @@ -256,7 +255,7 @@ async def get_current_user_ids_in_room( return await self.store.get_joined_user_ids_from_state(room_id, state) async def get_hosts_in_room_at_events( - self, room_id: str, event_ids: Collection[str] + self, room_id: str, event_ids: StrCollection ) -> FrozenSet[str]: """Get the hosts that were in a room at the given event ids @@ -470,7 +469,7 @@ async def compute_event_context( @trace @measure_func() async def resolve_state_groups_for_events( - self, room_id: str, event_ids: Collection[str], await_full_state: bool = True + self, room_id: str, event_ids: StrCollection, await_full_state: bool = True ) -> _StateCacheEntry: """Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. 
@@ -882,7 +881,7 @@ class StateResolutionStore: store: "DataStore" def get_events( - self, event_ids: Collection[str], allow_rejected: bool = False + self, event_ids: StrCollection, allow_rejected: bool = False ) -> Awaitable[Dict[str, EventBase]]: """Get events from the database diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 500e38469537..c76a2f082ea4 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -17,7 +17,6 @@ from typing import ( Awaitable, Callable, - Collection, Dict, Iterable, List, @@ -32,7 +31,7 @@ from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase -from synapse.types import MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap, StrCollection logger = logging.getLogger(__name__) @@ -45,7 +44,7 @@ async def resolve_events_with_store( room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], - state_map_factory: Callable[[Collection[str]], Awaitable[Dict[str, EventBase]]], + state_map_factory: Callable[[StrCollection], Awaitable[Dict[str, EventBase]]], ) -> StateMap[str]: """ Args: diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 44c49274a983..1752f95db8cc 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -19,7 +19,6 @@ Any, Awaitable, Callable, - Collection, Dict, Generator, Iterable, @@ -39,7 +38,7 @@ from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase -from synapse.types import MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap, StrCollection logger = logging.getLogger(__name__) @@ -56,7 +55,7 @@ class StateResolutionStore(Protocol): # This is usually synapse.state.StateResolutionStore, but it's replaced with a # TestStateResolutionStore in tests. def get_events( - self, event_ids: Collection[str], allow_rejected: bool = False + self, event_ids: StrCollection, allow_rejected: bool = False ) -> Awaitable[Dict[str, EventBase]]: ... 
@@ -366,7 +365,7 @@ async def _get_auth_chain_difference( union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:]) intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:]) - auth_difference_unpersisted_part: Collection[str] = union - intersection + auth_difference_unpersisted_part: StrCollection = union - intersection else: auth_difference_unpersisted_part = () state_sets_ids = [set(state_set.values()) for state_set in state_sets] diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index fab7008a8f85..09de8f55e277 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -47,7 +47,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict, StrCollection +from synapse.types import JsonDict, StrCollection, StrSequence from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache @@ -1179,7 +1179,7 @@ def _get_rooms_with_many_extremities_txn(txn: LoggingTransaction) -> List[str]: ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> Sequence[str]: + async def get_latest_event_ids_in_room(self, room_id: str) -> StrSequence: return await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, diff --git a/synapse/visibility.py b/synapse/visibility.py index eac10f643810..f15fdd83146e 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -36,7 +36,7 @@ from synapse.logging.opentracing import trace from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore -from synapse.types import RetentionPolicy, StateMap, get_domain_from_id +from synapse.types import RetentionPolicy, StateMap, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util import Clock @@ -150,12 +150,12 @@ def allowed(event: EventBase) -> Optional[EventBase]: async def filter_event_for_clients_with_state( store: DataStore, - user_ids: Collection[str], + user_ids: StrCollection, event: EventBase, context: EventContext, is_peeking: bool = False, filter_send_to_client: bool = True, -) -> Collection[str]: +) -> StrCollection: """ Checks to see if an event is visible to the users in the list at the time of the event. From 7afb5e041004bab8b0aaf7909ce3c7a9ef80077f Mon Sep 17 00:00:00 2001 From: Hanadi Date: Wed, 13 Sep 2023 14:33:39 +0200 Subject: [PATCH 45/75] Fix using dehydrated devices (MSC2697) & refresh tokens (#16288) Refresh tokens were not correctly moved to the rehydrated device (similar to how the access token is currently handled). This resulted in invalid refresh tokens after rehydration. 
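In outline, the rehydration flow after this change looks like the following (a simplified sketch of `rehydrate_device` with error handling omitted; the real change is in the diff below):

```python
async def rehydrate_device(self, user_id: str, access_token: str, device_id: str):
    # Re-point the caller's access token at the dehydrated device, as before...
    old_device_id = await self.store.set_device_for_access_token(access_token, device_id)
    # ...and now also re-point any refresh tokens, which previously kept
    # referencing the soon-to-be-destroyed device and so stopped working.
    await self.store.set_device_for_refresh_token(user_id, old_device_id, device_id)
```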
--- changelog.d/16288.bugfix | 1 + synapse/handlers/device.py | 7 ++++--- .../storage/databases/main/registration.py | 20 +++++++++++++++++++ tests/handlers/test_device.py | 10 +++++++++- 4 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16288.bugfix diff --git a/changelog.d/16288.bugfix b/changelog.d/16288.bugfix new file mode 100644 index 000000000000..f08d10d1f3c5 --- /dev/null +++ b/changelog.d/16288.bugfix @@ -0,0 +1 @@ +Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index e2ae3da67ebc..0d3d5ebc86d7 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -758,12 +758,13 @@ async def rehydrate_device( # If the dehydrated device was successfully deleted (the device ID # matched the stored dehydrated device), then modify the access - # token to use the dehydrated device's ID and copy the old device - # display name to the dehydrated device, and destroy the old device - # ID + # token and refresh token to use the dehydrated device's ID and + # copy the old device display name to the dehydrated device, + # and destroy the old device ID old_device_id = await self.store.set_device_for_access_token( access_token, device_id ) + await self.store.set_device_for_refresh_token(user_id, old_device_id, device_id) old_device = await self.store.get_device(user_id, old_device_id) if old_device is None: raise errors.NotFoundError() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7e85b73e8e3a..e34156dc5584 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -2312,6 +2312,26 @@ async def add_refresh_token_to_user( return next_id + async def set_device_for_refresh_token( + self, user_id: str, old_device_id: str, device_id: str + ) -> None: + """Moves refresh tokens from old device to current device + + Args: + user_id: The user of the devices. + old_device_id: The old device. + device_id: The new device ID. 
+        Returns:
+            None
+        """
+
+        await self.db_pool.simple_update(
+            "refresh_tokens",
+            keyvalues={"user_id": user_id, "device_id": old_device_id},
+            updatevalues={"device_id": device_id},
+            desc="set_device_for_refresh_token",
+        )
+
     def _set_device_for_access_token_txn(
         self, txn: LoggingTransaction, token: str, device_id: str
     ) -> str:
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 79d327499baa..d4ed068357ae 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -461,6 +461,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.message_handler = hs.get_device_message_handler()
         self.registration = hs.get_registration_handler()
         self.auth = hs.get_auth()
+        self.auth_handler = hs.get_auth_handler()
         self.store = hs.get_datastores().main
         return hs
 
@@ -487,11 +488,12 @@ def test_dehydrate_and_rehydrate_device(self) -> None:
         self.assertEqual(device_data, {"device_data": {"foo": "bar"}})
 
         # Create a new login for the user and dehydrate the device
-        device_id, access_token, _expiration_time, _refresh_token = self.get_success(
+        device_id, access_token, _expiration_time, refresh_token = self.get_success(
             self.registration.register_device(
                 user_id=user_id,
                 device_id=None,
                 initial_display_name="new device",
+                should_issue_refresh_token=True,
             )
         )
 
@@ -522,6 +524,12 @@ def test_dehydrate_and_rehydrate_device(self) -> None:
 
         self.assertEqual(user_info.device_id, retrieved_device_id)
 
+        # make sure the user device has the refresh token
+        assert refresh_token is not None
+        self.get_success(
+            self.auth_handler.refresh_token(refresh_token, 5 * 60 * 1000, 5 * 60 * 1000)
+        )
+
         # make sure the device has the display name that was set from the login
         res = self.get_success(self.handler.get_device(user_id, retrieved_device_id))
 
From 032cf84f524a972f38977a67d61163f08d9dcf2a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 13 Sep 2023 16:17:06 +0100
Subject: [PATCH 46/75] Remove a reference cycle in background process (#16314)

---
 changelog.d/16314.misc                        |  1 +
 synapse/metrics/background_process_metrics.py | 21 ++++++++++++++++++-
 2 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16314.misc

diff --git a/changelog.d/16314.misc b/changelog.d/16314.misc
new file mode 100644
index 000000000000..a32b07112a7d
--- /dev/null
+++ b/changelog.d/16314.misc
@@ -0,0 +1 @@
+Remove a reference cycle in background processes.
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 9ea4e23b3107..f1f1f0cdf9a0 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -322,13 +322,21 @@ def __init__(self, name: str, instance_id: Optional[Union[int, str]] = None):
         if instance_id is None:
             instance_id = id(self)
         super().__init__("%s-%s" % (name, instance_id))
-        self._proc = _BackgroundProcess(name, self)
+        self._proc: Optional[_BackgroundProcess] = _BackgroundProcess(name, self)
 
     def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
         """Log context has started running (again)."""
 
         super().start(rusage)
 
+        if self._proc is None:
+            logger.error(
+                "Background process re-entered without a proc: %s",
+                self.name,
+                stack_info=True,
+            )
+            return
+
         # We've become active again so we make sure we're in the list of active
         # procs. (Note that "start" here means we've become active, as opposed
         # to starting for the first time.)
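The cycle in question: the logging context holds `_proc`, and the `_BackgroundProcess` is constructed with the context itself (`_BackgroundProcess(name, self)`), so each object keeps the other alive until the garbage collector's cycle detection runs. Clearing `_proc` on exit (see the next hunk) lets plain reference counting reclaim both. A minimal illustration of the pattern (toy names, not Synapse code):

```python
from typing import Optional

class Proc:
    def __init__(self, ctx: "Ctx") -> None:
        self.ctx = ctx  # Proc -> Ctx

class Ctx:
    def __init__(self) -> None:
        # Ctx -> Proc -> Ctx: a cycle that refcounting alone cannot free.
        self.proc: Optional[Proc] = Proc(self)

    def close(self) -> None:
        # Drop one edge; the cycle is gone and both objects can be
        # reclaimed as soon as external references disappear.
        self.proc = None
```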
@@ -345,6 +353,14 @@ def __exit__( super().__exit__(type, value, traceback) + if self._proc is None: + logger.error( + "Background process exited without a proc: %s", + self.name, + stack_info=True, + ) + return + # The background process has finished. We explicitly remove and manually # update the metrics here so that if nothing is scraping metrics the set # doesn't infinitely grow. @@ -352,3 +368,6 @@ def __exit__( _background_processes_active_since_last_scrape.discard(self._proc) self._proc.update_metrics() + + # Set proc to None to break the reference cycle. + self._proc = None From 954921736b88de25c775c519a206449e46b3bf07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Sep 2023 12:46:30 +0100 Subject: [PATCH 47/75] Refactor `get_user_by_id` (#16316) --- changelog.d/16316.misc | 1 + synapse/api/auth/internal.py | 2 +- synapse/api/auth/msc3861_delegated.py | 2 +- synapse/handlers/account.py | 2 +- synapse/handlers/admin.py | 49 ++++++------ synapse/handlers/message.py | 6 +- synapse/module_api/__init__.py | 4 +- synapse/rest/consent/consent_resource.py | 2 +- .../server_notices/consent_server_notices.py | 6 +- synapse/storage/databases/main/client_ips.py | 11 +++ .../storage/databases/main/registration.py | 76 ++++++------------- synapse/types/__init__.py | 10 ++- tests/api/test_auth.py | 12 ++- tests/storage/test_registration.py | 48 ++++++------ 14 files changed, 108 insertions(+), 123 deletions(-) create mode 100644 changelog.d/16316.misc diff --git a/changelog.d/16316.misc b/changelog.d/16316.misc new file mode 100644 index 000000000000..aa0644f278c4 --- /dev/null +++ b/changelog.d/16316.misc @@ -0,0 +1 @@ +Refactor `get_user_by_id`. diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index 6a5fd44ec01c..a75f6f2cc44e 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -268,7 +268,7 @@ async def get_user_by_access_token( stored_user = await self.store.get_user_by_id(user_id) if not stored_user: raise InvalidClientTokenError("Unknown user_id %s" % user_id) - if not stored_user["is_guest"]: + if not stored_user.is_guest: raise InvalidClientTokenError( "Guest access token used for regular user" ) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index ef5d3f9b815c..31bb035cc846 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -300,7 +300,7 @@ async def get_user_by_access_token( user_id = UserID(username, self._hostname) # First try to find a user from the username claim - user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string()) + user_info = await self.store.get_user_by_id(user_id=user_id.to_string()) if user_info is None: # If the user does not exist, we should create it on the fly # TODO: we could use SCIM to provision users ahead of time and listen diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py index c05a14304c1e..fa043cca867d 100644 --- a/synapse/handlers/account.py +++ b/synapse/handlers/account.py @@ -102,7 +102,7 @@ async def _get_local_account_status(self, user_id: UserID) -> JsonDict: """ status = {"exists": False} - userinfo = await self._main_store.get_userinfo_by_id(user_id.to_string()) + userinfo = await self._main_store.get_user_by_id(user_id.to_string()) if userinfo is not None: status = { diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 2f0e5f3b0a9e..7092ff3449ca 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -18,7 +18,7 @@ from 
synapse.api.constants import Direction, Membership from synapse.events import EventBase -from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID +from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID, UserInfo from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -57,38 +57,30 @@ async def get_whois(self, user: UserID) -> JsonDict: async def get_user(self, user: UserID) -> Optional[JsonDict]: """Function to get user details""" - user_info_dict = await self._store.get_user_by_id(user.to_string()) - if user_info_dict is None: + user_info: Optional[UserInfo] = await self._store.get_user_by_id( + user.to_string() + ) + if user_info is None: return None - # Restrict returned information to a known set of fields. This prevents additional - # fields added to get_user_by_id from modifying Synapse's external API surface. - user_info_to_return = { - "name", - "admin", - "deactivated", - "locked", - "shadow_banned", - "creation_ts", - "appservice_id", - "consent_server_notice_sent", - "consent_version", - "consent_ts", - "user_type", - "is_guest", - "last_seen_ts", + user_info_dict = { + "name": user.to_string(), + "admin": user_info.is_admin, + "deactivated": user_info.is_deactivated, + "locked": user_info.locked, + "shadow_banned": user_info.is_shadow_banned, + "creation_ts": user_info.creation_ts, + "appservice_id": user_info.appservice_id, + "consent_server_notice_sent": user_info.consent_server_notice_sent, + "consent_version": user_info.consent_version, + "consent_ts": user_info.consent_ts, + "user_type": user_info.user_type, + "is_guest": user_info.is_guest, } if self._msc3866_enabled: # Only include the approved flag if support for MSC3866 is enabled. - user_info_to_return.add("approved") - - # Restrict returned keys to a known set. 
- user_info_dict = { - key: value - for key, value in user_info_dict.items() - if key in user_info_to_return - } + user_info_dict["approved"] = user_info.approved # Add additional user metadata profile = await self._store.get_profileinfo(user) @@ -105,6 +97,9 @@ async def get_user(self, user: UserID) -> Optional[JsonDict]: user_info_dict["external_ids"] = external_ids user_info_dict["erased"] = await self._store.is_user_erased(user.to_string()) + last_seen_ts = await self._store.get_last_seen_for_user_id(user.to_string()) + user_info_dict["last_seen_ts"] = last_seen_ts + return user_info_dict async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> Any: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d6be18cdefff..c036578a3dce 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -828,13 +828,13 @@ async def assert_accepted_privacy_policy(self, requester: Requester) -> None: u = await self.store.get_user_by_id(user_id) assert u is not None - if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT): + if u.user_type in (UserTypes.SUPPORT, UserTypes.BOT): # support and bot users are not required to consent return - if u["appservice_id"] is not None: + if u.appservice_id is not None: # users registered by an appservice are exempt return - if u["consent_version"] == self.config.consent.user_consent_version: + if u.consent_version == self.config.consent.user_consent_version: return consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d6efe10a28ba..7ec202be2342 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -572,7 +572,7 @@ async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]: Returns: UserInfo object if a user was found, otherwise None """ - return await self._store.get_userinfo_by_id(user_id) + return await self._store.get_user_by_id(user_id) async def get_user_by_req( self, @@ -1878,7 +1878,7 @@ async def put_global( raise TypeError(f"new_data must be a dict; got {type(new_data).__name__}") # Ensure the user exists, so we don't just write to users that aren't there. 
- if await self._store.get_userinfo_by_id(user_id) is None: + if await self._store.get_user_by_id(user_id) is None: raise ValueError(f"User {user_id} does not exist on this server.") await self._handler.add_account_data_for_user(user_id, data_type, new_data) diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 25f9ea285bca..88d3ec1baf61 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -129,7 +129,7 @@ async def _async_render_GET(self, request: Request) -> None: if u is None: raise NotFoundError("Unknown user") - has_consented = u["consent_version"] == version + has_consented = u.consent_version == version userhmac = userhmac_bytes.decode("ascii") try: diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 94025ba41f7d..a879b6505e4e 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -79,15 +79,15 @@ async def maybe_send_server_notice_to_user(self, user_id: str) -> None: if u is None: return - if u["is_guest"] and not self._send_to_guests: + if u.is_guest and not self._send_to_guests: # don't send to guests return - if u["consent_version"] == self._current_consent_version: + if u.consent_version == self._current_consent_version: # user has already consented return - if u["consent_server_notice_sent"] == self._current_consent_version: + if u.consent_server_notice_sent == self._current_consent_version: # we've already sent a notice to the user return diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index d8d333e11d04..7da47c3dd727 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -764,3 +764,14 @@ async def get_user_ip_and_agents( } return list(results.values()) + + async def get_last_seen_for_user_id(self, user_id: str) -> Optional[int]: + """Get the last seen timestamp for a user, if we have it.""" + + return await self.db_pool.simple_select_one_onecol( + table="user_ips", + keyvalues={"user_id": user_id}, + retcol="MAX(last_seen)", + allow_none=True, + desc="get_last_seen_for_user_id", + ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index e34156dc5584..cc964604e283 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -16,7 +16,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast import attr @@ -192,8 +192,8 @@ def __init__( ) @cached() - async def get_user_by_id(self, user_id: str) -> Optional[Mapping[str, Any]]: - """Deprecated: use get_userinfo_by_id instead""" + async def get_user_by_id(self, user_id: str) -> Optional[UserInfo]: + """Returns info about the user account, if it exists.""" def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: # We could technically use simple_select_one here, but it would not perform @@ -202,16 +202,12 @@ def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: txn.execute( """ SELECT - name, password_hash, is_guest, admin, consent_version, consent_ts, + name, is_guest, admin, consent_version, consent_ts, consent_server_notice_sent, appservice_id, creation_ts, user_type, 
deactivated, COALESCE(shadow_banned, FALSE) AS shadow_banned, COALESCE(approved, TRUE) AS approved, - COALESCE(locked, FALSE) AS locked, last_seen_ts + COALESCE(locked, FALSE) AS locked FROM users - LEFT JOIN ( - SELECT user_id, MAX(last_seen) AS last_seen_ts - FROM user_ips GROUP BY user_id - ) ls ON users.name = ls.user_id WHERE name = ? """, (user_id,), @@ -228,51 +224,23 @@ def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: desc="get_user_by_id", func=get_user_by_id_txn, ) - - if row is not None: - # If we're using SQLite our boolean values will be integers. Because we - # present some of this data as is to e.g. server admins via REST APIs, we - # want to make sure we're returning the right type of data. - # Note: when adding a column name to this list, be wary of NULLable columns, - # since NULL values will be turned into False. - boolean_columns = [ - "admin", - "deactivated", - "shadow_banned", - "approved", - "locked", - ] - for column in boolean_columns: - row[column] = bool(row[column]) - - return row - - async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]: - """Get a UserInfo object for a user by user ID. - - Note! Currently uses the cache of `get_user_by_id`. Once that deprecated method is removed, - this method should be cached. - - Args: - user_id: The user to fetch user info for. - Returns: - `UserInfo` object if user found, otherwise `None`. - """ - user_data = await self.get_user_by_id(user_id) - if not user_data: + if row is None: return None + return UserInfo( - appservice_id=user_data["appservice_id"], - consent_server_notice_sent=user_data["consent_server_notice_sent"], - consent_version=user_data["consent_version"], - creation_ts=user_data["creation_ts"], - is_admin=bool(user_data["admin"]), - is_deactivated=bool(user_data["deactivated"]), - is_guest=bool(user_data["is_guest"]), - is_shadow_banned=bool(user_data["shadow_banned"]), - user_id=UserID.from_string(user_data["name"]), - user_type=user_data["user_type"], - last_seen_ts=user_data["last_seen_ts"], + appservice_id=row["appservice_id"], + consent_server_notice_sent=row["consent_server_notice_sent"], + consent_version=row["consent_version"], + consent_ts=row["consent_ts"], + creation_ts=row["creation_ts"], + is_admin=bool(row["admin"]), + is_deactivated=bool(row["deactivated"]), + is_guest=bool(row["is_guest"]), + is_shadow_banned=bool(row["shadow_banned"]), + user_id=UserID.from_string(row["name"]), + user_type=row["user_type"], + approved=bool(row["approved"]), + locked=bool(row["locked"]), ) async def is_trial_user(self, user_id: str) -> bool: @@ -290,10 +258,10 @@ async def is_trial_user(self, user_id: str) -> bool: now = self._clock.time_msec() days = self.config.server.mau_appservice_trial_days.get( - info["appservice_id"], self.config.server.mau_trial_days + info.appservice_id, self.config.server.mau_trial_days ) trial_duration_ms = days * 24 * 60 * 60 * 1000 - is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms + is_trial = (now - info.creation_ts * 1000) < trial_duration_ms return is_trial @cached() diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 488714f60cb6..76b0e3e694f7 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -933,33 +933,37 @@ def get_verify_key_from_cross_signing_key( @attr.s(auto_attribs=True, frozen=True, slots=True) class UserInfo: - """Holds information about a user. Result of get_userinfo_by_id. + """Holds information about a user. Result of get_user_by_id. 
Attributes: user_id: ID of the user. appservice_id: Application service ID that created this user. consent_server_notice_sent: Version of policy documents the user has been sent. consent_version: Version of policy documents the user has consented to. + consent_ts: Time the user consented creation_ts: Creation timestamp of the user. is_admin: True if the user is an admin. is_deactivated: True if the user has been deactivated. is_guest: True if the user is a guest user. is_shadow_banned: True if the user has been shadow-banned. user_type: User type (None for normal user, 'support' and 'bot' other options). - last_seen_ts: Last activity timestamp of the user. + approved: If the user has been "approved" to register on the server. + locked: Whether the user's account has been locked """ user_id: UserID appservice_id: Optional[int] consent_server_notice_sent: Optional[str] consent_version: Optional[str] + consent_ts: Optional[int] user_type: Optional[str] creation_ts: int is_admin: bool is_deactivated: bool is_guest: bool is_shadow_banned: bool - last_seen_ts: Optional[int] + approved: bool + locked: bool class UserProfile(TypedDict): diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index dcd01d56885c..e00d7215dfeb 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -188,8 +188,11 @@ def test_get_user_by_req_appservice_valid_token_valid_user_id(self) -> None: ) app_service.is_interested_in_user = Mock(return_value=True) self.store.get_app_service_by_token = Mock(return_value=app_service) - # This just needs to return a truth-y value. - self.store.get_user_by_id = AsyncMock(return_value={"is_guest": False}) + + class FakeUserInfo: + is_guest = False + + self.store.get_user_by_id = AsyncMock(return_value=FakeUserInfo()) self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) @@ -341,7 +344,10 @@ def test_get_user_from_macaroon(self) -> None: ) def test_get_guest_user_from_macaroon(self) -> None: - self.store.get_user_by_id = AsyncMock(return_value={"is_guest": True}) + class FakeUserInfo: + is_guest = True + + self.store.get_user_by_id = AsyncMock(return_value=FakeUserInfo()) self.store.get_user_by_access_token = AsyncMock(return_value=None) user_id = "@baldrick:matrix.org" diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 95c9792d546e..0cca34d355f6 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -16,7 +16,7 @@ from synapse.api.constants import UserTypes from synapse.api.errors import ThreepidValidationError from synapse.server import HomeServer -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, UserID, UserInfo from synapse.util import Clock from tests.unittest import HomeserverTestCase, override_config @@ -35,24 +35,22 @@ def test_register(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) self.assertEqual( - { + UserInfo( # TODO(paul): Surely this field should be 'user_id', not 'name' - "name": self.user_id, - "password_hash": self.pwhash, - "admin": 0, - "is_guest": 0, - "consent_version": None, - "consent_ts": None, - "consent_server_notice_sent": None, - "appservice_id": None, - "creation_ts": 0, - "user_type": None, - "deactivated": 0, - "locked": 0, - "shadow_banned": 0, - "approved": 1, - "last_seen_ts": None, - }, + user_id=UserID.from_string(self.user_id), + is_admin=False, + is_guest=False, + consent_server_notice_sent=None, + consent_ts=None, + consent_version=None, + 
appservice_id=None, + creation_ts=0, + user_type=None, + is_deactivated=False, + locked=False, + is_shadow_banned=False, + approved=True, + ), (self.get_success(self.store.get_user_by_id(self.user_id))), ) @@ -65,9 +63,11 @@ def test_consent(self) -> None: user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user - self.assertEqual(user["consent_version"], "1") - self.assertGreater(user["consent_ts"], before_consent) - self.assertLess(user["consent_ts"], self.clock.time_msec()) + self.assertEqual(user.consent_version, "1") + self.assertIsNotNone(user.consent_ts) + assert user.consent_ts is not None + self.assertGreater(user.consent_ts, before_consent) + self.assertLess(user.consent_ts, self.clock.time_msec()) def test_add_tokens(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) @@ -215,7 +215,7 @@ def test_approval_not_required(self) -> None: user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user is not None - self.assertTrue(user["approved"]) + self.assertTrue(user.approved) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) @@ -228,7 +228,7 @@ def test_approval_required(self) -> None: user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user is not None - self.assertFalse(user["approved"]) + self.assertFalse(user.approved) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertFalse(approved) @@ -248,7 +248,7 @@ def test_override(self) -> None: user = self.get_success(self.store.get_user_by_id(self.user_id)) self.assertIsNotNone(user) assert user is not None - self.assertEqual(user["approved"], 1) + self.assertEqual(user.approved, 1) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) From 39dc5de39912828372acc4ecbaa46c9e2b3de97e Mon Sep 17 00:00:00 2001 From: 6543 <6543@obermui.de> Date: Thu, 14 Sep 2023 15:13:48 +0200 Subject: [PATCH 48/75] docs: Link to the Alpine Linux community package for Synapse (#16304) --- changelog.d/16304.doc | 1 + docs/setup/installation.md | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/16304.doc diff --git a/changelog.d/16304.doc b/changelog.d/16304.doc new file mode 100644 index 000000000000..53660ec9a4ce --- /dev/null +++ b/changelog.d/16304.doc @@ -0,0 +1 @@ +Link to the Alpine Linux community package for Synapse. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 0357d2a0fb82..1f13864a8fc7 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -155,6 +155,14 @@ sudo pip uninstall py-bcrypt sudo pip install py-bcrypt ``` +#### Alpine Linux + +6543 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. 
Install with:
+
+```sh
+sudo apk add synapse
+```
+
 #### Void Linux
 
 Synapse can be found in the void repositories as

From e9e2904eb2c0b73eb4154faf41bd360e6168cc92 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 14 Sep 2023 14:56:07 +0100
Subject: [PATCH 49/75] Speed up deleting to-device messages task (#16318)

---
 changelog.d/16318.misc     |  1 +
 synapse/handlers/device.py | 27 ++++++++++++++-------------
 2 files changed, 15 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/16318.misc

diff --git a/changelog.d/16318.misc b/changelog.d/16318.misc
new file mode 100644
index 000000000000..1433a2f24645
--- /dev/null
+++ b/changelog.d/16318.misc
@@ -0,0 +1 @@
+Speed up task to delete to-device messages.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 0d3d5ebc86d7..86ad96d030d2 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -388,7 +388,8 @@ async def handle_room_un_partial_stated(self, room_id: str) -> None:
             "Trying handling device list state for partial join: not supported on workers."
         )
 
-    DEVICE_MSGS_DELETE_BATCH_LIMIT = 100
+    DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000
+    DEVICE_MSGS_DELETE_SLEEP_MS = 1000
 
     async def _delete_device_messages(
         self,
@@ -400,19 +401,19 @@ async def _delete_device_messages(
         device_id = task.params["device_id"]
         up_to_stream_id = task.params["up_to_stream_id"]
 
-        res = await self.store.delete_messages_for_device(
-            user_id=user_id,
-            device_id=device_id,
-            up_to_stream_id=up_to_stream_id,
-            limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT,
-        )
+        # Delete the messages in batches to avoid too much DB load.
+        while True:
+            res = await self.store.delete_messages_for_device(
+                user_id=user_id,
+                device_id=device_id,
+                up_to_stream_id=up_to_stream_id,
+                limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT,
+            )
 
-        if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT:
-            return TaskStatus.COMPLETE, None, None
-        else:
-            # There is probably still device messages to be deleted, let's keep the task active and it will be run
-            # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running).
-            return TaskStatus.ACTIVE, None, None
+            if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT:
+                return TaskStatus.COMPLETE, None, None
+
+            await self.clock.sleep(DeviceHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0)
 
 
 class DeviceHandler(DeviceWorkerHandler):

From 329597022ee02516e5cbee11fcd566e05609b724 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 14 Sep 2023 16:20:47 +0100
Subject: [PATCH 50/75] Some minor performance fixes for task scheduler (#16313)

---
 changelog.d/16313.misc                        |   1 +
 synapse/replication/tcp/handler.py            |   6 +-
 .../storage/databases/main/task_scheduler.py  |   6 ++
 .../delta/82/02_scheduled_tasks_index.sql     |  16 +++
 synapse/util/task_scheduler.py                | 100 ++++++++++++------
 5 files changed, 95 insertions(+), 34 deletions(-)
 create mode 100644 changelog.d/16313.misc
 create mode 100644 synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql

diff --git a/changelog.d/16313.misc b/changelog.d/16313.misc
new file mode 100644
index 000000000000..4f266c1fb029
--- /dev/null
+++ b/changelog.d/16313.misc
@@ -0,0 +1 @@
+Delete device messages asynchronously and in staged batches using the task scheduler.
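The staged-batch pattern the changelog refers to, introduced for device messages in the previous commit, deletes a bounded chunk per iteration, stops once a chunk comes back short, and sleeps between chunks to spread out database load. As a standalone sketch (the `delete_batch` callable is hypothetical, not Synapse's actual API):

```python
import asyncio
from typing import Awaitable, Callable

async def delete_in_stages(
    delete_batch: Callable[[int], Awaitable[int]],  # returns the number of rows deleted
    batch_size: int = 1000,
    pause_seconds: float = 1.0,
) -> None:
    while True:
        deleted = await delete_batch(batch_size)
        if deleted < batch_size:
            # A short batch means the backlog is drained; the task is complete.
            return
        # Yield between batches so a large backlog doesn't hammer the DB.
        await asyncio.sleep(pause_seconds)
```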
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 5642666411cc..b668bb5da1de 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -672,14 +672,12 @@ def on_LOCK_RELEASED(
                 cmd.instance_name, cmd.lock_name, cmd.lock_key
             )
 
-    async def on_NEW_ACTIVE_TASK(
+    def on_NEW_ACTIVE_TASK(
         self, conn: IReplicationConnection, cmd: NewActiveTaskCommand
     ) -> None:
         """Called when we get a new NEW_ACTIVE_TASK command."""
         if self._task_scheduler:
-            task = await self._task_scheduler.get_task(cmd.data)
-            if task:
-                await self._task_scheduler._launch_task(task)
+            self._task_scheduler.launch_task_by_id(cmd.data)
 
     def new_connection(self, connection: IReplicationConnection) -> None:
         """Called when we have a new connection."""
diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py
index 9ab120eea9ca..5c5372a8259d 100644
--- a/synapse/storage/databases/main/task_scheduler.py
+++ b/synapse/storage/databases/main/task_scheduler.py
@@ -53,6 +53,7 @@ async def get_scheduled_tasks(
         resource_id: Optional[str] = None,
         statuses: Optional[List[TaskStatus]] = None,
         max_timestamp: Optional[int] = None,
+        limit: Optional[int] = None,
    ) -> List[ScheduledTask]:
        """Get a list of scheduled tasks from the DB.
 
@@ -62,6 +63,7 @@ async def get_scheduled_tasks(
             statuses: Limit the returned tasks to the specific statuses
             max_timestamp: Limit the returned tasks to the ones that have a timestamp
                 inferior to the specified one
+            limit: Only return `limit` number of rows if set.
 
         Returns: a list of `ScheduledTask`, ordered by increasing timestamps
         """
@@ -94,6 +96,10 @@ def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]:
 
             sql = sql + " ORDER BY timestamp"
 
+            if limit is not None:
+                sql += " LIMIT ?"
+                args.append(limit)
+
             txn.execute(sql, args)
             return self.db_pool.cursor_to_dict(txn)
 
diff --git a/synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql b/synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql
new file mode 100644
index 000000000000..6b9027513961
--- /dev/null
+++ b/synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql
@@ -0,0 +1,16 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE INDEX IF NOT EXISTS scheduled_tasks_timestamp ON scheduled_tasks(timestamp);
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
index b7de201bdeda..caf13b3474be 100644
--- a/synapse/util/task_scheduler.py
+++ b/synapse/util/task_scheduler.py
@@ -15,12 +15,14 @@
 import logging
 from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set, Tuple
 
-from prometheus_client import Gauge
-
 from twisted.python.failure import Failure
 
 from synapse.logging.context import nested_logging_context
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.types import JsonMapping, ScheduledTask, TaskStatus
 from synapse.util.stringutils import random_string
 
@@ -30,12 +32,6 @@
 logger = logging.getLogger(__name__)
 
 
-running_tasks_gauge = Gauge(
-    "synapse_scheduler_running_tasks",
-    "The number of concurrent running tasks handled by the TaskScheduler",
-)
-
-
 class TaskScheduler:
     """
     This is a simple task scheduler aimed at resumable tasks: usually we use `run_in_background`
@@ -70,6 +66,8 @@ class TaskScheduler:
     # Precision of the scheduler, evaluation of tasks to run will only happen
     # every `SCHEDULE_INTERVAL_MS` ms
     SCHEDULE_INTERVAL_MS = 1 * 60 * 1000  # 1mn
+    # How often to clean up old tasks.
+    CLEANUP_INTERVAL_MS = 30 * 60 * 1000
     # Time before a complete or failed task is deleted from the DB
     KEEP_TASKS_FOR_MS = 7 * 24 * 60 * 60 * 1000  # 1 week
     # Maximum number of tasks that can run at the same time
@@ -92,14 +90,26 @@ def __init__(self, hs: "HomeServer"):
         ] = {}
 
         self._run_background_tasks = hs.config.worker.run_background_tasks
 
+        # Flag to make sure we only try and launch new tasks once at a time.
+        self._launching_new_tasks = False
+
         if self._run_background_tasks:
             self._clock.looping_call(
-                run_as_background_process,
+                self._launch_scheduled_tasks,
+                TaskScheduler.SCHEDULE_INTERVAL_MS,
+            )
+            self._clock.looping_call(
+                self._clean_scheduled_tasks,
                 TaskScheduler.SCHEDULE_INTERVAL_MS,
-                "handle_scheduled_tasks",
-                self._handle_scheduled_tasks,
             )
 
+        LaterGauge(
+            "synapse_scheduler_running_tasks",
+            "The number of concurrent running tasks handled by the TaskScheduler",
+            labels=None,
+            caller=lambda: len(self._running_tasks),
+        )
+
     def register_action(
         self,
         function: Callable[
@@ -234,6 +244,7 @@ async def get_tasks(
         resource_id: Optional[str] = None,
         statuses: Optional[List[TaskStatus]] = None,
         max_timestamp: Optional[int] = None,
+        limit: Optional[int] = None,
     ) -> List[ScheduledTask]:
         """Get a list of tasks. Returns all the tasks if no args are provided.
 
@@ -247,6 +258,7 @@ async def get_tasks(
             statuses: Limit the returned tasks to the specific statuses
             max_timestamp: Limit the returned tasks to the ones that have a timestamp
                 inferior to the specified one
+            limit: Only return `limit` number of rows if set.
        Returns
             A list of `ScheduledTask`, ordered by increasing timestamps
 
@@ -256,6 +268,7 @@ async def get_tasks(
             resource_id=resource_id,
             statuses=statuses,
             max_timestamp=max_timestamp,
+            limit=limit,
         )
 
     async def delete_task(self, id: str) -> None:
@@ -273,34 +286,58 @@ async def delete_task(self, id: str) -> None:
             raise Exception(f"Task {id} is currently ACTIVE and can't be deleted")
 
         await self._store.delete_scheduled_task(id)
 
-    async def _handle_scheduled_tasks(self) -> None:
-        """Main loop taking care of launching tasks and cleaning up old ones."""
-        await self._launch_scheduled_tasks()
-        await self._clean_scheduled_tasks()
+    def launch_task_by_id(self, id: str) -> None:
+        """Try launching the task with the given ID."""
+        # Don't bother trying to launch new tasks if we're already at capacity.
+        if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS:
+            return
+
+        run_as_background_process("launch_task_by_id", self._launch_task_by_id, id)
+
+    async def _launch_task_by_id(self, id: str) -> None:
+        """Helper async function for `launch_task_by_id`."""
+        task = await self.get_task(id)
+        if task:
+            await self._launch_task(task)
 
+    @wrap_as_background_process("launch_scheduled_tasks")
     async def _launch_scheduled_tasks(self) -> None:
         """Retrieve and launch scheduled tasks that should be running at that time."""
-        for task in await self.get_tasks(statuses=[TaskStatus.ACTIVE]):
-            await self._launch_task(task)
-        for task in await self.get_tasks(
-            statuses=[TaskStatus.SCHEDULED], max_timestamp=self._clock.time_msec()
-        ):
-            await self._launch_task(task)
+        # Don't bother trying to launch new tasks if we're already at capacity.
+        if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS:
+            return
+
+        if self._launching_new_tasks:
+            return
 
-        running_tasks_gauge.set(len(self._running_tasks))
+        self._launching_new_tasks = True
+        try:
+            for task in await self.get_tasks(
+                statuses=[TaskStatus.ACTIVE], limit=self.MAX_CONCURRENT_RUNNING_TASKS
+            ):
+                await self._launch_task(task)
+            for task in await self.get_tasks(
+                statuses=[TaskStatus.SCHEDULED],
+                max_timestamp=self._clock.time_msec(),
+                limit=self.MAX_CONCURRENT_RUNNING_TASKS,
+            ):
+                await self._launch_task(task)
+
+        finally:
+            self._launching_new_tasks = False
 
+    @wrap_as_background_process("clean_scheduled_tasks")
     async def _clean_scheduled_tasks(self) -> None:
         """Clean old complete or failed jobs to avoid cluttering the DB."""
+        now = self._clock.time_msec()
         for task in await self._store.get_scheduled_tasks(
-            statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE]
+            statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE],
+            max_timestamp=now - TaskScheduler.KEEP_TASKS_FOR_MS,
         ):
             # FAILED and COMPLETE tasks should never be running
             assert task.id not in self._running_tasks
-            if (
-                self._clock.time_msec()
-                > task.timestamp + TaskScheduler.KEEP_TASKS_FOR_MS
-            ):
-                await self._store.delete_scheduled_task(task.id)
+            await self._store.delete_scheduled_task(task.id)
 
     async def _launch_task(self, task: ScheduledTask) -> None:
         """Launch a scheduled task now.
@@ -339,6 +376,9 @@ async def wrapper() -> None:
             )
 
             self._running_tasks.remove(task.id)
 
+            # Try to launch a new task since we've finished with this one.
+ self._clock.call_later(1, self._launch_scheduled_tasks) + if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return @@ -355,4 +395,4 @@ async def wrapper() -> None: self._running_tasks.add(task.id) await self.update_task(task.id, status=TaskStatus.ACTIVE) - run_as_background_process(task.action, wrapper) + run_as_background_process(f"task-{task.action}", wrapper) From edec0b93cabbe5e03d658a2aa4c2c1b79cf8e85e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Sep 2023 09:10:24 +0100 Subject: [PATCH 51/75] Only use literal strings for process names (#16315) --- changelog.d/16315.misc | 1 + synapse/appservice/scheduler.py | 13 ++++--------- synapse/metrics/background_process_metrics.py | 7 +++++-- synapse/util/caches/expiringcache.py | 4 +--- 4 files changed, 11 insertions(+), 14 deletions(-) create mode 100644 changelog.d/16315.misc diff --git a/changelog.d/16315.misc b/changelog.d/16315.misc new file mode 100644 index 000000000000..d88782c979cb --- /dev/null +++ b/changelog.d/16315.misc @@ -0,0 +1 @@ +Only use literal strings for background process names. diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 3a319b0d42d9..79f95f7653c8 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -200,9 +200,7 @@ def start_background_request(self, service: ApplicationService) -> None: if service.id in self.requests_in_flight: return - run_as_background_process( - "as-sender-%s" % (service.id,), self._send_request, service - ) + run_as_background_process("as-sender", self._send_request, service) async def _send_request(self, service: ApplicationService) -> None: # sanity-check: we shouldn't get here if this service already has a sender @@ -478,14 +476,11 @@ def __init__( self.backoff_counter = 1 def recover(self) -> None: - def _retry() -> None: - run_as_background_process( - "as-recoverer-%s" % (self.service.id,), self.retry - ) - delay = 2**self.backoff_counter logger.info("Scheduling retries on %s in %fs", self.service.id, delay) - self.clock.call_later(delay, _retry) + self.clock.call_later( + delay, run_as_background_process, "as-recoverer", self.retry + ) def _backoff(self) -> None: # cap the backoff to be around 8.5min => (2^9) = 512 secs diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index f1f1f0cdf9a0..fceb7a9f3c0d 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -48,6 +48,9 @@ if TYPE_CHECKING: import resource + # Old versions don't have `LiteralString` + from typing_extensions import LiteralString + logger = logging.getLogger(__name__) @@ -191,7 +194,7 @@ def update_metrics(self) -> None: def run_as_background_process( - desc: str, + desc: "LiteralString", func: Callable[..., Awaitable[Optional[R]]], *args: Any, bg_start_span: bool = True, @@ -259,7 +262,7 @@ async def run() -> Optional[R]: def wrap_as_background_process( - desc: str, + desc: "LiteralString", ) -> Callable[ [Callable[P, Awaitable[Optional[R]]]], Callable[P, "defer.Deferred[Optional[R]]"], diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 8e4c34039dac..e73cf66080ea 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -84,9 +84,7 @@ def __init__( return def f() -> "defer.Deferred[None]": - return run_as_background_process( - "prune_cache_%s" % self._cache_name, self._prune_cache - ) + return 
run_as_background_process("prune_cache", self._prune_cache) self._clock.looping_call(f, self._expiry_ms / 2) From 2a0f86f88fdb3d450212541ba7db57df6a184ae3 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Fri, 15 Sep 2023 03:16:45 -0500 Subject: [PATCH 52/75] Convert `_insert_graph_receipts_txn` to `simple_upsert` (#16299) --- changelog.d/16299.misc | 1 + synapse/storage/database.py | 3 +++ synapse/storage/databases/main/receipts.py | 23 +++++++++------------- 3 files changed, 13 insertions(+), 14 deletions(-) create mode 100644 changelog.d/16299.misc diff --git a/changelog.d/16299.misc b/changelog.d/16299.misc new file mode 100644 index 000000000000..d4546691518d --- /dev/null +++ b/changelog.d/16299.misc @@ -0,0 +1 @@ +Refactor `receipts_graph` Postgres transactions to stop error messages. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 6c5fcdcec37d..697bc5651c91 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1193,6 +1193,7 @@ async def simple_upsert( keyvalues: Dict[str, Any], values: Dict[str, Any], insertion_values: Optional[Dict[str, Any]] = None, + where_clause: Optional[str] = None, desc: str = "simple_upsert", ) -> bool: """Insert a row with values + insertion_values; on conflict, update with values. @@ -1243,6 +1244,7 @@ async def simple_upsert( keyvalues: The unique key columns and their new values values: The nonunique columns and their new values insertion_values: additional key/values to use only when inserting + where_clause: An index predicate to apply to the upsert. desc: description of the transaction, for logging and metrics Returns: Returns True if a row was inserted or updated (i.e. if `values` is @@ -1263,6 +1265,7 @@ async def simple_upsert( keyvalues, values, insertion_values, + where_clause, db_autocommit=autocommit, ) except self.engine.module.IntegrityError as e: diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index e4d10ff250d1..a074c439895e 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -795,9 +795,7 @@ async def insert_receipt( now - event_ts, ) - await self.db_pool.runInteraction( - "insert_graph_receipt", - self._insert_graph_receipt_txn, + await self._insert_graph_receipt( room_id, receipt_type, user_id, @@ -810,9 +808,8 @@ async def insert_receipt( return stream_id, max_persisted_id - def _insert_graph_receipt_txn( + async def _insert_graph_receipt( self, - txn: LoggingTransaction, room_id: str, receipt_type: str, user_id: str, @@ -822,13 +819,6 @@ def _insert_graph_receipt_txn( ) -> None: assert self._can_write_to_receipts - txn.call_after( - self._get_receipts_for_user_with_orderings.invalidate, - (user_id, receipt_type), - ) - # FIXME: This shouldn't invalidate the whole cache - txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,)) - keyvalues = { "room_id": room_id, "receipt_type": receipt_type, @@ -840,8 +830,8 @@ def _insert_graph_receipt_txn( else: keyvalues["thread_id"] = thread_id - self.db_pool.simple_upsert_txn( - txn, + await self.db_pool.simple_upsert( + desc="insert_graph_receipt", table="receipts_graph", keyvalues=keyvalues, values={ @@ -851,6 +841,11 @@ def _insert_graph_receipt_txn( where_clause=where_clause, ) + self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type)) + + # FIXME: This shouldn't invalidate the whole cache + self._get_linearized_receipts_for_room.invalidate((room_id,)) + class 
ReceiptsBackgroundUpdateStore(SQLBaseStore):
     POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering"
 
From dd44ee00b6cf4d900e56857039320660400cff37 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Fri, 15 Sep 2023 15:37:44 +0200
Subject: [PATCH 53/75] Add automatic purge after all users forget a room (#15488)

Also resume purge/shutdown of rooms after a Synapse restart.

Co-authored-by: Eric Eastwood
Co-authored-by: Erik Johnston
---
 changelog.d/15488.feature                     |   1 +
 .../configuration/config_documentation.md     |  11 +
 synapse/app/generic_worker.py                 |   2 +
 synapse/config/server.py                      |  11 +
 synapse/handlers/pagination.py                | 464 ++++++------------
 synapse/handlers/room.py                      | 177 ++++---
 synapse/handlers/room_member.py               |  30 +-
 synapse/module_api/__init__.py                |  13 +-
 synapse/rest/admin/__init__.py                |  20 +-
 synapse/rest/admin/rooms.py                   |  78 +--
 tests/rest/admin/test_room.py                 | 159 +++++-
 tests/rest/admin/test_server_notice.py        |  20 +-
 tests/rest/client/test_rooms.py               |   6 +-
 13 files changed, 542 insertions(+), 450 deletions(-)
 create mode 100644 changelog.d/15488.feature

diff --git a/changelog.d/15488.feature b/changelog.d/15488.feature
new file mode 100644
index 000000000000..8684d84192a9
--- /dev/null
+++ b/changelog.d/15488.feature
@@ -0,0 +1 @@
+Add automatic purge after all users have forgotten a room. Also resume purge/shutdown of rooms after a Synapse restart.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index a06b3d8a0680..885a7bf0a37f 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -936,6 +936,17 @@ Example configuration:
 redaction_retention_period: 28d
 ```
 ---
+### `forgotten_room_retention_period`
+
+How long to keep locally forgotten rooms before purging them from the DB.
+
+Defaults to `null`, meaning it's disabled.
+
+Example configuration:
+```yaml
+forgotten_room_retention_period: 28d
+```
+---
 ### `user_ips_max_age`
 
 How long to track users' last seen time and IPs in the database.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index d25e3548e075..f7c80eee210d 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -77,6 +77,7 @@
 )
 from synapse.storage.databases.main.presence import PresenceStore
 from synapse.storage.databases.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.purge_events import PurgeEventsStore
 from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
 from synapse.storage.databases.main.pusher import PusherWorkerStore
 from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
@@ -134,6 +135,7 @@ class GenericWorkerStore(
     RelationsWorkerStore,
     EventFederationWorkerStore,
     EventPushActionsWorkerStore,
+    PurgeEventsStore,
     StateGroupWorkerStore,
     SignatureWorkerStore,
     UserErasureWorkerStore,
diff --git a/synapse/config/server.py b/synapse/config/server.py
index b46fa5159309..72d30da30082 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -486,6 +486,17 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         else:
             self.redaction_retention_period = None
 
+        # How long to keep locally forgotten rooms before purging them from the DB.
+ forgotten_room_retention_period = config.get( + "forgotten_room_retention_period", None + ) + if forgotten_room_retention_period is not None: + self.forgotten_room_retention_period: Optional[int] = self.parse_duration( + forgotten_room_retention_period + ) + else: + self.forgotten_room_retention_period = None + # How long to keep entries in the `users_ips` table. user_ips_max_age = config.get("user_ips_max_age", "28d") if user_ips_max_age is not None: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 19cf5a2b4393..878f267a4e45 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -13,9 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Set - -import attr +from typing import TYPE_CHECKING, List, Optional, Set, Tuple, cast from twisted.python.failure import Failure @@ -23,16 +21,22 @@ from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import SerializeEventConfig -from synapse.handlers.room import ShutdownRoomResponse +from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging.opentracing import trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.admin._base import assert_user_is_admin from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType +from synapse.types import ( + JsonDict, + JsonMapping, + Requester, + ScheduledTask, + StreamKeyType, + TaskStatus, +) from synapse.types.state import StateFilter from synapse.util.async_helpers import ReadWriteLock -from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -53,80 +57,11 @@ PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock" -@attr.s(slots=True, auto_attribs=True) -class PurgeStatus: - """Object tracking the status of a purge request - - This class contains information on the progress of a purge request, for - return by get_purge_status. - """ - - STATUS_ACTIVE = 0 - STATUS_COMPLETE = 1 - STATUS_FAILED = 2 - - STATUS_TEXT = { - STATUS_ACTIVE: "active", - STATUS_COMPLETE: "complete", - STATUS_FAILED: "failed", - } - - # Save the error message if an error occurs - error: str = "" - - # Tracks whether this request has completed. One of STATUS_{ACTIVE,COMPLETE,FAILED}. - status: int = STATUS_ACTIVE - - def asdict(self) -> JsonDict: - ret = {"status": PurgeStatus.STATUS_TEXT[self.status]} - if self.error: - ret["error"] = self.error - return ret - - -@attr.s(slots=True, auto_attribs=True) -class DeleteStatus: - """Object tracking the status of a delete room request +PURGE_HISTORY_ACTION_NAME = "purge_history" - This class contains information on the progress of a delete room request, for - return by get_delete_status. - """ +PURGE_ROOM_ACTION_NAME = "purge_room" - STATUS_PURGING = 0 - STATUS_COMPLETE = 1 - STATUS_FAILED = 2 - STATUS_SHUTTING_DOWN = 3 - - STATUS_TEXT = { - STATUS_PURGING: "purging", - STATUS_COMPLETE: "complete", - STATUS_FAILED: "failed", - STATUS_SHUTTING_DOWN: "shutting_down", - } - - # Tracks whether this request has completed. - # One of STATUS_{PURGING,COMPLETE,FAILED,SHUTTING_DOWN}. 
- status: int = STATUS_PURGING - - # Save the error message if an error occurs - error: str = "" - - # Saves the result of an action to give it back to REST API - shutdown_room: ShutdownRoomResponse = { - "kicked_users": [], - "failed_to_kick_users": [], - "local_aliases": [], - "new_room_id": None, - } - - def asdict(self) -> JsonDict: - ret = { - "status": DeleteStatus.STATUS_TEXT[self.status], - "shutdown_room": self.shutdown_room, - } - if self.error: - ret["error"] = self.error - return ret +SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room" class PaginationHandler: @@ -136,9 +71,6 @@ class PaginationHandler: paginating during a purge. """ - # when to remove a completed deletion/purge from the results map - CLEAR_PURGE_AFTER_MS = 1000 * 3600 * 24 # 24 hours - def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() @@ -150,17 +82,11 @@ def __init__(self, hs: "HomeServer"): self._room_shutdown_handler = hs.get_room_shutdown_handler() self._relations_handler = hs.get_relations_handler() self._worker_locks = hs.get_worker_locks_handler() + self._task_scheduler = hs.get_task_scheduler() self.pagination_lock = ReadWriteLock() # IDs of rooms in which there currently an active purge *or delete* operation. self._purges_in_progress_by_room: Set[str] = set() - # map from purge id to PurgeStatus - self._purges_by_id: Dict[str, PurgeStatus] = {} - # map from purge id to DeleteStatus - self._delete_by_id: Dict[str, DeleteStatus] = {} - # map from room id to delete ids - # Dict[`room_id`, List[`delete_id`]] - self._delete_by_room: Dict[str, List[str]] = {} self._event_serializer = hs.get_event_client_serializer() self._retention_default_max_lifetime = ( @@ -173,6 +99,9 @@ def __init__(self, hs: "HomeServer"): self._retention_allowed_lifetime_max = ( hs.config.retention.retention_allowed_lifetime_max ) + self._forgotten_room_retention_period = ( + hs.config.server.forgotten_room_retention_period + ) self._is_master = hs.config.worker.worker_app is None if hs.config.retention.retention_enabled and self._is_master: @@ -189,6 +118,14 @@ def __init__(self, hs: "HomeServer"): job.longest_max_lifetime, ) + self._task_scheduler.register_action( + self._purge_history, PURGE_HISTORY_ACTION_NAME + ) + self._task_scheduler.register_action(self._purge_room, PURGE_ROOM_ACTION_NAME) + self._task_scheduler.register_action( + self._shutdown_and_purge_room, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME + ) + async def purge_history_for_rooms_in_range( self, min_ms: Optional[int], max_ms: Optional[int] ) -> None: @@ -224,7 +161,7 @@ async def purge_history_for_rooms_in_range( include_null = False logger.info( - "[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)", + "[purge] Running retention purge job for %s < max_lifetime <= %s (include NULLs = %s)", min_ms, max_ms, include_null, @@ -239,10 +176,10 @@ async def purge_history_for_rooms_in_range( for room_id, retention_policy in rooms.items(): logger.info("[purge] Attempting to purge messages in room %s", room_id) - if room_id in self._purges_in_progress_by_room: + if len(await self.get_delete_tasks_by_room(room_id, only_active=True)) > 0: logger.warning( - "[purge] not purging room %s as there's an ongoing purge running" - " for this room", + "[purge] not purging room %s for retention as there's an ongoing purge" + " running for this room", room_id, ) continue @@ -295,27 +232,20 @@ async def purge_history_for_rooms_in_range( (stream, topo, _event_id) = r token = "t%d-%d" % (topo, stream) - purge_id = random_string(16) - 
- self._purges_by_id[purge_id] = PurgeStatus() - - logger.info( - "Starting purging events in room %s (purge_id %s)" % (room_id, purge_id) - ) + logger.info("Starting purging events in room %s", room_id) # We want to purge everything, including local events, and to run the purge in # the background so that it's not blocking any other operation apart from # other purges in the same room. run_as_background_process( - "_purge_history", - self._purge_history, - purge_id, + PURGE_HISTORY_ACTION_NAME, + self.purge_history, room_id, token, True, ) - def start_purge_history( + async def start_purge_history( self, room_id: str, token: str, delete_local_events: bool = False ) -> str: """Start off a history purge on a room. @@ -329,40 +259,58 @@ def start_purge_history( Returns: unique ID for this purge transaction. """ - if room_id in self._purges_in_progress_by_room: - raise SynapseError( - 400, "History purge already in progress for %s" % (room_id,) - ) - - purge_id = random_string(16) + purge_id = await self._task_scheduler.schedule_task( + PURGE_HISTORY_ACTION_NAME, + resource_id=room_id, + params={"token": token, "delete_local_events": delete_local_events}, + ) # we log the purge_id here so that it can be tied back to the # request id in the log lines. logger.info("[purge] starting purge_id %s", purge_id) - self._purges_by_id[purge_id] = PurgeStatus() - run_as_background_process( - "purge_history", - self._purge_history, - purge_id, - room_id, - token, - delete_local_events, - ) return purge_id async def _purge_history( - self, purge_id: str, room_id: str, token: str, delete_local_events: bool - ) -> None: + self, + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """ + Scheduler action to purge some history of a room. + """ + if ( + task.resource_id is None + or task.params is None + or "token" not in task.params + or "delete_local_events" not in task.params + ): + return ( + TaskStatus.FAILED, + None, + "Not enough parameters passed to _purge_history", + ) + err = await self.purge_history( + task.resource_id, + task.params["token"], + task.params["delete_local_events"], + ) + if err is not None: + return TaskStatus.FAILED, None, err + return TaskStatus.COMPLETE, None, None + + async def purge_history( + self, + room_id: str, + token: str, + delete_local_events: bool, + ) -> Optional[str]: """Carry out a history purge on a room. Args: - purge_id: The ID for this purge. 
room_id: The room to purge from token: topological token to delete events before delete_local_events: True to delete local events as well as remote ones """ - self._purges_in_progress_by_room.add(room_id) try: async with self._worker_locks.acquire_read_write_lock( PURGE_PAGINATION_LOCK_NAME, room_id, write=True @@ -371,57 +319,68 @@ async def _purge_history( room_id, token, delete_local_events ) logger.info("[purge] complete") - self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE + return None except Exception: f = Failure() logger.error( "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) ) - self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED - self._purges_by_id[purge_id].error = f.getErrorMessage() - finally: - self._purges_in_progress_by_room.discard(room_id) - - # remove the purge from the list 24 hours after it completes - def clear_purge() -> None: - del self._purges_by_id[purge_id] - - self.hs.get_reactor().callLater( - PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_purge - ) - - def get_purge_status(self, purge_id: str) -> Optional[PurgeStatus]: - """Get the current status of an active purge + return f.getErrorMessage() - Args: - purge_id: purge_id returned by start_purge_history - """ - return self._purges_by_id.get(purge_id) - - def get_delete_status(self, delete_id: str) -> Optional[DeleteStatus]: + async def get_delete_task(self, delete_id: str) -> Optional[ScheduledTask]: """Get the current status of an active deleting Args: delete_id: delete_id returned by start_shutdown_and_purge_room + or start_purge_history. """ - return self._delete_by_id.get(delete_id) + return await self._task_scheduler.get_task(delete_id) - def get_delete_ids_by_room(self, room_id: str) -> Optional[StrCollection]: - """Get all active delete ids by room + async def get_delete_tasks_by_room( + self, room_id: str, only_active: Optional[bool] = False + ) -> List[ScheduledTask]: + """Get complete, failed or active delete tasks by room Args: room_id: room_id that is deleted + only_active: if True, completed&failed tasks will be omitted + """ + statuses = [TaskStatus.ACTIVE] + if not only_active: + statuses += [TaskStatus.COMPLETE, TaskStatus.FAILED] + + return await self._task_scheduler.get_tasks( + actions=[PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME], + resource_id=room_id, + statuses=statuses, + ) + + async def _purge_room( + self, + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """ + Scheduler action to purge a room. """ - return self._delete_by_room.get(room_id) + if not task.resource_id: + raise Exception("No room id passed to purge_room task") + params = task.params if task.params else {} + await self.purge_room(task.resource_id, params.get("force", False)) + return TaskStatus.COMPLETE, None, None - async def purge_room(self, room_id: str, force: bool = False) -> None: + async def purge_room( + self, + room_id: str, + force: bool, + ) -> None: """Purge the given room from the database. - This function is part the delete room v1 API. Args: room_id: room to be purged force: set true to skip checking for joined users. 
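The rewritten `_purge_history` above shows the contract that this patch moves purges onto: a scheduler action is an async callable that takes a `ScheduledTask` and returns a `(TaskStatus, result, error)` triple instead of mutating an in-memory `PurgeStatus`. A minimal, self-contained sketch of that shape, using toy stand-ins for the `synapse.types` classes the real code imports:

```python
import asyncio
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple

class TaskStatus(str, Enum):  # toy stand-in for synapse.types.TaskStatus
    ACTIVE = "active"
    COMPLETE = "complete"
    FAILED = "failed"

@dataclass
class ScheduledTask:  # toy stand-in for synapse.types.ScheduledTask
    id: str
    action: str
    resource_id: Optional[str]
    params: Optional[Dict[str, Any]]

async def purge_history_action(
    task: ScheduledTask,
) -> Tuple[TaskStatus, Optional[Dict[str, Any]], Optional[str]]:
    # Validate inputs first, as the real _purge_history does, and report
    # failure through the return value rather than by raising.
    if task.resource_id is None or not task.params or "token" not in task.params:
        return TaskStatus.FAILED, None, "Not enough parameters passed"
    # ... the actual purge would happen here ...
    return TaskStatus.COMPLETE, None, None

task = ScheduledTask("ab12", "purge_history", "!room:example.org", {"token": "t3-2"})
print(asyncio.run(purge_history_action(task)))  # -> (TaskStatus.COMPLETE, None, None)
```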
""" + logger.info("starting purge room_id=%s force=%s", room_id, force) + async with self._worker_locks.acquire_multi_read_write_lock( [ (PURGE_PAGINATION_LOCK_NAME, room_id), @@ -430,13 +389,20 @@ async def purge_room(self, room_id: str, force: bool = False) -> None: write=True, ): # first check that we have no users in this room - if not force: - joined = await self.store.is_host_joined(room_id, self._server_name) - if joined: + joined = await self.store.is_host_joined(room_id, self._server_name) + if joined: + if force: + logger.info( + "force-purging room %s with some local users still joined", + room_id, + ) + else: raise SynapseError(400, "Users are still joined to this room") await self._storage_controllers.purge_events.purge_room(room_id) + logger.info("purge complete for room_id %s", room_id) + @trace async def get_messages( self, @@ -711,177 +677,72 @@ async def get_messages( async def _shutdown_and_purge_room( self, - delete_id: str, - room_id: str, - requester_user_id: Optional[str], - new_room_user_id: Optional[str] = None, - new_room_name: Optional[str] = None, - message: Optional[str] = None, - block: bool = False, - purge: bool = True, - force_purge: bool = False, - ) -> None: + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """ - Shuts down and purges a room. - - See `RoomShutdownHandler.shutdown_room` for details of creation of the new room - - Args: - delete_id: The ID for this delete. - room_id: The ID of the room to shut down. - requester_user_id: - User who requested the action. Will be recorded as putting the room on the - blocking list. - If None, the action was not manually requested but instead - triggered automatically, e.g. through a Synapse module - or some other policy. - MUST NOT be None if block=True. - new_room_user_id: - If set, a new room will be created with this user ID - as the creator and admin, and all users in the old room will be - moved into that room. If not set, no new room will be created - and the users will just be removed from the old room. - new_room_name: - A string representing the name of the room that new users will - be invited to. Defaults to `Content Violation Notification` - message: - A string containing the first message that will be sent as - `new_room_user_id` in the new room. Ideally this will clearly - convey why the original room was shut down. - Defaults to `Sharing illegal content on this server is not - permitted and rooms in violation will be blocked.` - block: - If set to `true`, this room will be added to a blocking list, - preventing future attempts to join the room. Defaults to `false`. - purge: - If set to `true`, purge the given room from the database. - force_purge: - If set to `true`, the room will be purged from database - also if it fails to remove some users from room. - - Saves a `RoomShutdownHandler.ShutdownRoomResponse` in `DeleteStatus`: + Scheduler action to shutdown and purge a room. 
""" + if task.resource_id is None or task.params is None: + raise Exception( + "No room id and/or no parameters passed to shutdown_and_purge_room task" + ) - self._purges_in_progress_by_room.add(room_id) - try: - async with self._worker_locks.acquire_read_write_lock( - PURGE_PAGINATION_LOCK_NAME, room_id, write=True - ): - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN - self._delete_by_id[ - delete_id - ].shutdown_room = await self._room_shutdown_handler.shutdown_room( - room_id=room_id, - requester_user_id=requester_user_id, - new_room_user_id=new_room_user_id, - new_room_name=new_room_name, - message=message, - block=block, - ) - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_PURGING + room_id = task.resource_id - if purge: - logger.info("starting purge room_id %s", room_id) + async def update_result(result: Optional[JsonMapping]) -> None: + await self._task_scheduler.update_task(task.id, result=result) - # first check that we have no users in this room - if not force_purge: - joined = await self.store.is_host_joined( - room_id, self._server_name - ) - if joined: - raise SynapseError( - 400, "Users are still joined to this room" - ) + shutdown_result = ( + cast(ShutdownRoomResponse, task.result) if task.result else None + ) - await self._storage_controllers.purge_events.purge_room(room_id) + shutdown_result = await self._room_shutdown_handler.shutdown_room( + room_id, + cast(ShutdownRoomParams, task.params), + shutdown_result, + update_result, + ) - logger.info("purge complete for room_id %s", room_id) - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE - except Exception: - f = Failure() - logger.error( - "failed", - exc_info=(f.type, f.value, f.getTracebackObject()), - ) - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED - self._delete_by_id[delete_id].error = f.getErrorMessage() - finally: - self._purges_in_progress_by_room.discard(room_id) - - # remove the delete from the list 24 hours after it completes - def clear_delete() -> None: - del self._delete_by_id[delete_id] - self._delete_by_room[room_id].remove(delete_id) - if not self._delete_by_room[room_id]: - del self._delete_by_room[room_id] - - self.hs.get_reactor().callLater( - PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_delete + if task.params.get("purge", False): + await self.purge_room( + room_id, + task.params.get("force_purge", False), ) - def start_shutdown_and_purge_room( + return (TaskStatus.COMPLETE, shutdown_result, None) + + async def start_shutdown_and_purge_room( self, room_id: str, - requester_user_id: Optional[str], - new_room_user_id: Optional[str] = None, - new_room_name: Optional[str] = None, - message: Optional[str] = None, - block: bool = False, - purge: bool = True, - force_purge: bool = False, + shutdown_params: ShutdownRoomParams, ) -> str: """Start off shut down and purge on a room. Args: room_id: The ID of the room to shut down. - requester_user_id: - User who requested the action and put the room on the - blocking list. - If None, the action was not manually requested but instead - triggered automatically, e.g. through a Synapse module - or some other policy. - MUST NOT be None if block=True. - new_room_user_id: - If set, a new room will be created with this user ID - as the creator and admin, and all users in the old room will be - moved into that room. If not set, no new room will be created - and the users will just be removed from the old room. 
- new_room_name: - A string representing the name of the room that new users will - be invited to. Defaults to `Content Violation Notification` - message: - A string containing the first message that will be sent as - `new_room_user_id` in the new room. Ideally this will clearly - convey why the original room was shut down. - Defaults to `Sharing illegal content on this server is not - permitted and rooms in violation will be blocked.` - block: - If set to `true`, this room will be added to a blocking list, - preventing future attempts to join the room. Defaults to `false`. - purge: - If set to `true`, purge the given room from the database. - force_purge: - If set to `true`, the room will be purged from database - also if it fails to remove some users from room. + shutdown_params: parameters for the shutdown Returns: unique ID for this delete transaction. """ - if room_id in self._purges_in_progress_by_room: - raise SynapseError( - 400, "History purge already in progress for %s" % (room_id,) - ) + if len(await self.get_delete_tasks_by_room(room_id, only_active=True)) > 0: + raise SynapseError(400, "Purge already in progress for %s" % (room_id,)) # This check is double to `RoomShutdownHandler.shutdown_room` # But here the requester get a direct response / error with HTTP request # and do not have to check the purge status + new_room_user_id = shutdown_params["new_room_user_id"] if new_room_user_id is not None: if not self.hs.is_mine_id(new_room_user_id): raise SynapseError( 400, "User must be our own: %s" % (new_room_user_id,) ) - delete_id = random_string(16) + delete_id = await self._task_scheduler.schedule_task( + SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, + resource_id=room_id, + params=shutdown_params, + ) # we log the delete_id here so that it can be tied back to the # request id in the log lines. 
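Because `start_shutdown_and_purge_room` now returns a scheduler task ID rather than a key into an in-memory map, callers poll the persistent task store for progress. A hedged sketch of how a caller might drive the new methods shown above (it requires a live `PaginationHandler`; the parameter values are purely illustrative):

```python
from typing import Optional

async def delete_room_and_check(pagination_handler, room_id: str) -> Optional[str]:
    # Schedule the shutdown+purge; the returned ID is now a scheduler task ID.
    delete_id = await pagination_handler.start_shutdown_and_purge_room(
        room_id=room_id,
        shutdown_params={
            "requester_user_id": "@admin:example.org",
            "new_room_user_id": None,
            "new_room_name": None,
            "message": None,
            "block": True,
            "purge": True,
            "force_purge": False,
        },
    )
    # Status is read back from the persistent task store, so it survives a
    # restart, unlike the old in-memory DeleteStatus objects.
    task = await pagination_handler.get_delete_task(delete_id)
    return str(task.status) if task else None
```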
@@ -891,19 +752,4 @@ def start_shutdown_and_purge_room( delete_id, ) - self._delete_by_id[delete_id] = DeleteStatus() - self._delete_by_room.setdefault(room_id, []).append(delete_id) - run_as_background_process( - "shutdown_and_purge_room", - self._shutdown_and_purge_room, - delete_id, - room_id, - requester_user_id, - new_room_user_id, - new_room_name, - message, - block, - purge, - force_purge, - ) return delete_id diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7a762c851101..a0c3b168197b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -20,7 +20,7 @@ import string from collections import OrderedDict from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple import attr from typing_extensions import TypedDict @@ -54,11 +54,11 @@ from synapse.events.snapshot import UnpersistedEventContext from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.handlers.relations import BundledAggregations -from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin from synapse.streams import EventSource from synapse.types import ( JsonDict, + JsonMapping, MutableStateMap, Requester, RoomAlias, @@ -454,7 +454,7 @@ async def clone_existing_room( spam_check = await self._spam_checker_module_callbacks.user_may_create_room( user_id ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( 403, "You are not permitted to create rooms", @@ -768,7 +768,7 @@ async def create_room( spam_check = await self._spam_checker_module_callbacks.user_may_create_room( user_id ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( 403, "You are not permitted to create rooms", @@ -1750,6 +1750,45 @@ def get_current_key_for_room(self, room_id: str) -> Awaitable[RoomStreamToken]: return self.store.get_current_room_stream_token_for_room_id(room_id) +class ShutdownRoomParams(TypedDict): + """ + Attributes: + requester_user_id: + User who requested the action. Will be recorded as putting the room on the + blocking list. + new_room_user_id: + If set, a new room will be created with this user ID + as the creator and admin, and all users in the old room will be + moved into that room. If not set, no new room will be created + and the users will just be removed from the old room. + new_room_name: + A string representing the name of the room that new users will + be invited to. Defaults to `Content Violation Notification` + message: + A string containing the first message that will be sent as + `new_room_user_id` in the new room. Ideally this will clearly + convey why the original room was shut down. + Defaults to `Sharing illegal content on this server is not + permitted and rooms in violation will be blocked.` + block: + If set to `true`, this room will be added to a blocking list, + preventing future attempts to join the room. Defaults to `false`. + purge: + If set to `true`, purge the given room from the database. + force_purge: + If set to `true`, the room will be purged from database + even if there are still users joined to the room. 
+ """ + + requester_user_id: Optional[str] + new_room_user_id: Optional[str] + new_room_name: Optional[str] + message: Optional[str] + block: bool + purge: bool + force_purge: bool + + class ShutdownRoomResponse(TypedDict): """ Attributes: @@ -1787,12 +1826,12 @@ def __init__(self, hs: "HomeServer"): async def shutdown_room( self, room_id: str, - requester_user_id: Optional[str], - new_room_user_id: Optional[str] = None, - new_room_name: Optional[str] = None, - message: Optional[str] = None, - block: bool = False, - ) -> ShutdownRoomResponse: + params: ShutdownRoomParams, + result: Optional[ShutdownRoomResponse] = None, + update_result_fct: Optional[ + Callable[[Optional[JsonMapping]], Awaitable[None]] + ] = None, + ) -> Optional[ShutdownRoomResponse]: """ Shuts down a room. Moves all local users and room aliases automatically to a new room if `new_room_user_id` is set. Otherwise local users only @@ -1808,52 +1847,23 @@ async def shutdown_room( Args: room_id: The ID of the room to shut down. - requester_user_id: - User who requested the action and put the room on the - blocking list. - If None, the action was not manually requested but instead - triggered automatically, e.g. through a Synapse module - or some other policy. - MUST NOT be None if block=True. - new_room_user_id: - If set, a new room will be created with this user ID - as the creator and admin, and all users in the old room will be - moved into that room. If not set, no new room will be created - and the users will just be removed from the old room. - new_room_name: - A string representing the name of the room that new users will - be invited to. Defaults to `Content Violation Notification` - message: - A string containing the first message that will be sent as - `new_room_user_id` in the new room. Ideally this will clearly - convey why the original room was shut down. - Defaults to `Sharing illegal content on this server is not - permitted and rooms in violation will be blocked.` - block: - If set to `True`, users will be prevented from joining the old - room. This option can also be used to pre-emptively block a room, - even if it's unknown to this homeserver. In this case, the room - will be blocked, and no further action will be taken. If `False`, - attempting to delete an unknown room is invalid. - - Defaults to `False`. - - Returns: a dict containing the following keys: - kicked_users: An array of users (`user_id`) that were kicked. - failed_to_kick_users: - An array of users (`user_id`) that that were not kicked. - local_aliases: - An array of strings representing the local aliases that were - migrated from the old room to the new. - new_room_id: - A string representing the room ID of the new room, or None if - no such room was created. - """ + delete_id: The delete ID identifying this delete request + params: parameters for the shutdown, cf `ShutdownRoomParams` + result: current status of the shutdown, if it was interrupted + update_result_fct: function called when `result` is updated locally - if not new_room_name: - new_room_name = self.DEFAULT_ROOM_NAME - if not message: - message = self.DEFAULT_MESSAGE + Returns: a dict matching `ShutdownRoomResponse`. 
+ """ + requester_user_id = params["requester_user_id"] + new_room_user_id = params["new_room_user_id"] + block = params["block"] + + new_room_name = ( + params["new_room_name"] + if params["new_room_name"] + else self.DEFAULT_ROOM_NAME + ) + message = params["message"] if params["message"] else self.DEFAULT_MESSAGE if not RoomID.is_valid(room_id): raise SynapseError(400, "%s is not a legal room ID" % (room_id,)) @@ -1865,6 +1875,17 @@ async def shutdown_room( 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN ) + result = ( + result + if result + else { + "kicked_users": [], + "failed_to_kick_users": [], + "local_aliases": [], + "new_room_id": None, + } + ) + # Action the block first (even if the room doesn't exist yet) if block: if requester_user_id is None: @@ -1877,14 +1898,10 @@ async def shutdown_room( if not await self.store.get_room(room_id): # if we don't know about the room, there is nothing left to do. - return { - "kicked_users": [], - "failed_to_kick_users": [], - "local_aliases": [], - "new_room_id": None, - } + return result - if new_room_user_id is not None: + new_room_id = result.get("new_room_id") + if new_room_user_id is not None and new_room_id is None: if not self.hs.is_mine_id(new_room_user_id): raise SynapseError( 400, "User must be our own: %s" % (new_room_user_id,) @@ -1904,6 +1921,10 @@ async def shutdown_room( ratelimit=False, ) + result["new_room_id"] = new_room_id + if update_result_fct: + await update_result_fct(result) + logger.info( "Shutting down room %r, joining to new room: %r", room_id, new_room_id ) @@ -1917,12 +1938,9 @@ async def shutdown_room( stream_id, ) else: - new_room_id = None logger.info("Shutting down room %r", room_id) users = await self.store.get_users_in_room(room_id) - kicked_users = [] - failed_to_kick_users = [] for user_id in users: if not self.hs.is_mine_id(user_id): continue @@ -1951,7 +1969,9 @@ async def shutdown_room( stream_id, ) - await self.room_member_handler.forget(target_requester.user, room_id) + await self.room_member_handler.forget( + target_requester.user, room_id, do_not_schedule_purge=True + ) # Join users to new room if new_room_user_id: @@ -1966,15 +1986,23 @@ async def shutdown_room( require_consent=False, ) - kicked_users.append(user_id) + result["kicked_users"].append(user_id) + if update_result_fct: + await update_result_fct(result) except Exception: logger.exception( "Failed to leave old room and join new room for %r", user_id ) - failed_to_kick_users.append(user_id) + result["failed_to_kick_users"].append(user_id) + if update_result_fct: + await update_result_fct(result) # Send message in new room and move aliases if new_room_user_id: + room_creator_requester = create_requester( + new_room_user_id, authenticated_entity=requester_user_id + ) + await self.event_creation_handler.create_and_send_nonmember_event( room_creator_requester, { @@ -1986,18 +2014,15 @@ async def shutdown_room( ratelimit=False, ) - aliases_for_room = await self.store.get_aliases_for_room(room_id) + result["local_aliases"] = list( + await self.store.get_aliases_for_room(room_id) + ) assert new_room_id is not None await self.store.update_aliases_for_room( room_id, new_room_id, requester_user_id ) else: - aliases_for_room = [] + result["local_aliases"] = [] - return { - "kicked_users": kicked_users, - "failed_to_kick_users": failed_to_kick_users, - "local_aliases": list(aliases_for_room), - "new_room_id": new_room_id, - } + return result diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 
de0f04e3fe48..90343c230604 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -37,13 +37,13 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.handlers.pagination import PURGE_ROOM_ACTION_NAME from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging import opentracing from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.module_api import NOT_SPAM from synapse.types import ( JsonDict, Requester, @@ -169,6 +169,10 @@ def __init__(self, hs: "HomeServer"): self.request_ratelimiter = hs.get_request_ratelimiter() hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room) + self._forgotten_room_retention_period = ( + hs.config.server.forgotten_room_retention_period + ) + def _on_user_joined_room(self, event_id: str, room_id: str) -> None: """Notify the rate limiter that a room join has occurred. @@ -278,7 +282,9 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None: """ raise NotImplementedError() - async def forget(self, user: UserID, room_id: str) -> None: + async def forget( + self, user: UserID, room_id: str, do_not_schedule_purge: bool = False + ) -> None: user_id = user.to_string() member = await self._storage_controllers.state.get_current_state_event( @@ -298,6 +304,20 @@ async def forget(self, user: UserID, room_id: str) -> None: # the table `current_state_events` and `get_current_state_events` is `None`. 
await self.store.forget(user_id, room_id) + # If everyone locally has left the room, then there is no reason for us to keep the + # room around, and we automatically purge the room after a short delay. + if ( + not do_not_schedule_purge + and self._forgotten_room_retention_period + and await self.store.is_locally_forgotten_room(room_id) + ): + await self.hs.get_task_scheduler().schedule_task( + PURGE_ROOM_ACTION_NAME, + resource_id=room_id, + timestamp=self.clock.time_msec() + + self._forgotten_room_retention_period, + ) + async def ratelimit_multiple_invites( self, requester: Optional[Requester], @@ -804,7 +824,7 @@ async def update_membership_locked( spam_check = await self._spam_checker_module_callbacks.user_may_invite( requester.user.to_string(), target_id, room_id ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: logger.info("Blocking invite due to spam checker") block_invite_result = spam_check @@ -939,7 +959,7 @@ async def update_membership_locked( target.to_string(), room_id, is_invited=inviter is not None ) ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( 403, "Not allowed to join this room", @@ -1557,7 +1577,7 @@ async def do_3pid_invite( room_id=room_id, ) ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( 403, "Cannot send threepid invite", diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7ec202be2342..65e2aca4560a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1741,7 +1741,18 @@ async def delete_room(self, room_id: str) -> None: """ # Future extensions to this method might want to e.g. allow use of `force_purge`. # TODO In the future we should make sure this is persistent.
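Stepping back from the `forget` hunk above: rather than purging inline, it queues a `purge_room` task with an absolute timestamp one retention period in the future. A toy sketch of that arithmetic (the list-based queue is a hypothetical stand-in for the task scheduler; timestamps are in milliseconds, as throughout Synapse):

```python
import time
from typing import List, Tuple

PURGE_ROOM_ACTION_NAME = "purge_room"

def schedule_forgotten_room_purge(
    queue: List[Tuple[str, str, int]],
    room_id: str,
    retention_period_ms: int,
) -> None:
    now_ms = int(time.time() * 1000)
    # Mirrors: timestamp=self.clock.time_msec() + self._forgotten_room_retention_period
    queue.append((PURGE_ROOM_ACTION_NAME, room_id, now_ms + retention_period_ms))

queue: List[Tuple[str, str, int]] = []
schedule_forgotten_room_purge(queue, "!room:example.org", 28 * 24 * 3600 * 1000)
print(queue)
```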
- self._hs.get_pagination_handler().start_shutdown_and_purge_room(room_id, None) + await self._hs.get_pagination_handler().start_shutdown_and_purge_room( + room_id, + { + "new_room_user_id": None, + "new_room_name": None, + "message": None, + "requester_user_id": None, + "block": False, + "purge": True, + "force_purge": False, + }, + ) async def set_displayname( self, diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 0d42c89ff776..7d0b4b55a0df 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.errors import Codes, NotFoundError, SynapseError +from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -93,7 +94,7 @@ UserTokenRestServlet, WhoisRestServlet, ) -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, TaskStatus from synapse.util import SYNAPSE_VERSION if TYPE_CHECKING: @@ -196,7 +197,7 @@ async def on_POST( errcode=Codes.BAD_JSON, ) - purge_id = self.pagination_handler.start_purge_history( + purge_id = await self.pagination_handler.start_purge_history( room_id, token, delete_local_events=delete_local_events ) @@ -215,11 +216,20 @@ async def on_GET( ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - purge_status = self.pagination_handler.get_purge_status(purge_id) - if purge_status is None: + purge_task = await self.pagination_handler.get_delete_task(purge_id) + if purge_task is None or purge_task.action != PURGE_HISTORY_ACTION_NAME: raise NotFoundError("purge id '%s' not found" % purge_id) - return HTTPStatus.OK, purge_status.asdict() + result: JsonDict = { + "status": purge_task.status + if purge_task.status == TaskStatus.COMPLETE + or purge_task.status == TaskStatus.FAILED + else "active", + } + if purge_task.error: + result["error"] = purge_task.error + + return HTTPStatus.OK, result ######################################################################################## diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 1d655602650d..436718c8b227 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -19,6 +19,10 @@ from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter +from synapse.handlers.pagination import ( + PURGE_ROOM_ACTION_NAME, + SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, +) from synapse.http.servlet import ( ResolveRoomIdMixin, RestServlet, @@ -36,7 +40,7 @@ ) from synapse.storage.databases.main.room import RoomSortOrder from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, RoomID, UserID, create_requester +from synapse.types import JsonDict, RoomID, ScheduledTask, UserID, create_requester from synapse.types.state import StateFilter from synapse.util import json_decoder @@ -117,20 +121,30 @@ async def on_DELETE( 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN ) - delete_id = self._pagination_handler.start_shutdown_and_purge_room( + delete_id = await self._pagination_handler.start_shutdown_and_purge_room( room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - 
message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, - purge=purge, - force_purge=force_purge, + shutdown_params={ + "new_room_user_id": content.get("new_room_user_id"), + "new_room_name": content.get("room_name"), + "message": content.get("message"), + "requester_user_id": requester.user.to_string(), + "block": block, + "purge": purge, + "force_purge": force_purge, + }, ) return HTTPStatus.OK, {"delete_id": delete_id} +def _convert_delete_task_to_response(task: ScheduledTask) -> JsonDict: + return { + "delete_id": task.id, + "status": task.status, + "shutdown_room": task.result, + } + + class DeleteRoomStatusByRoomIdRestServlet(RestServlet): """Get the status of the delete room background task.""" @@ -150,21 +164,16 @@ async def on_GET( HTTPStatus.BAD_REQUEST, "%s is not a legal room ID" % (room_id,) ) - delete_ids = self._pagination_handler.get_delete_ids_by_room(room_id) - if delete_ids is None: - raise NotFoundError("No delete task for room_id '%s' found" % room_id) + delete_tasks = await self._pagination_handler.get_delete_tasks_by_room(room_id) - response = [] - for delete_id in delete_ids: - delete = self._pagination_handler.get_delete_status(delete_id) - if delete: - response += [ - { - "delete_id": delete_id, - **delete.asdict(), - } - ] - return HTTPStatus.OK, {"results": cast(JsonDict, response)} + if delete_tasks: + return HTTPStatus.OK, { + "results": [ + _convert_delete_task_to_response(task) for task in delete_tasks + ], + } + else: + raise NotFoundError("No delete task for room_id '%s' found" % room_id) class DeleteRoomStatusByDeleteIdRestServlet(RestServlet): @@ -181,11 +190,14 @@ async def on_GET( ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) - delete_status = self._pagination_handler.get_delete_status(delete_id) - if delete_status is None: + delete_task = await self._pagination_handler.get_delete_task(delete_id) + if delete_task is None or ( + delete_task.action != PURGE_ROOM_ACTION_NAME + and delete_task.action != SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME + ): raise NotFoundError("delete id '%s' not found" % delete_id) - return HTTPStatus.OK, cast(JsonDict, delete_status.asdict()) + return HTTPStatus.OK, _convert_delete_task_to_response(delete_task) class ListRoomRestServlet(RestServlet): @@ -349,11 +361,15 @@ async def _delete_room( ret = await room_shutdown_handler.shutdown_room( room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, + params={ + "new_room_user_id": content.get("new_room_user_id"), + "new_room_name": content.get("room_name"), + "message": content.get("message"), + "requester_user_id": requester.user.to_string(), + "block": block, + "purge": purge, + "force_purge": force_purge, + }, ) # Purge room diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index eb50086c508e..6ed451d7c465 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -15,26 +15,34 @@ import time import urllib.parse from typing import List, Optional -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from parameterized import parameterized +from twisted.internet.task import deferLater from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import EventTypes, Membership, RoomTypes from synapse.api.errors import Codes -from 
synapse.handlers.pagination import PaginationHandler, PurgeStatus +from synapse.handlers.pagination import ( + PURGE_ROOM_ACTION_NAME, + SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, +) from synapse.rest.client import directory, events, login, room from synapse.server import HomeServer +from synapse.types import UserID from synapse.util import Clock -from synapse.util.stringutils import random_string +from synapse.util.task_scheduler import TaskScheduler from tests import unittest """Tests admin REST events for /rooms paths.""" +ONE_HOUR_IN_S = 3600 + + class DeleteRoomTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -46,6 +54,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.event_creation_handler = hs.get_event_creation_handler() + self.task_scheduler = hs.get_task_scheduler() hs.config.consent.user_consent_version = "1" consent_uri_builder = Mock() @@ -476,6 +485,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.event_creation_handler = hs.get_event_creation_handler() + self.task_scheduler = hs.get_task_scheduler() hs.config.consent.user_consent_version = "1" consent_uri_builder = Mock() @@ -502,6 +512,9 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) self.url_status_by_delete_id = "/_synapse/admin/v2/rooms/delete_status/" + self.room_member_handler = hs.get_room_member_handler() + self.pagination_handler = hs.get_pagination_handler() + @parameterized.expand( [ ("DELETE", "/_synapse/admin/v2/rooms/%s"), @@ -661,7 +674,7 @@ def test_delete_expired_status(self) -> None: delete_id1 = channel.json_body["delete_id"] # go ahead - self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2) + self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2) # second task channel = self.make_request( @@ -686,12 +699,14 @@ def test_delete_expired_status(self) -> None: self.assertEqual(2, len(channel.json_body["results"])) self.assertEqual("complete", channel.json_body["results"][0]["status"]) self.assertEqual("complete", channel.json_body["results"][1]["status"]) - self.assertEqual(delete_id1, channel.json_body["results"][0]["delete_id"]) - self.assertEqual(delete_id2, channel.json_body["results"][1]["delete_id"]) + delete_ids = {delete_id1, delete_id2} + self.assertTrue(channel.json_body["results"][0]["delete_id"] in delete_ids) + delete_ids.remove(channel.json_body["results"][0]["delete_id"]) + self.assertTrue(channel.json_body["results"][1]["delete_id"] in delete_ids) # get status after more than clearing time for first task # second task is not cleared - self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2) + self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2) channel = self.make_request( "GET", @@ -705,7 +720,7 @@ def test_delete_expired_status(self) -> None: self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"]) # get status after more than clearing time for all tasks - self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2) + self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2) channel = self.make_request( "GET", @@ -721,6 +736,13 @@ def test_delete_same_room_twice(self) -> None: body = {"new_room_user_id": self.admin_user} + # Mock PaginationHandler.purge_room to sleep for 100s, so we have time to do a second call + # before the purge is 
over. Note that it doesn't purge anymore, but we don't care. + async def purge_room(room_id: str, force: bool) -> None: + await deferLater(self.hs.get_reactor(), 100, lambda: None) + + self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room) # type: ignore[method-assign] + # first call to delete room # and do not wait for finish the task first_channel = self.make_request( @@ -728,7 +750,6 @@ def test_delete_same_room_twice(self) -> None: self.url.encode("ascii"), content=body, access_token=self.admin_user_tok, - await_result=False, ) # second call to delete room @@ -742,7 +763,7 @@ def test_delete_same_room_twice(self) -> None: self.assertEqual(400, second_channel.code, msg=second_channel.json_body) self.assertEqual(Codes.UNKNOWN, second_channel.json_body["errcode"]) self.assertEqual( - f"History purge already in progress for {self.room_id}", + f"Purge already in progress for {self.room_id}", second_channel.json_body["error"], ) @@ -751,6 +772,9 @@ def test_delete_same_room_twice(self) -> None: self.assertEqual(200, first_channel.code, msg=first_channel.json_body) self.assertIn("delete_id", first_channel.json_body) + # wait for purge_room to finish + self.pump(1) + # check status after finish the task self._test_result( first_channel.json_body["delete_id"], @@ -972,6 +996,115 @@ def test_shutdown_room_block_peek(self) -> None: # Assert we can no longer peek into the room self._assert_peek(self.room_id, expect_code=403) + @unittest.override_config({"forgotten_room_retention_period": "1d"}) + def test_purge_forgotten_room(self) -> None: + # Create a test room + room_id = self.helper.create_room_as( + self.admin_user, + tok=self.admin_user_tok, + ) + + self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok) + self.get_success( + self.room_member_handler.forget( + UserID.from_string(self.admin_user), room_id + ) + ) + + # Test that room is not yet purged + with self.assertRaises(AssertionError): + self._is_purged(room_id) + + # Advance 24 hours in the future, past the `forgotten_room_retention_period` + self.reactor.advance(24 * ONE_HOUR_IN_S) + + self._is_purged(room_id) + + def test_scheduled_purge_room(self) -> None: + # Create a test room + room_id = self.helper.create_room_as( + self.admin_user, + tok=self.admin_user_tok, + ) + self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok) + + # Schedule a purge 10 seconds in the future + self.get_success( + self.task_scheduler.schedule_task( + PURGE_ROOM_ACTION_NAME, + resource_id=room_id, + timestamp=self.clock.time_msec() + 10 * 1000, + ) + ) + + # Test that room is not yet purged + with self.assertRaises(AssertionError): + self._is_purged(room_id) + + # Wait for next scheduler run + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + + self._is_purged(room_id) + + def test_schedule_shutdown_room(self) -> None: + # Create a test room + room_id = self.helper.create_room_as( + self.other_user, + tok=self.other_user_tok, + ) + + # Schedule a shutdown 10 seconds in the future + delete_id = self.get_success( + self.task_scheduler.schedule_task( + SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, + resource_id=room_id, + params={ + "requester_user_id": self.admin_user, + "new_room_user_id": self.admin_user, + "new_room_name": None, + "message": None, + "block": False, + "purge": True, + "force_purge": True, + }, + timestamp=self.clock.time_msec() + 10 * 1000, + ) + ) + + # Test that room is not yet shutdown + self._is_member(room_id, self.other_user) + + # Test that room is not yet purged + with 
self.assertRaises(AssertionError): + self._is_purged(room_id) + + # Wait for next scheduler run + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + + # Test that all users have been kicked (room is shut down) + self._has_no_members(room_id) + + self._is_purged(room_id) + + # Retrieve delete results + result = self.make_request( + "GET", + self.url_status_by_delete_id + delete_id, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, result.code, msg=result.json_body) + + # Check that the user is in kicked_users + self.assertIn( + self.other_user, result.json_body["shutdown_room"]["kicked_users"] + ) + + new_room_id = result.json_body["shutdown_room"]["new_room_id"] + self.assertTrue(new_room_id) + + # Check that the user is actually in the new room + self._is_member(new_room_id, self.other_user) + def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" d = self.store.is_room_blocked(room_id) @@ -1034,7 +1167,6 @@ def _test_result( kicked_user: a user_id which is kicked from the room expect_new_room: if we expect that a new room was created """ - # get information by room_id channel_room_id = self.make_request( "GET", @@ -1957,11 +2089,8 @@ def test_room_messages_purge(self) -> None: self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) # Purge every event before the second event. - purge_id = random_string(16) - pagination_handler._purges_by_id[purge_id] = PurgeStatus() self.get_success( - pagination_handler._purge_history( - purge_id=purge_id, + pagination_handler.purge_history( room_id=self.room_id, token=second_token_str, delete_local_events=True, diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index 28b999573e75..dfd14f5751bf 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -22,6 +22,7 @@ from synapse.storage.roommember import RoomsForUser from synapse.types import JsonDict from synapse.util import Clock +from synapse.util.stringutils import random_string from tests import unittest from tests.unittest import override_config @@ -413,11 +414,24 @@ def test_send_server_notice_delete_room(self) -> None: self.assertEqual(messages[0]["content"]["body"], "test msg one") self.assertEqual(messages[0]["sender"], "@notices:test") + random_string(16) + # shut down and purge room self.get_success( - self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user) - ) - self.get_success(self.pagination_handler.purge_room(first_room_id)) + self.room_shutdown_handler.shutdown_room( + first_room_id, + { + "requester_user_id": self.admin_user, + "new_room_user_id": None, + "new_room_name": None, + "message": None, + "block": False, + "purge": True, + "force_purge": False, + }, + ) + ) + self.get_success(self.pagination_handler.purge_room(first_room_id, force=False)) # user is not member anymore self._check_invite_and_join_status(self.other_user, 0, 0) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 47c1d38ad7dd..7627823d3fd3 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -41,7 +41,6 @@ from synapse.appservice import ApplicationService from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.handlers.pagination import PurgeStatus from synapse.rest import admin from synapse.rest.client import account, directory, login, profile, register, room, sync from synapse.server import HomeServer @@ -2086,11
+2085,8 @@ def test_room_messages_purge(self) -> None: self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) # Purge every event before the second event. - purge_id = random_string(16) - pagination_handler._purges_by_id[purge_id] = PurgeStatus() self.get_success( - pagination_handler._purge_history( - purge_id=purge_id, + pagination_handler.purge_history( room_id=self.room_id, token=second_token_str, delete_local_events=True, From d2eacfe051d21be8174f1accb92263150654b6b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:17:48 +0100 Subject: [PATCH 54/75] Bump docker/build-push-action from 4 to 5 (#16336) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8a69dc4986e9..f6772993dd49 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -68,7 +68,7 @@ jobs: type=pep440,pattern={{raw}} - name: Build and push all platforms - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: push: true labels: | From 47f9837008e438297741ba6c0d6ce3a417211df6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:19:05 +0100 Subject: [PATCH 55/75] Bump docker/setup-qemu-action from 2 to 3 (#16338) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/release-artifacts.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f6772993dd49..f9d2ce67d2b1 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Set up QEMU id: qemu - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 with: platforms: arm64 diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index f331f67d9728..f031127cceaa 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -134,7 +134,7 @@ jobs: - name: Set up QEMU to emulate aarch64 if: matrix.arch == 'aarch64' - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 with: platforms: arm64 From 4cdc2ed7dc2bc1234964e5f405858ce14e96eb68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:20:50 +0100 Subject: [PATCH 56/75] Bump docker/metadata-action from 4 to 5 (#16337) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/push_complement_image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index b76c4cb32382..8e41611f2cb1 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Work out labels for complement image id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: ghcr.io/${{ github.repository }}/complement-synapse tags: | From ab5f4f906d7e0219ebf872cc6853b4f69af11dc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:21:14 +0100 Subject: [PATCH 57/75] Bump docker/login-action from 2 to 3 (#16339) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 4 ++-- .github/workflows/push_complement_image.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f9d2ce67d2b1..a1793b901084 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -40,13 +40,13 @@ jobs: echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV - name: Log in to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Log in to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index 8e41611f2cb1..e994b122cd8b 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -48,7 +48,7 @@ jobs: with: ref: master - name: Login to registry - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} From fedaebc440da3a57c4df97787a3629df85a35e38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:21:32 +0100 Subject: [PATCH 58/75] Bump typing-extensions from 4.7.1 to 4.8.0 (#16341) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index c01312579e42..9d5ec8453599 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2077,6 +2077,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2084,8 +2085,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2102,6 +2110,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2109,6 +2118,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3070,13 +3080,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] [[package]] From 6946209e671ec278d7648434500aeb2639c8c3c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Joaqu=C3=ADn=20Atria?= Date: Mon, 18 Sep 2023 12:32:01 +0100 Subject: [PATCH 59/75] Set email charset as utf-8 rather than utf8 (#16329) --- changelog.d/16329.bugfix | 1 + synapse/handlers/send_email.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16329.bugfix diff --git a/changelog.d/16329.bugfix b/changelog.d/16329.bugfix new file mode 100644 index 000000000000..2f1f7e8ffe54 --- /dev/null +++ b/changelog.d/16329.bugfix @@ -0,0 +1 @@ +Use standard name for UTF-8 charset in emails. diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 4f5fe62fe802..657d9b35591f 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -174,8 +174,8 @@ async def send_email( if raw_to == "": raise RuntimeError("Invalid 'to' address") - html_part = MIMEText(html, "html", "utf8") - text_part = MIMEText(text, "plain", "utf8") + html_part = MIMEText(html, "html", "utf-8") + text_part = MIMEText(text, "plain", "utf-8") multipart_msg = MIMEMultipart("alternative") multipart_msg["Subject"] = subject From 5ad1714d420445ec55e600ee52c5561e82a6d516 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:46:02 +0200 Subject: [PATCH 60/75] Bump pillow from 10.0.0 to 10.0.1 (#16344) Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.0.0 to 10.0.1. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/10.0.0...10.0.1) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 112 ++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 57 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9d5ec8453599..17d0993a8b50 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1618,67 +1618,65 @@ files = [ [[package]] name = "pillow" -version = "10.0.0" +version = "10.0.1" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, - {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, - {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, - {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, - {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, 
- {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, - {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, - {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, - {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = 
"sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, - {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, - {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"}, + {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"}, + {file = 
"Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"}, + {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"}, + {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"}, + {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"}, + {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"}, + {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"}, ] [package.extras] From 4663d555021ce53c57e418eb1ee4445eab276bc4 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 18 Sep 2023 15:01:23 +0200 Subject: [PATCH 61/75] Mandate Pillow>=10.0.1 because of libwebp CVE (#16347) --- changelog.d/16347.misc | 1 + pyproject.toml | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16347.misc diff --git a/changelog.d/16347.misc b/changelog.d/16347.misc new file mode 100644 index 000000000000..f4f5bfb2de62 --- /dev/null +++ b/changelog.d/16347.misc @@ -0,0 +1 @@ +Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp 
in the wheels. diff --git a/pyproject.toml b/pyproject.toml index ea2d75fa8cdd..9c9a5dc2bc3d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -180,7 +180,9 @@ PyYAML = ">=3.13" pyasn1 = ">=0.1.9" pyasn1-modules = ">=0.0.7" bcrypt = ">=3.1.7" -Pillow = ">=5.4.0" +# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863. +# Packagers that already took care of libwebp can lower that down to 5.4.0. +Pillow = ">=10.0.1" # We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2. sortedcontainers = ">=1.5.2" pymacaroons = ">=0.13.0" From 63d28a88c1d18c64ea7e23b6dd7483e6d5dcf881 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 09:02:12 -0400 Subject: [PATCH 62/75] Additional validation of receipts (#16327) Reject invalid receipts with a reasonable error message and expand tests for receipts. --- changelog.d/16327.bugfix | 1 + synapse/handlers/receipts.py | 26 +++- synapse/rest/client/read_marker.py | 2 +- synapse/rest/client/receipts.py | 2 +- tests/rest/client/test_receipts.py | 221 +++++++++++++++++++++++++++-- tests/rest/client/test_sync.py | 154 +------------------- 6 files changed, 241 insertions(+), 165 deletions(-) create mode 100644 changelog.d/16327.bugfix diff --git a/changelog.d/16327.bugfix b/changelog.d/16327.bugfix new file mode 100644 index 000000000000..be3d1b4f2153 --- /dev/null +++ b/changelog.d/16327.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where invalid receipts would be accepted. diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 2bacdebfb5f9..c7edada35352 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -37,6 +37,8 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.config.server.server_name self.store = hs.get_datastores().main self.event_auth_handler = hs.get_event_auth_handler() + self.event_handler = hs.get_event_handler() + self._storage_controllers = hs.get_storage_controllers() self.hs = hs @@ -81,6 +83,20 @@ async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None ) continue + # Let's check that the origin server is in the room before accepting the receipt. + # We don't want to block waiting on a partial state so take an + # approximation if needed. + domains = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation( + room_id + ) + if origin not in domains: + logger.info( + "Ignoring receipt for room %r from server %s as they're not in the room", + room_id, + origin, + ) + continue + for receipt_type, users in room_values.items(): for user_id, user_values in users.items(): if get_domain_from_id(user_id) != origin: @@ -158,17 +174,23 @@ async def received_client_receipt( self, room_id: str, receipt_type: str, - user_id: str, + user_id: UserID, event_id: str, thread_id: Optional[str], ) -> None: """Called when a client tells us a local user has read up to the given event_id in the room. """ + + # Ensure the room/event exists, this will raise an error if the user + # cannot view the event.
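+        # A receipt for an event we do not know about is ignored rather than
+        # rejected (the tests below expect a 200 for an unknown event and a
+        # 403 for an unviewable one).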
+ if not await self.event_handler.get_event(user_id, room_id, event_id): + return + receipt = ReadReceipt( room_id=room_id, receipt_type=receipt_type, - user_id=user_id, + user_id=user_id.to_string(), event_ids=[event_id], thread_id=thread_id, data={"ts": int(self.clock.time_msec())}, diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 1707e519723a..15e4d56cdba9 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -84,7 +84,7 @@ async def on_POST( await self.receipts_handler.received_client_receipt( room_id, receipt_type, - user_id=requester.user.to_string(), + user_id=requester.user, event_id=event_id, # Setting the thread ID is not possible with the /read_markers endpoint. thread_id=None, diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 869a37445950..814d075fafde 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -108,7 +108,7 @@ async def on_POST( await self.receipts_handler.received_client_receipt( room_id, receipt_type, - user_id=requester.user.to_string(), + user_id=requester.user, event_id=event_id, thread_id=thread_id, ) diff --git a/tests/rest/client/test_receipts.py b/tests/rest/client/test_receipts.py index 2a7fcea38607..ec638c89b722 100644 --- a/tests/rest/client/test_receipts.py +++ b/tests/rest/client/test_receipts.py @@ -11,11 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus +from typing import Optional + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.rest.client import login, receipts, register +from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, ReceiptTypes +from synapse.rest.client import login, receipts, room, sync from synapse.server import HomeServer +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -24,30 +29,113 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): servlets = [ login.register_servlets, - register.register_servlets, receipts.register_servlets, synapse.rest.admin.register_servlets, + room.register_servlets, + sync.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.owner = self.register_user("owner", "pass") - self.owner_tok = self.login("owner", "pass") + self.url = "/sync?since=%s" + self.next_batch = "s0" + + # Register the first user + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + # Create the room + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + # Register the second user + self.user2 = self.register_user("kermit2", "monkey") + self.tok2 = self.login("kermit2", "monkey") + + # Join the second user + self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) def test_send_receipt(self) -> None: + # Send a message. 
+ res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertNotEqual(self._get_read_receipt(), None) + + def test_send_receipt_unknown_event(self) -> None: + """Receipts sent for unknown events are ignored to not break message retention.""" + # Attempt to send a receipt to an unknown room. channel = self.make_request( "POST", "/rooms/!abc:beep/receipt/m.read/$def", content={}, - access_token=self.owner_tok, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertIsNone(self._get_read_receipt()) + + # Attempt to send a receipt to an unknown event. + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/m.read/$def", + content={}, + access_token=self.tok2, ) self.assertEqual(channel.code, 200, channel.result) + self.assertIsNone(self._get_read_receipt()) + + def test_send_receipt_unviewable_event(self) -> None: + """Receipts sent for unviewable events are errors.""" + # Create a room where new users can't see events from before their join + # & send events into it. + room_id = self.helper.create_room_as( + self.user_id, + tok=self.tok, + extra_content={ + "preset": "private_chat", + "initial_state": [ + { + "content": {"history_visibility": HistoryVisibility.JOINED}, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + res = self.helper.send(room_id, body="hello", tok=self.tok) + + # Attempt to send a receipt from the wrong user. + channel = self.make_request( + "POST", + f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + content={}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 403, channel.result) + + # Join the user to the room, but they still can't see the event. 
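+        # (The room above was created with history_visibility "joined", so
+        # events sent before the join stay hidden from the second user.)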
+ self.helper.invite(room_id, self.user_id, self.user2, tok=self.tok) + self.helper.join(room=room_id, user=self.user2, tok=self.tok2) + + channel = self.make_request( + "POST", + f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + content={}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 403, channel.result) def test_send_receipt_invalid_room_id(self) -> None: channel = self.make_request( "POST", "/rooms/not-a-room-id/receipt/m.read/$def", content={}, - access_token=self.owner_tok, + access_token=self.tok, ) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -59,7 +147,7 @@ def test_send_receipt_invalid_event_id(self) -> None: "POST", "/rooms/!abc:beep/receipt/m.read/not-an-event-id", content={}, - access_token=self.owner_tok, + access_token=self.tok, ) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -71,6 +159,123 @@ def test_send_receipt_invalid_receipt_type(self) -> None: "POST", "/rooms/!abc:beep/receipt/invalid-receipt-type/$def", content={}, - access_token=self.owner_tok, + access_token=self.tok, ) self.assertEqual(channel.code, 400, channel.result) + + def test_private_read_receipts(self) -> None: + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a private read receipt to tell the server the first user's message was read + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that the first user can't see the other user's private read receipt + self.assertIsNone(self._get_read_receipt()) + + def test_public_receipt_can_override_private(self) -> None: + """ + Sending a public read receipt to the same event which has a private read + receipt should cause that receipt to become public. + """ + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a private read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertIsNone(self._get_read_receipt()) + + # Send a public read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that we did override the private read receipt + self.assertNotEqual(self._get_read_receipt(), None) + + def test_private_receipt_cannot_override_public(self) -> None: + """ + Sending a private read receipt to the same event which has a public read + receipt should cause no change. 
+ """ + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a public read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertNotEqual(self._get_read_receipt(), None) + + # Send a private read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that we didn't override the public read receipt + self.assertIsNone(self._get_read_receipt()) + + def test_read_receipt_with_empty_body_is_rejected(self) -> None: + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt for this message with an empty body + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/m.read/{res['event_id']}", + access_token=self.tok2, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) + self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON", channel.json_body) + + def _get_read_receipt(self) -> Optional[JsonDict]: + """Syncs and returns the read receipt.""" + + # Checks if event is a read receipt + def is_read_receipt(event: JsonDict) -> bool: + return event["type"] == EduTypes.RECEIPT + + # Sync + channel = self.make_request( + "GET", + self.url % self.next_batch, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Store the next batch for the next request. + self.next_batch = channel.json_body["next_batch"] + + if channel.json_body.get("rooms", None) is None: + return None + + # Return the read receipt + ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ + "ephemeral" + ]["events"] + receipt_event = filter(is_read_receipt, ephemeral_events) + return next(receipt_event, None) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 9c876c7a3230..d60665254eb5 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import json -from http import HTTPStatus -from typing import List, Optional +from typing import List from parameterized import parameterized @@ -22,7 +21,6 @@ import synapse.rest.admin from synapse.api.constants import ( - EduTypes, EventContentFields, EventTypes, ReceiptTypes, @@ -376,156 +374,6 @@ def test_knock_room_state(self) -> None: ) -class ReadReceiptsTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - receipts.register_servlets, - room.register_servlets, - sync.register_servlets, - ] - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() - - return self.setup_test_homeserver(config=config) - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.url = "/sync?since=%s" - self.next_batch = "s0" - - # Register the first user - self.user_id = self.register_user("kermit", "monkey") - self.tok = self.login("kermit", "monkey") - - # Create the room - self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - - # Register the second user - self.user2 = self.register_user("kermit2", "monkey") - self.tok2 = self.login("kermit2", "monkey") - - # Join the second user - self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) - - def test_private_read_receipts(self) -> None: - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a private read receipt to tell the server the first user's message was read - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - - # Test that the first user can't see the other user's private read receipt - self.assertIsNone(self._get_read_receipt()) - - def test_public_receipt_can_override_private(self) -> None: - """ - Sending a public read receipt to the same event which has a private read - receipt should cause that receipt to become public. - """ - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a private read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - self.assertIsNone(self._get_read_receipt()) - - # Send a public read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - - # Test that we did override the private read receipt - self.assertNotEqual(self._get_read_receipt(), None) - - def test_private_receipt_cannot_override_public(self) -> None: - """ - Sending a private read receipt to the same event which has a public read - receipt should cause no change. 
- """ - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a public read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - self.assertNotEqual(self._get_read_receipt(), None) - - # Send a private read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - - # Test that we didn't override the public read receipt - self.assertIsNone(self._get_read_receipt()) - - def test_read_receipt_with_empty_body_is_rejected(self) -> None: - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a read receipt for this message with an empty body - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/m.read/{res['event_id']}", - access_token=self.tok2, - ) - self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) - self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON", channel.json_body) - - def _get_read_receipt(self) -> Optional[JsonDict]: - """Syncs and returns the read receipt.""" - - # Checks if event is a read receipt - def is_read_receipt(event: JsonDict) -> bool: - return event["type"] == EduTypes.RECEIPT - - # Sync - channel = self.make_request( - "GET", - self.url % self.next_batch, - access_token=self.tok, - ) - self.assertEqual(channel.code, 200) - - # Store the next batch for the next request. - self.next_batch = channel.json_body["next_batch"] - - if channel.json_body.get("rooms", None) is None: - return None - - # Return the read receipt - ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ - "ephemeral" - ]["events"] - receipt_event = filter(is_read_receipt, ephemeral_events) - return next(receipt_event, None) - - class UnreadMessagesTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, From 85bfd4735e0b6e31d530f692d7113b4fec89e6b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 09:29:05 -0400 Subject: [PATCH 63/75] Return an immutable value from get_latest_event_ids_in_room. (#16326) --- changelog.d/16326.misc | 1 + synapse/events/builder.py | 2 +- synapse/handlers/federation_event.py | 8 +++--- synapse/storage/controllers/persist_events.py | 9 +++---- .../databases/main/event_federation.py | 8 +++--- synapse/storage/databases/main/events.py | 2 +- tests/handlers/test_presence.py | 2 +- tests/replication/storage/test_events.py | 4 +-- tests/replication/tcp/streams/test_events.py | 10 +++---- .../test_federation_sender_shard.py | 2 +- tests/storage/test_cleanup_extrems.py | 14 +++++----- tests/test_federation.py | 26 ++++++++++++------- 12 files changed, 48 insertions(+), 40 deletions(-) create mode 100644 changelog.d/16326.misc diff --git a/changelog.d/16326.misc b/changelog.d/16326.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/16326.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 1165c017baa8..43469b170fee 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -103,7 +103,7 @@ def is_state(self) -> bool: async def build( self, - prev_event_ids: StrCollection, + prev_event_ids: List[str], auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ) -> EventBase: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index d32d224d5640..eedde97ab093 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -723,12 +723,11 @@ async def _get_missing_events_for_pdu( if not prevs - seen: return - latest_list = await self._store.get_latest_event_ids_in_room(room_id) + latest_frozen = await self._store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us - latest = set(latest_list) - latest |= seen + latest = seen | latest_frozen logger.info( "Requesting missing events between %s and %s", @@ -1976,8 +1975,7 @@ async def _check_for_soft_fail( # partial and full state and may not be accurate. return - extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id) - extrem_ids = set(extrem_ids_list) + extrem_ids = await self._store.get_latest_event_ids_in_room(event.room_id) prev_event_ids = set(event.prev_event_ids()) if extrem_ids == prev_event_ids: diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 6864f9309020..f39ae2d63536 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -19,6 +19,7 @@ from collections import deque from typing import ( TYPE_CHECKING, + AbstractSet, Any, Awaitable, Callable, @@ -618,7 +619,7 @@ async def _persist_event_batch( ) for room_id, ev_ctx_rm in events_by_room.items(): - latest_event_ids = set( + latest_event_ids = ( await self.main_store.get_latest_event_ids_in_room(room_id) ) new_latest_event_ids = await self._calculate_new_extremities( @@ -740,7 +741,7 @@ async def _calculate_new_extremities( self, room_id: str, event_contexts: List[Tuple[EventBase, EventContext]], - latest_event_ids: Collection[str], + latest_event_ids: AbstractSet[str], ) -> Set[str]: """Calculates the new forward extremities for a room given events to persist. 
@@ -758,8 +759,6 @@ async def _calculate_new_extremities( and not event.internal_metadata.is_soft_failed() ] - latest_event_ids = set(latest_event_ids) - # start with the existing forward extremities result = set(latest_event_ids) @@ -798,7 +797,7 @@ async def _get_new_state_after_events( self, room_id: str, events_context: List[Tuple[EventBase, EventContext]], - old_latest_event_ids: Set[str], + old_latest_event_ids: AbstractSet[str], new_latest_event_ids: Set[str], ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]: """Calculate the current state dict after adding some new events to diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 09de8f55e277..afffa549853d 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -19,6 +19,7 @@ TYPE_CHECKING, Collection, Dict, + FrozenSet, Iterable, List, Optional, @@ -47,7 +48,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict, StrCollection, StrSequence +from synapse.types import JsonDict, StrCollection from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache @@ -1179,13 +1180,14 @@ def _get_rooms_with_many_extremities_txn(txn: LoggingTransaction) -> List[str]: ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> StrSequence: - return await self.db_pool.simple_select_onecol( + async def get_latest_event_ids_in_room(self, room_id: str) -> FrozenSet[str]: + event_ids = await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, retcol="event_id", desc="get_latest_event_ids_in_room", ) + return frozenset(event_ids) async def get_min_depth(self, room_id: str) -> Optional[int]: """For the given room, get the minimum depth we have seen for it.""" diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 0c1ed752406f..bc8474a5897e 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -222,7 +222,7 @@ async def _persist_events_and_state_updates( for room_id, latest_event_ids in new_forward_extremities.items(): self.store.get_latest_event_ids_in_room.prefill( - (room_id,), list(latest_event_ids) + (room_id,), frozenset(latest_event_ids) ) async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]: diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 638787b029b8..41c8c44e0241 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -1858,7 +1858,7 @@ def _add_new_user(self, room_id: str, user_id: str) -> None: ) event = self.get_success( - builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None) + builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None) ) self.get_success(self.federation_event_handler.on_receive_pdu(hostname, event)) diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index af25815fa56e..33c277a38a15 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -90,7 +90,7 @@ def tearDown(self) -> None: def 
test_get_latest_event_ids_in_room(self) -> None: create = self.persist(type="m.room.create", key="", creator=USER_ID) self.replicate() - self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) + self.check("get_latest_event_ids_in_room", (ROOM_ID,), {create.event_id}) join = self.persist( type="m.room.member", @@ -99,7 +99,7 @@ def test_get_latest_event_ids_in_room(self) -> None: prev_events=[(create.event_id, {})], ) self.replicate() - self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) + self.check("get_latest_event_ids_in_room", (ROOM_ID,), {join.event_id}) def test_redactions(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 65ef4bb16055..128fc3e0460c 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List, Optional, Sequence +from typing import Any, List, Optional from twisted.test.proto_helpers import MemoryReactor @@ -139,7 +139,7 @@ def test_update_function_huge_state_change(self) -> None: ) # this is the point in the DAG where we make a fork - fork_point: Sequence[str] = self.get_success( + fork_point = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -294,7 +294,7 @@ def test_update_function_state_row_limit(self) -> None: ) # this is the point in the DAG where we make a fork - fork_point: Sequence[str] = self.get_success( + fork_point = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -316,14 +316,14 @@ def test_update_function_state_row_limit(self) -> None: self.test_handler.received_rdata_rows.clear() # now roll back all that state by de-modding the users - prev_events = fork_point + prev_events = list(fork_point) pl_events = [] for u in user_ids: pls["users"][u] = 0 e = self.get_success( inject_event( self.hs, - prev_event_ids=list(prev_events), + prev_event_ids=prev_events, type=EventTypes.PowerLevels, state_key="", sender=self.user_id, diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 9b28cd474fbf..59f4fdc70bca 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -261,7 +261,7 @@ def create_room_with_remote_server( builder = factory.for_room_version(room_version, event_dict) join_event = self.get_success( - builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None) + builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None) ) self.get_success(federation.on_send_membership_event(remote_server, join_event)) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 7de109966d61..ceb9597dd312 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -120,7 +120,7 @@ def test_soft_failed_extremities_handled_correctly(self) -> None: self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(latest_event_ids, [event_id_4]) + self.assertEqual(latest_event_ids, {event_id_4}) def test_basic_cleanup(self) -> None: """Test that extremities are correctly calculated in the presence of @@ -147,7 +147,7 @@ def test_basic_cleanup(self) -> None: latest_event_ids = 
self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) + self.assertEqual(latest_event_ids, {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() @@ -155,7 +155,7 @@ def test_basic_cleanup(self) -> None: latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(latest_event_ids, [event_id_b]) + self.assertEqual(latest_event_ids, {event_id_b}) def test_chain_of_fail_cleanup(self) -> None: """Test that extremities are correctly calculated in the presence of @@ -185,7 +185,7 @@ def test_chain_of_fail_cleanup(self) -> None: latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) + self.assertEqual(latest_event_ids, {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() @@ -193,7 +193,7 @@ def test_chain_of_fail_cleanup(self) -> None: latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(latest_event_ids, [event_id_b]) + self.assertEqual(latest_event_ids, {event_id_b}) def test_forked_graph_cleanup(self) -> None: r"""Test that extremities are correctly calculated in the presence of @@ -240,7 +240,7 @@ def test_forked_graph_cleanup(self) -> None: latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b, event_id_c}) + self.assertEqual(latest_event_ids, {event_id_a, event_id_b, event_id_c}) # Run the background update and check it did the right thing self.run_background_update() @@ -248,7 +248,7 @@ def test_forked_graph_cleanup(self) -> None: latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_b, event_id_c}) + self.assertEqual(latest_event_ids, {event_id_b, event_id_c}) class CleanupExtremDummyEventsTestCase(HomeserverTestCase): diff --git a/tests/test_federation.py b/tests/test_federation.py index f8ade6da3852..1b0504709edc 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -51,9 +51,15 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = self.hs.get_datastores().main # Figure out what the most recent event is - most_recent = self.get_success( - self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) - )[0] + most_recent = next( + iter( + self.get_success( + self.hs.get_datastores().main.get_latest_event_ids_in_room( + self.room_id + ) + ) + ) + ) join_event = make_event_from_dict( { @@ -100,8 +106,8 @@ async def _check_sigs_and_hash_for_pulled_events_and_fetch( # Make sure we actually joined the room self.assertEqual( - self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))[0], - "$join:test.serv", + self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)), + {"$join:test.serv"}, ) def test_cant_hide_direct_ancestors(self) -> None: @@ -127,9 +133,11 @@ async def post_json( self.http_client.post_json = post_json # Figure out what the most recent event is - most_recent = self.get_success( - self.store.get_latest_event_ids_in_room(self.room_id) - )[0] + most_recent = next( + iter( + self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) + ) + ) # Now lie 
about an event lying_event = make_event_from_dict( @@ -165,7 +173,7 @@ async def post_json( # Make sure the invalid event isn't there extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) - self.assertEqual(extrem[0], "$join:test.serv") + self.assertEqual(extrem, {"$join:test.serv"}) def test_retry_device_list_resync(self) -> None: """Tests that device lists are marked as stale if they couldn't be synced, and From c1e244c8f70ff1a23e358e1608c555f9722dee1f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 09:55:04 -0400 Subject: [PATCH 64/75] Make cached account data/tags/admin types immutable (#16325) --- changelog.d/16325.misc | 1 + synapse/app/admin_cmd.py | 14 +++++----- synapse/handlers/admin.py | 18 ++++++------- synapse/handlers/sync.py | 27 +++++++++++-------- synapse/rest/admin/users.py | 8 +++--- synapse/rest/client/account_data.py | 10 +++---- .../storage/databases/main/account_data.py | 14 +++++----- .../databases/main/experimental_features.py | 7 +++-- synapse/storage/databases/main/tags.py | 6 ++--- 9 files changed, 55 insertions(+), 50 deletions(-) create mode 100644 changelog.d/16325.misc diff --git a/changelog.d/16325.misc b/changelog.d/16325.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/16325.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index f9aada269a0a..aa24f7da6cae 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -17,7 +17,7 @@ import os import sys import tempfile -from typing import List, Mapping, Optional +from typing import List, Mapping, Optional, Sequence from twisted.internet import defer, task @@ -57,7 +57,7 @@ from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore -from synapse.types import JsonDict, StateMap +from synapse.types import JsonMapping, StateMap from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext @@ -198,7 +198,7 @@ def write_knock( for event in state.values(): json.dump(event, fp=f) - def write_profile(self, profile: JsonDict) -> None: + def write_profile(self, profile: JsonMapping) -> None: user_directory = os.path.join(self.base_directory, "user_data") os.makedirs(user_directory, exist_ok=True) profile_file = os.path.join(user_directory, "profile") @@ -206,7 +206,7 @@ def write_profile(self, profile: JsonDict) -> None: with open(profile_file, "a") as f: json.dump(profile, fp=f) - def write_devices(self, devices: List[JsonDict]) -> None: + def write_devices(self, devices: Sequence[JsonMapping]) -> None: user_directory = os.path.join(self.base_directory, "user_data") os.makedirs(user_directory, exist_ok=True) device_file = os.path.join(user_directory, "devices") @@ -215,7 +215,7 @@ def write_devices(self, devices: List[JsonDict]) -> None: with open(device_file, "a") as f: json.dump(device, fp=f) - def write_connections(self, connections: List[JsonDict]) -> None: + def write_connections(self, connections: Sequence[JsonMapping]) -> None: user_directory = os.path.join(self.base_directory, "user_data") os.makedirs(user_directory, exist_ok=True) connection_file = os.path.join(user_directory, "connections") @@ -225,7 +225,7 @@ def write_connections(self, connections: List[JsonDict]) -> None: json.dump(connection, fp=f) def write_account_data( - self, file_name: str, account_data: 
Mapping[str, JsonDict] + self, file_name: str, account_data: Mapping[str, JsonMapping] ) -> None: account_data_directory = os.path.join( self.base_directory, "user_data", "account_data" @@ -237,7 +237,7 @@ def write_account_data( with open(account_data_file, "a") as f: json.dump(account_data, fp=f) - def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + def write_media_id(self, media_id: str, media_metadata: JsonMapping) -> None: file_directory = os.path.join(self.base_directory, "media_ids") os.makedirs(file_directory, exist_ok=True) media_id_file = os.path.join(file_directory, media_id) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 7092ff3449ca..ba9704a065c5 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,11 +14,11 @@ import abc import logging -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set from synapse.api.constants import Direction, Membership from synapse.events import EventBase -from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID, UserInfo +from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -35,7 +35,7 @@ def __init__(self, hs: "HomeServer"): self._state_storage_controller = self._storage_controllers.state self._msc3866_enabled = hs.config.experimental.msc3866.enabled - async def get_whois(self, user: UserID) -> JsonDict: + async def get_whois(self, user: UserID) -> JsonMapping: connections = [] sessions = await self._store.get_user_ip_and_agents(user) @@ -55,7 +55,7 @@ async def get_whois(self, user: UserID) -> JsonDict: return ret - async def get_user(self, user: UserID) -> Optional[JsonDict]: + async def get_user(self, user: UserID) -> Optional[JsonMapping]: """Function to get user details""" user_info: Optional[UserInfo] = await self._store.get_user_by_id( user.to_string() @@ -344,7 +344,7 @@ def write_knock( raise NotImplementedError() @abc.abstractmethod - def write_profile(self, profile: JsonDict) -> None: + def write_profile(self, profile: JsonMapping) -> None: """Write the profile of a user. Args: @@ -353,7 +353,7 @@ def write_profile(self, profile: JsonDict) -> None: raise NotImplementedError() @abc.abstractmethod - def write_devices(self, devices: List[JsonDict]) -> None: + def write_devices(self, devices: Sequence[JsonMapping]) -> None: """Write the devices of a user. Args: @@ -362,7 +362,7 @@ def write_devices(self, devices: List[JsonDict]) -> None: raise NotImplementedError() @abc.abstractmethod - def write_connections(self, connections: List[JsonDict]) -> None: + def write_connections(self, connections: Sequence[JsonMapping]) -> None: """Write the connections of a user. Args: @@ -372,7 +372,7 @@ def write_connections(self, connections: List[JsonDict]) -> None: @abc.abstractmethod def write_account_data( - self, file_name: str, account_data: Mapping[str, JsonDict] + self, file_name: str, account_data: Mapping[str, JsonMapping] ) -> None: """Write the account data of a user. @@ -383,7 +383,7 @@ def write_account_data( raise NotImplementedError() @abc.abstractmethod - def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + def write_media_id(self, media_id: str, media_metadata: JsonMapping) -> None: """Write the media's metadata of a user. Exports only the metadata, as this can be fetched from the database via read only. 
In order to access the files, a connection to the correct diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f1f19666d7cf..1a4d394eda5d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -57,6 +57,7 @@ from synapse.types import ( DeviceListUpdates, JsonDict, + JsonMapping, MutableStateMap, Requester, RoomStreamToken, @@ -1793,19 +1794,23 @@ async def _generate_sync_entry_for_account_data( ) if push_rules_changed: - global_account_data = dict(global_account_data) - global_account_data[ - AccountDataTypes.PUSH_RULES - ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) + global_account_data = { + AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user( + sync_config.user + ), + **global_account_data, + } else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) - global_account_data = dict(all_global_account_data) - global_account_data[ - AccountDataTypes.PUSH_RULES - ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) + global_account_data = { + AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user( + sync_config.user + ), + **all_global_account_data, + } account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( @@ -1909,7 +1914,7 @@ async def _generate_sync_entry_for_rooms( blocks_all_rooms or sync_result_builder.sync_config.filter_collection.blocks_all_room_account_data() ): - account_data_by_room: Mapping[str, Mapping[str, JsonDict]] = {} + account_data_by_room: Mapping[str, Mapping[str, JsonMapping]] = {} elif since_token and not sync_result_builder.full_state: account_data_by_room = ( await self.store.get_updated_room_account_data_for_user( @@ -2349,8 +2354,8 @@ async def _generate_room_entry( sync_result_builder: "SyncResultBuilder", room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], - tags: Optional[Mapping[str, Mapping[str, Any]]], - account_data: Mapping[str, JsonDict], + tags: Optional[Mapping[str, JsonMapping]], + account_data: Mapping[str, JsonMapping], always_include: bool = False, ) -> None: """Populates the `joined` and `archived` section of `sync_result_builder` diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 91898a5c135c..9aaa88e22987 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -39,7 +39,7 @@ from synapse.rest.client._base import client_patterns from synapse.storage.databases.main.registration import ExternalIDReuseException from synapse.storage.databases.main.stats import UserSortOrder -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, JsonMapping, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -211,7 +211,7 @@ def __init__(self, hs: "HomeServer"): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -226,7 +226,7 @@ async def on_GET( async def on_PUT( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -658,7 +658,7 @@ def __init__(self, hs: "HomeServer"): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: target_user = UserID.from_string(user_id) requester = 
await self.auth.get_user_by_req(request) diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index b1f9e9dc9ba5..ce0c4e774202 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -20,7 +20,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.types import JsonDict, RoomID +from synapse.types import JsonDict, JsonMapping, RoomID from ._base import client_patterns @@ -95,7 +95,7 @@ async def on_PUT( async def on_GET( self, request: SynapseRequest, user_id: str, account_data_type: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") @@ -106,7 +106,7 @@ async def on_GET( and account_data_type == AccountDataTypes.PUSH_RULES ): account_data: Optional[ - JsonDict + JsonMapping ] = await self._push_rules_handler.push_rules_for_user(requester.user) else: account_data = await self.store.get_global_account_data_by_type_for_user( @@ -236,7 +236,7 @@ async def on_GET( user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") @@ -253,7 +253,7 @@ async def on_GET( self._hs.config.experimental.msc4010_push_rules_account_data and account_data_type == AccountDataTypes.PUSH_RULES ): - account_data: Optional[JsonDict] = {} + account_data: Optional[JsonMapping] = {} else: account_data = await self.store.get_account_data_for_room_and_type( user_id, room_id, account_data_type diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 8f7bdbc61a7b..80f146dd530a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -43,7 +43,7 @@ MultiWriterIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -119,7 +119,7 @@ def get_max_account_data_stream_id(self) -> int: @cached() async def get_global_account_data_for_user( self, user_id: str - ) -> Mapping[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """ Get all the global client account_data for a user. @@ -164,7 +164,7 @@ def get_global_account_data_for_user( @cached() async def get_room_account_data_for_user( self, user_id: str - ) -> Mapping[str, Mapping[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonMapping]]: """ Get all of the per-room client account_data for a user. @@ -213,7 +213,7 @@ def get_room_account_data_for_user_txn( @cached(num_args=2, max_entries=5000, tree=True) async def get_global_account_data_by_type_for_user( self, user_id: str, data_type: str - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """ Returns: The account data. 
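An editorial aside on the `JsonDict` to `JsonMapping` changes above: `@cached()` hands the very same result object to every caller, so a mutable return type is an invitation to corrupt the cache in place. A minimal toy sketch of the hazard (illustrative names and a toy cache only, not Synapse's actual cache implementation):

```python
# Toy stand-in for a `@cached()` store method: every caller receives
# the *same* dict object that lives in the cache.
from typing import Dict, Mapping

_cache: Dict[str, Dict[str, object]] = {}

def get_global_account_data(user_id: str) -> Mapping[str, object]:
    return _cache.setdefault(user_id, {"m.ignored_user_list": {}})

data = get_global_account_data("@alice:example.com")
# An in-place update here would silently change what every later caller
# sees. Because the declared return type is `Mapping` (which has no
# `__setitem__`), mypy rejects the mutation at type-check time:
# data["m.push_rules"] = {}  # error: unsupported target for indexed assignment
```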
@@ -265,7 +265,7 @@ def get_latest_stream_id_for_global_account_data_by_type_for_user_txn( @cached(num_args=2, tree=True) async def get_account_data_for_room( self, user_id: str, room_id: str - ) -> Mapping[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """Get all the client account_data for a user for a room. Args: @@ -296,7 +296,7 @@ def get_account_data_for_room_txn( @cached(num_args=3, max_entries=5000, tree=True) async def get_account_data_for_room_and_type( self, user_id: str, room_id: str, account_data_type: str - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """Get the client account_data of given type for a user for a room. Args: @@ -394,7 +394,7 @@ def get_updated_room_account_data_txn( async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """Get all the global account_data that's changed for a user. Args: diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py index cf3226ae5a70..654f924019a3 100644 --- a/synapse/storage/databases/main/experimental_features.py +++ b/synapse/storage/databases/main/experimental_features.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict +from typing import TYPE_CHECKING, Dict, FrozenSet from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main import CacheInvalidationWorkerStore -from synapse.types import StrCollection from synapse.util.caches.descriptors import cached if TYPE_CHECKING: @@ -34,7 +33,7 @@ def __init__( super().__init__(database, db_conn, hs) @cached() - async def list_enabled_features(self, user_id: str) -> StrCollection: + async def list_enabled_features(self, user_id: str) -> FrozenSet[str]: """ Checks to see what features are enabled for a given user Args: @@ -49,7 +48,7 @@ async def list_enabled_features(self, user_id: str) -> StrCollection: ["feature"], ) - return [feature["feature"] for feature in enabled] + return frozenset(feature["feature"] for feature in enabled) async def set_features_for_user( self, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index c149a9eacba7..61403a98cf95 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -23,7 +23,7 @@ from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.account_data import AccountDataWorkerStore from synapse.storage.util.id_generators import AbstractStreamIdGenerator -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -34,7 +34,7 @@ class TagsWorkerStore(AccountDataWorkerStore): @cached() async def get_tags_for_user( self, user_id: str - ) -> Mapping[str, Mapping[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonMapping]]: """Get all the tags for a user. 
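In the same spirit, `list_enabled_features` above now returns a `FrozenSet[str]` rather than a list: the cached value cannot be mutated by callers, and membership checks stay O(1). A small sketch under assumed row shapes (the dict-style rows mirror the diff; everything else is illustrative):

```python
from typing import Dict, FrozenSet, List

def features_from_rows(rows: List[Dict[str, str]]) -> FrozenSet[str]:
    # Same shape as the change above: collapse DB rows into an
    # immutable set of enabled feature names.
    return frozenset(row["feature"] for row in rows)

features = features_from_rows([{"feature": "msc3881"}, {"feature": "msc3967"}])
assert "msc3881" in features  # O(1) membership test
# features.add("msc0000")    # AttributeError: frozenset cannot be mutated
```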
@@ -109,7 +109,7 @@ def get_all_updated_tags_txn( async def get_updated_tags( self, user_id: str, stream_id: int - ) -> Mapping[str, Mapping[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonMapping]]: """Get all the tags for the rooms where the tags have changed since the given version

From eef2b9e34418e902baab1e730eb805eb56034cc2 Mon Sep 17 00:00:00 2001
From: Hanadi 
Date: Mon, 18 Sep 2023 16:37:51 +0200
Subject: [PATCH 65/75] Filter locked users in the admin API (#16328)

Co-authored-by: Hanadi Tamimi
---
 changelog.d/16328.feature | 1 +
 docs/admin_api/user_admin_api.md | 17 +++++++++-----
 synapse/rest/admin/users.py | 6 ++++-
 synapse/storage/databases/main/__init__.py | 7 +++++-
 synapse/storage/databases/main/stats.py | 1 +
 tests/rest/admin/test_user.py | 26 ++++++++++++++++++++++
 6 files changed, 51 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/16328.feature

diff --git a/changelog.d/16328.feature b/changelog.d/16328.feature new file mode 100644 index 000000000000..9fadf766cc8c --- /dev/null +++ b/changelog.d/16328.feature @@ -0,0 +1 @@ +Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 975a7a0da4ab..f83facabe4d6 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -54,7 +54,8 @@ It returns a JSON body like the following: "external_id": "" } ], - "user_type": null + "user_type": null, + "locked": false } ``` @@ -103,7 +104,8 @@ with a body of: ], "admin": false, "deactivated": false, - "user_type": null + "user_type": null, + "locked": false } ``` @@ -184,7 +186,8 @@ A response body like the following is returned: "shadow_banned": 0, "displayname": "", "avatar_url": null, - "creation_ts": 1560432668000 + "creation_ts": 1560432668000, + "locked": false }, { "name": "", "is_guest": 0, @@ -195,7 +198,8 @@ A response body like the following is returned: "shadow_banned": 0, "displayname": "", "avatar_url": "", - "creation_ts": 1561550621000 + "creation_ts": 1561550621000, + "locked": false } ], "next_token": "100", @@ -249,6 +253,8 @@ The following parameters should be set in the URL: - `not_user_type` - Exclude certain user types, such as bot users, from the request. Can be provided multiple times. Possible values are `bot`, `support` or "empty string". "empty string" here means to exclude users without a type. +- `locked` - string representing a bool - Optional. If `true`, locked users are **included** in the results. + Defaults to `false`, which excludes locked users. Note: Introduced in v1.93. Caution. The database only has indexes on the columns `name` and `creation_ts`. This means that if a different sort order is used (`is_guest`, `admin`, @@ -274,10 +280,11 @@ The following fields are returned in the JSON response body: - `avatar_url` - string - The user's avatar URL if they have set one. - `creation_ts` - integer - The user's creation timestamp in ms. - `last_seen_ts` - integer - The user's last activity timestamp in ms. - + - `locked` - bool - Whether the user has been marked as locked. Note: Introduced in v1.93. - `next_token`: string representing a positive integer - Indication for pagination. See above. - `total` - integer - Total number of media. +*Added in Synapse 1.93:* the `locked` query parameter and response field.
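For readers of the documentation change above, a hypothetical usage sketch of the new query parameter, assuming the List Accounts endpoint `/_synapse/admin/v2/users` and a valid server-admin access token (neither shown in this patch):

```python
import requests

resp = requests.get(
    "https://synapse.example.com/_synapse/admin/v2/users",
    params={"locked": "true"},  # include locked users; omit for the default
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
for user in resp.json()["users"]:
    # The response field added by this patch:
    print(user["name"], "locked" if user["locked"] else "active")
```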
## Query current sessions for a user

diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 9aaa88e22987..5b743a1d03f2 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -66,6 +66,7 @@ class UsersRestServletV2(RestServlet): The parameter `deactivated` can be used to include deactivated users. The parameter `order_by` can be used to order the result. The parameter `not_user_type` can be used to exclude certain user types. Possible values are `bot`, `support` or "empty string". "empty string" here means to exclude users without a type. + The parameter `locked` can be used to include locked users. """ @@ -107,8 +108,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "The guests parameter is not supported when MSC3861 is enabled.", errcode=Codes.INVALID_PARAM, ) - deactivated = parse_boolean(request, "deactivated", default=False) + deactivated = parse_boolean(request, "deactivated", default=False) + locked = parse_boolean(request, "locked", default=False) admins = parse_boolean(request, "admins") # If support for MSC3866 is not enabled, apply no filtering based on the @@ -133,6 +135,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: UserSortOrder.SHADOW_BANNED.value, UserSortOrder.CREATION_TS.value, UserSortOrder.LAST_SEEN_TS.value, + UserSortOrder.LOCKED.value, ), ) @@ -154,6 +157,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: direction, approved, not_user_types, + locked, ) # If support for MSC3866 is not enabled, don't show the approval flag. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 0836e247ef5f..101403578c06 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -175,6 +175,7 @@ async def get_users_paginate( direction: Direction = Direction.FORWARDS, approved: bool = True, not_user_types: Optional[List[str]] = None, + locked: bool = False, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the @@ -194,6 +195,7 @@ async def get_users_paginate( direction: sort ascending or descending approved: whether to include approved users not_user_types: list of user types to exclude + locked: whether to include locked users Returns: A tuple of a list of mappings from user to information and a count of total users. """ @@ -226,6 +228,9 @@ def get_users_paginate_txn( if not deactivated: filters.append("deactivated = 0") + if not locked: + filters.append("locked IS FALSE") + if admins is not None: if admins: filters.append("admin = 1") @@ -290,7 +295,7 @@ def get_users_paginate_txn( sql = f""" SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url, creation_ts * 1000 as creation_ts, approved, - eu.user_id is not null as erased, last_seen_ts + eu.user_id is not null as erased, last_seen_ts, locked {sql_base} ORDER BY {order_by_column} {order}, u.name ASC LIMIT ? OFFSET ?
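As a worked illustration of how the storage change above composes its SQL: each disabled flag appends one predicate to a shared filter list. A simplified sketch of `get_users_paginate_txn`'s approach (the pagination and ordering machinery is elided; variable names follow the diff):

```python
deactivated = False  # as parsed from the query string by `parse_boolean`
locked = False

filters = []
if not deactivated:
    filters.append("deactivated = 0")
if not locked:
    # `locked IS FALSE` yields a definite boolean even for NULL rows,
    # so users whose flag was never set are filtered out as well.
    filters.append("locked IS FALSE")

where_clause = ("WHERE " + " AND ".join(filters)) if filters else ""
print(where_clause)  # WHERE deactivated = 0 AND locked IS FALSE
```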
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 3a2966b9e46a..9d403919e430 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -108,6 +108,7 @@ class UserSortOrder(Enum): SHADOW_BANNED = "shadow_banned" CREATION_TS = "creation_ts" LAST_SEEN_TS = "last_seen_ts" + LOCKED = "locked" class StatsStore(StateDeltasStore): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 761871b933e2..b326ad2c9037 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1146,6 +1146,32 @@ def test_erasure_status(self) -> None: users = {user["name"]: user for user in channel.json_body["users"]} self.assertIs(users[user_id]["erased"], True) + def test_filter_locked(self) -> None: + # Create a new user. + user_id = self.register_user("lockme", "lockme") + + # Lock them + self.get_success(self.store.set_user_locked_status(user_id, True)) + + # Locked user should appear in list users API + channel = self.make_request( + "GET", + self.url + "?locked=true", + access_token=self.admin_user_tok, + ) + users = {user["name"]: user for user in channel.json_body["users"]} + self.assertIn(user_id, users) + self.assertTrue(users[user_id]["locked"]) + + # Locked user should not appear in list users API + channel = self.make_request( + "GET", + self.url + "?locked=false", + access_token=self.admin_user_tok, + ) + users = {user["name"]: user for user in channel.json_body["users"]} + self.assertNotIn(user_id, users) + def _order_test( self, expected_user_list: List[str], From 47d4bb605740db04222d4cc9f083821b6d839a63 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 10:48:02 -0400 Subject: [PATCH 66/75] Stop patching EventBase.__eq__ in tests. (#16349) It is clearer to directly test equality instead of doing indirect assertions via patching __eq__. --- changelog.d/16349.misc | 1 + tests/replication/storage/_base.py | 17 +++++--- tests/replication/storage/test_events.py | 49 +++++++++--------------- 3 files changed, 31 insertions(+), 36 deletions(-) create mode 100644 changelog.d/16349.misc diff --git a/changelog.d/16349.misc b/changelog.d/16349.misc new file mode 100644 index 000000000000..8ce27a15990d --- /dev/null +++ b/changelog.d/16349.misc @@ -0,0 +1 @@ +Avoid patching code in tests. diff --git a/tests/replication/storage/_base.py b/tests/replication/storage/_base.py index de26a62ae19f..afcc80a8b3f5 100644 --- a/tests/replication/storage/_base.py +++ b/tests/replication/storage/_base.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Iterable, Optional +from typing import Any, Callable, Iterable, Optional from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor @@ -47,24 +47,31 @@ def replicate(self) -> None: self.pump(0.1) def check( - self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None + self, + method: str, + args: Iterable[Any], + expected_result: Optional[Any] = None, + asserter: Optional[Callable[[Any, Any, Optional[Any]], None]] = None, ) -> None: + if asserter is None: + asserter = self.assertEqual + master_result = self.get_success(getattr(self.master_store, method)(*args)) worker_result = self.get_success(getattr(self.worker_store, method)(*args)) if expected_result is not None: - self.assertEqual( + asserter( master_result, expected_result, "Expected master result to be %r but was %r" % (expected_result, master_result), ) - self.assertEqual( + asserter( worker_result, expected_result, "Expected worker result to be %r but was %r" % (expected_result, worker_result), ) - self.assertEqual( + asserter( master_result, worker_result, "Worker result %r does not match master result %r" diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index 33c277a38a15..17716253f832 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Callable, Iterable, List, Optional, Tuple +from typing import Any, Iterable, List, Optional, Tuple from canonicaljson import encode_canonical_json from parameterized import parameterized @@ -21,7 +21,7 @@ from synapse.api.constants import ReceiptTypes from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.room import RoomEventSource from synapse.server import HomeServer @@ -46,32 +46,9 @@ logger = logging.getLogger(__name__) -def dict_equals(self: EventBase, other: EventBase) -> bool: - me = encode_canonical_json(self.get_pdu_json()) - them = encode_canonical_json(other.get_pdu_json()) - return me == them - - -def patch__eq__(cls: object) -> Callable[[], None]: - eq = getattr(cls, "__eq__", None) - cls.__eq__ = dict_equals # type: ignore[assignment] - - def unpatch() -> None: - if eq is not None: - cls.__eq__ = eq # type: ignore[method-assign] - - return unpatch - - class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): STORE_TYPE = EventsWorkerStore - def setUp(self) -> None: - # Patch up the equality operator for events so that we can check - # whether lists of events match using assertEqual - self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(EventBase)] - super().setUp() - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) @@ -84,8 +61,14 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) ) - def tearDown(self) -> None: - [unpatch() for unpatch in self.unpatches] + def assertEventsEqual( + self, first: EventBase, second: EventBase, msg: Optional[Any] = None + ) -> None: + self.assertEqual( + encode_canonical_json(first.get_pdu_json()), + encode_canonical_json(second.get_pdu_json()), + msg, + ) def test_get_latest_event_ids_in_room(self) -> None: 
create = self.persist(type="m.room.create", key="", creator=USER_ID) @@ -107,7 +90,7 @@ def test_redactions(self) -> None: msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") self.replicate() - self.check("get_event", [msg.event_id], msg) + self.check("get_event", [msg.event_id], msg, asserter=self.assertEventsEqual) redaction = self.persist(type="m.room.redaction", redacts=msg.event_id) self.replicate() @@ -119,7 +102,9 @@ def test_redactions(self) -> None: redacted = make_event_from_dict( msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() ) - self.check("get_event", [msg.event_id], redacted) + self.check( + "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual + ) def test_backfilled_redactions(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) @@ -127,7 +112,7 @@ def test_backfilled_redactions(self) -> None: msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") self.replicate() - self.check("get_event", [msg.event_id], msg) + self.check("get_event", [msg.event_id], msg, asserter=self.assertEventsEqual) redaction = self.persist( type="m.room.redaction", redacts=msg.event_id, backfill=True @@ -141,7 +126,9 @@ def test_backfilled_redactions(self) -> None: redacted = make_event_from_dict( msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() ) - self.check("get_event", [msg.event_id], redacted) + self.check( + "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual + ) def test_invites(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) From eee2b6642ddb28a6e0f850b958baa39eb74dc891 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 11:30:43 -0400 Subject: [PATCH 67/75] Bump ruff from 0.0.286 to 0.0.290 (#16342) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Patrick Cloke --- poetry.lock | 38 +++++++++---------- pyproject.toml | 2 +- .../databases/main/event_push_actions.py | 5 +-- synapse/storage/databases/main/events.py | 10 +---- 4 files changed, 22 insertions(+), 33 deletions(-) diff --git a/poetry.lock b/poetry.lock index 17d0993a8b50..8264e814b471 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2332,28 +2332,28 @@ files = [ [[package]] name = "ruff" -version = "0.0.286" +version = "0.0.290" description = "An extremely fast Python linter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"}, - {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"}, - {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"}, - {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"}, - {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"}, - {file = "ruff-0.0.286.tar.gz", hash = "sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"}, + {file = "ruff-0.0.290-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:0e2b09ac4213b11a3520221083866a5816616f3ae9da123037b8ab275066fbac"}, + {file = "ruff-0.0.290-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:4ca6285aa77b3d966be32c9a3cd531655b3d4a0171e1f9bf26d66d0372186767"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35e3550d1d9f2157b0fcc77670f7bb59154f223bff281766e61bdd1dd854e0c5"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d748c8bd97874f5751aed73e8dde379ce32d16338123d07c18b25c9a2796574a"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:982af5ec67cecd099e2ef5e238650407fb40d56304910102d054c109f390bf3c"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:bbd37352cea4ee007c48a44c9bc45a21f7ba70a57edfe46842e346651e2b995a"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d9be6351b7889462912e0b8185a260c0219c35dfd920fb490c7f256f1d8313e"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75cdc7fe32dcf33b7cec306707552dda54632ac29402775b9e212a3c16aad5e6"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb07f37f7aecdbbc91d759c0c09870ce0fb3eed4025eebedf9c4b98c69abd527"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2ab41bc0ba359d3f715fc7b705bdeef19c0461351306b70a4e247f836b9350ed"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:150bf8050214cea5b990945b66433bf9a5e0cef395c9bc0f50569e7de7540c86"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_i686.whl", hash = "sha256:75386ebc15fe5467248c039f5bf6a0cfe7bfc619ffbb8cd62406cd8811815fca"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ac93eadf07bc4ab4c48d8bb4e427bf0f58f3a9c578862eb85d99d704669f5da0"}, + {file = "ruff-0.0.290-py3-none-win32.whl", hash = "sha256:461fbd1fb9ca806d4e3d5c745a30e185f7cf3ca77293cdc17abb2f2a990ad3f7"}, + {file = "ruff-0.0.290-py3-none-win_amd64.whl", hash = "sha256:f1f49f5ec967fd5778813780b12a5650ab0ebcb9ddcca28d642c689b36920796"}, + {file = "ruff-0.0.290-py3-none-win_arm64.whl", hash = "sha256:ae5a92dfbdf1f0c689433c223f8dac0782c2b2584bd502dfdbc76475669f1ba1"}, + {file = "ruff-0.0.290.tar.gz", hash = "sha256:949fecbc5467bb11b8db810a7fa53c7e02633856ee6bd1302b2f43adcd71b88d"}, ] [[package]] @@ -3347,4 +3347,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "4a3a82becd89b91e76e2bc2f8ba72123f665c517d9b841d9a34cd01b83a1adc3" +content-hash = "104f108b3c966be05e17cf9975b4061942b354fe9a57cbf7372371fd56b1bf24" diff --git a/pyproject.toml b/pyproject.toml index 7f1e7731595b..de4dd61ea5ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. 
isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.0.286" +ruff = "0.0.290" # Typechecking lxml-stubs = ">=0.4.0" diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index b958a39aebb1..ba99e63d265a 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1599,10 +1599,7 @@ def _rotate_notifs_before_txn( txn, table="event_push_summary", key_names=("user_id", "room_id", "thread_id"), - key_values=[ - (user_id, room_id, thread_id) - for user_id, room_id, thread_id in summaries - ], + key_values=list(summaries), value_names=("notif_count", "unread_count", "stream_ordering"), value_values=[ ( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index bc8474a5897e..790d058c43f9 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -827,15 +827,7 @@ def _add_chain_cover_index( "target_chain_id", "target_sequence_number", ), - values=[ - (source_id, source_seq, target_id, target_seq) - for ( - source_id, - source_seq, - target_id, - target_seq, - ) in chain_links.get_additions() - ], + values=list(chain_links.get_additions()), ) @staticmethod From 118036eeabf72c268e53f2b7521698643d486387 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 13:21:00 -0400 Subject: [PATCH 68/75] Test against PostgreSQL 16. (#16351) --- .ci/scripts/calculate_jobs.py | 2 +- changelog.d/16351.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16351.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 08af332b6d5c..7575683ab47e 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -64,7 +64,7 @@ def set_output(key: str, value: str): { "python-version": "3.11", "database": "postgres", - "postgres-version": "15", + "postgres-version": "16", "extras": "all", } ) diff --git a/changelog.d/16351.misc b/changelog.d/16351.misc new file mode 100644 index 000000000000..b955b3da0821 --- /dev/null +++ b/changelog.d/16351.misc @@ -0,0 +1 @@ +Test against PostgreSQL 16. From 83f73d5d6720f25d222ae79ef338e539a0385100 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:26:19 -0400 Subject: [PATCH 69/75] Bump furo from 2023.8.19 to 2023.9.10 (#16340) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8264e814b471..e1b3733c458d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -555,13 +555,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.8.19" +version = "2023.9.10" description = "A clean customisable Sphinx documentation theme." 
optional = false python-versions = ">=3.8" files = [ - {file = "furo-2023.8.19-py3-none-any.whl", hash = "sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590"}, - {file = "furo-2023.8.19.tar.gz", hash = "sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1"}, + {file = "furo-2023.9.10-py3-none-any.whl", hash = "sha256:513092538537dc5c596691da06e3c370714ec99bc438680edc1debffb73e5bfc"}, + {file = "furo-2023.9.10.tar.gz", hash = "sha256:5707530a476d2a63b8cad83b4f961f3739a69f4b058bcf38a03a39fa537195b2"}, ] [package.dependencies] From 1f477d65f5602abfa02e19b295b5d8144c10577c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:26:35 -0400 Subject: [PATCH 70/75] Bump serde_json from 1.0.106 to 1.0.107 (#16345) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e233b168396..ea9aa18a5cbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", From 3d60b07cdeb2f91a3d41b6f1190967c1c84bd325 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Tue, 19 Sep 2023 13:24:47 +0200 Subject: [PATCH 71/75] Use string for federation_client_minimum_tls_version documentation examples (#16353) --- changelog.d/16353.doc | 1 + docs/usage/configuration/config_documentation.md | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16353.doc diff --git a/changelog.d/16353.doc b/changelog.d/16353.doc new file mode 100644 index 000000000000..80af22ed5320 --- /dev/null +++ b/changelog.d/16353.doc @@ -0,0 +1 @@ +Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 885a7bf0a37f..54315a417ec1 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1133,14 +1133,14 @@ federation_verify_certificates: false The minimum TLS version that will be used for outbound federation requests. -Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note -that setting this value higher than `1.2` will prevent federation to most -of the public Matrix network: only configure it to `1.3` if you have an +Defaults to `"1"`. Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note +that setting this value higher than `"1.2"` will prevent federation to most +of the public Matrix network: only configure it to `"1.3"` if you have an entirely private federation setup and you can ensure TLS 1.3 support. 
Example configuration: ```yaml -federation_client_minimum_tls_version: 1.2 +federation_client_minimum_tls_version: "1.2" ``` --- ### `federation_certificate_verification_whitelist` From 9caeb9be10202d15bfd4d3f2cff60793b1d553e2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 19 Sep 2023 07:56:49 -0400 Subject: [PATCH 72/75] 1.93.0rc1 --- CHANGES.md | 80 +++++++++++++++++++++++++++++++++++++++ changelog.d/15488.feature | 1 - changelog.d/15997.misc | 1 - changelog.d/16066.bugfix | 1 - changelog.d/16090.misc | 1 - changelog.d/16137.feature | 1 - changelog.d/16170.bugfix | 1 - changelog.d/16171.bugfix | 1 - changelog.d/16172.bugfix | 1 - changelog.d/16174.bugfix | 1 - changelog.d/16219.feature | 1 - changelog.d/16227.feature | 1 - changelog.d/16235.misc | 1 - changelog.d/16240.misc | 1 - changelog.d/16248.misc | 1 - changelog.d/16251.bugfix | 1 - changelog.d/16252.bugfix | 1 - changelog.d/16257.bugfix | 1 - changelog.d/16260.misc | 1 - changelog.d/16261.misc | 1 - changelog.d/16262.feature | 1 - changelog.d/16263.misc | 1 - changelog.d/16264.misc | 1 - changelog.d/16265.feature | 1 - changelog.d/16272.bugfix | 1 - changelog.d/16273.misc | 1 - changelog.d/16274.feature | 1 - changelog.d/16276.misc | 1 - changelog.d/16277.misc | 1 - changelog.d/16278.misc | 1 - changelog.d/16280.misc | 1 - changelog.d/16281.misc | 1 - changelog.d/16282.doc | 1 - changelog.d/16283.misc | 1 - changelog.d/16288.bugfix | 1 - changelog.d/16298.misc | 1 - changelog.d/16299.misc | 1 - changelog.d/16300.misc | 1 - changelog.d/16301.misc | 1 - changelog.d/16304.doc | 1 - changelog.d/16309.misc | 1 - changelog.d/16311.misc | 1 - changelog.d/16312.misc | 1 - changelog.d/16313.misc | 1 - changelog.d/16314.misc | 1 - changelog.d/16315.misc | 1 - changelog.d/16316.misc | 1 - changelog.d/16318.misc | 1 - changelog.d/16325.misc | 1 - changelog.d/16326.misc | 1 - changelog.d/16327.bugfix | 1 - changelog.d/16328.feature | 1 - changelog.d/16329.bugfix | 1 - changelog.d/16347.misc | 1 - changelog.d/16349.misc | 1 - changelog.d/16351.misc | 1 - changelog.d/16353.doc | 1 - debian/changelog | 6 +++ pyproject.toml | 2 +- 59 files changed, 87 insertions(+), 57 deletions(-) delete mode 100644 changelog.d/15488.feature delete mode 100644 changelog.d/15997.misc delete mode 100644 changelog.d/16066.bugfix delete mode 100644 changelog.d/16090.misc delete mode 100644 changelog.d/16137.feature delete mode 100644 changelog.d/16170.bugfix delete mode 100644 changelog.d/16171.bugfix delete mode 100644 changelog.d/16172.bugfix delete mode 100644 changelog.d/16174.bugfix delete mode 100644 changelog.d/16219.feature delete mode 100644 changelog.d/16227.feature delete mode 100644 changelog.d/16235.misc delete mode 100644 changelog.d/16240.misc delete mode 100644 changelog.d/16248.misc delete mode 100644 changelog.d/16251.bugfix delete mode 100644 changelog.d/16252.bugfix delete mode 100644 changelog.d/16257.bugfix delete mode 100644 changelog.d/16260.misc delete mode 100644 changelog.d/16261.misc delete mode 100644 changelog.d/16262.feature delete mode 100644 changelog.d/16263.misc delete mode 100644 changelog.d/16264.misc delete mode 100644 changelog.d/16265.feature delete mode 100644 changelog.d/16272.bugfix delete mode 100644 changelog.d/16273.misc delete mode 100644 changelog.d/16274.feature delete mode 100644 changelog.d/16276.misc delete mode 100644 changelog.d/16277.misc delete mode 100644 changelog.d/16278.misc delete mode 100644 changelog.d/16280.misc delete mode 100644 changelog.d/16281.misc delete mode 100644 changelog.d/16282.doc 
delete mode 100644 changelog.d/16283.misc delete mode 100644 changelog.d/16288.bugfix delete mode 100644 changelog.d/16298.misc delete mode 100644 changelog.d/16299.misc delete mode 100644 changelog.d/16300.misc delete mode 100644 changelog.d/16301.misc delete mode 100644 changelog.d/16304.doc delete mode 100644 changelog.d/16309.misc delete mode 100644 changelog.d/16311.misc delete mode 100644 changelog.d/16312.misc delete mode 100644 changelog.d/16313.misc delete mode 100644 changelog.d/16314.misc delete mode 100644 changelog.d/16315.misc delete mode 100644 changelog.d/16316.misc delete mode 100644 changelog.d/16318.misc delete mode 100644 changelog.d/16325.misc delete mode 100644 changelog.d/16326.misc delete mode 100644 changelog.d/16327.bugfix delete mode 100644 changelog.d/16328.feature delete mode 100644 changelog.d/16329.bugfix delete mode 100644 changelog.d/16347.misc delete mode 100644 changelog.d/16349.misc delete mode 100644 changelog.d/16351.misc delete mode 100644 changelog.d/16353.doc diff --git a/CHANGES.md b/CHANGES.md index b59503e083ca..f95a894c4f59 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,83 @@ +# Synapse 1.93.0rc1 (2023-09-19) + +### Features + +- Add automatic purge after all users have forgotten a room. Also restore in-progress purges/shutdowns of rooms after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488)) +- Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137)) +- Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219)) +- Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227)) +- Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262)) +- Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265)) +- Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274)) +- Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328)) + +### Bugfixes + +- Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174)) +- Fix a long-standing bug where appservices using MSC2409 to receive to_device messages would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251)) +- Fix a bug when using workers where Synapse could end up re-requesting the same remote device repeatedly.
([\#16252](https://github.com/matrix-org/synapse/issues/16252)) +- Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257)) +- Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272)) +- Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288)) +- Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327)) +- Use standard name for UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329)) + +### Improved Documentation + +- Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282)) +- Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304)) +- Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353)) + +### Internal Changes + +- Allow modules to delete rooms. ([\#15997](https://github.com/matrix-org/synapse/issues/15997)) +- Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. ([\#16090](https://github.com/matrix-org/synapse/issues/16090), [\#16263](https://github.com/matrix-org/synapse/issues/16263)) +- Fix type checking when using the new version of Twisted. ([\#16235](https://github.com/matrix-org/synapse/issues/16235)) +- Delete device messages asynchronously and in staged batches using the task scheduler. ([\#16240](https://github.com/matrix-org/synapse/issues/16240), [\#16311](https://github.com/matrix-org/synapse/issues/16311), [\#16312](https://github.com/matrix-org/synapse/issues/16312), [\#16313](https://github.com/matrix-org/synapse/issues/16313)) +- Bump minimum supported Rust version to 1.61.0. ([\#16248](https://github.com/matrix-org/synapse/issues/16248)) +- Update rust to version 1.71.1 in the nix development environment. ([\#16260](https://github.com/matrix-org/synapse/issues/16260)) +- Simplify server key storage. ([\#16261](https://github.com/matrix-org/synapse/issues/16261)) +- Reduce CPU overhead of change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264)) +- Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273)) +- Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326)) +- Raise setuptools_rust version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277)) +- Fix using the new task scheduler causing lots of CPU to be used. ([\#16278](https://github.com/matrix-org/synapse/issues/16278)) +- Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280)) +- Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281)) +- Enable additional linting checks. 
([\#16283](https://github.com/matrix-org/synapse/issues/16283)) +- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298)) +- Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299)) +- Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300)) +- Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309)) +- Remove a reference cycle in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314)) +- Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315)) +- Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316)) +- Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318)) +- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347)) +- Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349)) +- Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351)) + +### Updates to locked dependencies + +* Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295)) +* Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336)) +* Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339)) +* Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337)) +* Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338)) +* Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340)) +* Bump gitpython from 3.1.32 to 3.1.34. ([\#16267](https://github.com/matrix-org/synapse/issues/16267)) +* Bump gitpython from 3.1.34 to 3.1.35. ([\#16279](https://github.com/matrix-org/synapse/issues/16279)) +* Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291)) +* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344)) +* Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233)) +* Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342)) +* Bump serde_json from 1.0.105 to 1.0.106. ([\#16296](https://github.com/matrix-org/synapse/issues/16296)) +* Bump serde_json from 1.0.106 to 1.0.107. ([\#16345](https://github.com/matrix-org/synapse/issues/16345)) +* Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235)) +* Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293)) +* Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292)) +* Bump typing-extensions from 4.7.1 to 4.8.0. ([\#16341](https://github.com/matrix-org/synapse/issues/16341)) + # Synapse 1.92.3 (2023-09-18) This is again a security update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863).
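The long run of `changelog.d/*` deletions that follows is ordinary release mechanics: the per-PR newsfragments were just compiled into the `CHANGES.md` section above and are then removed. A hedged sketch of the kind of towncrier invocation that produces both halves of this diff (Synapse's actual release script may wrap this differently):

```python
import subprocess

# Render the 1.93.0rc1 section into CHANGES.md and delete the consumed
# newsfragments under changelog.d/ ("--yes" skips the removal prompt).
subprocess.run(
    ["towncrier", "build", "--version", "1.93.0rc1", "--yes"],
    check=True,
)
```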
diff --git a/changelog.d/15488.feature b/changelog.d/15488.feature deleted file mode 100644 index 8684d84192a9..000000000000 --- a/changelog.d/15488.feature +++ /dev/null @@ -1 +0,0 @@ -Add automatic purge after all users forgotten a room. Also add restore of purge/shutdown rooms after a synapse restart. diff --git a/changelog.d/15997.misc b/changelog.d/15997.misc deleted file mode 100644 index 94768c3cb82d..000000000000 --- a/changelog.d/15997.misc +++ /dev/null @@ -1 +0,0 @@ -Allow modules to delete rooms. \ No newline at end of file diff --git a/changelog.d/16066.bugfix b/changelog.d/16066.bugfix deleted file mode 100644 index 83649cf42a4a..000000000000 --- a/changelog.d/16066.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16090.misc b/changelog.d/16090.misc deleted file mode 100644 index d54ef936c7d2..000000000000 --- a/changelog.d/16090.misc +++ /dev/null @@ -1 +0,0 @@ -Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. \ No newline at end of file diff --git a/changelog.d/16137.feature b/changelog.d/16137.feature deleted file mode 100644 index bba6f161cdcf..000000000000 --- a/changelog.d/16137.feature +++ /dev/null @@ -1 +0,0 @@ -Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). diff --git a/changelog.d/16170.bugfix b/changelog.d/16170.bugfix deleted file mode 100644 index 83649cf42a4a..000000000000 --- a/changelog.d/16170.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16171.bugfix b/changelog.d/16171.bugfix deleted file mode 100644 index 83649cf42a4a..000000000000 --- a/changelog.d/16171.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16172.bugfix b/changelog.d/16172.bugfix deleted file mode 100644 index 83649cf42a4a..000000000000 --- a/changelog.d/16172.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16174.bugfix b/changelog.d/16174.bugfix deleted file mode 100644 index 83649cf42a4a..000000000000 --- a/changelog.d/16174.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16219.feature b/changelog.d/16219.feature deleted file mode 100644 index c789f2abb761..000000000000 --- a/changelog.d/16219.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. \ No newline at end of file diff --git a/changelog.d/16227.feature b/changelog.d/16227.feature deleted file mode 100644 index 510062b622c0..000000000000 --- a/changelog.d/16227.feature +++ /dev/null @@ -1 +0,0 @@ -Add span information to requests sent to appservices. Contributed by MTRNord. \ No newline at end of file diff --git a/changelog.d/16235.misc b/changelog.d/16235.misc deleted file mode 100644 index b1533f93b608..000000000000 --- a/changelog.d/16235.misc +++ /dev/null @@ -1 +0,0 @@ -Fix type checking when using the new version of Twisted. 
diff --git a/changelog.d/16240.misc b/changelog.d/16240.misc
deleted file mode 100644
index 4f266c1fb029..000000000000
--- a/changelog.d/16240.misc
+++ /dev/null
@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.
diff --git a/changelog.d/16248.misc b/changelog.d/16248.misc
deleted file mode 100644
index 0a5ed6dccb17..000000000000
--- a/changelog.d/16248.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump minimum supported Rust version to 1.61.0.
diff --git a/changelog.d/16251.bugfix b/changelog.d/16251.bugfix
deleted file mode 100644
index 6d3157c7aa31..000000000000
--- a/changelog.d/16251.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user.
\ No newline at end of file
diff --git a/changelog.d/16252.bugfix b/changelog.d/16252.bugfix
deleted file mode 100644
index 881bc00e6153..000000000000
--- a/changelog.d/16252.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly.
diff --git a/changelog.d/16257.bugfix b/changelog.d/16257.bugfix
deleted file mode 100644
index 28a53197493c..000000000000
--- a/changelog.d/16257.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation.
diff --git a/changelog.d/16260.misc b/changelog.d/16260.misc
deleted file mode 100644
index 9f3289d7d4a5..000000000000
--- a/changelog.d/16260.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update rust to version 1.71.1 in the nix development environment.
\ No newline at end of file
diff --git a/changelog.d/16261.misc b/changelog.d/16261.misc
deleted file mode 100644
index d3ad59ca4a47..000000000000
--- a/changelog.d/16261.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify server key storage.
diff --git a/changelog.d/16262.feature b/changelog.d/16262.feature
deleted file mode 100644
index 7c8e7e349bca..000000000000
--- a/changelog.d/16262.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard.
diff --git a/changelog.d/16263.misc b/changelog.d/16263.misc
deleted file mode 100644
index d54ef936c7d2..000000000000
--- a/changelog.d/16263.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled.
\ No newline at end of file
diff --git a/changelog.d/16264.misc b/changelog.d/16264.misc
deleted file mode 100644
index a744434bef06..000000000000
--- a/changelog.d/16264.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce CPU overhead of change password endpoint.
diff --git a/changelog.d/16265.feature b/changelog.d/16265.feature
deleted file mode 100644
index 3ffa16dbcb64..000000000000
--- a/changelog.d/16265.feature
+++ /dev/null
@@ -1 +0,0 @@
-Allow `/notifications` endpoint to be routed to workers.
diff --git a/changelog.d/16272.bugfix b/changelog.d/16272.bugfix
deleted file mode 100644
index afb22a999f90..000000000000
--- a/changelog.d/16272.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Avoid temporary storage of sensitive information.
diff --git a/changelog.d/16273.misc b/changelog.d/16273.misc
deleted file mode 100644
index 19882f6754c6..000000000000
--- a/changelog.d/16273.misc
+++ /dev/null
@@ -1 +0,0 @@
-Stop purging from tables slated for removal.
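The staged-batch deletion mentioned in \#16240 bounds how much work each pass does, so a user with a huge backlog never triggers one enormous `DELETE`. A rough sketch of the pattern using the standard-library `sqlite3` module; the table name and batch size are illustrative, and Synapse's task-scheduler code is considerably more involved:

```python
import sqlite3

BATCH_SIZE = 100  # illustrative stage size; the real value may differ


def delete_device_messages_in_batches(conn: sqlite3.Connection, user_id: str) -> None:
    """Delete one user's pending device messages a bounded batch at a time."""
    while True:
        cur = conn.execute(
            "DELETE FROM device_inbox WHERE rowid IN ("
            " SELECT rowid FROM device_inbox WHERE user_id = ? LIMIT ?)",
            (user_id, BATCH_SIZE),
        )
        conn.commit()  # commit per stage so locks are released between batches
        if cur.rowcount < BATCH_SIZE:
            break  # final partial batch: nothing left to delete
```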
diff --git a/changelog.d/16274.feature b/changelog.d/16274.feature
deleted file mode 100644
index 0d9da2bbef75..000000000000
--- a/changelog.d/16274.feature
+++ /dev/null
@@ -1 +0,0 @@
-Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header.
diff --git a/changelog.d/16276.misc b/changelog.d/16276.misc
deleted file mode 100644
index 93ceaeafc9b9..000000000000
--- a/changelog.d/16276.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hints.
diff --git a/changelog.d/16277.misc b/changelog.d/16277.misc
deleted file mode 100644
index c131a46ec32b..000000000000
--- a/changelog.d/16277.misc
+++ /dev/null
@@ -1 +0,0 @@
-Raise setuptools_rust version cap to 1.7.0.
diff --git a/changelog.d/16278.misc b/changelog.d/16278.misc
deleted file mode 100644
index e82a470c45b6..000000000000
--- a/changelog.d/16278.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix using the new task scheduler causing lots of CPU to be used.
diff --git a/changelog.d/16280.misc b/changelog.d/16280.misc
deleted file mode 100644
index 2d8b414a3b48..000000000000
--- a/changelog.d/16280.misc
+++ /dev/null
@@ -1 +0,0 @@
-Upgrade CI run of Python 3.12 from rc1 to rc2.
diff --git a/changelog.d/16281.misc b/changelog.d/16281.misc
deleted file mode 100644
index de48396aff14..000000000000
--- a/changelog.d/16281.misc
+++ /dev/null
@@ -1 +0,0 @@
-Include values in SQL debug when using `execute_values` with Postgres.
diff --git a/changelog.d/16282.doc b/changelog.d/16282.doc
deleted file mode 100644
index b249ea4f9fb4..000000000000
--- a/changelog.d/16282.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typos in the documentation.
diff --git a/changelog.d/16283.misc b/changelog.d/16283.misc
deleted file mode 100644
index 4b9d6f76aef9..000000000000
--- a/changelog.d/16283.misc
+++ /dev/null
@@ -1 +0,0 @@
-Enable additional linting checks.
diff --git a/changelog.d/16288.bugfix b/changelog.d/16288.bugfix
deleted file mode 100644
index f08d10d1f3c5..000000000000
--- a/changelog.d/16288.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi.
diff --git a/changelog.d/16298.misc b/changelog.d/16298.misc
deleted file mode 100644
index 75b546d42414..000000000000
--- a/changelog.d/16298.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't try refetching device lists for users on remote hosts that are marked as "down".
diff --git a/changelog.d/16299.misc b/changelog.d/16299.misc
deleted file mode 100644
index d4546691518d..000000000000
--- a/changelog.d/16299.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `receipts_graph` Postgres transactions to stop error messages.
diff --git a/changelog.d/16300.misc b/changelog.d/16300.misc
deleted file mode 100644
index 8cc2e523699b..000000000000
--- a/changelog.d/16300.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump mypy from 1.4.1 to 1.5.1.
diff --git a/changelog.d/16301.misc b/changelog.d/16301.misc
deleted file mode 100644
index 93ceaeafc9b9..000000000000
--- a/changelog.d/16301.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hints.
diff --git a/changelog.d/16304.doc b/changelog.d/16304.doc
deleted file mode 100644
index 53660ec9a4ce..000000000000
--- a/changelog.d/16304.doc
+++ /dev/null
@@ -1 +0,0 @@
-Link to the Alpine Linux community package for Synapse.
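The gist of \#16281 is that the argument list handed to `psycopg2.extras.execute_values` gets logged next to the templated query, so the debug output shows the actual values rather than a bare `%s` placeholder. A hedged sketch; the table and columns are hypothetical and this is not Synapse's actual logging code:

```python
import logging

from psycopg2.extras import execute_values

logger = logging.getLogger(__name__)


def insert_rows(conn, rows):
    # Hypothetical table/columns; the point is logging the values alongside
    # the %s template, which on its own says nothing about what was inserted.
    sql = "INSERT INTO receipts (room_id, user_id, event_id) VALUES %s"
    logger.debug("Executing %s with values %r", sql, rows)
    with conn.cursor() as cur:
        execute_values(cur, sql, rows)
    conn.commit()
```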
diff --git a/changelog.d/16309.misc b/changelog.d/16309.misc
deleted file mode 100644
index bef5563ee99d..000000000000
--- a/changelog.d/16309.misc
+++ /dev/null
@@ -1 +0,0 @@
-Small improvements to logging in replication code.
diff --git a/changelog.d/16311.misc b/changelog.d/16311.misc
deleted file mode 100644
index 4f266c1fb029..000000000000
--- a/changelog.d/16311.misc
+++ /dev/null
@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.
diff --git a/changelog.d/16312.misc b/changelog.d/16312.misc
deleted file mode 100644
index 4f266c1fb029..000000000000
--- a/changelog.d/16312.misc
+++ /dev/null
@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.
diff --git a/changelog.d/16313.misc b/changelog.d/16313.misc
deleted file mode 100644
index 4f266c1fb029..000000000000
--- a/changelog.d/16313.misc
+++ /dev/null
@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.
diff --git a/changelog.d/16314.misc b/changelog.d/16314.misc
deleted file mode 100644
index a32b07112a7d..000000000000
--- a/changelog.d/16314.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove a reference cycle for in background processes.
diff --git a/changelog.d/16315.misc b/changelog.d/16315.misc
deleted file mode 100644
index d88782c979cb..000000000000
--- a/changelog.d/16315.misc
+++ /dev/null
@@ -1 +0,0 @@
-Only use literal strings for background process names.
diff --git a/changelog.d/16316.misc b/changelog.d/16316.misc
deleted file mode 100644
index aa0644f278c4..000000000000
--- a/changelog.d/16316.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `get_user_by_id`.
diff --git a/changelog.d/16318.misc b/changelog.d/16318.misc
deleted file mode 100644
index 1433a2f24645..000000000000
--- a/changelog.d/16318.misc
+++ /dev/null
@@ -1 +0,0 @@
-Speed up task to delete to-device messages.
diff --git a/changelog.d/16325.misc b/changelog.d/16325.misc
deleted file mode 100644
index 93ceaeafc9b9..000000000000
--- a/changelog.d/16325.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hints.
diff --git a/changelog.d/16326.misc b/changelog.d/16326.misc
deleted file mode 100644
index 93ceaeafc9b9..000000000000
--- a/changelog.d/16326.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve type hints.
diff --git a/changelog.d/16327.bugfix b/changelog.d/16327.bugfix
deleted file mode 100644
index be3d1b4f2153..000000000000
--- a/changelog.d/16327.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where invalid receipts would be accepted.
diff --git a/changelog.d/16328.feature b/changelog.d/16328.feature
deleted file mode 100644
index 9fadf766cc8c..000000000000
--- a/changelog.d/16328.feature
+++ /dev/null
@@ -1 +0,0 @@
-Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default.
diff --git a/changelog.d/16329.bugfix b/changelog.d/16329.bugfix
deleted file mode 100644
index 2f1f7e8ffe54..000000000000
--- a/changelog.d/16329.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Use standard name for UTF-8 charset in emails.
diff --git a/changelog.d/16347.misc b/changelog.d/16347.misc
deleted file mode 100644
index f4f5bfb2de62..000000000000
--- a/changelog.d/16347.misc
+++ /dev/null
@@ -1 +0,0 @@
-Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels.
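The UTF-8 fix in \#16329 comes down to using the IANA-registered charset name `utf-8` rather than a nonstandard spelling such as `utf8`, which some strict mail clients reportedly mishandle. For illustration with the standard library:

```python
from email.mime.text import MIMEText

# "utf-8" is the registered MIME charset name; "utf8" is a nonstandard alias.
msg = MIMEText("héllo wörld", "plain", "utf-8")
print(msg["Content-Type"])  # text/plain; charset="utf-8"
```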
diff --git a/changelog.d/16349.misc b/changelog.d/16349.misc
deleted file mode 100644
index 8ce27a15990d..000000000000
--- a/changelog.d/16349.misc
+++ /dev/null
@@ -1 +0,0 @@
-Avoid patching code in tests.
diff --git a/changelog.d/16351.misc b/changelog.d/16351.misc
deleted file mode 100644
index b955b3da0821..000000000000
--- a/changelog.d/16351.misc
+++ /dev/null
@@ -1 +0,0 @@
-Test against PostgreSQL 16.
diff --git a/changelog.d/16353.doc b/changelog.d/16353.doc
deleted file mode 100644
index 80af22ed5320..000000000000
--- a/changelog.d/16353.doc
+++ /dev/null
@@ -1 +0,0 @@
-Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage.
diff --git a/debian/changelog b/debian/changelog
index 254ca26fd81c..192eedd45c7e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.93.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.93.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Sep 2023 11:55:00 +0000
+
 matrix-synapse-py3 (1.92.3) stable; urgency=medium

   * New Synapse release 1.92.3.
diff --git a/pyproject.toml b/pyproject.toml
index de4dd61ea5ac..f69336a73fc6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -95,7 +95,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.92.3"
+version = "1.93.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

From ac293357d05467703c505a50355db79cd8f1911a Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 19 Sep 2023 08:03:38 -0400
Subject: [PATCH 73/75] Tweak changelog.

---
 CHANGES.md | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index f95a894c4f59..7b150f374987 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,11 +2,11 @@

 ### Features

-- Add automatic purge after all users forgotten a room. Also add restore of purge/shutdown rooms after a synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Add automatic purge after all users have forgotten a room. Also restores purge/shutdown rooms after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
 - Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
 - Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
 - Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
-- Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
+- Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
 - Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
 - Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
 - Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))
@@ -14,7 +14,7 @@
 ### Bugfixes

 - Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174))
-- Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
+- Fix a long-standing bug where appservices using [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) to receive `to_device` messages would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
 - Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. ([\#16252](https://github.com/matrix-org/synapse/issues/16252))
 - Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257))
 - Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272))
@@ -26,7 +26,7 @@
 - Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282))
 - Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304))
-- Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))
+- Use string for `federation_client_minimum_tls_version` documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))
@@ -40,39 +40,36 @@
 - Reduce CPU overhead of change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264))
 - Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273))
 - Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326))
-- Raise setuptools_rust version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
+- Raise `setuptools_rust` version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
 - Fix using the new task scheduler causing lots of CPU to be used. ([\#16278](https://github.com/matrix-org/synapse/issues/16278))
 - Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
 - Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
 - Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
 - Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
 - Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
-- Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
 - Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
 - Remove a reference cycle for in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
 - Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
 - Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
 - Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))
-- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347))
 - Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349))
 - Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351))

 ### Updates to locked dependencies

+* Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
 * Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295))
 * Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336))
 * Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339))
 * Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337))
 * Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338))
 * Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340))
-* Bump gitpython from 3.1.32 to 3.1.34. ([\#16267](https://github.com/matrix-org/synapse/issues/16267))
-* Bump gitpython from 3.1.34 to 3.1.35. ([\#16279](https://github.com/matrix-org/synapse/issues/16279))
+* Bump gitpython from 3.1.32 to 3.1.35. ([\#16267](https://github.com/matrix-org/synapse/issues/16267), [\#16279](https://github.com/matrix-org/synapse/issues/16279))
 * Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291))
 * Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
 * Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233))
 * Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342))
-* Bump serde_json from 1.0.105 to 1.0.106. ([\#16296](https://github.com/matrix-org/synapse/issues/16296))
-* Bump serde_json from 1.0.106 to 1.0.107. ([\#16345](https://github.com/matrix-org/synapse/issues/16345))
+* Bump serde_json from 1.0.105 to 1.0.107. ([\#16296](https://github.com/matrix-org/synapse/issues/16296), [\#16345](https://github.com/matrix-org/synapse/issues/16345))
 * Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
 * Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293))
 * Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292))

From 4345ca066dc82b2d00da78e60af0184dd2833d9a Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 19 Sep 2023 08:35:44 -0400
Subject: [PATCH 74/75] Update changelog.

---
 CHANGES.md | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 7b150f374987..eb537f9f6a2b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,12 +2,13 @@

 ### Features

-- Add automatic purge after all users have forgotten a room. Also restores purge/shutdown rooms after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Add automatic purge after all users have forgotten a room. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Restore room purge/shutdown after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
 - Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
 - Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
 - Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
 - Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
-- Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
+- Allow the `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
 - Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
 - Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))

@@ -21,6 +22,7 @@
 - Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288))
 - Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327))
 - Use standard name for UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329))
+- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))

 ### Improved Documentation
@@ -45,10 +47,9 @@
 - Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
 - Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
 - Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
-- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
 - Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
 - Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
-- Remove a reference cycle for in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
+- Remove a reference cycle in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
 - Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
 - Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
 - Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))

From 88ba67eb91215a708f321e16559fe3c2c0d0a407 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 26 Sep 2023 15:56:54 +0100
Subject: [PATCH 75/75] 1.93.0

---
 CHANGES.md       | 20 ++++++++++++++++++++
 debian/changelog |  6 ++++++
 pyproject.toml   |  2 +-
 3 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index eb537f9f6a2b..c1ea40de20cc 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,23 @@
+# Synapse 1.93.0 (2023-09-26)
+
+No significant changes since 1.93.0rc1.
+
+
+## Security advisory
+
+The following issues are fixed in 1.93.0 (and RCs).
+
+- [GHSA-4f74-84v3-j9q5](https://github.com/matrix-org/synapse/security/advisories/GHSA-4f74-84v3-j9q5) / [CVE-2023-41335](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-41335) — Low Severity
+
+  Temporary storage of plaintext passwords during password changes.
+
+- [GHSA-7565-cq32-vx2x](https://github.com/matrix-org/synapse/security/advisories/GHSA-7565-cq32-vx2x) / [CVE-2023-42453](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-42453) — Low Severity
+
+  Improper validation of receipts allows forged read receipts.
+
+See the advisories for more details. If you have any questions, email security@matrix.org.
+
+
 # Synapse 1.93.0rc1 (2023-09-19)

 ### Features
diff --git a/debian/changelog b/debian/changelog
index 192eedd45c7e..7be71019b4fc 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.93.0) stable; urgency=medium
+
+  * New Synapse release 1.93.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Sep 2023 15:54:40 +0100
+
 matrix-synapse-py3 (1.93.0~rc1) stable; urgency=medium

   * New synapse release 1.93.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index f69336a73fc6..5a1b0ec437e1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -95,7 +95,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.93.0rc1"
+version = "1.93.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
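Release commits like the one above keep the newest section at the top of CHANGES.md, so the current version can be read off the first heading. An illustrative helper in that spirit; it is not part of Synapse's actual release tooling:

```python
import re


def latest_version(changes: str) -> str:
    """Extract the newest version from the top '# Synapse X.Y.Z (...)' heading."""
    match = re.search(r"^# Synapse (\d+\.\d+\.\d+(?:rc\d+)?)", changes, re.MULTILINE)
    if match is None:
        raise ValueError("no release heading found in CHANGES.md")
    return match.group(1)


with open("CHANGES.md") as f:
    print(latest_version(f.read()))  # e.g. "1.93.0"
```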