From ffd0b4c07958548844291d58e603e9b6d72f8b76 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 2 Dec 2025 16:45:28 +0000
Subject: [PATCH 01/59] Add a 14-day cooldown for dependency updates (#19258)
---
.github/dependabot.yml | 16 ++++++++++++++++
changelog.d/19258.misc | 1 +
2 files changed, 17 insertions(+)
create mode 100644 changelog.d/19258.misc
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 7ce353ed64..34484438c8 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -5,19 +5,35 @@ updates:
directory: "/"
schedule:
interval: "weekly"
+ # Prevent pulling packages that were recently updated to help mitigate
+ # supply chain attacks. 14 days was taken from the recommendation at
+ # https://blog.yossarian.net/2025/11/21/We-should-all-be-using-dependency-cooldowns
+ # where the author noted that 9/10 attacks would have been mitigated by a
+ # two week cooldown.
+ #
+ # The cooldown only applies to general updates; security updates will still
+ # be pulled in as soon as possible.
+ cooldown:
+ default-days: 14
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
+ cooldown:
+ default-days: 14
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
+ cooldown:
+ default-days: 14
- package-ecosystem: "cargo"
directory: "/"
versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"
+ cooldown:
+ default-days: 14
diff --git a/changelog.d/19258.misc b/changelog.d/19258.misc
new file mode 100644
index 0000000000..9155f9d20f
--- /dev/null
+++ b/changelog.d/19258.misc
@@ -0,0 +1 @@
+Require 14 days to pass before pulling in general dependency updates to help mitigate upstream supply chain attacks.
\ No newline at end of file
From 0dfc21ca9f877806bfa9d3cd49432da78466757c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 2 Dec 2025 16:45:41 +0000
Subject: [PATCH 02/59] Remove "Updates to locked dependencies" section from
changelog (#19254)
---
changelog.d/19254.removal | 1 +
scripts-dev/release.py | 50 +--------------------------------------
2 files changed, 2 insertions(+), 49 deletions(-)
create mode 100644 changelog.d/19254.removal
diff --git a/changelog.d/19254.removal b/changelog.d/19254.removal
new file mode 100644
index 0000000000..ee527cef99
--- /dev/null
+++ b/changelog.d/19254.removal
@@ -0,0 +1 @@
+Remove the "Updates to locked dependencies" section from the changelog due to lack of use and the maintenance burden.
\ No newline at end of file
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 17eadbf6c3..3aed4b2f76 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -32,7 +32,7 @@
import urllib.request
from os import path
from tempfile import TemporaryDirectory
-from typing import Any, Match
+from typing import Any
import attr
import click
@@ -968,10 +968,6 @@ def generate_and_write_changelog(
new_changes = new_changes.replace(
"No significant changes.", f"No significant changes since {current_version}."
)
- new_changes += build_dependabot_changelog(
- repo,
- current_version,
- )
# Prepend changes to changelog
with open("CHANGES.md", "r+") as f:
@@ -986,49 +982,5 @@ def generate_and_write_changelog(
os.remove(filename)
-def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str:
- """Summarise dependabot commits between `current_version` and `release_branch`.
-
- Returns an empty string if there have been no such commits; otherwise outputs a
- third-level markdown header followed by an unordered list."""
- last_release_commit = repo.tag("v" + str(current_version)).commit
- rev_spec = f"{last_release_commit.hexsha}.."
- commits = list(git.objects.Commit.iter_items(repo, rev_spec))
- messages = []
- for commit in reversed(commits):
- if commit.author.name == "dependabot[bot]":
- message: str | bytes = commit.message
- if isinstance(message, bytes):
- message = message.decode("utf-8")
- messages.append(message.split("\n", maxsplit=1)[0])
-
- if not messages:
- print(f"No dependabot commits in range {rev_spec}", file=sys.stderr)
- return ""
-
- messages.sort()
-
- def replacer(match: Match[str]) -> str:
- desc = match.group(1)
- number = match.group(2)
- return f"* {desc}. ([\\#{number}](https://github.com/element-hq/synapse/issues/{number}))"
-
- for i, message in enumerate(messages):
- messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message)
- messages.insert(0, "### Updates to locked dependencies\n")
- # Add an extra blank line to the bottom of the section
- messages.append("")
- return "\n".join(messages)
-
-
-@cli.command()
-@click.argument("since")
-def test_dependabot_changelog(since: str) -> None:
- """Test building the dependabot changelog.
-
- Summarises all dependabot commits between the SINCE tag and the current git HEAD."""
- print(build_dependabot_changelog(git.Repo("."), version.Version(since)))
-
-
if __name__ == "__main__":
cli()
From 3d28e2213f3b074eb598381b6b6ee33bd3051384 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 2 Dec 2025 16:45:54 +0000
Subject: [PATCH 03/59] Dependabot: allow 10 open PRs for general updates
(#19253)
---
.github/dependabot.yml | 7 +++++++
changelog.d/19253.misc | 1 +
2 files changed, 8 insertions(+)
create mode 100644 changelog.d/19253.misc
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 34484438c8..cfaa3c04e3 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,8 +1,12 @@
version: 2
+# As dependabot is currently only run on a weekly basis, we raise the
+# open-pull-requests-limit to 10 (from the default of 5) to better ensure we
+# don't continuously grow a backlog of updates.
updates:
- # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
package-ecosystem: "pip"
directory: "/"
+ open-pull-requests-limit: 10
schedule:
interval: "weekly"
# Prevent pulling packages that were recently updated to help mitigate
@@ -18,6 +22,7 @@ updates:
- package-ecosystem: "docker"
directory: "/docker"
+ open-pull-requests-limit: 10
schedule:
interval: "weekly"
cooldown:
@@ -25,6 +30,7 @@ updates:
- package-ecosystem: "github-actions"
directory: "/"
+ open-pull-requests-limit: 10
schedule:
interval: "weekly"
cooldown:
@@ -32,6 +38,7 @@ updates:
- package-ecosystem: "cargo"
directory: "/"
+ open-pull-requests-limit: 10
versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"
diff --git a/changelog.d/19253.misc b/changelog.d/19253.misc
new file mode 100644
index 0000000000..1d45f936f6
--- /dev/null
+++ b/changelog.d/19253.misc
@@ -0,0 +1 @@
+Raise the limit for concurrently-open non-security @dependabot PRs from 5 to 10.
\ No newline at end of file
From f86918e5622406ae73e94d78d45544dcaccde766 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 2 Dec 2025 16:46:08 +0000
Subject: [PATCH 04/59] Remove the currently broken netlify GHA workflow
(#19262)
---
.github/workflows/docs-pr-netlify.yaml | 34 --------------------------
changelog.d/19262.misc | 1 +
2 files changed, 1 insertion(+), 34 deletions(-)
delete mode 100644 .github/workflows/docs-pr-netlify.yaml
create mode 100644 changelog.d/19262.misc
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
deleted file mode 100644
index 53a2d6b597..0000000000
--- a/.github/workflows/docs-pr-netlify.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: Deploy documentation PR preview
-
-on:
- workflow_run:
- workflows: [ "Prepare documentation PR preview" ]
- types:
- - completed
-
-jobs:
- netlify:
- if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
- runs-on: ubuntu-latest
- steps:
- # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
- # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- - name: 📥 Download artifact
- uses: dawidd6/action-download-artifact@ac66b43f0e6a346234dd65d4d0c8fbb31cb316e5 # v11
- with:
- workflow: docs-pr.yaml
- run_id: ${{ github.event.workflow_run.id }}
- name: book
- path: book
-
- - name: 📤 Deploy to Netlify
- uses: matrix-org/netlify-pr-preview@9805cd123fc9a7e421e35340a05e1ebc5dee46b5 # v3
- with:
- path: book
- owner: ${{ github.event.workflow_run.head_repository.owner.login }}
- branch: ${{ github.event.workflow_run.head_branch }}
- revision: ${{ github.event.workflow_run.head_sha }}
- token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
- site_id: ${{ secrets.NETLIFY_SITE_ID }}
- desc: Documentation preview
- deployment_env: PR Documentation Preview
diff --git a/changelog.d/19262.misc b/changelog.d/19262.misc
new file mode 100644
index 0000000000..31906e6623
--- /dev/null
+++ b/changelog.d/19262.misc
@@ -0,0 +1 @@
+Drop the broken netlify documentation workflow until a new one is implemented.
\ No newline at end of file
From 39316672da675458ba2a5809f71114a5844d8906 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 2 Dec 2025 10:58:06 -0600
Subject: [PATCH 05/59] Be able to `shutdown` homeserver that hasn't `setup`
(#19187)
For example, a homeserver can fail to `setup` if it fails to connect to
the database.
Fix https://github.com/element-hq/synapse/issues/19188
Follow-up to https://github.com/element-hq/synapse/pull/18828
### Background
As part of Element's plan to support a light form of vhosting (virtual
hosting, i.e. running multiple instances of Synapse in the same Python
process; cf. Synapse Pro for small hosts), we're currently diving into the
details and implications of running multiple Synapse instances in one
process.
"Clean tenant deprovisioning" tracked internally by
https://github.com/element-hq/synapse-small-hosts/issues/50
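To keep a failed constructor from leaving half-initialized references
behind, this patch wraps the bodies of `Keyring.__init__` and the key
fetcher constructors in a `contextlib.ExitStack`. A minimal sketch of that
cleanup-on-partial-initialization pattern (the `SubResource`/`Component`
names are hypothetical stand-ins for the real classes):

```python
from contextlib import ExitStack


class SubResource:
    """Stand-in for something needing explicit shutdown (e.g. a fetcher or queue)."""

    def shutdown(self) -> None:
        print(f"shutting down {self!r}")


class Component:
    def __init__(self, fail: bool = False) -> None:
        with ExitStack() as exit:
            self._first = SubResource()
            exit.callback(self._first.shutdown)

            if fail:
                # Simulates e.g. `hs.get_datastores()` raising because
                # `HomeServer.setup()` was never called.
                raise RuntimeError("partial initialization")

            self._second = SubResource()
            exit.callback(self._second.shutdown)

            # Reaching the end of the block means construction succeeded, so
            # discard the registered cleanups and keep both resources alive.
            exit.pop_all()
```

If construction fails partway through, the `ExitStack` unwinds and calls the
registered `shutdown` callbacks, so nothing is left holding stale references;
on success `pop_all()` drops those callbacks and the resources stay live.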
---
changelog.d/19187.misc | 1 +
synapse/api/errors.py | 6 +
synapse/crypto/keyring.py | 135 +++++++++++-----
synapse/server.py | 35 +++-
tests/app/test_homeserver_shutdown.py | 221 +++++++++++++++++---------
tests/crypto/test_keyring.py | 15 +-
tests/server.py | 125 +++++++++------
7 files changed, 358 insertions(+), 180 deletions(-)
create mode 100644 changelog.d/19187.misc
diff --git a/changelog.d/19187.misc b/changelog.d/19187.misc
new file mode 100644
index 0000000000..d831de38c8
--- /dev/null
+++ b/changelog.d/19187.misc
@@ -0,0 +1 @@
+Fix `HomeServer.shutdown()` failing if the homeserver hasn't been set up yet.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 37b909a1a7..c299ca84d9 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -856,6 +856,12 @@ def to_synapse_error(self) -> SynapseError:
return ProxiedRequestError(self.code, errmsg, errcode, j)
+class HomeServerNotSetupException(Exception):
+ """
+ Raised when an operation is attempted on the HomeServer before setup() has been called.
+ """
+
+
class ShadowBanError(Exception):
"""
Raised when a shadow-banned user attempts to perform an action.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 3abb644df5..883f682e77 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -21,6 +21,7 @@
import abc
import logging
+from contextlib import ExitStack
from typing import TYPE_CHECKING, Callable, Iterable
import attr
@@ -150,57 +151,81 @@ class Keyring:
"""
def __init__(
- self, hs: "HomeServer", key_fetchers: "Iterable[KeyFetcher] | None" = None
+ self,
+ hs: "HomeServer",
+ test_only_key_fetchers: "list[KeyFetcher] | None" = None,
):
- self.server_name = hs.hostname
+ """
+ Args:
+ hs: The HomeServer instance
+ test_only_key_fetchers: Dependency injection for tests only. If provided,
+ these key fetchers will be used instead of the default ones.
+ """
+ # Clean-up to avoid partial initialization leaving behind references.
+ with ExitStack() as exit:
+ self.server_name = hs.hostname
+
+ self._key_fetchers: list[KeyFetcher] = []
+ if test_only_key_fetchers is None:
+ # Always fetch keys from the database.
+ store_key_fetcher = StoreKeyFetcher(hs)
+ exit.callback(store_key_fetcher.shutdown)
+ self._key_fetchers.append(store_key_fetcher)
+
+ # Fetch keys from configured trusted key servers, if any exist.
+ key_servers = hs.config.key.key_servers
+ if key_servers:
+ perspectives_key_fetcher = PerspectivesKeyFetcher(hs)
+ exit.callback(perspectives_key_fetcher.shutdown)
+ self._key_fetchers.append(perspectives_key_fetcher)
+
+ # Finally, fetch keys from the origin server directly.
+ server_key_fetcher = ServerKeyFetcher(hs)
+ exit.callback(server_key_fetcher.shutdown)
+ self._key_fetchers.append(server_key_fetcher)
+ else:
+ self._key_fetchers = test_only_key_fetchers
+
+ self._fetch_keys_queue: BatchingQueue[
+ _FetchKeyRequest, dict[str, dict[str, FetchKeyResult]]
+ ] = BatchingQueue(
+ name="keyring_server",
+ hs=hs,
+ clock=hs.get_clock(),
+ # The method called to fetch each key
+ process_batch_callback=self._inner_fetch_key_requests,
+ )
+ exit.callback(self._fetch_keys_queue.shutdown)
- if key_fetchers is None:
- # Always fetch keys from the database.
- mutable_key_fetchers: list[KeyFetcher] = [StoreKeyFetcher(hs)]
- # Fetch keys from configured trusted key servers, if any exist.
- key_servers = hs.config.key.key_servers
- if key_servers:
- mutable_key_fetchers.append(PerspectivesKeyFetcher(hs))
- # Finally, fetch keys from the origin server directly.
- mutable_key_fetchers.append(ServerKeyFetcher(hs))
-
- self._key_fetchers: Iterable[KeyFetcher] = tuple(mutable_key_fetchers)
- else:
- self._key_fetchers = key_fetchers
-
- self._fetch_keys_queue: BatchingQueue[
- _FetchKeyRequest, dict[str, dict[str, FetchKeyResult]]
- ] = BatchingQueue(
- name="keyring_server",
- hs=hs,
- clock=hs.get_clock(),
- # The method called to fetch each key
- process_batch_callback=self._inner_fetch_key_requests,
- )
+ self._is_mine_server_name = hs.is_mine_server_name
- self._is_mine_server_name = hs.is_mine_server_name
+ # build a FetchKeyResult for each of our own keys, to shortcircuit the
+ # fetcher.
+ self._local_verify_keys: dict[str, FetchKeyResult] = {}
+ for key_id, key in hs.config.key.old_signing_keys.items():
+ self._local_verify_keys[key_id] = FetchKeyResult(
+ verify_key=key, valid_until_ts=key.expired
+ )
- # build a FetchKeyResult for each of our own keys, to shortcircuit the
- # fetcher.
- self._local_verify_keys: dict[str, FetchKeyResult] = {}
- for key_id, key in hs.config.key.old_signing_keys.items():
- self._local_verify_keys[key_id] = FetchKeyResult(
- verify_key=key, valid_until_ts=key.expired
+ vk = get_verify_key(hs.signing_key)
+ self._local_verify_keys[f"{vk.alg}:{vk.version}"] = FetchKeyResult(
+ verify_key=vk,
+ valid_until_ts=2**63, # fake future timestamp
)
- vk = get_verify_key(hs.signing_key)
- self._local_verify_keys[f"{vk.alg}:{vk.version}"] = FetchKeyResult(
- verify_key=vk,
- valid_until_ts=2**63, # fake future timestamp
- )
+ # We reached the end of the block which means everything was successful, so
+ # no exit handlers are needed (remove them all).
+ exit.pop_all()
def shutdown(self) -> None:
"""
Prepares the Keyring for garbage collection by shutting down its queues.
"""
self._fetch_keys_queue.shutdown()
+
for key_fetcher in self._key_fetchers:
key_fetcher.shutdown()
+ self._key_fetchers.clear()
async def verify_json_for_server(
self,
@@ -521,9 +546,21 @@ class StoreKeyFetcher(KeyFetcher):
"""KeyFetcher impl which fetches keys from our data store"""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
- self.store = hs.get_datastores().main
+ # Clean-up to avoid partial initialization leaving behind references.
+ with ExitStack() as exit:
+ super().__init__(hs)
+ # `KeyFetcher` keeps a reference to `hs` which we need to clean up if
+ # something goes wrong so we can cleanly shutdown the homeserver.
+ exit.callback(super().shutdown)
+
+ # An error can be raised here if someone tried to create a `StoreKeyFetcher`
+ # before the homeserver is fully set up (`HomeServerNotSetupException:
+ # HomeServer.setup must be called before getting datastores`).
+ self.store = hs.get_datastores().main
+
+ # We reached the end of the block which means everything was successful, so
+ # no exit handlers are needed (remove them all).
+ exit.pop_all()
async def _fetch_keys(
self, keys_to_fetch: list[_FetchKeyRequest]
@@ -543,9 +580,21 @@ async def _fetch_keys(
class BaseV2KeyFetcher(KeyFetcher):
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
- self.store = hs.get_datastores().main
+ # Clean-up to avoid partial initialization leaving behind references.
+ with ExitStack() as exit:
+ super().__init__(hs)
+ # `KeyFetcher` keeps a reference to `hs` which we need to clean up if
+ # something goes wrong so we can cleanly shutdown the homeserver.
+ exit.callback(super().shutdown)
+
+ # An error can be raised here if someone tried to create a `BaseV2KeyFetcher` subclass
+ # before the homeserver is fully set up (`HomeServerNotSetupException:
+ # HomeServer.setup must be called before getting datastores`).
+ self.store = hs.get_datastores().main
+
+ # We reached the end of the block which means everything was successful, so
+ # no exit handlers are needed (remove them all).
+ exit.pop_all()
async def process_v2_response(
self, from_server: str, response_json: JsonDict, time_added_ms: int
diff --git a/synapse/server.py b/synapse/server.py
index de0a2b098c..88662c5b28 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -54,6 +54,7 @@
from synapse.api.auth.internal import InternalAuth
from synapse.api.auth.mas import MasDelegatedAuth
from synapse.api.auth_blocking import AuthBlocking
+from synapse.api.errors import HomeServerNotSetupException
from synapse.api.filtering import Filtering
from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter
from synapse.app._base import unregister_sighups
@@ -399,7 +400,7 @@ def run_as_background_process(
"""
if self._is_shutdown:
raise Exception(
- f"Cannot start background process. HomeServer has been shutdown {len(self._background_processes)} {len(self.get_clock()._looping_calls)} {len(self.get_clock()._call_id_to_delayed_call)}"
+ "Cannot start background process. HomeServer has been shutdown"
)
# Ignore linter error as this is the one location this should be called.
@@ -466,7 +467,17 @@ async def shutdown(self) -> None:
# TODO: Cleanup replication pieces
- self.get_keyring().shutdown()
+ keyring: Keyring | None = None
+ try:
+ keyring = self.get_keyring()
+ except HomeServerNotSetupException:
+ # If the homeserver wasn't fully set up, the keyring won't have been
+ # created before this point; creating it now will fail, but it cleans
+ # itself up on any partial-initialization problem.
+ pass
+
+ if keyring:
+ keyring.shutdown()
# Cleanup metrics associated with the homeserver
for later_gauge in all_later_gauges_to_clean_up_on_shutdown.values():
@@ -478,8 +489,12 @@ async def shutdown(self) -> None:
self.config.server.server_name
)
- for db in self.get_datastores().databases:
- db.stop_background_updates()
+ try:
+ for db in self.get_datastores().databases:
+ db.stop_background_updates()
+ except HomeServerNotSetupException:
+ # If the homeserver wasn't fully set up, the datastores won't exist
+ pass
if self.should_send_federation():
try:
@@ -513,8 +528,12 @@ async def shutdown(self) -> None:
pass
self._background_processes.clear()
- for db in self.get_datastores().databases:
- db._db_pool.close()
+ try:
+ for db in self.get_datastores().databases:
+ db._db_pool.close()
+ except HomeServerNotSetupException:
+ # If the homeserver wasn't fully set up, the datastores won't exist
+ pass
def register_async_shutdown_handler(
self,
@@ -677,7 +696,9 @@ def get_clock(self) -> Clock:
def get_datastores(self) -> Databases:
if not self.datastores:
- raise Exception("HomeServer.setup must be called before getting datastores")
+ raise HomeServerNotSetupException(
+ "HomeServer.setup must be called before getting datastores"
+ )
return self.datastores
diff --git a/tests/app/test_homeserver_shutdown.py b/tests/app/test_homeserver_shutdown.py
index f127e5571d..0f5d1c7338 100644
--- a/tests/app/test_homeserver_shutdown.py
+++ b/tests/app/test_homeserver_shutdown.py
@@ -19,7 +19,10 @@
#
import gc
+import sys
import weakref
+from typing import Any
+from unittest.mock import patch
from synapse.app.homeserver import SynapseHomeServer
from synapse.logging.context import LoggingContext
@@ -81,45 +84,12 @@ async def shutdown() -> None:
# Ensure the `HomeServer` has been garbage collected by attempting to use the
# weakref to it.
- if hs_ref() is not None:
- self.fail("HomeServer reference should not be valid at this point")
-
- # To help debug this test when it fails, it is useful to leverage the
- # `objgraph` module.
- # The following code serves as an example of what I have found to be useful
- # when tracking down references holding the `SynapseHomeServer` in memory:
- #
- # all_objects = gc.get_objects()
- # for obj in all_objects:
- # try:
- # # These are a subset of types that are typically involved with
- # # holding the `HomeServer` in memory. You may want to inspect
- # # other types as well.
- # if isinstance(obj, DataStore):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # db_obj = obj
- # if isinstance(obj, SynapseHomeServer):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # synapse_hs = obj
- # if isinstance(obj, SynapseSite):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # sysite = obj
- # if isinstance(obj, DatabasePool):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # dbpool = obj
- # except Exception:
- # pass
- #
- # print(sys.getrefcount(hs_ref()), "refs to", hs_ref())
- #
- # # The following values for `max_depth` and `too_many` have been found to
- # # render a useful amount of information without taking an overly long time
- # # to generate the result.
- # objgraph.show_backrefs(synapse_hs, max_depth=10, too_many=10)
+ hs_after_shutdown = hs_ref()
+ if hs_after_shutdown is not None:
+ self.fail(
+ "HomeServer reference should not be valid at this point "
+ f"{get_memory_debug_info_for_object(hs_after_shutdown)}",
+ )
@logcontext_clean
def test_clean_homeserver_shutdown_mid_background_updates(self) -> None:
@@ -165,42 +135,137 @@ async def shutdown() -> None:
# Ensure the `HomeServer` has been garbage collected by attempting to use the
# weakref to it.
- if hs_ref() is not None:
- self.fail("HomeServer reference should not be valid at this point")
-
- # To help debug this test when it fails, it is useful to leverage the
- # `objgraph` module.
- # The following code serves as an example of what I have found to be useful
- # when tracking down references holding the `SynapseHomeServer` in memory:
- #
- # all_objects = gc.get_objects()
- # for obj in all_objects:
- # try:
- # # These are a subset of types that are typically involved with
- # # holding the `HomeServer` in memory. You may want to inspect
- # # other types as well.
- # if isinstance(obj, DataStore):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # db_obj = obj
- # if isinstance(obj, SynapseHomeServer):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # synapse_hs = obj
- # if isinstance(obj, SynapseSite):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # sysite = obj
- # if isinstance(obj, DatabasePool):
- # print(sys.getrefcount(obj), "refs to", obj)
- # if not isinstance(obj, weakref.ProxyType):
- # dbpool = obj
- # except Exception:
- # pass
- #
- # print(sys.getrefcount(hs_ref()), "refs to", hs_ref())
- #
- # # The following values for `max_depth` and `too_many` have been found to
- # # render a useful amount of information without taking an overly long time
- # # to generate the result.
- # objgraph.show_backrefs(synapse_hs, max_depth=10, too_many=10)
+ hs_after_shutdown = hs_ref()
+ if hs_after_shutdown is not None:
+ self.fail(
+ "HomeServer reference should not be valid at this point "
+ f"{get_memory_debug_info_for_object(hs_after_shutdown)}",
+ )
+
+ @logcontext_clean
+ def test_clean_homeserver_shutdown_when_failed_to_setup(self) -> None:
+ """
+ Ensure the `SynapseHomeServer` can be fully shutdown and garbage collected if it
+ fails to be `setup`.
+ """
+ self.reactor, self.clock = get_clock()
+
+ # Patch `hs.setup()` to do nothing, so that the homeserver is not fully setup.
+ with patch.object(SynapseHomeServer, "setup", return_value=None) as mock_setup:
+ # Patch out the call to `start_test_homeserver` since we want access to the
+ # homeserver even before the server is setup (let alone started)
+ with patch("tests.server.start_test_homeserver", return_value=None):
+ self.hs = setup_test_homeserver(
+ cleanup_func=self.addCleanup,
+ reactor=self.reactor,
+ homeserver_to_use=SynapseHomeServer,
+ clock=self.clock,
+ )
+ # Sanity check that we patched the correct method (make sure it was the
+ # thing that was called)
+ mock_setup.assert_called_once_with()
+
+ hs_ref = weakref.ref(self.hs)
+
+ # Run the reactor so any `callWhenRunning` functions can be cleared out.
+ self.reactor.run()
+ # This would normally happen as part of `HomeServer.shutdown` but the `MemoryReactor`
+ # we use in tests doesn't handle this properly (see doc comment)
+ cleanup_test_reactor_system_event_triggers(self.reactor)
+
+ async def shutdown() -> None:
+ # Use a logcontext just to double-check that we don't mangle the logcontext
+ # during shutdown.
+ with LoggingContext(name="hs_shutdown", server_name=self.hs.hostname):
+ await self.hs.shutdown()
+
+ self.get_success(shutdown())
+
+ # Cleanup the internal reference in our test case
+ del self.hs
+
+ # Force garbage collection.
+ gc.collect()
+
+ # Ensure the `HomeServer` has been garbage collected by attempting to use the
+ # weakref to it.
+ hs_after_shutdown = hs_ref()
+ if hs_after_shutdown is not None:
+ self.fail(
+ "HomeServer reference should not be valid at this point "
+ f"{get_memory_debug_info_for_object(hs_after_shutdown)}",
+ )
+
+
+def get_memory_debug_info_for_object(object: Any) -> dict[str, Any]:
+ """
+ Gathers some useful information to make it easier to figure out why the `object` is
+ still in memory.
+
+ Args:
+ object: The object to gather debug information for.
+ """
+ debug: dict[str, Any] = {}
+ if object is not None:
+ # The simplest tracing we can do is show the reference count for the object.
+ debug["reference_count"] = sys.getrefcount(object)
+
+ # Find the list of objects that directly refer to the object.
+ #
+ # Note: The `reference_count` can be >0 but `gc_referrers` can be empty
+ # because all of the objects were frozen. Look at
+ # `gc_global_frozen_object_count` to detect this scenario.
+ referrers = gc.get_referrers(object)
+ debug["gc_referrer_count"] = len(referrers)
+ debug["gc_referrers"] = referrers
+
+ # We don't expect to see frozen objects in normal operation of the
+ # `multi_synapse` shard.
+ #
+ # We can see frozen objects if you forget to `freeze=False` when
+ # starting the `SynapseHomeServer`. Frozen objects mean they are
+ # never considered for garbage collection. If the
+ # `SynapseHomeServer` (or anything that references the homeserver)
+ # is frozen, the homeserver can never be garbage collected and will
+ # linger in memory forever.
+ freeze_count = gc.get_freeze_count()
+ debug["gc_global_frozen_object_count"] = freeze_count
+
+ # To help debug this test when it fails, it is useful to leverage the
+ # `objgraph` module.
+ # The following code serves as an example of what I have found to be useful
+ # when tracking down references holding the `SynapseHomeServer` in memory:
+ #
+ # all_objects = gc.get_objects()
+ # for obj in all_objects:
+ # try:
+ # # These are a subset of types that are typically involved with
+ # # holding the `HomeServer` in memory. You may want to inspect
+ # # other types as well.
+ # if isinstance(obj, DataStore):
+ # print(sys.getrefcount(obj), "refs to", obj)
+ # if not isinstance(obj, weakref.ProxyType):
+ # db_obj = obj
+ # if isinstance(obj, SynapseHomeServer):
+ # print(sys.getrefcount(obj), "refs to", obj)
+ # if not isinstance(obj, weakref.ProxyType):
+ # synapse_hs = obj
+ # if isinstance(obj, SynapseSite):
+ # print(sys.getrefcount(obj), "refs to", obj)
+ # if not isinstance(obj, weakref.ProxyType):
+ # sysite = obj
+ # if isinstance(obj, DatabasePool):
+ # print(sys.getrefcount(obj), "refs to", obj)
+ # if not isinstance(obj, weakref.ProxyType):
+ # dbpool = obj
+ # except Exception:
+ # pass
+ #
+ # print(sys.getrefcount(hs_ref()), "refs to", hs_ref())
+ #
+ # # The following values for `max_depth` and `too_many` have been found to
+ # # render a useful amount of information without taking an overly long time
+ # # to generate the result.
+ # objgraph.show_backrefs(synapse_hs, max_depth=10, too_many=10)
+
+ return debug
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index d3e8da97f8..3cc905f699 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -95,7 +95,12 @@ def check_context(
def test_verify_json_objects_for_server_awaits_previous_requests(self) -> None:
mock_fetcher = Mock()
mock_fetcher.get_keys = Mock()
- kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,))
+ kr = keyring.Keyring(
+ self.hs,
+ test_only_key_fetchers=[
+ mock_fetcher,
+ ],
+ )
# a signed object that we are going to try to validate
key1 = signedjson.key.generate_signing_key("1")
@@ -286,7 +291,7 @@ async def get_keys(
mock_fetcher = Mock()
mock_fetcher.get_keys = Mock(side_effect=get_keys)
kr = keyring.Keyring(
- self.hs, key_fetchers=(StoreKeyFetcher(self.hs), mock_fetcher)
+ self.hs, test_only_key_fetchers=[StoreKeyFetcher(self.hs), mock_fetcher]
)
# sign the json
@@ -313,7 +318,7 @@ async def get_keys(
mock_fetcher = Mock()
mock_fetcher.get_keys = Mock(side_effect=get_keys)
- kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,))
+ kr = keyring.Keyring(self.hs, test_only_key_fetchers=[mock_fetcher])
json1: JsonDict = {}
signedjson.sign.sign_json(json1, "server1", key1)
@@ -363,7 +368,9 @@ async def get_keys2(
mock_fetcher1.get_keys = Mock(side_effect=get_keys1)
mock_fetcher2 = Mock()
mock_fetcher2.get_keys = Mock(side_effect=get_keys2)
- kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher1, mock_fetcher2))
+ kr = keyring.Keyring(
+ self.hs, test_only_key_fetchers=[mock_fetcher1, mock_fetcher2]
+ )
json1: JsonDict = {}
signedjson.sign.sign_json(json1, "server1", key1)
diff --git a/tests/server.py b/tests/server.py
index 30337f3e38..4fb7dea5ec 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -1074,10 +1074,10 @@ def setup_test_homeserver(
If no datastore is supplied, one is created and given to the homeserver.
Args:
- cleanup_func : The function used to register a cleanup routine for
- after the test. If the function returns a Deferred, the
- test case will wait until the Deferred has fired before
- proceeding to the next cleanup function.
+ cleanup_func: The function used to register a cleanup routine for
+ after the test. If the function returns a Deferred, the
+ test case will wait until the Deferred has fired before
+ proceeding to the next cleanup function.
server_name: Homeserver name
config: Homeserver config
reactor: Twisted reactor
@@ -1190,6 +1190,53 @@ def setup_test_homeserver(
cur.close()
db_conn.close()
+ def cleanup() -> None:
+ import psycopg2
+
+ dropped = False
+
+ # Drop the test database
+ db_conn = db_engine.module.connect(
+ dbname=POSTGRES_BASE_DB,
+ user=POSTGRES_USER,
+ host=POSTGRES_HOST,
+ port=POSTGRES_PORT,
+ password=POSTGRES_PASSWORD,
+ )
+ db_engine.attempt_to_set_autocommit(db_conn, True)
+ cur = db_conn.cursor()
+
+ # Try a few times to drop the DB. Some things may hold on to the
+ # database for a few more seconds due to flakiness, preventing
+ # us from dropping it when the test is over. If we can't drop
+ # it, warn and move on.
+ for _ in range(5):
+ try:
+ cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
+ db_conn.commit()
+ dropped = True
+ except psycopg2.OperationalError as e:
+ warnings.warn(
+ "Couldn't drop old db: " + str(e),
+ category=UserWarning,
+ stacklevel=2,
+ )
+ time.sleep(0.5)
+
+ cur.close()
+ db_conn.close()
+
+ if not dropped:
+ warnings.warn(
+ "Failed to drop old DB.",
+ category=UserWarning,
+ stacklevel=2,
+ )
+
+ if not LEAVE_DB:
+ # Register the cleanup hook
+ cleanup_func(cleanup)
+
hs = homeserver_to_use(
server_name,
config=config,
@@ -1224,6 +1271,32 @@ def shutdown_hs_on_cleanup() -> "Deferred[None]":
with patch("synapse.storage.database.make_pool", side_effect=make_fake_db_pool):
hs.setup()
+ # Ideally, setup/start would be separated but since this is historically used
+ # throughout tests, we keep the existing behavior for now. We probably just need to
+ # rename this function.
+ start_test_homeserver(hs=hs, cleanup_func=cleanup_func, reactor=reactor)
+
+ return hs
+
+
+def start_test_homeserver(
+ *,
+ hs: HomeServer,
+ cleanup_func: Callable[[Callable[[], Optional["Deferred[None]"]]], None],
+ reactor: ISynapseReactor,
+) -> None:
+ """
+ Start a homeserver for testing.
+
+ Args:
+ hs: The homeserver to start.
+ cleanup_func: The function used to register a cleanup routine for
+ after the test. If the function returns a Deferred, the
+ test case will wait until the Deferred has fired before
+ proceeding to the next cleanup function.
+ reactor: Twisted reactor
+ """
+
# Register background tasks required by this server. This must be done
# somewhat manually due to the background tasks not being registered
# unless handlers are instantiated.
@@ -1245,53 +1318,11 @@ def shutdown_hs_on_cleanup() -> "Deferred[None]":
# We need to do cleanup on PostgreSQL
def cleanup() -> None:
- import psycopg2
-
# Close all the db pools
db_pool = database_pool()
if db_pool is not None:
db_pool._db_pool.close()
- dropped = False
-
- # Drop the test database
- db_conn = db_engine.module.connect(
- dbname=POSTGRES_BASE_DB,
- user=POSTGRES_USER,
- host=POSTGRES_HOST,
- port=POSTGRES_PORT,
- password=POSTGRES_PASSWORD,
- )
- db_engine.attempt_to_set_autocommit(db_conn, True)
- cur = db_conn.cursor()
-
- # Try a few times to drop the DB. Some things may hold on to the
- # database for a few more seconds due to flakiness, preventing
- # us from dropping it when the test is over. If we can't drop
- # it, warn and move on.
- for _ in range(5):
- try:
- cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
- db_conn.commit()
- dropped = True
- except psycopg2.OperationalError as e:
- warnings.warn(
- "Couldn't drop old db: " + str(e),
- category=UserWarning,
- stacklevel=2,
- )
- time.sleep(0.5)
-
- cur.close()
- db_conn.close()
-
- if not dropped:
- warnings.warn(
- "Failed to drop old DB.",
- category=UserWarning,
- stacklevel=2,
- )
-
if not LEAVE_DB:
# Register the cleanup hook
cleanup_func(cleanup)
@@ -1330,5 +1361,3 @@ def thread_pool() -> threadpool.ThreadPool:
load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs)
load_legacy_password_auth_providers(hs)
-
- return hs
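The shutdown tests in this patch prove that the `HomeServer` was actually
released by taking a `weakref` to it, dropping the strong reference, forcing
a garbage-collection pass, and then checking that the weak reference no
longer resolves. A self-contained sketch of that check (the `Server` class
is a hypothetical stand-in for the homeserver under test):

```python
import gc
import weakref


class Server:
    """Hypothetical stand-in for the HomeServer under test."""


server = Server()
server_ref = weakref.ref(server)

# Drop the only strong reference and force a collection. If anything else
# still referenced the object (directly or via a cycle), the weak reference
# would still resolve and the assertion would fail.
del server
gc.collect()

assert server_ref() is None, "Server is still referenced somewhere"
```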
From 83023ce1e01646fe99bd76664e99907f09ad25a0 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 2 Dec 2025 11:28:46 -0600
Subject: [PATCH 06/59] Be able to `shutdown` homeserver that failed to `start`
(#19232)
For example, a homeserver can fail to `start` if the port is already in
use or the port number is invalid (not 0-65535).
Fix https://github.com/element-hq/synapse/issues/19189
Follow-up to https://github.com/element-hq/synapse/pull/18828
### Background
As part of Element's plan to support a light form of vhosting (virtual
hosting, i.e. running multiple instances of Synapse in the same Python
process; cf. [Synapse Pro for small
hosts](https://docs.element.io/latest/element-server-suite-pro/synapse-pro-for-small-hosts/overview/)),
we're currently diving into the details and implications of running
multiple Synapse instances in one process.
"Clean tenant deprovisioning" tracked internally by
https://github.com/element-hq/synapse-small-hosts/issues/50
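The patch below distinguishes TCP and unix-socket listeners with
`isinstance` checks and uses `typing_extensions.assert_never` so that the
type checker flags any listener variant left unhandled. A minimal sketch of
that exhaustiveness pattern, with simplified stand-in config classes (the
real ones live in `synapse.config.server`):

```python
from typing_extensions import assert_never


class TCPListenerConfig:
    def __init__(self, port: int) -> None:
        self.port = port


class UnixListenerConfig:
    def __init__(self, path: str) -> None:
        self.path = path


ListenerConfig = TCPListenerConfig | UnixListenerConfig


def describe(listener_config: ListenerConfig) -> str:
    if isinstance(listener_config, TCPListenerConfig):
        return f"TCP port {listener_config.port}"
    elif isinstance(listener_config, UnixListenerConfig):
        return f"unix socket {listener_config.path}"
    else:
        # The type checker reports this branch if a new listener type is
        # added to the union without being handled above; at runtime it
        # raises an error.
        assert_never(listener_config)
```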
---
changelog.d/19232.misc | 1 +
synapse/app/_base.py | 114 ++++++++++++++++++++++++++++++-----------
synapse/http/site.py | 7 +++
3 files changed, 91 insertions(+), 31 deletions(-)
create mode 100644 changelog.d/19232.misc
diff --git a/changelog.d/19232.misc b/changelog.d/19232.misc
new file mode 100644
index 0000000000..6e3e2ff649
--- /dev/null
+++ b/changelog.d/19232.misc
@@ -0,0 +1 @@
+Fix `HomeServer.shutdown()` failing if the homeserver failed to `start`.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 52bdb9e0d7..d1ed1201e5 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -41,7 +41,7 @@
from wsgiref.simple_server import WSGIServer
from cryptography.utils import CryptographyDeprecationWarning
-from typing_extensions import ParamSpec
+from typing_extensions import ParamSpec, assert_never
import twisted
from twisted.internet import defer, error, reactor as _reactor
@@ -64,7 +64,12 @@
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
+from synapse.config.server import (
+ ListenerConfig,
+ ManholeConfig,
+ TCPListenerConfig,
+ UnixListenerConfig,
+)
from synapse.crypto import context_factory
from synapse.events.auto_accept_invites import InviteAutoAccepter
from synapse.events.presence_router import load_legacy_presence_router
@@ -413,6 +418,37 @@ def listen_unix(
]
+class ListenerException(RuntimeError):
+ """
+ An exception raised when we fail to listen with the given `ListenerConfig`.
+
+ Attributes:
+ listener_config: The listener config that caused the exception.
+ """
+
+ def __init__(
+ self,
+ listener_config: ListenerConfig,
+ ):
+ listener_human_name = ""
+ port = ""
+ if isinstance(listener_config, TCPListenerConfig):
+ listener_human_name = "TCP port"
+ port = str(listener_config.port)
+ elif isinstance(listener_config, UnixListenerConfig):
+ listener_human_name = "unix socket"
+ port = listener_config.path
+ else:
+ assert_never(listener_config)
+
+ super().__init__(
+ "Failed to listen on %s (%s) with the given listener config: %s"
+ % (listener_human_name, port, listener_config)
+ )
+
+ self.listener_config = listener_config
+
+
def listen_http(
hs: "HomeServer",
listener_config: ListenerConfig,
@@ -447,39 +483,55 @@ def listen_http(
hs=hs,
)
- if isinstance(listener_config, TCPListenerConfig):
- if listener_config.is_tls():
- # refresh_certificate should have been called before this.
- assert context_factory is not None
- ports = listen_ssl(
- listener_config.bind_addresses,
- listener_config.port,
- site,
- context_factory,
- reactor=reactor,
+ try:
+ if isinstance(listener_config, TCPListenerConfig):
+ if listener_config.is_tls():
+ # refresh_certificate should have been called before this.
+ assert context_factory is not None
+ ports = listen_ssl(
+ listener_config.bind_addresses,
+ listener_config.port,
+ site,
+ context_factory,
+ reactor=reactor,
+ )
+ logger.info(
+ "Synapse now listening on TCP port %d (TLS)", listener_config.port
+ )
+ else:
+ ports = listen_tcp(
+ listener_config.bind_addresses,
+ listener_config.port,
+ site,
+ reactor=reactor,
+ )
+ logger.info(
+ "Synapse now listening on TCP port %d", listener_config.port
+ )
+
+ elif isinstance(listener_config, UnixListenerConfig):
+ ports = listen_unix(
+ listener_config.path, listener_config.mode, site, reactor=reactor
)
+ # getHost() returns a UNIXAddress which contains an instance variable of 'name'
+ # encoded as a byte string. Decode as utf-8 so pretty.
logger.info(
- "Synapse now listening on TCP port %d (TLS)", listener_config.port
+ "Synapse now listening on Unix Socket at: %s",
+ ports[0].getHost().name.decode("utf-8"),
)
else:
- ports = listen_tcp(
- listener_config.bind_addresses,
- listener_config.port,
- site,
- reactor=reactor,
- )
- logger.info("Synapse now listening on TCP port %d", listener_config.port)
-
- else:
- ports = listen_unix(
- listener_config.path, listener_config.mode, site, reactor=reactor
- )
- # getHost() returns a UNIXAddress which contains an instance variable of 'name'
- # encoded as a byte string. Decode as utf-8 so pretty.
- logger.info(
- "Synapse now listening on Unix Socket at: %s",
- ports[0].getHost().name.decode("utf-8"),
- )
+ assert_never(listener_config)
+ except Exception as exc:
+ # The Twisted interface says that "Users should not call this function
+ # themselves!" but this appears to be the correct/only way to handle proper cleanup
+ # of the site when things go wrong. In the normal case, a `Port` is created
+ # which we can call `Port.stopListening()` on to do the same thing (but no
+ # `Port` is created when an error occurs).
+ #
+ # We use `site.stopFactory()` instead of `site.doStop()` as the latter assumes
+ # that `site.doStart()` was called (which won't be the case if an error occurs).
+ site.stopFactory()
+ raise ListenerException(listener_config) from exc
return ports
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 03d5d048b1..a1b0b8d9c2 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -815,6 +815,13 @@ def stopFactory(self) -> None:
protocol.transport.loseConnection()
self.connections.clear()
+ # Replace the resource tree with an empty resource to break circular references
+ # to the resource tree which holds a bunch of homeserver references. This is
+ # important if we try to call `hs.shutdown()` after `start` fails. For some
+ # reason, this doesn't seem to be necessary in the normal case where `start`
+ # succeeds and we call `hs.shutdown()` later.
+ self.resource = Resource()
+
def log(self, request: SynapseRequest) -> None: # type: ignore[override]
pass
From aff90a52453174ababf1f5571cc1724369785a3a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Dec 2025 20:03:07 +0000
Subject: [PATCH 07/59] Bump bleach from 6.2.0 to 6.3.0 (#19265)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index a6de2a86f0..44368c4903 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -134,14 +134,14 @@ typecheck = ["mypy"]
[[package]]
name = "bleach"
-version = "6.2.0"
+version = "6.3.0"
description = "An easy safelist-based HTML-sanitizing tool."
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
groups = ["main", "dev"]
files = [
- {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"},
- {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"},
+ {file = "bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6"},
+ {file = "bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22"},
]
[package.dependencies]
From d688daf41c372f0d5790a583b5706fab45cdce92 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 2 Dec 2025 20:08:32 +0000
Subject: [PATCH 08/59] Fix bug where `Duration` was logged incorrectly
(#19267)
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct (run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/19267.bugfix | 1 +
synapse/appservice/scheduler.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/19267.bugfix
diff --git a/changelog.d/19267.bugfix b/changelog.d/19267.bugfix
new file mode 100644
index 0000000000..6c7ed750ec
--- /dev/null
+++ b/changelog.d/19267.bugfix
@@ -0,0 +1 @@
+Fix bug where `Duration` was logged incorrectly.
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index befb4ae44b..c3a83d140c 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -506,7 +506,7 @@ def __init__(
def recover(self) -> None:
delay = Duration(seconds=2**self.backoff_counter)
- logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
+ logger.info("Scheduling retries on %s in %fs", self.service.id, delay.as_secs())
self.scheduled_recovery = self.clock.call_later(
delay,
self.hs.run_as_background_process,
From 93e658bd13860c0fde4e9b80aab7b332f3bd6a0a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 2 Dec 2025 20:27:05 +0000
Subject: [PATCH 09/59] Bump cryptography from 45.0.7 to 46.0.3 (#19266)
Bumps [cryptography](https://github.com/pyca/cryptography) from 45.0.7
to 46.0.3.
Changelog (sourced from cryptography's changelog):

46.0.3 - 2025-10-15

- Fixed compilation when using LibreSSL 4.2.0.

46.0.2 - 2025-09-30

- Updated Windows, macOS, and Linux wheels to be compiled with OpenSSL 3.5.4.

46.0.1 - 2025-09-16

- Fixed an issue where users installing via `pip` on Python 3.14 development
  versions would not properly install a dependency.
- Fixed an issue building the free-threaded macOS 3.14 wheels.

46.0.0 - 2025-09-16

- BACKWARDS INCOMPATIBLE: Support for Python 3.7 has been removed.
- Support for OpenSSL < 3.0 is deprecated and will be removed in the next
  release.
- Support for x86_64 macOS (including publishing wheels) is deprecated and
  will be removed in two releases. We will switch to publishing an arm64-only
  wheel for macOS.
- Support for 32-bit Windows (including publishing wheels) is deprecated and
  will be removed in two releases. Users should move to a 64-bit Python
  installation.
- Updated Windows, macOS, and Linux wheels to be compiled with OpenSSL 3.5.3.
- We now build ppc64le manylinux wheels and publish them to PyPI.
- We now build win_arm64 (Windows on Arm) wheels and publish them to PyPI.
- Added support for free-threaded Python 3.14.
- Removed the deprecated `get_attribute_for_oid` method on
  `cryptography.x509.CertificateSigningRequest`. Users should use
  `cryptography.x509.Attributes.get_attribute_for_oid` instead.
- Removed the deprecated CAST5, SEED, IDEA, and Blowfish classes from the
  cipher module. These are still available in `hazmat/decrepit`.
- In X.509, when performing a PSS signature with a SHA-3 hash, it is now
  encoded with the official NIST SHA3 OID.
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 264 +++++++++++++++++++++++++++++-----------------------
1 file changed, 150 insertions(+), 114 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 44368c4903..271be1456e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -176,83 +176,100 @@ files = [
[[package]]
name = "cffi"
-version = "1.17.1"
+version = "2.0.0"
description = "Foreign Function Interface for Python calling C code."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main", "dev"]
files = [
- {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
- {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
- {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
- {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
- {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
- {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
- {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
- {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
- {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
- {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
- {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
- {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
- {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
- {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
- {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
- {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
- {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
- {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
- {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
- {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
- {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
- {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
- {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
- {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
- {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
- {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
- {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
- {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
+ {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
+ {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"},
+ {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"},
+ {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"},
+ {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"},
+ {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"},
+ {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"},
+ {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"},
+ {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"},
+ {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"},
+ {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"},
+ {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"},
+ {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"},
+ {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"},
+ {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"},
+ {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"},
+ {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"},
+ {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"},
+ {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"},
+ {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"},
+ {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"},
+ {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"},
+ {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"},
+ {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"},
+ {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"},
+ {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"},
+ {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"},
+ {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"},
+ {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"},
+ {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"},
+ {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"},
+ {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"},
+ {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"},
+ {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"},
+ {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"},
+ {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"},
+ {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"},
+ {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"},
+ {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"},
+ {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"},
+ {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"},
+ {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"},
+ {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"},
+ {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"},
+ {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"},
+ {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"},
+ {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"},
+ {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"},
+ {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"},
+ {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"},
+ {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"},
]
[package.dependencies]
-pycparser = "*"
+pycparser = {version = "*", markers = "implementation_name != \"PyPy\""}
[[package]]
name = "charset-normalizer"
@@ -381,62 +398,80 @@ files = [
[[package]]
name = "cryptography"
-version = "45.0.7"
+version = "46.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
-python-versions = "!=3.9.0,!=3.9.1,>=3.7"
+python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main", "dev"]
files = [
- {file = "cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3"},
- {file = "cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6"},
- {file = "cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd"},
- {file = "cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8"},
- {file = "cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443"},
- {file = "cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17"},
- {file = "cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b"},
- {file = "cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c"},
- {file = "cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5"},
- {file = "cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:de58755d723e86175756f463f2f0bddd45cc36fbd62601228a3f8761c9f58252"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a20e442e917889d1a6b3c570c9e3fa2fdc398c20868abcea268ea33c024c4083"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:258e0dff86d1d891169b5af222d362468a9570e2532923088658aa866eb11130"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d97cf502abe2ab9eff8bd5e4aca274da8d06dd3ef08b759a8d6143f4ad65d4b4"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:c987dad82e8c65ebc985f5dae5e74a3beda9d0a2a4daf8a1115f3772b59e5141"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c13b1e3afd29a5b3b2656257f14669ca8fa8d7956d509926f0b130b600b50ab7"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a862753b36620af6fc54209264f92c716367f2f0ff4624952276a6bbd18cbde"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:06ce84dc14df0bf6ea84666f958e6080cdb6fe1231be2a51f3fc1267d9f3fb34"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d0c5c6bac22b177bf8da7435d9d27a6834ee130309749d162b26c3105c0795a9"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:2f641b64acc00811da98df63df7d59fd4706c0df449da71cb7ac39a0732b40ae"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:f5414a788ecc6ee6bc58560e85ca624258a55ca434884445440a810796ea0e0b"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:1f3d56f73595376f4244646dd5c5870c14c196949807be39e79e7bd9bac3da63"},
- {file = "cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971"},
+ {file = "cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e"},
+ {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926"},
+ {file = "cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71"},
+ {file = "cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac"},
+ {file = "cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018"},
+ {file = "cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb"},
+ {file = "cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c"},
+ {file = "cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665"},
+ {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3"},
+ {file = "cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20"},
+ {file = "cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de"},
+ {file = "cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914"},
+ {file = "cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db"},
+ {file = "cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21"},
+ {file = "cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04"},
+ {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506"},
+ {file = "cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963"},
+ {file = "cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4"},
+ {file = "cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df"},
+ {file = "cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f"},
+ {file = "cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372"},
+ {file = "cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32"},
+ {file = "cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c"},
+ {file = "cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea"},
+ {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b"},
+ {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb"},
+ {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717"},
+ {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9"},
+ {file = "cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c"},
+ {file = "cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1"},
]
[package.dependencies]
-cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""}
+cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""}
+typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11.0\""}
[package.extras]
-docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""]
+docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
-nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""]
-pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
+nox = ["nox[uv] (>=2024.4.15)"]
+pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi (>=2024)", "cryptography-vectors (==45.0.7)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
+test = ["certifi (>=2024)", "cryptography-vectors (==46.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]
[[package]]
@@ -1792,6 +1827,7 @@ description = "C parser in Python"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
groups = ["main", "dev"]
+markers = "implementation_name != \"PyPy\""
files = [
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
From e8710e7c5e45ff511141ba17facda276ecb861b0 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Thu, 4 Dec 2025 23:49:24 +0000
Subject: [PATCH 10/59] Don't include debug logs in `Clock` unless explicitly
enabled (#19278)
Fixes #19276
These debug log lines include stack traces, which create a lot of noise and
are confusing to users because they look like errors. This PR moves them
behind an opt-in logger so they are no longer emitted by default; they can be
re-enabled on demand if deemed necessary in the future (see the sketch below).
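As a rough sketch (not part of this PR), assuming the standard dictConfig-style
YAML log config that Synapse reads, an operator could opt back in by naming the
dedicated logger explicitly. The logger name `synapse.util.clock.debug` comes
from the diff below; because it does not inherit a level from its parent, it
has to be listed by name:

```yaml
# Hypothetical excerpt of a Synapse log config (dictConfig schema).
loggers:
    synapse.util.clock.debug:
        # Opt in to the Clock scheduling debug logs (and their stack traces).
        # The level must be set explicitly; it is not inherited from the
        # parent "synapse" logger.
        level: DEBUG
```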
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct (run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
changelog.d/19278.misc | 1 +
synapse/util/clock.py | 42 ++++++++++++++++++++++++++++++++----------
2 files changed, 33 insertions(+), 10 deletions(-)
create mode 100644 changelog.d/19278.misc
diff --git a/changelog.d/19278.misc b/changelog.d/19278.misc
new file mode 100644
index 0000000000..d1425ff38c
--- /dev/null
+++ b/changelog.d/19278.misc
@@ -0,0 +1 @@
+Don't include debug logs in `Clock` unless explicitly enabled.
diff --git a/synapse/util/clock.py b/synapse/util/clock.py
index 6fd31864b7..4355704f8a 100644
--- a/synapse/util/clock.py
+++ b/synapse/util/clock.py
@@ -29,6 +29,7 @@
from twisted.internet.task import LoopingCall
from synapse.logging import context
+from synapse.logging.loggers import ExplicitlyConfiguredLogger
from synapse.types import ISynapseThreadlessReactor
from synapse.util import log_failure
from synapse.util.duration import Duration
@@ -39,6 +40,25 @@
logger = logging.getLogger(__name__)
+original_logger_class = logging.getLoggerClass()
+logging.setLoggerClass(ExplicitlyConfiguredLogger)
+clock_debug_logger = logging.getLogger("synapse.util.clock.debug")
+"""
+A logger for debugging what is scheduling calls.
+
+Ideally, these wouldn't be gated behind an `ExplicitlyConfiguredLogger` as including logs
+from this logger would be helpful to track when things are being scheduled. However, for
+these logs to be meaningful, they need to include a stack trace to show what initiated the
+call in the first place.
+
+Since the stack traces can create a lot of noise and make the logs hard to read (unless you're
+specifically debugging scheduling issues), we want users to opt in to seeing these logs. To enable
+this, they must explicitly set `synapse.util.clock.debug` in the logging configuration. Note that
+this setting won't inherit the log level from the parent logger.
+"""
+# Restore the original logger class
+logging.setLoggerClass(original_logger_class)
+
class Clock:
"""
@@ -174,7 +194,7 @@ def _looping_call_common(
looping_call_context_string = "looping_call_now"
def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred:
- logger.debug(
+ clock_debug_logger.debug(
"%s(%s): Executing callback", looping_call_context_string, instance_id
)
@@ -222,7 +242,7 @@ def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred:
d.addErrback(log_failure, "Looping call died", consumeErrors=False)
self._looping_calls.append(call)
- logger.debug(
+ clock_debug_logger.debug(
"%s(%s): Scheduled looping call every %sms later",
looping_call_context_string,
instance_id,
@@ -283,7 +303,7 @@ def call_later(
raise Exception("Cannot start delayed call. Clock has been shutdown")
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
- logger.debug("call_later(%s): Executing callback", call_id)
+ clock_debug_logger.debug("call_later(%s): Executing callback", call_id)
assert context.current_context() is context.SENTINEL_CONTEXT, (
"Expected `call_later` callback from the reactor to start with the sentinel logcontext "
@@ -327,7 +347,7 @@ def wrapped_callback(*args: Any, **kwargs: Any) -> None:
delay.as_secs(), wrapped_callback, *args, **kwargs
) # type: ignore[call-later-not-tracked]
- logger.debug(
+ clock_debug_logger.debug(
"call_later(%s): Scheduled call for %ss later (tracked for shutdown: %s)",
call_id,
delay,
@@ -347,7 +367,7 @@ def cancel_call_later(
self, wrapped_call: "DelayedCallWrapper", ignore_errs: bool = False
) -> None:
try:
- logger.debug(
+ clock_debug_logger.debug(
"cancel_call_later: cancelling scheduled call %s", wrapped_call.call_id
)
wrapped_call.delayed_call.cancel()
@@ -367,7 +387,7 @@ def cancel_all_delayed_calls(self, ignore_errs: bool = True) -> None:
# will result in the call removing itself from the map mid-iteration.
for call_id, call in list(self._call_id_to_delayed_call.items()):
try:
- logger.debug(
+ clock_debug_logger.debug(
"cancel_all_delayed_calls: cancelling scheduled call %s", call_id
)
call.cancel()
@@ -396,7 +416,9 @@ def call_when_running(
instance_id = random_string_insecure_fast(5)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
- logger.debug("call_when_running(%s): Executing callback", instance_id)
+ clock_debug_logger.debug(
+ "call_when_running(%s): Executing callback", instance_id
+ )
# Since this callback can be invoked immediately if the reactor is already
# running, we can't always assume that we're running in the sentinel
@@ -436,7 +458,7 @@ def wrapped_callback(*args: Any, **kwargs: Any) -> None:
# callWhenRunning should be called.
self._reactor.callWhenRunning(wrapped_callback, *args, **kwargs) # type: ignore[prefer-synapse-clock-call-when-running]
- logger.debug(
+ clock_debug_logger.debug(
"call_when_running(%s): Scheduled call",
instance_id,
# Find out who is scheduling the call which makes it easy to follow in the
@@ -472,7 +494,7 @@ def add_system_event_trigger(
instance_id = random_string_insecure_fast(5)
def wrapped_callback(*args: Any, **kwargs: Any) -> None:
- logger.debug(
+ clock_debug_logger.debug(
"add_system_event_trigger(%s): Executing %s %s callback",
instance_id,
phase,
@@ -509,7 +531,7 @@ def wrapped_callback(*args: Any, **kwargs: Any) -> None:
# logcontext to the reactor
context.run_in_background(callback, *args, **kwargs)
- logger.debug(
+ clock_debug_logger.debug(
"add_system_event_trigger(%s) for %s %s",
instance_id,
phase,
From a096fba969f43fb89d946d14a8b41ed23b7538f6 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 5 Dec 2025 10:48:01 +0000
Subject: [PATCH 11/59] Group non-breaking dependabot PRs together to reduce
review load (#18402)
---
.github/dependabot.yml | 46 ++++++++++++++++++++++++++++++++++++++++++
changelog.d/18402.misc | 1 +
2 files changed, 47 insertions(+)
create mode 100644 changelog.d/18402.misc
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index cfaa3c04e3..38920ead7a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -9,6 +9,22 @@ updates:
open-pull-requests-limit: 10
schedule:
interval: "weekly"
+ # Group patch updates to packages together into a single PR, as they rarely
+ # if ever contain breaking changes that need to be reviewed separately.
+ #
+ # Fewer PRs mean a more streamlined review process.
+ #
+ # Python packages follow semantic versioning, and tend to only introduce
+ # breaking changes in major version bumps. Thus, we'll group minor and patch
+ # versions together.
+ groups:
+ minor-and-patches:
+ applies-to: version-updates
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
# Prevent pulling packages that were recently updated to help mitigate
# supply chain attacks. 14 days was taken from the recommendation at
# https://blog.yossarian.net/2025/11/21/We-should-all-be-using-dependency-cooldowns
@@ -25,6 +41,16 @@ updates:
open-pull-requests-limit: 10
schedule:
interval: "weekly"
+ # For container versions, breaking changes are also typically only introduced in major
+ # package bumps.
+ groups:
+ minor-and-patches:
+ applies-to: version-updates
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
cooldown:
default-days: 14
@@ -33,6 +59,16 @@ updates:
open-pull-requests-limit: 10
schedule:
interval: "weekly"
+ # Similarly for GitHub Actions, breaking changes are typically only introduced in major
+ # package bumps.
+ groups:
+ minor-and-patches:
+ applies-to: version-updates
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
cooldown:
default-days: 14
@@ -42,5 +78,15 @@ updates:
versioning-strategy: "lockfile-only"
schedule:
interval: "weekly"
+ # The Rust ecosystem is special in that breaking changes are often introduced
+ # in minor version bumps, as packages typically stay pre-1.0 for a long time.
+ # Thus we specifically keep minor version bumps separate in their own PRs.
+ groups:
+ patches:
+ applies-to: version-updates
+ patterns:
+ - "*"
+ update-types:
+ - "patch"
cooldown:
default-days: 14
diff --git a/changelog.d/18402.misc b/changelog.d/18402.misc
new file mode 100644
index 0000000000..4b13652845
--- /dev/null
+++ b/changelog.d/18402.misc
@@ -0,0 +1 @@
+Group together dependabot update PRs to reduce the review load.
\ No newline at end of file
From 891983f3f42b12f7dedb84768c0bfa7b5dc49473 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 Dec 2025 22:11:58 +0000
Subject: [PATCH 12/59] Bump the minor-and-patches group with 3 updates
(#19280)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the minor-and-patches group with 3 updates:
[mypy](https://github.com/python/mypy),
[mypy-zope](https://github.com/Shoobx/mypy-zope) and
[phonenumbers](https://github.com/daviddrysdale/python-phonenumbers).
Updates `mypy` from 1.17.1 to 1.18.2
Changelog
Sourced from mypy's
changelog.
Mypy 1.18.2
- Fix crash on recursive alias (Ivan Levkivskyi, PR 19845)
- Add additional guidance for stubtest errors when runtime is
object.__init__ (Stephen Morton, PR 19733)
- Fix handling of None values in f-string expressions in mypyc
(BobTheBuidler, PR 19846)
Acknowledgements
Thanks to all mypy contributors who contributed to this release:
- Ali Hamdan
- Anthony Sottile
- BobTheBuidler
- Brian Schubert
- Chainfire
- Charlie Denton
- Christoph Tyralla
- CoolCat467
- Daniel Hnyk
- Emily
- Emma Smith
- Ethan Sarp
- Ivan Levkivskyi
- Jahongir Qurbonov
- Jelle Zijlstra
- Joren Hammudoglu
- Jukka Lehtosalo
- Marc Mueller
- Omer Hadari
- Piotr Sawicki
- PrinceNaroliya
- Randolf Scholz
- Robsdedude
- Saul Shanabrook
- Shantanu
- Stanislav Terliakov
- Stephen Morton
- wyattscarpenter
I’d also like to thank my employer, Dropbox, for supporting mypy
development.
Mypy 1.17
We’ve just uploaded mypy 1.17 to the Python Package Index (PyPI).
Mypy is a static type checker for Python. This release includes new
features and bug fixes.
You can install it as follows:
python3 -m pip install -U mypy
You can read the full documentation for this release on Read the Docs.
... (truncated)
Commits
- `df05f05`: remove +dev from version
- `01a7a12`: Update changelog for 1.18.2 (#19873)
- `ca5abf0`: Typeshed cherry-pick: Make type of unitest.mock.Any a subclass of Any (#1...
- `9d794b5`: [mypyc] fix: inappropriate Nones in f-strings (#19846)
- `2c0510c`: stubtest: additional guidance on errors when runtime is object.__init__ (#19733)
- `2f3f03c`: Bump version to 1.18.2+dev for point release
- `7669841`: Fix crash on recursive alias in indirection.py (#19845)
- `03fbaa9`: bump version to 1.18.1 due to wheels failure
- `b44a1fb`: removed +dev from version
- `7197a99`: Removed Unreleased in the Changelog for Release 1.18 (#19827)
- Additional commits viewable in compare view
Updates `mypy-zope` from 1.0.13 to 1.0.14
Changelog
Sourced from mypy-zope's
changelog.
1.0.14 (2025-12-01)
- Support mypy-1.19
- Support mypy-1.18
Commits
- `38d22f3`: Preparing release 1.0.14
- `76762ec`: Maintain changelog
- `4971d98`: Merge pull request #134 from Shoobx/dependabot/pip/mypy-gte-1.0.0-and-lt-1.20.0
- `47af89d`: Update mypy requirement from <1.19.0,>=1.0.0 to >=1.0.0,<1.20.0
- `0c596ff`: Maintain changelog
- `dcaa278`: Merge pull request #132 from Shoobx/dependabot/pip/mypy-gte-1.0.0-and-lt-1.19.0
- `8f7b677`: Update mypy requirement from <1.18.0,>=1.0.0 to >=1.0.0,<1.19.0
- `91b275b`: Back to development: 1.0.14
- See full diff in compare view
Updates `phonenumbers` from 9.0.18 to 9.0.19
Commits
**Does not** update `pysaml2` from 7.5.0 to 7.5.4 since this would
downgrade pyOpenSSL
Release notes
Sourced from pysaml2's
releases.
Version v7.5.4
v7.5.4 (2025-10-07)
- Minor refactor to handle shelve.open and dbm errors
- Remove import of deprecated cgi module
- Replace deprecated datetime.utcnow() by datetime.now(timezone.utc)
- deps: Remove the importlib_metadata dependency
- deps: Remove the importlib_resources dependency
- deps: Update dependency versions and lockfile
- build: Update pyproject and lockfile to be compatible with PEP 621
- docs: Correct spelling mistakes
- docs: Fix internal references/links
- docs: Clarify units for accepted_time_diff config param
- docs: Correct documentation for contact_person
Version 7.5.3
7.5.3 (2025-10-04)
- #973 Fix prepare_for_negotiated_authenticate to avoid double signing redirect requests
Version 7.5.2
7.5.2 (2025-02-10)
- Include the XSD of the XML Encryption Syntax and Processing Version 1.1 to the schema validator
Version 7.5.1
7.5.1 (2025-02-10)
- deps: restrict pyOpenSSL up to v24.2.1 until it is replaced
- deps: update dependencies for the lockfile and examples
Changelog
Sourced from pysaml2's
changelog.
v7.5.4 (2025-10-07)
- Minor refactor to handle shelve.open and dbm errors
- Remove import of deprecated cgi module
- Replace deprecated datetime.utcnow() by datetime.now(timezone.utc)
- deps: Remove the importlib_metadata dependency
- deps: Remove the importlib_resources dependency
- deps: Update dependency versions and lockfile
- build: Update pyproject and lockfile to be compatible with PEP 621
- docs: Correct spelling mistakes
- docs: Fix internal references/links
- docs: Clarify units for accepted_time_diff config param
- docs: Correct documentation for contact_person
7.5.3 (2025-10-04)
- #973 Fix prepare_for_negotiated_authenticate to avoid double signing redirect requests
7.5.2 (2025-02-10)
- Include the XSD of the XML Encryption Syntax and Processing Version 1.1 to the schema validator
7.5.1 (2025-02-10)
- deps: restrict pyOpenSSL up to v24.2.1 until it is replaced
- deps: update dependencies for the lockfile and examples
Commits
- `9cf71f7`: Release version 7.5.4
- `c3ec719`: Refactor _shelve_compat
- `1d6ea60`: Remove import of deprecated cgi module
- `c45eb9d`: Replace deprecated datetime.utcnow() by datetime.now(timezone.utc)
- `178f6d1`: Remove unneeded dependencies
- `1f0a25a`: remove importlib_metadata import
- `099f716`: remove importlib_resources imports
- `3fa11ee`: spelling updates.
- `4b7887f`: update link.
- `bc8d3b4`: update link.
- Additional commits viewable in compare view
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's major version (unless you unignore this specific
dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's minor version (unless you unignore this specific
dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR
and stop Dependabot creating any more for the specific dependency
(unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore
conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will
remove the ignore condition of the specified dependency and ignore
conditions
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Devon Hudson
---
poetry.lock | 229 ++++++++++++++++--------
synapse/handlers/room_member.py | 2 +-
synapse/storage/databases/main/state.py | 2 +-
synapse/util/iterutils.py | 3 +-
tests/rest/admin/test_device.py | 7 +-
5 files changed, 164 insertions(+), 79 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 271be1456e..5fe740a025 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -31,7 +31,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
+markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
files = [
{file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"},
{file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"},
@@ -481,7 +481,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -506,7 +506,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -556,7 +556,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"redis\""
+markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"},
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"},
@@ -879,7 +879,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -1017,7 +1017,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
+markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1026,6 +1026,92 @@ files = [
[package.dependencies]
pyasn1 = ">=0.4.6"
+[[package]]
+name = "librt"
+version = "0.6.3"
+description = "Mypyc runtime library"
+optional = false
+python-versions = ">=3.9"
+groups = ["dev"]
+files = [
+ {file = "librt-0.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:45660d26569cc22ed30adf583389d8a0d1b468f8b5e518fcf9bfe2cd298f9dd1"},
+ {file = "librt-0.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:54f3b2177fb892d47f8016f1087d21654b44f7fc4cf6571c1c6b3ea531ab0fcf"},
+ {file = "librt-0.6.3-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c5b31bed2c2f2fa1fcb4815b75f931121ae210dc89a3d607fb1725f5907f1437"},
+ {file = "librt-0.6.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f8ed5053ef9fb08d34f1fd80ff093ccbd1f67f147633a84cf4a7d9b09c0f089"},
+ {file = "librt-0.6.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3f0e4bd9bcb0ee34fa3dbedb05570da50b285f49e52c07a241da967840432513"},
+ {file = "librt-0.6.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d8f89c8d20dfa648a3f0a56861946eb00e5b00d6b00eea14bc5532b2fcfa8ef1"},
+ {file = "librt-0.6.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ecc2c526547eacd20cb9fbba19a5268611dbc70c346499656d6cf30fae328977"},
+ {file = "librt-0.6.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fbedeb9b48614d662822ee514567d2d49a8012037fc7b4cd63f282642c2f4b7d"},
+ {file = "librt-0.6.3-cp310-cp310-win32.whl", hash = "sha256:0765b0fe0927d189ee14b087cd595ae636bef04992e03fe6dfdaa383866c8a46"},
+ {file = "librt-0.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:8c659f9fb8a2f16dc4131b803fa0144c1dadcb3ab24bb7914d01a6da58ae2457"},
+ {file = "librt-0.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:61348cc488b18d1b1ff9f3e5fcd5ac43ed22d3e13e862489d2267c2337285c08"},
+ {file = "librt-0.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64645b757d617ad5f98c08e07620bc488d4bced9ced91c6279cec418f16056fa"},
+ {file = "librt-0.6.3-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:26b8026393920320bb9a811b691d73c5981385d537ffc5b6e22e53f7b65d4122"},
+ {file = "librt-0.6.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d998b432ed9ffccc49b820e913c8f327a82026349e9c34fa3690116f6b70770f"},
+ {file = "librt-0.6.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e18875e17ef69ba7dfa9623f2f95f3eda6f70b536079ee6d5763ecdfe6cc9040"},
+ {file = "librt-0.6.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a218f85081fc3f70cddaed694323a1ad7db5ca028c379c214e3a7c11c0850523"},
+ {file = "librt-0.6.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1ef42ff4edd369e84433ce9b188a64df0837f4f69e3d34d3b34d4955c599d03f"},
+ {file = "librt-0.6.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0e0f2b79993fec23a685b3e8107ba5f8675eeae286675a216da0b09574fa1e47"},
+ {file = "librt-0.6.3-cp311-cp311-win32.whl", hash = "sha256:fd98cacf4e0fabcd4005c452cb8a31750258a85cab9a59fb3559e8078da408d7"},
+ {file = "librt-0.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:e17b5b42c8045867ca9d1f54af00cc2275198d38de18545edaa7833d7e9e4ac8"},
+ {file = "librt-0.6.3-cp311-cp311-win_arm64.whl", hash = "sha256:87597e3d57ec0120a3e1d857a708f80c02c42ea6b00227c728efbc860f067c45"},
+ {file = "librt-0.6.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74418f718083009108dc9a42c21bf2e4802d49638a1249e13677585fcc9ca176"},
+ {file = "librt-0.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:514f3f363d1ebc423357d36222c37e5c8e6674b6eae8d7195ac9a64903722057"},
+ {file = "librt-0.6.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cf1115207a5049d1f4b7b4b72de0e52f228d6c696803d94843907111cbf80610"},
+ {file = "librt-0.6.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ad8ba80cdcea04bea7b78fcd4925bfbf408961e9d8397d2ee5d3ec121e20c08c"},
+ {file = "librt-0.6.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4018904c83eab49c814e2494b4e22501a93cdb6c9f9425533fe693c3117126f9"},
+ {file = "librt-0.6.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8983c5c06ac9c990eac5eb97a9f03fe41dc7e9d7993df74d9e8682a1056f596c"},
+ {file = "librt-0.6.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7769c579663a6f8dbf34878969ac71befa42067ce6bf78e6370bf0d1194997c"},
+ {file = "librt-0.6.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d3c9a07eafdc70556f8c220da4a538e715668c0c63cabcc436a026e4e89950bf"},
+ {file = "librt-0.6.3-cp312-cp312-win32.whl", hash = "sha256:38320386a48a15033da295df276aea93a92dfa94a862e06893f75ea1d8bbe89d"},
+ {file = "librt-0.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:c0ecf4786ad0404b072196b5df774b1bb23c8aacdcacb6c10b4128bc7b00bd01"},
+ {file = "librt-0.6.3-cp312-cp312-win_arm64.whl", hash = "sha256:9f2a6623057989ebc469cd9cc8fe436c40117a0147627568d03f84aef7854c55"},
+ {file = "librt-0.6.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9e716f9012148a81f02f46a04fc4c663420c6fbfeacfac0b5e128cf43b4413d3"},
+ {file = "librt-0.6.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:669ff2495728009a96339c5ad2612569c6d8be4474e68f3f3ac85d7c3261f5f5"},
+ {file = "librt-0.6.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:349b6873ebccfc24c9efd244e49da9f8a5c10f60f07575e248921aae2123fc42"},
+ {file = "librt-0.6.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c74c26736008481c9f6d0adf1aedb5a52aff7361fea98276d1f965c0256ee70"},
+ {file = "librt-0.6.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:408a36ddc75e91918cb15b03460bdc8a015885025d67e68c6f78f08c3a88f522"},
+ {file = "librt-0.6.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e61ab234624c9ffca0248a707feffe6fac2343758a36725d8eb8a6efef0f8c30"},
+ {file = "librt-0.6.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:324462fe7e3896d592b967196512491ec60ca6e49c446fe59f40743d08c97917"},
+ {file = "librt-0.6.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:36b2ec8c15030002c7f688b4863e7be42820d7c62d9c6eece3db54a2400f0530"},
+ {file = "librt-0.6.3-cp313-cp313-win32.whl", hash = "sha256:25b1b60cb059471c0c0c803e07d0dfdc79e41a0a122f288b819219ed162672a3"},
+ {file = "librt-0.6.3-cp313-cp313-win_amd64.whl", hash = "sha256:10a95ad074e2a98c9e4abc7f5b7d40e5ecbfa84c04c6ab8a70fabf59bd429b88"},
+ {file = "librt-0.6.3-cp313-cp313-win_arm64.whl", hash = "sha256:17000df14f552e86877d67e4ab7966912224efc9368e998c96a6974a8d609bf9"},
+ {file = "librt-0.6.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8e695f25d1a425ad7a272902af8ab8c8d66c1998b177e4b5f5e7b4e215d0c88a"},
+ {file = "librt-0.6.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3e84a4121a7ae360ca4da436548a9c1ca8ca134a5ced76c893cc5944426164bd"},
+ {file = "librt-0.6.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:05f385a414de3f950886ea0aad8f109650d4b712cf9cc14cc17f5f62a9ab240b"},
+ {file = "librt-0.6.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36a8e337461150b05ca2c7bdedb9e591dfc262c5230422cea398e89d0c746cdc"},
+ {file = "librt-0.6.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcbe48f6a03979384f27086484dc2a14959be1613cb173458bd58f714f2c48f3"},
+ {file = "librt-0.6.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4bca9e4c260233fba37b15c4ec2f78aa99c1a79fbf902d19dd4a763c5c3fb751"},
+ {file = "librt-0.6.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:760c25ed6ac968e24803eb5f7deb17ce026902d39865e83036bacbf5cf242aa8"},
+ {file = "librt-0.6.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4a93a353ccff20df6e34fa855ae8fd788832c88f40a9070e3ddd3356a9f0e"},
+ {file = "librt-0.6.3-cp314-cp314-win32.whl", hash = "sha256:cb92741c2b4ea63c09609b064b26f7f5d9032b61ae222558c55832ec3ad0bcaf"},
+ {file = "librt-0.6.3-cp314-cp314-win_amd64.whl", hash = "sha256:fdcd095b1b812d756fa5452aca93b962cf620694c0cadb192cec2bb77dcca9a2"},
+ {file = "librt-0.6.3-cp314-cp314-win_arm64.whl", hash = "sha256:822ca79e28720a76a935c228d37da6579edef048a17cd98d406a2484d10eda78"},
+ {file = "librt-0.6.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:078cd77064d1640cb7b0650871a772956066174d92c8aeda188a489b58495179"},
+ {file = "librt-0.6.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5cc22f7f5c0cc50ed69f4b15b9c51d602aabc4500b433aaa2ddd29e578f452f7"},
+ {file = "librt-0.6.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:14b345eb7afb61b9fdcdfda6738946bd11b8e0f6be258666b0646af3b9bb5916"},
+ {file = "librt-0.6.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d46aa46aa29b067f0b8b84f448fd9719aaf5f4c621cc279164d76a9dc9ab3e8"},
+ {file = "librt-0.6.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1b51ba7d9d5d9001494769eca8c0988adce25d0a970c3ba3f2eb9df9d08036fc"},
+ {file = "librt-0.6.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ced0925a18fddcff289ef54386b2fc230c5af3c83b11558571124bfc485b8c07"},
+ {file = "librt-0.6.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:6bac97e51f66da2ca012adddbe9fd656b17f7368d439de30898f24b39512f40f"},
+ {file = "librt-0.6.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b2922a0e8fa97395553c304edc3bd36168d8eeec26b92478e292e5d4445c1ef0"},
+ {file = "librt-0.6.3-cp314-cp314t-win32.whl", hash = "sha256:f33462b19503ba68d80dac8a1354402675849259fb3ebf53b67de86421735a3a"},
+ {file = "librt-0.6.3-cp314-cp314t-win_amd64.whl", hash = "sha256:04f8ce401d4f6380cfc42af0f4e67342bf34c820dae01343f58f472dbac75dcf"},
+ {file = "librt-0.6.3-cp314-cp314t-win_arm64.whl", hash = "sha256:afb39550205cc5e5c935762c6bf6a2bb34f7d21a68eadb25e2db7bf3593fecc0"},
+ {file = "librt-0.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09262cb2445b6f15d09141af20b95bb7030c6f13b00e876ad8fdd1a9045d6aa5"},
+ {file = "librt-0.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57705e8eec76c5b77130d729c0f70190a9773366c555c5457c51eace80afd873"},
+ {file = "librt-0.6.3-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3ac2a7835434b31def8ed5355dd9b895bbf41642d61967522646d1d8b9681106"},
+ {file = "librt-0.6.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:71f0a5918aebbea1e7db2179a8fe87e8a8732340d9e8b8107401fb407eda446e"},
+ {file = "librt-0.6.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa346e202e6e1ebc01fe1c69509cffe486425884b96cb9ce155c99da1ecbe0e9"},
+ {file = "librt-0.6.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:92267f865c7bbd12327a0d394666948b9bf4b51308b52947c0cc453bfa812f5d"},
+ {file = "librt-0.6.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:86605d5bac340beb030cbc35859325982a79047ebdfba1e553719c7126a2389d"},
+ {file = "librt-0.6.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:98e4bbecbef8d2a60ecf731d735602feee5ac0b32117dbbc765e28b054bac912"},
+ {file = "librt-0.6.3-cp39-cp39-win32.whl", hash = "sha256:3caa0634c02d5ff0b2ae4a28052e0d8c5f20d497623dc13f629bd4a9e2a6efad"},
+ {file = "librt-0.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:b47395091e7e0ece1e6ebac9b98bf0c9084d1e3d3b2739aa566be7e56e3f7bf2"},
+ {file = "librt-0.6.3.tar.gz", hash = "sha256:c724a884e642aa2bbad52bb0203ea40406ad742368a5f90da1b220e970384aae"},
+]
+
[[package]]
name = "lxml"
version = "6.0.2"
@@ -1033,7 +1119,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.8"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"url-preview\""
+markers = "extra == \"url-preview\" or extra == \"all\""
files = [
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"},
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"},
@@ -1319,7 +1405,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
+markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1448,53 +1534,54 @@ docs = ["sphinx (>=8,<9)", "sphinx-autobuild"]
[[package]]
name = "mypy"
-version = "1.17.1"
+version = "1.19.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972"},
- {file = "mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7"},
- {file = "mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df"},
- {file = "mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390"},
- {file = "mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94"},
- {file = "mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b"},
- {file = "mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58"},
- {file = "mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5"},
- {file = "mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd"},
- {file = "mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b"},
- {file = "mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5"},
- {file = "mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b"},
- {file = "mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb"},
- {file = "mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403"},
- {file = "mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056"},
- {file = "mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341"},
- {file = "mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb"},
- {file = "mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19"},
- {file = "mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7"},
- {file = "mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81"},
- {file = "mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6"},
- {file = "mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849"},
- {file = "mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14"},
- {file = "mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a"},
- {file = "mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733"},
- {file = "mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd"},
- {file = "mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0"},
- {file = "mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a"},
- {file = "mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91"},
- {file = "mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed"},
- {file = "mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9"},
- {file = "mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99"},
- {file = "mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8"},
- {file = "mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8"},
- {file = "mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259"},
- {file = "mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d"},
- {file = "mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9"},
- {file = "mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01"},
+ {file = "mypy-1.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6148ede033982a8c5ca1143de34c71836a09f105068aaa8b7d5edab2b053e6c8"},
+ {file = "mypy-1.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a9ac09e52bb0f7fb912f5d2a783345c72441a08ef56ce3e17c1752af36340a39"},
+ {file = "mypy-1.19.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f7254c15ab3f8ed68f8e8f5cbe88757848df793e31c36aaa4d4f9783fd08ab"},
+ {file = "mypy-1.19.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:318ba74f75899b0e78b847d8c50821e4c9637c79d9a59680fc1259f29338cb3e"},
+ {file = "mypy-1.19.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cf7d84f497f78b682edd407f14a7b6e1a2212b433eedb054e2081380b7395aa3"},
+ {file = "mypy-1.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:c3385246593ac2b97f155a0e9639be906e73534630f663747c71908dfbf26134"},
+ {file = "mypy-1.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a31e4c28e8ddb042c84c5e977e28a21195d086aaffaf08b016b78e19c9ef8106"},
+ {file = "mypy-1.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34ec1ac66d31644f194b7c163d7f8b8434f1b49719d403a5d26c87fff7e913f7"},
+ {file = "mypy-1.19.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cb64b0ba5980466a0f3f9990d1c582bcab8db12e29815ecb57f1408d99b4bff7"},
+ {file = "mypy-1.19.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:120cffe120cca5c23c03c77f84abc0c14c5d2e03736f6c312480020082f1994b"},
+ {file = "mypy-1.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7a500ab5c444268a70565e374fc803972bfd1f09545b13418a5174e29883dab7"},
+ {file = "mypy-1.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:c14a98bc63fd867530e8ec82f217dae29d0550c86e70debc9667fff1ec83284e"},
+ {file = "mypy-1.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0fb3115cb8fa7c5f887c8a8d81ccdcb94cff334684980d847e5a62e926910e1d"},
+ {file = "mypy-1.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3e19e3b897562276bb331074d64c076dbdd3e79213f36eed4e592272dabd760"},
+ {file = "mypy-1.19.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9d491295825182fba01b6ffe2c6fe4e5a49dbf4e2bb4d1217b6ced3b4797bc6"},
+ {file = "mypy-1.19.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6016c52ab209919b46169651b362068f632efcd5eb8ef9d1735f6f86da7853b2"},
+ {file = "mypy-1.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f188dcf16483b3e59f9278c4ed939ec0254aa8a60e8fc100648d9ab5ee95a431"},
+ {file = "mypy-1.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:0e3c3d1e1d62e678c339e7ade72746a9e0325de42cd2cccc51616c7b2ed1a018"},
+ {file = "mypy-1.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7686ed65dbabd24d20066f3115018d2dce030d8fa9db01aa9f0a59b6813e9f9e"},
+ {file = "mypy-1.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fd4a985b2e32f23bead72e2fb4bbe5d6aceee176be471243bd831d5b2644672d"},
+ {file = "mypy-1.19.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc51a5b864f73a3a182584b1ac75c404396a17eced54341629d8bdcb644a5bba"},
+ {file = "mypy-1.19.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:37af5166f9475872034b56c5efdcf65ee25394e9e1d172907b84577120714364"},
+ {file = "mypy-1.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:510c014b722308c9bd377993bcbf9a07d7e0692e5fa8fc70e639c1eb19fc6bee"},
+ {file = "mypy-1.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:cabbee74f29aa9cd3b444ec2f1e4fa5a9d0d746ce7567a6a609e224429781f53"},
+ {file = "mypy-1.19.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:f2e36bed3c6d9b5f35d28b63ca4b727cb0228e480826ffc8953d1892ddc8999d"},
+ {file = "mypy-1.19.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a18d8abdda14035c5718acb748faec09571432811af129bf0d9e7b2d6699bf18"},
+ {file = "mypy-1.19.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75e60aca3723a23511948539b0d7ed514dda194bc3755eae0bfc7a6b4887aa7"},
+ {file = "mypy-1.19.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f44f2ae3c58421ee05fe609160343c25f70e3967f6e32792b5a78006a9d850f"},
+ {file = "mypy-1.19.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:63ea6a00e4bd6822adbfc75b02ab3653a17c02c4347f5bb0cf1d5b9df3a05835"},
+ {file = "mypy-1.19.0-cp314-cp314-win_amd64.whl", hash = "sha256:3ad925b14a0bb99821ff6f734553294aa6a3440a8cb082fe1f5b84dfb662afb1"},
+ {file = "mypy-1.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0dde5cb375cb94deff0d4b548b993bec52859d1651e073d63a1386d392a95495"},
+ {file = "mypy-1.19.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1cf9c59398db1c68a134b0b5354a09a1e124523f00bacd68e553b8bd16ff3299"},
+ {file = "mypy-1.19.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3210d87b30e6af9c8faed61be2642fcbe60ef77cec64fa1ef810a630a4cf671c"},
+ {file = "mypy-1.19.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2c1101ab41d01303103ab6ef82cbbfedb81c1a060c868fa7cc013d573d37ab5"},
+ {file = "mypy-1.19.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ea4fd21bb48f0da49e6d3b37ef6bd7e8228b9fe41bbf4d80d9364d11adbd43c"},
+ {file = "mypy-1.19.0-cp39-cp39-win_amd64.whl", hash = "sha256:16f76ff3f3fd8137aadf593cb4607d82634fca675e8211ad75c43d86033ee6c6"},
+ {file = "mypy-1.19.0-py3-none-any.whl", hash = "sha256:0c01c99d626380752e527d5ce8e69ffbba2046eb8a060db0329690849cf9b6f9"},
+ {file = "mypy-1.19.0.tar.gz", hash = "sha256:f6b874ca77f733222641e5c46e4711648c4037ea13646fd0cdc814c2eaec2528"},
]
[package.dependencies]
+librt = ">=0.6.2"
mypy_extensions = ">=1.0.0"
pathspec = ">=0.9.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
@@ -1521,18 +1608,18 @@ files = [
[[package]]
name = "mypy-zope"
-version = "1.0.13"
+version = "1.0.14"
description = "Plugin for mypy to support zope interfaces"
optional = false
python-versions = "*"
groups = ["dev"]
files = [
- {file = "mypy_zope-1.0.13-py3-none-any.whl", hash = "sha256:13740c4cbc910cca2c143c6709e1c483c991abeeeb7b629ad6f73d8ac1edad15"},
- {file = "mypy_zope-1.0.13.tar.gz", hash = "sha256:63fb4d035ea874baf280dc69e714dcde4bd2a4a4837a0fd8d90ce91bea510f99"},
+ {file = "mypy_zope-1.0.14-py3-none-any.whl", hash = "sha256:8842ade93630421dbec0c9906d6515f6e65c6407ef8b9b2eb7f4f73ae1e8a42a"},
+ {file = "mypy_zope-1.0.14.tar.gz", hash = "sha256:42555ad4703f2e50c912de3ebe0c7197619c3f71864817fabc5385ecea0f8449"},
]
[package.dependencies]
-mypy = ">=1.0.0,<1.18.0"
+mypy = ">=1.0.0,<1.20.0"
"zope.interface" = "*"
"zope.schema" = "*"
@@ -1561,7 +1648,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1610,14 +1697,14 @@ files = [
[[package]]
name = "phonenumbers"
-version = "9.0.18"
+version = "9.0.19"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
groups = ["main"]
files = [
- {file = "phonenumbers-9.0.18-py2.py3-none-any.whl", hash = "sha256:d3354454ac31c97f8a08121df97a7145b8dca641f734c6f1518a41c2f60c5764"},
- {file = "phonenumbers-9.0.18.tar.gz", hash = "sha256:5537c61ba95b11b992c95e804da6e49193cc06b1224f632ade64631518a48ed1"},
+ {file = "phonenumbers-9.0.19-py2.py3-none-any.whl", hash = "sha256:004abdfe2010518c2383f148515664a742e8a5d5540e07c049735c139d7e8b09"},
+ {file = "phonenumbers-9.0.19.tar.gz", hash = "sha256:e0674e31554362f4d95383558f7aefde738ef2e7bf96d28a10afd3e87d63a65c"},
]
[[package]]
@@ -1751,7 +1838,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"postgres\""
+markers = "extra == \"postgres\" or extra == \"all\""
files = [
{file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"},
{file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"},
@@ -1769,7 +1856,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1785,7 +1872,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -2067,7 +2154,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"cache-memory\""
+markers = "extra == \"cache-memory\" or extra == \"all\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2127,7 +2214,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2152,7 +2239,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2175,15 +2262,15 @@ files = [
[[package]]
name = "pytz"
-version = "2022.7.1"
+version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
- {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
- {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
+ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
+ {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
]
[[package]]
@@ -2584,7 +2671,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"sentry\""
+markers = "extra == \"sentry\" or extra == \"all\""
files = [
{file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"},
{file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"},
@@ -2794,7 +2881,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2810,7 +2897,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2883,7 +2970,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -3017,7 +3104,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"redis\""
+markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@@ -3263,7 +3350,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 6f8481de9a..a8935fded6 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -874,7 +874,7 @@ async def update_membership_locked(
if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
- block_invite_result = None
+ block_invite_result: tuple[Codes, dict] | None = None
if (
self._server_notices_mxid is not None
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index a0aea4975c..cfde107b48 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -983,7 +983,7 @@ def __getitem__(self, key: StateKey) -> str:
raise Exception("State map was filtered and doesn't include: %s", key)
return super().__getitem__(key)
- @overload # type: ignore[override]
+ @overload
def get(self, key: StateKey, default: None = None, /) -> str | None: ...
@overload
def get(self, key: StateKey, default: str, /) -> str: ...
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index 19789a4666..cb3b996073 100644
--- a/synapse/util/iterutils.py
+++ b/synapse/util/iterutils.py
@@ -22,6 +22,7 @@
import heapq
from itertools import islice
from typing import (
+ Any,
Callable,
Collection,
Generator,
@@ -33,7 +34,7 @@
TypeVar,
)
-T = TypeVar("T")
+T = TypeVar("T", bound=Any)
S = TypeVar("S", bound="_SelfSlice")
diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py
index 4dff59e180..d85d169476 100644
--- a/tests/rest/admin/test_device.py
+++ b/tests/rest/admin/test_device.py
@@ -26,7 +26,7 @@
import synapse.rest.admin
from synapse.api.errors import Codes
-from synapse.handlers.device import DeviceWriterHandler
+from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceWriterHandler
from synapse.rest.client import devices, login
from synapse.server import HomeServer
from synapse.util.clock import Clock
@@ -175,10 +175,7 @@ def test_update_device_too_long_display_name(self) -> None:
)
# Request to update a device display name with a new value that is longer than allowed.
- update = {
- "display_name": "a"
- * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
- }
+ update = {"display_name": "a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1)}
channel = self.make_request(
"PUT",
From 09fd2645c21350e859663c60a8090a28683baa62 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 Dec 2025 23:51:29 +0000
Subject: [PATCH 13/59] Bump urllib3 from 2.5.0 to 2.6.0 (#19282)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [urllib3](https://github.com/urllib3/urllib3) from 2.5.0 to 2.6.0.
Release notes

Sourced from urllib3's releases.

2.6.0

🚀 urllib3 is fundraising for HTTP/2 support

urllib3 is raising ~$40,000 USD to release HTTP/2 support and ensure long-term
sustainable maintenance of the project after a sharp decline in financial
support. If your company or organization uses Python and would benefit from
HTTP/2 support in Requests, pip, cloud SDKs, and thousands of other projects,
please consider contributing financially to ensure HTTP/2 support is developed
sustainably and maintained for the long haul.

Thank you for your support.

Security

- Fixed a security issue where the streaming API could improperly handle
  highly compressed HTTP content ("decompression bombs"), leading to excessive
  resource consumption even when a small amount of data was requested. Reading
  small chunks of compressed data is safer and much more efficient now.
  (CVE-2025-66471, reported by @Cycloctane, 8.9 High, GHSA-2xpw-w6gg-jr37)
- Fixed a security issue where an attacker could compose an HTTP response with
  virtually unlimited links in the Content-Encoding header, potentially leading
  to a denial of service (DoS) attack by exhausting system resources during
  decoding. The number of allowed chained encodings is now limited to 5.
  (CVE-2025-66418, reported by @illia-v, 8.9 High, GHSA-gm62-xv2j-4w53)

[!IMPORTANT]
- If urllib3 is not installed with the optional urllib3[brotli] extra, but your
  environment contains a Brotli/brotlicffi/brotlipy package anyway, make sure
  to upgrade it to at least Brotli 1.2.0 or brotlicffi 1.2.0.0 to benefit from
  the security fixes and avoid warnings. Prefer using urllib3[brotli] to
  install a compatible Brotli package automatically.
- If you use custom decompressors, please make sure to update them to respect
  the changed API of urllib3.response.ContentDecoder.

Features

- Enabled retrieval, deletion, and membership testing in HTTPHeaderDict using
  bytes keys. (#3653)
- Added host and port information to string representations of HTTPConnection.
  (#3666)
- Added explicit support for Python 3.14 free-threading builds. (#3696)

Removals

- Removed the HTTPResponse.getheaders() method in favor of
  HTTPResponse.headers. Removed the HTTPResponse.getheader(name, default)
  method in favor of HTTPResponse.headers.get(name, default). (#3622)

Bugfixes

- Fixed redirect handling in urllib3.PoolManager when an integer is passed for
  the retries parameter. (#3649)
- Fixed HTTPConnectionPool when used in Emscripten with no explicit port.
  (#3664)
- Fixed handling of SSLKEYLOGFILE with expandable variables. (#3700)

Misc

- Changed the zstd extra to install backports.zstd instead of zstandard on
  Python 3.13 and earlier. (#3693)
- Improved the performance of content decoding by optimizing the
  BytesQueueBuffer class. (#3710)
- Allowed building the urllib3 package with newer setuptools-scm v9.x. (#3652)
- Ensured successful urllib3 builds by setting the Hatchling requirement
  to ≥ 1.27.0. (#3638)
Changelog

Sourced from urllib3's changelog.

2.6.0 (2025-12-05)

(Same Security, Features, Removals, and Bugfixes entries as the release notes above.)

... (truncated)
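The Removals entry above drops urllib3's legacy header accessors. A minimal migration sketch (not part of this PR; the header names and values are illustrative):

```python
# urllib3 2.6.0 removes HTTPResponse.getheaders()/getheader() in favour of the
# .headers mapping (an HTTPHeaderDict, which now also accepts bytes keys).
from urllib3 import HTTPHeaderDict

headers = HTTPHeaderDict({"Content-Type": "application/json", "X-Foo": "bar"})

# Old (removed): resp.getheader("Content-Type", "text/plain")
# New:           resp.headers.get("Content-Type", "text/plain")
print(headers.get("Content-Type", "text/plain"))

# New in 2.6.0: bytes keys work for lookups, deletion, and membership tests.
print(b"x-foo" in headers)  # True (header lookups are case-insensitive)
```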
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
You can disable automated security fix PRs for this repo from the
[Security Alerts
page](https://github.com/element-hq/synapse/network/alerts).
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 54 ++++++++++++++++++++++++++---------------------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 5fe740a025..14b1f085b8 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -31,7 +31,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
files = [
{file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"},
{file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"},
@@ -481,7 +481,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -506,7 +506,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -556,7 +556,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
-markers = "extra == \"redis\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"},
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"},
@@ -879,7 +879,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -1017,7 +1017,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1119,7 +1119,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.8"
groups = ["main"]
-markers = "extra == \"url-preview\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"url-preview\""
files = [
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"},
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"},
@@ -1405,7 +1405,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1648,7 +1648,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1838,7 +1838,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"postgres\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"postgres\""
files = [
{file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"},
{file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"},
@@ -1856,7 +1856,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1872,7 +1872,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -2154,7 +2154,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
-markers = "extra == \"cache-memory\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"cache-memory\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2214,7 +2214,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2239,7 +2239,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2267,7 +2267,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -2671,7 +2671,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
-markers = "extra == \"sentry\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"sentry\""
files = [
{file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"},
{file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"},
@@ -2881,7 +2881,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2897,7 +2897,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2970,7 +2970,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -3104,7 +3104,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"redis\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@@ -3315,21 +3315,21 @@ files = [
[[package]]
name = "urllib3"
-version = "2.5.0"
+version = "2.6.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
groups = ["main", "dev"]
files = [
- {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"},
- {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"},
+ {file = "urllib3-2.6.0-py3-none-any.whl", hash = "sha256:c90f7a39f716c572c4e3e58509581ebd83f9b59cced005b7db7ad2d22b0db99f"},
+ {file = "urllib3-2.6.0.tar.gz", hash = "sha256:cb9bcef5a4b345d5da5d145dc3e30834f58e8018828cbc724d30b4cb7d4d49f1"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
+brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
-zstd = ["zstandard (>=0.18.0)"]
+zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""]
[[package]]
name = "webencodings"
@@ -3350,7 +3350,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
From 8b0083cad97ca27e1bc6ce2753a75abfae8166ab Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Mon, 8 Dec 2025 21:39:18 +0000
Subject: [PATCH 14/59] Respond with useful error codes when `Content-Length`
header/s are invalid (#19212)
Related to https://github.com/element-hq/synapse/issues/17035: when
Synapse receives a request that is larger than the maximum allowed size,
it aborts the connection without ever sending back an HTTP response.
I dug into our usage of Twisted and how best to report such an error,
and this is what I came up with.
It would be ideal to report the status from within `handleContentChunk`,
but that is called too early in the Twisted HTTP handling code, before
things have been set up enough to properly write a response.
I tested this change locally (with both the C-S and S-S APIs) and clients
now receive a 413 response in addition to the connection being closed.
Hopefully this will make it easier to quickly detect when
https://github.com/element-hq/synapse/issues/17035 is occurring, as the
current situation makes it very hard to narrow things down to that
specific issue without making a lot of assumptions.
This PR also responds with more meaningful error codes in the following
cases (see the illustrative snippet after this list):
- multiple `Content-Length` headers
- invalid `Content-Length` header value
- request content size being larger than the `Content-Length` value
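For illustration, a hedged sketch of what a client might now observe when sending an over-sized request; the endpoint, limit, errcode, and response body below are assumptions for the example, not values taken from this PR:

```python
# Illustration only (assumed endpoint/limit/errcode): previously Synapse
# dropped the connection with no response; after this change the client gets
# an HTTP error status and a JSON body before the connection is closed.
import json
import urllib.error
import urllib.request

oversized_body = b"x" * (200 * 65536 + 1)  # just over the ~12.5M default limit

req = urllib.request.Request(
    "https://synapse.example.org/_matrix/client/v3/createRoom",  # hypothetical
    data=oversized_body,
    method="POST",
)
try:
    urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
    print(e.code)                # e.g. 413 Request Entity Too Large
    print(json.loads(e.read()))  # e.g. {"errcode": "...", "error": "..."}
```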
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct (run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---------
Co-authored-by: Eric Eastwood
---
changelog.d/19212.misc | 1 +
synapse/api/constants.py | 13 +++
synapse/app/_base.py | 15 +--
synapse/http/site.py | 158 ++++++++++++++++++++++++++++----
tests/http/test_site.py | 102 +++++++++++++++++++++
tests/rest/client/test_login.py | 6 --
tests/rest/client/test_media.py | 4 -
tests/rest/client/utils.py | 2 -
tests/server.py | 25 +++--
tests/test_server.py | 60 ++++++++++++
10 files changed, 336 insertions(+), 50 deletions(-)
create mode 100644 changelog.d/19212.misc
diff --git a/changelog.d/19212.misc b/changelog.d/19212.misc
new file mode 100644
index 0000000000..83158ce2d9
--- /dev/null
+++ b/changelog.d/19212.misc
@@ -0,0 +1 @@
+Respond with useful error codes when `Content-Length` header/s are invalid.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index d41e44b154..9b6a68e929 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -29,6 +29,19 @@
# the max size of a (canonical-json-encoded) event
MAX_PDU_SIZE = 65536
+# The maximum allowed size of an HTTP request.
+# Other than media uploads, the biggest request we expect to see is a fully-loaded
+# /federation/v1/send request.
+#
+# The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
+# limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
+# json encoding); there is no specced limit to EDUs (see
+# https://github.com/matrix-org/matrix-doc/issues/3121).
+#
+# in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
+#
+MAX_REQUEST_SIZE = 200 * MAX_PDU_SIZE
+
# Max/min size of ints in canonical JSON
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index d1ed1201e5..98d051bf04 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -59,7 +59,7 @@
from twisted.web.resource import Resource
import synapse.util.caches
-from synapse.api.constants import MAX_PDU_SIZE
+from synapse.api.constants import MAX_REQUEST_SIZE
from synapse.app import check_bind_error
from synapse.config import ConfigError
from synapse.config._base import format_config_error
@@ -895,17 +895,8 @@ def sdnotify(state: bytes) -> None:
def max_request_body_size(config: HomeServerConfig) -> int:
"""Get a suitable maximum size for incoming HTTP requests"""
- # Other than media uploads, the biggest request we expect to see is a fully-loaded
- # /federation/v1/send request.
- #
- # The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
- # limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
- # json encoding); there is no specced limit to EDUs (see
- # https://github.com/matrix-org/matrix-doc/issues/3121).
- #
- # in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
- #
- max_request_size = 200 * MAX_PDU_SIZE
+ # Baseline default for any request that isn't configured in the homeserver config
+ max_request_size = MAX_REQUEST_SIZE
# if we have a media repo enabled, we may need to allow larger uploads than that
if config.media.can_load_media_repo:
diff --git a/synapse/http/site.py b/synapse/http/site.py
index a1b0b8d9c2..6ced5b98b3 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -19,6 +19,7 @@
#
#
import contextlib
+import json
import logging
import time
from http import HTTPStatus
@@ -36,6 +37,7 @@
from twisted.web.resource import IResource, Resource
from twisted.web.server import Request
+from synapse.api.errors import Codes, SynapseError
from synapse.config.server import ListenerConfig
from synapse.http import get_request_user_agent, redact_uri
from synapse.http.proxy import ProxySite
@@ -59,6 +61,10 @@
_next_request_seq = 0
+class ContentLengthError(SynapseError):
+ """Raised when content-length validation fails."""
+
+
class SynapseRequest(Request):
"""Class which encapsulates an HTTP request to synapse.
@@ -144,36 +150,150 @@ def __repr__(self) -> str:
self.synapse_site.site_tag,
)
+ def _respond_with_error(self, synapse_error: SynapseError) -> None:
+ """Send an error response and close the connection."""
+ self.setResponseCode(synapse_error.code)
+ error_response_bytes = json.dumps(synapse_error.error_dict(None)).encode()
+
+ self.responseHeaders.setRawHeaders(b"Content-Type", [b"application/json"])
+ self.responseHeaders.setRawHeaders(
+ b"Content-Length", [f"{len(error_response_bytes)}"]
+ )
+ self.write(error_response_bytes)
+ self.loseConnection()
+
+ def _get_content_length_from_headers(self) -> int | None:
+ """Attempts to obtain the `Content-Length` value from the request's headers.
+
+ Returns:
+ Content length as `int` if present. Otherwise `None`.
+
+ Raises:
+ ContentLengthError: if multiple `Content-Length` headers are present or the
+ value is not an `int`.
+ """
+ content_length_headers = self.requestHeaders.getRawHeaders(b"Content-Length")
+ if content_length_headers is None:
+ return None
+
+ # If there are multiple `Content-Length` headers return an error.
+ # We don't want to even try to pick the right one if there are multiple
+ # as we could run into problems similar to request smuggling vulnerabilities
+ # which rely on the mismatch of how different systems interpret information.
+ if len(content_length_headers) != 1:
+ raise ContentLengthError(
+ HTTPStatus.BAD_REQUEST,
+ "Multiple Content-Length headers received",
+ Codes.UNKNOWN,
+ )
+
+ try:
+ return int(content_length_headers[0])
+ except (ValueError, TypeError):
+ raise ContentLengthError(
+ HTTPStatus.BAD_REQUEST,
+ "Content-Length header value is not a valid integer",
+ Codes.UNKNOWN,
+ )
+
+ def _validate_content_length(self) -> None:
+ """Validate Content-Length header and actual content size.
+
+ Raises:
+ ContentLengthError: If validation fails.
+ """
+ # we should have a `content` by now.
+ assert self.content, "_validate_content_length() called before gotLength()"
+ content_length = self._get_content_length_from_headers()
+
+ if content_length is None:
+ return
+
+ actual_content_length = self.content.tell()
+
+ if content_length > self._max_request_body_size:
+ logger.info(
+ "Rejecting request from %s because Content-Length %d exceeds maximum size %d: %s %s",
+ self.client,
+ content_length,
+ self._max_request_body_size,
+ self.get_method(),
+ self.get_redacted_uri(),
+ )
+ raise ContentLengthError(
+ HTTPStatus.REQUEST_ENTITY_TOO_LARGE,
+ f"Request content is too large (>{self._max_request_body_size})",
+ Codes.TOO_LARGE,
+ )
+
+ if content_length != actual_content_length:
+ comparison = (
+ "smaller" if content_length < actual_content_length else "larger"
+ )
+ logger.info(
+ "Rejecting request from %s because Content-Length %d is %s than the request content size %d: %s %s",
+ self.client,
+ content_length,
+ comparison,
+ actual_content_length,
+ self.get_method(),
+ self.get_redacted_uri(),
+ )
+ raise ContentLengthError(
+ HTTPStatus.BAD_REQUEST,
+ f"Rejecting request as the Content-Length header value {content_length} "
+ f"is {comparison} than the actual request content size {actual_content_length}",
+ Codes.UNKNOWN,
+ )
+
# Twisted machinery: this method is called by the Channel once the full request has
# been received, to dispatch the request to a resource.
- #
- # We're patching Twisted to bail/abort early when we see someone trying to upload
- # `multipart/form-data` so we can avoid Twisted parsing the entire request body into
- # in-memory (specific problem of this specific `Content-Type`). This protects us
- # from an attacker uploading something bigger than the available RAM and crashing
- # the server with a `MemoryError`, or carefully block just enough resources to cause
- # all other requests to fail.
- #
- # FIXME: This can be removed once we Twisted releases a fix and we update to a
- # version that is patched
def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None:
+ # In the case of a Content-Length header being present and its value being too
+ # large, throw a proper error to make debugging issues caused by overly large
+ # requests much easier. Currently we handle such cases in `handleContentChunk` and
+ # abort the connection without providing a proper HTTP response.
+ #
+ # Attempting to write an HTTP response from within `handleContentChunk` does not
+ # work, so the code here has been added to at least provide a response in the
+ # case of the Content-Length header being present.
+ self.method, self.uri = command, path
+ self.clientproto = version
+
+ try:
+ self._validate_content_length()
+ except ContentLengthError as e:
+ self._respond_with_error(e)
+ return
+
+ # We're patching Twisted to bail/abort early when we see someone trying to upload
+ # `multipart/form-data` so we can avoid Twisted parsing the entire request body into
+ # in-memory (specific problem of this specific `Content-Type`). This protects us
+ # from an attacker uploading something bigger than the available RAM and crashing
+ # the server with a `MemoryError`, or carefully block just enough resources to cause
+ # all other requests to fail.
+ #
+ # FIXME: This can be removed once Twisted releases a fix and we update to a
+ # version that is patched
+ # See: https://github.com/element-hq/synapse/security/advisories/GHSA-rfq8-j7rh-8hf2
if command == b"POST":
ctype = self.requestHeaders.getRawHeaders(b"content-type")
if ctype and b"multipart/form-data" in ctype[0]:
- self.method, self.uri = command, path
- self.clientproto = version
+ logger.warning(
+ "Aborting connection from %s because `content-type: multipart/form-data` is unsupported: %s %s",
+ self.client,
+ self.get_method(),
+ self.get_redacted_uri(),
+ )
+
self.code = HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value
self.code_message = bytes(
HTTPStatus.UNSUPPORTED_MEDIA_TYPE.phrase, "ascii"
)
- self.responseHeaders.setRawHeaders(b"content-length", [b"0"])
- logger.warning(
- "Aborting connection from %s because `content-type: multipart/form-data` is unsupported: %s %s",
- self.client,
- command,
- path,
- )
+ # FIXME: Return a better error response here similar to the
+ # `error_response_json` returned in other code paths here.
+ self.responseHeaders.setRawHeaders(b"Content-Length", [b"0"])
self.write(b"")
self.loseConnection()
return
diff --git a/tests/http/test_site.py b/tests/http/test_site.py
index 9e6d929c9e..654ec3190b 100644
--- a/tests/http/test_site.py
+++ b/tests/http/test_site.py
@@ -22,6 +22,7 @@
from twisted.internet.address import IPv6Address
from twisted.internet.testing import MemoryReactor, StringTransport
+from synapse.app._base import max_request_body_size
from synapse.app.homeserver import SynapseHomeServer
from synapse.server import HomeServer
from synapse.util.clock import Clock
@@ -143,3 +144,104 @@ def test_content_type_multipart(self) -> None:
# we should get a 415
self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 415 ")
+
+ def test_content_length_too_large(self) -> None:
+ """HTTP requests with Content-Length exceeding max size should be rejected with 413"""
+ self.hs.start_listening()
+
+ # find the HTTP server which is configured to listen on port 0
+ (port, factory, _backlog, interface) = self.reactor.tcpServers[0]
+ self.assertEqual(interface, "::")
+ self.assertEqual(port, 0)
+
+ # complete the connection and wire it up to a fake transport
+ client_address = IPv6Address("TCP", "::1", 2345)
+ protocol = factory.buildProtocol(client_address)
+ transport = StringTransport()
+ protocol.makeConnection(transport)
+
+ # Send a request with Content-Length header that exceeds the limit.
+ # Default max is 50MB (from media max_upload_size), so send something larger.
+ oversized_length = 1 + max_request_body_size(self.hs.config)
+ protocol.dataReceived(
+ b"POST / HTTP/1.1\r\n"
+ b"Connection: close\r\n"
+ b"Content-Length: " + str(oversized_length).encode() + b"\r\n"
+ b"\r\n"
+ b"" + b"x" * oversized_length + b"\r\n"
+ b"\r\n"
+ )
+
+ # Advance the reactor to process the request
+ while not transport.disconnecting:
+ self.reactor.advance(1)
+
+ # We should get a 413 Content Too Large
+ response = transport.value().decode()
+ self.assertRegex(response, r"^HTTP/1\.1 413 ")
+ self.assertSubstring("M_TOO_LARGE", response)
+
+ def test_too_many_content_length_headers(self) -> None:
+ """HTTP requests with multiple Content-Length headers should be rejected with 400"""
+ self.hs.start_listening()
+
+ # find the HTTP server which is configured to listen on port 0
+ (port, factory, _backlog, interface) = self.reactor.tcpServers[0]
+ self.assertEqual(interface, "::")
+ self.assertEqual(port, 0)
+
+ # complete the connection and wire it up to a fake transport
+ client_address = IPv6Address("TCP", "::1", 2345)
+ protocol = factory.buildProtocol(client_address)
+ transport = StringTransport()
+ protocol.makeConnection(transport)
+
+ protocol.dataReceived(
+ b"POST / HTTP/1.1\r\n"
+ b"Connection: close\r\n"
+ b"Content-Length: " + str(5).encode() + b"\r\n"
+ b"Content-Length: " + str(5).encode() + b"\r\n"
+ b"\r\n"
+ b"" + b"xxxxx" + b"\r\n"
+ b"\r\n"
+ )
+
+ # Advance the reactor to process the request
+ while not transport.disconnecting:
+ self.reactor.advance(1)
+
+ # We should get a 400
+ response = transport.value().decode()
+ self.assertRegex(response, r"^HTTP/1\.1 400 ")
+
+ def test_invalid_content_length_headers(self) -> None:
+ """HTTP requests with invalid Content-Length header should be rejected with 400"""
+ self.hs.start_listening()
+
+ # find the HTTP server which is configured to listen on port 0
+ (port, factory, _backlog, interface) = self.reactor.tcpServers[0]
+ self.assertEqual(interface, "::")
+ self.assertEqual(port, 0)
+
+ # complete the connection and wire it up to a fake transport
+ client_address = IPv6Address("TCP", "::1", 2345)
+ protocol = factory.buildProtocol(client_address)
+ transport = StringTransport()
+ protocol.makeConnection(transport)
+
+ protocol.dataReceived(
+ b"POST / HTTP/1.1\r\n"
+ b"Connection: close\r\n"
+ b"Content-Length: eight\r\n"
+ b"\r\n"
+ b"" + b"xxxxx" + b"\r\n"
+ b"\r\n"
+ )
+
+ # Advance the reactor to process the request
+ while not transport.disconnecting:
+ self.reactor.advance(1)
+
+ # We should get a 400
+ response = transport.value().decode()
+ self.assertRegex(response, r"^HTTP/1\.1 400 ")
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index d599351df7..d83604a696 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -1728,9 +1728,6 @@ def test_username_picker_use_displayname_avatar_and_email(self) -> None:
content_is_form=True,
custom_headers=[
("Cookie", "username_mapping_session=" + session_id),
- # old versions of twisted don't do form-parsing without a valid
- # content-length header.
- ("Content-Length", str(len(content))),
],
)
self.assertEqual(chan.code, 302, chan.result)
@@ -1818,9 +1815,6 @@ def test_username_picker_dont_use_displayname_avatar_or_email(self) -> None:
content_is_form=True,
custom_headers=[
("Cookie", "username_mapping_session=" + session_id),
- # old versions of twisted don't do form-parsing without a valid
- # content-length header.
- ("Content-Length", str(len(content))),
],
)
self.assertEqual(chan.code, 302, chan.result)
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index 33172f930e..ec81b1413c 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -2590,7 +2590,6 @@ def test_authenticated_media(self) -> None:
self.tok,
shorthand=False,
content_type=b"image/png",
- custom_headers=[("Content-Length", str(67))],
)
self.assertEqual(channel.code, 200)
res = channel.json_body.get("content_uri")
@@ -2750,7 +2749,6 @@ def test_authenticated_media_etag(self) -> None:
self.tok,
shorthand=False,
content_type=b"image/png",
- custom_headers=[("Content-Length", str(67))],
)
self.assertEqual(channel.code, 200)
res = channel.json_body.get("content_uri")
@@ -2909,7 +2907,6 @@ def upload_media(self, size: int) -> FakeChannel:
access_token=self.tok,
shorthand=False,
content_type=b"text/plain",
- custom_headers=[("Content-Length", str(size))],
)
def test_upload_under_limit(self) -> None:
@@ -3074,7 +3071,6 @@ def upload_media(self, size: int, tok: str) -> FakeChannel:
access_token=tok,
shorthand=False,
content_type=b"text/plain",
- custom_headers=[("Content-Length", str(size))],
)
def test_upload_under_limit(self) -> None:
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index 613c317b8a..b3808d75bb 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -612,7 +612,6 @@ def upload_media(
filename: The filename of the media to be uploaded
expect_code: The return code to expect from attempting to upload the media
"""
- image_length = len(image_data)
path = "/_matrix/media/r0/upload?filename=%s" % (filename,)
channel = make_request(
self.reactor,
@@ -621,7 +620,6 @@ def upload_media(
path,
content=image_data,
access_token=tok,
- custom_headers=[("Content-Length", str(image_length))],
)
assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
diff --git a/tests/server.py b/tests/server.py
index 4fb7dea5ec..ce31a4162a 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -81,6 +81,7 @@
from twisted.web.resource import IResource
from twisted.web.server import Request, Site
+from synapse.api.constants import MAX_REQUEST_SIZE
from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
from synapse.events.auto_accept_invites import InviteAutoAccepter
@@ -241,7 +242,6 @@ def writeSequence(self, data: Iterable[bytes]) -> None:
def loseConnection(self) -> None:
self.unregisterProducer()
- self.transport.loseConnection()
# Type ignore: mypy doesn't like the fact that producer isn't an IProducer.
def registerProducer(self, producer: IProducer, streaming: bool) -> None:
@@ -428,18 +428,29 @@ def make_request(
channel = FakeChannel(site, reactor, ip=client_ip)
- req = request(channel, site, our_server_name="test_server")
+ req = request(
+ channel,
+ site,
+ our_server_name="test_server",
+ max_request_body_size=MAX_REQUEST_SIZE,
+ )
channel.request = req
req.content = BytesIO(content)
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(0, SEEK_END)
- # Old version of Twisted (<20.3.0) have issues with parsing x-www-form-urlencoded
- # bodies if the Content-Length header is missing
- req.requestHeaders.addRawHeader(
- b"Content-Length", str(len(content)).encode("ascii")
- )
+ # If `Content-Length` was passed in as a custom header, don't automatically add it
+ # here.
+ if custom_headers is None or not any(
+ (k if isinstance(k, bytes) else k.encode("ascii")) == b"Content-Length"
+ for k, _ in custom_headers
+ ):
+ # Old versions of Twisted (<20.3.0) have issues with parsing x-www-form-urlencoded
+ # bodies if the Content-Length header is missing
+ req.requestHeaders.addRawHeader(
+ b"Content-Length", str(len(content)).encode("ascii")
+ )
if access_token:
req.requestHeaders.addRawHeader(
diff --git a/tests/test_server.py b/tests/test_server.py
index ec31b6cc5f..2a36dd4b30 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -212,6 +212,66 @@ def _callback(
self.assertEqual(channel.code, 200)
self.assertNotIn("body", channel.result)
+ def test_content_larger_than_content_length(self) -> None:
+ """
+ HTTP requests with content size exceeding Content-Length should be rejected with 400.
+ """
+
+ def _callback(
+ request: SynapseRequest, **kwargs: object
+ ) -> tuple[int, JsonDict]:
+ return 200, {}
+
+ res = JsonResource(self.homeserver)
+ res.register_paths(
+ "POST", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
+ )
+
+ channel = make_request(
+ self.reactor,
+ FakeSite(res, self.reactor),
+ b"POST",
+ b"/_matrix/foo",
+ {},
+ # Set the `Content-Length` value to be smaller than the actual content size
+ custom_headers=[("Content-Length", "1")],
+ # The request should disconnect early so don't await the result
+ await_result=False,
+ )
+
+ self.reactor.advance(0.1)
+ self.assertEqual(channel.code, 400)
+
+ def test_content_smaller_than_content_length(self) -> None:
+ """
+ HTTP requests with content size smaller than Content-Length should be rejected with 400.
+ """
+
+ def _callback(
+ request: SynapseRequest, **kwargs: object
+ ) -> tuple[int, JsonDict]:
+ return 200, {}
+
+ res = JsonResource(self.homeserver)
+ res.register_paths(
+ "POST", [re.compile("^/_matrix/foo$")], _callback, "test_servlet"
+ )
+
+ channel = make_request(
+ self.reactor,
+ FakeSite(res, self.reactor),
+ b"POST",
+ b"/_matrix/foo",
+ {},
+ # Set the `Content-Length` value to be larger than the actual content size
+ custom_headers=[("Content-Length", "10")],
+ # The request should disconnect early so don't await the result
+ await_result=False,
+ )
+
+ self.reactor.advance(0.1)
+ self.assertEqual(channel.code, 400)
+
class OptionsResourceTests(unittest.TestCase):
def setUp(self) -> None:
From ba774e2311022b997687c76d4bbed1743974c1f8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 9 Dec 2025 23:01:56 +0000
Subject: [PATCH 15/59] Bump ruff from 0.14.5 to 0.14.6 in the
minor-and-patches group across 1 directory (#19296)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the minor-and-patches group with 1 update in the / directory:
[ruff](https://github.com/astral-sh/ruff).
Updates `ruff` from 0.14.5 to 0.14.6
Release notes

Sourced from ruff's releases.

0.14.6

Released on 2025-11-21.

Preview features
- [flake8-bandit] Support new PySNMP API paths (S508, S509) (#21374)

Bug fixes
- Adjust own-line comment placement between branches (#21185)
- Avoid syntax error when formatting attribute expressions with outer parentheses, parenthesized value, and trailing comment on value (#20418)
- Fix panic when formatting comments in unary expressions (#21501)
- Respect `fmt: skip` for compound statements on a single line (#20633)
- [refurb] Fix FURB103 autofix (#21454)
- [ruff] Fix false positive for complex conversion specifiers in logging-eager-conversion (RUF065) (#21464)

Rule changes
- [ruff] Avoid false positive on ClassVar reassignment (RUF012) (#21478)

CLI
- Render hyperlinks for lint errors (#21514)
- Add a `ruff analyze` option to skip over imports in TYPE_CHECKING blocks (#21472)

Documentation
- Limit `eglot-format` hook to eglot-managed Python buffers (#21459)
- Mention `force-exclude` in "Configuration > Python file discovery" (#21500)

Install ruff 0.14.6

Install prebuilt binaries via shell script:
curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/ruff/releases/download/0.14.6/ruff-installer.sh | sh

... (truncated)
Changelog

Sourced from ruff's changelog. The 0.14.6 entries duplicate the release notes above.
Commits
- 59c6cb5 Bump 0.14.6 (#21558)
- 54dba15 [ty] Improve debug messages when imports fail (#21555)
- 1af3185 [ty] Add support for relative import completions
- 553e568 [ty] Refactor detection of import statements for completions
- cdef3f5 [ty] Use dedicated collector for completions
- 6178822 [ty] Attach subdiagnostics to unresolved-import errors for relative imports...
- 6b7adb0 [ty] support PEP 613 type aliases (#21394)
- 06941c1 [ty] More low-hanging fruit for inlay hint goto-definition (#21548)
- eb7c098 [ty] implement TypedDict structural assignment (#21467)
- 1b28fc1 [ty] Add more random TypeDetails and tests (#21546)
- Additional commits viewable in the compare view
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's major version (unless you unignore this specific
dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's minor version (unless you unignore this specific
dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR
and stop Dependabot creating any more for the specific dependency
(unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore
conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will
remove the ignore condition of the specified dependency and ignore
conditions
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
poetry.lock | 42 +++++++++++++++++++++---------------------
pyproject.toml | 2 +-
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 14b1f085b8..8c9256c892 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2604,31 +2604,31 @@ files = [
[[package]]
name = "ruff"
-version = "0.14.5"
+version = "0.14.6"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
- {file = "ruff-0.14.5-py3-none-linux_armv6l.whl", hash = "sha256:f3b8248123b586de44a8018bcc9fefe31d23dda57a34e6f0e1e53bd51fd63594"},
- {file = "ruff-0.14.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f7a75236570318c7a30edd7f5491945f0169de738d945ca8784500b517163a72"},
- {file = "ruff-0.14.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d146132d1ee115f8802356a2dc9a634dbf58184c51bff21f313e8cd1c74899a"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2380596653dcd20b057794d55681571a257a42327da8894b93bbd6111aa801f"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d1fa985a42b1f075a098fa1ab9d472b712bdb17ad87a8ec86e45e7fa6273e68"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88f0770d42b7fa02bbefddde15d235ca3aa24e2f0137388cc15b2dcbb1f7c7a7"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3676cb02b9061fee7294661071c4709fa21419ea9176087cb77e64410926eb78"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b595bedf6bc9cab647c4a173a61acf4f1ac5f2b545203ba82f30fcb10b0318fb"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f55382725ad0bdb2e8ee2babcbbfb16f124f5a59496a2f6a46f1d9d99d93e6e2"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7497d19dce23976bdaca24345ae131a1d38dcfe1b0850ad8e9e6e4fa321a6e19"},
- {file = "ruff-0.14.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:410e781f1122d6be4f446981dd479470af86537fb0b8857f27a6e872f65a38e4"},
- {file = "ruff-0.14.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c01be527ef4c91a6d55e53b337bfe2c0f82af024cc1a33c44792d6844e2331e1"},
- {file = "ruff-0.14.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f66e9bb762e68d66e48550b59c74314168ebb46199886c5c5aa0b0fbcc81b151"},
- {file = "ruff-0.14.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d93be8f1fa01022337f1f8f3bcaa7ffee2d0b03f00922c45c2207954f351f465"},
- {file = "ruff-0.14.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c135d4b681f7401fe0e7312017e41aba9b3160861105726b76cfa14bc25aa367"},
- {file = "ruff-0.14.5-py3-none-win32.whl", hash = "sha256:c83642e6fccfb6dea8b785eb9f456800dcd6a63f362238af5fc0c83d027dd08b"},
- {file = "ruff-0.14.5-py3-none-win_amd64.whl", hash = "sha256:9d55d7af7166f143c94eae1db3312f9ea8f95a4defef1979ed516dbb38c27621"},
- {file = "ruff-0.14.5-py3-none-win_arm64.whl", hash = "sha256:4b700459d4649e2594b31f20a9de33bc7c19976d4746d8d0798ad959621d64a4"},
- {file = "ruff-0.14.5.tar.gz", hash = "sha256:8d3b48d7d8aad423d3137af7ab6c8b1e38e4de104800f0d596990f6ada1a9fc1"},
+ {file = "ruff-0.14.6-py3-none-linux_armv6l.whl", hash = "sha256:d724ac2f1c240dbd01a2ae98db5d1d9a5e1d9e96eba999d1c48e30062df578a3"},
+ {file = "ruff-0.14.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9f7539ea257aa4d07b7ce87aed580e485c40143f2473ff2f2b75aee003186004"},
+ {file = "ruff-0.14.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7f6007e55b90a2a7e93083ba48a9f23c3158c433591c33ee2e99a49b889c6332"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a8e7b9d73d8728b68f632aa8e824ef041d068d231d8dbc7808532d3629a6bef"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d50d45d4553a3ebcbd33e7c5e0fe6ca4aafd9a9122492de357205c2c48f00775"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:118548dd121f8a21bfa8ab2c5b80e5b4aed67ead4b7567790962554f38e598ce"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:57256efafbfefcb8748df9d1d766062f62b20150691021f8ab79e2d919f7c11f"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff18134841e5c68f8e5df1999a64429a02d5549036b394fafbe410f886e1989d"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29c4b7ec1e66a105d5c27bd57fa93203637d66a26d10ca9809dc7fc18ec58440"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167843a6f78680746d7e226f255d920aeed5e4ad9c03258094a2d49d3028b105"},
+ {file = "ruff-0.14.6-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:16a33af621c9c523b1ae006b1b99b159bf5ac7e4b1f20b85b2572455018e0821"},
+ {file = "ruff-0.14.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1432ab6e1ae2dc565a7eea707d3b03a0c234ef401482a6f1621bc1f427c2ff55"},
+ {file = "ruff-0.14.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4c55cfbbe7abb61eb914bfd20683d14cdfb38a6d56c6c66efa55ec6570ee4e71"},
+ {file = "ruff-0.14.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:efea3c0f21901a685fff4befda6d61a1bf4cb43de16da87e8226a281d614350b"},
+ {file = "ruff-0.14.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:344d97172576d75dc6afc0e9243376dbe1668559c72de1864439c4fc95f78185"},
+ {file = "ruff-0.14.6-py3-none-win32.whl", hash = "sha256:00169c0c8b85396516fdd9ce3446c7ca20c2a8f90a77aa945ba6b8f2bfe99e85"},
+ {file = "ruff-0.14.6-py3-none-win_amd64.whl", hash = "sha256:390e6480c5e3659f8a4c8d6a0373027820419ac14fa0d2713bd8e6c3e125b8b9"},
+ {file = "ruff-0.14.6-py3-none-win_arm64.whl", hash = "sha256:d43c81fbeae52cfa8728d8766bbf46ee4298c888072105815b392da70ca836b2"},
+ {file = "ruff-0.14.6.tar.gz", hash = "sha256:6f0c742ca6a7783a736b867a263b9a7a80a45ce9bee391eeda296895f1b4e1cc"},
]
[[package]]
@@ -3485,4 +3485,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
-content-hash = "98b9062f48205a3bcc99b43ae665083d360a15d4a208927fa978df9c36fd5315"
+content-hash = "960ddae65fde8574f0f36b6988622fc4baf7646823c36699c5cd4773cad8b0ed"
diff --git a/pyproject.toml b/pyproject.toml
index 38f5990cc7..70d5e3d573 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -330,7 +330,7 @@ generate-setup-file = true
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
-ruff = "0.14.5"
+ruff = "0.14.6"
# Typechecking
lxml-stubs = ">=0.4.0"
From 3aaa2e80b2eb43bed618b9f13db30f3d4585dae9 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 10 Dec 2025 14:46:47 +0000
Subject: [PATCH 16/59] Switch the build backend from `poetry-core` to
`maturin` (#19234)
---
.ci/scripts/auditwheel_wrapper.py | 146 ----------------------
changelog.d/19234.misc | 1 +
pyproject.toml | 193 ++++++++++++++++--------------
3 files changed, 105 insertions(+), 235 deletions(-)
delete mode 100755 .ci/scripts/auditwheel_wrapper.py
create mode 100644 changelog.d/19234.misc
diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py
deleted file mode 100755
index 9832821221..0000000000
--- a/.ci/scripts/auditwheel_wrapper.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/env python
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2023 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# .
-#
-# Originally licensed under the Apache License, Version 2.0:
-# .
-#
-# [This file includes modifications made by New Vector Limited]
-#
-#
-
-# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
-# compatible wheel, if so rename the wheel before repairing it.
-
-import argparse
-import os
-import subprocess
-from zipfile import ZipFile
-
-from packaging.tags import Tag
-from packaging.utils import parse_wheel_filename
-from packaging.version import Version
-
-
-def check_is_abi3_compatible(wheel_file: str) -> None:
- """Check the contents of the built wheel for any `.so` files that are *not*
- abi3 compatible.
- """
-
- with ZipFile(wheel_file, "r") as wheel:
- for file in wheel.namelist():
- if not file.endswith(".so"):
- continue
-
- if not file.endswith(".abi3.so"):
- raise Exception(f"Found non-abi3 lib: {file}")
-
-
-def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
- """Replaces the cpython wheel file with a ABI3 compatible wheel"""
-
- if tag.abi == "abi3":
- # Nothing to do.
- return wheel_file
-
- check_is_abi3_compatible(wheel_file)
-
- # HACK: it seems that some older versions of pip will consider a wheel marked
- # as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology
- # here; there are some clues in
- # https://github.com/pantsbuild/pants/pull/12857
- # https://github.com/pypa/pip/issues/9138
- # https://github.com/pypa/packaging/pull/319
- # Empirically this seems to work, note that macOS 11 and 10.16 are the same,
- # both versions are valid for backwards compatibility.
- platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
- abi3_tag = Tag(tag.interpreter, "abi3", platform)
-
- dirname = os.path.dirname(wheel_file)
- new_wheel_file = os.path.join(
- dirname,
- f"{name}-{version}-{abi3_tag}.whl",
- )
-
- os.rename(wheel_file, new_wheel_file)
-
- print("Renamed wheel to", new_wheel_file)
-
- return new_wheel_file
-
-
-def main(wheel_file: str, dest_dir: str, archs: str | None) -> None:
- """Entry point"""
-
- # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
- # normalizes the package name (i.e. it converts matrix_synapse ->
- # matrix-synapse), which is not what we want.
- _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
- name = os.path.basename(wheel_file).split("-")[0]
-
- if len(tags) != 1:
- # We expect only a wheel file with only a single tag
- raise Exception(f"Unexpectedly found multiple tags: {tags}")
-
- tag = next(iter(tags))
-
- if build:
- # We don't use build tags in Synapse
- raise Exception(f"Unexpected build tag: {build}")
-
- # If the wheel is for cpython then convert it into an abi3 wheel.
- if tag.interpreter.startswith("cp"):
- wheel_file = cpython(wheel_file, name, version, tag)
-
- # Finally, repair the wheel.
- if archs is not None:
- # If we are given archs then we are on macos and need to use
- # `delocate-listdeps`.
- subprocess.run(["delocate-listdeps", wheel_file], check=True)
- subprocess.run(
- ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
- check=True,
- )
- else:
- subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
-
- parser.add_argument(
- "--wheel-dir",
- "-w",
- metavar="WHEEL_DIR",
- help="Directory to store delocated wheels",
- required=True,
- )
-
- parser.add_argument(
- "--require-archs",
- metavar="archs",
- default=None,
- )
-
- parser.add_argument(
- "wheel_file",
- metavar="WHEEL_FILE",
- )
-
- args = parser.parse_args()
-
- wheel_file = args.wheel_file
- wheel_dir = args.wheel_dir
- archs = args.require_archs
-
- main(wheel_file, wheel_dir, archs)
diff --git a/changelog.d/19234.misc b/changelog.d/19234.misc
new file mode 100644
index 0000000000..d79bc0b19f
--- /dev/null
+++ b/changelog.d/19234.misc
@@ -0,0 +1 @@
+Switch the build backend from `poetry-core` to `maturin`.
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 70d5e3d573..c009ecbf9b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -120,7 +120,7 @@ redis = ["txredisapi>=1.4.7", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
# If this is updated, don't forget to update the equivalent lines in
-# tool.poetry.group.dev.dependencies.
+# `dependency-groups.dev` below.
test = ["parameterized>=0.9.0", "idna>=3.3"]
# The duplication here is awful.
@@ -177,6 +177,85 @@ synapse_port_db = "synapse._scripts.synapse_port_db:main"
synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
+[tool.poetry]
+packages = [{ include = "synapse" }]
+
+[tool.poetry.build]
+# Compile our rust module when using `poetry install`. This is still required
+# while using `poetry` as the build frontend. Saves the developer from needing
+# to run both:
+#
+# $ poetry install
+# $ maturin develop
+script = "build_rust.py"
+# Create a `setup.py` file which will call the `build` method in our build
+# script.
+#
+# Our build script currently uses the "old" build method, where we define a
+# `build` method and `setup.py` calls it. Poetry developers have mentioned that
+# this will eventually be removed:
+# https://github.com/matrix-org/synapse/pull/14949#issuecomment-1418001859
+#
+# The new build method is defined here:
+# https://python-poetry.org/docs/building-extension-modules/#maturin-build-script
+# but is still marked as "unstable" at the time of writing. This would also
+# bump our minimum `poetry-core` version to 1.5.0.
+#
+# We can just drop this work-around entirely when migrating away from
+# Poetry, so there's little motivation to update the build script.
+generate-setup-file = true
+
+# Dependencies used for developing Synapse itself.
+#
+# Hold off on migrating these to `dev-dependencies` (PEP 735) for now until
+# Poetry 2.2.0+, pip 25.1+ are more widely available.
+[tool.poetry.group.dev.dependencies]
+# We pin development dependencies in poetry.lock so that our tests don't start
+# failing on new releases. Keeping lower bounds loose here means that dependabot
+# can bump versions without having to update the content-hash in the lockfile.
+# This helps prevent merge conflicts when running a batch of dependabot updates.
+ruff = "0.14.6"
+
+# Typechecking
+lxml-stubs = ">=0.4.0"
+mypy = "*"
+mypy-zope = "*"
+types-bleach = ">=4.1.0"
+types-jsonschema = ">=3.2.0"
+types-netaddr = ">=0.8.0.6"
+types-opentracing = ">=2.4.2"
+types-Pillow = ">=8.3.4"
+types-psycopg2 = ">=2.9.9"
+types-pyOpenSSL = ">=20.0.7"
+types-PyYAML = ">=5.4.10"
+types-requests = ">=2.26.0"
+types-setuptools = ">=57.4.0"
+
+# Dependencies which are exclusively required by unit test code. This is
+# NOT a list of all modules that are necessary to run the unit tests.
+# Tests assume that all optional dependencies are installed.
+#
+# If this is updated, don't forget to update the equivalent lines in
+# project.optional-dependencies.test.
+parameterized = ">=0.9.0"
+idna = ">=3.3"
+
+# The following are used by the release script
+click = ">=8.1.3"
+# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
+GitPython = ">=3.1.20"
+markdown-it-py = ">=3.0.0"
+pygithub = ">=1.59"
+# The following are executed as commands by the release script.
+twine = "*"
+# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
+towncrier = ">=18.6.0rc1"
+
+# Used for checking the Poetry lockfile
+tomli = ">=1.2.3"
+
+# Used for checking the schema delta files
+sqlglot = ">=28.0.0"
[tool.towncrier]
package = "synapse"
@@ -291,88 +370,29 @@ line-ending = "auto"
[tool.maturin]
manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"
-
-[tool.poetry]
-packages = [
- { include = "synapse" },
-]
-include = [
- { path = "AUTHORS.rst", format = "sdist" },
- { path = "book.toml", format = "sdist" },
- { path = "changelog.d", format = "sdist" },
- { path = "CHANGES.md", format = "sdist" },
- { path = "CONTRIBUTING.md", format = "sdist" },
- { path = "demo", format = "sdist" },
- { path = "docs", format = "sdist" },
- { path = "INSTALL.md", format = "sdist" },
- { path = "mypy.ini", format = "sdist" },
- { path = "scripts-dev", format = "sdist" },
- { path = "synmark", format="sdist" },
- { path = "sytest-blacklist", format = "sdist" },
- { path = "tests", format = "sdist" },
- { path = "UPGRADE.rst", format = "sdist" },
- { path = "Cargo.toml", format = "sdist" },
- { path = "Cargo.lock", format = "sdist" },
- { path = "rust/Cargo.toml", format = "sdist" },
- { path = "rust/build.rs", format = "sdist" },
- { path = "rust/src/**", format = "sdist" },
+python-source = "."
+sdist-include = [
+ "AUTHORS.rst",
+ "book.toml",
+ "changelog.d",
+ "CHANGES.md",
+ "CONTRIBUTING.md",
+ "demo",
+ "docs",
+ "INSTALL.md",
+ "mypy.ini",
+ "scripts-dev",
+ "synmark",
+ "sytest-blacklist",
+ "tests",
+ "UPGRADE.rst",
+ "Cargo.toml",
+ "Cargo.lock",
+ "rust/Cargo.toml",
+ "rust/build.rs",
+ "rust/src/**",
]
-exclude = [
- { path = "synapse/*.so", format = "sdist"}
-]
-
-[tool.poetry.build]
-script = "build_rust.py"
-generate-setup-file = true
-
-[tool.poetry.group.dev.dependencies]
-# We pin development dependencies in poetry.lock so that our tests don't start
-# failing on new releases. Keeping lower bounds loose here means that dependabot
-# can bump versions without having to update the content-hash in the lockfile.
-# This helps prevents merge conflicts when running a batch of dependabot updates.
-ruff = "0.14.6"
-
-# Typechecking
-lxml-stubs = ">=0.4.0"
-mypy = "*"
-mypy-zope = "*"
-types-bleach = ">=4.1.0"
-types-jsonschema = ">=3.2.0"
-types-netaddr = ">=0.8.0.6"
-types-opentracing = ">=2.4.2"
-types-Pillow = ">=8.3.4"
-types-psycopg2 = ">=2.9.9"
-types-pyOpenSSL = ">=20.0.7"
-types-PyYAML = ">=5.4.10"
-types-requests = ">=2.26.0"
-types-setuptools = ">=57.4.0"
-
-# Dependencies which are exclusively required by unit test code. This is
-# NOT a list of all modules that are necessary to run the unit tests.
-# Tests assume that all optional dependencies are installed.
-#
-# If this is updated, don't forget to update the equivalent lines in
-# project.optional-dependencies.test.
-parameterized = ">=0.9.0"
-idna = ">=3.3"
-
-# The following are used by the release script
-click = ">=8.1.3"
-# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
-GitPython = ">=3.1.20"
-markdown-it-py = ">=3.0.0"
-pygithub = ">=1.59"
-# The following are executed as commands by the release script.
-twine = "*"
-# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
-towncrier = ">=18.6.0rc1"
-
-# Used for checking the Poetry lockfile
-tomli = ">=1.2.3"
-
-# Used for checking the schema delta files
-sqlglot = ">=28.0.0"
-
+sdist-exclude = ["synapse/*.so"]
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
@@ -381,8 +401,8 @@ sqlglot = ">=28.0.0"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
-requires = ["poetry-core>=2.0.0,<=2.1.3", "setuptools_rust>=1.3,<=1.11.1"]
-build-backend = "poetry.core.masonry.api"
+requires = ["maturin>=1.0,<2.0"]
+build-backend = "maturin"
[tool.cibuildwheel]
@@ -419,8 +439,3 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
-
-
-[tool.cibuildwheel.linux]
-# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
-repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
From cdf286d405c606ab2f9133d660289097f70f5322 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Thu, 11 Dec 2025 17:58:27 +0000
Subject: [PATCH 17/59] Use `uv` to test full set of minimum deps in CI
(#19289)
Stemming from #19274, this updates the `olddeps` CI to test against not
just the minimum versions of our explicit dependencies, but also the
minimum versions of all implicit (transitive) dependencies that are
pulled in by the explicit dependencies themselves.
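As a rough illustration of what resolving to the lowest versions buys us (this is not part of the PR), the sketch below checks that each runtime dependency with a declared `>=` lower bound is actually installed at that bound. It assumes Python 3.11+ (for `tomllib`), the `packaging` library, and that it is run from the repository root:

```python
# Illustrative sketch: after `uv pip install --system --resolution=lowest .[all,test]`,
# verify that dependencies with a `>=` lower bound are installed at exactly that bound.
from importlib import metadata

import tomllib
from packaging.requirements import Requirement
from packaging.version import Version

with open("pyproject.toml", "rb") as f:
    project = tomllib.load(f)["project"]

for dep in project.get("dependencies", []):
    req = Requirement(dep)
    lower = next((s.version for s in req.specifier if s.operator == ">="), None)
    if lower is None:
        continue
    try:
        installed = metadata.version(req.name)
    except metadata.PackageNotFoundError:
        # Skipped extras or dependencies excluded by environment markers.
        continue
    if Version(installed) != Version(lower):
        print(f"{req.name}: installed {installed}, declared lower bound {lower}")
```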
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct (run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---
.ci/scripts/prepare_old_deps.sh | 39 ------------
.github/workflows/tests.yml | 14 ++--
changelog.d/19289.misc | 1 +
poetry.lock | 109 ++++++++++++++++++++++++--------
pyproject.toml | 58 +++++++++++++++--
5 files changed, 141 insertions(+), 80 deletions(-)
delete mode 100755 .ci/scripts/prepare_old_deps.sh
create mode 100644 changelog.d/19289.misc
diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh
deleted file mode 100755
index 29d281dc23..0000000000
--- a/.ci/scripts/prepare_old_deps.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-# this script is run by GitHub Actions in a plain `jammy` container; it
-# - installs the minimal system requirements, and poetry;
-# - patches the project definition file to refer to old versions only;
-# - creates a venv with these old versions using poetry; and finally
-# - invokes `trial` to run the tests with old deps.
-
-set -ex
-
-# Prevent virtualenv from auto-updating pip to an incompatible version
-export VIRTUALENV_NO_DOWNLOAD=1
-
-# TODO: in the future, we could use an implementation of
-# https://github.com/python-poetry/poetry/issues/3527
-# https://github.com/pypa/pip/issues/8085
-# to select the lowest possible versions, rather than resorting to this sed script.
-
-# Patch the project definitions in-place:
-# - `-E` use extended regex syntax.
-# - Don't modify the line that defines required Python versions.
-# - Replace all lower and tilde bounds with exact bounds.
-# - Replace all caret bounds with exact bounds.
-# - Delete all lines referring to psycopg2 - so no testing of postgres support.
-# - Use pyopenssl 17.0, which is the oldest version that works with
-# a `cryptography` compiled against OpenSSL 1.1.
-# - Omit systemd: we're not logging to journal here.
-
-sed -i -E '
- /^\s*requires-python\s*=/b
- s/[~>]=/==/g
- s/\^/==/g
- /psycopg2/d
- s/pyOpenSSL\s*==\s*16\.0\.0"/pyOpenSSL==17.0.0"/
- /systemd/d
-' pyproject.toml
-
-echo "::group::Patched pyproject.toml"
-cat pyproject.toml
-echo "::endgroup::"
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index aff2832b94..cab6bbdefe 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -452,14 +452,12 @@ jobs:
python-version: '3.10'
- name: Prepare old deps
- if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
- run: .ci/scripts/prepare_old_deps.sh
-
- # Note: we install using `pip` here, not poetry. `poetry install` ignores the
- # build-system section (https://github.com/python-poetry/poetry/issues/6154), but
- # we explicitly want to test that you can `pip install` using the oldest version
- # of poetry-core and setuptools-rust.
- - run: pip install .[all,test]
+ # Note: we install using `uv` here, not poetry or pip to allow us to test with the
+ # minimum version of all dependencies, both those explicitly specified and those
+ # implicitly brought in by the explicit dependencies.
+ run: |
+ pip install uv
+ uv pip install --system --resolution=lowest .[all,test]
# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
diff --git a/changelog.d/19289.misc b/changelog.d/19289.misc
new file mode 100644
index 0000000000..4ad0dbc430
--- /dev/null
+++ b/changelog.d/19289.misc
@@ -0,0 +1 @@
+Use `uv` to test olddeps to ensure all transitive dependencies use minimum versions.
diff --git a/poetry.lock b/poetry.lock
index 8c9256c892..4dacae38a4 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -31,7 +31,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
+markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
files = [
{file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"},
{file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"},
@@ -481,7 +481,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -506,7 +506,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -556,7 +556,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"redis\""
+markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"},
{file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"},
@@ -879,7 +879,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -1017,7 +1017,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
+markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1119,7 +1119,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.8"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"url-preview\""
+markers = "extra == \"url-preview\" or extra == \"all\""
files = [
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"},
{file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"},
@@ -1405,7 +1405,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
+markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1648,7 +1648,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1838,7 +1838,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"postgres\""
+markers = "extra == \"postgres\" or extra == \"all\""
files = [
{file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"},
{file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"},
@@ -1856,7 +1856,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1872,7 +1872,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -2154,7 +2154,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"cache-memory\""
+markers = "extra == \"cache-memory\" or extra == \"all\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2207,6 +2207,63 @@ typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"]
+[[package]]
+name = "pyparsing"
+version = "3.2.5"
+description = "pyparsing - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+ {file = "pyparsing-3.2.5-py3-none-any.whl", hash = "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e"},
+ {file = "pyparsing-3.2.5.tar.gz", hash = "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pyrsistent"
+version = "0.20.0"
+description = "Persistent/Functional/Immutable data structures"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+ {file = "pyrsistent-0.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c3aba3e01235221e5b229a6c05f585f344734bd1ad42a8ac51493d74722bbce"},
+ {file = "pyrsistent-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1beb78af5423b879edaf23c5591ff292cf7c33979734c99aa66d5914ead880f"},
+ {file = "pyrsistent-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21cc459636983764e692b9eba7144cdd54fdec23ccdb1e8ba392a63666c60c34"},
+ {file = "pyrsistent-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5ac696f02b3fc01a710427585c855f65cd9c640e14f52abe52020722bb4906b"},
+ {file = "pyrsistent-0.20.0-cp310-cp310-win32.whl", hash = "sha256:0724c506cd8b63c69c7f883cc233aac948c1ea946ea95996ad8b1380c25e1d3f"},
+ {file = "pyrsistent-0.20.0-cp310-cp310-win_amd64.whl", hash = "sha256:8441cf9616d642c475684d6cf2520dd24812e996ba9af15e606df5f6fd9d04a7"},
+ {file = "pyrsistent-0.20.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0f3b1bcaa1f0629c978b355a7c37acd58907390149b7311b5db1b37648eb6958"},
+ {file = "pyrsistent-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cdd7ef1ea7a491ae70d826b6cc64868de09a1d5ff9ef8d574250d0940e275b8"},
+ {file = "pyrsistent-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cae40a9e3ce178415040a0383f00e8d68b569e97f31928a3a8ad37e3fde6df6a"},
+ {file = "pyrsistent-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6288b3fa6622ad8a91e6eb759cfc48ff3089e7c17fb1d4c59a919769314af224"},
+ {file = "pyrsistent-0.20.0-cp311-cp311-win32.whl", hash = "sha256:7d29c23bdf6e5438c755b941cef867ec2a4a172ceb9f50553b6ed70d50dfd656"},
+ {file = "pyrsistent-0.20.0-cp311-cp311-win_amd64.whl", hash = "sha256:59a89bccd615551391f3237e00006a26bcf98a4d18623a19909a2c48b8e986ee"},
+ {file = "pyrsistent-0.20.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:09848306523a3aba463c4b49493a760e7a6ca52e4826aa100ee99d8d39b7ad1e"},
+ {file = "pyrsistent-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a14798c3005ec892bbada26485c2eea3b54109cb2533713e355c806891f63c5e"},
+ {file = "pyrsistent-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b14decb628fac50db5e02ee5a35a9c0772d20277824cfe845c8a8b717c15daa3"},
+ {file = "pyrsistent-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e2c116cc804d9b09ce9814d17df5edf1df0c624aba3b43bc1ad90411487036d"},
+ {file = "pyrsistent-0.20.0-cp312-cp312-win32.whl", hash = "sha256:e78d0c7c1e99a4a45c99143900ea0546025e41bb59ebc10182e947cf1ece9174"},
+ {file = "pyrsistent-0.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:4021a7f963d88ccd15b523787d18ed5e5269ce57aa4037146a2377ff607ae87d"},
+ {file = "pyrsistent-0.20.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:79ed12ba79935adaac1664fd7e0e585a22caa539dfc9b7c7c6d5ebf91fb89054"},
+ {file = "pyrsistent-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f920385a11207dc372a028b3f1e1038bb244b3ec38d448e6d8e43c6b3ba20e98"},
+ {file = "pyrsistent-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f5c2d012671b7391803263419e31b5c7c21e7c95c8760d7fc35602353dee714"},
+ {file = "pyrsistent-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef3992833fbd686ee783590639f4b8343a57f1f75de8633749d984dc0eb16c86"},
+ {file = "pyrsistent-0.20.0-cp38-cp38-win32.whl", hash = "sha256:881bbea27bbd32d37eb24dd320a5e745a2a5b092a17f6debc1349252fac85423"},
+ {file = "pyrsistent-0.20.0-cp38-cp38-win_amd64.whl", hash = "sha256:6d270ec9dd33cdb13f4d62c95c1a5a50e6b7cdd86302b494217137f760495b9d"},
+ {file = "pyrsistent-0.20.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ca52d1ceae015859d16aded12584c59eb3825f7b50c6cfd621d4231a6cc624ce"},
+ {file = "pyrsistent-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b318ca24db0f0518630e8b6f3831e9cba78f099ed5c1d65ffe3e023003043ba0"},
+ {file = "pyrsistent-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fed2c3216a605dc9a6ea50c7e84c82906e3684c4e80d2908208f662a6cbf9022"},
+ {file = "pyrsistent-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e14c95c16211d166f59c6611533d0dacce2e25de0f76e4c140fde250997b3ca"},
+ {file = "pyrsistent-0.20.0-cp39-cp39-win32.whl", hash = "sha256:f058a615031eea4ef94ead6456f5ec2026c19fb5bd6bfe86e9665c4158cf802f"},
+ {file = "pyrsistent-0.20.0-cp39-cp39-win_amd64.whl", hash = "sha256:58b8f6366e152092194ae68fefe18b9f0b4f89227dfd86a07770c3d86097aebf"},
+ {file = "pyrsistent-0.20.0-py3-none-any.whl", hash = "sha256:c55acc4733aad6560a7f5f818466631f07efc001fd023f34a6c203f8b6df0f0b"},
+ {file = "pyrsistent-0.20.0.tar.gz", hash = "sha256:4c48f78f62ab596c679086084d0dd13254ae4f3d6c72a83ffdf5ebdef8f265a4"},
+]
+
[[package]]
name = "pysaml2"
version = "7.5.0"
@@ -2214,7 +2271,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2239,7 +2296,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2267,7 +2324,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -2671,7 +2728,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"sentry\""
+markers = "extra == \"sentry\" or extra == \"all\""
files = [
{file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"},
{file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"},
@@ -2881,7 +2938,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2897,7 +2954,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2970,7 +3027,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"opentracing\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -3104,7 +3161,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"redis\""
+markers = "extra == \"redis\" or extra == \"all\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@@ -3350,7 +3407,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"all\" or extra == \"saml2\""
+markers = "extra == \"saml2\" or extra == \"all\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
@@ -3468,15 +3525,15 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
-all = ["authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pympler", "pysaml2", "sentry-sdk", "txredisapi"]
+all = ["authlib", "defusedxml", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pympler", "pysaml2", "pytz", "sentry-sdk", "thrift", "tornado", "txredisapi"]
cache-memory = ["pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
-opentracing = ["jaeger-client", "opentracing"]
+opentracing = ["jaeger-client", "opentracing", "thrift", "tornado"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
redis = ["hiredis", "txredisapi"]
-saml2 = ["pysaml2"]
+saml2 = ["defusedxml", "pysaml2", "pytz"]
sentry = ["sentry-sdk"]
systemd = ["systemd-python"]
test = ["idna", "parameterized"]
@@ -3485,4 +3542,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
-content-hash = "960ddae65fde8574f0f36b6988622fc4baf7646823c36699c5cd4773cad8b0ed"
+content-hash = "abbbdff591a306b56cc8890dbb2f477ac5f1a2d328baa6409e01084abc655bbf"
diff --git a/pyproject.toml b/pyproject.toml
index c009ecbf9b..182861ca39 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,7 +42,8 @@ dependencies = [
"Twisted[tls]>=21.2.0",
"treq>=21.5.0",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
- "pyOpenSSL>=16.0.0",
+ # pyOpenSSL 16.2.0 fixes compatibility with OpenSSL 1.1.0.
+ "pyOpenSSL>=16.2.0",
"PyYAML>=5.3",
"pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7",
@@ -95,6 +96,20 @@ dependencies = [
# This is used for parsing multipart responses
"python-multipart>=0.0.9",
+
+ # Transitive dependency constraints
+ # These dependencies aren't directly required by Synapse.
+ # However, in order for Synapse to build, Synapse requires a higher minimum version
+ # for these dependencies than the minimum specified by the direct dependency.
+ # We should periodically check to see if these dependencies are still necessary and
+ # remove any that are no longer required.
+ "cffi>=1.15", # via cryptography
+ "pynacl>=1.3", # via signedjson
+ "pyparsing>=2.4", # via packaging
+ "pyrsistent>=0.18.0", # via jsonschema
+ "requests>=2.16.0", # 2.16.0+ no longer vendors urllib3, avoiding Python 3.10+ incompatibility
+ "urllib3>=1.26.5", # via treq; 1.26.5 fixes Python 3.10+ collections.abc compatibility
+ "zope-interface>=6.2", # via twisted
]
[project.optional-dependencies]
@@ -104,7 +119,16 @@ postgres = [
"psycopg2cffi>=2.8;platform_python_implementation == 'PyPy'",
"psycopg2cffi-compat==1.1;platform_python_implementation == 'PyPy'",
]
-saml2 = ["pysaml2>=4.5.0"]
+saml2 = [
+ "pysaml2>=4.5.0",
+
+ # Transitive dependencies from pysaml2
+ # These dependencies aren't directly required by Synapse.
+ # However, in order for Synapse to build, Synapse requires a higher minimum version
+ # for these dependencies than the minimum specified by the direct dependency.
+ "defusedxml>=0.7.1", # via pysaml2
+ "pytz>=2018.3", # via pysaml2
+]
oidc = ["authlib>=0.15.1"]
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
@@ -112,13 +136,23 @@ oidc = ["authlib>=0.15.1"]
systemd = ["systemd-python>=231"]
url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"]
-opentracing = ["jaeger-client>=4.2.0", "opentracing>=2.2.0"]
+opentracing = [
+ "jaeger-client>=4.2.0",
+ "opentracing>=2.2.0",
+
+ # Transitive dependencies from jaeger-client
+ # These dependencies aren't directly required by Synapse.
+ # However, in order for Synapse to build, Synapse requires a higher minimum version
+ # for these dependencies than the minimum specified by the direct dependency.
+ "thrift>=0.10", # via jaeger-client
+ "tornado>=6.0", # via jaeger-client
+]
jwt = ["authlib"]
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
-redis = ["txredisapi>=1.4.7", "hiredis"]
+redis = ["txredisapi>=1.4.7", "hiredis>=0.3"]
# Required to use experimental `caches.track_memory_usage` config option.
-cache-memory = ["pympler"]
+cache-memory = ["pympler>=1.0"]
# If this is updated, don't forget to update the equivalent lines in
# `dependency-groups.dev` below.
test = ["parameterized>=0.9.0", "idna>=3.3"]
@@ -149,12 +183,22 @@ all = [
# opentracing
"jaeger-client>=4.2.0", "opentracing>=2.2.0",
# redis
- "txredisapi>=1.4.7", "hiredis",
+ "txredisapi>=1.4.7", "hiredis>=0.3",
# cache-memory
- "pympler",
+ # 1.0 added support for python 3.10, our current minimum supported python version
+ "pympler>=1.0",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
+
+ # Transitive dependencies
+ # These dependencies aren't directly required by Synapse.
+ # However, in order for Synapse to build, Synapse requires a higher minimum version
+ # for these dependencies than the minimum specified by the direct dependency.
+ "defusedxml>=0.7.1", # via pysaml2
+ "pytz>=2018.3", # via pysaml2
+ "thrift>=0.10", # via jaeger-client
+ "tornado>=6.0", # via jaeger-client
]
[project.urls]
From dfd00a986fe12208dc4bb0fe0cc4e9770b621197 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 12 Dec 2025 10:02:57 +0000
Subject: [PATCH 18/59] Fix sliding sync performance slowdown for long-lived
 connections. (#19206)
Fixes https://github.com/element-hq/synapse/issues/19175
This PR moves the tracking of which lazy-loaded memberships we've sent for
each room out of the required state table. This stops that table from
continuously growing, which massively helps performance, as we pull out
all matching rows for the connection when we receive a request.
The new table is only read when we have data to send for a room, so we
end up reading far fewer rows from the DB, though we now read from that
table for every room we have events to return in, rather than once at
the start of the request.
For an explanation of how the new table works, see the
[comment](https://github.com/element-hq/synapse/blob/erikj/sss_better_membership_storage2/synapse/storage/schema/main/delta/93/02_sliding_sync_members.sql#L15-L38)
on the table schema.
The table is designed so that we can later prune old entries if we wish,
but that is not implemented in this PR.
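As a rough illustration of what the new tracking does (a sketch only, using
made-up helper names rather than the real Synapse API; the actual logic lives
in `RoomLazyMembershipChanges` and the new
`sliding_sync_connection_lazy_members` table):

```python
from dataclasses import dataclass, field
from time import time

# Mirrors the patch's LAZY_MEMBERS_UPDATE_INTERVAL of one hour (in ms).
UPDATE_INTERVAL_MS = 60 * 60 * 1000


@dataclass
class LazyMemberCacheSketch:
    """Toy per-(connection, room) view of which lazy memberships were sent."""

    # user_id -> rough timestamp (ms) of when we last needed their membership.
    last_seen: dict[str, int] = field(default_factory=dict)

    def needs_db_write(self, needed: set[str], now_ms: int) -> set[str]:
        """Users whose row must be inserted or refreshed: never sent, or stale."""
        return {
            uid
            for uid in needed
            if uid not in self.last_seen
            or now_ms - self.last_seen[uid] >= UPDATE_INTERVAL_MS
        }

    def record(self, user_ids: set[str], now_ms: int) -> None:
        # Called when a sync response actually relies on these memberships.
        for uid in user_ids:
            self.last_seen[uid] = now_ms

    def invalidate(self, changed_but_not_sent: set[str]) -> None:
        # Membership changed but we didn't send the change down, so the cached
        # entry no longer matches what the client has seen.
        for uid in changed_but_not_sent:
            self.last_seen.pop(uid, None)


cache = LazyMemberCacheSketch()
now_ms = int(time() * 1000)
cache.record({"@alice:example.org", "@bob:example.org"}, now_ms)
# A sync shortly afterwards that only needs Alice requires no DB write at all.
assert cache.needs_db_write({"@alice:example.org"}, now_ms + 1_000) == set()
```

The real rows are additionally scoped by connection position so that forked
connection positions can be handled safely; see the comments on the table
schema for the details.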
Reviewable commit-by-commit.
---------
Co-authored-by: Eric Eastwood
---
changelog.d/19206.bugfix | 1 +
scripts-dev/check_schema_delta.py | 1 -
synapse/handlers/sliding_sync/__init__.py | 394 ++++++++++--
synapse/handlers/sliding_sync/store.py | 10 +-
.../storage/databases/main/sliding_sync.py | 179 +++++-
.../main/delta/93/02_sliding_sync_members.sql | 60 ++
synapse/types/handlers/sliding_sync.py | 90 ++-
tests/handlers/test_sliding_sync.py | 561 +++++++++++------
.../client/sliding_sync/test_lists_filters.py | 12 +-
.../sliding_sync/test_rooms_required_state.py | 569 +++++++++++++++++-
.../client/sliding_sync/test_sliding_sync.py | 10 +-
tests/storage/test_sliding_sync_tables.py | 220 ++++---
12 files changed, 1748 insertions(+), 359 deletions(-)
create mode 100644 changelog.d/19206.bugfix
create mode 100644 synapse/storage/schema/main/delta/93/02_sliding_sync_members.sql
diff --git a/changelog.d/19206.bugfix b/changelog.d/19206.bugfix
new file mode 100644
index 0000000000..9cdfaa2571
--- /dev/null
+++ b/changelog.d/19206.bugfix
@@ -0,0 +1 @@
+Fix sliding sync performance slowdown for long-lived connections.
diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py
index d344083148..ba8aff3628 100755
--- a/scripts-dev/check_schema_delta.py
+++ b/scripts-dev/check_schema_delta.py
@@ -14,7 +14,6 @@
SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")
-
# The base branch we want to check against. We use the main development branch
# on the assumption that is what we are developing against.
DEVELOP_BRANCH = "develop"
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
index 68135e9cd3..bb2e785cfa 100644
--- a/synapse/handlers/sliding_sync/__init__.py
+++ b/synapse/handlers/sliding_sync/__init__.py
@@ -17,6 +17,7 @@
from itertools import chain
from typing import TYPE_CHECKING, AbstractSet, Mapping
+import attr
from prometheus_client import Histogram
from typing_extensions import assert_never
@@ -62,6 +63,7 @@
HaveSentRoomFlag,
MutablePerConnectionState,
PerConnectionState,
+ RoomLazyMembershipChanges,
RoomSyncConfig,
SlidingSyncConfig,
SlidingSyncResult,
@@ -106,7 +108,7 @@ def __init__(self, hs: "HomeServer"):
self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync
self.is_mine_id = hs.is_mine_id
- self.connection_store = SlidingSyncConnectionStore(self.store)
+ self.connection_store = SlidingSyncConnectionStore(self.clock, self.store)
self.extensions = SlidingSyncExtensionHandler(hs)
self.room_lists = SlidingSyncRoomLists(hs)
@@ -981,14 +983,15 @@ async def get_room_sync_data(
#
# Calculate the `StateFilter` based on the `required_state` for the room
required_state_filter = StateFilter.none()
- # The requested `required_state_map` with the lazy membership expanded and
- # `$ME` replaced with the user's ID. This allows us to see what membership we've
- # sent down to the client in the next request.
- #
- # Make a copy so we can modify it. Still need to be careful to make a copy of
- # the state key sets if we want to add/remove from them. We could make a deep
- # copy but this saves us some work.
- expanded_required_state_map = dict(room_sync_config.required_state_map)
+
+ # Keep track of which users' state we may need to fetch. We split this
+ # into explicit users and lazy loaded users.
+ explicit_user_state = set()
+ lazy_load_user_ids = set()
+
+ # Whether lazy-loading of room members is enabled.
+ lazy_load_room_members = False
+
if room_membership_for_user_at_to_token.membership not in (
Membership.INVITE,
Membership.KNOCK,
@@ -1036,7 +1039,6 @@ async def get_room_sync_data(
else:
required_state_types: list[tuple[str, str | None]] = []
num_wild_state_keys = 0
- lazy_load_room_members = False
num_others = 0
for (
state_type,
@@ -1068,43 +1070,60 @@ async def get_room_sync_data(
timeline_event.state_key
)
+ # The client needs to know the membership of everyone in
+ # the timeline we're returning.
+ lazy_load_user_ids.update(timeline_membership)
+
# Update the required state filter so we pick up the new
# membership
- for user_id in timeline_membership:
- required_state_types.append(
- (EventTypes.Member, user_id)
+ if limited or initial:
+ # If the timeline is limited, we only need to
+ # return the membership changes for people in
+ # the timeline.
+ for user_id in timeline_membership:
+ required_state_types.append(
+ (EventTypes.Member, user_id)
+ )
+ else:
+ # For non-limited timelines we always return all
+ # membership changes. This is so that clients
+ # who have fetched the full membership list
+ # already can continue to maintain it for
+ # non-limited syncs.
+ #
+ # This assumes that for non-limited syncs there
+ # won't be many membership changes that wouldn't
+ # have been included already (this can only
+ # happen if membership state was rolled back due
+ # to state resolution anyway).
+ #
+ # `None` is a wildcard in the `StateFilter`
+ required_state_types.append((EventTypes.Member, None))
+
+ # Record the extra members we're returning.
+ lazy_load_user_ids.update(
+ state_key
+ for event_type, state_key in room_state_delta_id_map
+ if event_type == EventTypes.Member
)
-
- # Add an explicit entry for each user in the timeline
- #
- # Make a new set or copy of the state key set so we can
- # modify it without affecting the original
- # `required_state_map`
- expanded_required_state_map[EventTypes.Member] = (
- expanded_required_state_map.get(
- EventTypes.Member, set()
- )
- | timeline_membership
- )
- elif state_key == StateValues.ME:
+ else:
num_others += 1
- required_state_types.append((state_type, user.to_string()))
+
# Replace `$ME` with the user's ID so we can deduplicate
# when someone requests the same state with `$ME` or with
# their user ID.
- #
- # Make a new set or copy of the state key set so we can
- # modify it without affecting the original
- # `required_state_map`
- expanded_required_state_map[EventTypes.Member] = (
- expanded_required_state_map.get(
- EventTypes.Member, set()
- )
- | {user.to_string()}
+ normalized_state_key = state_key
+ if state_key == StateValues.ME:
+ normalized_state_key = user.to_string()
+
+ if state_type == EventTypes.Member:
+ # Also track explicitly requested member state for
+ # lazy membership tracking.
+ explicit_user_state.add(normalized_state_key)
+
+ required_state_types.append(
+ (state_type, normalized_state_key)
)
- else:
- num_others += 1
- required_state_types.append((state_type, state_key))
set_tag(
SynapseTags.FUNC_ARG_PREFIX
@@ -1122,6 +1141,10 @@ async def get_room_sync_data(
required_state_filter = StateFilter.from_types(required_state_types)
+ # Remove any explicitly requested user state from the lazy-loaded set,
+ # as we track them separately.
+ lazy_load_user_ids -= explicit_user_state
+
# We need this base set of info for the response so let's just fetch it along
# with the `required_state` for the room
hero_room_state = [
@@ -1149,6 +1172,22 @@ async def get_room_sync_data(
# We can return all of the state that was requested if this was the first
# time we've sent the room down this connection.
room_state: StateMap[EventBase] = {}
+
+ # Includes the state for the heroes if we need them (may contain other
+ # state as well).
+ hero_membership_state: StateMap[EventBase] = {}
+
+ # By default, we mark all `lazy_load_user_ids` as being sent down
+ # for the first time in this sync. We later check if we sent any of them
+ # down previously and update `returned_user_id_to_last_seen_ts_map` if
+ # we have.
+ returned_user_id_to_last_seen_ts_map = {}
+ if lazy_load_room_members:
+ returned_user_id_to_last_seen_ts_map = dict.fromkeys(lazy_load_user_ids)
+ new_connection_state.room_lazy_membership[room_id] = RoomLazyMembershipChanges(
+ returned_user_id_to_last_seen_ts_map=returned_user_id_to_last_seen_ts_map
+ )
+
if initial:
room_state = await self.get_current_state_at(
room_id=room_id,
@@ -1156,28 +1195,97 @@ async def get_room_sync_data(
state_filter=state_filter,
to_token=to_token,
)
+
+ # The `room_state` includes the hero membership state if needed.
+ # We'll later filter this down so we don't need to do so here.
+ hero_membership_state = room_state
else:
+ assert from_token is not None
assert from_bound is not None
if prev_room_sync_config is not None:
+ # Define `all_required_user_state` as all user state we want, which
+ # is the explicitly requested members, any needed for lazy
+ # loading, and users whose membership has changed.
+ all_required_user_state = explicit_user_state | lazy_load_user_ids
+ for state_type, state_key in room_state_delta_id_map:
+ if state_type == EventTypes.Member:
+ all_required_user_state.add(state_key)
+
+ # We need to know what user state we previously sent down the
+ # connection so we can determine what has changed.
+ #
+ # We need to fetch all users whose memberships we may want
+ # to send down this sync. This includes (and matches
+ # `all_required_user_state`):
+ # 1. Explicitly requested user state
+ # 2. Lazy loaded members, i.e. users who appear in the
+ # timeline.
+ # 3. The users whose membership has changed in the room, i.e.
+ # in the state deltas.
+ #
+ # This is to correctly handle the cases where a user was
+ # previously sent down as a lazy loaded member:
+ # - and is now explicitly requested (so shouldn't be sent down
+ # again); or
+ # - their membership has changed (so we need to invalidate
+ # their entry in the lazy loaded table if we don't send the
+ # change down).
+ if all_required_user_state:
+ previously_returned_user_to_last_seen = (
+ await self.store.get_sliding_sync_connection_lazy_members(
+ connection_position=from_token.connection_position,
+ room_id=room_id,
+ user_ids=all_required_user_state,
+ )
+ )
+
+ # Update the room lazy membership changes to track which
+ # lazy loaded members were needed for this sync. This is so
+ # that we can correctly track the last time we sent down
+ # users' membership (and so can evict old membership state
+ # from the DB tables).
+ returned_user_id_to_last_seen_ts_map.update(
+ (user_id, timestamp)
+ for user_id, timestamp in previously_returned_user_to_last_seen.items()
+ if user_id in lazy_load_user_ids
+ )
+ else:
+ previously_returned_user_to_last_seen = {}
+
# Check if there are any changes to the required state config
# that we need to handle.
- changed_required_state_map, added_state_filter = (
- _required_state_changes(
- user.to_string(),
- prev_required_state_map=prev_room_sync_config.required_state_map,
- request_required_state_map=expanded_required_state_map,
- state_deltas=room_state_delta_id_map,
- )
+ changes_return = _required_state_changes(
+ user.to_string(),
+ prev_required_state_map=prev_room_sync_config.required_state_map,
+ request_required_state_map=room_sync_config.required_state_map,
+ previously_returned_lazy_user_ids=previously_returned_user_to_last_seen.keys(),
+ request_lazy_load_user_ids=lazy_load_user_ids,
+ state_deltas=room_state_delta_id_map,
)
+ changed_required_state_map = changes_return.changed_required_state_map
- if added_state_filter:
+ new_connection_state.room_lazy_membership[
+ room_id
+ ].invalidated_user_ids = changes_return.lazy_members_invalidated
+
+ # Add any previously returned explicit memberships to the lazy
+ # loaded table. This happens when a client requested explicit
+ # members and then converted them to lazy loading.
+ for user_id in changes_return.extra_users_to_add_to_lazy_cache:
+ # We don't know the right timestamp to use here, as we don't
+ # know the last time we would have sent the membership down.
+ # So we don't overwrite it if we have a timestamp already,
+ # and fall back to `None` (which means now) if we don't.
+ returned_user_id_to_last_seen_ts_map.setdefault(user_id, None)
+
+ if changes_return.added_state_filter:
# Some state entries got added, so we pull out the current
# state for them. If we don't do this we'd only send down new deltas.
state_ids = await self.get_current_state_ids_at(
room_id=room_id,
room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
- state_filter=added_state_filter,
+ state_filter=changes_return.added_state_filter,
to_token=to_token,
)
room_state_delta_id_map.update(state_ids)
@@ -1189,6 +1297,7 @@ async def get_room_sync_data(
# If the membership changed and we have to get heroes, get the remaining
# heroes from the state
+ hero_membership_state = {}
if hero_user_ids:
hero_membership_state = await self.get_current_state_at(
room_id=room_id,
@@ -1196,7 +1305,6 @@ async def get_room_sync_data(
state_filter=StateFilter.from_types(hero_room_state),
to_token=to_token,
)
- room_state.update(hero_membership_state)
required_room_state: StateMap[EventBase] = {}
if required_state_filter != StateFilter.none():
@@ -1219,7 +1327,7 @@ async def get_room_sync_data(
# Assemble heroes: extract the info from the state we just fetched
heroes: list[SlidingSyncResult.RoomResult.StrippedHero] = []
for hero_user_id in hero_user_ids:
- member_event = room_state.get((EventTypes.Member, hero_user_id))
+ member_event = hero_membership_state.get((EventTypes.Member, hero_user_id))
if member_event is not None:
heroes.append(
SlidingSyncResult.RoomResult.StrippedHero(
@@ -1281,7 +1389,7 @@ async def get_room_sync_data(
bump_stamp = 0
room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
- expanded_required_state_map
+ room_sync_config.required_state_map
)
if changed_required_state_map:
room_sync_required_state_map_to_persist = changed_required_state_map
@@ -1471,13 +1579,37 @@ async def _get_bump_stamp(
return None
+@attr.s(auto_attribs=True)
+class _RequiredStateChangesReturn:
+ """Return type for _required_state_changes."""
+
+ changed_required_state_map: Mapping[str, AbstractSet[str]] | None
+ """The updated required state map to store in the room config, or None if
+ there is no change."""
+
+ added_state_filter: StateFilter
+ """The state filter to use to fetch any additional current state that needs
+ to be returned to the client."""
+
+ extra_users_to_add_to_lazy_cache: AbstractSet[str] = frozenset()
+ """The set of user IDs we should add to the lazy members cache that we had
+ previously returned. Handles the case where a user was previously sent down
+ explicitly but is now being lazy loaded."""
+
+ lazy_members_invalidated: AbstractSet[str] = frozenset()
+ """The set of user IDs whose membership has changed but we didn't send down,
+ so we need to invalidate them from the cache."""
+
+
def _required_state_changes(
user_id: str,
*,
prev_required_state_map: Mapping[str, AbstractSet[str]],
request_required_state_map: Mapping[str, AbstractSet[str]],
+ previously_returned_lazy_user_ids: AbstractSet[str],
+ request_lazy_load_user_ids: AbstractSet[str],
state_deltas: StateMap[str],
-) -> tuple[Mapping[str, AbstractSet[str]] | None, StateFilter]:
+) -> _RequiredStateChangesReturn:
"""Calculates the changes between the required state room config from the
previous requests compared with the current request.
@@ -1491,14 +1623,62 @@ def _required_state_changes(
added, removed and then added again to the required state. In that case we
only want to re-send that entry down sync if it has changed.
- Returns:
- A 2-tuple of updated required state config (or None if there is no update)
- and the state filter to use to fetch extra current state that we need to
- return.
+ Args:
+ user_id: The user ID of the user making the request.
+ prev_required_state_map: The required state map from the previous
+ request.
+ request_required_state_map: The required state map from the current
+ request.
+ previously_returned_lazy_user_ids: The set of user IDs whose membership
+ we have previously returned to the client due to lazy loading. This
+ is filtered to only include users who have sent events in the `timeline`,
+ who appear in `required_state`, or whose membership has changed.
+ request_lazy_load_user_ids: The set of user IDs whose lazy-loaded
+ membership is required for this request.
+ state_deltas: The state deltas in the room in the request token range,
+ considering user membership. See `get_current_state_deltas_for_room`
+ for more details.
"""
+
+ # First we find any lazy members that have been invalidated due to state
+ # changes that we are not sending down.
+ lazy_members_invalidated = set()
+ for event_type, state_key in state_deltas:
+ if event_type != EventTypes.Member:
+ continue
+
+ if state_key in request_lazy_load_user_ids:
+ # Because it's part of the `request_lazy_load_user_ids`, we're going to
+ # send this member change down.
+ continue
+
+ if state_key not in previously_returned_lazy_user_ids:
+ # We've not previously returned this member so nothing to
+ # invalidate.
+ continue
+
+ lazy_members_invalidated.add(state_key)
+
if prev_required_state_map == request_required_state_map:
- # There has been no change. Return immediately.
- return None, StateFilter.none()
+ # There has been no change in state, just need to check lazy members.
+ newly_returned_lazy_members = (
+ request_lazy_load_user_ids - previously_returned_lazy_user_ids
+ )
+ if newly_returned_lazy_members:
+ # There are some new lazy members we need to fetch.
+ added_types: list[tuple[str, str | None]] = []
+ for new_user_id in newly_returned_lazy_members:
+ added_types.append((EventTypes.Member, new_user_id))
+
+ added_state_filter = StateFilter.from_types(added_types)
+ else:
+ added_state_filter = StateFilter.none()
+
+ return _RequiredStateChangesReturn(
+ changed_required_state_map=None,
+ added_state_filter=added_state_filter,
+ lazy_members_invalidated=lazy_members_invalidated,
+ )
prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
@@ -1508,17 +1688,29 @@ def _required_state_changes(
# already fetching everything, we don't have to fetch anything now that they've
# narrowed.
if StateValues.WILDCARD in prev_wildcard:
- return request_required_state_map, StateFilter.none()
+ return _RequiredStateChangesReturn(
+ changed_required_state_map=request_required_state_map,
+ added_state_filter=StateFilter.none(),
+ lazy_members_invalidated=lazy_members_invalidated,
+ )
# If a event type wildcard has been added or removed we don't try and do
# anything fancy, and instead always update the effective room required
# state config to match the request.
if request_wildcard - prev_wildcard:
# Some keys were added, so we need to fetch everything
- return request_required_state_map, StateFilter.all()
+ return _RequiredStateChangesReturn(
+ changed_required_state_map=request_required_state_map,
+ added_state_filter=StateFilter.all(),
+ lazy_members_invalidated=lazy_members_invalidated,
+ )
if prev_wildcard - request_wildcard:
# Keys were only removed, so we don't have to fetch everything.
- return request_required_state_map, StateFilter.none()
+ return _RequiredStateChangesReturn(
+ changed_required_state_map=request_required_state_map,
+ added_state_filter=StateFilter.none(),
+ lazy_members_invalidated=lazy_members_invalidated,
+ )
# Contains updates to the required state map compared with the previous room
# config. This has the same format as `RoomSyncConfig.required_state`
@@ -1550,6 +1742,17 @@ def _required_state_changes(
# Nothing *added*, so we skip. Removals happen below.
continue
+ # Handle the special case of adding `$LAZY` membership: we always record
+ # the switch to lazy loading in the effective room config, as we
+ # immediately start using the lazy loading tables, so there is no point
+ # in *not* recording the change.
+ if event_type == EventTypes.Member:
+ old_state_key_lazy = StateValues.LAZY in old_state_keys
+ request_state_key_lazy = StateValues.LAZY in request_state_keys
+ if not old_state_key_lazy and request_state_key_lazy:
+ changes[event_type] = request_state_keys
+ continue
+
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
@@ -1620,9 +1823,31 @@ def _required_state_changes(
# LAZY values should also be ignore for event types that are
# not membership.
pass
+ elif event_type == EventTypes.Member:
+ if state_key not in previously_returned_lazy_user_ids:
+ # Only add *explicit* members we haven't previously sent
+ # down.
+ added.append((event_type, state_key))
else:
added.append((event_type, state_key))
+ previously_required_state_members = set(
+ prev_required_state_map.get(EventTypes.Member, ())
+ )
+ if StateValues.ME in previously_required_state_members:
+ previously_required_state_members.add(user_id)
+
+ # We also need to pull out any lazy members that are now required but
+ # haven't previously been returned.
+ for required_user_id in (
+ request_lazy_load_user_ids
+ # Remove previously returned users
+ - previously_returned_lazy_user_ids
+ # Exclude previously explicitly requested members.
+ - previously_required_state_members
+ ):
+ added.append((EventTypes.Member, required_user_id))
+
added_state_filter = StateFilter.from_types(added)
# Figure out what changes we need to apply to the effective required state
@@ -1663,13 +1888,25 @@ def _required_state_changes(
changes[event_type] = request_state_keys
continue
+ # When handling $LAZY membership, we want to either a) not update the
+ # state or b) update it to match the request. This is to avoid churn of
+ # the effective required state for rooms (we deduplicate required state
+ # between rooms), and because we can store the previously returned
+ # explicit memberships with the lazy loaded memberships.
if event_type == EventTypes.Member:
old_state_key_lazy = StateValues.LAZY in old_state_keys
request_state_key_lazy = StateValues.LAZY in request_state_keys
+ has_lazy = old_state_key_lazy or request_state_key_lazy
+ # If a "$LAZY" has been added or removed we always update to match
+ # the request.
if old_state_key_lazy != request_state_key_lazy:
- # If a "$LAZY" has been added or removed we always update the effective room
- # required state config to match the request.
+ changes[event_type] = request_state_keys
+ continue
+
+ # Or if we have lazy membership and there are invalidated
+ # explicit memberships.
+ if has_lazy and invalidated_state_keys:
changes[event_type] = request_state_keys
continue
@@ -1684,6 +1921,28 @@ def _required_state_changes(
if invalidated_state_keys:
changes[event_type] = old_state_keys - invalidated_state_keys
+ # Check for any explicit membership changes that were removed that we can
+ # add to the lazy members previously returned. This is so that we don't
+ # return a user due to lazy loading if they were previously returned as an
+ # explicit membership.
+ users_to_add_to_lazy_cache: set[str] = set()
+
+ membership_changes = changes.get(EventTypes.Member, set())
+ if membership_changes and StateValues.LAZY in request_state_keys:
+ for state_key in prev_required_state_map.get(EventTypes.Member, set()):
+ if state_key == StateValues.WILDCARD or state_key == StateValues.LAZY:
+ # Ignore non-user IDs.
+ continue
+
+ if state_key == StateValues.ME:
+ # Normalize to proper user ID
+ state_key = user_id
+
+ # We remember the user if they haven't been invalidated
+ if (EventTypes.Member, state_key) not in state_deltas:
+ users_to_add_to_lazy_cache.add(state_key)
+
+ new_required_state_map = None
if changes:
# Update the required state config based on the changes.
new_required_state_map = dict(prev_required_state_map)
@@ -1694,6 +1953,9 @@ def _required_state_changes(
# Remove entries with empty state keys.
new_required_state_map.pop(event_type, None)
- return new_required_state_map, added_state_filter
- else:
- return None, added_state_filter
+ return _RequiredStateChangesReturn(
+ changed_required_state_map=new_required_state_map,
+ added_state_filter=added_state_filter,
+ lazy_members_invalidated=lazy_members_invalidated,
+ extra_users_to_add_to_lazy_cache=users_to_add_to_lazy_cache,
+ )
diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py
index d01fab271f..65febe58aa 100644
--- a/synapse/handlers/sliding_sync/store.py
+++ b/synapse/handlers/sliding_sync/store.py
@@ -13,7 +13,6 @@
#
import logging
-from typing import TYPE_CHECKING
import attr
@@ -25,9 +24,7 @@
PerConnectionState,
SlidingSyncConfig,
)
-
-if TYPE_CHECKING:
- pass
+from synapse.util.clock import Clock
logger = logging.getLogger(__name__)
@@ -61,7 +58,8 @@ class SlidingSyncConnectionStore:
to mapping of room ID to `HaveSentRoom`.
"""
- store: "DataStore"
+ clock: Clock
+ store: DataStore
async def get_and_clear_connection_positions(
self,
@@ -101,7 +99,7 @@ async def record_new_state(
If there are no changes to the state this may return the same token as
the existing per-connection state.
"""
- if not new_connection_state.has_updates():
+ if not new_connection_state.has_updates(self.clock):
if from_token is not None:
return from_token.connection_position
else:
diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py
index 828eed3a73..c66002dae4 100644
--- a/synapse/storage/databases/main/sliding_sync.py
+++ b/synapse/storage/databases/main/sliding_sync.py
@@ -14,7 +14,7 @@
import logging
-from typing import TYPE_CHECKING, Mapping, cast
+from typing import TYPE_CHECKING, AbstractSet, Mapping, cast
import attr
@@ -26,13 +26,16 @@
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
+ make_in_list_sql_clause,
)
+from synapse.storage.engines import PostgresEngine
from synapse.types import MultiWriterStreamToken, RoomStreamToken
from synapse.types.handlers.sliding_sync import (
HaveSentRoom,
HaveSentRoomFlag,
MutablePerConnectionState,
PerConnectionState,
+ RoomLazyMembershipChanges,
RoomStatusMap,
RoomSyncConfig,
)
@@ -373,6 +376,13 @@ def persist_per_connection_state_txn(
value_values=values,
)
+ self._persist_sliding_sync_connection_lazy_members_txn(
+ txn,
+ connection_key,
+ connection_position,
+ per_connection_state.room_lazy_membership,
+ )
+
return connection_position
@cached(iterable=True, max_entries=100000)
@@ -446,6 +456,23 @@ def _get_and_clear_connection_positions_txn(
"""
txn.execute(sql, (connection_key, connection_position))
+ # Move any lazy membership entries for this connection position to have
+ # `NULL` connection position, indicating that they apply to all future
+ # positions on this connection. This is safe because we have deleted all
+ # other (potentially forked) connection positions, and so all future
+ # positions in this connection will be a continuation of the current
+ # position. Thus any lazy membership entries we have sent down will still
+ # be valid.
+ self.db_pool.simple_update_txn(
+ txn,
+ table="sliding_sync_connection_lazy_members",
+ keyvalues={
+ "connection_key": connection_key,
+ "connection_position": connection_position,
+ },
+ updatevalues={"connection_position": None},
+ )
+
# Fetch and create a mapping from required state ID to the actual
# required state for the connection.
rows = self.db_pool.simple_select_list_txn(
@@ -525,8 +552,153 @@ def _get_and_clear_connection_positions_txn(
receipts=RoomStatusMap(receipts),
account_data=RoomStatusMap(account_data),
room_configs=room_configs,
+ room_lazy_membership={},
)
+ async def get_sliding_sync_connection_lazy_members(
+ self,
+ connection_position: int,
+ room_id: str,
+ user_ids: AbstractSet[str],
+ ) -> Mapping[str, int]:
+ """Get which user IDs in the room we have previously sent lazy
+ membership for.
+
+ Args:
+ connection_position: The sliding sync connection position.
+ room_id: The room ID to get lazy members for.
+ user_ids: The user IDs to check whether we've previously sent
+ because of lazy membership.
+
+ Returns:
+ The mapping of user IDs to the last seen timestamp for those user
+ IDs. Only includes user IDs that we have previously sent lazy
+ membership for, and so may be a subset of the `user_ids` passed in.
+ """
+
+ def get_sliding_sync_connection_lazy_members_txn(
+ txn: LoggingTransaction,
+ ) -> Mapping[str, int]:
+ user_clause, user_args = make_in_list_sql_clause(
+ txn.database_engine, "user_id", user_ids
+ )
+
+ # Fetch all the lazy membership entries for the given connection,
+ # room and user IDs. We don't have the `connection_key` here, so we
+ # join against `sliding_sync_connection_positions` to get it.
+ #
+ # Beware that there are two `connection_position` columns in the
+ # query which are different, the one in
+ # `sliding_sync_connection_positions` is the one we match to get the
+ # connection_key, whereas the one in
+ # `sliding_sync_connection_lazy_members` is what we filter against
+ # (it may be null or the same as the one passed in).
+ #
+ # FIXME: We should pass in `connection_key` here to avoid the join.
+ # We don't do this currently as the caller doesn't have it handy.
+ sql = f"""
+ SELECT user_id, members.connection_position, last_seen_ts
+ FROM sliding_sync_connection_lazy_members AS members
+ INNER JOIN sliding_sync_connection_positions AS pos USING (connection_key)
+ WHERE pos.connection_position = ? AND room_id = ? AND {user_clause}
+ """
+
+ txn.execute(sql, (connection_position, room_id, *user_args))
+
+ # Filter out any cache entries that only apply to forked connection
+ # positions. Entries with `NULL` `connection_position` apply to all
+ # positions on the connection.
+ return {
+ user_id: last_seen_ts
+ for user_id, db_connection_position, last_seen_ts in txn
+ if db_connection_position == connection_position
+ or db_connection_position is None
+ }
+
+ return await self.db_pool.runInteraction(
+ "get_sliding_sync_connection_lazy_members",
+ get_sliding_sync_connection_lazy_members_txn,
+ db_autocommit=True, # Avoid transaction for single read
+ )
+
+ def _persist_sliding_sync_connection_lazy_members_txn(
+ self,
+ txn: LoggingTransaction,
+ connection_key: int,
+ new_connection_position: int,
+ all_changes: dict[str, RoomLazyMembershipChanges],
+ ) -> None:
+ """Persist that we have sent lazy membership for the given user IDs."""
+
+ now = self.clock.time_msec()
+
+ # Figure out which cache entries to add or update.
+ #
+ # These are either a) new entries we've never sent before (i.e. with a
+ # None last_seen_ts), or b) where the `last_seen_ts` is old enough that
+ # we want to update it.
+ #
+ # We don't update the timestamp every time to avoid hammering the DB
+ # with writes, and we don't need the timestamp to be precise. It is used
+ # to evict old entries that haven't been used in a while.
+ to_update: list[tuple[str, str]] = []
+ for room_id, room_changes in all_changes.items():
+ user_ids_to_update = room_changes.get_returned_user_ids_to_update(
+ self.clock
+ )
+ to_update.extend((room_id, user_id) for user_id in user_ids_to_update)
+
+ if to_update:
+ # Upsert the new/updated entries.
+ #
+ # Ignore conflicts where the existing entry has a different
+ # connection position (i.e. from a forked connection position). This
+ # may mean that we lose some updates, but that's acceptable as this
+ # is a cache and it's fine for it to *not* include rows. (Downstream
+ # this will cause us to maybe send a few extra lazy members down
+ # sync, but we're allowed to send extra members).
+ sql = """
+ INSERT INTO sliding_sync_connection_lazy_members
+ (connection_key, connection_position, room_id, user_id, last_seen_ts)
+ VALUES {value_placeholder}
+ ON CONFLICT (connection_key, room_id, user_id)
+ DO UPDATE SET last_seen_ts = EXCLUDED.last_seen_ts
+ WHERE sliding_sync_connection_lazy_members.connection_position IS NULL
+ OR sliding_sync_connection_lazy_members.connection_position = EXCLUDED.connection_position
+ """
+
+ args = [
+ (connection_key, new_connection_position, room_id, user_id, now)
+ for room_id, user_id in to_update
+ ]
+
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = sql.format(value_placeholder="?")
+ txn.execute_values(sql, args, fetch=False)
+ else:
+ sql = sql.format(value_placeholder="(?, ?, ?, ?, ?)")
+ txn.execute_batch(sql, args)
+
+ # Remove any invalidated entries.
+ to_remove: list[tuple[str, str]] = []
+ for room_id, room_changes in all_changes.items():
+ for user_id in room_changes.invalidated_user_ids:
+ to_remove.append((room_id, user_id))
+
+ if to_remove:
+ # We don't try and match on connection position here: it's fine to
+ # remove it from all forks. This is a cache so it's fine to expire
+ # arbitrary entries, the worst that happens is we send a few extra
+ # lazy members down sync.
+ self.db_pool.simple_delete_many_batch_txn(
+ txn,
+ table="sliding_sync_connection_lazy_members",
+ keys=("connection_key", "room_id", "user_id"),
+ values=[
+ (connection_key, room_id, user_id) for room_id, user_id in to_remove
+ ],
+ )
+
@wrap_as_background_process("delete_old_sliding_sync_connections")
async def delete_old_sliding_sync_connections(self) -> None:
"""Delete sliding sync connections that have not been used for a long time."""
@@ -564,6 +736,10 @@ class PerConnectionStateDB:
room_configs: Mapping[str, "RoomSyncConfig"]
+ room_lazy_membership: dict[str, RoomLazyMembershipChanges]
+ """Lazy membership changes to persist alongside this state. Only used
+ when persisting."""
+
@staticmethod
async def from_state(
per_connection_state: "MutablePerConnectionState", store: "DataStore"
@@ -618,6 +794,7 @@ async def from_state(
receipts=RoomStatusMap(receipts),
account_data=RoomStatusMap(account_data),
room_configs=per_connection_state.room_configs.maps[0],
+ room_lazy_membership=per_connection_state.room_lazy_membership,
)
async def to_state(self, store: "DataStore") -> "PerConnectionState":
diff --git a/synapse/storage/schema/main/delta/93/02_sliding_sync_members.sql b/synapse/storage/schema/main/delta/93/02_sliding_sync_members.sql
new file mode 100644
index 0000000000..279458d710
--- /dev/null
+++ b/synapse/storage/schema/main/delta/93/02_sliding_sync_members.sql
@@ -0,0 +1,60 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 Element Creations Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+
+-- Tracks which member states have been sent to the client for lazy-loaded
+-- members in sliding sync. This is a *cache* as it doesn't matter if we send
+-- down members we've previously sent down, i.e. it's safe to delete any rows.
+--
+-- We could have tracked these as part of the
+-- `sliding_sync_connection_required_state` table, but that would bloat that
+-- table significantly as most rooms will have lazy-loaded members. We want to
+-- keep that table small as we always pull out all rows for the connection for
+-- every request, so storing lots of data there would be bad for performance. To
+-- keep that table small we also deduplicate the requested state across
+-- different rooms, which storing lazy members there would prevent.
+--
+-- We track a *rough* `last_seen_ts` for each user in each room which indicates
+-- when we last would've sent their member state to the client. `last_seen_ts`
+-- is used so that we can remove members which haven't been seen for a while to
+-- save space. This is a *rough* timestamp as we don't want to update the
+-- timestamp every time to avoid hammering the DB with writes, and we don't need
+-- the timestamp to be precise (as it is used to evict old entries that haven't
+-- been used in a while).
+--
+-- Care must be taken when handling "forked" positions, i.e. we have responded
+-- to a request with a position and then get another different request using the
+-- previous position as a base. We track this by including a
+-- `connection_position` for newly inserted rows. When we advance the position
+-- we set this to NULL for all rows which were present at that position, and
+-- delete all other rows. When reading rows we can then filter out any rows
+-- which have a non-NULL `connection_position` which is not the current
+-- position.
+--
+-- I.e. `connection_position` is NULL for rows which are valid for *all*
+-- positions on the connection, and is non-NULL for rows which are only valid
+-- for a specific position.
+--
+-- When invalidating rows, we can just delete them. Technically this could
+-- invalidate for a forked position, but this is acceptable as equivalent to a
+-- cache eviction.
+CREATE TABLE sliding_sync_connection_lazy_members (
+ connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE,
+ connection_position BIGINT REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE,
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ last_seen_ts BIGINT NOT NULL
+);
+
+CREATE UNIQUE INDEX sliding_sync_connection_lazy_members_idx ON sliding_sync_connection_lazy_members (connection_key, room_id, user_id);
+CREATE INDEX sliding_sync_connection_lazy_members_pos_idx ON sliding_sync_connection_lazy_members (connection_key, connection_position) WHERE connection_position IS NOT NULL;
diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py
index 03b3bcb3ca..dcb125c494 100644
--- a/synapse/types/handlers/sliding_sync.py
+++ b/synapse/types/handlers/sliding_sync.py
@@ -49,12 +49,21 @@
UserID,
)
from synapse.types.rest.client import SlidingSyncBody
+from synapse.util.clock import Clock
+from synapse.util.duration import Duration
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
logger = logging.getLogger(__name__)
+# How often to update the last seen timestamp for lazy members.
+#
+# We don't update the timestamp every time to avoid hammering the DB with
+# writes, and we don't need the timestamp to be precise (as it is used to evict
+# old entries that haven't been used in a while).
+LAZY_MEMBERS_UPDATE_INTERVAL = Duration(hours=1)
+
class SlidingSyncConfig(SlidingSyncBody):
"""
@@ -891,6 +900,69 @@ def __len__(self) -> int:
return len(self.rooms) + len(self.receipts) + len(self.room_configs)
+@attr.s(auto_attribs=True)
+class RoomLazyMembershipChanges:
+ """Changes to lazily-loaded room memberships for a given room."""
+
+ returned_user_id_to_last_seen_ts_map: Mapping[str, int | None] = attr.Factory(dict)
+ """Map from user ID to timestamp for users whose membership we have lazily
+ loaded in this room and request. The timestamp indicates the time we
+ previously needed the membership, or None if we sent it down for the first
+ time in this request.
+
+ We track a *rough* `last_seen_ts` for each user in each room which indicates
+ when we last would've sent their member state to the client. This is used so
+ that we can remove members which haven't been seen for a while to save
+ space.
+
+ Note: this will include users whose membership we would have sent down but
+ didn't due to us having previously sent them.
+ """
+
+ invalidated_user_ids: AbstractSet[str] = attr.Factory(set)
+ """Set of user IDs whose latest membership we have *not* sent down"""
+
+ def get_returned_user_ids_to_update(self, clock: Clock) -> StrCollection:
+ """Get the user IDs whose last seen timestamp we need to update in the
+ database.
+
+ This is a subset of user IDs in `returned_user_id_to_last_seen_ts_map`,
+ whose timestamp is either None (first time we've sent them) or older
+ than `LAZY_MEMBERS_UPDATE_INTERVAL`.
+
+ We only update the timestamp in the database every so often to avoid
+ hammering the DB with writes. We don't need the timestamp to be precise,
+ as the timestamp is used to evict old entries that haven't been used in
+ a while.
+ """
+
+ now_ms = clock.time_msec()
+ return [
+ user_id
+ for user_id, last_seen_ts in self.returned_user_id_to_last_seen_ts_map.items()
+ if last_seen_ts is None
+ or now_ms - last_seen_ts >= LAZY_MEMBERS_UPDATE_INTERVAL.as_millis()
+ ]
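+
+    # An illustrative sketch of the throttling above (comments only; the user
+    # IDs are made up). Assuming `clock.time_msec()` is well past
+    # `LAZY_MEMBERS_UPDATE_INTERVAL`:
+    #
+    #   changes = RoomLazyMembershipChanges(
+    #       returned_user_id_to_last_seen_ts_map={
+    #           "@new:example.org": None,                 # never persisted -> update
+    #           "@stale:example.org": 0,                  # older than interval -> update
+    #           "@fresh:example.org": clock.time_msec(),  # recently updated -> skip
+    #       }
+    #   )
+    #   changes.get_returned_user_ids_to_update(clock)
+    #   # -> ["@new:example.org", "@stale:example.org"]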
+
+ def has_updates(self, clock: Clock) -> bool:
+ """Check if there are any updates to the lazy membership changes.
+
+ Called to check if we need to persist changes to the lazy membership
+ state for the room. We want to avoid persisting the state if there are
+        no changes, to avoid unnecessary writes (and cache misses due to a new
+        connection position).
+ """
+
+ # We consider there to be updates if there are any invalidated user
+ # IDs...
+ if self.invalidated_user_ids:
+ return True
+
+ # ...or if any of the returned user IDs need their last seen timestamp
+ # updating in the database.
+ return bool(self.get_returned_user_ids_to_update(clock))
+
+
@attr.s(auto_attribs=True)
class MutablePerConnectionState(PerConnectionState):
"""A mutable version of `PerConnectionState`"""
@@ -903,12 +975,28 @@ class MutablePerConnectionState(PerConnectionState):
room_configs: typing.ChainMap[str, RoomSyncConfig]
- def has_updates(self) -> bool:
+ # A map from room ID to the lazily-loaded memberships needed for the
+ # request in that room.
+ room_lazy_membership: dict[str, RoomLazyMembershipChanges] = attr.Factory(dict)
+
+ def has_updates(self, clock: Clock) -> bool:
+ """Check if there are any updates to the per-connection state that need
+ persisting.
+
+        It is important that we don't persist spuriously, as doing so always
+        generates a new connection position, which invalidates some of the
+        caches. This check doesn't need to be perfect, but we should avoid
+        always generating new connection positions when lazy loading.
+        """
return (
bool(self.rooms.get_updates())
or bool(self.receipts.get_updates())
or bool(self.account_data.get_updates())
or bool(self.get_room_config_updates())
+ or any(
+ change.has_updates(clock)
+ for change in self.room_lazy_membership.values()
+ )
)
def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]:
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 4582906441..2293988036 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -18,7 +18,7 @@
#
#
import logging
-from typing import AbstractSet, Mapping
+from typing import AbstractSet
from unittest.mock import patch
import attr
@@ -38,13 +38,17 @@
RoomSyncConfig,
StateValues,
_required_state_changes,
+ _RequiredStateChangesReturn,
)
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import JsonDict, StateMap, StreamToken, UserID, create_requester
-from synapse.types.handlers.sliding_sync import PerConnectionState, SlidingSyncConfig
+from synapse.types.handlers.sliding_sync import (
+ PerConnectionState,
+ SlidingSyncConfig,
+)
from synapse.types.state import StateFilter
from synapse.util.clock import Clock
@@ -3827,12 +3831,11 @@ class RequiredStateChangesTestParameters:
previous_required_state_map: dict[str, set[str]]
request_required_state_map: dict[str, set[str]]
state_deltas: StateMap[str]
- expected_with_state_deltas: tuple[
- Mapping[str, AbstractSet[str]] | None, StateFilter
- ]
- expected_without_state_deltas: tuple[
- Mapping[str, AbstractSet[str]] | None, StateFilter
- ]
+ expected_with_state_deltas: _RequiredStateChangesReturn
+ expected_without_state_deltas: _RequiredStateChangesReturn
+
+ previously_returned_lazy_user_ids: AbstractSet[str] = frozenset()
+ request_lazy_load_user_ids: AbstractSet[str] = frozenset()
class RequiredStateChangesTestCase(unittest.TestCase):
@@ -3848,8 +3851,12 @@ class RequiredStateChangesTestCase(unittest.TestCase):
request_required_state_map={"type1": {"state_key"}},
state_deltas={("type1", "state_key"): "$event_id"},
# No changes
- expected_with_state_deltas=(None, StateFilter.none()),
- expected_without_state_deltas=(None, StateFilter.none()),
+ expected_with_state_deltas=_RequiredStateChangesReturn(
+ None, StateFilter.none()
+ ),
+ expected_without_state_deltas=_RequiredStateChangesReturn(
+ None, StateFilter.none()
+ ),
),
),
(
@@ -3862,14 +3869,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
"type2": {"state_key"},
},
state_deltas={("type2", "state_key"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've added a type so we should persist the changed required state
# config.
{"type1": {"state_key"}, "type2": {"state_key"}},
# We should see the new type added
StateFilter.from_types([("type2", "state_key")]),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key"}, "type2": {"state_key"}},
StateFilter.from_types([("type2", "state_key")]),
),
@@ -3885,7 +3892,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
"type2": {"state_key"},
},
state_deltas={("type2", "state_key"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've added a type so we should persist the changed required state
# config.
{"type1": {"state_key"}, "type2": {"state_key"}},
@@ -3894,7 +3901,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
[("type1", "state_key"), ("type2", "state_key")]
),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key"}, "type2": {"state_key"}},
StateFilter.from_types(
[("type1", "state_key"), ("type2", "state_key")]
@@ -3909,14 +3916,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={"type": {"state_key1"}},
request_required_state_map={"type": {"state_key1", "state_key2"}},
state_deltas={("type", "state_key2"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've added a key so we should persist the changed required state
# config.
{"type": {"state_key1", "state_key2"}},
# We should see the new state_keys added
StateFilter.from_types([("type", "state_key2")]),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type": {"state_key1", "state_key2"}},
StateFilter.from_types([("type", "state_key2")]),
),
@@ -3929,7 +3936,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={"type": {"state_key1"}},
request_required_state_map={"type": {"state_key2", "state_key3"}},
state_deltas={("type", "state_key2"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've added a key so we should persist the changed required state
# config.
#
@@ -3940,7 +3947,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
[("type", "state_key2"), ("type", "state_key3")]
),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type": {"state_key1", "state_key2", "state_key3"}},
StateFilter.from_types(
[("type", "state_key2"), ("type", "state_key3")]
@@ -3964,7 +3971,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
request_required_state_map={"type1": {"state_key"}},
state_deltas={("type2", "state_key"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `type2` since there's been a change to that state,
# (persist the change to required state). That way next time,
# they request `type2`, we see that we haven't sent it before
@@ -3975,7 +3982,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `type2` is no longer requested but since that state hasn't
# changed, nothing should change (we should still keep track
# that we've sent `type2` before).
@@ -3998,7 +4005,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
request_required_state_map={},
state_deltas={("type2", "state_key"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `type2` since there's been a change to that state,
# (persist the change to required state). That way next time,
# they request `type2`, we see that we haven't sent it before
@@ -4009,7 +4016,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `type2` is no longer requested but since that state hasn't
# changed, nothing should change (we should still keep track
# that we've sent `type2` before).
@@ -4029,7 +4036,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={"type": {"state_key1", "state_key2"}},
request_required_state_map={"type": {"state_key1"}},
state_deltas={("type", "state_key2"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `(type, state_key2)` since there's been a change
# to that state (persist the change to required state).
# That way next time, they request `(type, state_key2)`, we see
@@ -4041,7 +4048,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `(type, state_key2)` is no longer requested but since that
# state hasn't changed, nothing should change (we should still
# keep track that we've sent `(type, state_key1)` and `(type,
@@ -4073,11 +4080,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
("other_type", "state_key"): "$event_id",
},
# We've added a wildcard, so we persist the change and request everything
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
StateFilter.all(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
StateFilter.all(),
),
@@ -4103,13 +4110,13 @@ class RequiredStateChangesTestCase(unittest.TestCase):
("other_type", "state_key"): "$event_id",
},
# We've removed a type wildcard, so we persist the change but don't request anything
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key2"}},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key2"}},
# We don't need to request anything more if they are requesting
# less state now
@@ -4129,11 +4136,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
state_deltas={("type2", "state_key"): "$event_id"},
# We've added a wildcard state_key, so we persist the change and
# request all of the state for that type
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key"}, "type2": {StateValues.WILDCARD}},
StateFilter.from_types([("type2", None)]),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key"}, "type2": {StateValues.WILDCARD}},
StateFilter.from_types([("type2", None)]),
),
@@ -4151,7 +4158,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
state_deltas={("type2", "state_key"): "$event_id"},
# We've removed a state_key wildcard, so we persist the change and
# request nothing
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{"type1": {"state_key"}},
# We don't need to request anything more if they are requesting
# less state now
@@ -4160,7 +4167,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# We've removed a state_key wildcard but there have been no matching
# state changes, so no changes needed, just persist the
# `request_required_state_map` as-is.
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
None,
# We don't need to request anything more if they are requesting
# less state now
@@ -4180,7 +4187,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
request_required_state_map={"type1": {"state_key1"}},
state_deltas={("type1", "state_key3"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've removed some state keys from the type, but only state_key3 was
# changed so only that one should be removed.
{"type1": {"state_key1", "state_key2"}},
@@ -4188,7 +4195,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# No changes needed, just persist the
# `request_required_state_map` as-is
None,
@@ -4207,14 +4214,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={},
request_required_state_map={"type1": {StateValues.ME}},
state_deltas={("type1", "@user:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've added a type so we should persist the changed required state
# config.
{"type1": {StateValues.ME}},
# We should see the new state_keys added
StateFilter.from_types([("type1", "@user:test")]),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {StateValues.ME}},
StateFilter.from_types([("type1", "@user:test")]),
),
@@ -4229,7 +4236,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={"type1": {StateValues.ME}},
request_required_state_map={},
state_deltas={("type1", "@user:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `type1` since there's been a change to that state,
# (persist the change to required state). That way next time,
# they request `type1`, we see that we haven't sent it before
@@ -4240,7 +4247,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `type1` is no longer requested but since that state hasn't
# changed, nothing should change (we should still keep track
# that we've sent `type1` before).
@@ -4260,14 +4267,14 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={},
request_required_state_map={"type1": {"@user:test"}},
state_deltas={("type1", "@user:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# We've added a type so we should persist the changed required state
# config.
{"type1": {"@user:test"}},
# We should see the new state_keys added
StateFilter.from_types([("type1", "@user:test")]),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{"type1": {"@user:test"}},
StateFilter.from_types([("type1", "@user:test")]),
),
@@ -4282,7 +4289,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={"type1": {"@user:test"}},
request_required_state_map={},
state_deltas={("type1", "@user:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `type1` since there's been a change to that state,
# (persist the change to required state). That way next time,
# they request `type1`, we see that we haven't sent it before
@@ -4293,7 +4300,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `type1` is no longer requested but since that state hasn't
# changed, nothing should change (we should still keep track
# that we've sent `type1` before).
@@ -4313,13 +4320,13 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
state_deltas={(EventTypes.Member, "@user:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# If a "$LAZY" has been added or removed we always update the
# required state to what was requested for simplicity.
{EventTypes.Member: {StateValues.LAZY}},
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{EventTypes.Member: {StateValues.LAZY}},
StateFilter.none(),
),
@@ -4334,7 +4341,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={},
state_deltas={(EventTypes.Member, "@user:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# If a "$LAZY" has been added or removed we always update the
# required state to what was requested for simplicity.
{},
@@ -4342,7 +4349,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# less state now
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `EventTypes.Member` is no longer requested but since that
# state hasn't changed, nothing should change (we should still
# keep track that we've sent `EventTypes.Member` before).
@@ -4361,41 +4368,40 @@ class RequiredStateChangesTestCase(unittest.TestCase):
we're sending down another response without any timeline events.
""",
RequiredStateChangesTestParameters(
- previous_required_state_map={
- EventTypes.Member: {
- StateValues.LAZY,
- "@user2:test",
- "@user3:test",
- }
- },
+ previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
+ request_lazy_load_user_ids=set(),
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
+ # The `request_required_state_map` hasn't changed
+ None,
+ # We don't need to request anything more if they are requesting
+ # less state now
+ StateFilter.none(),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
# Remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we should persist the changed to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
- {
- EventTypes.Member: {
- StateValues.LAZY,
- "@user3:test",
- }
- },
- # We don't need to request anything more if they are requesting
- # less state now
- StateFilter.none(),
+ lazy_members_invalidated={"@user2:test"},
),
- expected_without_state_deltas=(
- # We're not requesting any specific `EventTypes.Member` now but
- # since that state hasn't changed, nothing should change (we
- # should still keep track that we've sent specific
- # `EventTypes.Member` before).
+ expected_without_state_deltas=_RequiredStateChangesReturn(
+ # The `request_required_state_map` hasn't changed
None,
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+ # Nothing should change (we should still keep track that
+ # we've sent specific `EventTypes.Member` before).
+ lazy_members_invalidated=frozenset(),
),
),
),
@@ -4407,50 +4413,37 @@ class RequiredStateChangesTestCase(unittest.TestCase):
we're sending down another response with a new event from user4.
""",
RequiredStateChangesTestParameters(
- previous_required_state_map={
- EventTypes.Member: {
- StateValues.LAZY,
- "@user2:test",
- "@user3:test",
- }
- },
- request_required_state_map={
- EventTypes.Member: {StateValues.LAZY, "@user4:test"}
- },
+ previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
+ request_lazy_load_user_ids={"@user4:test"},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
- expected_with_state_deltas=(
- # Since "@user4:test" was added, we should persist the changed
- # required state config.
- #
- # Also remove "@user2:test" since that state has changed and is no
- # longer being requested anymore. Since something was removed,
- # we also should persist the changed to required state. That way next
- # time, they request "@user2:test", we see that we haven't sent
- # it before and send the new state. (we should still keep track
- # that we've sent specific `EventTypes.Member` before)
- {
- EventTypes.Member: {
- StateValues.LAZY,
- "@user3:test",
- "@user4:test",
- }
- },
+ expected_with_state_deltas=_RequiredStateChangesReturn(
+ # The `request_required_state_map` hasn't changed
+ None,
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+                    # Remove "@user2:test" since that state has changed and
+                    # is no longer being requested. Since something was
+                    # removed, we should also persist the change to required
+                    # state. That way next time, when they request
+                    # "@user2:test", we see that we haven't sent it before
+                    # and send the new state. (We should still keep track
+                    # that we've sent specific `EventTypes.Member` before.)
+ lazy_members_invalidated={"@user2:test"},
),
- expected_without_state_deltas=(
- # Since "@user4:test" was added, we should persist the changed
- # required state config.
- {
- EventTypes.Member: {
- StateValues.LAZY,
- "@user2:test",
- "@user3:test",
- "@user4:test",
- }
- },
+ expected_without_state_deltas=_RequiredStateChangesReturn(
+ # The `request_required_state_map` hasn't changed
+ None,
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+ lazy_members_invalidated=frozenset(),
),
),
),
@@ -4464,40 +4457,81 @@ class RequiredStateChangesTestCase(unittest.TestCase):
EventTypes.Member: {"@user2:test", "@user3:test"}
},
request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ previously_returned_lazy_user_ids=frozenset(),
+ request_lazy_load_user_ids=frozenset(),
+ state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
+ expected_with_state_deltas=_RequiredStateChangesReturn(
+ # Since `StateValues.LAZY` was added, we should persist the
+ # changed required state config.
+ {EventTypes.Member: {StateValues.LAZY}},
+ # No users are being lazy loaded, so nothing to request.
+ StateFilter.none(),
+ # Remember the fact that we've sent @user3 down before,
+ # but not @user2 as that has been invalidated.
+ extra_users_to_add_to_lazy_cache={"@user3:test"},
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
+ ),
+ expected_without_state_deltas=_RequiredStateChangesReturn(
+ # Since `StateValues.LAZY` was added, we should persist the
+ # changed required state config.
+ {EventTypes.Member: {StateValues.LAZY}},
+ # No users are being lazy loaded, so nothing to request.
+ StateFilter.none(),
+ # Remember the fact that we've sent the users down before.
+ extra_users_to_add_to_lazy_cache={"@user2:test", "@user3:test"},
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
+ ),
+ ),
+ ),
+ (
+ "state_key_expand_lazy_keep_previous_memberships_need_previous_sent",
+ """
+ Test expanding the `required_state` to lazy-loading room
+            members. If a membership that was previously requested explicitly
+            is now needed for lazy loading, we should not send it again (as it
+            was already sent before).
+ """,
+ RequiredStateChangesTestParameters(
+ previous_required_state_map={
+ EventTypes.Member: {"@user2:test", "@user3:test"}
+ },
+ request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ previously_returned_lazy_user_ids=frozenset(),
+ request_lazy_load_user_ids={"@user3:test"},
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
+ {EventTypes.Member: {StateValues.LAZY}},
+ # We have already sent @user3 down before.
#
- # Also remove "@user2:test" since that state has changed and is no
- # longer being requested anymore. Since something was removed,
- # we also should persist the changed to required state. That way next
- # time, they request "@user2:test", we see that we haven't sent
- # it before and send the new state. (we should still keep track
- # that we've sent specific `EventTypes.Member` before)
- {
- EventTypes.Member: {
- StateValues.LAZY,
- "@user3:test",
- }
- },
- # We don't need to request anything more if they are requesting
- # less state now
+ # `@user3:test` is required for lazy loading, but we've
+ # already sent it down before (due to it being in
+ # `previous_required_state_map`), so we don't need to
+ # request it again.
StateFilter.none(),
+ # Remember the fact that we've sent @user3 down before,
+ # but not @user2 as that has been invalidated.
+ extra_users_to_add_to_lazy_cache={"@user3:test"},
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# Since `StateValues.LAZY` was added, we should persist the
# changed required state config.
- {
- EventTypes.Member: {
- StateValues.LAZY,
- "@user2:test",
- "@user3:test",
- }
- },
- # We don't need to request anything more if they are requesting
- # less state now
+ {EventTypes.Member: {StateValues.LAZY}},
+ # We have already sent @user3 down before.
+ #
+ # `@user3:test` is required for lazy loading, but we've
+ # already sent it down before (due to it being in
+ # `previous_required_state_map`), so we don't need to
+ # request it again.
StateFilter.none(),
+ # Remember the fact that we've sent the users down before.
+ extra_users_to_add_to_lazy_cache={"@user2:test", "@user3:test"},
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
),
),
),
@@ -4507,36 +4541,33 @@ class RequiredStateChangesTestCase(unittest.TestCase):
Test retracting the `required_state` to no longer lazy-loading room members.
""",
RequiredStateChangesTestParameters(
- previous_required_state_map={
- EventTypes.Member: {
- StateValues.LAZY,
- "@user2:test",
- "@user3:test",
- }
- },
+ previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={},
+ previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
+ request_lazy_load_user_ids=set(),
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Remove `EventTypes.Member` since there's been a change to that
- # state, (persist the change to required state). That way next
- # time, they request `EventTypes.Member`, we see that we haven't
- # sent it before and send the new state. (if we were tracking
- # that we sent any other state, we should still keep track
- # that).
- #
- # This acts the same as the `simple_remove_type` test. It's
- # possible that we could remember the specific `state_keys` that
- # we have sent down before but this currently just acts the same
- # as if a whole `type` was removed. Perhaps it's good that we
- # "garbage collect" and forget what we've sent before for a
- # given `type` when the client stops caring about a certain
- # `type`.
+                    # state (and persist the change to required state).
{},
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+ # Explicitly remove the now invalidated @user2:test
+ # membership.
+ #
+ # We don't invalidate @user3:test as that membership
+ # hasn't changed. We continue to store the existing lazy
+ # members since they might be useful for future
+ # requests. (Alternatively, we could invalidate all
+ # members in the room when the client stops lazy
+ # loading, but we opt to keep track of them).
+ lazy_members_invalidated={"@user2:test"},
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# `EventTypes.Member` is no longer requested but since that
# state hasn't changed, nothing should change (we should still
# keep track that we've sent `EventTypes.Member` before).
@@ -4544,13 +4575,20 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# We don't need to request anything more if they are requesting
# less state now
StateFilter.none(),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+ # Nothing has been invalidated.
+ lazy_members_invalidated=frozenset(),
),
),
),
(
- "state_key_retract_lazy_keep_previous_memberships_with_new_memberships",
+ "state_key_retract_lazy_keep_previous_explicit_memberships",
"""
- Test retracting the `required_state` to no longer lazy-loading room members.
+            Test removing explicit memberships from the `required_state`
+            when lazy-loading room members; the previously sent memberships
+            should be tracked in the lazy member cache.
""",
RequiredStateChangesTestParameters(
previous_required_state_map={
@@ -4560,39 +4598,144 @@ class RequiredStateChangesTestCase(unittest.TestCase):
"@user3:test",
}
},
+ request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ previously_returned_lazy_user_ids=frozenset(),
+ request_lazy_load_user_ids={"@user3:test"},
+ state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
+ expected_with_state_deltas=_RequiredStateChangesReturn(
+                    # Since an explicit membership was removed, we record the
+                    # new required state config and move the explicit
+                    # memberships to the lazy member cache.
+ {EventTypes.Member: {StateValues.LAZY}},
+ # We have already sent @user3 down before.
+ #
+ # `@user3:test` is required for lazy loading, but we've
+ # already sent it down before (due to it being in
+ # `previous_required_state_map`), so we don't need to
+ # request it again.
+ StateFilter.none(),
+ # Remember the fact that we've sent @user3 down before,
+ # but not @user2 as that has been invalidated.
+ extra_users_to_add_to_lazy_cache={"@user3:test"},
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
+ ),
+ expected_without_state_deltas=_RequiredStateChangesReturn(
+ # While some explicit memberships were removed, there were no
+ # state changes, so we don't need to persist the new required
+ # state config yet.
+ None,
+ # We have already sent @user3 down before.
+ #
+ # `@user3:test` is required for lazy loading, but we've
+ # already sent it down before (due to it being in
+ # `previous_required_state_map`), so we don't need to
+ # request it again.
+ StateFilter.none(),
+                    # We aren't persisting the changed required_state_map here,
+                    # so we don't yet move the explicit memberships to the lazy
+                    # cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
+ ),
+ ),
+ ),
+ (
+ "state_key_retract_lazy_keep_previous_explicit_me_memberships",
+ """
+            Test removing an explicit $ME membership from the `required_state`
+            when lazy-loading room members; the previously sent membership
+            should be tracked in the lazy member cache.
+ """,
+ RequiredStateChangesTestParameters(
+ previous_required_state_map={
+ EventTypes.Member: {
+ StateValues.LAZY,
+ StateValues.ME,
+ "@user2:test",
+ }
+ },
+ request_required_state_map={EventTypes.Member: {StateValues.LAZY}},
+ previously_returned_lazy_user_ids=frozenset(),
+ request_lazy_load_user_ids={"@user:test"},
+ state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
+ expected_with_state_deltas=_RequiredStateChangesReturn(
+                    # Since an explicit membership was removed, we record the
+                    # new required state config and move the explicit
+                    # memberships to the lazy member cache.
+ {EventTypes.Member: {StateValues.LAZY}},
+ # We have already sent @user down before.
+ #
+ # `@user:test` is required for lazy loading, but we've
+ # already sent it down before (due to `StateValues.ME`
+ # being in `previous_required_state_map`), so we don't
+ # need to request it again.
+ StateFilter.none(),
+ # Remember the fact that we've sent @user down before,
+ # but not @user2 as that has been invalidated.
+ extra_users_to_add_to_lazy_cache={"@user:test"},
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
+ ),
+ expected_without_state_deltas=_RequiredStateChangesReturn(
+ # While some explicit memberships were removed, there were no
+ # state changes, so we don't need to persist the new required
+ # state config yet.
+ None,
+ # We have already sent @user down before.
+ #
+ # `@user:test` is required for lazy loading, but we've
+ # already sent it down before (due to `StateValues.ME`
+ # being in `previous_required_state_map`), so we don't
+ # need to request it again.
+ StateFilter.none(),
+ # No relevant state has changed and we don't persist the
+ # changed required_state_map, so we don't yet move the
+ # $ME state to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+ # Nothing to invalidate as there are no existing lazy members.
+ lazy_members_invalidated=frozenset(),
+ ),
+ ),
+ ),
+ (
+ "state_key_retract_lazy_keep_previous_memberships_with_new_memberships",
+ """
+ Test retracting the `required_state` to no longer lazy-loading room members.
+ """,
+ RequiredStateChangesTestParameters(
+ previous_required_state_map={EventTypes.Member: {StateValues.LAZY}},
request_required_state_map={EventTypes.Member: {"@user4:test"}},
+ previously_returned_lazy_user_ids={"@user2:test", "@user3:test"},
+ request_lazy_load_user_ids=frozenset(),
state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"},
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
# Since "@user4:test" was added, we should persist the changed
# required state config.
- #
+ {EventTypes.Member: {"@user4:test"}},
+ # We should see the new state_keys added
+ StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
# Also remove "@user2:test" since that state has changed and is no
# longer being requested anymore. Since something was removed,
# we also should persist the changed to required state. That way next
# time, they request "@user2:test", we see that we haven't sent
# it before and send the new state. (we should still keep track
# that we've sent specific `EventTypes.Member` before)
- {
- EventTypes.Member: {
- "@user3:test",
- "@user4:test",
- }
- },
- # We should see the new state_keys added
- StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
+ lazy_members_invalidated={"@user2:test"},
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
# Since "@user4:test" was added, we should persist the changed
# required state config.
- {
- EventTypes.Member: {
- "@user2:test",
- "@user3:test",
- "@user4:test",
- }
- },
+ {EventTypes.Member: {"@user4:test"}},
# We should see the new state_keys added
StateFilter.from_types([(EventTypes.Member, "@user4:test")]),
+                    # The previous request did not include any explicit members,
+                    # so there are no extra users to add to the lazy cache.
+ extra_users_to_add_to_lazy_cache=frozenset(),
+                    # We don't invalidate @user2 as their membership hasn't changed.
+ lazy_members_invalidated=frozenset(),
),
),
),
@@ -4613,7 +4756,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# room required state config to match the request. And since we we're previously
# already fetching everything, we don't have to fetch anything now that they've
# narrowed.
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{
StateValues.WILDCARD: {
"state_key1",
@@ -4623,7 +4766,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{
StateValues.WILDCARD: {
"state_key1",
@@ -4649,11 +4792,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
state_deltas={("type1", "state_key1"): "$event_id"},
# We've added a wildcard, so we persist the change and request everything
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{StateValues.WILDCARD: {StateValues.WILDCARD}},
StateFilter.all(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{StateValues.WILDCARD: {StateValues.WILDCARD}},
StateFilter.all(),
),
@@ -4673,7 +4816,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# request. And since we we're previously already fetching
# everything, we don't have to fetch anything now that they've
# narrowed.
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{
"type1": {
"state_key1",
@@ -4683,7 +4826,7 @@ class RequiredStateChangesTestCase(unittest.TestCase):
},
StateFilter.none(),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{
"type1": {
"state_key1",
@@ -4708,11 +4851,11 @@ class RequiredStateChangesTestCase(unittest.TestCase):
# update the effective room required state config to match the
# request. And we need to request all of the state for that type
# because we previously, only sent down a few keys.
- expected_with_state_deltas=(
+ expected_with_state_deltas=_RequiredStateChangesReturn(
{"type1": {StateValues.WILDCARD, "state_key2", "state_key3"}},
StateFilter.from_types([("type1", None)]),
),
- expected_without_state_deltas=(
+ expected_without_state_deltas=_RequiredStateChangesReturn(
{
"type1": {
StateValues.WILDCARD,
@@ -4734,42 +4877,66 @@ def test_xxx(
test_parameters: RequiredStateChangesTestParameters,
) -> None:
# Without `state_deltas`
- changed_required_state_map, added_state_filter = _required_state_changes(
+ state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=test_parameters.previous_required_state_map,
request_required_state_map=test_parameters.request_required_state_map,
+ previously_returned_lazy_user_ids=test_parameters.previously_returned_lazy_user_ids,
+ request_lazy_load_user_ids=test_parameters.request_lazy_load_user_ids,
state_deltas={},
)
self.assertEqual(
- changed_required_state_map,
- test_parameters.expected_without_state_deltas[0],
+ state_changes.changed_required_state_map,
+ test_parameters.expected_without_state_deltas.changed_required_state_map,
"changed_required_state_map does not match (without state_deltas)",
)
self.assertEqual(
- added_state_filter,
- test_parameters.expected_without_state_deltas[1],
+ state_changes.added_state_filter,
+ test_parameters.expected_without_state_deltas.added_state_filter,
"added_state_filter does not match (without state_deltas)",
)
+ self.assertEqual(
+ state_changes.lazy_members_invalidated,
+ test_parameters.expected_without_state_deltas.lazy_members_invalidated,
+ "lazy_members_invalidated does not match (without state_deltas)",
+ )
+ self.assertEqual(
+ state_changes.extra_users_to_add_to_lazy_cache,
+ test_parameters.expected_without_state_deltas.extra_users_to_add_to_lazy_cache,
+            "extra_users_to_add_to_lazy_cache does not match (without state_deltas)",
+ )
# With `state_deltas`
- changed_required_state_map, added_state_filter = _required_state_changes(
+ state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=test_parameters.previous_required_state_map,
request_required_state_map=test_parameters.request_required_state_map,
+ previously_returned_lazy_user_ids=test_parameters.previously_returned_lazy_user_ids,
+ request_lazy_load_user_ids=test_parameters.request_lazy_load_user_ids,
state_deltas=test_parameters.state_deltas,
)
self.assertEqual(
- changed_required_state_map,
- test_parameters.expected_with_state_deltas[0],
+ state_changes.changed_required_state_map,
+ test_parameters.expected_with_state_deltas.changed_required_state_map,
"changed_required_state_map does not match (with state_deltas)",
)
self.assertEqual(
- added_state_filter,
- test_parameters.expected_with_state_deltas[1],
+ state_changes.added_state_filter,
+ test_parameters.expected_with_state_deltas.added_state_filter,
"added_state_filter does not match (with state_deltas)",
)
+ self.assertEqual(
+ state_changes.lazy_members_invalidated,
+ test_parameters.expected_with_state_deltas.lazy_members_invalidated,
+ "lazy_members_invalidated does not match (with state_deltas)",
+ )
+ self.assertEqual(
+ state_changes.extra_users_to_add_to_lazy_cache,
+ test_parameters.expected_with_state_deltas.extra_users_to_add_to_lazy_cache,
+            "extra_users_to_add_to_lazy_cache does not match (with state_deltas)",
+ )
@parameterized.expand(
[
@@ -4805,12 +4972,16 @@ def test_limit_retained_previous_state_keys(
}
# (function under test)
- changed_required_state_map, added_state_filter = _required_state_changes(
+ state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=previous_required_state_map,
request_required_state_map=request_required_state_map,
+ previously_returned_lazy_user_ids=frozenset(),
+ request_lazy_load_user_ids=frozenset(),
state_deltas={},
)
+ changed_required_state_map = state_changes.changed_required_state_map
+
assert changed_required_state_map is not None
# We should only remember up to the maximum number of state keys
@@ -4874,12 +5045,16 @@ def test_request_more_state_keys_than_remember_limit(self) -> None:
)
# (function under test)
- changed_required_state_map, added_state_filter = _required_state_changes(
+ state_changes = _required_state_changes(
user_id="@user:test",
prev_required_state_map=previous_required_state_map,
request_required_state_map=request_required_state_map,
+ previously_returned_lazy_user_ids=frozenset(),
+ request_lazy_load_user_ids=frozenset(),
state_deltas={},
)
+ changed_required_state_map = state_changes.changed_required_state_map
+
assert changed_required_state_map is not None
# Should include all of the requested state
diff --git a/tests/rest/client/sliding_sync/test_lists_filters.py b/tests/rest/client/sliding_sync/test_lists_filters.py
index 3b7b2a16d8..59facea1e0 100644
--- a/tests/rest/client/sliding_sync/test_lists_filters.py
+++ b/tests/rest/client/sliding_sync/test_lists_filters.py
@@ -690,7 +690,7 @@ def test_filters_is_encrypted_with_remote_invite_room_no_stripped_state(
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room without any `unsigned.invite_room_state`
- _remote_invite_room_id = self._create_remote_invite_room_for_user(
+ _remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id, None
)
@@ -760,7 +760,7 @@ def test_filters_is_encrypted_with_remote_invite_encrypted_room(self) -> None:
# Create a remote invite room with some `unsigned.invite_room_state`
# indicating that the room is encrypted.
- remote_invite_room_id = self._create_remote_invite_room_for_user(
+ remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
@@ -849,7 +849,7 @@ def test_filters_is_encrypted_with_remote_invite_unencrypted_room(self) -> None:
# Create a remote invite room with some `unsigned.invite_room_state`
# but don't set any room encryption event.
- remote_invite_room_id = self._create_remote_invite_room_for_user(
+ remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
@@ -1484,7 +1484,7 @@ def test_filters_room_types_with_remote_invite_room_no_stripped_state(self) -> N
user1_tok = self.login(user1_id, "pass")
# Create a remote invite room without any `unsigned.invite_room_state`
- _remote_invite_room_id = self._create_remote_invite_room_for_user(
+ _remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id, None
)
@@ -1554,7 +1554,7 @@ def test_filters_room_types_with_remote_invite_space(self) -> None:
# Create a remote invite room with some `unsigned.invite_room_state` indicating
# that it is a space room
- remote_invite_room_id = self._create_remote_invite_room_for_user(
+ remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
@@ -1637,7 +1637,7 @@ def test_filters_room_types_with_remote_invite_normal_room(self) -> None:
# Create a remote invite room with some `unsigned.invite_room_state`
# but the create event does not specify a room type (normal room)
- remote_invite_room_id = self._create_remote_invite_room_for_user(
+ remote_invite_room_id, _ = self._create_remote_invite_room_for_user(
user1_id,
[
StrippedStateEvent(
diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py
index 210280bc48..586b127f8a 100644
--- a/tests/rest/client/sliding_sync/test_rooms_required_state.py
+++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py
@@ -23,6 +23,7 @@
from synapse.handlers.sliding_sync import StateValues
from synapse.rest.client import knock, login, room, sync
from synapse.server import HomeServer
+from synapse.storage.databases.main.events import DeltaState, SlidingSyncTableChanges
from synapse.util.clock import Clock
from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
@@ -642,11 +643,6 @@ def test_rooms_required_state_changed_membership_in_timeline_lazy_loading_room_m
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
- # This appears because *some* membership in the room changed and the
- # heroes are recalculated and is thrown in because we have it. But this
- # is technically optional and not needed because we've already seen user2
- # in the last sync (and their membership hasn't changed).
- state_map[(EventTypes.Member, user2_id)],
# Appears because there is a message in the timeline from this user
state_map[(EventTypes.Member, user4_id)],
# Appears because there is a membership event in the timeline from this user
@@ -841,6 +837,437 @@ def test_rooms_required_state_expand_retract_expand_lazy_loading_room_members_in
exact=True,
)
+ def test_lazy_loading_room_members_limited_sync(self) -> None:
+        """Test that when lazy loading room members and a limited sync misses a
+        membership change, we include the membership change the next time that
+        user sends a timeline event.
+ """
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ # Send a message from each user to the room so that both memberships are sent down.
+ self.helper.send(room_id1, "1", tok=user1_tok)
+ self.helper.send(room_id1, "2", tok=user2_tok)
+
+ # Make a first sync with lazy loading for the room members to establish
+ # a position
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 2,
+ }
+ }
+ }
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ # We should see both membership events in required_state
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ # User2 changes their display name (causing a membership change)
+ self.helper.send_state(
+ room_id1,
+ event_type=EventTypes.Member,
+ state_key=user2_id,
+ body={
+ EventContentFields.MEMBERSHIP: Membership.JOIN,
+ EventContentFields.MEMBERSHIP_DISPLAYNAME: "New Name",
+ },
+ tok=user2_tok,
+ )
+
+        # Send a couple of messages to the room to push the membership change
+        # out of the limited timeline window
+ self.helper.send(room_id1, "3", tok=user1_tok)
+ self.helper.send(room_id1, "4", tok=user1_tok)
+
+ # Make an incremental Sliding Sync request
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ # The membership change should *not* be included yet as user2 doesn't
+ # have any events in the timeline.
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1].get("required_state", []),
+ set(),
+ exact=True,
+ )
+
+ # Now user2 sends a message to the room
+ self.helper.send(room_id1, "5", tok=user2_tok)
+
+ # Make another incremental Sliding Sync request
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ # The membership change should now be included as user2 has an event
+ # in the timeline.
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1].get("required_state", []),
+ {
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ def test_lazy_loading_room_members_across_multiple_rooms(self) -> None:
+        """Test that lazily-loaded room members are tracked correctly per room."""
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ # Create two rooms with both users in them and send a message in each
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+ self.helper.send(room_id1, "room1-msg1", tok=user2_tok)
+
+ room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id2, user1_id, tok=user1_tok)
+ self.helper.send(room_id2, "room2-msg1", tok=user2_tok)
+
+ # Make a sync with lazy loading for the room members to establish
+ # a position
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ }
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ # We expect to see only user2's membership in both rooms
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ # Send a message in room1 from user1
+ self.helper.send(room_id1, "room1-msg2", tok=user1_tok)
+
+ # Make an incremental Sliding Sync request and check that we get user1's
+ # membership.
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ # Send a message in room2 from user1
+ self.helper.send(room_id2, "room2-msg2", tok=user1_tok)
+
+ # Make an incremental Sliding Sync request and check that we get user1's
+ # membership.
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id2)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id2]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ def test_lazy_loading_room_members_across_multiple_connections(self) -> None:
+        """Test that lazily-loaded room members are tracked correctly
+        per connection.
+
+        This catches bugs where a membership sent down one connection is
+        incorrectly assumed to have also been sent down another connection.
+ """
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ self.helper.send(room_id1, "1", tok=user2_tok)
+
+ # Make a sync with lazy loading for the room members to establish
+ # a position
+ sync_body1 = {
+ "conn_id": "first-connection",
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 1,
+ }
+ },
+ }
+ response_body, from_token1 = self.do_sync(sync_body1, tok=user1_tok)
+
+ # We expect to see only user2's membership in the room
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ # Now make a new connection
+ sync_body2 = {
+ "conn_id": "second-connection",
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 1,
+ }
+ },
+ }
+ response_body, from_token2 = self.do_sync(sync_body2, tok=user1_tok)
+
+ # We should see user2's membership as this is a new connection
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ # If we send a message from user1 and sync again on the first connection,
+ # we should get user1's membership
+ self.helper.send(room_id1, "2", tok=user1_tok)
+ response_body, from_token1 = self.do_sync(
+ sync_body1, since=from_token1, tok=user1_tok
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+        # We sync again on the first connection to "ack" the position. This
+        # causes the `sliding_sync_connection_lazy_members` rows for that
+        # position to have their `connection_position` set to NULL.
+ self.do_sync(sync_body1, since=from_token1, tok=user1_tok)
+
+ # If we sync again on the second connection, we should also get user1's
+ # membership
+ response_body, _ = self.do_sync(sync_body2, since=from_token2, tok=user1_tok)
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ def test_lazy_loading_room_members_forked_position(self) -> None:
+        """Test that lazily-loaded room members are tracked correctly when a
+        connection position is reused."""
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ self.helper.send(room_id1, "1", tok=user2_tok)
+
+ # Make a sync with lazy loading for the room members to establish
+ # a position
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ }
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ # We expect to see only user2's membership in the room
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ # Send a message in room1 from user1
+ self.helper.send(room_id1, "2", tok=user1_tok)
+
+ # Make an incremental Sliding Sync request and check that we get user1's
+ # membership.
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ # Now, reuse the original position and check we still get user1's
+ # membership.
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ def test_lazy_loading_room_members_explicit_membership_removed(self) -> None:
+ """Test the case where we requested explicit memberships and then later
+ changed to lazy loading."""
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+ self.helper.send(room_id1, "1", tok=user2_tok)
+
+ # Make a sync with lazy loading for the room members to establish
+ # a position
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.ME],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ }
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ # We expect to see only user1's membership in the room
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ # Now change to lazy loading...
+ sync_body["lists"]["foo-list"]["required_state"] = [
+ [EventTypes.Member, StateValues.LAZY],
+ ]
+
+ # Send a message in room1 from user2
+ self.helper.send(room_id1, "2", tok=user2_tok)
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ # We should see user2's membership as it's in the timeline
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1]["required_state"],
+ {
+ state_map[(EventTypes.Member, user2_id)],
+ },
+ exact=True,
+ )
+
+ # Now send a message in room1 from user1
+ self.helper.send(room_id1, "3", tok=user1_tok)
+
+ response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+
+ # We should not see any memberships as we've already seen user1's
+ # membership.
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id1].get("required_state", []),
+ [],
+ exact=True,
+ )
+
def test_rooms_required_state_me(self) -> None:
"""
Test `rooms.required_state` correctly handles $ME.
@@ -1686,3 +2113,135 @@ def test_rooms_required_state_expand_deduplicate(self) -> None:
# We should not see the room name again, as we have already sent that
# down.
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
+
+ def test_lazy_loading_room_members_state_reset_non_limited_timeline(self) -> None:
+ """Test that when using lazy-loaded members, if a membership state is
+ reset to a previous state and the sync is not limited, then we send down
+ the state reset.
+
+        Regression test: previously we only returned memberships relevant to
+        the timeline, and so did not tell clients about state resets for
+        users who did not send any timeline events.
+ """
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ content = self.helper.join(room_id, user1_id, tok=user1_tok)
+ first_event_id = content["event_id"]
+
+ # Send a message so that the user1 membership comes down sync (because we're lazy-loading room members)
+ self.helper.send(room_id, "msg", tok=user1_tok)
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ }
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ # Check that user1 is returned
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ # user1 changes their display name
+ content = self.helper.send_state(
+ room_id,
+ EventTypes.Member,
+ body={"membership": "join", "displayname": "New display name"},
+ state_key=user1_id,
+ tok=user1_tok,
+ )
+ second_event_id = content["event_id"]
+
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ # We should see the updated membership state
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id]["required_state"][0]["event_id"],
+ second_event_id,
+ )
+
+        # Now, fake a reset of the membership state to the first event
+ persist_event_store = self.hs.get_datastores().persist_events
+ assert persist_event_store is not None
+
+ self.get_success(
+ persist_event_store.update_current_state(
+ room_id,
+ DeltaState(
+ to_insert={(EventTypes.Member, user1_id): first_event_id},
+ to_delete=[],
+ ),
+ # We don't need to worry about sliding sync changes for this test
+ SlidingSyncTableChanges(
+ room_id=room_id,
+ joined_room_bump_stamp_to_fully_insert=None,
+ joined_room_updates={},
+ membership_snapshot_shared_insert_values={},
+ to_insert_membership_snapshots=[],
+ to_delete_membership_snapshots=[],
+ ),
+ )
+ )
+
+        # Send a message from *user2* so that user1's membership wouldn't
+        # normally be sent down sync.
+ self.helper.send(room_id, "msg2", tok=user2_tok)
+
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+        # This should be a non-limited sync as there is only one timeline event
+        # (<= `timeline_limit`). This is important as we're specifically testing
+        # the non-`limited` timeline scenario. For reference, we don't send down
+        # state resets on limited timelines when using lazy-loaded memberships.
+ self.assertFalse(
+ response_body["rooms"][room_id].get("limited", False),
+ "Expected a non-limited timeline",
+ )
+
+ # We should see the reset membership state of user1
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ )
+ self.assertEqual(
+ response_body["rooms"][room_id]["required_state"][0]["event_id"],
+ first_event_id,
+ )
diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
index bcd22d15ca..ac8dfd37d8 100644
--- a/tests/rest/client/sliding_sync/test_sliding_sync.py
+++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
@@ -257,7 +257,7 @@ def _create_remote_invite_room_for_user(
invitee_user_id: str,
unsigned_invite_room_state: list[StrippedStateEvent] | None,
invite_room_id: str | None = None,
- ) -> str:
+ ) -> tuple[str, EventBase]:
"""
Create a fake invite for a remote room and persist it.
@@ -323,11 +323,13 @@ def _create_remote_invite_room_for_user(
context = EventContext.for_outlier(self.hs.get_storage_controllers())
persist_controller = self.hs.get_storage_controllers().persistence
assert persist_controller is not None
- self.get_success(persist_controller.persist_event(invite_event, context))
+ persisted_event, _, _ = self.get_success(
+ persist_controller.persist_event(invite_event, context)
+ )
self._remote_invite_count += 1
- return invite_room_id
+ return invite_room_id, persisted_event
def _bump_notifier_wait_for_events(
self,
@@ -763,7 +765,7 @@ def test_invited_to_forgotten_remote_room(self) -> None:
user1_tok = self.login(user1_id, "pass")
# Create a remote room invite (out-of-band membership)
- room_id = self._create_remote_invite_room_for_user(user1_id, None)
+ room_id, _ = self._create_remote_invite_room_for_user(user1_id, None)
# Make the Sliding Sync request
sync_body = {
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
index db31348a8c..cb9be29c5d 100644
--- a/tests/storage/test_sliding_sync_tables.py
+++ b/tests/storage/test_sliding_sync_tables.py
@@ -30,19 +30,23 @@
from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.rest import admin
-from synapse.rest.client import login, room
+from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.storage.databases.main.events import DeltaState
from synapse.storage.databases.main.events_bg_updates import (
_resolve_stale_data_in_sliding_sync_joined_rooms_table,
_resolve_stale_data_in_sliding_sync_membership_snapshots_table,
)
-from synapse.types import create_requester
+from synapse.types import SlidingSyncStreamToken, create_requester
+from synapse.types.handlers.sliding_sync import (
+ LAZY_MEMBERS_UPDATE_INTERVAL,
+ StateValues,
+)
from synapse.types.storage import _BackgroundUpdates
from synapse.util.clock import Clock
+from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
from tests.test_utils.event_injection import create_event
-from tests.unittest import HomeserverTestCase
logger = logging.getLogger(__name__)
@@ -86,7 +90,7 @@ class _SlidingSyncMembershipSnapshotResult:
forgotten: bool = False
-class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
+class SlidingSyncTablesTestCaseBase(SlidingSyncBase):
"""
Helpers to deal with testing that the
`sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
@@ -97,6 +101,7 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
admin.register_servlets,
login.register_servlets,
room.register_servlets,
+ sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -202,78 +207,6 @@ def _get_sliding_sync_membership_snapshots(
for row in rows
}
- _remote_invite_count: int = 0
-
- def _create_remote_invite_room_for_user(
- self,
- invitee_user_id: str,
- unsigned_invite_room_state: list[StrippedStateEvent] | None,
- ) -> tuple[str, EventBase]:
- """
- Create a fake invite for a remote room and persist it.
-
- We don't have any state for these kind of rooms and can only rely on the
- stripped state included in the unsigned portion of the invite event to identify
- the room.
-
- Args:
- invitee_user_id: The person being invited
- unsigned_invite_room_state: List of stripped state events to assist the
- receiver in identifying the room.
-
- Returns:
- The room ID of the remote invite room and the persisted remote invite event.
- """
- invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
-
- invite_event_dict = {
- "room_id": invite_room_id,
- "sender": "@inviter:remote_server",
- "state_key": invitee_user_id,
- "depth": 1,
- "origin_server_ts": 1,
- "type": EventTypes.Member,
- "content": {"membership": Membership.INVITE},
- "auth_events": [],
- "prev_events": [],
- }
- if unsigned_invite_room_state is not None:
- serialized_stripped_state_events = []
- for stripped_event in unsigned_invite_room_state:
- serialized_stripped_state_events.append(
- {
- "type": stripped_event.type,
- "state_key": stripped_event.state_key,
- "sender": stripped_event.sender,
- "content": stripped_event.content,
- }
- )
-
- invite_event_dict["unsigned"] = {
- "invite_room_state": serialized_stripped_state_events
- }
-
- invite_event = make_event_from_dict(
- invite_event_dict,
- room_version=RoomVersions.V10,
- )
- invite_event.internal_metadata.outlier = True
- invite_event.internal_metadata.out_of_band_membership = True
-
- self.get_success(
- self.store.maybe_store_room_on_outlier_membership(
- room_id=invite_room_id, room_version=invite_event.room_version
- )
- )
- context = EventContext.for_outlier(self.hs.get_storage_controllers())
- persisted_event, _, _ = self.get_success(
- self.persist_controller.persist_event(invite_event, context)
- )
-
- self._remote_invite_count += 1
-
- return invite_room_id, persisted_event
-
def _retract_remote_invite_for_user(
self,
user_id: str,
@@ -3052,6 +2985,141 @@ def test_membership_snapshot_missing_forget(
exact=True,
)
+ def test_lazy_loading_room_members_last_seen_ts(self) -> None:
+ """Test that the `last_seen_ts` column in
+ `sliding_sync_connection_lazy_members` is correctly kept up to date.
+
+        We expect it to be updated at most once every
+        `LAZY_MEMBERS_UPDATE_INTERVAL`, rather than on every sync.
+ """
+
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+ self.helper.join(room_id, user1_id, tok=user1_tok)
+
+ # Send a message so that user1 comes down sync.
+ self.helper.send(room_id, "msg", tok=user1_tok)
+
+ sync_body = {
+ "lists": {
+ "foo-list": {
+ "ranges": [[0, 1]],
+ "required_state": [
+ [EventTypes.Member, StateValues.LAZY],
+ ],
+ "timeline_limit": 1,
+ }
+ }
+ }
+ response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+ # Check that user1 is returned
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id]["required_state"],
+ {
+ state_map[(EventTypes.Member, user1_id)],
+ },
+ exact=True,
+ )
+
+ # Check that we have an entry in sliding_sync_connection_lazy_members
+ connection_pos1 = self.get_success(
+ SlidingSyncStreamToken.from_string(self.store, from_token)
+ ).connection_position
+ lazy_member_entries = self.get_success(
+ self.store.get_sliding_sync_connection_lazy_members(
+ connection_pos1, room_id, {user1_id}
+ )
+ )
+ self.assertIn(user1_id, lazy_member_entries)
+
+ prev_timestamp = lazy_member_entries[user1_id]
+
+ # If user1 sends a message then we consider it for lazy loading. We have
+ # previously returned it so we don't send the state down again, but it
+ # is still eligible for updating the timestamp. Since we last updated
+ # the timestamp within the last `LAZY_MEMBERS_UPDATE_INTERVAL`, we do not
+ # update it.
+ self.helper.send(room_id, "msg2", tok=user1_tok)
+
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ # We expect the required_state map to be empty as nothing has changed.
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self._assertRequiredStateIncludes(
+ response_body["rooms"][room_id].get("required_state", []),
+ {},
+ exact=True,
+ )
+
+ connection_pos2 = self.get_success(
+ SlidingSyncStreamToken.from_string(self.store, from_token)
+ ).connection_position
+
+ lazy_member_entries = self.get_success(
+ self.store.get_sliding_sync_connection_lazy_members(
+ connection_pos2, room_id, {user1_id}
+ )
+ )
+
+ # The timestamp should be unchanged.
+ self.assertEqual(lazy_member_entries[user1_id], prev_timestamp)
+
+ # Now advance the time by `LAZY_MEMBERS_UPDATE_INTERVAL` so that we
+ # would update the timestamp.
+ self.reactor.advance(LAZY_MEMBERS_UPDATE_INTERVAL.as_secs())
+
+ # Send a message from user2
+ self.helper.send(room_id, "msg3", tok=user2_tok)
+
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+
+ connection_pos3 = self.get_success(
+ SlidingSyncStreamToken.from_string(self.store, from_token)
+ ).connection_position
+
+ lazy_member_entries = self.get_success(
+ self.store.get_sliding_sync_connection_lazy_members(
+ connection_pos3, room_id, {user1_id}
+ )
+ )
+
+        # The timestamp for user1 should be unchanged, as their membership was not sent down.
+ self.assertEqual(lazy_member_entries[user1_id], prev_timestamp)
+
+        # Now if user1 sends a message, the timestamp should be updated, as it's
+        # been over `LAZY_MEMBERS_UPDATE_INTERVAL` since we last updated it
+        # (even though we don't send the state down again).
+ self.helper.send(room_id, "msg4", tok=user1_tok)
+
+ response_body, from_token = self.do_sync(
+ sync_body, since=from_token, tok=user1_tok
+ )
+ connection_pos4 = self.get_success(
+ SlidingSyncStreamToken.from_string(self.store, from_token)
+ ).connection_position
+
+ lazy_member_entries = self.get_success(
+ self.store.get_sliding_sync_connection_lazy_members(
+ connection_pos4, room_id, {user1_id}
+ )
+ )
+ # The timestamp for user1 should be updated.
+ self.assertGreater(lazy_member_entries[user1_id], prev_timestamp)
+
class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
"""
From 1f7f16477dc878152febbb74ba7c99abbd438fba Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 12 Dec 2025 11:31:55 +0000
Subject: [PATCH 19/59] Unpin Rust from 1.82.0 (#19302)
---
.ci/before_build_wheel.sh | 2 +-
.github/workflows/release-artifacts.yml | 2 +-
changelog.d/19302.misc | 1 +
pyproject.toml | 3 ---
4 files changed, 3 insertions(+), 5 deletions(-)
create mode 100644 changelog.d/19302.misc
diff --git a/.ci/before_build_wheel.sh b/.ci/before_build_wheel.sh
index 56108dcd60..44ca97f31b 100644
--- a/.ci/before_build_wheel.sh
+++ b/.ci/before_build_wheel.sh
@@ -7,4 +7,4 @@ if command -v yum &> /dev/null; then
fi
# Install a Rust toolchain
-curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal
+curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 531680b989..33b965d960 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -5,7 +5,7 @@ name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs). PRs skip
- # building wheels on macOS & ARM.
+ # building wheels on ARM.
pull_request:
push:
branches: ["develop", "release-*"]
diff --git a/changelog.d/19302.misc b/changelog.d/19302.misc
new file mode 100644
index 0000000000..606ab5b52d
--- /dev/null
+++ b/changelog.d/19302.misc
@@ -0,0 +1 @@
+Unpin the version of Rust we use to build Synapse wheels (was pinned to 1.82.0), now that macOS support has been dropped.
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 182861ca39..a9832ccabf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -471,9 +471,6 @@ skip = "cp3??t-* *i686* *macosx*"
enable = "pypy"
# We need a rust compiler.
-#
-# We temporarily pin Rust to 1.82.0 to work around
-# https://github.com/element-hq/synapse/issues/17988
before-all = "sh .ci/before_build_wheel.sh"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
From 3f636386a66cbc57a6a3c3e641dfd6f1917c838e Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Fri, 12 Dec 2025 06:30:21 -0700
Subject: [PATCH 20/59] Add an Admin API endpoint for listing quarantined media
(#19268)
Co-authored-by: turt2live <1190097+turt2live@users.noreply.github.com>
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/19268.feature | 1 +
docs/admin_api/media_admin_api.md | 27 +++++
synapse/media/media_repository.py | 2 +
synapse/rest/admin/media.py | 33 ++++++
.../databases/main/media_repository.py | 10 +-
synapse/storage/databases/main/room.py | 70 ++++++++++--
.../93/04_add_quarantined_ts_to_media.sql | 27 +++++
tests/rest/admin/test_media.py | 106 ++++++++++++++++++
8 files changed, 266 insertions(+), 10 deletions(-)
create mode 100644 changelog.d/19268.feature
create mode 100644 synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql
diff --git a/changelog.d/19268.feature b/changelog.d/19268.feature
new file mode 100644
index 0000000000..cb7035fee2
--- /dev/null
+++ b/changelog.d/19268.feature
@@ -0,0 +1 @@
+Add an admin API for retrieving a paginated list of quarantined media.
\ No newline at end of file
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index be72b2e3e2..a3d99cb074 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -73,6 +73,33 @@ Response:
}
```
+## Listing all quarantined media
+
+This API returns a list of all quarantined media on the server. It is paginated, and can be scoped to either local or
+remote media. Note that the pagination values are scoped to the request parameters: changing the parameters while
+keeping the same pagination values may produce unexpected results.
+
+Request:
+```http
+GET /_synapse/admin/v1/media/quarantined?from=0&limit=100&kind=local
+```
+
+`from` and `limit` are optional parameters, and default to `0` and `100` respectively. They are the row index and number
+of rows to return - they are not timestamps.
+
+`kind` *MUST* be either `local` or `remote`.
+
+The API returns a JSON body containing MXC URIs for the quarantined media, like the following:
+
+```json
+{
+ "media": [
+ "mxc://localhost/xwvutsrqponmlkjihgfedcba",
+ "mxc://localhost/abcdefghijklmnopqrstuvwx"
+ ]
+}
+```
+
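+For example, a client could page through the quarantined media list with a small script. The sketch below uses the
+Python `requests` library; the homeserver URL and admin access token are placeholders.
+
+```python
+import requests
+
+BASE_URL = "https://synapse.example.com"  # placeholder homeserver URL
+ACCESS_TOKEN = "<admin access token>"     # placeholder admin access token
+
+def fetch_quarantined_media(kind: str, page_size: int = 100) -> list[str]:
+    """Collect all quarantined media MXC URIs of the given kind ('local' or 'remote')."""
+    mxcs: list[str] = []
+    offset = 0
+    while True:
+        resp = requests.get(
+            f"{BASE_URL}/_synapse/admin/v1/media/quarantined",
+            params={"kind": kind, "from": offset, "limit": page_size},
+            headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
+        )
+        resp.raise_for_status()
+        page = resp.json()["media"]
+        mxcs.extend(page)
+        if len(page) < page_size:
+            # A short page means we have reached the end of the list.
+            break
+        offset += page_size
+    return mxcs
+```
+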
# Quarantine media
Quarantining media means that it is marked as inaccessible by users. It applies
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index 29c5e66ec4..e84e842300 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -914,6 +914,7 @@ async def _download_remote_file(
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
+ quarantined_ts=None,
authenticated=authenticated,
sha256=sha256writer.hexdigest(),
)
@@ -1047,6 +1048,7 @@ async def _federation_download_remote_file(
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
+ quarantined_ts=None,
authenticated=authenticated,
sha256=sha256writer.hexdigest(),
)
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index d5346fe0d5..4cfd9da0f9 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -293,6 +293,38 @@ async def on_GET(
return HTTPStatus.OK, {"local": local_mxcs, "remote": remote_mxcs}
+class ListQuarantinedMedia(RestServlet):
+ """Lists all quarantined media on the server."""
+
+ PATTERNS = admin_patterns("/media/quarantined$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.store = hs.get_datastores().main
+ self.auth = hs.get_auth()
+
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ ) -> tuple[int, JsonDict]:
+ await assert_requester_is_admin(self.auth, request)
+
+ start = parse_integer(request, "from", default=0)
+ limit = parse_integer(request, "limit", default=100)
+ local_or_remote = parse_string(request, "kind", required=True)
+
+ if local_or_remote not in ["local", "remote"]:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Query parameter `kind` must be either 'local' or 'remote'.",
+ )
+
+ mxcs = await self.store.get_quarantined_media_mxcs(
+ start, limit, local_or_remote == "local"
+ )
+
+ return HTTPStatus.OK, {"media": mxcs}
+
+
class PurgeMediaCacheRestServlet(RestServlet):
PATTERNS = admin_patterns("/purge_media_cache$")
@@ -532,6 +564,7 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
ProtectMediaByID(hs).register(http_server)
UnprotectMediaByID(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
+ ListQuarantinedMedia(hs).register(http_server)
# XXX DeleteMediaByDateSize must be registered before DeleteMediaByID as
# their URL routes overlap.
DeleteMediaByDateSize(hs).register(http_server)
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 50664d63e5..c27c68fbc2 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -61,6 +61,7 @@ class LocalMedia:
url_cache: str | None
last_access_ts: int
quarantined_by: str | None
+ quarantined_ts: int | None
safe_from_quarantine: bool
user_id: str | None
authenticated: bool | None
@@ -78,6 +79,7 @@ class RemoteMedia:
created_ts: int
last_access_ts: int
quarantined_by: str | None
+ quarantined_ts: int | None
authenticated: bool | None
sha256: str | None
@@ -243,6 +245,7 @@ async def get_local_media(self, media_id: str) -> LocalMedia | None:
"user_id",
"authenticated",
"sha256",
+ "quarantined_ts",
),
allow_none=True,
desc="get_local_media",
@@ -262,6 +265,7 @@ async def get_local_media(self, media_id: str) -> LocalMedia | None:
user_id=row[8],
authenticated=row[9],
sha256=row[10],
+ quarantined_ts=row[11],
)
async def get_local_media_by_user_paginate(
@@ -319,7 +323,8 @@ def get_local_media_by_user_paginate_txn(
safe_from_quarantine,
user_id,
authenticated,
- sha256
+ sha256,
+ quarantined_ts
FROM local_media_repository
WHERE user_id = ?
ORDER BY {order_by_column} {order}, media_id ASC
@@ -345,6 +350,7 @@ def get_local_media_by_user_paginate_txn(
user_id=row[9],
authenticated=row[10],
sha256=row[11],
+ quarantined_ts=row[12],
)
for row in txn
]
@@ -695,6 +701,7 @@ async def get_cached_remote_media(
"quarantined_by",
"authenticated",
"sha256",
+ "quarantined_ts",
),
allow_none=True,
desc="get_cached_remote_media",
@@ -713,6 +720,7 @@ async def get_cached_remote_media(
quarantined_by=row[6],
authenticated=row[7],
sha256=row[8],
+ quarantined_ts=row[9],
)
async def store_cached_remote_media(
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 633df07736..182e55743a 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -945,6 +945,50 @@ def get_retention_policy_for_room_txn(
max_lifetime=max_lifetime,
)
+ async def get_quarantined_media_mxcs(
+ self, index_start: int, index_limit: int, local: bool
+ ) -> list[str]:
+ """Retrieves all the quarantined media MXC URIs starting from the given position,
+        ordered by quarantine timestamp (oldest first), then alphabetically by media ID
+ (including origin).
+
+        Note that on established servers the "quarantined timestamp" may be zero for
+        media that was quarantined before the `quarantined_ts` field was introduced.
+
+ Args:
+ index_start: The position to start from.
+ index_limit: The maximum number of results to return.
+ local: When true, only local media will be returned. When false, only remote media will be returned.
+
+ Returns:
+ The quarantined media as a list of media IDs.
+ """
+
+ def _get_quarantined_media_mxcs_txn(
+ txn: LoggingTransaction,
+ ) -> list[str]:
+ # We order by quarantined timestamp *and* media ID (including origin, when
+ # known) to ensure the ordering is stable for established servers.
+ if local:
+ sql = "SELECT '' as media_origin, media_id FROM local_media_repository WHERE quarantined_by IS NOT NULL ORDER BY quarantined_ts, media_id ASC LIMIT ? OFFSET ?"
+ else:
+ sql = "SELECT media_origin, media_id FROM remote_media_cache WHERE quarantined_by IS NOT NULL ORDER BY quarantined_ts, media_origin, media_id ASC LIMIT ? OFFSET ?"
+ txn.execute(sql, (index_limit, index_start))
+
+ mxcs = []
+
+ for media_origin, media_id in txn:
+ if local:
+ media_origin = self.hs.hostname
+ mxcs.append(f"mxc://{media_origin}/{media_id}")
+
+ return mxcs
+
+ return await self.db_pool.runInteraction(
+ "get_quarantined_media_mxcs",
+ _get_quarantined_media_mxcs_txn,
+ )
+
async def get_media_mxcs_in_room(self, room_id: str) -> tuple[list[str], list[str]]:
"""Retrieves all the local and remote media MXC URIs in a given room
@@ -952,7 +996,7 @@ async def get_media_mxcs_in_room(self, room_id: str) -> tuple[list[str], list[st
room_id
Returns:
- The local and remote media as a lists of the media IDs.
+ The local and remote media as lists of the media IDs.
"""
def _get_media_mxcs_in_room_txn(
@@ -1147,6 +1191,10 @@ def _quarantine_local_media_txn(
The total number of media items quarantined
"""
total_media_quarantined = 0
+ now_ts: int | None = self.clock.time_msec()
+
+ if quarantined_by is None:
+ now_ts = None
# Effectively a legacy path, update any media that was explicitly named.
if media_ids:
@@ -1155,13 +1203,13 @@ def _quarantine_local_media_txn(
)
sql = f"""
UPDATE local_media_repository
- SET quarantined_by = ?
+ SET quarantined_by = ?, quarantined_ts = ?
WHERE {sql_many_clause_sql}"""
if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"
- txn.execute(sql, [quarantined_by] + sql_many_clause_args)
+ txn.execute(sql, [quarantined_by, now_ts] + sql_many_clause_args)
# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
@@ -1172,13 +1220,13 @@ def _quarantine_local_media_txn(
)
sql = f"""
UPDATE local_media_repository
- SET quarantined_by = ?
+ SET quarantined_by = ?, quarantined_ts = ?
WHERE {sql_many_clause_sql}"""
if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"
- txn.execute(sql, [quarantined_by] + sql_many_clause_args)
+ txn.execute(sql, [quarantined_by, now_ts] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return total_media_quarantined
@@ -1202,6 +1250,10 @@ def _quarantine_remote_media_txn(
The total number of media items quarantined
"""
total_media_quarantined = 0
+ now_ts: int | None = self.clock.time_msec()
+
+ if quarantined_by is None:
+ now_ts = None
if media:
sql_in_list_clause, sql_args = make_tuple_in_list_sql_clause(
@@ -1211,10 +1263,10 @@ def _quarantine_remote_media_txn(
)
sql = f"""
UPDATE remote_media_cache
- SET quarantined_by = ?
+ SET quarantined_by = ?, quarantined_ts = ?
WHERE {sql_in_list_clause}"""
- txn.execute(sql, [quarantined_by] + sql_args)
+ txn.execute(sql, [quarantined_by, now_ts] + sql_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
total_media_quarantined = 0
@@ -1224,9 +1276,9 @@ def _quarantine_remote_media_txn(
)
sql = f"""
UPDATE remote_media_cache
- SET quarantined_by = ?
+ SET quarantined_by = ?, quarantined_ts = ?
WHERE {sql_many_clause_sql}"""
- txn.execute(sql, [quarantined_by] + sql_many_clause_args)
+ txn.execute(sql, [quarantined_by, now_ts] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return total_media_quarantined
diff --git a/synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql b/synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql
new file mode 100644
index 0000000000..18b76804ff
--- /dev/null
+++ b/synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql
@@ -0,0 +1,27 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 Element Creations, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Add a timestamp recording when a piece of media was quarantined.
+--
+-- The column is nullable: media that has not been quarantined has a NULL value,
+-- and media quarantined before this migration is backfilled with 0 below, since
+-- we do not know when it was actually quarantined.
+ALTER TABLE local_media_repository ADD COLUMN quarantined_ts BIGINT;
+ALTER TABLE remote_media_cache ADD COLUMN quarantined_ts BIGINT;
+
+UPDATE local_media_repository SET quarantined_ts = 0 WHERE quarantined_by IS NOT NULL;
+UPDATE remote_media_cache SET quarantined_ts = 0 WHERE quarantined_by IS NOT NULL;
+
+-- Note: We *probably* should have an index on quarantined_ts, but we're going
+-- to try to defer that to a future migration after seeing the performance impact.
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 8cc54cc80c..e45cc4d208 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -756,6 +756,112 @@ def _access_media(
self.assertFalse(os.path.exists(local_path))
+class ListQuarantinedMediaTestCase(_AdminMediaTests):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.server_name = hs.hostname
+
+ @parameterized.expand(["local", "remote"])
+ def test_no_auth(self, kind: str) -> None:
+ """
+ Try to list quarantined media without authentication.
+ """
+
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/media/quarantined?kind=%s" % (kind,),
+ )
+
+ self.assertEqual(401, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ @parameterized.expand(["local", "remote"])
+ def test_requester_is_not_admin(self, kind: str) -> None:
+ """
+ If the user is not a server admin, an error is returned.
+ """
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_token = self.login("user", "pass")
+
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/media/quarantined?kind=%s" % (kind,),
+ access_token=self.other_user_token,
+ )
+
+ self.assertEqual(403, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+ def test_list_quarantined_media(self) -> None:
+ """
+ Ensure we actually get results for each page. We can't really test that
+ remote media is quarantined, but we can test that local media is.
+ """
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ def _upload() -> str:
+ return self.helper.upload_media(
+ SMALL_PNG, tok=self.admin_user_tok, expect_code=200
+ )["content_uri"][6:].split("/")[1] # Cut off 'mxc://' and domain
+
+ self.media_id_1 = _upload()
+ self.media_id_2 = _upload()
+ self.media_id_3 = _upload()
+
+ def _quarantine(media_id: str) -> None:
+ channel = self.make_request(
+ "POST",
+ "/_synapse/admin/v1/media/quarantine/%s/%s"
+ % (
+ self.server_name,
+ media_id,
+ ),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ _quarantine(self.media_id_1)
+ _quarantine(self.media_id_2)
+ _quarantine(self.media_id_3)
+
+ # Page 1
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/media/quarantined?kind=local&from=0&limit=1",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(1, len(channel.json_body["media"]))
+
+ # Page 2
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/media/quarantined?kind=local&from=1&limit=1",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(1, len(channel.json_body["media"]))
+
+ # Page 3
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/media/quarantined?kind=local&from=2&limit=1",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(1, len(channel.json_body["media"]))
+
+ # Page 4 (no media)
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/media/quarantined?kind=local&from=3&limit=1",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(0, len(channel.json_body["media"]))
+
+
class QuarantineMediaByIDTestCase(_AdminMediaTests):
def upload_media_and_return_media_id(self, data: bytes) -> str:
# Upload some media into the room
From 7347cc436e9632ea9b7abf484f5d5aa99755f656 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Fri, 12 Dec 2025 14:35:46 +0100
Subject: [PATCH 21/59] Add `memberships` admin API (#19260)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/19260.feature | 1 +
docs/admin_api/user_admin_api.md | 49 ++++++++
synapse/rest/admin/__init__.py | 6 +-
synapse/rest/admin/users.py | 24 +++-
synapse/storage/databases/main/roommember.py | 21 ++++
tests/rest/admin/test_room.py | 114 +++++++++++++++++++
6 files changed, 212 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/19260.feature
diff --git a/changelog.d/19260.feature b/changelog.d/19260.feature
new file mode 100644
index 0000000000..19b192a009
--- /dev/null
+++ b/changelog.d/19260.feature
@@ -0,0 +1 @@
+Add a `memberships` endpoint to the admin API. This is useful for forensics and T&S purposes.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 4de7e85642..5645c6dd1c 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -505,6 +505,55 @@ with a body of:
}
```
+## List room memberships of a user
+
+Gets a list of room memberships for a specific `user_id`. This
+endpoint differs from
+[`GET /_synapse/admin/v1/users/<user_id>/joined_rooms`](#list-joined-rooms-of-a-user)
+in that it returns rooms with memberships other than "join".
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/memberships
+```
+
+A response body like the following is returned:
+
+```json
+ {
+ "memberships": {
+ "!DuGcnbhHGaSZQoNQR:matrix.org": "join",
+ "!ZtSaPCawyWtxfWiIy:matrix.org": "leave",
+ }
+ }
+```
+
+which is a map of room membership states for the given user. This endpoint can
+be used with both local and remote users, with the caveat that the homeserver will
+only be aware of the memberships for rooms one of its local users has joined.
+
+Remote user memberships may also be out of date if all local users have since left
+a room. The homeserver will thus no longer receive membership updates about it.
+
+The list includes rooms that the user has since left; other membership states (knock,
+invite, etc.) are also possible.
+
+Note that rooms will only disappear from this list if they are
+[purged](./rooms.md#delete-room-api) from the homeserver.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `memberships` - A map of `room_id` (string) to `membership` state (string).
+
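+For example, an admin script could fetch a user's memberships and group the rooms by membership state. The sketch
+below uses the Python `requests` library; the homeserver URL, admin token and user ID are placeholders.
+
+```python
+import requests
+
+BASE_URL = "https://synapse.example.com"  # placeholder homeserver URL
+ACCESS_TOKEN = "<admin access token>"     # placeholder admin access token
+USER_ID = "@user:server.com"              # placeholder fully-qualified user ID
+
+resp = requests.get(
+    f"{BASE_URL}/_synapse/admin/v1/users/{USER_ID}/memberships",
+    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
+)
+resp.raise_for_status()
+
+# Group room IDs by membership state ("join", "leave", "invite", "ban", ...).
+by_state: dict[str, list[str]] = {}
+for room_id, membership in resp.json()["memberships"].items():
+    by_state.setdefault(membership, []).append(room_id)
+
+for state, rooms in sorted(by_state.items()):
+    print(f"{state}: {len(rooms)} room(s)")
+```
+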
## List joined rooms of a user
Gets a list of all `room_id` that a specific `user_id` is joined to and is a member of (participating in).
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index e34ebb17e6..fe3eeafd9f 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -114,7 +114,8 @@
UserByThreePid,
UserInvitesCount,
UserJoinedRoomCount,
- UserMembershipRestServlet,
+ UserJoinedRoomsRestServlet,
+ UserMembershipsRestServlet,
UserRegisterServlet,
UserReplaceMasterCrossSigningKeyRestServlet,
UserRestServletV2,
@@ -297,7 +298,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
VersionServlet(hs).register(http_server)
if not auth_delegated:
UserAdminServlet(hs).register(http_server)
- UserMembershipRestServlet(hs).register(http_server)
+ UserJoinedRoomsRestServlet(hs).register(http_server)
+ UserMembershipsRestServlet(hs).register(http_server)
if not auth_delegated:
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 42e9f8043d..406ad8f406 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -1031,7 +1031,7 @@ async def on_PUT(
return HTTPStatus.OK, {}
-class UserMembershipRestServlet(RestServlet):
+class UserJoinedRoomsRestServlet(RestServlet):
"""
Get list of joined room ID's for a user.
"""
@@ -1054,6 +1054,28 @@ async def on_GET(
return HTTPStatus.OK, rooms_response
+class UserMembershipsRestServlet(RestServlet):
+ """
+ Get list of room memberships for a user.
+ """
+
+ PATTERNS = admin_patterns("/users/(?P[^/]*)/memberships$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.is_mine = hs.is_mine
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str
+ ) -> tuple[int, JsonDict]:
+ await assert_requester_is_admin(self.auth, request)
+
+ memberships = await self.store.get_memberships_for_user(user_id)
+
+ return HTTPStatus.OK, {"memberships": memberships}
+
+
class PushersRestServlet(RestServlet):
"""
Gets information about all pushers for a specific `user_id`.
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 9b06ab69fe..7c06080f10 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -747,6 +747,27 @@ async def get_rooms_user_currently_banned_from(
return frozenset(room_ids)
+ async def get_memberships_for_user(self, user_id: str) -> dict[str, str]:
+ """Returns a dict of room_id to membership state for a given user.
+
+        For a remote user, this only returns rooms that this server is currently
+        participating in.
+ """
+
+ rows = cast(
+ list[tuple[str, str]],
+ await self.db_pool.simple_select_list(
+ "current_state_events",
+ keyvalues={
+ "type": EventTypes.Member,
+ "state_key": user_id,
+ },
+ retcols=["room_id", "membership"],
+ desc="get_memberships_for_user",
+ ),
+ )
+ return dict(rows)
+
@cached(max_entries=500000, iterable=True)
async def get_rooms_for_user(self, user_id: str) -> frozenset[str]:
"""Returns a set of room_ids the user is currently joined to.
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 1c340efa0c..ad713b4da4 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -2976,6 +2976,120 @@ def test_join_private_room_if_owner(self) -> None:
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
+ def test_joined_rooms(self) -> None:
+ """
+ Test joined_rooms admin endpoint.
+ """
+
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/join/{self.public_room_id}",
+ content={"user_id": self.second_user_id},
+ access_token=self.second_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(self.public_room_id, channel.json_body["room_id"])
+
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/users/{self.second_user_id}/joined_rooms",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0])
+
+ def test_memberships(self) -> None:
+ """
+ Test user memberships admin endpoint.
+ """
+
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/join/{self.public_room_id}",
+ content={"user_id": self.second_user_id},
+ access_token=self.second_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ other_room_id = self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok
+ )
+
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/join/{other_room_id}",
+ content={"user_id": self.second_user_id},
+ access_token=self.second_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/users/{self.second_user_id}/memberships",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(
+ {
+ "memberships": {
+ self.public_room_id: Membership.JOIN,
+ other_room_id: Membership.JOIN,
+ }
+ },
+ channel.json_body,
+ )
+
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/rooms/{other_room_id}/leave",
+ content={"user_id": self.second_user_id},
+ access_token=self.second_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ invited_room_id = self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok
+ )
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/rooms/{invited_room_id}/invite",
+ content={"user_id": self.second_user_id},
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ banned_room_id = self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok
+ )
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/v3/rooms/{banned_room_id}/ban",
+ content={"user_id": self.second_user_id},
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v1/users/{self.second_user_id}/memberships",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(
+ {
+ "memberships": {
+ self.public_room_id: Membership.JOIN,
+ other_room_id: Membership.LEAVE,
+ invited_room_id: Membership.INVITE,
+ banned_room_id: Membership.BAN,
+ }
+ },
+ channel.json_body,
+ )
+
def test_context_as_non_admin(self) -> None:
"""
Test that, without being admin, one cannot use the context admin API
From 048629dd13ffbcbc7603a8c729a4f5a6be20db8a Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Fri, 12 Dec 2025 13:36:34 +0000
Subject: [PATCH 22/59] minor grammar fix
context: https://github.com/element-hq/synapse/pull/19260#discussion_r2614227743
---
docs/admin_api/user_admin_api.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 5645c6dd1c..9e0a1cb70c 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -531,7 +531,7 @@ A response body like the following is returned:
which is a list of room membership states for the given user. This endpoint can
be used with both local and remote users, with the caveat that the homeserver will
-only be aware of the memberships for rooms one of its local users has joined.
+only be aware of the memberships for rooms that one of its local users has joined.
Remote user memberships may also be out of date if all local users have since left
a room. The homeserver will thus no longer receive membership updates about it.
From df24e0f30244b1c423f4130d64c6008be341d0b7 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Fri, 12 Dec 2025 15:34:13 +0000
Subject: [PATCH 23/59] Fix support for older versions of zope-interface
(#19274)
Fixes #19269
The versions of zope-interface shipped by RHEL, Ubuntu LTS 22 & 24 and
openSUSE don't support the new Python union `X | Y` syntax for interfaces.
This PR partially reverts the changeover to the new syntax, adds a
minimum supported version of zope-interface to Synapse's dependency
list, and removes the linter auto-upgrades which prefer the newer
syntax.
### Pull Request Checklist
* [X] Pull request is based on the develop branch
* [X] Pull request includes a [changelog
file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog).
The entry should:
- Be a short description of your change which makes sense to users.
"Fixed a bug that prevented receiving messages from other servers."
instead of "Moved X method from `EventStore` to `EventWorkerStore`.".
- Use markdown where necessary, mostly for `code blocks`.
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by
@github_username." or "Contributed by [Your Name]." to the end of the
entry.
* [X] [Code
style](https://element-hq.github.io/synapse/latest/code_style.html) is
correct (run the
[linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
---------
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/19274.bugfix | 1 +
poetry.lock | 2 +-
pyproject.toml | 12 ++++++------
synapse/app/_base.py | 3 ++-
synapse/app/admin_cmd.py | 4 ++--
synapse/app/generic_worker.py | 3 ++-
synapse/app/homeserver.py | 4 ++--
synapse/handlers/delayed_events.py | 4 ++--
synapse/handlers/message.py | 4 ++--
synapse/handlers/user_directory.py | 4 ++--
synapse/http/client.py | 9 +++++----
synapse/http/federation/matrix_federation_agent.py | 4 ++--
synapse/http/matrixfederationclient.py | 3 ++-
synapse/http/proxy.py | 4 ++--
synapse/http/proxyagent.py | 14 +++++++-------
synapse/http/replicationagent.py | 3 ++-
synapse/logging/_remote.py | 4 ++--
synapse/logging/handlers.py | 4 ++--
synapse/media/_base.py | 3 ++-
synapse/push/emailpusher.py | 4 ++--
synapse/push/httppusher.py | 4 ++--
synapse/server.py | 5 +++--
synapse/util/file_consumer.py | 6 +++---
tests/server.py | 10 +++++-----
tests/unittest.py | 3 ++-
25 files changed, 65 insertions(+), 56 deletions(-)
create mode 100644 changelog.d/19274.bugfix
diff --git a/changelog.d/19274.bugfix b/changelog.d/19274.bugfix
new file mode 100644
index 0000000000..92aaa0fe6d
--- /dev/null
+++ b/changelog.d/19274.bugfix
@@ -0,0 +1 @@
+Fix bug introduced in 1.143.0 that broke support for versions of `zope-interface` older than 6.2.
diff --git a/poetry.lock b/poetry.lock
index 4dacae38a4..3f8607b687 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -3542,4 +3542,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
-content-hash = "abbbdff591a306b56cc8890dbb2f477ac5f1a2d328baa6409e01084abc655bbf"
+content-hash = "1caa5072f6304122c89377420f993a54f54587f3618ccc8094ec31642264592c"
diff --git a/pyproject.toml b/pyproject.toml
index a9832ccabf..09ca2a9e77 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -109,7 +109,12 @@ dependencies = [
"pyrsistent>=0.18.0", # via jsonschema
"requests>=2.16.0", # 2.16.0+ no longer vendors urllib3, avoiding Python 3.10+ incompatibility
"urllib3>=1.26.5", # via treq; 1.26.5 fixes Python 3.10+ collections.abc compatibility
- "zope-interface>=6.2", # via twisted
+    # 5.2 is the current version in Debian oldstable. If we don't care to support that, then 5.4 is
+    # the minimum version from Ubuntu 22.04 and RHEL 9 (as of 2025-12).
+    # When bumping this version to 6.2 or above, refer to https://github.com/element-hq/synapse/pull/19274
+    # for details of Synapse improvements that may be unlocked, particularly around the use of `|`
+    # syntax with zope interface types.
+ "zope-interface>=5.2", # via twisted
]
[project.optional-dependencies]
@@ -383,15 +388,10 @@ select = [
"G",
# pyupgrade
"UP006",
- "UP007",
- "UP045",
]
extend-safe-fixes = [
# pyupgrade rules compatible with Python >= 3.9
"UP006",
- "UP007",
- # pyupgrade rules compatible with Python >= 3.10
- "UP045",
# Allow ruff to automatically fix trailing spaces within a multi-line string/comment.
"W293"
]
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 98d051bf04..c64c41e9d2 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -36,6 +36,7 @@
Awaitable,
Callable,
NoReturn,
+ Optional,
cast,
)
from wsgiref.simple_server import WSGIServer
@@ -455,7 +456,7 @@ def listen_http(
root_resource: Resource,
version_string: str,
max_request_body_size: int,
- context_factory: IOpenSSLContextFactory | None,
+ context_factory: Optional[IOpenSSLContextFactory],
reactor: ISynapseReactor = reactor,
) -> list[Port]:
"""
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index facc98164e..0614c805da 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -24,7 +24,7 @@
import os
import sys
import tempfile
-from typing import Mapping, Sequence
+from typing import Mapping, Optional, Sequence
from twisted.internet import defer, task
@@ -291,7 +291,7 @@ def load_config(argv_options: list[str]) -> tuple[HomeServerConfig, argparse.Nam
def create_homeserver(
config: HomeServerConfig,
- reactor: ISynapseReactor | None = None,
+ reactor: Optional[ISynapseReactor] = None,
) -> AdminCmdServer:
"""
Create a homeserver instance for the Synapse admin command process.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 9939c0fe7d..0a4abd1839 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -21,6 +21,7 @@
#
import logging
import sys
+from typing import Optional
from twisted.web.resource import Resource
@@ -335,7 +336,7 @@ def load_config(argv_options: list[str]) -> HomeServerConfig:
def create_homeserver(
config: HomeServerConfig,
- reactor: ISynapseReactor | None = None,
+ reactor: Optional[ISynapseReactor] = None,
) -> GenericWorkerServer:
"""
Create a homeserver instance for the Synapse worker process.
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index bd2956d9e1..2b1760416b 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -22,7 +22,7 @@
import logging
import os
import sys
-from typing import Iterable
+from typing import Iterable, Optional
from twisted.internet.tcp import Port
from twisted.web.resource import EncodingResourceWrapper, Resource
@@ -350,7 +350,7 @@ def load_or_generate_config(argv_options: list[str]) -> HomeServerConfig:
def create_homeserver(
config: HomeServerConfig,
- reactor: ISynapseReactor | None = None,
+ reactor: Optional[ISynapseReactor] = None,
) -> SynapseHomeServer:
"""
Create a homeserver instance for the Synapse main process.
diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py
index cb0a4dd6b2..c58d1d42bc 100644
--- a/synapse/handlers/delayed_events.py
+++ b/synapse/handlers/delayed_events.py
@@ -13,7 +13,7 @@
#
import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
from twisted.internet.interfaces import IDelayedCall
@@ -74,7 +74,7 @@ def __init__(self, hs: "HomeServer"):
cfg=self._config.ratelimiting.rc_delayed_event_mgmt,
)
- self._next_delayed_event_call: IDelayedCall | None = None
+ self._next_delayed_event_call: Optional[IDelayedCall] = None
# The current position in the current_state_delta stream
self._event_pos: int | None = None
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index a6499de3a8..7808bd68cb 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -22,7 +22,7 @@
import logging
import random
from http import HTTPStatus
-from typing import TYPE_CHECKING, Any, Mapping, Sequence
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence
from canonicaljson import encode_canonical_json
@@ -111,7 +111,7 @@ def __init__(self, hs: "HomeServer"):
# The scheduled call to self._expire_event. None if no call is currently
# scheduled.
- self._scheduled_expiry: IDelayedCall | None = None
+ self._scheduled_expiry: Optional[IDelayedCall] = None
if not hs.config.worker.worker_app:
self.hs.run_as_background_process(
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 36b037e8e1..e5c4de03c5 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -21,7 +21,7 @@
import logging
from http import HTTPStatus
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
from twisted.internet.interfaces import IDelayedCall
@@ -125,7 +125,7 @@ def __init__(self, hs: "HomeServer"):
# Guard to ensure we only have one process for refreshing remote profiles
self._is_refreshing_remote_profiles = False
# Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process`
- self._refresh_remote_profiles_call_later: IDelayedCall | None = None
+ self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None
# Guard to ensure we only have one process for refreshing remote profiles
# for the given servers.
diff --git a/synapse/http/client.py b/synapse/http/client.py
index f0b9201086..05c5f13a87 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -28,6 +28,7 @@
BinaryIO,
Callable,
Mapping,
+ Optional,
Protocol,
)
@@ -313,7 +314,7 @@ def request(
method: bytes,
uri: bytes,
headers: Headers | None = None,
- bodyProducer: IBodyProducer | None = None,
+ bodyProducer: Optional[IBodyProducer] = None,
) -> defer.Deferred:
h = urllib.parse.urlparse(uri.decode("ascii"))
@@ -1033,7 +1034,7 @@ class BodyExceededMaxSize(Exception):
class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
"""A protocol which immediately errors upon receiving data."""
- transport: ITCPTransport | None = None
+ transport: Optional[ITCPTransport] = None
def __init__(self, deferred: defer.Deferred):
self.deferred = deferred
@@ -1075,7 +1076,7 @@ class _MultipartParserProtocol(protocol.Protocol):
Protocol to read and parse a MSC3916 multipart/mixed response
"""
- transport: ITCPTransport | None = None
+ transport: Optional[ITCPTransport] = None
def __init__(
self,
@@ -1188,7 +1189,7 @@ def connectionLost(self, reason: Failure = connectionDone) -> None:
class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
"""A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""
- transport: ITCPTransport | None = None
+ transport: Optional[ITCPTransport] = None
def __init__(
self, stream: ByteWriteable, deferred: defer.Deferred, max_size: int | None
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index c3ba26fe03..a0167659f1 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -19,7 +19,7 @@
#
import logging
import urllib.parse
-from typing import Any, Generator
+from typing import Any, Generator, Optional
from urllib.request import ( # type: ignore[attr-defined]
proxy_bypass_environment,
)
@@ -173,7 +173,7 @@ def request(
method: bytes,
uri: bytes,
headers: Headers | None = None,
- bodyProducer: IBodyProducer | None = None,
+ bodyProducer: Optional[IBodyProducer] = None,
) -> Generator[defer.Deferred, Any, IResponse]:
"""
Args:
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 7090960cfb..dbd4f1e4b6 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -33,6 +33,7 @@
Callable,
Generic,
Literal,
+ Optional,
TextIO,
TypeVar,
cast,
@@ -691,7 +692,7 @@ async def _send_request(
destination_bytes, method_bytes, url_to_sign_bytes, json
)
data = encode_canonical_json(json)
- producer: IBodyProducer | None = QuieterFileBodyProducer(
+ producer: Optional[IBodyProducer] = QuieterFileBodyProducer(
BytesIO(data), cooperator=self._cooperator
)
else:
diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py
index c7f5e39dd8..b3a2f84f29 100644
--- a/synapse/http/proxy.py
+++ b/synapse/http/proxy.py
@@ -22,7 +22,7 @@
import json
import logging
import urllib.parse
-from typing import TYPE_CHECKING, Any, cast
+from typing import TYPE_CHECKING, Any, Optional, cast
from twisted.internet import protocol
from twisted.internet.interfaces import ITCPTransport
@@ -237,7 +237,7 @@ class _ProxyResponseBody(protocol.Protocol):
request.
"""
- transport: ITCPTransport | None = None
+ transport: Optional[ITCPTransport] = None
def __init__(self, request: "SynapseRequest") -> None:
self._request = request
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index d315ce8475..1f8e58efbc 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -21,7 +21,7 @@
import logging
import random
import re
-from typing import Any, Collection, Sequence, cast
+from typing import Any, Collection, Optional, Sequence, cast
from urllib.parse import urlparse
from urllib.request import ( # type: ignore[attr-defined]
proxy_bypass_environment,
@@ -119,8 +119,8 @@ def __init__(
self,
*,
reactor: IReactorCore,
- proxy_reactor: IReactorCore | None = None,
- contextFactory: IPolicyForHTTPS | None = None,
+ proxy_reactor: Optional[IReactorCore] = None,
+ contextFactory: Optional[IPolicyForHTTPS] = None,
connectTimeout: float | None = None,
bindAddress: bytes | None = None,
pool: HTTPConnectionPool | None = None,
@@ -175,7 +175,7 @@ def __init__(
self._policy_for_https = contextFactory
self._reactor = cast(IReactorTime, reactor)
- self._federation_proxy_endpoint: IStreamClientEndpoint | None = None
+ self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
self._federation_proxy_credentials: ProxyCredentials | None = None
if federation_proxy_locations:
assert federation_proxy_credentials is not None, (
@@ -221,7 +221,7 @@ def request(
method: bytes,
uri: bytes,
headers: Headers | None = None,
- bodyProducer: IBodyProducer | None = None,
+ bodyProducer: Optional[IBodyProducer] = None,
) -> "defer.Deferred[IResponse]":
"""
Issue a request to the server indicated by the given uri.
@@ -365,11 +365,11 @@ def request(
def http_proxy_endpoint(
proxy: bytes | None,
reactor: IReactorCore,
- tls_options_factory: IPolicyForHTTPS | None,
+ tls_options_factory: Optional[IPolicyForHTTPS],
timeout: float = 30,
bindAddress: bytes | str | tuple[bytes | str, int] | None = None,
attemptDelay: float | None = None,
-) -> tuple[IStreamClientEndpoint | None, ProxyCredentials | None]:
+) -> tuple[Optional[IStreamClientEndpoint], ProxyCredentials | None]:
"""Parses an http proxy setting and returns an endpoint for the proxy
Args:
diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py
index 708e4c386b..3d47107cf2 100644
--- a/synapse/http/replicationagent.py
+++ b/synapse/http/replicationagent.py
@@ -20,6 +20,7 @@
#
import logging
+from typing import Optional
from zope.interface import implementer
@@ -149,7 +150,7 @@ def request(
method: bytes,
uri: bytes,
headers: Headers | None = None,
- bodyProducer: IBodyProducer | None = None,
+ bodyProducer: Optional[IBodyProducer] = None,
) -> "defer.Deferred[IResponse]":
"""
Issue a request to the server indicated by the given uri.
diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py
index e3e0ba4beb..72faa3c746 100644
--- a/synapse/logging/_remote.py
+++ b/synapse/logging/_remote.py
@@ -25,7 +25,7 @@
from collections import deque
from ipaddress import IPv4Address, IPv6Address, ip_address
from math import floor
-from typing import Callable
+from typing import Callable, Optional
import attr
from zope.interface import implementer
@@ -113,7 +113,7 @@ def __init__(
port: int,
maximum_buffer: int = 1000,
level: int = logging.NOTSET,
- _reactor: IReactorTime | None = None,
+ _reactor: Optional[IReactorTime] = None,
):
super().__init__(level=level)
self.host = host
diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py
index 976c7075d4..984d7c2238 100644
--- a/synapse/logging/handlers.py
+++ b/synapse/logging/handlers.py
@@ -3,7 +3,7 @@
from logging import Handler, LogRecord
from logging.handlers import MemoryHandler
from threading import Thread
-from typing import cast
+from typing import Optional, cast
from twisted.internet.interfaces import IReactorCore
@@ -26,7 +26,7 @@ def __init__(
target: Handler | None = None,
flushOnClose: bool = True,
period: float = 5.0,
- reactor: IReactorCore | None = None,
+ reactor: Optional[IReactorCore] = None,
) -> None:
"""
period: the period between automatic flushes
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 0fe2e5b529..7884930876 100644
--- a/synapse/media/_base.py
+++ b/synapse/media/_base.py
@@ -30,6 +30,7 @@
Awaitable,
BinaryIO,
Generator,
+ Optional,
)
import attr
@@ -705,7 +706,7 @@ def __init__(self, hs: "HomeServer") -> None:
self.file: BinaryIO | None = None
self.deferred: "Deferred[None]" = Deferred()
- self.consumer: interfaces.IConsumer | None = None
+ self.consumer: Optional[IConsumer] = None
# Signals if the thread should keep reading/sending data. Set means
# continue, clear means pause.
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index ce4a2102e4..c44222f6ea 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -20,7 +20,7 @@
#
import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.internet.interfaces import IDelayedCall
@@ -71,7 +71,7 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig, mailer: Mailer
self.server_name = hs.hostname
self.store = self.hs.get_datastores().main
self.email = pusher_config.pushkey
- self.timed_call: IDelayedCall | None = None
+ self.timed_call: Optional[IDelayedCall] = None
self.throttle_params: dict[str, ThrottleParams] = {}
self._inited = False
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 1e7e742ddd..fdfae234be 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -21,7 +21,7 @@
import logging
import random
import urllib.parse
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional
from prometheus_client import Counter
@@ -120,7 +120,7 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig):
self.data = pusher_config.data
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.failing_since = pusher_config.failing_since
- self.timed_call: IDelayedCall | None = None
+ self.timed_call: Optional[IDelayedCall] = None
self._is_processing = False
self._group_unread_count_by_room = (
hs.config.push.push_group_unread_count_by_room
diff --git a/synapse/server.py b/synapse/server.py
index 88662c5b28..be83a59b88 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -34,6 +34,7 @@
Any,
Awaitable,
Callable,
+ Optional,
TypeVar,
cast,
)
@@ -320,7 +321,7 @@ def __init__(
self,
hostname: str,
config: HomeServerConfig,
- reactor: ISynapseReactor | None = None,
+ reactor: Optional[ISynapseReactor] = None,
):
"""
Args:
@@ -353,7 +354,7 @@ def __init__(
self._module_web_resources_consumed = False
# This attribute is set by the free function `refresh_certificate`.
- self.tls_server_context_factory: IOpenSSLContextFactory | None = None
+ self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None
self._is_shutdown = False
self._async_shutdown_handlers: list[ShutdownInfo] = []
diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py
index 8d64684084..c473c524f6 100644
--- a/synapse/util/file_consumer.py
+++ b/synapse/util/file_consumer.py
@@ -19,7 +19,7 @@
#
import queue
-from typing import Any, BinaryIO, cast
+from typing import Any, BinaryIO, Optional, Union, cast
from twisted.internet import threads
from twisted.internet.defer import Deferred
@@ -50,7 +50,7 @@ def __init__(self, file_obj: BinaryIO, reactor: ISynapseReactor) -> None:
self._reactor: ISynapseReactor = reactor
# Producer we're registered with
- self._producer: IPushProducer | IPullProducer | None = None
+ self._producer: Optional[Union[IPushProducer, IPullProducer]] = None
# True if PushProducer, false if PullProducer
self.streaming = False
@@ -72,7 +72,7 @@ def __init__(self, file_obj: BinaryIO, reactor: ISynapseReactor) -> None:
self._write_exception: Exception | None = None
def registerProducer(
- self, producer: IPushProducer | IPullProducer, streaming: bool
+ self, producer: Union[IPushProducer, IPullProducer], streaming: bool
) -> None:
"""Part of IConsumer interface
diff --git a/tests/server.py b/tests/server.py
index ce31a4162a..d17b2478e3 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -147,7 +147,7 @@ class FakeChannel:
_reactor: MemoryReactorClock
result: dict = attr.Factory(dict)
_ip: str = "127.0.0.1"
- _producer: IPullProducer | IPushProducer | None = None
+ _producer: Optional[Union[IPullProducer, IPushProducer]] = None
resource_usage: ContextResourceUsage | None = None
_request: Request | None = None
@@ -248,7 +248,7 @@ def registerProducer(self, producer: IProducer, streaming: bool) -> None:
# TODO This should ensure that the IProducer is an IPushProducer or
# IPullProducer, unfortunately twisted.protocols.basic.FileSender does
# implement those, but doesn't declare it.
- self._producer = cast(IPushProducer | IPullProducer, producer)
+ self._producer = cast(Union[IPushProducer, IPullProducer], producer)
self.producerStreaming = streaming
def _produce() -> None:
@@ -852,7 +852,7 @@ class FakeTransport:
"""Test reactor
"""
- _protocol: IProtocol | None = None
+ _protocol: Optional[IProtocol] = None
"""The Protocol which is producing data for this transport. Optional, but if set
will get called back for connectionLost() notifications etc.
"""
@@ -871,7 +871,7 @@ class FakeTransport:
disconnected = False
connected = True
buffer: bytes = b""
- producer: IPushProducer | None = None
+ producer: Optional[IPushProducer] = None
autoflush: bool = True
def getPeer(self) -> IPv4Address | IPv6Address:
@@ -1073,7 +1073,7 @@ def setup_test_homeserver(
cleanup_func: Callable[[Callable[[], Optional["Deferred[None]"]]], None],
server_name: str = "test",
config: HomeServerConfig | None = None,
- reactor: ISynapseReactor | None = None,
+ reactor: Optional[ISynapseReactor] = None,
homeserver_to_use: type[HomeServer] = TestHomeServer,
db_txn_limit: int | None = None,
**extra_homeserver_attributes: Any,
diff --git a/tests/unittest.py b/tests/unittest.py
index 7ea29364db..6022c750d0 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -37,6 +37,7 @@
Iterable,
Mapping,
NoReturn,
+ Optional,
Protocol,
TypeVar,
)
@@ -636,7 +637,7 @@ def setup_test_homeserver(
self,
server_name: str | None = None,
config: JsonDict | None = None,
- reactor: ISynapseReactor | None = None,
+ reactor: Optional[ISynapseReactor] = None,
clock: Clock | None = None,
**extra_homeserver_attributes: Any,
) -> HomeServer:
From 466994743ab10a590fc2e70ae8bbba4677049daa Mon Sep 17 00:00:00 2001
From: Andre Klärner
Date: Sat, 13 Dec 2025 01:07:39 +0100
Subject: [PATCH 24/59] Document importance of `public_baseurl` for delegation
and OIDC (#19270)
I stumbled across the fact that my config used delegation as recommended
by the docs and hosted Synapse on a subdomain. However, my config never had
`public_baseurl` set and worked without issues, until I tried to set up OIDC.
OIDC is initiated by the client being instructed to open a URL on the
homeserver. The correct URL is called at first, but Synapse does not
recognise it unless `public_baseurl` is set correctly. After changing this,
OIDC immediately started working.
To prevent anybody else from making the same mistake, this adds a small
clarifying note to the OIDC docs.
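To make the failure mode concrete, here is a minimal sketch (illustrative only; the callback path and helper name are assumptions, not Synapse's actual implementation) of how a URL derived from `public_baseurl` goes wrong when the setting is left unset under delegation:
```python
# Hypothetical illustration: deriving an OIDC callback URL from the configured
# base URL. With `public_baseurl` unset, the fallback "https://<server_name>/"
# points at the delegated domain rather than the subdomain hosting Synapse.
def oidc_callback_url(public_baseurl: str | None, server_name: str) -> str:
    base = public_baseurl or f"https://{server_name}/"
    return base.rstrip("/") + "/_synapse/client/oidc/callback"
# Under .well-known delegation, server_name ("example.com") differs from the
# host actually serving Synapse ("matrix.example.com"):
assert oidc_callback_url(None, "example.com").startswith("https://example.com/")
assert oidc_callback_url("https://matrix.example.com/", "example.com").startswith(
    "https://matrix.example.com/"
)
```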
---
changelog.d/19270.doc | 1 +
docs/openid.md | 5 +++++
2 files changed, 6 insertions(+)
create mode 100644 changelog.d/19270.doc
diff --git a/changelog.d/19270.doc b/changelog.d/19270.doc
new file mode 100644
index 0000000000..fdb7e2e51c
--- /dev/null
+++ b/changelog.d/19270.doc
@@ -0,0 +1 @@
+Document the importance of `public_baseurl` when configuring OpenID Connect authentication.
diff --git a/docs/openid.md b/docs/openid.md
index 819f754390..e91d375c41 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -50,6 +50,11 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
+For setups using [`.well-known` delegation](delegate.md), make sure
+[`public_baseurl`](usage/configuration/config_documentation.md#public_baseurl) is set
+appropriately. If unset, Synapse defaults to `https://<server_name>/`, which is used in
+the OIDC callback URL.
+
## OIDC Back-Channel Logout
Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
From 0f2b29511fd88d1dc2278f41fd6e4e2f2989fcb7 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Mon, 15 Dec 2025 10:23:33 -0700
Subject: [PATCH 25/59] Allow admins to bypass the quarantine check on media
downloads (#19275)
Co-authored-by: turt2live <1190097+turt2live@users.noreply.github.com>
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/19275.feature | 1 +
docs/admin_api/media_admin_api.md | 14 +++
synapse/media/media_repository.py | 22 ++++-
synapse/rest/client/media.py | 26 ++++-
tests/rest/admin/test_admin.py | 154 +++++++++++++++++++++++++++---
5 files changed, 197 insertions(+), 20 deletions(-)
create mode 100644 changelog.d/19275.feature
diff --git a/changelog.d/19275.feature b/changelog.d/19275.feature
new file mode 100644
index 0000000000..5147c546cf
--- /dev/null
+++ b/changelog.d/19275.feature
@@ -0,0 +1 @@
+Server admins can bypass the quarantine media check when downloading media by setting the `admin_unsafely_bypass_quarantine` query parameter to `true` on Client-Server API media download requests.
\ No newline at end of file
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index a3d99cb074..25481a8c55 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -115,6 +115,20 @@ is quarantined, Synapse will:
- Quarantine any existing cached remote media.
- Quarantine any future remote media.
+## Downloading quarantined media
+
+Normally, when media is quarantined, attempts to download it will return a 404 error.
+Admins can bypass this by adding `?admin_unsafely_bypass_quarantine=true`
+to the [normal download URL](https://spec.matrix.org/v1.16/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid).
+
+Bypassing the quarantine check is not recommended. Media is typically quarantined
+to prevent harmful content from being served to users, which includes admins. Only
+set the bypass parameter if you intentionally want to access potentially harmful
+content.
+
+Non-admin users cannot bypass quarantine checks, even when specifying the above
+query parameter.
+
## Quarantining media by ID
This API quarantines a single piece of local or remote media.
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index e84e842300..cb745b96ad 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -439,7 +439,11 @@ async def get_cached_remote_media_info(
return await self.store.get_cached_remote_media(origin, media_id)
async def get_local_media_info(
- self, request: SynapseRequest, media_id: str, max_timeout_ms: int
+ self,
+ request: SynapseRequest,
+ media_id: str,
+ max_timeout_ms: int,
+ bypass_quarantine: bool = False,
) -> LocalMedia | None:
"""Gets the info dictionary for given local media ID. If the media has
not been uploaded yet, this function will wait up to ``max_timeout_ms``
@@ -451,6 +455,7 @@ async def get_local_media_info(
the file_id for local content.)
max_timeout_ms: the maximum number of milliseconds to wait for the
media to be uploaded.
+ bypass_quarantine: whether to bypass quarantine checks
Returns:
Either the info dictionary for the given local media ID or
@@ -466,7 +471,7 @@ async def get_local_media_info(
respond_404(request)
return None
- if media_info.quarantined_by:
+ if media_info.quarantined_by and not bypass_quarantine:
logger.info("Media %s is quarantined", media_id)
respond_404(request)
return None
@@ -500,6 +505,7 @@ async def get_local_media(
max_timeout_ms: int,
allow_authenticated: bool = True,
federation: bool = False,
+ bypass_quarantine: bool = False,
) -> None:
"""Responds to requests for local media, if exists, or returns 404.
@@ -513,11 +519,14 @@ async def get_local_media(
media to be uploaded.
allow_authenticated: whether media marked as authenticated may be served to this request
federation: whether the local media being fetched is for a federation request
+ bypass_quarantine: whether to bypass quarantine checks
Returns:
Resolves once a response has successfully been written to request
"""
- media_info = await self.get_local_media_info(request, media_id, max_timeout_ms)
+ media_info = await self.get_local_media_info(
+ request, media_id, max_timeout_ms, bypass_quarantine=bypass_quarantine
+ )
if not media_info:
return
@@ -561,6 +570,7 @@ async def get_remote_media(
ip_address: str,
use_federation_endpoint: bool,
allow_authenticated: bool = True,
+ bypass_quarantine: bool = False,
) -> None:
"""Respond to requests for remote media.
@@ -577,6 +587,7 @@ async def get_remote_media(
federation `/download` endpoint
allow_authenticated: whether media marked as authenticated may be served to this
request
+ bypass_quarantine: whether to bypass quarantine checks
Returns:
Resolves once a response has successfully been written to request
@@ -609,6 +620,7 @@ async def get_remote_media(
ip_address,
use_federation_endpoint,
allow_authenticated,
+ bypass_quarantine=bypass_quarantine,
)
# Check if the media is cached on the client, if so return 304. We need
@@ -697,6 +709,7 @@ async def _get_remote_media_impl(
ip_address: str,
use_federation_endpoint: bool,
allow_authenticated: bool,
+ bypass_quarantine: bool = False,
) -> tuple[Responder | None, RemoteMedia]:
"""Looks for media in local cache, if not there then attempt to
download from remote server.
@@ -712,6 +725,7 @@ async def _get_remote_media_impl(
ip_address: the IP address of the requester
use_federation_endpoint: whether to request the remote media over the new federation
/download endpoint
+ bypass_quarantine: whether to bypass quarantine checks
Returns:
A tuple of responder and the media info of the file.
@@ -732,7 +746,7 @@ async def _get_remote_media_impl(
file_id = media_info.filesystem_id
file_info = FileInfo(server_name, file_id)
- if media_info.quarantined_by:
+ if media_info.quarantined_by and not bypass_quarantine:
logger.info("Media is quarantined")
raise NotFoundError()
diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py
index f145b03af4..4db3b01576 100644
--- a/synapse/rest/client/media.py
+++ b/synapse/rest/client/media.py
@@ -23,6 +23,7 @@
import logging
import re
+from synapse.api.errors import Codes, cs_error
from synapse.http.server import (
HttpServer,
respond_with_json,
@@ -235,7 +236,23 @@ async def on_GET(
# Validate the server name, raising if invalid
parse_and_validate_server_name(server_name)
- await self.auth.get_user_by_req(request, allow_guest=True)
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ is_admin = await self.auth.is_server_admin(requester)
+ bypass_quarantine = False
+ if parse_string(request, "admin_unsafely_bypass_quarantine") == "true":
+ if is_admin:
+ logger.info("Admin bypassing quarantine for media download")
+ bypass_quarantine = True
+ else:
+ respond_with_json(
+ request,
+ 400,
+ cs_error(
+ "Must be a server admin to bypass quarantine",
+ code=Codes.UNKNOWN,
+ ),
+ send_cors=True,
+ )
set_cors_headers(request)
set_corp_headers(request)
@@ -259,7 +276,11 @@ async def on_GET(
if self._is_mine_server_name(server_name):
await self.media_repo.get_local_media(
- request, media_id, file_name, max_timeout_ms
+ request,
+ media_id,
+ file_name,
+ max_timeout_ms,
+ bypass_quarantine=bypass_quarantine,
)
else:
ip_address = request.getClientAddress().host
@@ -271,6 +292,7 @@ async def on_GET(
max_timeout_ms,
ip_address,
True,
+ bypass_quarantine=bypass_quarantine,
)
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index f3740a8e35..77d824dcd8 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -71,14 +71,43 @@ def create_resource_dict(self) -> dict[str, Resource]:
return resources
def _ensure_quarantined(
- self, admin_user_tok: str, server_and_media_id: str
+ self,
+ user_tok: str,
+ server_and_media_id: str,
+ include_bypass_param: bool = False,
) -> None:
- """Ensure a piece of media is quarantined when trying to access it."""
+ """Ensure a piece of media is quarantined when trying to access it.
+
+ The include_bypass_param flag enables the presence of the
+ admin_unsafely_bypass_quarantine query parameter, but still expects that the
+ request will fail to download the media.
+ """
+ if include_bypass_param:
+ query_string = "?admin_unsafely_bypass_quarantine=true"
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/{server_and_media_id}{query_string}",
+ shorthand=False,
+ access_token=user_tok,
+ )
+
+ # Non-admins can't bypass, so this should fail regardless of whether the
+ # media is actually quarantined.
+ self.assertEqual(
+ 400,
+ channel.code,
+ msg=(
+ "Expected to receive a 400 when bypassing quarantined media: %s"
+ % server_and_media_id
+ ),
+ )
+
+ # Repeat the request, this time without the bypass parameter.
channel = self.make_request(
"GET",
f"/_matrix/client/v1/media/download/{server_and_media_id}",
shorthand=False,
- access_token=admin_user_tok,
+ access_token=user_tok,
)
# Should be quarantined
@@ -91,6 +120,62 @@ def _ensure_quarantined(
),
)
+ def test_admin_can_bypass_quarantine(self) -> None:
+ self.register_user("admin", "pass", admin=True)
+ admin_user_tok = self.login("admin", "pass")
+
+ # Upload some media
+ response = self.helper.upload_media(SMALL_PNG, tok=admin_user_tok)
+
+ # Extract media ID from the response
+ server_name_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
+ server_name, media_id = server_name_and_media_id.split("/")
+
+ # Attempt to access the media
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/{server_name_and_media_id}",
+ shorthand=False,
+ access_token=admin_user_tok,
+ )
+
+ # Should be successful
+ self.assertEqual(200, channel.code)
+
+ # Quarantine the media
+ url = "/_synapse/admin/v1/media/quarantine/%s/%s" % (
+ urllib.parse.quote(server_name),
+ urllib.parse.quote(media_id),
+ )
+ channel = self.make_request(
+ "POST",
+ url,
+ access_token=admin_user_tok,
+ )
+ self.pump(1.0)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # Now access it *without* the bypass parameter - this should fail (as expected).
+ self._ensure_quarantined(
+ admin_user_tok, server_name_and_media_id, include_bypass_param=False
+ )
+
+ # Now access it *with* the bypass parameter - this should work
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/media/download/{server_name_and_media_id}?admin_unsafely_bypass_quarantine=true",
+ shorthand=False,
+ access_token=admin_user_tok,
+ )
+ self.assertEqual(
+ 200,
+ channel.code,
+ msg=(
+ "Expected to receive a 200 on accessing (with bypass) quarantined media: %s"
+ % server_name_and_media_id
+ ),
+ )
+
@parameterized.expand(
[
# Attempt quarantine media APIs as non-admin
@@ -154,8 +239,14 @@ def test_quarantine_media_by_id(self) -> None:
self.pump(1.0)
self.assertEqual(200, channel.code, msg=channel.json_body)
- # Attempt to access the media
- self._ensure_quarantined(admin_user_tok, server_name_and_media_id)
+ # Attempt to access the media (and ensure non-admins can't download it, even
+ # with a bypass parameter). Admins cannot download it without the bypass param.
+ self._ensure_quarantined(
+ non_admin_user_tok, server_name_and_media_id, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_name_and_media_id, include_bypass_param=False
+ )
@parameterized.expand(
[
@@ -214,9 +305,21 @@ def test_quarantine_all_media_in_room(self, url: str) -> None:
server_and_media_id_1 = mxc_1[6:]
server_and_media_id_2 = mxc_2[6:]
- # Test that we cannot download any of the media anymore
- self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
- self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
+ # Test that we cannot download any of the media anymore, especially with the
+ # bypass parameter set. Admins cannot download the media without supplying the
+ # bypass parameter, so we check that too.
+ self._ensure_quarantined(
+ non_admin_user_tok, server_and_media_id_1, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ non_admin_user_tok, server_and_media_id_2, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_and_media_id_1, include_bypass_param=False
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_and_media_id_2, include_bypass_param=False
+ )
def test_quarantine_all_media_by_user(self) -> None:
self.register_user("user_admin", "pass", admin=True)
@@ -263,10 +366,27 @@ def test_quarantine_all_media_by_user(self) -> None:
channel.json_body, {"num_quarantined": 3}, "Expected 3 quarantined items"
)
- # Attempt to access each piece of media
- self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
- self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
- self._ensure_quarantined(admin_user_tok, server_and_media_id_3)
+ # Attempt to access each piece of media, ensuring that it can't be downloaded
+ # even with a bypass parameter. Admins should not be able to download the media
+ # either when not supplying the bypass parameter, so we check that too.
+ self._ensure_quarantined(
+ non_admin_user_tok, server_and_media_id_1, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ non_admin_user_tok, server_and_media_id_2, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ non_admin_user_tok, server_and_media_id_3, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_and_media_id_1, include_bypass_param=False
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_and_media_id_2, include_bypass_param=False
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_and_media_id_3, include_bypass_param=False
+ )
def test_cannot_quarantine_safe_media(self) -> None:
self.register_user("user_admin", "pass", admin=True)
@@ -307,8 +427,14 @@ def test_cannot_quarantine_safe_media(self) -> None:
)
# Attempt to access each piece of media, the first should fail, the
- # second should succeed.
- self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
+ # second should succeed. We check both the non-admin user with a bypass
+ # parameter, and the admin user without.
+ self._ensure_quarantined(
+ non_admin_user_tok, server_and_media_id_1, include_bypass_param=True
+ )
+ self._ensure_quarantined(
+ admin_user_tok, server_and_media_id_1, include_bypass_param=False
+ )
# Attempt to access each piece of media
channel = self.make_request(
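For reference (not part of the patch), a minimal client-side sketch of the new parameter, assuming the `requests` library plus a hypothetical homeserver URL and admin access token:
```python
import requests
BASE_URL = "https://matrix.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "syt_..."                  # hypothetical admin access token
def download_quarantined_media(server_name: str, media_id: str) -> bytes:
    """Download media as a server admin, bypassing the quarantine check.
    Quarantined media is normally answered with 404; with the parameter set and
    an admin token it is served, while non-admins receive a 400 instead.
    """
    resp = requests.get(
        f"{BASE_URL}/_matrix/client/v1/media/download/{server_name}/{media_id}",
        params={"admin_unsafely_bypass_quarantine": "true"},
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.content
```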
From 29fd0116a5e4dbae797c02a9c731de3c92c899bc Mon Sep 17 00:00:00 2001
From: Denis Kasak
Date: Tue, 16 Dec 2025 12:06:07 +0100
Subject: [PATCH 26/59] Improve proxy support for the federation_client.py dev
script (#19300)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/19300.feature | 1 +
scripts-dev/federation_client.py | 34 ++++++++++++++++++++++++++++----
2 files changed, 31 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/19300.feature
diff --git a/changelog.d/19300.feature b/changelog.d/19300.feature
new file mode 100644
index 0000000000..97e43e9b28
--- /dev/null
+++ b/changelog.d/19300.feature
@@ -0,0 +1 @@
+Improve proxy support for the `federation_client.py` dev script. Contributed by Denis Kasak (@dkasak).
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index 0fefc23b22..cb14f357cb 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -145,7 +145,7 @@ def request(
print("Requesting %s" % dest, file=sys.stderr)
s = requests.Session()
- s.mount("matrix-federation://", MatrixConnectionAdapter())
+ s.mount("matrix-federation://", MatrixConnectionAdapter(verify_tls=verify_tls))
headers: dict[str, str] = {
"Authorization": authorization_headers[0],
@@ -267,6 +267,17 @@ def read_args_from_config(args: argparse.Namespace) -> None:
class MatrixConnectionAdapter(HTTPAdapter):
+ """
+ A Matrix federation-aware HTTP Adapter.
+ """
+
+ verify_tls: bool
+ """whether to verify the remote server's TLS certificate."""
+
+ def __init__(self, verify_tls: bool = True) -> None:
+ self.verify_tls = verify_tls
+ super().__init__()
+
def send(
self,
request: PreparedRequest,
@@ -280,7 +291,7 @@ def send(
assert isinstance(request.url, str)
parsed = urlparse.urlsplit(request.url)
server_name = parsed.netloc
- well_known = self._get_well_known(parsed.netloc)
+ well_known = self._get_well_known(parsed.netloc, verify_tls=self.verify_tls)
if well_known:
server_name = well_known
@@ -318,6 +329,21 @@ def get_connection_with_tls_context(
print(
f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
)
+
+ if proxies:
+ scheme = parsed.scheme
+ if isinstance(scheme, bytes):
+ scheme = scheme.decode("utf-8")
+
+ proxy_for_scheme = proxies.get(scheme)
+ if proxy_for_scheme:
+ return self.proxy_manager_for(proxy_for_scheme).connection_from_host(
+ host,
+ port=port,
+ scheme="https",
+ pool_kwargs={"server_hostname": ssl_server_name},
+ )
+
return self.poolmanager.connection_from_host(
host,
port=port,
@@ -368,7 +394,7 @@ def _lookup(server_name: str) -> tuple[str, int, str]:
return server_name, 8448, server_name
@staticmethod
- def _get_well_known(server_name: str) -> str | None:
+ def _get_well_known(server_name: str, verify_tls: bool = True) -> str | None:
if ":" in server_name:
# explicit port, or ipv6 literal. Either way, no .well-known
return None
@@ -379,7 +405,7 @@ def _get_well_known(server_name: str) -> str | None:
print(f"fetching {uri}", file=sys.stderr)
try:
- resp = requests.get(uri)
+ resp = requests.get(uri, verify=verify_tls)
if resp.status_code != 200:
print("%s gave %i" % (uri, resp.status_code), file=sys.stderr)
return None
From 0395b71e253b0287e17c144f08d7605c10c104b0 Mon Sep 17 00:00:00 2001
From: Joshua Goins
Date: Tue, 16 Dec 2025 08:02:29 -0500
Subject: [PATCH 27/59] Fix Mastodon URL previews not showing anything useful
(#19231)
Fixes #18444. Inside `UrlPreviewer`, we need to combine two dicts (one
from oEmbed and one from the OpenGraph metadata in the HTML), and in
Mastodon's case they were very different.
Single Page Applications (SPAs) sometimes provide better information in the
OpenGraph tags than in the oEmbed stubs, because the oEmbed stubs are filled
in with JavaScript that Synapse does not execute.
This change improves previews for Mastodon and YouTube (for the same reason).
Tested to confirm that previews of Twitter and GitHub do not regress.
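A minimal illustration of the ordering change (the keys and values are made up, not real Mastodon output):
```python
# oEmbed stub with an empty description, as an SPA might serve it before any
# JavaScript has run.
og_from_oembed = {"og:type": "link", "og:description": ""}
# OpenGraph metadata scraped from the HTML, which carries the real content.
og_from_html = {"og:type": "article", "og:description": "Full post text"}
# Before: the oEmbed values were overlaid on top of the HTML values.
old = {**og_from_html, **og_from_oembed}
assert old["og:description"] == ""
# After: the HTML values are overlaid on top of the oEmbed response.
new = og_from_oembed | og_from_html
assert new["og:description"] == "Full post text"
```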
---
changelog.d/19231.bugfix | 1 +
synapse/media/url_previewer.py | 14 ++++++++++----
2 files changed, 11 insertions(+), 4 deletions(-)
create mode 100644 changelog.d/19231.bugfix
diff --git a/changelog.d/19231.bugfix b/changelog.d/19231.bugfix
new file mode 100644
index 0000000000..580b642bb2
--- /dev/null
+++ b/changelog.d/19231.bugfix
@@ -0,0 +1 @@
+Fix a bug where Mastodon posts (and possibly other embeds) have the wrong description for URL previews.
diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py
index 2c5e518918..7782905a7a 100644
--- a/synapse/media/url_previewer.py
+++ b/synapse/media/url_previewer.py
@@ -331,10 +331,16 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
# response failed or is incomplete.
og_from_html = parse_html_to_open_graph(tree)
- # Compile the Open Graph response by using the scraped
- # information from the HTML and overlaying any information
- # from the oEmbed response.
- og = {**og_from_html, **og_from_oembed}
+ # Compile an Open Graph response by combining the oEmbed response
+ # and the information from the HTML, with information in the HTML
+ # preferred.
+ #
+ # The ordering here is intentional: certain websites (especially
+ # SPA JavaScript-based ones) including Mastodon and YouTube provide
+ # almost complete OpenGraph descriptions but only stubs for oEmbed,
+ # with further oEmbed information being populated with JavaScript
+ # that Synapse won't execute.
+ og = og_from_oembed | og_from_html
await self._precache_image_url(user, media_info, og)
else:
From 3989d22a37c9291134edeb7453f4e2a617ee57cb Mon Sep 17 00:00:00 2001
From: Tulir Asokan
Date: Tue, 16 Dec 2025 17:24:36 +0200
Subject: [PATCH 28/59] Implement pagination for MSC2666 (#19279)
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
changelog.d/19279.feature | 1 +
synapse/rest/client/mutual_rooms.py | 79 +++++++++++++++++++++-----
tests/rest/client/test_mutual_rooms.py | 65 ++++++++++++++++++++-
tests/utils.py | 2 +
4 files changed, 131 insertions(+), 16 deletions(-)
create mode 100644 changelog.d/19279.feature
diff --git a/changelog.d/19279.feature b/changelog.d/19279.feature
new file mode 100644
index 0000000000..031e48dceb
--- /dev/null
+++ b/changelog.d/19279.feature
@@ -0,0 +1 @@
+Implemented pagination for the [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) mutual rooms endpoint. Contributed by @tulir @ Beeper.
diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py
index 3e5316c4b7..a6a913db34 100644
--- a/synapse/rest/client/mutual_rooms.py
+++ b/synapse/rest/client/mutual_rooms.py
@@ -19,9 +19,12 @@
#
#
import logging
+from bisect import bisect
from http import HTTPStatus
from typing import TYPE_CHECKING
+from unpaddedbase64 import decode_base64, encode_base64
+
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_strings_from_args
@@ -35,10 +38,34 @@
logger = logging.getLogger(__name__)
+MUTUAL_ROOMS_BATCH_LIMIT = 100
+
+
+def _parse_mutual_rooms_batch_token_args(args: dict[bytes, list[bytes]]) -> str | None:
+ from_batches = parse_strings_from_args(args, "from")
+ if not from_batches:
+ return None
+ if len(from_batches) > 1:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Duplicate from query parameter",
+ errcode=Codes.INVALID_PARAM,
+ )
+ if from_batches[0]:
+ try:
+ return decode_base64(from_batches[0]).decode("utf-8")
+ except Exception:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Malformed from token",
+ errcode=Codes.INVALID_PARAM,
+ )
+ return None
+
class UserMutualRoomsServlet(RestServlet):
"""
- GET /uk.half-shot.msc2666/user/mutual_rooms?user_id={user_id} HTTP/1.1
+ GET /uk.half-shot.msc2666/user/mutual_rooms?user_id={user_id}&from={token} HTTP/1.1
"""
PATTERNS = client_patterns(
@@ -56,6 +83,7 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
args: dict[bytes, list[bytes]] = request.args # type: ignore
user_ids = parse_strings_from_args(args, "user_id", required=True)
+ from_batch = _parse_mutual_rooms_batch_token_args(args)
if len(user_ids) > 1:
raise SynapseError(
@@ -64,29 +92,52 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
errcode=Codes.INVALID_PARAM,
)
- # We don't do batching, so a batch token is illegal by default
- if b"batch_token" in args:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Unknown batch_token",
- errcode=Codes.INVALID_PARAM,
- )
-
user_id = user_ids[0]
requester = await self.auth.get_user_by_req(request)
if user_id == requester.user.to_string():
raise SynapseError(
- HTTPStatus.UNPROCESSABLE_ENTITY,
+ HTTPStatus.BAD_REQUEST,
"You cannot request a list of shared rooms with yourself",
- errcode=Codes.INVALID_PARAM,
+ errcode=Codes.UNKNOWN,
)
- rooms = await self.store.get_mutual_rooms_between_users(
- frozenset((requester.user.to_string(), user_id))
+ # Sort here instead of the database function, so that we don't expose
+ # clients to any unrelated changes to the sorting algorithm.
+ rooms = sorted(
+ await self.store.get_mutual_rooms_between_users(
+ frozenset((requester.user.to_string(), user_id))
+ )
)
- return 200, {"joined": list(rooms)}
+ if from_batch:
+ # A from_batch token was provided, so cut off any rooms where the ID is
+ # lower than or equal to the token. This method doesn't care whether the
+ # provided token room still exists, nor whether it's even a real room ID.
+ #
+ # However, if rooms with a lower ID are added after the token was issued,
+ # they will not be included until the client makes a new request without a
+ # from token. This is considered acceptable, as clients generally won't
+ # persist these results for long periods.
+ rooms = rooms[bisect(rooms, from_batch) :]
+
+ if len(rooms) <= MUTUAL_ROOMS_BATCH_LIMIT:
+ # We've reached the end of the list, don't return a batch token
+ return 200, {"joined": rooms}
+
+ rooms = rooms[:MUTUAL_ROOMS_BATCH_LIMIT]
+ # We use urlsafe unpadded base64 encoding for the batch token in order to
+ # handle funny room IDs in old pre-v12 rooms properly. We also truncate it
+ # to stay within the 255-character limit of opaque tokens.
+ next_batch = encode_base64(rooms[-1].encode("utf-8"), urlsafe=True)[:255]
+ # Due to the truncation, it is technically possible to have conflicting next
+ # batches by creating hundreds of rooms with the same 191 character prefix
+ # in the room ID. In the event that some silly user does that, don't let
+ # them paginate further.
+ if next_batch == from_batch:
+ return 200, {"joined": rooms}
+
+ return 200, {"joined": list(rooms), "next_batch": next_batch}
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
diff --git a/tests/rest/client/test_mutual_rooms.py b/tests/rest/client/test_mutual_rooms.py
index ea063707aa..f78c67fcd9 100644
--- a/tests/rest/client/test_mutual_rooms.py
+++ b/tests/rest/client/test_mutual_rooms.py
@@ -55,12 +55,16 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
+ mutual_rooms.MUTUAL_ROOMS_BATCH_LIMIT = 10
- def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel:
+ def _get_mutual_rooms(
+ self, token: str, other_user: str, since_token: str | None = None
+ ) -> FakeChannel:
return self.make_request(
"GET",
"/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms"
- f"?user_id={quote(other_user)}",
+ f"?user_id={quote(other_user)}"
+ + (f"&from={quote(since_token)}" if since_token else ""),
access_token=token,
)
@@ -141,6 +145,52 @@ def _check_mutual_rooms_with(
for room_id_id in channel.json_body["joined"]:
self.assertIn(room_id_id, [room_id_one, room_id_two])
+ def _create_rooms_for_pagination_test(
+ self, count: int
+ ) -> tuple[str, str, list[str]]:
+ u1 = self.register_user("user1", "pass")
+ u1_token = self.login(u1, "pass")
+ u2 = self.register_user("user2", "pass")
+ u2_token = self.login(u2, "pass")
+ room_ids = []
+ for i in range(count):
+ room_id = self.helper.create_room_as(u1, is_public=i % 2 == 0, tok=u1_token)
+ self.helper.invite(room_id, src=u1, targ=u2, tok=u1_token)
+ self.helper.join(room_id, user=u2, tok=u2_token)
+ room_ids.append(room_id)
+ room_ids.sort()
+ return u1_token, u2, room_ids
+
+ def test_shared_room_list_pagination_two_pages(self) -> None:
+ u1_token, u2, room_ids = self._create_rooms_for_pagination_test(15)
+
+ channel = self._get_mutual_rooms(u1_token, u2)
+ self.assertEqual(200, channel.code, channel.result)
+ self.assertEqual(channel.json_body["joined"], room_ids[0:10])
+ self.assertIn("next_batch", channel.json_body)
+
+ channel = self._get_mutual_rooms(u1_token, u2, channel.json_body["next_batch"])
+ self.assertEqual(200, channel.code, channel.result)
+ self.assertEqual(channel.json_body["joined"], room_ids[10:20])
+ self.assertNotIn("next_batch", channel.json_body)
+
+ def test_shared_room_list_pagination_one_page(self) -> None:
+ u1_token, u2, room_ids = self._create_rooms_for_pagination_test(10)
+
+ channel = self._get_mutual_rooms(u1_token, u2)
+ self.assertEqual(200, channel.code, channel.result)
+ self.assertEqual(channel.json_body["joined"], room_ids)
+ self.assertNotIn("next_batch", channel.json_body)
+
+ def test_shared_room_list_pagination_invalid_token(self) -> None:
+ u1_token, u2, room_ids = self._create_rooms_for_pagination_test(10)
+
+ channel = self._get_mutual_rooms(u1_token, u2, "!<>##faketoken")
+ self.assertEqual(400, channel.code, channel.result)
+ self.assertEqual(
+ "M_INVALID_PARAM", channel.json_body["errcode"], channel.result
+ )
+
def test_shared_room_list_after_leave(self) -> None:
"""
A room should no longer be considered shared if the other
@@ -172,3 +222,14 @@ def test_shared_room_list_after_leave(self) -> None:
channel = self._get_mutual_rooms(u2_token, u1)
self.assertEqual(200, channel.code, channel.result)
self.assertEqual(len(channel.json_body["joined"]), 0)
+
+ def test_shared_room_list_nonexistent_user(self) -> None:
+ u1 = self.register_user("user1", "pass")
+ u1_token = self.login(u1, "pass")
+
+ # Check shared rooms with a user that does not exist.
+ # We should see no rooms in common and no next_batch token.
+ channel = self._get_mutual_rooms(u1_token, "@meow:example.com")
+ self.assertEqual(200, channel.code, channel.result)
+ self.assertEqual(len(channel.json_body["joined"]), 0)
+ self.assertNotIn("next_batch", channel.json_body)
diff --git a/tests/utils.py b/tests/utils.py
index 4052c9a4fb..0cf97a7e8d 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -198,7 +198,9 @@ def default_config(
"rc_invites": {
"per_room": {"per_second": 10000, "burst_count": 10000},
"per_user": {"per_second": 10000, "burst_count": 10000},
+ "per_issuer": {"per_second": 10000, "burst_count": 10000},
},
+ "rc_room_creation": {"per_second": 10000, "burst_count": 10000},
"rc_3pid_validation": {"per_second": 10000, "burst_count": 10000},
"rc_presence": {"per_user": {"per_second": 10000, "burst_count": 10000}},
"saml2_enabled": False,
From f4320b5a4926a8f5b7fb7d6899a22f7dc7236114 Mon Sep 17 00:00:00 2001
From: Andrew Ferrazzutti
Date: Tue, 16 Dec 2025 12:42:08 -0500
Subject: [PATCH 29/59] Admin API: worker support for Query User Account
(#19281)
---
changelog.d/19281.feature | 1 +
docs/workers.md | 5 +++++
synapse/rest/admin/__init__.py | 3 +++
synapse/rest/admin/users.py | 40 +++++++++++++++++++---------------
4 files changed, 32 insertions(+), 17 deletions(-)
create mode 100644 changelog.d/19281.feature
diff --git a/changelog.d/19281.feature b/changelog.d/19281.feature
new file mode 100644
index 0000000000..78d3002d90
--- /dev/null
+++ b/changelog.d/19281.feature
@@ -0,0 +1 @@
+Admin API: add worker support to `GET /_synapse/admin/v2/users/<user_id>`.
diff --git a/docs/workers.md b/docs/workers.md
index 2bc8afa74f..c2aef33e16 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -255,6 +255,8 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
^/_matrix/client/(r0|v3|unstable)/capabilities$
^/_matrix/client/(r0|v3|unstable)/notifications$
+
+ # Admin API requests
^/_synapse/admin/v1/rooms/[^/]+$
# Encryption requests
@@ -300,6 +302,9 @@ Additionally, the following REST endpoints can be handled for GET requests:
# Presence requests
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
+ # Admin API requests
+ ^/_synapse/admin/v2/users/[^/]+$
+
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index fe3eeafd9f..b209404cd1 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -119,6 +119,7 @@
UserRegisterServlet,
UserReplaceMasterCrossSigningKeyRestServlet,
UserRestServletV2,
+ UserRestServletV2Get,
UsersRestServletV2,
UsersRestServletV3,
UserTokenRestServlet,
@@ -281,6 +282,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
# matrix_authentication_service integration uses the dedicated MAS API.
if hs.config.experimental.msc3861.enabled:
register_servlets_for_msc3861_delegation(hs, http_server)
+ else:
+ UserRestServletV2Get(hs).register(http_server)
return
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 406ad8f406..ccd34d17d8 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -210,7 +210,7 @@ def _parse_parameter_deactivated(self, request: SynapseRequest) -> bool | None:
return parse_boolean(request, "deactivated")
-class UserRestServletV2(RestServlet):
+class UserRestServletV2Get(RestServlet):
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)$", "v2")
"""Get request to list user details.
@@ -220,22 +220,6 @@ class UserRestServletV2(RestServlet):
returns:
200 OK with user details if success otherwise an error.
-
- Put request to allow an administrator to add or modify a user.
- This needs user to have administrator access in Synapse.
- We use PUT instead of POST since we already know the id of the user
- object to create. POST could be used to create guests.
-
- PUT /_synapse/admin/v2/users/<user_id>
- {
- "password": "secret",
- "displayname": "User"
- }
-
- returns:
- 201 OK with new user object if user was created or
- 200 OK with modified user object if user was modified
- otherwise an error.
"""
def __init__(self, hs: "HomeServer"):
@@ -267,6 +251,28 @@ async def on_GET(
return HTTPStatus.OK, user_info_dict
+
+class UserRestServletV2(UserRestServletV2Get):
+ """
+ Put request to allow an administrator to add or modify a user.
+ This needs user to have administrator access in Synapse.
+ We use PUT instead of POST since we already know the id of the user
+ object to create. POST could be used to create guests.
+
+ Note: This inherits from `UserRestServletV2Get`, so also supports the `GET` route.
+
+ PUT /_synapse/admin/v2/users/<user_id>
+ {
+ "password": "secret",
+ "displayname": "User"
+ }
+
+ returns:
+ 201 OK with new user object if user was created or
+ 200 OK with modified user object if user was modified
+ otherwise an error.
+ """
+
async def on_PUT(
self, request: SynapseRequest, user_id: str
) -> tuple[int, JsonMapping]:
From 41938d6fd2a2690a6ca5c884a787f819907d1255 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 19 Dec 2025 14:29:04 -0600
Subject: [PATCH 30/59] Log the original bind exception when encountering
`Failed to listen on 0.0.0.0, continuing because listening on [::]` (#19297)
**Before:**
```
WARNING - call_when_running - Failed to listen on 0.0.0.0, continuing because listening on [::]
```
**After:**
```
WARNING - call_when_running - Failed to listen on 0.0.0.0, continuing because listening on [::]. Original exception: CannotListenError: Couldn't listen on 0.0.0.0:8008: [Errno 98] Address already in use.
```
---
changelog.d/19297.misc | 1 +
synapse/app/__init__.py | 4 +++-
2 files changed, 4 insertions(+), 1 deletion(-)
create mode 100644 changelog.d/19297.misc
diff --git a/changelog.d/19297.misc b/changelog.d/19297.misc
new file mode 100644
index 0000000000..aec97fd973
--- /dev/null
+++ b/changelog.d/19297.misc
@@ -0,0 +1 @@
+Log the original bind exception when encountering `Failed to listen on 0.0.0.0, continuing because listening on [::]`.
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index 56033f5782..fa436e874b 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -54,7 +54,9 @@ def check_bind_error(
"""
if address == "0.0.0.0" and "::" in bind_addresses:
logger.warning(
- "Failed to listen on 0.0.0.0, continuing because listening on [::]"
+ "Failed to listen on 0.0.0.0, continuing because listening on [::]. Original exception: %s: %s",
+ type(e).__name__,
+ str(e),
)
else:
raise e
From 50fabc48c322bc5c6cf08720c3779383b4c7fd52 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 22 Dec 2025 16:04:03 +0000
Subject: [PATCH 31/59] Bump actions/checkout from 6.0.0 to 6.0.1 in the
minor-and-patches group (#19319)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the minor-and-patches group with 1 update:
[actions/checkout](https://github.com/actions/checkout).
Updates `actions/checkout` from 6.0.0 to 6.0.1
Full Changelog: https://github.com/actions/checkout/compare/v6...v6.0.1
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/docker.yml | 2 +-
.github/workflows/docs-pr.yaml | 4 +-
.github/workflows/docs.yaml | 2 +-
.github/workflows/fix_lint.yaml | 2 +-
.github/workflows/latest_deps.yml | 10 ++---
.github/workflows/poetry_lockfile.yaml | 2 +-
.github/workflows/push_complement_image.yml | 6 +--
.github/workflows/release-artifacts.yml | 8 ++--
.github/workflows/schema.yaml | 4 +-
.github/workflows/tests.yml | 44 ++++++++++-----------
.github/workflows/triage_labelled.yml | 2 +-
.github/workflows/twisted_trunk.yml | 10 ++---
12 files changed, 48 insertions(+), 48 deletions(-)
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index aaf1e22d3c..3a5b554a7f 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -31,7 +31,7 @@ jobs:
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Checkout repository
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Extract version from pyproject.toml
# Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
index 4d28533a27..524739ba62 100644
--- a/.github/workflows/docs-pr.yaml
+++ b/.github/workflows/docs-pr.yaml
@@ -13,7 +13,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
@@ -50,7 +50,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Setup mdbook
uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 51944b13e8..e33add1830 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -50,7 +50,7 @@ jobs:
needs:
- pre
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
# Fetch all history so that the schema_versions script works.
fetch-depth: 0
diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml
index 9daea3f378..34f3b14e75 100644
--- a/.github/workflows/fix_lint.yaml
+++ b/.github/workflows/fix_lint.yaml
@@ -18,7 +18,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index c356ee8e3d..0e27666ef1 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -42,7 +42,7 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
@@ -77,7 +77,7 @@ jobs:
postgres-version: "14"
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -152,7 +152,7 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -202,7 +202,7 @@ jobs:
steps:
- name: Check out synapse codebase
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
path: synapse
@@ -234,7 +234,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml
index 5c139bf574..29b5950ab8 100644
--- a/.github/workflows/poetry_lockfile.yaml
+++ b/.github/workflows/poetry_lockfile.yaml
@@ -16,7 +16,7 @@ jobs:
name: "Check locked dependencies have sdists"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: '3.x'
diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml
index ed82482505..b662b98754 100644
--- a/.github/workflows/push_complement_image.yml
+++ b/.github/workflows/push_complement_image.yml
@@ -33,17 +33,17 @@ jobs:
packages: write
steps:
- name: Checkout specific branch (debug build)
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
if: github.event_name == 'workflow_dispatch'
with:
ref: ${{ inputs.branch }}
- name: Checkout clean copy of develop (scheduled build)
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
if: github.event_name == 'schedule'
with:
ref: develop
- name: Checkout clean copy of master (on-push)
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
if: github.event_name == 'push'
with:
ref: master
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 33b965d960..496b9086ae 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -27,7 +27,7 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
@@ -55,7 +55,7 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
path: src
@@ -125,7 +125,7 @@ jobs:
os: "ubuntu-24.04-arm"
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
@@ -163,7 +163,7 @@ jobs:
if: ${{ !startsWith(github.ref, 'refs/pull/') }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.10"
diff --git a/.github/workflows/schema.yaml b/.github/workflows/schema.yaml
index 0755a5f023..356d155807 100644
--- a/.github/workflows/schema.yaml
+++ b/.github/workflows/schema.yaml
@@ -14,7 +14,7 @@ jobs:
name: Ensure Synapse config schema is valid
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
@@ -40,7 +40,7 @@ jobs:
name: Ensure generated documentation is up-to-date
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index cab6bbdefe..b5ae3c3512 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -86,7 +86,7 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
@@ -106,7 +106,7 @@ jobs:
if: ${{ needs.changes.outputs.linting == 'true' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
@@ -116,7 +116,7 @@ jobs:
check-lockfile:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
@@ -129,7 +129,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -151,7 +151,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -187,7 +187,7 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
@@ -196,7 +196,7 @@ jobs:
if: ${{ github.event_name == 'pull_request' && (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.event.pull_request.user.login != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
@@ -214,7 +214,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -233,7 +233,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -251,7 +251,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -287,7 +287,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -307,7 +307,7 @@ jobs:
needs: changes
if: ${{ needs.changes.outputs.linting_readme == 'true' }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
@@ -355,7 +355,7 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: "3.x"
@@ -376,7 +376,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@@ -432,7 +432,7 @@ jobs:
- changes
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -495,7 +495,7 @@ jobs:
extras: ["all"]
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -545,7 +545,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
@@ -592,7 +592,7 @@ jobs:
--health-retries 5
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -636,7 +636,7 @@ jobs:
--health-retries 5
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Add PostgreSQL apt repository
# We need a version of pg_dump that can handle the version of
# PostgreSQL being tested against. The Ubuntu package repository lags
@@ -691,7 +691,7 @@ jobs:
steps:
- name: Checkout synapse codebase
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
path: synapse
@@ -727,7 +727,7 @@ jobs:
- changes
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -747,7 +747,7 @@ jobs:
- changes
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
index 34222b7d1b..27ff1d80cd 100644
--- a/.github/workflows/triage_labelled.yml
+++ b/.github/workflows/triage_labelled.yml
@@ -22,7 +22,7 @@ jobs:
# This field is case-sensitive.
TARGET_STATUS: Needs info
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
# Only clone the script file we care about, instead of the whole repo.
sparse-checkout: .ci/scripts/triage_labelled_issue.sh
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 325902f131..64486b0f05 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -43,7 +43,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -70,7 +70,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
@@ -117,7 +117,7 @@ jobs:
- ${{ github.workspace }}:/src
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
@@ -175,7 +175,7 @@ jobs:
steps:
- name: Run actions/checkout@v4 for synapse
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
path: synapse
@@ -217,7 +217,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
From f79acff862aa10fcb528d0d893ccbbc2738189cf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 22 Dec 2025 16:37:16 +0000
Subject: [PATCH 32/59] Bump log from 0.4.28 to 0.4.29 in the patches group
(#19318)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the patches group with 1 update:
[log](https://github.com/rust-lang/log).
Updates `log` from 0.4.28 to 0.4.29
Release notes
Sourced from log's
releases.
0.4.29
MSRV
This release increases log's MSRV from
1.61.0 to 1.68.0.
What's Changed
New Contributors
Full Changelog: https://github.com/rust-lang/log/compare/0.4.28...0.4.29
Changelog
Sourced from log's
changelog.
[0.4.29] - 2025-12-02
What's Changed
New Contributors
Full Changelog: https://github.com/rust-lang/log/compare/0.4.28...0.4.29
Commits
- b1e2df7 Merge pull request #719 from rust-lang/cargo/0.4.29
- 3fe1a54 prepare for 0.4.29 release
- 7a432d9 Merge pull request #718 from rust-lang/ci/msrv
- 0689d56 rebump msrv to 1.68.0
- 46b448e try drop msrv back to 1.61.0
- 929ab38 fix up doc test feature gate
- 957cece bump serde-dependent crates
- bea40c8 bump msrv to 1.68.0
- c540184 Merge pull request #716 from rust-lang/ci-smaller-matrix2
- c971e63 Merge branch 'master' into ci-smaller-matrix2
- Additional commits viewable in compare view
[Dependabot compatibility score](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore major version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's major version (unless you unignore this specific
dependency's major version or upgrade to it yourself)
- `@dependabot ignore minor version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's minor version (unless you unignore this specific
dependency's minor version or upgrade to it yourself)
- `@dependabot ignore ` will close this group update PR
and stop Dependabot creating any more for the specific dependency
(unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore ` will remove all of the ignore
conditions of the specified dependency
- `@dependabot unignore ` will
remove the ignore condition of the specified dependency and ignore
conditions
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 007428a380..892279e4f2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -705,9 +705,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
[[package]]
name = "log"
-version = "0.4.28"
+version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
+checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "lru-slab"
From 7a24fafbc376b9bffeb3277b1ad4aa950720c96c Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Mon, 29 Dec 2025 12:20:58 -0600
Subject: [PATCH 33/59] Auto-formatting `.github/workflows/tests.yml` from
VSCode (#19327)
---
.github/workflows/tests.yml | 124 ++++++++++++++++++------------------
changelog.d/19327.misc | 1 +
2 files changed, 62 insertions(+), 63 deletions(-)
create mode 100644 changelog.d/19327.misc
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index b5ae3c3512..7923383768 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -26,59 +26,59 @@ jobs:
linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
steps:
- - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
- id: filter
- # We only check on PRs
- if: startsWith(github.ref, 'refs/pull/')
- with:
- filters: |
- rust:
- - 'rust/**'
- - 'Cargo.toml'
- - 'Cargo.lock'
- - '.rustfmt.toml'
- - '.github/workflows/tests.yml'
-
- trial:
- - 'synapse/**'
- - 'tests/**'
- - 'rust/**'
- - '.ci/scripts/calculate_jobs.py'
- - 'Cargo.toml'
- - 'Cargo.lock'
- - 'pyproject.toml'
- - 'poetry.lock'
- - '.github/workflows/tests.yml'
-
- integration:
- - 'synapse/**'
- - 'rust/**'
- - 'docker/**'
- - 'Cargo.toml'
- - 'Cargo.lock'
- - 'pyproject.toml'
- - 'poetry.lock'
- - 'docker/**'
- - '.ci/**'
- - 'scripts-dev/complement.sh'
- - '.github/workflows/tests.yml'
-
- linting:
- - 'synapse/**'
- - 'docker/**'
- - 'tests/**'
- - 'scripts-dev/**'
- - 'contrib/**'
- - 'synmark/**'
- - 'stubs/**'
- - '.ci/**'
- - 'mypy.ini'
- - 'pyproject.toml'
- - 'poetry.lock'
- - '.github/workflows/tests.yml'
-
- linting_readme:
- - 'README.rst'
+ - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
+ id: filter
+ # We only check on PRs
+ if: startsWith(github.ref, 'refs/pull/')
+ with:
+ filters: |
+ rust:
+ - 'rust/**'
+ - 'Cargo.toml'
+ - 'Cargo.lock'
+ - '.rustfmt.toml'
+ - '.github/workflows/tests.yml'
+
+ trial:
+ - 'synapse/**'
+ - 'tests/**'
+ - 'rust/**'
+ - '.ci/scripts/calculate_jobs.py'
+ - 'Cargo.toml'
+ - 'Cargo.lock'
+ - 'pyproject.toml'
+ - 'poetry.lock'
+ - '.github/workflows/tests.yml'
+
+ integration:
+ - 'synapse/**'
+ - 'rust/**'
+ - 'docker/**'
+ - 'Cargo.toml'
+ - 'Cargo.lock'
+ - 'pyproject.toml'
+ - 'poetry.lock'
+ - 'docker/**'
+ - '.ci/**'
+ - 'scripts-dev/complement.sh'
+ - '.github/workflows/tests.yml'
+
+ linting:
+ - 'synapse/**'
+ - 'docker/**'
+ - 'tests/**'
+ - 'scripts-dev/**'
+ - 'contrib/**'
+ - 'synmark/**'
+ - 'stubs/**'
+ - '.ci/**'
+ - 'mypy.ini'
+ - 'pyproject.toml'
+ - 'poetry.lock'
+ - '.github/workflows/tests.yml'
+
+ linting_readme:
+ - 'README.rst'
check-sampleconfig:
runs-on: ubuntu-latest
@@ -219,8 +219,8 @@ jobs:
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
- components: clippy
- toolchain: ${{ env.RUST_VERSION }}
+ components: clippy
+ toolchain: ${{ env.RUST_VERSION }}
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
- run: cargo clippy -- -D warnings
@@ -238,8 +238,8 @@ jobs:
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
- toolchain: nightly-2025-04-23
- components: clippy
+ toolchain: nightly-2025-04-23
+ components: clippy
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
- run: cargo clippy --all-features -- -D warnings
@@ -349,7 +349,6 @@ jobs:
lint-rustfmt
lint-readme
-
calculate-test-jobs:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
@@ -373,7 +372,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
+ job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
@@ -449,12 +448,12 @@ jobs:
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
- python-version: '3.10'
+ python-version: "3.10"
- name: Prepare old deps
# Note: we install using `uv` here, not poetry or pip to allow us to test with the
# minimum version of all dependencies, both those explicitly specified and those
- # implicitly brought in by the explicit dependencies.
+ # implicitly brought in by the explicit dependencies.
run: |
pip install uv
uv pip install --system --resolution=lowest .[all,test]
@@ -605,7 +604,6 @@ jobs:
PGPASSWORD: postgres
PGDATABASE: postgres
-
portdb:
if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true'}} # Allow previous steps to be skipped, but not fail
needs:
@@ -752,7 +750,7 @@ jobs:
- name: Install Rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master
with:
- toolchain: nightly-2022-12-01
+ toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
- run: cargo bench --no-run
diff --git a/changelog.d/19327.misc b/changelog.d/19327.misc
new file mode 100644
index 0000000000..d61a66907e
--- /dev/null
+++ b/changelog.d/19327.misc
@@ -0,0 +1 @@
+Format `.github/workflows/tests.yml`.
From bd94152e0644a00708e570b0e9abc775d9704015 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 31 Dec 2025 14:43:04 -0600
Subject: [PATCH 34/59] Stream Complement progress and format logs in a
separate step after all tests are done (#19326)
This way we can see what's happening as the tests run, instead of seeing
nothing until the end. It's also useful to split the test output from the
formatting so we can capture the raw test output before the formatter
gobbles it all up.
Same thing I did in
https://github.com/element-hq/synapse-rust-apps/pull/361
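To make the two-step idea concrete, here is a rough Python sketch (not the workflow's actual bash, which pipes through `tee` and later runs `gotestfmt`): stream each raw output line as it arrives while also saving it to a log file, then format the saved log in a separate step so a formatter failure can never hide the raw test output.
```python
import subprocess
import sys


def run_and_tee(cmd: list[str], log_path: str) -> int:
    """Run `cmd`, echoing each output line immediately and saving it to `log_path`."""
    with open(log_path, "w") as log, subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
    ) as proc:
        assert proc.stdout is not None
        for line in proc.stdout:
            sys.stdout.write(line)  # live progress in the CI log
            log.write(line)         # raw copy kept for later formatting
        return proc.wait()


if __name__ == "__main__":
    status = run_and_tee(["go", "test", "-json", "./..."], "/tmp/gotest.log")
    # Separate step: pretty-print the saved raw log (gotestfmt in the real workflow).
    with open("/tmp/gotest.log") as raw:
        subprocess.run(["gotestfmt"], stdin=raw, check=False)
    sys.exit(status)
```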
---
.github/workflows/tests.yml | 20 +++++++++++++++++---
changelog.d/19326.misc | 1 +
2 files changed, 18 insertions(+), 3 deletions(-)
create mode 100644 changelog.d/19326.misc
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 7923383768..dd216fc696 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -708,14 +708,28 @@ jobs:
go-version-file: complement/go.mod
# use p=1 concurrency as GHA boxes are underpowered and don't like running tons of synapses at once.
- - run: |
+ - name: Run Complement Tests
+ id: run_complement_tests
+ # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
+ # are underpowered and don't like running tons of Synapse instances at once.
+ # -json: Output JSON format so that gotestfmt can parse it.
+ #
+ # tee /tmp/gotest.log: We tee the output to a file so that we can re-process it
+ # later on for better formatting with gotestfmt. But we still want the command
+ # to output to the terminal as it runs so we can see what's happening in
+ # real-time.
+ run: |
set -o pipefail
- COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | synapse/.ci/scripts/gotestfmt
+ COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
- name: Run Complement Tests
+
+ - name: Formatted Complement test logs
+ # Always run this step if we attempted to run the Complement tests.
+ if: always() && steps.run_complement_tests.outcome != 'skipped'
+ run: cat /tmp/gotest.log | gotestfmt -hide "successful-downloads,empty-packages"
cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
diff --git a/changelog.d/19326.misc b/changelog.d/19326.misc
new file mode 100644
index 0000000000..37493c7488
--- /dev/null
+++ b/changelog.d/19326.misc
@@ -0,0 +1 @@
+Update CI to stream Complement progress and format logs in a separate step after all tests are done.
From 9dae6cc5958e6e7300637ac22e091f295218f0d7 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 1 Jan 2026 14:00:00 -0600
Subject: [PATCH 35/59] Add a way to expose metrics from the Docker image
(`SYNAPSE_ENABLE_METRICS`) (#19324)
This spawns from wanting to [run a load
test](https://github.com/element-hq/synapse-rust-apps/pull/397) against
the Complement Docker image of Synapse and see metrics from the
homeserver.
### Why not just provide your own homeserver config?
Probably possible, but it gets tricky when you try to use the workers
variant of the Docker image (`docker/Dockerfile-workers`). The way to
work around it would probably be to `yq`-edit everything in a script,
changing `/data/homeserver.yaml` and `/conf/workers/*.yaml` to add the
`metrics` listener, and then modifying `/conf/workers/shared.yaml` to add
`enable_metrics: true`. Doesn't spark much joy.
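As a rough illustration of what the new environment variable does (a hypothetical standalone sketch, not the actual `docker/configure_workers_and_start.py` code), the idea is that `SYNAPSE_ENABLE_METRICS=1` adds a `metrics` listener to every process, with the main process pinned to port 19090 and workers counting up from 19091:
```python
import os


def build_metrics_listeners(worker_names: list[str]) -> dict[str, list[dict]]:
    """Sketch: map each process name to the metrics listeners it would get."""
    enabled = os.environ.get("SYNAPSE_ENABLE_METRICS", "0") == "1"
    listeners: dict[str, list[dict]] = {name: [] for name in ["main", *worker_names]}
    if not enabled:
        return listeners

    # The main process always uses the same port 19090.
    listeners["main"].append({"type": "metrics", "port": 19090})
    worker_metrics_port = 19091  # workers get consecutive ports after the main process
    for name in worker_names:
        listeners[name].append({"type": "metrics", "port": worker_metrics_port})
        worker_metrics_port += 1
    return listeners


print(build_metrics_listeners(["federation_sender1", "synchrotron1"]))
# With SYNAPSE_ENABLE_METRICS=1 this yields ports 19090, 19091 and 19092.
```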
---
changelog.d/19324.docker | 1 +
docker/Dockerfile | 8 ++-
docker/README-testing.md | 2 +
docker/README.md | 3 ++
docker/conf-workers/shared.yaml.j2 | 5 ++
docker/conf-workers/worker.yaml.j2 | 8 +++
docker/conf/homeserver.yaml | 9 ++++
docker/configure_workers_and_start.py | 33 ++++++++++--
docker/start.py | 57 +++++++++++++++-----
docs/sample_config.yaml | 14 +++--
synapse/config/_base.py | 51 +++++++++++++++++-
synapse/config/_base.pyi | 1 +
synapse/config/metrics.py | 11 +++-
synapse/config/server.py | 53 ++++---------------
tests/config/test_server.py | 76 +++++++++++++++++++++++----
15 files changed, 252 insertions(+), 80 deletions(-)
create mode 100644 changelog.d/19324.docker
diff --git a/changelog.d/19324.docker b/changelog.d/19324.docker
new file mode 100644
index 0000000000..52bf9cb7ae
--- /dev/null
+++ b/changelog.d/19324.docker
@@ -0,0 +1 @@
+Add a way to expose metrics from the Docker image (`SYNAPSE_ENABLE_METRICS`).
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6d10dee1aa..91116ea1c4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -188,7 +188,13 @@ COPY --from=builder --exclude=.lock /install /usr/local
COPY ./docker/start.py /start.py
COPY ./docker/conf /conf
-EXPOSE 8008/tcp 8009/tcp 8448/tcp
+# 8008: CS Matrix API port from Synapse
+# 8448: SS Matrix API port from Synapse
+EXPOSE 8008/tcp 8448/tcp
+# 19090: Metrics listener port for the main process (metrics must be enabled with
+# SYNAPSE_ENABLE_METRICS=1). Metrics for workers are on ports starting from 19091 but
+# since these are dynamic we don't expose them by default.
+EXPOSE 19090/tcp
ENTRYPOINT ["/start.py"]
diff --git a/docker/README-testing.md b/docker/README-testing.md
index db88598b8c..009a5d612d 100644
--- a/docker/README-testing.md
+++ b/docker/README-testing.md
@@ -135,3 +135,5 @@ but it does not serve TLS by default.
You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
TLS certificate and key (respectively), both in PEM (textual) format.
In this case, Nginx will additionally serve using HTTPS on port 8448.
+
+
diff --git a/docker/README.md b/docker/README.md
index 3438e9c441..ed5155f541 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -75,6 +75,9 @@ The following environment variables are supported in `generate` mode:
particularly tricky.
* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful
for testing.
+* `SYNAPSE_ENABLE_METRICS`: if set to `1`, the metrics listener will be enabled on the
+ main and worker processes. Defaults to `0` (disabled). The main process will listen on
+ port `19090` and workers on port `19091 + `.
## Postgres
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
index 1dfc60ad11..6efbd05472 100644
--- a/docker/conf-workers/shared.yaml.j2
+++ b/docker/conf-workers/shared.yaml.j2
@@ -20,4 +20,9 @@ app_service_config_files:
{%- endfor %}
{%- endif %}
+{# Controlled by SYNAPSE_ENABLE_METRICS #}
+{% if enable_metrics %}
+enable_metrics: true
+{% endif %}
+
{{ shared_worker_config }}
diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2
index 29ec74b4ea..d4d2d4d4cf 100644
--- a/docker/conf-workers/worker.yaml.j2
+++ b/docker/conf-workers/worker.yaml.j2
@@ -21,6 +21,14 @@ worker_listeners:
{%- endfor %}
{% endif %}
+{# Controlled by SYNAPSE_ENABLE_METRICS #}
+{% if metrics_port %}
+ - type: metrics
+ # Prometheus does not support Unix sockets so we don't bother with
+ # `SYNAPSE_USE_UNIX_SOCKET`, https://github.com/prometheus/prometheus/issues/12024
+ port: {{ metrics_port }}
+{% endif %}
+
worker_log_config: {{ worker_log_config_filepath }}
{{ worker_extra_conf }}
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index 2890990705..e2e1338673 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -53,6 +53,15 @@ listeners:
- names: [federation]
compress: false
+{% if SYNAPSE_ENABLE_METRICS %}
+ - type: metrics
+ # The main process always uses the same port 19090
+ #
+ # Prometheus does not support Unix sockets so we don't bother with
+ # `SYNAPSE_USE_UNIX_SOCKET`, https://github.com/prometheus/prometheus/issues/12024
+ port: 19090
+{% endif %}
+
## Database ##
{% if POSTGRES_PASSWORD %}
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index e7cbd701b8..c03f2a5e56 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -49,6 +49,10 @@
# regardless of the SYNAPSE_LOG_LEVEL setting.
# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
# for testing.
+# * SYNAPSE_USE_UNIX_SOCKET: TODO
+# * `SYNAPSE_ENABLE_METRICS`: if set to `1`, the metrics listener will be enabled on the
+# main and worker processes. Defaults to `0` (disabled). The main process will listen on
+# port `19090` and workers on port `19091 + `.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -758,6 +762,9 @@ def generate_worker_files(
# Convenience helper for if using unix sockets instead of host:port
using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
+
+ enable_metrics = environ.get("SYNAPSE_ENABLE_METRICS", "0") == "1"
+
# First read the original config file and extract the listeners block. Then we'll
# add another listener for replication. Later we'll write out the result to the
# shared config file.
@@ -789,7 +796,11 @@ def generate_worker_files(
# base shared worker jinja2 template. This config file will be passed to all
# workers, included Synapse's main process. It is intended mainly for disabling
# functionality when certain workers are spun up, and adding a replication listener.
- shared_config: dict[str, Any] = {"listeners": listeners}
+ shared_config: dict[str, Any] = {
+ "listeners": listeners,
+ # Controls `enable_metrics: true`
+ "enable_metrics": enable_metrics,
+ }
# List of dicts that describe workers.
# We pass this to the Supervisor template later to generate the appropriate
@@ -816,6 +827,8 @@ def generate_worker_files(
# Start worker ports from this arbitrary port
worker_port = 18009
+ # The main process metrics port is 19090, so start workers from 19091
+ worker_metrics_port = 19091
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
@@ -862,10 +875,15 @@ def generate_worker_files(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
- # Update the shared config with any worker_type specific options. The first of a
- # given worker_type needs to stay assigned and not be replaced.
- worker_config["shared_extra_conf"].update(shared_config)
- shared_config = worker_config["shared_extra_conf"]
+ # Keep the `shared_config` up to date with the `shared_extra_conf` from each
+ # worker.
+ shared_config = {
+ **worker_config["shared_extra_conf"],
+ # We combine `shared_config` second to avoid overwriting existing keys
+ # because TODO: why?
+ **shared_config,
+ }
+
if using_unix_sockets:
healthcheck_urls.append(
f"--unix-socket /run/worker.{worker_port} http://localhost/health"
@@ -891,6 +909,10 @@ def generate_worker_files(
# Write out the worker's logging config file
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
+ if enable_metrics:
+ # Enable prometheus metrics endpoint on this worker
+ worker_config["metrics_port"] = worker_metrics_port
+
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
@@ -905,6 +927,7 @@ def generate_worker_files(
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
worker_port += 1
+ worker_metrics_port += 1
# Build the nginx location config blocks
nginx_location_config = ""
diff --git a/docker/start.py b/docker/start.py
index c88d23695f..19f1ab5075 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -31,6 +31,25 @@ def flush_buffers() -> None:
sys.stderr.flush()
+def strtobool(val: str) -> bool:
+ """Convert a string representation of truth to True or False
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+
+ This is lifted from distutils.util.strtobool, with the exception that it actually
+ returns a bool, rather than an int.
+ """
+ val = val.lower()
+ if val in ("y", "yes", "t", "true", "on", "1"):
+ return True
+ elif val in ("n", "no", "f", "false", "off", "0"):
+ return False
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
+
+
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
"""Generate a file from a template
@@ -98,19 +117,16 @@ def generate_config_from_template(
os.mkdir(config_dir)
# Convert SYNAPSE_NO_TLS to boolean if exists
- if "SYNAPSE_NO_TLS" in environ:
- tlsanswerstring = str.lower(environ["SYNAPSE_NO_TLS"])
- if tlsanswerstring in ("true", "on", "1", "yes"):
- environ["SYNAPSE_NO_TLS"] = True
- else:
- if tlsanswerstring in ("false", "off", "0", "no"):
- environ["SYNAPSE_NO_TLS"] = False
- else:
- error(
- 'Environment variable "SYNAPSE_NO_TLS" found but value "'
- + tlsanswerstring
- + '" unrecognized; exiting.'
- )
+ tlsanswerstring = environ.get("SYNAPSE_NO_TLS")
+ if tlsanswerstring is not None:
+ try:
+ environ["SYNAPSE_NO_TLS"] = strtobool(tlsanswerstring)
+ except ValueError:
+ error(
+ 'Environment variable "SYNAPSE_NO_TLS" found but value "'
+ + tlsanswerstring
+ + '" unrecognized; exiting.'
+ )
if "SYNAPSE_LOG_CONFIG" not in environ:
environ["SYNAPSE_LOG_CONFIG"] = config_dir + "/log.config"
@@ -164,6 +180,18 @@ def run_generate_config(environ: Mapping[str, str], ownership: str | None) -> No
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
+ enable_metrics_raw = environ.get("SYNAPSE_ENABLE_METRICS", "0")
+
+ enable_metrics = False
+ if enable_metrics_raw is not None:
+ try:
+ enable_metrics = strtobool(enable_metrics_raw)
+ except ValueError:
+ error(
+ 'Environment variable "SYNAPSE_ENABLE_METRICS" found but value "'
+ + enable_metrics_raw
+ + '" unrecognized; exiting.'
+ )
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -190,6 +218,9 @@ def run_generate_config(environ: Mapping[str, str], ownership: str | None) -> No
"--open-private-ports",
]
+ if enable_metrics:
+ args.append("--enable-metrics")
+
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 0d75e6d4a1..470e44a8ed 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -24,14 +24,18 @@
server_name: "SERVERNAME"
pid_file: DATADIR/homeserver.pid
listeners:
- - port: 8008
+ - bind_addresses:
+ - ::1
+ - 127.0.0.1
+ port: 8008
+ resources:
+ - compress: false
+ names:
+ - client
+ - federation
tls: false
type: http
x_forwarded: true
- bind_addresses: ['::1', '127.0.0.1']
- resources:
- - names: [client, federation]
- compress: false
database:
name: sqlite3
args:
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 43dece4a08..fdfbee98a1 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -44,6 +44,7 @@
import yaml
from synapse.types import StrSequence
+from synapse.util.stringutils import parse_and_validate_server_name
from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter
logger = logging.getLogger(__name__)
@@ -465,6 +466,7 @@ def generate_config(
generate_secrets: bool = False,
report_stats: bool | None = None,
open_private_ports: bool = False,
+ enable_metrics: bool = False,
listeners: list[dict] | None = None,
tls_certificate_path: str | None = None,
tls_private_key_path: str | None = None,
@@ -495,9 +497,15 @@ def generate_config(
open_private_ports: True to leave private ports (such as the non-TLS
HTTP listener) open to the internet.
+ enable_metrics: True to set `enable_metrics: true` and when using the
+ default set of listeners, will also add the metrics listener on port 19090.
+
listeners: A list of descriptions of the listeners synapse should
- start with each of which specifies a port (int), a list of
- resources (list(str)), tls (bool) and type (str). For example:
+ start with each of which specifies a port (int), a list of resources
+ (list(str)), tls (bool) and type (str). There is a default set of
+ listeners when `None`.
+
+ Example usage:
[{
"port": 8448,
"resources": [{"names": ["federation"]}],
@@ -518,6 +526,35 @@ def generate_config(
Returns:
The yaml config file
"""
+ _, bind_port = parse_and_validate_server_name(server_name)
+ if bind_port is not None:
+ unsecure_port = bind_port - 400
+ else:
+ bind_port = 8448
+ unsecure_port = 8008
+
+ # The default listeners
+ if listeners is None:
+ listeners = [
+ {
+ "port": unsecure_port,
+ "tls": False,
+ "type": "http",
+ "x_forwarded": True,
+ "resources": [
+ {"names": ["client", "federation"], "compress": False}
+ ],
+ }
+ ]
+
+ if enable_metrics:
+ listeners.append(
+ {
+ "port": 19090,
+ "tls": False,
+ "type": "metrics",
+ }
+ )
conf = CONFIG_FILE_HEADER + "\n".join(
dedent(conf)
@@ -529,6 +566,7 @@ def generate_config(
generate_secrets=generate_secrets,
report_stats=report_stats,
open_private_ports=open_private_ports,
+ enable_metrics=enable_metrics,
listeners=listeners,
tls_certificate_path=tls_certificate_path,
tls_private_key_path=tls_private_key_path,
@@ -756,6 +794,14 @@ def load_or_generate_config(
" internet. Do not use this unless you know what you are doing."
),
)
+ generate_group.add_argument(
+ "--enable-metrics",
+ action="store_true",
+ help=(
+ "Sets `enable_metrics: true` and when using the default set of listeners, "
+ "will also add the metrics listener on port 19090."
+ ),
+ )
cls.invoke_all_static("add_arguments", parser)
config_args = parser.parse_args(argv_options)
@@ -812,6 +858,7 @@ def load_or_generate_config(
report_stats=(config_args.report_stats == "yes"),
generate_secrets=True,
open_private_ports=config_args.open_private_ports,
+ enable_metrics=config_args.enable_metrics,
)
os.makedirs(config_dir_path, exist_ok=True)
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index fe9b3333c4..7c371d161c 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -146,6 +146,7 @@ class RootConfig:
generate_secrets: bool = ...,
report_stats: bool | None = ...,
open_private_ports: bool = ...,
+ enable_metrics: bool = ...,
listeners: Any | None = ...,
tls_certificate_path: str | None = ...,
tls_private_key_path: str | None = ...,
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 83dbee53b6..d8cddb82ed 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -75,10 +75,19 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
)
def generate_config_section(
- self, report_stats: bool | None = None, **kwargs: Any
+ self,
+ report_stats: bool | None = None,
+ enable_metrics: bool = False,
+ **kwargs: Any,
) -> str:
if report_stats is not None:
res = "report_stats: %s\n" % ("true" if report_stats else "false")
else:
res = "\n"
+
+ # We avoid adding anything if it's `False` since that's the default (less noise
+ # in the default generated config)
+ if enable_metrics:
+ res += "enable_metrics: true\n"
+
return res
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 495f289159..ca94c224ea 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -923,26 +923,21 @@ def has_tls_listener(self) -> bool:
def generate_config_section(
self,
+ *,
config_dir_path: str,
data_dir_path: str,
server_name: str,
- open_private_ports: bool,
- listeners: list[dict] | None,
+ open_private_ports: bool = False,
+ listeners: list[dict] | None = None,
**kwargs: Any,
) -> str:
- _, bind_port = parse_and_validate_server_name(server_name)
- if bind_port is not None:
- unsecure_port = bind_port - 400
- else:
- bind_port = 8448
- unsecure_port = 8008
-
pid_file = os.path.join(data_dir_path, "homeserver.pid")
- secure_listeners = []
- unsecure_listeners = []
+ http_bindings = "[]"
private_addresses = ["::1", "127.0.0.1"]
if listeners:
+ secure_listeners = []
+ unsecure_listeners = []
for listener in listeners:
if listener["tls"]:
secure_listeners.append(listener)
@@ -957,43 +952,17 @@ def generate_config_section(
unsecure_listeners.append(listener)
- secure_http_bindings = indent(
- yaml.dump(secure_listeners), " " * 10
- ).lstrip()
-
- unsecure_http_bindings = indent(
- yaml.dump(unsecure_listeners), " " * 10
+ # `lstrip` is used because the first line already has whitespace in the
+ # template below
+ http_bindings = indent(
+ yaml.dump(secure_listeners + unsecure_listeners), " " * 10
).lstrip()
- if not unsecure_listeners:
- unsecure_http_bindings = """- port: %(unsecure_port)s
- tls: false
- type: http
- x_forwarded: true""" % locals()
-
- if not open_private_ports:
- unsecure_http_bindings += (
- "\n bind_addresses: ['::1', '127.0.0.1']"
- )
-
- unsecure_http_bindings += """
-
- resources:
- - names: [client, federation]
- compress: false"""
-
- if listeners:
- unsecure_http_bindings = ""
-
- if not secure_listeners:
- secure_http_bindings = ""
-
return """\
server_name: "%(server_name)s"
pid_file: %(pid_file)s
listeners:
- %(secure_http_bindings)s
- %(unsecure_http_bindings)s
+ %(http_bindings)s
""" % locals()
def read_arguments(self, args: argparse.Namespace) -> None:
diff --git a/tests/config/test_server.py b/tests/config/test_server.py
index 5eb2540439..d3c59ae14c 100644
--- a/tests/config/test_server.py
+++ b/tests/config/test_server.py
@@ -21,6 +21,7 @@
import yaml
from synapse.config._base import ConfigError, RootConfig
+from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ServerConfig, generate_ip_set, is_threepid_reserved
from tests import unittest
@@ -38,14 +39,23 @@ def test_is_threepid_reserved(self) -> None:
self.assertFalse(is_threepid_reserved(config, user3))
self.assertFalse(is_threepid_reserved(config, user1_msisdn))
- def test_unsecure_listener_no_listeners_open_private_ports_false(self) -> None:
+ def test_default_set_of_listeners(self) -> None:
+ """
+ Test that we get a default set of listeners from the `RootConfig`
+ """
conf = yaml.safe_load(
- ServerConfig(RootConfig()).generate_config_section(
- "CONFDIR", "/data_dir_path", "che.org", False, None
+ # We use `HomeServerConfig` instead of `RootConfig` as it has all of the
+ # `config_classes` defined.
+ HomeServerConfig().generate_config(
+ config_dir_path="CONFDIR",
+ data_dir_path="/data_dir_path",
+ server_name="che.org",
+ open_private_ports=False,
+ listeners=None,
)
)
- expected_listeners = [
+ expected_listeners: list[dict] = [
{
"port": 8008,
"tls": False,
@@ -58,25 +68,61 @@ def test_unsecure_listener_no_listeners_open_private_ports_false(self) -> None:
self.assertEqual(conf["listeners"], expected_listeners)
- def test_unsecure_listener_no_listeners_open_private_ports_true(self) -> None:
+ def test_default_set_of_listeners_with_enable_metrics(self) -> None:
+ """
+ Test that the default set of listeners from the `RootConfig` gets a metrics
+ listener when `enable_metrics=True`.
+ """
conf = yaml.safe_load(
- ServerConfig(RootConfig()).generate_config_section(
- "CONFDIR", "/data_dir_path", "che.org", True, None
+ # We use `HomeServerConfig` instead of `RootConfig` as it has all of the
+ # `config_classes` defined.
+ HomeServerConfig().generate_config(
+ config_dir_path="CONFDIR",
+ data_dir_path="/data_dir_path",
+ server_name="che.org",
+ open_private_ports=False,
+ enable_metrics=True,
+ listeners=None,
)
)
- expected_listeners = [
+ expected_listeners: list[dict] = [
{
"port": 8008,
"tls": False,
"type": "http",
"x_forwarded": True,
+ "bind_addresses": ["::1", "127.0.0.1"],
"resources": [{"names": ["client", "federation"], "compress": False}],
- }
+ },
+ {
+ "port": 19090,
+ "tls": False,
+ "type": "metrics",
+ "bind_addresses": ["::1", "127.0.0.1"],
+ },
]
self.assertEqual(conf["listeners"], expected_listeners)
+ def test_unsecure_listener_no_listeners(self) -> None:
+ conf = yaml.safe_load(
+ ServerConfig(RootConfig()).generate_config_section(
+ config_dir_path="CONFDIR",
+ data_dir_path="/data_dir_path",
+ server_name="che.org",
+ open_private_ports=False,
+ listeners=None,
+ )
+ )
+
+ # We expect `None` because we only operate with what's given to us. The default
+ # set of listeners comes from the logic one layer above in `RootConfig` (see
+ # tests above).
+ expected_listeners: list[dict] = []
+
+ self.assertEqual(conf["listeners"], expected_listeners)
+
def test_listeners_set_correctly_open_private_ports_false(self) -> None:
listeners = [
{
@@ -95,7 +141,11 @@ def test_listeners_set_correctly_open_private_ports_false(self) -> None:
conf = yaml.safe_load(
ServerConfig(RootConfig()).generate_config_section(
- "CONFDIR", "/data_dir_path", "this.one.listens", True, listeners
+ config_dir_path="CONFDIR",
+ data_dir_path="/data_dir_path",
+ server_name="this.one.listens",
+ open_private_ports=True,
+ listeners=listeners,
)
)
@@ -129,7 +179,11 @@ def test_listeners_set_correctly_open_private_ports_true(self) -> None:
conf = yaml.safe_load(
ServerConfig(RootConfig()).generate_config_section(
- "CONFDIR", "/data_dir_path", "this.one.listens", True, listeners
+ config_dir_path="CONFDIR",
+ data_dir_path="/data_dir_path",
+ server_name="this.one.listens",
+ open_private_ports=True,
+ listeners=listeners,
)
)
From 803e4b4d884b2de4b9e20dc47ffb59a983b8a4b5 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 2 Jan 2026 12:08:37 -0600
Subject: [PATCH 36/59] Make it more clear how `shared_extra_conf` is combined
in our Docker configuration scripts (#19323)
For reference, this PR used to include this whole `shared_config` block in the diff,
but https://github.com/element-hq/synapse/pull/19324 was merged first and already introduced parts of it.
Here is what this code used to look like: https://github.com/element-hq/synapse/blob/566670c363915691826b5b435c4aa7acde61b408/docker/configure_workers_and_start.py#L865-L868
---
Original context for why it was changed this way:
https://github.com/matrix-org/synapse/pull/14921#discussion_r1126257933
Previously, this code made me question two things:
1. Do we actually use `worker_config["shared_extra_conf"]` in the
templates?
- At first glance, I couldn't see why we're updating `shared_extra_conf`
here. It's not used in the `worker.yaml.j2` template so all of this
seemed a bit pointless.
- Turns out, updating `shared_extra_conf` itself is pointless and it's
being done as a convenient place to mix the objects to get things right
in `shared_config` (confusing).
2. Does it actually do anything?
- Because `shared_config` starts out as an empty object, at first glance it
looked like we were just updating with an empty object and then
re-assigning. But because we're in a loop, we actually accumulate the
`shared_extra_conf` from each worker.
I'm not sure I'm capturing my confusion well enough here, but basically
this made me spend time figuring out what we're doing and why, and we can
use a clearer pattern to accomplish the same thing.
---
This change spawns from looking at the
`docker/configure_workers_and_start.py` script in order to add a metrics
listener ([upcoming
PR](https://github.com/element-hq/synapse/pull/19324)).
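As a toy illustration of the accumulation (hypothetical worker options, not Synapse's real shared config), each loop iteration merges the worker's extra config in while spreading the existing `shared_config` last, so new keys accumulate and the first worker of a given type wins any conflict:
```python
shared_config: dict[str, object] = {}

worker_extra_confs = [
    {"start_pushers": False},                       # pusher worker
    {"notify_appservices_from_worker": "appsvc1"},  # appservice worker
    {"start_pushers": False},                       # second pusher worker, duplicate key
]

for extra_conf in worker_extra_confs:
    # Keys already present in `shared_config` win because its spread comes second.
    shared_config = {**extra_conf, **shared_config}

print(shared_config)
# -> {'start_pushers': False, 'notify_appservices_from_worker': 'appsvc1'}
```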
---
changelog.d/19323.misc | 1 +
docker/configure_workers_and_start.py | 4 ++--
2 files changed, 3 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/19323.misc
diff --git a/changelog.d/19323.misc b/changelog.d/19323.misc
new file mode 100644
index 0000000000..6699d7ea54
--- /dev/null
+++ b/changelog.d/19323.misc
@@ -0,0 +1 @@
+Make it more clear how `shared_extra_conf` is combined in our Docker configuration scripts.
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index c03f2a5e56..0ebee49b9d 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -879,8 +879,8 @@ def generate_worker_files(
# worker.
shared_config = {
**worker_config["shared_extra_conf"],
- # We combine `shared_config` second to avoid overwriting existing keys
- # because TODO: why?
+ # We combine `shared_config` second to avoid overwriting existing keys just
+ # for sanity sake (always use the first worker).
**shared_config,
}
From 8f96a83d164bbead2030c2bffabd5626cbb2ce8b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Jan 2026 13:59:36 +0000
Subject: [PATCH 37/59] Bump actions/download-artifact from 6.0.0 to 7.0.0
(#19333)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps
[actions/download-artifact](https://github.com/actions/download-artifact)
from 6.0.0 to 7.0.0.
Release notes
Sourced from actions/download-artifact's
releases.
v7.0.0
v7 - What's new
[!IMPORTANT]
actions/download-artifact@v7 now runs on Node.js 24 (runs.using:
node24) and requires a minimum Actions Runner version of 2.327.1.
If you are using self-hosted runners, ensure they are updated before
upgrading.
Node.js 24
This release updates the runtime to Node.js 24. v6 had preliminary
support for Node 24, however this action was by default still running on
Node.js 20. Now this action by default will run on Node.js 24.
What's Changed
New Contributors
Full Changelog: https://github.com/actions/download-artifact/compare/v6.0.0...v7.0.0
Commits
- 37930b1 Merge pull request #452 from actions/download-artifact-v7-release
- 72582b9 doc: update readme
- 0d2ec9d chore: release v7.0.0 for Node.js 24 support
- fd7ae8f Merge pull request #451 from actions/fix-storage-blob
- d484700 chore: restore minimatch.dep.yml license file
- 03a8080 chore: remove obsolete dependency license files
- 56fe6d9 chore: update @actions/artifact license file to 5.0.1
- 8e3ebc4 chore: update package-lock.json with @actions/artifact@5.0.1
- 1e3c4b4 fix: update @actions/artifact to ^5.0.0 for Node.js 24 punycode fix
- 458627d chore: use local @actions/artifact package for Node.js 24 testing
- Additional commits viewable in compare view
[Dependabot compatibility score](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/docker.yml | 2 +-
.github/workflows/release-artifacts.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 3a5b554a7f..3a038d1e58 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -95,7 +95,7 @@ jobs:
- build
steps:
- name: Download digests
- uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
path: ${{ runner.temp }}/digests
pattern: digests-*
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 496b9086ae..936aae5036 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -189,7 +189,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
- uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
- name: Build a tarball for the debs
# We need to merge all the debs uploads into one folder, then compress
# that.
From 691e43bac93bace003ddb0c27e416764e820e6af Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Jan 2026 14:00:05 +0000
Subject: [PATCH 38/59] Bump actions/cache from 4.3.0 to 5.0.1 (#19332)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [actions/cache](https://github.com/actions/cache) from 4.3.0 to
5.0.1.
Release notes (sourced from actions/cache's releases):
v5.0.1
[!IMPORTANT]
actions/cache@v5 runs on the Node.js 24 runtime and
requires a minimum Actions Runner version of
2.327.1.
If you are using self-hosted runners, ensure they are updated before
upgrading.
v5.0.1
What's Changed
v5.0.0
What's Changed
Full Changelog: https://github.com/actions/cache/compare/v5...v5.0.1
v5.0.0
[!IMPORTANT]
actions/cache@v5 runs on the Node.js 24 runtime and
requires a minimum Actions Runner version of
2.327.1.
If you are using self-hosted runners, ensure they are updated before
upgrading.
What's Changed
Full Changelog: https://github.com/actions/cache/compare/v4.3.0...v5.0.0
Changelog (sourced from actions/cache's changelog):
Releases
5.0.1
- Update @azure/storage-blob to ^12.29.1 via @actions/cache@5.0.1 (#1685)
5.0.0
[!IMPORTANT]
actions/cache@v5 runs on the Node.js 24 runtime and requires a minimum Actions Runner version of 2.327.1.
If you are using self-hosted runners, ensure they are updated before upgrading.
4.3.0
4.2.4
- Bump @actions/cache to v4.0.5
4.2.3
- Bump @actions/cache to v4.0.3 (obfuscates SAS token in debug logs for cache entries)
4.2.2
- Bump @actions/cache to v4.0.2
4.2.1
- Bump @actions/cache to v4.0.1
4.2.0
TLDR; The cache backend service has been rewritten from the ground up for improved performance and reliability. actions/cache now integrates with the new cache service (v2) APIs.
The new service will gradually roll out as of February 1st, 2025. The legacy service will also be sunset on the same date.
Changes in these releases are fully backward compatible.
We are deprecating some versions of this action. We recommend upgrading to version v4 or v3 as soon as possible before February 1st, 2025. (Upgrade instructions below.)
If you are using pinned SHAs, please use the SHAs of versions v4.2.0 or v3.4.0.
If you do not upgrade, all workflow runs using any of the deprecated versions of actions/cache will fail.
Upgrading to the recommended versions will not break your workflows.
4.1.2
... (truncated)
Commits:
- 9255dc7: Merge pull request #1686 from actions/cache-v5.0.1-release
- 8ff5423: chore: release v5.0.1
- 9233019: Merge pull request #1685 from salmanmkc/node24-storage-blob-fix
- b975f2b: fix: add peer property to package-lock.json for dependencies
- d0a0e18: fix: update license files for @actions/cache, fast-xml-parser, and strnum
- 74de208: fix: update @actions/cache to ^5.0.1 for Node.js 24 punycode fix
- ac7f115: peer
- b0f846b: fix: update @actions/cache with storage-blob fix for Node.js 24 punycode depr...
- a783357: Merge pull request #1684 from actions/prepare-cache-v5-release
- 3bb0d78: docs: highlight v5 runner requirement in releases
- Additional commits viewable in the compare view
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/release-artifacts.yml | 2 +-
.github/workflows/tests.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 936aae5036..5559a415a6 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -66,7 +66,7 @@ jobs:
install: true
- name: Set up docker layer caching
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+ uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index dd216fc696..7f265a086b 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -174,7 +174,7 @@ jobs:
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
- name: Restore/persist mypy's cache
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+ uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
with:
path: |
.mypy_cache
From 169d5b9590f392e758a7f4e8ce00e5f70a1b20ec Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Jan 2026 14:15:00 +0000
Subject: [PATCH 39/59] Bump reqwest from 0.12.24 to 0.12.25 in the patches
group (#19331)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the patches group with 1 update:
[reqwest](https://github.com/seanmonstar/reqwest).
Updates `reqwest` from 0.12.24 to 0.12.25
Release notes (sourced from reqwest's releases):
v0.12.25
Highlights
- Add Error::is_upgrade() to determine if the error was from an HTTP upgrade.
- Fix sending Proxy-Authorization if only username is configured.
- Fix sending Proxy-Authorization to HTTPS proxies when the target is HTTP.
- Refactor internal decompression handling to use tower-http.
What's Changed
New Contributors
Full Changelog: https://github.com/seanmonstar/reqwest/compare/v0.12.24...v0.12.25
Changelog (sourced from reqwest's changelog):
v0.12.25
- Add Error::is_upgrade() to determine if the error was from an HTTP upgrade.
- Fix sending Proxy-Authorization if only username is configured.
- Fix sending Proxy-Authorization to HTTPS proxies when the target is HTTP.
- Refactor internal decompression handling to use tower-http.
Commits:
- f156a9f: v0.12.25
- fc1ff4f: fix(proxy): forward Proxy-Authorization header to HTTPS proxies for HTTP targ...
- b7c3712: Use decompression from tower-http (#2840)
- 74e6f84: feat(error): add is_upgrade method to detect protocol upgrade errors (#2822)
- c0c06b7: fix: send proxy-authorization even with empty password (#2868)
- a2aa5a3: chore: minor improvement for docs (#2862)
- 9c4999d: docs: document WASM client (#2859)
- a97e195: docs: document connection pooling behavior (#2851)
- e3093ed: tests: fix wasm timeout test with uncached response (#2853)
- See full diff in the compare view
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's major version (unless you unignore this specific
dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this
group update PR and stop Dependabot creating any more for the specific
dependency's minor version (unless you unignore this specific
dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR
and stop Dependabot creating any more for the specific dependency
(unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore
conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will
remove the ignore condition of the specified dependency and ignore
conditions
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
Cargo.lock | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 892279e4f2..e5ce9325df 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1024,9 +1024,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "reqwest"
-version = "0.12.24"
+version = "0.12.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
+checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f"
dependencies = [
"base64",
"bytes",
@@ -1468,9 +1468,9 @@ dependencies = [
[[package]]
name = "tower-http"
-version = "0.6.6"
+version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
+checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
dependencies = [
"bitflags",
"bytes",
From 6b755f964bab89b6dd0f52eeafb2de2f7a4b3c73 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Jan 2026 14:53:58 +0000
Subject: [PATCH 40/59] Bump actions/upload-artifact from 5.0.0 to 6.0.0
(#19334)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps
[actions/upload-artifact](https://github.com/actions/upload-artifact)
from 5.0.0 to 6.0.0.
Release notes (sourced from actions/upload-artifact's releases):
v6.0.0
v6 - What's new
[!IMPORTANT]
actions/upload-artifact@v6 now runs on Node.js 24 (runs.using:
node24) and requires a minimum Actions Runner version of 2.327.1.
If you are using self-hosted runners, ensure they are updated before
upgrading.
Node.js 24
This release updates the runtime to Node.js 24. v5 had preliminary
support for Node.js 24, however this action was by default still running
on Node.js 20. Now this action by default will run on Node.js 24.
What's Changed
Full Changelog: https://github.com/actions/upload-artifact/compare/v5.0.0...v6.0.0
Commits:
- b7c566a: Merge pull request #745 from actions/upload-artifact-v6-release
- e516bc8: docs: correct description of Node.js 24 support in README
- ddc45ed: docs: update README to correct action name for Node.js 24 support
- 615b319: chore: release v6.0.0 for Node.js 24 support
- 017748b: Merge pull request #744 from actions/fix-storage-blob
- 38d4c79: chore: rebuild dist
- 7d27270: chore: add missing license cache files for @actions/core, @actions/io, and mi...
- 5f643d3: chore: update license files for @actions/artifact@5.0.1 dependencies
- 1df1684: chore: update package-lock.json with @actions/artifact@5.0.1
- b5b1a91: fix: update @actions/artifact to ^5.0.0 for Node.js 24 punycode fix
- Additional commits viewable in the compare view
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Devon Hudson
---
.github/workflows/docker.yml | 2 +-
.github/workflows/docs-pr.yaml | 2 +-
.github/workflows/latest_deps.yml | 2 +-
.github/workflows/release-artifacts.yml | 6 +++---
.github/workflows/tests.yml | 4 ++--
.github/workflows/twisted_trunk.yml | 2 +-
6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 3a038d1e58..301f25177a 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -75,7 +75,7 @@ jobs:
touch "${{ runner.temp }}/digests/${digest#sha256:}"
- name: Upload digest
- uses: actions/upload-artifact@v5
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: digests-${{ matrix.suffix }}
path: ${{ runner.temp }}/digests/*
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
index 524739ba62..eed2acc4a9 100644
--- a/.github/workflows/docs-pr.yaml
+++ b/.github/workflows/docs-pr.yaml
@@ -39,7 +39,7 @@ jobs:
cp book/welcome_and_overview.html book/index.html
- name: Upload Artifact
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: book
path: book
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index 0e27666ef1..9908633f8e 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -173,7 +173,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 5559a415a6..41a1473de3 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -101,7 +101,7 @@ jobs:
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
path: debs/*
@@ -152,7 +152,7 @@ jobs:
# musl: (TODO: investigate).
CIBW_TEST_SKIP: pp3*-* *musl*
- - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: Wheel-${{ matrix.os }}
path: ./wheelhouse/*.whl
@@ -173,7 +173,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: Sdist
path: dist/*.tar.gz
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 7f265a086b..f93c25c01f 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -561,7 +561,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -658,7 +658,7 @@ jobs:
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 64486b0f05..2433632a7f 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -147,7 +147,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
From 444bc56cda05953cb24f95f291d1d2906f3045cc Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Mon, 5 Jan 2026 20:35:11 +0100
Subject: [PATCH 41/59] Add rate limit conf to user directory endpoint (#19291)
The goal is to prevent a user from scraping the user directory too
quickly.
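As an illustration only, a homeserver.yaml sketch of how an admin could tighten this limit (the `rc_user_directory` option name and its defaults come from this patch; the values below are arbitrary examples, not recommendations):
```yaml
# Rate limit user directory searches (the user_directory/search endpoint).
# Defaults introduced by this patch: per_second: 0.016, burst_count: 200.
rc_user_directory:
  per_second: 0.1   # roughly one search every 10 seconds, sustained
  burst_count: 20   # after an initial burst of 20 requests
```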
---
changelog.d/19291.misc | 1 +
demo/start.sh | 6 ++++++
.../conf/workers-shared-extra.yaml.j2 | 4 ++++
.../configuration/config_documentation.md | 19 +++++++++++++++++++
schema/synapse-config.schema.yaml | 10 ++++++++++
synapse/config/ratelimiting.py | 6 ++++++
synapse/rest/client/user_directory.py | 9 +++++++++
7 files changed, 55 insertions(+)
create mode 100644 changelog.d/19291.misc
diff --git a/changelog.d/19291.misc b/changelog.d/19291.misc
new file mode 100644
index 0000000000..bac12b8506
--- /dev/null
+++ b/changelog.d/19291.misc
@@ -0,0 +1 @@
+Add a config to be able to rate limit search in the user directory.
diff --git a/demo/start.sh b/demo/start.sh
index e010302bf4..0b61ac9991 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -145,6 +145,12 @@ for port in 8080 8081 8082; do
rc_delayed_event_mgmt:
per_second: 1000
burst_count: 1000
+ rc_room_creation:
+ per_second: 1000
+ burst_count: 1000
+ rc_user_directory:
+ per_second: 1000
+ burst_count: 1000
RC
)
echo "${ratelimiting}" >> "$port.config"
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 94e74df9d1..101ff153a5 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -102,6 +102,10 @@ rc_room_creation:
per_second: 9999
burst_count: 9999
+rc_user_directory:
+ per_second: 9999
+ burst_count: 9999
+
federation_rr_transactions_per_room_per_second: 9999
allow_device_name_lookup_over_federation: true
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 7509e4d715..badfe0a03f 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -2041,6 +2041,25 @@ rc_room_creation:
burst_count: 5.0
```
---
+### `rc_user_directory`
+
+*(object)* This option allows admins to ratelimit searches in the user directory.
+
+_Added in Synapse 1.145.0._
+
+This setting has the following sub-options:
+
+* `per_second` (number): Maximum number of requests a client can send per second.
+
+* `burst_count` (number): Maximum number of requests a client can send before being throttled.
+
+Default configuration:
+```yaml
+rc_user_directory:
+ per_second: 0.016
+ burst_count: 200.0
+```
+---
### `federation_rr_transactions_per_room_per_second`
*(integer)* Sets outgoing federation transaction frequency for sending read-receipts, per-room.
diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml
index bf9346995d..ca8db9c9ee 100644
--- a/schema/synapse-config.schema.yaml
+++ b/schema/synapse-config.schema.yaml
@@ -2274,6 +2274,16 @@ properties:
examples:
- per_second: 1.0
burst_count: 5.0
+ rc_user_directory:
+ $ref: "#/$defs/rc"
+ description: >-
+ This option allows admins to ratelimit searches in the user directory.
+
+
+ _Added in Synapse 1.145.0._
+ default:
+ per_second: 0.016
+ burst_count: 200.0
federation_rr_transactions_per_room_per_second:
type: integer
description: >-
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 78d9d61d3c..13c9c4dba0 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -252,3 +252,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
"rc_reports",
defaults={"per_second": 1, "burst_count": 5},
)
+
+ self.rc_user_directory = RatelimitSettings.parse(
+ config,
+ "rc_user_directory",
+ defaults={"per_second": 0.016, "burst_count": 200},
+ )
diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py
index 0f561c2e61..fa1342d0bf 100644
--- a/synapse/rest/client/user_directory.py
+++ b/synapse/rest/client/user_directory.py
@@ -23,6 +23,7 @@
from typing import TYPE_CHECKING
from synapse.api.errors import SynapseError
+from synapse.api.ratelimiting import Ratelimiter
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
@@ -46,6 +47,12 @@ def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
self.user_directory_handler = hs.get_user_directory_handler()
+ self._per_user_limiter = Ratelimiter(
+ store=hs.get_datastores().main,
+ clock=hs.get_clock(),
+ cfg=hs.config.ratelimiting.rc_user_directory,
+ )
+
async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonMapping]:
"""Searches for users in directory
@@ -69,6 +76,8 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonMapping]:
if not self.hs.config.userdirectory.user_directory_search_enabled:
return 200, {"limited": False, "results": []}
+ await self._per_user_limiter.ratelimit(requester)
+
body = parse_json_object_from_request(request)
limit = int(body.get("limit", 10))
From cd252db3f56b3c9a8fac5892126f17bb11cce9af Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Tue, 6 Jan 2026 15:53:13 +0000
Subject: [PATCH 42/59] Transform events with client metadata before
serialising in /event response. (#19340)
- Fix the /event/ endpoint not transforming the event with per-requester metadata.
- Pass notif_event through filter_events_for_client (not aware of an actual issue here, but it seems silly to bypass it).
- Rename it to filter_and_transform_events_for_client to make the behaviour more obvious.
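For context, a minimal sketch of the calling pattern this change standardises on; it is not the actual Synapse handler (the wrapper name and simplified arguments are assumptions, and optional parameters such as peeking flags are omitted). The point is to return the transformed copies, not the input events:
```python
from synapse.api.errors import AuthError
from synapse.visibility import filter_and_transform_events_for_client

async def get_event_for_user(storage_controllers, user_id, event):
    # Apply per-requester visibility checks and transformations
    # (e.g. unsigned metadata, erasure) in a single pass.
    filtered = await filter_and_transform_events_for_client(
        storage_controllers, user_id, [event]
    )
    if not filtered:
        raise AuthError(403, "You don't have permission to access that event.")
    # Return the filtered/transformed event rather than the original object.
    return filtered[0]
```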
---------
Signed-off-by: Olivier 'reivilibre
---
changelog.d/19340.bugfix | 1 +
synapse/handlers/admin.py | 4 ++--
synapse/handlers/events.py | 10 ++++++----
synapse/handlers/initial_sync.py | 8 ++++----
synapse/handlers/pagination.py | 4 ++--
synapse/handlers/relations.py | 6 +++---
synapse/handlers/room.py | 4 ++--
synapse/handlers/search.py | 10 +++++-----
synapse/handlers/sliding_sync/__init__.py | 4 ++--
synapse/handlers/sync.py | 6 +++---
synapse/notifier.py | 4 ++--
synapse/push/mailer.py | 7 +++----
synapse/visibility.py | 2 +-
tests/rest/client/test_retention.py | 4 ++--
tests/test_visibility.py | 21 ++++++++++++---------
15 files changed, 50 insertions(+), 45 deletions(-)
create mode 100644 changelog.d/19340.bugfix
diff --git a/changelog.d/19340.bugfix b/changelog.d/19340.bugfix
new file mode 100644
index 0000000000..38de156aa7
--- /dev/null
+++ b/changelog.d/19340.bugfix
@@ -0,0 +1 @@
+Transform events with client metadata before serialising in /event response.
\ No newline at end of file
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index c979752f7f..2fb0e5814f 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -44,7 +44,7 @@
UserInfo,
create_requester,
)
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -251,7 +251,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") ->
topological=last_event.depth,
)
- events = await filter_events_for_client(
+ events = await filter_and_transform_events_for_client(
self._storage_controllers,
user_id,
events,
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index ae17639206..f6517def9c 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -31,7 +31,7 @@
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, UserID
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -156,7 +156,9 @@ async def get_event(
event_id: str,
show_redacted: bool = False,
) -> EventBase | None:
- """Retrieve a single specified event.
+ """Retrieve a single specified event on behalf of a user.
+ The event will be transformed in a user-specific and time-specific way,
+ e.g. having unsigned metadata added or being erased depending on who is accessing.
Args:
user: The local user requesting the event
@@ -188,7 +190,7 @@ async def get_event(
# The user is peeking if they aren't in the room already
is_peeking = not is_user_in_room
- filtered = await filter_events_for_client(
+ filtered = await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
[event],
@@ -198,4 +200,4 @@ async def get_event(
if not filtered:
raise AuthError(403, "You don't have permission to access that event.")
- return event
+ return filtered[0]
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 611c4fa7b3..1e5e98a59b 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -49,7 +49,7 @@
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import concurrently_execute, gather_results
from synapse.util.caches.response_cache import ResponseCache
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -225,7 +225,7 @@ async def handle_room(event: RoomsForUser) -> None:
)
).addErrback(unwrapFirstError)
- messages = await filter_events_for_client(
+ messages = await filter_and_transform_events_for_client(
self._storage_controllers,
user_id,
messages,
@@ -382,7 +382,7 @@ async def _room_initial_sync_parted(
room_id, limit=pagin_config.limit, end_token=stream_token
)
- messages = await filter_events_for_client(
+ messages = await filter_and_transform_events_for_client(
self._storage_controllers,
requester.user.to_string(),
messages,
@@ -496,7 +496,7 @@ async def get_receipts() -> list[JsonMapping]:
).addErrback(unwrapFirstError)
)
- messages = await filter_events_for_client(
+ messages = await filter_and_transform_events_for_client(
self._storage_controllers,
requester.user.to_string(),
messages,
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 63e5dfa70c..7b9c829056 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -46,7 +46,7 @@
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.duration import Duration
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -684,7 +684,7 @@ async def get_messages(
events = await event_filter.filter(events)
if not use_admin_priviledge:
- events = await filter_events_for_client(
+ events = await filter_and_transform_events_for_client(
self._storage_controllers,
user_id,
events,
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index fd38ffa920..d7d3002fbe 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -40,7 +40,7 @@
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, UserID
from synapse.util.async_helpers import gather_results
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -154,7 +154,7 @@ async def get_relations(
[e.event_id for e in related_events]
)
- events = await filter_events_for_client(
+ events = await filter_and_transform_events_for_client(
self._storage_controllers,
user_id,
events,
@@ -599,7 +599,7 @@ async def get_threads(
# Limit the returned threads to those the user has participated in.
events = [event for event in events if participated[event.event_id]]
- events = await filter_events_for_client(
+ events = await filter_and_transform_events_for_client(
self._storage_controllers,
user_id,
events,
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 1026bfd876..e03a912319 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -95,7 +95,7 @@
from synapse.util.duration import Duration
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import parse_and_validate_server_name
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -1919,7 +1919,7 @@ async def get_event_context(
async def filter_evts(events: list[EventBase]) -> list[EventBase]:
if use_admin_priviledge:
return events
- return await filter_events_for_client(
+ return await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
events,
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 20b38427a6..56c047b0e8 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -33,7 +33,7 @@
from synapse.events.utils import SerializeEventConfig
from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID
from synapse.types.state import StateFilter
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -479,7 +479,7 @@ async def _search_by_rank(
filtered_events = await search_filter.filter([r["event"] for r in results])
- events = await filter_events_for_client(
+ events = await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
filtered_events,
@@ -580,7 +580,7 @@ async def _search_by_recent(
filtered_events = await search_filter.filter([r["event"] for r in results])
- events = await filter_events_for_client(
+ events = await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
filtered_events,
@@ -667,13 +667,13 @@ async def _calculate_event_contexts(
len(res.events_after),
)
- events_before = await filter_events_for_client(
+ events_before = await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
res.events_before,
)
- events_after = await filter_events_for_client(
+ events_after = await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
res.events_after,
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
index bb2e785cfa..6feb6c292e 100644
--- a/synapse/handlers/sliding_sync/__init__.py
+++ b/synapse/handlers/sliding_sync/__init__.py
@@ -71,7 +71,7 @@
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -755,7 +755,7 @@ async def get_room_sync_data(
timeline_events.reverse()
# Make sure we don't expose any events that the client shouldn't see
- timeline_events = await filter_events_for_client(
+ timeline_events = await filter_and_transform_events_for_client(
self.storage_controllers,
user.to_string(),
timeline_events,
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 60d8827425..72e91d66ac 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -78,7 +78,7 @@
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
from synapse.util.metrics import Measure
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -679,7 +679,7 @@ async def _load_filtered_recents(
)
)
- recents = await filter_events_for_client(
+ recents = await filter_and_transform_events_for_client(
self._storage_controllers,
sync_config.user.to_string(),
recents,
@@ -789,7 +789,7 @@ async def _load_filtered_recents(
)
)
- loaded_recents = await filter_events_for_client(
+ loaded_recents = await filter_and_transform_events_for_client(
self._storage_controllers,
sync_config.user.to_string(),
loaded_recents,
diff --git a/synapse/notifier.py b/synapse/notifier.py
index d8d2db17f1..cf3923110e 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -63,7 +63,7 @@
)
from synapse.util.duration import Duration
from synapse.util.stringutils import shortstr
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -783,7 +783,7 @@ async def check_for_updates(
)
if keyname == StreamKeyType.ROOM:
- new_events = await filter_events_for_client(
+ new_events = await filter_and_transform_events_for_client(
self._storage_controllers,
user.to_string(),
new_events,
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 6492207403..d18630e80b 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -49,7 +49,7 @@
from synapse.types import StateMap, UserID
from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -537,12 +537,11 @@ async def _get_notif_vars(
"messages": [],
}
- the_events = await filter_events_for_client(
+ the_events = await filter_and_transform_events_for_client(
self._storage_controllers,
user_id,
- results.events_before,
+ results.events_before + [notif_event],
)
- the_events.append(notif_event)
for event in the_events:
messagevars = await self._get_message_vars(notif, event, room_state_ids)
diff --git a/synapse/visibility.py b/synapse/visibility.py
index bfa0db5670..452a2d50fb 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -75,7 +75,7 @@
@trace
-async def filter_events_for_client(
+async def filter_and_transform_events_for_client(
storage: StorageControllers,
user_id: str,
events: list[EventBase],
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index 758d62e63b..82a3b5b337 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -28,7 +28,7 @@
from synapse.server import HomeServer
from synapse.types import JsonDict, create_requester
from synapse.util.clock import Clock
-from synapse.visibility import filter_events_for_client
+from synapse.visibility import filter_and_transform_events_for_client
from tests import unittest
from tests.unittest import override_config
@@ -163,7 +163,7 @@ def test_visibility(self) -> None:
)
self.assertEqual(2, len(events), "events retrieved from database")
filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
storage_controllers,
self.user_id,
events,
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 06598c29de..b50faa2a49 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -31,7 +31,10 @@
from synapse.server import HomeServer
from synapse.types import create_requester
from synapse.util.clock import Clock
-from synapse.visibility import filter_events_for_client, filter_events_for_server
+from synapse.visibility import (
+ filter_and_transform_events_for_client,
+ filter_events_for_server,
+)
from tests import unittest
from tests.test_utils.event_injection import inject_event, inject_member_event
@@ -330,7 +333,7 @@ def test_normal_operation_as_admin(self) -> None:
# Do filter & assert
filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@admin:test",
events_to_filter,
@@ -369,7 +372,7 @@ def test_see_soft_failed_events(self) -> None:
# Do filter & assert
filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@admin:test",
events_to_filter,
@@ -416,7 +419,7 @@ def test_see_policy_server_spammy_events(self) -> None:
# Do filter & assert
filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@admin:test",
events_to_filter,
@@ -463,7 +466,7 @@ def test_see_soft_failed_and_policy_server_spammy_events(self) -> None:
# Do filter & assert
filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@admin:test",
events_to_filter,
@@ -538,14 +541,14 @@ def test_joined_history_visibility(self) -> None:
# accidentally serving the same event object (with the same unsigned.membership
# property) to both users.
joiner_filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@joiner:test",
events_to_filter,
)
)
resident_filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@resident:test",
events_to_filter,
@@ -641,7 +644,7 @@ def test_out_of_band_invite_rejection(self) -> None:
# the invited user should be able to see both the invite and the rejection
filtered_events = self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@user:test",
[invite_event, reject_event],
@@ -662,7 +665,7 @@ def test_out_of_band_invite_rejection(self) -> None:
# other users should see neither
self.assertEqual(
self.get_success(
- filter_events_for_client(
+ filter_and_transform_events_for_client(
self.hs.get_storage_controllers(),
"@other:test",
[invite_event, reject_event],
From 39f80296c5683bfa2c06a7ade56d4c95f0e30b2a Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 6 Jan 2026 09:38:44 -0700
Subject: [PATCH 43/59] 1.145.0rc1
---
CHANGES.md | 57 +++++++++++++++++++++++++++++++
changelog.d/18402.misc | 1 -
changelog.d/19187.misc | 1 -
changelog.d/19206.bugfix | 1 -
changelog.d/19212.misc | 1 -
changelog.d/19231.bugfix | 1 -
changelog.d/19232.misc | 1 -
changelog.d/19234.misc | 1 -
changelog.d/19253.misc | 1 -
changelog.d/19254.removal | 1 -
changelog.d/19258.misc | 1 -
changelog.d/19260.feature | 1 -
changelog.d/19262.misc | 1 -
changelog.d/19267.bugfix | 1 -
changelog.d/19268.feature | 1 -
changelog.d/19270.doc | 1 -
changelog.d/19274.bugfix | 1 -
changelog.d/19275.feature | 1 -
changelog.d/19278.misc | 1 -
changelog.d/19279.feature | 1 -
changelog.d/19281.feature | 1 -
changelog.d/19289.misc | 1 -
changelog.d/19291.misc | 1 -
changelog.d/19297.misc | 1 -
changelog.d/19300.feature | 1 -
changelog.d/19302.misc | 1 -
changelog.d/19323.misc | 1 -
changelog.d/19324.docker | 1 -
changelog.d/19326.misc | 1 -
changelog.d/19327.misc | 1 -
changelog.d/19340.bugfix | 1 -
debian/changelog | 6 ++++
pyproject.toml | 2 +-
schema/synapse-config.schema.yaml | 2 +-
34 files changed, 65 insertions(+), 32 deletions(-)
delete mode 100644 changelog.d/18402.misc
delete mode 100644 changelog.d/19187.misc
delete mode 100644 changelog.d/19206.bugfix
delete mode 100644 changelog.d/19212.misc
delete mode 100644 changelog.d/19231.bugfix
delete mode 100644 changelog.d/19232.misc
delete mode 100644 changelog.d/19234.misc
delete mode 100644 changelog.d/19253.misc
delete mode 100644 changelog.d/19254.removal
delete mode 100644 changelog.d/19258.misc
delete mode 100644 changelog.d/19260.feature
delete mode 100644 changelog.d/19262.misc
delete mode 100644 changelog.d/19267.bugfix
delete mode 100644 changelog.d/19268.feature
delete mode 100644 changelog.d/19270.doc
delete mode 100644 changelog.d/19274.bugfix
delete mode 100644 changelog.d/19275.feature
delete mode 100644 changelog.d/19278.misc
delete mode 100644 changelog.d/19279.feature
delete mode 100644 changelog.d/19281.feature
delete mode 100644 changelog.d/19289.misc
delete mode 100644 changelog.d/19291.misc
delete mode 100644 changelog.d/19297.misc
delete mode 100644 changelog.d/19300.feature
delete mode 100644 changelog.d/19302.misc
delete mode 100644 changelog.d/19323.misc
delete mode 100644 changelog.d/19324.docker
delete mode 100644 changelog.d/19326.misc
delete mode 100644 changelog.d/19327.misc
delete mode 100644 changelog.d/19340.bugfix
diff --git a/CHANGES.md b/CHANGES.md
index 83ab1849e6..1f6ed01c20 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,60 @@
+# Synapse 1.145.0rc1 (2026-01-06)
+
+- Ubuntu 25.04 (Plucky Puffin) will be end of life on Jan 17, 2026. Synapse will stop building packages for Ubuntu 25.04 shortly thereafter.
+- Remove the "Updates to locked dependencies" section from the changelog due to lack of use and the maintenance burden. ([\#19254](https://github.com/element-hq/synapse/issues/19254))
+
+## Features
+
+- Add `memberships` endpoint to the admin API. This is useful for forensics and T&S purpose. ([\#19260](https://github.com/element-hq/synapse/issues/19260))
+- Add an admin API for retrieving a paginated list of quarantined media. ([\#19268](https://github.com/element-hq/synapse/issues/19268))
+- Server admins can bypass the quarantine media check when downloading media by setting the `admin_unsafely_bypass_quarantine` query parameter to `true` on Client-Server API media download requests. ([\#19275](https://github.com/element-hq/synapse/issues/19275))
+- Implemented pagination for the [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) mutual rooms endpoint. Contributed by @tulir @ Beeper. ([\#19279](https://github.com/element-hq/synapse/issues/19279))
+- Admin API: add worker support to `GET /_synapse/admin/v2/users/`. ([\#19281](https://github.com/element-hq/synapse/issues/19281))
+- Improve proxy support for the `federation_client.py` dev script. Contributed by Denis Kasak (@dkasak). ([\#19300](https://github.com/element-hq/synapse/issues/19300))
+
+## Bugfixes
+
+- Fix sliding sync performance slow down for long lived connections. ([\#19206](https://github.com/element-hq/synapse/issues/19206))
+- Fix a bug where Mastodon posts (and possibly other embeds) have the wrong description for URL previews. ([\#19231](https://github.com/element-hq/synapse/issues/19231))
+- Fix bug where `Duration` was logged incorrectly. ([\#19267](https://github.com/element-hq/synapse/issues/19267))
+- Fix bug introduced in 1.143.0 that broke support for versions of `zope-interface` older than 6.2. ([\#19274](https://github.com/element-hq/synapse/issues/19274))
+- Transform events with client metadata before serialising in /event response. ([\#19340](https://github.com/element-hq/synapse/issues/19340))
+
+## Updates to the Docker image
+
+- Add a way to expose metrics from the Docker image (`SYNAPSE_ENABLE_METRICS`). ([\#19324](https://github.com/element-hq/synapse/issues/19324))
+
+## Improved Documentation
+
+- Document the importance of `public_baseurl` when configuring OpenID Connect authentication. ([\#19270](https://github.com/element-hq/synapse/issues/19270))
+
+## Deprecations and Removals
+
+- Ubuntu 25.04 (Plucky Puffin) will be end of life on Jan 17, 2026. Synapse will stop building packages for Ubuntu 25.04 shortly thereafter.
+- Remove the "Updates to locked dependencies" section from the changelog due to lack of use and the maintenance burden. ([\#19254](https://github.com/element-hq/synapse/issues/19254))
+
+## Internal Changes
+
+- Group together dependabot update PRs to reduce the review load. ([\#18402](https://github.com/element-hq/synapse/issues/18402))
+- Fix `HomeServer.shutdown()` failing if the homeserver hasn't been setup yet. ([\#19187](https://github.com/element-hq/synapse/issues/19187))
+- Respond with useful error codes with `Content-Length` header/s are invalid. ([\#19212](https://github.com/element-hq/synapse/issues/19212))
+- Fix `HomeServer.shutdown()` failing if the homeserver failed to `start`. ([\#19232](https://github.com/element-hq/synapse/issues/19232))
+- Switch the build backend from `poetry-core` to `maturin`. ([\#19234](https://github.com/element-hq/synapse/issues/19234))
+- Raise the limit for concurrently-open non-security @dependabot PRs from 5 to 10. ([\#19253](https://github.com/element-hq/synapse/issues/19253))
+- Require 14 days to pass before pulling in general dependency updates to help mitigate upstream supply chain attacks. ([\#19258](https://github.com/element-hq/synapse/issues/19258))
+- Drop the broken netlify documentation workflow until a new one is implemented. ([\#19262](https://github.com/element-hq/synapse/issues/19262))
+- Don't include debug logs in `Clock` unless explicitly enabled. ([\#19278](https://github.com/element-hq/synapse/issues/19278))
+- Use `uv` to test olddeps to ensure all transitive dependencies use minimum versions. ([\#19289](https://github.com/element-hq/synapse/issues/19289))
+- Add a config to be able to rate limit search in the user directory. ([\#19291](https://github.com/element-hq/synapse/issues/19291))
+- Log the original bind exception when encountering `Failed to listen on 0.0.0.0, continuing because listening on [::]`. ([\#19297](https://github.com/element-hq/synapse/issues/19297))
+- Unpin the version of Rust we use to build Synapse wheels (was 1.82.0) now that MacOS support has been dropped. ([\#19302](https://github.com/element-hq/synapse/issues/19302))
+- Make it more clear how `shared_extra_conf` is combined in our Docker configuration scripts. ([\#19323](https://github.com/element-hq/synapse/issues/19323))
+- Update CI to stream Complement progress and format logs in a separate step after all tests are done. ([\#19326](https://github.com/element-hq/synapse/issues/19326))
+- Format `.github/workflows/tests.yml`. ([\#19327](https://github.com/element-hq/synapse/issues/19327))
+
+
+
+
# Synapse 1.144.0 (2025-12-09)
## Deprecation of MacOS Python wheels
diff --git a/changelog.d/18402.misc b/changelog.d/18402.misc
deleted file mode 100644
index 4b13652845..0000000000
--- a/changelog.d/18402.misc
+++ /dev/null
@@ -1 +0,0 @@
-Group together dependabot update PRs to reduce the review load.
\ No newline at end of file
diff --git a/changelog.d/19187.misc b/changelog.d/19187.misc
deleted file mode 100644
index d831de38c8..0000000000
--- a/changelog.d/19187.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix `HomeServer.shutdown()` failing if the homeserver hasn't been setup yet.
diff --git a/changelog.d/19206.bugfix b/changelog.d/19206.bugfix
deleted file mode 100644
index 9cdfaa2571..0000000000
--- a/changelog.d/19206.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix sliding sync performance slow down for long lived connections.
diff --git a/changelog.d/19212.misc b/changelog.d/19212.misc
deleted file mode 100644
index 83158ce2d9..0000000000
--- a/changelog.d/19212.misc
+++ /dev/null
@@ -1 +0,0 @@
-Respond with useful error codes with `Content-Length` header/s are invalid.
diff --git a/changelog.d/19231.bugfix b/changelog.d/19231.bugfix
deleted file mode 100644
index 580b642bb2..0000000000
--- a/changelog.d/19231.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where Mastodon posts (and possibly other embeds) have the wrong description for URL previews.
diff --git a/changelog.d/19232.misc b/changelog.d/19232.misc
deleted file mode 100644
index 6e3e2ff649..0000000000
--- a/changelog.d/19232.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix `HomeServer.shutdown()` failing if the homeserver failed to `start`.
diff --git a/changelog.d/19234.misc b/changelog.d/19234.misc
deleted file mode 100644
index d79bc0b19f..0000000000
--- a/changelog.d/19234.misc
+++ /dev/null
@@ -1 +0,0 @@
-Switch the build backend from `poetry-core` to `maturin`.
\ No newline at end of file
diff --git a/changelog.d/19253.misc b/changelog.d/19253.misc
deleted file mode 100644
index 1d45f936f6..0000000000
--- a/changelog.d/19253.misc
+++ /dev/null
@@ -1 +0,0 @@
-Raise the limit for concurrently-open non-security @dependabot PRs from 5 to 10.
\ No newline at end of file
diff --git a/changelog.d/19254.removal b/changelog.d/19254.removal
deleted file mode 100644
index ee527cef99..0000000000
--- a/changelog.d/19254.removal
+++ /dev/null
@@ -1 +0,0 @@
-Remove the "Updates to locked dependencies" section from the changelog due to lack of use and the maintenance burden.
\ No newline at end of file
diff --git a/changelog.d/19258.misc b/changelog.d/19258.misc
deleted file mode 100644
index 9155f9d20f..0000000000
--- a/changelog.d/19258.misc
+++ /dev/null
@@ -1 +0,0 @@
-Require 14 days to pass before pulling in general dependency updates to help mitigate upstream supply chain attacks.
\ No newline at end of file
diff --git a/changelog.d/19260.feature b/changelog.d/19260.feature
deleted file mode 100644
index 19b192a009..0000000000
--- a/changelog.d/19260.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add `memberships` endpoint to the admin API. This is useful for forensics and T&S purpose.
diff --git a/changelog.d/19262.misc b/changelog.d/19262.misc
deleted file mode 100644
index 31906e6623..0000000000
--- a/changelog.d/19262.misc
+++ /dev/null
@@ -1 +0,0 @@
-Drop the broken netlify documentation workflow until a new one is implemented.
\ No newline at end of file
diff --git a/changelog.d/19267.bugfix b/changelog.d/19267.bugfix
deleted file mode 100644
index 6c7ed750ec..0000000000
--- a/changelog.d/19267.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `Duration` was logged incorrectly.
diff --git a/changelog.d/19268.feature b/changelog.d/19268.feature
deleted file mode 100644
index cb7035fee2..0000000000
--- a/changelog.d/19268.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an admin API for retrieving a paginated list of quarantined media.
\ No newline at end of file
diff --git a/changelog.d/19270.doc b/changelog.d/19270.doc
deleted file mode 100644
index fdb7e2e51c..0000000000
--- a/changelog.d/19270.doc
+++ /dev/null
@@ -1 +0,0 @@
-Document the importance of `public_baseurl` when configuring OpenID Connect authentication.
diff --git a/changelog.d/19274.bugfix b/changelog.d/19274.bugfix
deleted file mode 100644
index 92aaa0fe6d..0000000000
--- a/changelog.d/19274.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug introduced in 1.143.0 that broke support for versions of `zope-interface` older than 6.2.
diff --git a/changelog.d/19275.feature b/changelog.d/19275.feature
deleted file mode 100644
index 5147c546cf..0000000000
--- a/changelog.d/19275.feature
+++ /dev/null
@@ -1 +0,0 @@
-Server admins can bypass the quarantine media check when downloading media by setting the `admin_unsafely_bypass_quarantine` query parameter to `true` on Client-Server API media download requests.
\ No newline at end of file
diff --git a/changelog.d/19278.misc b/changelog.d/19278.misc
deleted file mode 100644
index d1425ff38c..0000000000
--- a/changelog.d/19278.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't include debug logs in `Clock` unless explicitly enabled.
diff --git a/changelog.d/19279.feature b/changelog.d/19279.feature
deleted file mode 100644
index 031e48dceb..0000000000
--- a/changelog.d/19279.feature
+++ /dev/null
@@ -1 +0,0 @@
-Implemented pagination for the [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) mutual rooms endpoint. Contributed by @tulir @ Beeper.
diff --git a/changelog.d/19281.feature b/changelog.d/19281.feature
deleted file mode 100644
index 78d3002d90..0000000000
--- a/changelog.d/19281.feature
+++ /dev/null
@@ -1 +0,0 @@
-Admin API: add worker support to `GET /_synapse/admin/v2/users/`.
diff --git a/changelog.d/19289.misc b/changelog.d/19289.misc
deleted file mode 100644
index 4ad0dbc430..0000000000
--- a/changelog.d/19289.misc
+++ /dev/null
@@ -1 +0,0 @@
-Use `uv` to test olddeps to ensure all transitive dependencies use minimum versions.
diff --git a/changelog.d/19291.misc b/changelog.d/19291.misc
deleted file mode 100644
index bac12b8506..0000000000
--- a/changelog.d/19291.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a config to be able to rate limit search in the user directory.
diff --git a/changelog.d/19297.misc b/changelog.d/19297.misc
deleted file mode 100644
index aec97fd973..0000000000
--- a/changelog.d/19297.misc
+++ /dev/null
@@ -1 +0,0 @@
-Log the original bind exception when encountering `Failed to listen on 0.0.0.0, continuing because listening on [::]`.
diff --git a/changelog.d/19300.feature b/changelog.d/19300.feature
deleted file mode 100644
index 97e43e9b28..0000000000
--- a/changelog.d/19300.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve proxy support for the `federation_client.py` dev script. Contributed by Denis Kasak (@dkasak).
diff --git a/changelog.d/19302.misc b/changelog.d/19302.misc
deleted file mode 100644
index 606ab5b52d..0000000000
--- a/changelog.d/19302.misc
+++ /dev/null
@@ -1 +0,0 @@
-Unpin the version of Rust we use to build Synapse wheels (was 1.82.0) now that MacOS support has been dropped.
\ No newline at end of file
diff --git a/changelog.d/19323.misc b/changelog.d/19323.misc
deleted file mode 100644
index 6699d7ea54..0000000000
--- a/changelog.d/19323.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make it more clear how `shared_extra_conf` is combined in our Docker configuration scripts.
diff --git a/changelog.d/19324.docker b/changelog.d/19324.docker
deleted file mode 100644
index 52bf9cb7ae..0000000000
--- a/changelog.d/19324.docker
+++ /dev/null
@@ -1 +0,0 @@
-Add a way to expose metrics from the Docker image (`SYNAPSE_ENABLE_METRICS`).
diff --git a/changelog.d/19326.misc b/changelog.d/19326.misc
deleted file mode 100644
index 37493c7488..0000000000
--- a/changelog.d/19326.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update CI to stream Complement progress and format logs in a separate step after all tests are done.
diff --git a/changelog.d/19327.misc b/changelog.d/19327.misc
deleted file mode 100644
index d61a66907e..0000000000
--- a/changelog.d/19327.misc
+++ /dev/null
@@ -1 +0,0 @@
-Format `.github/workflows/tests.yml`.
diff --git a/changelog.d/19340.bugfix b/changelog.d/19340.bugfix
deleted file mode 100644
index 38de156aa7..0000000000
--- a/changelog.d/19340.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Transform events with client metadata before serialising in /event response.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 15ff7cbd9d..3b7d0b773e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.145.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.145.0rc1.
+
+ -- Synapse Packaging team Tue, 06 Jan 2026 09:29:39 -0700
+
matrix-synapse-py3 (1.144.0) stable; urgency=medium
* New Synapse release 1.144.0.
diff --git a/pyproject.toml b/pyproject.toml
index 09ca2a9e77..7aa5840a9d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
-version = "1.144.0"
+version = "1.145.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml
index ca8db9c9ee..3b58db6938 100644
--- a/schema/synapse-config.schema.yaml
+++ b/schema/synapse-config.schema.yaml
@@ -1,5 +1,5 @@
$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json
-$id: https://element-hq.github.io/synapse/schema/synapse/v1.144/synapse-config.schema.json
+$id: https://element-hq.github.io/synapse/schema/synapse/v1.145/synapse-config.schema.json
type: object
properties:
modules:
From d6d1404a8ee36501b733d780d44d91dbe42cd806 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 6 Jan 2026 09:49:48 -0700
Subject: [PATCH 44/59] Add nifty titles to top level deprecations
---
CHANGES.md | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/CHANGES.md b/CHANGES.md
index 1f6ed01c20..d075da21c2 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,7 +1,12 @@
# Synapse 1.145.0rc1 (2026-01-06)
-- Ubuntu 25.04 (Plucky Puffin) will be end of life on Jan 17, 2026. Synapse will stop building packages for Ubuntu 25.04 shortly thereafter.
-- Remove the "Updates to locked dependencies" section from the changelog due to lack of use and the maintenance burden. ([\#19254](https://github.com/element-hq/synapse/issues/19254))
+## End of Life of Ubuntu 25.04 Plucky Puffin
+
+Ubuntu 25.04 (Plucky Puffin) will be end of life on Jan 17, 2026. Synapse will stop building packages for Ubuntu 25.04 shortly thereafter.
+
+## Updates to Locked Dependencies No Longer Included in Changelog
+
+The "Updates to locked dependencies" section has been removed from the changelog due to lack of use and the maintenance burden. ([\#19254](https://github.com/element-hq/synapse/issues/19254))
## Features
From 6ac61e4be494cb3467348882343dd19a0a249994 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 6 Jan 2026 21:37:23 +0000
Subject: [PATCH 45/59] Revert "Add an Admin API endpoint for listing
quarantined media (#19268)" (#19351)
Fixes #19349
This reverts commit 3f636386a66cbc57a6a3c3e641dfd6f1917c838e
(https://github.com/element-hq/synapse/pull/19268) as the DB migration
was taking too long and was blocking media access while it ran.
See https://github.com/element-hq/synapse/issues/19349 for further
information.
---------
Co-authored-by: Travis Ralston
---
changelog.d/19351.misc | 1 +
docs/admin_api/media_admin_api.md | 27 -----
synapse/media/media_repository.py | 2 -
synapse/rest/admin/media.py | 33 ------
.../databases/main/media_repository.py | 10 +-
synapse/storage/databases/main/room.py | 70 ++----------
.../93/04_add_quarantined_ts_to_media.sql | 27 -----
tests/rest/admin/test_media.py | 106 ------------------
8 files changed, 11 insertions(+), 265 deletions(-)
create mode 100644 changelog.d/19351.misc
delete mode 100644 synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql
diff --git a/changelog.d/19351.misc b/changelog.d/19351.misc
new file mode 100644
index 0000000000..5e186b9a0c
--- /dev/null
+++ b/changelog.d/19351.misc
@@ -0,0 +1 @@
+Revert "Add an Admin API endpoint for listing quarantined media (#19268)".
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 25481a8c55..6b96eb3356 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -73,33 +73,6 @@ Response:
}
```
-## Listing all quarantined media
-
-This API returns a list of all quarantined media on the server. It is paginated, and can be scoped to either local or
-remote media. Note that the pagination values are also scoped to the request parameters - changing them but keeping the
-same pagination values will result in unexpected results.
-
-Request:
-```http
-GET /_synapse/admin/v1/media/quarantined?from=0&limit=100&kind=local
-```
-
-`from` and `limit` are optional parameters, and default to `0` and `100` respectively. They are the row index and number
-of rows to return - they are not timestamps.
-
-`kind` *MUST* either be `local` or `remote`.
-
-The API returns a JSON body containing MXC URIs for the quarantined media, like the following:
-
-```json
-{
- "media": [
- "mxc://localhost/xwvutsrqponmlkjihgfedcba",
- "mxc://localhost/abcdefghijklmnopqrstuvwx"
- ]
-}
-```
-
# Quarantine media
Quarantining media means that it is marked as inaccessible by users. It applies
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
index cb745b96ad..8d38c1655f 100644
--- a/synapse/media/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -928,7 +928,6 @@ async def _download_remote_file(
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
- quarantined_ts=None,
authenticated=authenticated,
sha256=sha256writer.hexdigest(),
)
@@ -1062,7 +1061,6 @@ async def _federation_download_remote_file(
filesystem_id=file_id,
last_access_ts=time_now_ms,
quarantined_by=None,
- quarantined_ts=None,
authenticated=authenticated,
sha256=sha256writer.hexdigest(),
)
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 4cfd9da0f9..d5346fe0d5 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -293,38 +293,6 @@ async def on_GET(
return HTTPStatus.OK, {"local": local_mxcs, "remote": remote_mxcs}
-class ListQuarantinedMedia(RestServlet):
- """Lists all quarantined media on the server."""
-
- PATTERNS = admin_patterns("/media/quarantined$")
-
- def __init__(self, hs: "HomeServer"):
- self.store = hs.get_datastores().main
- self.auth = hs.get_auth()
-
- async def on_GET(
- self,
- request: SynapseRequest,
- ) -> tuple[int, JsonDict]:
- await assert_requester_is_admin(self.auth, request)
-
- start = parse_integer(request, "from", default=0)
- limit = parse_integer(request, "limit", default=100)
- local_or_remote = parse_string(request, "kind", required=True)
-
- if local_or_remote not in ["local", "remote"]:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Query parameter `kind` must be either 'local' or 'remote'.",
- )
-
- mxcs = await self.store.get_quarantined_media_mxcs(
- start, limit, local_or_remote == "local"
- )
-
- return HTTPStatus.OK, {"media": mxcs}
-
-
class PurgeMediaCacheRestServlet(RestServlet):
PATTERNS = admin_patterns("/purge_media_cache$")
@@ -564,7 +532,6 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer)
ProtectMediaByID(hs).register(http_server)
UnprotectMediaByID(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
- ListQuarantinedMedia(hs).register(http_server)
# XXX DeleteMediaByDateSize must be registered before DeleteMediaByID as
# their URL routes overlap.
DeleteMediaByDateSize(hs).register(http_server)
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index c27c68fbc2..50664d63e5 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -61,7 +61,6 @@ class LocalMedia:
url_cache: str | None
last_access_ts: int
quarantined_by: str | None
- quarantined_ts: int | None
safe_from_quarantine: bool
user_id: str | None
authenticated: bool | None
@@ -79,7 +78,6 @@ class RemoteMedia:
created_ts: int
last_access_ts: int
quarantined_by: str | None
- quarantined_ts: int | None
authenticated: bool | None
sha256: str | None
@@ -245,7 +243,6 @@ async def get_local_media(self, media_id: str) -> LocalMedia | None:
"user_id",
"authenticated",
"sha256",
- "quarantined_ts",
),
allow_none=True,
desc="get_local_media",
@@ -265,7 +262,6 @@ async def get_local_media(self, media_id: str) -> LocalMedia | None:
user_id=row[8],
authenticated=row[9],
sha256=row[10],
- quarantined_ts=row[11],
)
async def get_local_media_by_user_paginate(
@@ -323,8 +319,7 @@ def get_local_media_by_user_paginate_txn(
safe_from_quarantine,
user_id,
authenticated,
- sha256,
- quarantined_ts
+ sha256
FROM local_media_repository
WHERE user_id = ?
ORDER BY {order_by_column} {order}, media_id ASC
@@ -350,7 +345,6 @@ def get_local_media_by_user_paginate_txn(
user_id=row[9],
authenticated=row[10],
sha256=row[11],
- quarantined_ts=row[12],
)
for row in txn
]
@@ -701,7 +695,6 @@ async def get_cached_remote_media(
"quarantined_by",
"authenticated",
"sha256",
- "quarantined_ts",
),
allow_none=True,
desc="get_cached_remote_media",
@@ -720,7 +713,6 @@ async def get_cached_remote_media(
quarantined_by=row[6],
authenticated=row[7],
sha256=row[8],
- quarantined_ts=row[9],
)
async def store_cached_remote_media(
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 182e55743a..633df07736 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -945,50 +945,6 @@ def get_retention_policy_for_room_txn(
max_lifetime=max_lifetime,
)
- async def get_quarantined_media_mxcs(
- self, index_start: int, index_limit: int, local: bool
- ) -> list[str]:
- """Retrieves all the quarantined media MXC URIs starting from the given position,
- ordered from oldest quarantined timestamp, then alphabetically by media ID
- (including origin).
-
- Note that on established servers the "quarantined timestamp" may be zero due to
- being introduced after the quarantine timestamp field was introduced.
-
- Args:
- index_start: The position to start from.
- index_limit: The maximum number of results to return.
- local: When true, only local media will be returned. When false, only remote media will be returned.
-
- Returns:
- The quarantined media as a list of media IDs.
- """
-
- def _get_quarantined_media_mxcs_txn(
- txn: LoggingTransaction,
- ) -> list[str]:
- # We order by quarantined timestamp *and* media ID (including origin, when
- # known) to ensure the ordering is stable for established servers.
- if local:
- sql = "SELECT '' as media_origin, media_id FROM local_media_repository WHERE quarantined_by IS NOT NULL ORDER BY quarantined_ts, media_id ASC LIMIT ? OFFSET ?"
- else:
- sql = "SELECT media_origin, media_id FROM remote_media_cache WHERE quarantined_by IS NOT NULL ORDER BY quarantined_ts, media_origin, media_id ASC LIMIT ? OFFSET ?"
- txn.execute(sql, (index_limit, index_start))
-
- mxcs = []
-
- for media_origin, media_id in txn:
- if local:
- media_origin = self.hs.hostname
- mxcs.append(f"mxc://{media_origin}/{media_id}")
-
- return mxcs
-
- return await self.db_pool.runInteraction(
- "get_quarantined_media_mxcs",
- _get_quarantined_media_mxcs_txn,
- )
-
async def get_media_mxcs_in_room(self, room_id: str) -> tuple[list[str], list[str]]:
"""Retrieves all the local and remote media MXC URIs in a given room
@@ -996,7 +952,7 @@ async def get_media_mxcs_in_room(self, room_id: str) -> tuple[list[str], list[st
room_id
Returns:
- The local and remote media as lists of the media IDs.
+ The local and remote media as a lists of the media IDs.
"""
def _get_media_mxcs_in_room_txn(
@@ -1191,10 +1147,6 @@ def _quarantine_local_media_txn(
The total number of media items quarantined
"""
total_media_quarantined = 0
- now_ts: int | None = self.clock.time_msec()
-
- if quarantined_by is None:
- now_ts = None
# Effectively a legacy path, update any media that was explicitly named.
if media_ids:
@@ -1203,13 +1155,13 @@ def _quarantine_local_media_txn(
)
sql = f"""
UPDATE local_media_repository
- SET quarantined_by = ?, quarantined_ts = ?
+ SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"
- txn.execute(sql, [quarantined_by, now_ts] + sql_many_clause_args)
+ txn.execute(sql, [quarantined_by] + sql_many_clause_args)
# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
@@ -1220,13 +1172,13 @@ def _quarantine_local_media_txn(
)
sql = f"""
UPDATE local_media_repository
- SET quarantined_by = ?, quarantined_ts = ?
+ SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
if quarantined_by is not None:
sql += " AND safe_from_quarantine = FALSE"
- txn.execute(sql, [quarantined_by, now_ts] + sql_many_clause_args)
+ txn.execute(sql, [quarantined_by] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return total_media_quarantined
@@ -1250,10 +1202,6 @@ def _quarantine_remote_media_txn(
The total number of media items quarantined
"""
total_media_quarantined = 0
- now_ts: int | None = self.clock.time_msec()
-
- if quarantined_by is None:
- now_ts = None
if media:
sql_in_list_clause, sql_args = make_tuple_in_list_sql_clause(
@@ -1263,10 +1211,10 @@ def _quarantine_remote_media_txn(
)
sql = f"""
UPDATE remote_media_cache
- SET quarantined_by = ?, quarantined_ts = ?
+ SET quarantined_by = ?
WHERE {sql_in_list_clause}"""
- txn.execute(sql, [quarantined_by, now_ts] + sql_args)
+ txn.execute(sql, [quarantined_by] + sql_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
total_media_quarantined = 0
@@ -1276,9 +1224,9 @@ def _quarantine_remote_media_txn(
)
sql = f"""
UPDATE remote_media_cache
- SET quarantined_by = ?, quarantined_ts = ?
+ SET quarantined_by = ?
WHERE {sql_many_clause_sql}"""
- txn.execute(sql, [quarantined_by, now_ts] + sql_many_clause_args)
+ txn.execute(sql, [quarantined_by] + sql_many_clause_args)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
return total_media_quarantined
diff --git a/synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql b/synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql
deleted file mode 100644
index 18b76804ff..0000000000
--- a/synapse/storage/schema/main/delta/93/04_add_quarantined_ts_to_media.sql
+++ /dev/null
@@ -1,27 +0,0 @@
---
--- This file is licensed under the Affero General Public License (AGPL) version 3.
---
--- Copyright (C) 2025 Element Creations, Ltd
---
--- This program is free software: you can redistribute it and/or modify
--- it under the terms of the GNU Affero General Public License as
--- published by the Free Software Foundation, either version 3 of the
--- License, or (at your option) any later version.
---
--- See the GNU Affero General Public License for more details:
--- .
-
--- Add a timestamp for when the sliding sync connection position was last used,
--- only updated with a small granularity.
---
--- This should be NOT NULL, but we need to consider existing rows. In future we
--- may want to either backfill this or delete all rows with a NULL value (and
--- then make it NOT NULL).
-ALTER TABLE local_media_repository ADD COLUMN quarantined_ts BIGINT;
-ALTER TABLE remote_media_cache ADD COLUMN quarantined_ts BIGINT;
-
-UPDATE local_media_repository SET quarantined_ts = 0 WHERE quarantined_by IS NOT NULL;
-UPDATE remote_media_cache SET quarantined_ts = 0 WHERE quarantined_by IS NOT NULL;
-
--- Note: We *probably* should have an index on quarantined_ts, but we're going
--- to try to defer that to a future migration after seeing the performance impact.
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index e45cc4d208..8cc54cc80c 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -756,112 +756,6 @@ def _access_media(
self.assertFalse(os.path.exists(local_path))
-class ListQuarantinedMediaTestCase(_AdminMediaTests):
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.store = hs.get_datastores().main
- self.server_name = hs.hostname
-
- @parameterized.expand(["local", "remote"])
- def test_no_auth(self, kind: str) -> None:
- """
- Try to list quarantined media without authentication.
- """
-
- channel = self.make_request(
- "GET",
- "/_synapse/admin/v1/media/quarantined?kind=%s" % (kind,),
- )
-
- self.assertEqual(401, channel.code, msg=channel.json_body)
- self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
-
- @parameterized.expand(["local", "remote"])
- def test_requester_is_not_admin(self, kind: str) -> None:
- """
- If the user is not a server admin, an error is returned.
- """
- self.other_user = self.register_user("user", "pass")
- self.other_user_token = self.login("user", "pass")
-
- channel = self.make_request(
- "GET",
- "/_synapse/admin/v1/media/quarantined?kind=%s" % (kind,),
- access_token=self.other_user_token,
- )
-
- self.assertEqual(403, channel.code, msg=channel.json_body)
- self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
-
- def test_list_quarantined_media(self) -> None:
- """
- Ensure we actually get results for each page. We can't really test that
- remote media is quarantined, but we can test that local media is.
- """
- self.admin_user = self.register_user("admin", "pass", admin=True)
- self.admin_user_tok = self.login("admin", "pass")
-
- def _upload() -> str:
- return self.helper.upload_media(
- SMALL_PNG, tok=self.admin_user_tok, expect_code=200
- )["content_uri"][6:].split("/")[1] # Cut off 'mxc://' and domain
-
- self.media_id_1 = _upload()
- self.media_id_2 = _upload()
- self.media_id_3 = _upload()
-
- def _quarantine(media_id: str) -> None:
- channel = self.make_request(
- "POST",
- "/_synapse/admin/v1/media/quarantine/%s/%s"
- % (
- self.server_name,
- media_id,
- ),
- access_token=self.admin_user_tok,
- )
- self.assertEqual(200, channel.code, msg=channel.json_body)
-
- _quarantine(self.media_id_1)
- _quarantine(self.media_id_2)
- _quarantine(self.media_id_3)
-
- # Page 1
- channel = self.make_request(
- "GET",
- "/_synapse/admin/v1/media/quarantined?kind=local&from=0&limit=1",
- access_token=self.admin_user_tok,
- )
- self.assertEqual(200, channel.code, msg=channel.json_body)
- self.assertEqual(1, len(channel.json_body["media"]))
-
- # Page 2
- channel = self.make_request(
- "GET",
- "/_synapse/admin/v1/media/quarantined?kind=local&from=1&limit=1",
- access_token=self.admin_user_tok,
- )
- self.assertEqual(200, channel.code, msg=channel.json_body)
- self.assertEqual(1, len(channel.json_body["media"]))
-
- # Page 3
- channel = self.make_request(
- "GET",
- "/_synapse/admin/v1/media/quarantined?kind=local&from=2&limit=1",
- access_token=self.admin_user_tok,
- )
- self.assertEqual(200, channel.code, msg=channel.json_body)
- self.assertEqual(1, len(channel.json_body["media"]))
-
- # Page 4 (no media)
- channel = self.make_request(
- "GET",
- "/_synapse/admin/v1/media/quarantined?kind=local&from=3&limit=1",
- access_token=self.admin_user_tok,
- )
- self.assertEqual(200, channel.code, msg=channel.json_body)
- self.assertEqual(0, len(channel.json_body["media"]))
-
-
class QuarantineMediaByIDTestCase(_AdminMediaTests):
def upload_media_and_return_media_id(self, data: bytes) -> str:
# Upload some media into the room
From 16bc8c78ba14bfafcd21cc06489c4750b02fe56b Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 6 Jan 2026 14:49:09 -0700
Subject: [PATCH 46/59] Update changelog after reverting PR
---
CHANGES.md | 1 -
changelog.d/19351.misc | 1 -
2 files changed, 2 deletions(-)
delete mode 100644 changelog.d/19351.misc
diff --git a/CHANGES.md b/CHANGES.md
index d075da21c2..8192a0c0ac 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -11,7 +11,6 @@ The "Updates to locked dependencies" section has been removed from the changelog
## Features
- Add `memberships` endpoint to the admin API. This is useful for forensics and T&S purpose. ([\#19260](https://github.com/element-hq/synapse/issues/19260))
-- Add an admin API for retrieving a paginated list of quarantined media. ([\#19268](https://github.com/element-hq/synapse/issues/19268))
- Server admins can bypass the quarantine media check when downloading media by setting the `admin_unsafely_bypass_quarantine` query parameter to `true` on Client-Server API media download requests. ([\#19275](https://github.com/element-hq/synapse/issues/19275))
- Implemented pagination for the [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) mutual rooms endpoint. Contributed by @tulir @ Beeper. ([\#19279](https://github.com/element-hq/synapse/issues/19279))
- Admin API: add worker support to `GET /_synapse/admin/v2/users/`. ([\#19281](https://github.com/element-hq/synapse/issues/19281))
diff --git a/changelog.d/19351.misc b/changelog.d/19351.misc
deleted file mode 100644
index 5e186b9a0c..0000000000
--- a/changelog.d/19351.misc
+++ /dev/null
@@ -1 +0,0 @@
-Revert "Add an Admin API endpoint for listing quarantined media (#19268)".
From 13dff90b5bf94ae97644c0c8f19c444d4057d86b Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Wed, 7 Jan 2026 10:08:03 -0700
Subject: [PATCH 47/59] Fix sdist include formatting for maturin
---
pyproject.toml | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 7aa5840a9d..026f3e5870 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -415,20 +415,22 @@ line-ending = "auto"
manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"
python-source = "."
-sdist-include = [
+include = [
"AUTHORS.rst",
"book.toml",
- "changelog.d",
+ "changelog.d/**/*",
"CHANGES.md",
"CONTRIBUTING.md",
- "demo",
- "docs",
+ "demo/**/*",
+ "docs/**/*",
"INSTALL.md",
+ "LICENSE-AGPL-3.0",
+ "LICENSE-COMMERCIAL",
"mypy.ini",
- "scripts-dev",
- "synmark",
+ "scripts-dev/**/*",
+ "synmark/**/*",
"sytest-blacklist",
- "tests",
+ "tests/**/*",
"UPGRADE.rst",
"Cargo.toml",
"Cargo.lock",
@@ -436,7 +438,7 @@ sdist-include = [
"rust/build.rs",
"rust/src/**",
]
-sdist-exclude = ["synapse/*.so"]
+exclude = ["synapse/*.so"]
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
From ecd67df49d89b5efa413ca419def33f6c255312c Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Wed, 7 Jan 2026 10:11:44 -0700
Subject: [PATCH 48/59] 1.145.0rc2
---
CHANGES.md | 8 ++++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 8192a0c0ac..74a4c3d5e8 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,11 @@
+# Synapse 1.145.0rc2 (2026-01-07)
+
+No significant changes since 1.145.0rc1.
+
+This RC fixes the source distribution packaging for uploading to PyPI.
+
+
+
# Synapse 1.145.0rc1 (2026-01-06)
## End of Life of Ubuntu 25.04 Plucky Puffin
diff --git a/debian/changelog b/debian/changelog
index 3b7d0b773e..b0870c798c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.145.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.145.0rc2.
+
+ -- Synapse Packaging team Wed, 07 Jan 2026 10:10:07 -0700
+
matrix-synapse-py3 (1.145.0~rc1) stable; urgency=medium
* New Synapse release 1.145.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 026f3e5870..960d7891a8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
-version = "1.145.0rc1"
+version = "1.145.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
From 66b1daa6797e6385a292fe00de7d1a5537be29ea Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Wed, 7 Jan 2026 15:23:00 -0700
Subject: [PATCH 49/59] Limit maturin includes to sdist packaging
---
pyproject.toml | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 960d7891a8..93c70256eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -416,27 +416,27 @@ manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"
python-source = "."
include = [
- "AUTHORS.rst",
- "book.toml",
- "changelog.d/**/*",
- "CHANGES.md",
- "CONTRIBUTING.md",
- "demo/**/*",
- "docs/**/*",
- "INSTALL.md",
- "LICENSE-AGPL-3.0",
- "LICENSE-COMMERCIAL",
- "mypy.ini",
- "scripts-dev/**/*",
- "synmark/**/*",
- "sytest-blacklist",
- "tests/**/*",
- "UPGRADE.rst",
- "Cargo.toml",
- "Cargo.lock",
- "rust/Cargo.toml",
- "rust/build.rs",
- "rust/src/**",
+ { path = "AUTHORS.rst", format = "sdist" },
+ { path = "book.toml", format = "sdist" },
+ { path = "changelog.d/**/*", format = "sdist" },
+ { path = "CHANGES.md", format = "sdist" },
+ { path = "CONTRIBUTING.md", format = "sdist" },
+ { path = "demo/**/*", format = "sdist" },
+ { path = "docs/**/*", format = "sdist" },
+ { path = "INSTALL.md", format = "sdist" },
+ { path = "LICENSE-AGPL-3.0", format = "sdist" },
+ { path = "LICENSE-COMMERCIAL", format = "sdist" },
+ { path = "mypy.ini", format = "sdist" },
+ { path = "scripts-dev/**/*", format = "sdist" },
+ { path = "synmark/**/*", format = "sdist" },
+ { path = "sytest-blacklist", format = "sdist" },
+ { path = "tests/**/*", format = "sdist" },
+ { path = "UPGRADE.rst", format = "sdist" },
+ { path = "Cargo.toml", format = "sdist" },
+ { path = "Cargo.lock", format = "sdist" },
+ { path = "rust/Cargo.toml", format = "sdist" },
+ { path = "rust/build.rs", format = "sdist" },
+ { path = "rust/src/**", format = "sdist" },
]
exclude = ["synapse/*.so"]
From ade89c4317034fcfc86d98aaf749703c219fccf5 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Wed, 7 Jan 2026 15:33:27 -0700
Subject: [PATCH 50/59] 1.145.0rc3
---
CHANGES.md | 8 ++++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 74a4c3d5e8..f09196c3e0 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,11 @@
+# Synapse 1.145.0rc3 (2026-01-07)
+
+No significant changes since 1.145.0rc2.
+
+This RC strips out unnecessary files from the wheels that were added when fixing the source distribution packaging in the previous RC.
+
+
+
# Synapse 1.145.0rc2 (2026-01-07)
No significant changes since 1.145.0rc1.
diff --git a/debian/changelog b/debian/changelog
index b0870c798c..7d3961db63 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.145.0~rc3) stable; urgency=medium
+
+ * New Synapse release 1.145.0rc3.
+
+ -- Synapse Packaging team Wed, 07 Jan 2026 15:32:07 -0700
+
matrix-synapse-py3 (1.145.0~rc2) stable; urgency=medium
* New Synapse release 1.145.0rc2.
diff --git a/pyproject.toml b/pyproject.toml
index 93c70256eb..ea512c5656 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
-version = "1.145.0rc2"
+version = "1.145.0rc3"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
From 15700e0a322abd2d9db5fff694e83df9b0bdfd7a Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Thu, 8 Jan 2026 11:22:59 -0700
Subject: [PATCH 51/59] Only exclude .so files for sdist packaging
---
pyproject.toml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index ea512c5656..0a3dd0cf5d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -438,7 +438,9 @@ include = [
{ path = "rust/build.rs", format = "sdist" },
{ path = "rust/src/**", format = "sdist" },
]
-exclude = ["synapse/*.so"]
+exclude = [
+ { path = "synapse/*.so", format = "sdist" },
+]
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
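Taken together, patches 47, 49 and 51 converge on per-format `include`/`exclude` entries in the `[tool.maturin]` section: auxiliary files are shipped only in the source distribution, and the `.so` exclusion no longer applies to wheels. A minimal sketch of the resulting shape, assuming maturin's table syntax with a `format` key as used in the diffs above (the file list is abbreviated for illustration):

```toml
[tool.maturin]
manifest-path = "rust/Cargo.toml"
module-name = "synapse.synapse_rust"
python-source = "."
# Ship docs, tests and Rust sources only in the source distribution (sdist),
# keeping them out of the binary wheels.
include = [
    { path = "AUTHORS.rst", format = "sdist" },
    { path = "docs/**/*", format = "sdist" },
    { path = "rust/src/**", format = "sdist" },
    # ... remaining sdist-only entries as listed in the diff above ...
]
# Only strip pre-built extension modules from the sdist; wheels still need them.
exclude = [
    { path = "synapse/*.so", format = "sdist" },
]
```

Each release candidate in this series (rc2 through rc4) corresponds to one step of that iteration.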
From 438aa7c87696be5da366ba8aca43afaf421059b2 Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Thu, 8 Jan 2026 12:09:01 -0700
Subject: [PATCH 52/59] 1.145.0rc4
---
CHANGES.md | 8 ++++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index f09196c3e0..42f9a96546 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,11 @@
+# Synapse 1.145.0rc4 (2026-01-08)
+
+No significant changes since 1.145.0rc3.
+
+This RC contains a fix specifically for openSUSE packaging and no other changes.
+
+
+
# Synapse 1.145.0rc3 (2026-01-07)
No significant changes since 1.145.0rc2.
diff --git a/debian/changelog b/debian/changelog
index 7d3961db63..83bf194bad 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.145.0~rc4) stable; urgency=medium
+
+ * New Synapse release 1.145.0rc4.
+
+ -- Synapse Packaging team Thu, 08 Jan 2026 12:06:35 -0700
+
matrix-synapse-py3 (1.145.0~rc3) stable; urgency=medium
* New Synapse release 1.145.0rc3.
diff --git a/pyproject.toml b/pyproject.toml
index 0a3dd0cf5d..548c1284e8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
-version = "1.145.0rc3"
+version = "1.145.0rc4"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
From 27223a349c0cb5268782dc07b20f3450580165dc Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 13 Jan 2026 08:38:14 -0700
Subject: [PATCH 53/59] 1.145.0
---
CHANGES.md | 15 +++++++++++++++
debian/changelog | 6 ++++++
pyproject.toml | 2 +-
3 files changed, 22 insertions(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 42f9a96546..bb758b1bc3 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,18 @@
+# Synapse 1.145.0 (2026-01-13)
+
+No significant changes since 1.145.0rc4.
+
+## End of Life of Ubuntu 25.04 Plucky Puffin
+
+Ubuntu 25.04 (Plucky Puffin) will be end of life on Jan 17, 2026. Synapse will stop building packages for Ubuntu 25.04 shortly thereafter.
+
+## Updates to Locked Dependencies No Longer Included in Changelog
+
+The "Updates to locked dependencies" section has been removed from the changelog due to lack of use and the maintenance burden. ([\#19254](https://github.com/element-hq/synapse/issues/19254))
+
+
+
+
# Synapse 1.145.0rc4 (2026-01-08)
No significant changes since 1.145.0rc3.
diff --git a/debian/changelog b/debian/changelog
index 83bf194bad..6ea411f2d2 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.145.0) stable; urgency=medium
+
+ * New Synapse release 1.145.0.
+
+ -- Synapse Packaging team Tue, 13 Jan 2026 08:37:42 -0700
+
matrix-synapse-py3 (1.145.0~rc4) stable; urgency=medium
* New Synapse release 1.145.0rc4.
diff --git a/pyproject.toml b/pyproject.toml
index 548c1284e8..a6f7eac92c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
-version = "1.145.0rc4"
+version = "1.145.0"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
From 09c0135b5e1dff87d603bf011ff972037439649f Mon Sep 17 00:00:00 2001
From: FrenchGithubUser
Date: Thu, 15 Jan 2026 15:51:04 +0100
Subject: [PATCH 54/59] chore: regenerate `poetry.lock`
---
poetry.lock | 121 ++++------------------------------------------------
1 file changed, 8 insertions(+), 113 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 078daa5cf4..0689b7e675 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -396,111 +396,6 @@ files = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
]
-[[package]]
-name = "coverage"
-version = "7.13.1"
-description = "Code coverage measurement for Python"
-optional = false
-python-versions = ">=3.10"
-groups = ["dev"]
-files = [
- {file = "coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147"},
- {file = "coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d"},
- {file = "coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0"},
- {file = "coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90"},
- {file = "coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d"},
- {file = "coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b"},
- {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6"},
- {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e"},
- {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae"},
- {file = "coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29"},
- {file = "coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f"},
- {file = "coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1"},
- {file = "coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88"},
- {file = "coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3"},
- {file = "coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9"},
- {file = "coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee"},
- {file = "coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf"},
- {file = "coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3"},
- {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef"},
- {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851"},
- {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb"},
- {file = "coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba"},
- {file = "coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19"},
- {file = "coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a"},
- {file = "coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c"},
- {file = "coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3"},
- {file = "coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e"},
- {file = "coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c"},
- {file = "coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62"},
- {file = "coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968"},
- {file = "coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e"},
- {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f"},
- {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee"},
- {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf"},
- {file = "coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c"},
- {file = "coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7"},
- {file = "coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6"},
- {file = "coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c"},
- {file = "coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78"},
- {file = "coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b"},
- {file = "coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd"},
- {file = "coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992"},
- {file = "coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4"},
- {file = "coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a"},
- {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766"},
- {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4"},
- {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398"},
- {file = "coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784"},
- {file = "coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461"},
- {file = "coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500"},
- {file = "coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9"},
- {file = "coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc"},
- {file = "coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a"},
- {file = "coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4"},
- {file = "coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6"},
- {file = "coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1"},
- {file = "coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd"},
- {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c"},
- {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0"},
- {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e"},
- {file = "coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53"},
- {file = "coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842"},
- {file = "coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2"},
- {file = "coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09"},
- {file = "coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894"},
- {file = "coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a"},
- {file = "coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f"},
- {file = "coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909"},
- {file = "coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4"},
- {file = "coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75"},
- {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9"},
- {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465"},
- {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864"},
- {file = "coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9"},
- {file = "coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5"},
- {file = "coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a"},
- {file = "coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0"},
- {file = "coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a"},
- {file = "coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6"},
- {file = "coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673"},
- {file = "coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5"},
- {file = "coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d"},
- {file = "coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8"},
- {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486"},
- {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564"},
- {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7"},
- {file = "coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416"},
- {file = "coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f"},
- {file = "coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79"},
- {file = "coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4"},
- {file = "coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573"},
- {file = "coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd"},
-]
-
-[package.extras]
-toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
-
[[package]]
name = "cryptography"
version = "46.0.3"
@@ -1068,7 +963,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -2003,7 +1898,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing-jaeger\" or extra == \"all\" or extra == \"opentracing-otlp\""
+markers = "extra == \"opentracing\" or extra == \"all\" or extra == \"opentracing-otlp\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -3315,7 +3210,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -3331,7 +3226,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -3404,7 +3299,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
+markers = "extra == \"opentracing\" or extra == \"all\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -3902,13 +3797,13 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
-all = ["authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-opentracing-shim", "opentelemetry-sdk", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pympler", "pysaml2", "sentry-sdk", "txredisapi"]
+all = ["authlib", "defusedxml", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-opentracing-shim", "opentelemetry-sdk", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pympler", "pysaml2", "pytz", "sentry-sdk", "thrift", "tornado", "txredisapi"]
cache-memory = ["pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
opentelemetry-log-handler = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"]
-opentracing-jaeger = ["jaeger-client", "opentracing"]
+opentracing = ["jaeger-client", "opentracing", "thrift", "tornado"]
opentracing-otlp = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-opentracing-shim", "opentelemetry-sdk", "opentracing"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
redis = ["hiredis", "txredisapi"]
@@ -3921,4 +3816,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
-content-hash = "8063b3f9a676e166ea92f0b88cf48267ba66b332c677f63c8b73ec552fe53132"
+content-hash = "829184e5297c858c398050e55f69f001b03ca5d2b3af38432b4bb54368fb55e0"
From a9d628e0ff990d0f965cfc54a74b34bc3077c64f Mon Sep 17 00:00:00 2001
From: FrenchGithubUser
Date: Thu, 15 Jan 2026 16:14:48 +0100
Subject: [PATCH 55/59] chore: remove dependabot config as we use renovate
instead
It was already disabled by setting `open-pull-requests-limit: 0`, but
keeping the file around still caused extra work from merge conflicts.
---
.github/dependabot.yml | 94 ------------------------------------------
1 file changed, 94 deletions(-)
delete mode 100644 .github/dependabot.yml
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 3b0dc5b050..0000000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-# the lines "open-pull-requests-limit: 0" disable dependabot
-# remove them to re-enable it
-version: 2
-# As dependabot is currently only run on a weekly basis, we raise the
-# open-pull-requests-limit to 10 (from the default of 5) to better ensure we
-# don't continuously grow a backlog of updates.
-updates:
- - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
- package-ecosystem: "pip"
- directory: "/"
- open-pull-requests-limit: 10
- schedule:
- interval: "weekly"
- # Group patch updates to packages together into a single PR, as they rarely
- # if ever contain breaking changes that need to be reviewed separately.
- #
- # Less PRs means a streamlined review process.
- #
- # Python packages follow semantic versioning, and tend to only introduce
- # breaking changes in major version bumps. Thus, we'll group minor and patch
- # versions together.
- groups:
- minor-and-patches:
- applies-to: version-updates
- patterns:
- - "*"
- update-types:
- - "minor"
- - "patch"
- # Prevent pulling packages that were recently updated to help mitigate
- # supply chain attacks. 14 days was taken from the recommendation at
- # https://blog.yossarian.net/2025/11/21/We-should-all-be-using-dependency-cooldowns
- # where the author noted that 9/10 attacks would have been mitigated by a
- # two week cooldown.
- #
- # The cooldown only applies to general updates; security updates will still
- # be pulled in as soon as possible.
- cooldown:
- default-days: 14
-
- - package-ecosystem: "docker"
- directory: "/docker"
- open-pull-requests-limit: 10
- schedule:
- interval: "weekly"
- # For container versions, breaking changes are also typically only introduced in major
- # package bumps.
- groups:
- minor-and-patches:
- applies-to: version-updates
- patterns:
- - "*"
- update-types:
- - "minor"
- - "patch"
- cooldown:
- default-days: 14
-
- - package-ecosystem: "github-actions"
- directory: "/"
- open-pull-requests-limit: 10
- schedule:
- interval: "weekly"
- # Similarly for GitHub Actions, breaking changes are typically only introduced in major
- # package bumps.
- groups:
- minor-and-patches:
- applies-to: version-updates
- patterns:
- - "*"
- update-types:
- - "minor"
- - "patch"
- cooldown:
- default-days: 14
-
- - package-ecosystem: "cargo"
- directory: "/"
- open-pull-requests-limit: 10
- versioning-strategy: "lockfile-only"
- schedule:
- interval: "weekly"
- # The Rust ecosystem is special in that breaking changes are often introduced
- # in minor version bumps, as packages typically stay pre-1.0 for a long time.
- # Thus we specifically keep minor version bumps separate in their own PRs.
- groups:
- patches:
- applies-to: version-updates
- patterns:
- - "*"
- update-types:
- - "patch"
- cooldown:
- default-days: 14
From e0c13734619a15f5ff8bc11b9ae9c23a639abcd9 Mon Sep 17 00:00:00 2001
From: FrenchGithubUser
Date: Thu, 15 Jan 2026 16:15:58 +0100
Subject: [PATCH 56/59] fix: typo in changelog
`v1.143.0_1` was written instead of `v1.144.0_1`
---
CHANGES.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.md b/CHANGES.md
index 0b3c0db2ef..cfe92f4a27 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -117,7 +117,7 @@ that disable that endpoint by default.
No significant changes since 1.144.0rc1.
-### Famedly additions for v1.143.0_1
+### Famedly additions for v1.144.0_1
- ci: generate requirements.txt file from synapse's poetry.lock for invite-checker and token-authenticator tests ([\#227(https://github.com/famedly/synapse/pull/227)]) (FrenchGithubUser)
- ci: fix tests failing as the token-authenticator's synapse dependency is already pointing to the master branch ([\#226](https://github.com/famedly/synapse/pull/226)) (FrenchGithubUser)
- chore: Remove unused make_release.sh script and update README.rst ([\#224](https://github.com/famedly/synapse/pull/224)) (Jason Little)
From 1e28db8c263c54378e879d1333bbfc35c1d8cf1b Mon Sep 17 00:00:00 2001
From: FrenchGithubUser
Date: Thu, 15 Jan 2026 17:00:36 +0100
Subject: [PATCH 57/59] ci: Stream Complement progress and format logs in a
separate step after all tests are done
This is taken from [this upstream
commit](https://github.com/famedly/synapse/commit/bd94152e0644a00708e570b0e9abc775d9704015).
It wasn't picked up automatically because we run the Complement tests in
our own workflow file.
This way we can see what's happening while the tests run, instead of
seeing nothing until the end. It also splits the test output from the
formatting, so we can grab the raw test output before formatting gobbles
it all up.
The same change was made in
https://github.com/element-hq/synapse-rust-apps/pull/361
---
.github/workflows/famedly-tests.yml | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/famedly-tests.yml b/.github/workflows/famedly-tests.yml
index 03f8695c0c..9afa962405 100644
--- a/.github/workflows/famedly-tests.yml
+++ b/.github/workflows/famedly-tests.yml
@@ -342,14 +342,28 @@ jobs:
go-version-file: complement/go.mod
# use p=1 concurrency as GHA boxes are underpowered and don't like running tons of synapses at once.
- - run: |
+ - name: Run Complement Tests
+ id: run_complement_tests
+ # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
+ # are underpowered and don't like running tons of Synapse instances at once.
+ # -json: Output JSON format so that gotestfmt can parse it.
+ #
+ # tee /tmp/gotest.log: We tee the output to a file so that we can re-process it
+ # later on for better formatting with gotestfmt. But we still want the command
+ # to output to the terminal as it runs so we can see what's happening in
+ # real-time.
+ run: |
set -o pipefail
- COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | synapse/.ci/scripts/gotestfmt
+ COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest.log
shell: bash
env:
POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
- name: Run Complement Tests
+
+ - name: Formatted Complement test logs
+ # Always run this step if we attempted to run the Complement tests.
+ if: always() && steps.run_complement_tests.outcome != 'skipped'
+ run: cat /tmp/gotest.log | gotestfmt -hide "successful-downloads,empty-packages"
cargo-test:
runs-on: ubuntu-latest
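
After this patch, the two steps read roughly as follows (reassembled from the
hunk above for readability; the surrounding job definition and matrix are
omitted):

    # Sketch reassembled from the diff above; surrounding job context omitted.
    - name: Run Complement Tests
      id: run_complement_tests
      # -p 1 forces the test packages to run serially, since GHA runners are
      # underpowered. -json makes the output parseable by gotestfmt later.
      # tee writes the raw output to /tmp/gotest.log while still streaming it
      # to the terminal in real time.
      run: |
        set -o pipefail
        COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | tee /tmp/gotest.log
      shell: bash
      env:
        POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
        WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}

    - name: Formatted Complement test logs
      # Run whenever the test step was attempted, even if it failed, so the
      # formatted logs are still available for debugging.
      if: always() && steps.run_complement_tests.outcome != 'skipped'
      run: cat /tmp/gotest.log | gotestfmt -hide "successful-downloads,empty-packages"
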
From b52bebf904cb57c50e7675107c018fce7f8b2b11 Mon Sep 17 00:00:00 2001
From: FrenchGithubUser
Date: Thu, 15 Jan 2026 16:19:37 +0100
Subject: [PATCH 58/59] Update changelog
---
CHANGES.md | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/CHANGES.md b/CHANGES.md
index cfe92f4a27..9afeed7435 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -11,6 +11,11 @@ Ubuntu 25.04 (Plucky Puffin) will be end of life on Jan 17, 2026. Synapse will s
The "Updates to locked dependencies" section has been removed from the changelog due to lack of use and the maintenance burden. ([\#19254](https://github.com/element-hq/synapse/issues/19254))
+### Famedly additions for v1.145.0_1
+- fix: typo in changelog (FrenchGithubUser)
+- chore: remove dependabot config as we use renovate instead (FrenchGithubUser)
+- chore: bump Github actions versions in Famedly workflows ([\#229](https://github.com/famedly/synapse/pull/229)) (jason-famedly)
+
# Synapse 1.145.0rc4 (2026-01-08)
From 31e48d2b1d068f16f0d55832ab9842e6719f52f0 Mon Sep 17 00:00:00 2001
From: FrenchGithubUser
Date: Fri, 16 Jan 2026 12:43:04 +0100
Subject: [PATCH 59/59] fix: rename extra dependency `opentracing` to
`opentracing-jaeger`
This was missed during the merge conflict resolution; we want to keep the
`opentracing-jaeger` name to avoid ambiguity with the other opentracing extras.
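
For anyone installing this fork from a source checkout, the extra is now
selected under its new name. A minimal sketch (the commands below are not part
of this patch and assume a standard Poetry/pip source install):

    # Sketch only; assumes a source checkout of this fork.
    # With Poetry:
    poetry install --extras "opentracing-jaeger"
    # Or with pip, from the project root:
    pip install ".[opentracing-jaeger]"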
---
poetry.lock | 14 +++++++-------
pyproject.toml | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 0689b7e675..0366df120b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -963,7 +963,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -1898,7 +1898,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\" or extra == \"opentracing-otlp\""
+markers = "extra == \"opentracing-jaeger\" or extra == \"all\" or extra == \"opentracing-otlp\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -3210,7 +3210,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -3226,7 +3226,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -3299,7 +3299,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -3803,7 +3803,7 @@ jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
opentelemetry-log-handler = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"]
-opentracing = ["jaeger-client", "opentracing", "thrift", "tornado"]
+opentracing-jaeger = ["jaeger-client", "opentracing", "thrift", "tornado"]
opentracing-otlp = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-opentracing-shim", "opentelemetry-sdk", "opentracing"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
redis = ["hiredis", "txredisapi"]
@@ -3816,4 +3816,4 @@ url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
-content-hash = "829184e5297c858c398050e55f69f001b03ca5d2b3af38432b4bb54368fb55e0"
+content-hash = "0baec416520c1d2b9ec3d92d43dd26b27cce50a4b168a3cc6c6daa57df9f66da"
diff --git a/pyproject.toml b/pyproject.toml
index 0500c03694..c2184b3ba8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -141,7 +141,7 @@ oidc = ["authlib>=0.15.1"]
systemd = ["systemd-python>=231"]
url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"]
-opentracing = [
+opentracing-jaeger = [
"jaeger-client>=4.2.0",
"opentracing>=2.2.0",