From 46762fc2ca2a3d05045e758c51f9a7633c17744d Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 7 Mar 2021 13:07:01 +0100 Subject: Pipenv: add 'python-frontmatter' & re-lock --- Pipfile | 1 + Pipfile.lock | 106 ++++++++++++++++++++++++++++++++--------------------------- 2 files changed, 59 insertions(+), 48 deletions(-) diff --git a/Pipfile b/Pipfile index 024aa6eff..e222a2108 100644 --- a/Pipfile +++ b/Pipfile @@ -21,6 +21,7 @@ lxml = "~=4.4" markdownify = "==0.5.3" more_itertools = "~=8.2" python-dateutil = "~=2.8" +python-frontmatter = "~=0.5.0" pyyaml = "~=5.1" requests = "~=2.22" sentry-sdk = "~=0.19" diff --git a/Pipfile.lock b/Pipfile.lock index dc7f6f21f..01a78af9b 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "81ca9d1891e71de1c3f71958f082e1a8cad71e5b3ca425dc561d0ae74664fdb0" + "sha256": "e8b1d8e8a3b258f482c25fe396aaa3255c749fdeae26770fccd7ce1a35f41180" }, "pipfile-spec": 6, "requires": { @@ -34,46 +34,46 @@ }, "aiohttp": { "hashes": [ - "sha256:119feb2bd551e58d83d1b38bfa4cb921af8ddedec9fad7183132db334c3133e0", - "sha256:16d0683ef8a6d803207f02b899c928223eb219111bd52420ef3d7a8aa76227b6", - "sha256:2eb3efe243e0f4ecbb654b08444ae6ffab37ac0ef8f69d3a2ffb958905379daf", - "sha256:2ffea7904e70350da429568113ae422c88d2234ae776519549513c8f217f58a9", - "sha256:40bd1b101b71a18a528ffce812cc14ff77d4a2a1272dfb8b11b200967489ef3e", - "sha256:418597633b5cd9639e514b1d748f358832c08cd5d9ef0870026535bd5eaefdd0", - "sha256:481d4b96969fbfdcc3ff35eea5305d8565a8300410d3d269ccac69e7256b1329", - "sha256:4c1bdbfdd231a20eee3e56bd0ac1cd88c4ff41b64ab679ed65b75c9c74b6c5c2", - "sha256:5563ad7fde451b1986d42b9bb9140e2599ecf4f8e42241f6da0d3d624b776f40", - "sha256:58c62152c4c8731a3152e7e650b29ace18304d086cb5552d317a54ff2749d32a", - "sha256:5b50e0b9460100fe05d7472264d1975f21ac007b35dcd6fd50279b72925a27f4", - "sha256:5d84ecc73141d0a0d61ece0742bb7ff5751b0657dab8405f899d3ceb104cc7de", - "sha256:5dde6d24bacac480be03f4f864e9a67faac5032e28841b00533cd168ab39cad9", - "sha256:5e91e927003d1ed9283dee9abcb989334fc8e72cf89ebe94dc3e07e3ff0b11e9", - "sha256:62bc216eafac3204877241569209d9ba6226185aa6d561c19159f2e1cbb6abfb", - "sha256:6c8200abc9dc5f27203986100579fc19ccad7a832c07d2bc151ce4ff17190076", - "sha256:6ca56bdfaf825f4439e9e3673775e1032d8b6ea63b8953d3812c71bd6a8b81de", - "sha256:71680321a8a7176a58dfbc230789790639db78dad61a6e120b39f314f43f1907", - "sha256:7c7820099e8b3171e54e7eedc33e9450afe7cd08172632d32128bd527f8cb77d", - "sha256:7dbd087ff2f4046b9b37ba28ed73f15fd0bc9f4fdc8ef6781913da7f808d9536", - "sha256:822bd4fd21abaa7b28d65fc9871ecabaddc42767884a626317ef5b75c20e8a2d", - "sha256:8ec1a38074f68d66ccb467ed9a673a726bb397142c273f90d4ba954666e87d54", - "sha256:950b7ef08b2afdab2488ee2edaff92a03ca500a48f1e1aaa5900e73d6cf992bc", - "sha256:99c5a5bf7135607959441b7d720d96c8e5c46a1f96e9d6d4c9498be8d5f24212", - "sha256:b84ad94868e1e6a5e30d30ec419956042815dfaea1b1df1cef623e4564c374d9", - "sha256:bc3d14bf71a3fb94e5acf5bbf67331ab335467129af6416a437bd6024e4f743d", - "sha256:c2a80fd9a8d7e41b4e38ea9fe149deed0d6aaede255c497e66b8213274d6d61b", - "sha256:c44d3c82a933c6cbc21039326767e778eface44fca55c65719921c4b9661a3f7", - "sha256:cc31e906be1cc121ee201adbdf844522ea3349600dd0a40366611ca18cd40e81", - "sha256:d5d102e945ecca93bcd9801a7bb2fa703e37ad188a2f81b1e65e4abe4b51b00c", - "sha256:dd7936f2a6daa861143e376b3a1fb56e9b802f4980923594edd9ca5670974895", - "sha256:dee68ec462ff10c1d836c0ea2642116aba6151c6880b688e56b4c0246770f297", - "sha256:e76e78863a4eaec3aee5722d85d04dcbd9844bc6cd3bfa6aa880ff46ad16bfcb", - 
"sha256:eab51036cac2da8a50d7ff0ea30be47750547c9aa1aa2cf1a1b710a1827e7dbe", - "sha256:f4496d8d04da2e98cc9133e238ccebf6a13ef39a93da2e87146c8c8ac9768242", - "sha256:fbd3b5e18d34683decc00d9a360179ac1e7a320a5fee10ab8053ffd6deab76e0", - "sha256:feb24ff1226beeb056e247cf2e24bba5232519efb5645121c4aea5b6ad74c1f2" + "sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe", + "sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe", + "sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5", + "sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8", + "sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd", + "sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb", + "sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c", + "sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87", + "sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0", + "sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290", + "sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5", + "sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287", + "sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde", + "sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf", + "sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8", + "sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16", + "sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf", + "sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809", + "sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213", + "sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f", + "sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013", + "sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b", + "sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9", + "sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5", + "sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb", + "sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df", + "sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4", + "sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439", + "sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f", + "sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22", + "sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f", + "sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5", + "sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970", + "sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009", + "sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc", + "sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a", + "sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95" ], "index": "pypi", - "version": "==3.7.4" + "version": "==3.7.4.post0" }, "aioping": { "hashes": [ @@ -210,10 +210,11 @@ }, "chardet": { "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + 
"sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], - "version": "==3.0.4" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.0.0" }, "colorama": { "hashes": [ @@ -596,11 +597,11 @@ }, "pygments": { "hashes": [ - "sha256:37a13ba168a02ac54cc5891a42b1caec333e59b66addb7fa633ea8a6d73445c0", - "sha256:b21b072d0ccdf29297a82a2363359d99623597b8a265b8081760e4d0f7153c88" + "sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94", + "sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8" ], "markers": "python_version >= '3.5'", - "version": "==2.8.0" + "version": "==2.8.1" }, "pyparsing": { "hashes": [ @@ -618,6 +619,14 @@ "index": "pypi", "version": "==2.8.1" }, + "python-frontmatter": { + "hashes": [ + "sha256:a7dcdfdaf498d488dce98bfa9452f8b70f803a923760ceab1ebd99291d98d28a", + "sha256:a9c2e90fc38e9f0c68d8b82299040f331ca3b8525ac7fa5f6beffef52b26c426" + ], + "index": "pypi", + "version": "==0.5.0" + }, "python-json-logger": { "hashes": [ "sha256:f26eea7898db40609563bed0a7ca11af12e2a79858632706d835a0f961b7d398" @@ -870,10 +879,11 @@ }, "chardet": { "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], - "version": "==3.0.4" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.0.0" }, "coverage": { "hashes": [ -- cgit v1.2.3 From 98b24b2fd38907807f7cd6c837687f0708afb9e1 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 16:00:41 +0100 Subject: Branding: remove current implementation Since we're planning substantial changes, it will be easier to build from scratch. --- bot/decorators.py | 23 +- bot/exts/backend/branding/__init__.py | 6 +- bot/exts/backend/branding/_cog.py | 556 +------------------------------ bot/exts/backend/branding/_constants.py | 48 +-- bot/exts/backend/branding/_decorators.py | 27 -- bot/exts/backend/branding/_errors.py | 2 - bot/exts/backend/branding/_seasons.py | 175 ---------- bot/exts/backend/error_handler.py | 7 +- 8 files changed, 32 insertions(+), 812 deletions(-) delete mode 100644 bot/exts/backend/branding/_decorators.py delete mode 100644 bot/exts/backend/branding/_errors.py delete mode 100644 bot/exts/backend/branding/_seasons.py diff --git a/bot/decorators.py b/bot/decorators.py index 063c8f878..0b50cc365 100644 --- a/bot/decorators.py +++ b/bot/decorators.py @@ -1,4 +1,5 @@ import asyncio +import functools import logging import typing as t from contextlib import suppress @@ -8,7 +9,7 @@ from discord import Member, NotFound from discord.ext import commands from discord.ext.commands import Cog, Context -from bot.constants import Channels, RedirectOutput +from bot.constants import Channels, DEBUG_MODE, RedirectOutput from bot.utils import function from bot.utils.checks import in_whitelist_check @@ -153,3 +154,23 @@ def respect_role_hierarchy(member_arg: function.Argument) -> t.Callable: await func(*args, **kwargs) return wrapper return decorator + + +def mock_in_debug(return_value: t.Any) -> t.Callable: + """ + Short-circuit function execution if in debug mode and return `return_value`. + + The original function name, and the incoming args and kwargs are DEBUG level logged + upon each call. 
This is useful for expensive operations, i.e. media asset uploads + that are prone to rate-limits but need to be tested extensively. + """ + def decorator(func: t.Callable) -> t.Callable: + @functools.wraps(func) + async def wrapped(*args, **kwargs) -> t.Any: + """Short-circuit and log if in debug mode.""" + if DEBUG_MODE: + log.debug(f"Function {func.__name__} called with args: {args}, kwargs: {kwargs}") + return return_value + return await func(*args, **kwargs) + return wrapped + return decorator diff --git a/bot/exts/backend/branding/__init__.py b/bot/exts/backend/branding/__init__.py index 81ea3bf49..20a747b7f 100644 --- a/bot/exts/backend/branding/__init__.py +++ b/bot/exts/backend/branding/__init__.py @@ -1,7 +1,7 @@ from bot.bot import Bot -from bot.exts.backend.branding._cog import BrandingManager +from bot.exts.backend.branding._cog import Branding def setup(bot: Bot) -> None: - """Loads BrandingManager cog.""" - bot.add_cog(BrandingManager(bot)) + """Load Branding cog.""" + bot.add_cog(Branding(bot)) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 20df83a89..afe575e1a 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -1,566 +1,14 @@ -import asyncio -import itertools import logging -import random -import typing as t -from datetime import datetime, time, timedelta -import arrow -import async_timeout -import discord -from async_rediscache import RedisCache from discord.ext import commands from bot.bot import Bot -from bot.constants import Branding, Colours, Emojis, Guild, MODERATION_ROLES -from bot.exts.backend.branding import _constants, _decorators, _errors, _seasons log = logging.getLogger(__name__) -class GitHubFile(t.NamedTuple): - """ - Represents a remote file on GitHub. - - The `sha` hash is kept so that we can determine that a file has changed, - despite its filename remaining unchanged. - """ - - download_url: str - path: str - sha: str - - -def pretty_files(files: t.Iterable[GitHubFile]) -> str: - """Provide a human-friendly representation of `files`.""" - return "\n".join(file.path for file in files) - - -def time_until_midnight() -> timedelta: - """ - Determine amount of time until the next-up UTC midnight. - - The exact `midnight` moment is actually delayed to 5 seconds after, in order - to avoid potential problems due to imprecise sleep. - """ - now = datetime.utcnow() - tomorrow = now + timedelta(days=1) - midnight = datetime.combine(tomorrow, time(second=5)) - - return midnight - now - - -class BrandingManager(commands.Cog): - """ - Manages the guild's branding. - - The purpose of this cog is to help automate the synchronization of the branding - repository with the guild. It is capable of discovering assets in the repository - via GitHub's API, resolving download urls for them, and delegating - to the `bot` instance to upload them to the guild. - - BrandingManager is designed to be entirely autonomous. Its `daemon` background task awakens - once a day (see `time_until_midnight`) to detect new seasons, or to cycle icons within a single - season. The daemon can be turned on and off via the `daemon` cmd group. The value set via - its `start` and `stop` commands is persisted across sessions. If turned on, the daemon will - automatically start on the next bot start-up. Otherwise, it will wait to be started manually. - - All supported operations, e.g. 
setting seasons, applying the branding, or cycling icons, can - also be invoked manually, via the following API: - - branding list - - Show all available seasons - - branding set - - Set the cog's internal state to represent `season_name`, if it exists - - If no `season_name` is given, set chronologically current season - - This will not automatically apply the season's branding to the guild, - the cog's state can be detached from the guild - - Seasons can therefore be 'previewed' using this command - - branding info - - View detailed information about resolved assets for current season - - branding refresh - - Refresh internal state, i.e. synchronize with branding repository - - branding apply - - Apply the current internal state to the guild, i.e. upload the assets - - branding cycle - - If there are multiple available icons for current season, randomly pick - and apply the next one - - The daemon calls these methods autonomously as appropriate. The use of this cog - is locked to moderation roles. As it performs media asset uploads, it is prone to - rate-limits - the `apply` command should be used with caution. The `set` command can, - however, be used freely to 'preview' seasonal branding and check whether paths have been - resolved as appropriate. - - While the bot is in debug mode, it will 'mock' asset uploads by logging the passed - download urls and pretending that the upload was successful. Make use of this - to test this cog's behaviour. - """ - - current_season: t.Type[_seasons.SeasonBase] - - banner: t.Optional[GitHubFile] - - available_icons: t.List[GitHubFile] - remaining_icons: t.List[GitHubFile] - - days_since_cycle: t.Iterator - - daemon: t.Optional[asyncio.Task] - - # Branding configuration - branding_configuration = RedisCache() +class Branding(commands.Cog): + """Guild branding management.""" def __init__(self, bot: Bot) -> None: - """ - Assign safe default values on init. - - At this point, we don't have information about currently available branding. - Most of these attributes will be overwritten once the daemon connects, or once - the `refresh` command is used. - """ self.bot = bot - self.current_season = _seasons.get_current_season() - - self.banner = None - - self.available_icons = [] - self.remaining_icons = [] - - self.days_since_cycle = itertools.cycle([None]) - - self.daemon = None - self._startup_task = self.bot.loop.create_task(self._initial_start_daemon()) - - async def _initial_start_daemon(self) -> None: - """Checks is daemon active and when is, start it at cog load.""" - if await self.branding_configuration.get("daemon_active"): - self.daemon = self.bot.loop.create_task(self._daemon_func()) - - @property - def _daemon_running(self) -> bool: - """True if the daemon is currently active, False otherwise.""" - return self.daemon is not None and not self.daemon.done() - - async def _daemon_func(self) -> None: - """ - Manage all automated behaviour of the BrandingManager cog. - - Once a day, the daemon will perform the following tasks: - - Update `current_season` - - Poll GitHub API to see if the available branding for `current_season` has changed - - Update assets if changes are detected (banner, guild icon, bot avatar, bot nickname) - - Check whether it's time to cycle guild icons - - The internal loop runs once when activated, then periodically at the time - given by `time_until_midnight`. - - All method calls in the internal loop are considered safe, i.e. no errors propagate - to the daemon's loop. 
The daemon itself does not perform any error handling on its own. - """ - await self.bot.wait_until_guild_available() - - while True: - self.current_season = _seasons.get_current_season() - branding_changed = await self.refresh() - - if branding_changed: - await self.apply() - - elif next(self.days_since_cycle) == Branding.cycle_frequency: - await self.cycle() - - until_midnight = time_until_midnight() - await asyncio.sleep(until_midnight.total_seconds()) - - async def _info_embed(self) -> discord.Embed: - """Make an informative embed representing current season.""" - info_embed = discord.Embed(description=self.current_season.description, colour=self.current_season.colour) - - # If we're in a non-evergreen season, also show active months - if self.current_season is not _seasons.SeasonBase: - title = f"{self.current_season.season_name} ({', '.join(str(m) for m in self.current_season.months)})" - else: - title = self.current_season.season_name - - # Use the author field to show the season's name and avatar if available - info_embed.set_author(name=title) - - banner = self.banner.path if self.banner is not None else "Unavailable" - info_embed.add_field(name="Banner", value=banner, inline=False) - - icons = pretty_files(self.available_icons) or "Unavailable" - info_embed.add_field(name="Available icons", value=icons, inline=False) - - # Only display cycle frequency if we're actually cycling - if len(self.available_icons) > 1 and Branding.cycle_frequency: - info_embed.set_footer(text=f"Icon cycle frequency: {Branding.cycle_frequency}") - - return info_embed - - async def _reset_remaining_icons(self) -> None: - """Set `remaining_icons` to a shuffled copy of `available_icons`.""" - self.remaining_icons = random.sample(self.available_icons, k=len(self.available_icons)) - - async def _reset_days_since_cycle(self) -> None: - """ - Reset the `days_since_cycle` iterator based on configured frequency. - - If the current season only has 1 icon, or if `Branding.cycle_frequency` is falsey, - the iterator will always yield None. This signals that the icon shouldn't be cycled. - - Otherwise, it will yield ints in range [1, `Branding.cycle_frequency`] indefinitely. - When the iterator yields a value equal to `Branding.cycle_frequency`, it is time to cycle. - """ - if len(self.available_icons) > 1 and Branding.cycle_frequency: - sequence = range(1, Branding.cycle_frequency + 1) - else: - sequence = [None] - - self.days_since_cycle = itertools.cycle(sequence) - - async def _get_files(self, path: str, include_dirs: bool = False) -> t.Dict[str, GitHubFile]: - """ - Get files at `path` in the branding repository. - - If `include_dirs` is False (default), only returns files at `path`. - Otherwise, will return both files and directories. Never returns symlinks. - - Return dict mapping from filename to corresponding `GitHubFile` instance. - This may return an empty dict if the response status is non-200, - or if the target directory is empty. 
- """ - url = f"{_constants.BRANDING_URL}/{path}" - async with self.bot.http_session.get( - url, headers=_constants.HEADERS, params=_constants.PARAMS - ) as resp: - # Short-circuit if we get non-200 response - if resp.status != _constants.STATUS_OK: - log.error(f"GitHub API returned non-200 response: {resp}") - return {} - directory = await resp.json() # Directory at `path` - - allowed_types = {"file", "dir"} if include_dirs else {"file"} - return { - file["name"]: GitHubFile(file["download_url"], file["path"], file["sha"]) - for file in directory - if file["type"] in allowed_types - } - - async def refresh(self) -> bool: - """ - Synchronize available assets with branding repository. - - If the current season is not the evergreen, and lacks at least one asset, - we use the evergreen seasonal dir as fallback for missing assets. - - Finally, if neither the seasonal nor fallback branding directories contain - an asset, it will simply be ignored. - - Return True if the branding has changed. This will be the case when we enter - a new season, or when something changes in the current seasons's directory - in the branding repository. - """ - old_branding = (self.banner, self.available_icons) - seasonal_dir = await self._get_files(self.current_season.branding_path, include_dirs=True) - - # Only make a call to the fallback directory if there is something to be gained - branding_incomplete = any( - asset not in seasonal_dir - for asset in (_constants.FILE_BANNER, _constants.FILE_AVATAR, _constants.SERVER_ICONS) - ) - if branding_incomplete and self.current_season is not _seasons.SeasonBase: - fallback_dir = await self._get_files( - _seasons.SeasonBase.branding_path, include_dirs=True - ) - else: - fallback_dir = {} - - # Resolve assets in this directory, None is a safe value - self.banner = ( - seasonal_dir.get(_constants.FILE_BANNER) - or fallback_dir.get(_constants.FILE_BANNER) - ) - - # Now resolve server icons by making a call to the proper sub-directory - if _constants.SERVER_ICONS in seasonal_dir: - icons_dir = await self._get_files( - f"{self.current_season.branding_path}/{_constants.SERVER_ICONS}" - ) - self.available_icons = list(icons_dir.values()) - - elif _constants.SERVER_ICONS in fallback_dir: - icons_dir = await self._get_files( - f"{_seasons.SeasonBase.branding_path}/{_constants.SERVER_ICONS}" - ) - self.available_icons = list(icons_dir.values()) - - else: - self.available_icons = [] # This should never be the case, but an empty list is a safe value - - # GitHubFile instances carry a `sha` attr so this will pick up if a file changes - branding_changed = old_branding != (self.banner, self.available_icons) - - if branding_changed: - log.info(f"New branding detected (season: {self.current_season.season_name})") - await self._reset_remaining_icons() - await self._reset_days_since_cycle() - - return branding_changed - - async def cycle(self) -> bool: - """ - Apply the next-up server icon. - - Returns True if an icon is available and successfully gets applied, False otherwise. - """ - if not self.available_icons: - log.info("Cannot cycle: no icons for this season") - return False - - if not self.remaining_icons: - log.info("Reset & shuffle remaining icons") - await self._reset_remaining_icons() - - next_up = self.remaining_icons.pop(0) - success = await self.set_icon(next_up.download_url) - - return success - - async def apply(self) -> t.List[str]: - """ - Apply current branding to the guild and bot. - - This delegates to the bot instance to do all the work. 
We only provide download urls - for available assets. Assets unavailable in the branding repo will be ignored. - - Returns a list of names of all failed assets. An asset is considered failed - if it isn't found in the branding repo, or if something goes wrong while the - bot is trying to apply it. - - An empty list denotes that all assets have been applied successfully. - """ - report = {asset: False for asset in ("banner", "icon")} - - if self.banner is not None: - report["banner"] = await self.set_banner(self.banner.download_url) - - report["icon"] = await self.cycle() - - failed_assets = [asset for asset, succeeded in report.items() if not succeeded] - return failed_assets - - @commands.has_any_role(*MODERATION_ROLES) - @commands.group(name="branding") - async def branding_cmds(self, ctx: commands.Context) -> None: - """Manual branding control.""" - if not ctx.invoked_subcommand: - await ctx.send_help(ctx.command) - - @branding_cmds.command(name="list", aliases=["ls"]) - async def branding_list(self, ctx: commands.Context) -> None: - """List all available seasons and branding sources.""" - embed = discord.Embed(title="Available seasons", colour=Colours.soft_green) - - for season in _seasons.get_all_seasons(): - if season is _seasons.SeasonBase: - active_when = "always" - else: - active_when = f"in {', '.join(str(m) for m in season.months)}" - - description = ( - f"Active {active_when}\n" - f"Branding: {season.branding_path}" - ) - embed.add_field(name=season.season_name, value=description, inline=False) - - await ctx.send(embed=embed) - - @branding_cmds.command(name="set") - async def branding_set(self, ctx: commands.Context, *, season_name: t.Optional[str] = None) -> None: - """ - Manually set season, or reset to current if none given. - - Season search is a case-less comparison against both seasonal class name, - and its `season_name` attr. - - This only pre-loads the cog's internal state to the chosen season, but does not - automatically apply the branding. As that is an expensive operation, the `apply` - command must be called explicitly after this command finishes. - - This means that this command can be used to 'preview' a season gathering info - about its available assets, without applying them to the guild. - - If the daemon is running, it will automatically reset the season to current when - it wakes up. The season set via this command can therefore remain 'detached' from - what it should be - the daemon will make sure that it's set back properly. - """ - if season_name is None: - new_season = _seasons.get_current_season() - else: - new_season = _seasons.get_season(season_name) - if new_season is None: - raise _errors.BrandingError("No such season exists") - - if self.current_season is new_season: - raise _errors.BrandingError(f"Season {self.current_season.season_name} already active") - - self.current_season = new_season - await self.branding_refresh(ctx) - - @branding_cmds.command(name="info", aliases=["status"]) - async def branding_info(self, ctx: commands.Context) -> None: - """ - Show available assets for current season. - - This can be used to confirm that assets have been resolved properly. - When `apply` is used, it attempts to upload exactly the assets listed here. 
- """ - await ctx.send(embed=await self._info_embed()) - - @branding_cmds.command(name="refresh") - async def branding_refresh(self, ctx: commands.Context) -> None: - """Sync currently available assets with branding repository.""" - async with ctx.typing(): - await self.refresh() - await self.branding_info(ctx) - - @branding_cmds.command(name="apply") - async def branding_apply(self, ctx: commands.Context) -> None: - """ - Apply current season's branding to the guild. - - Use `info` to check which assets will be applied. Shows which assets have - failed to be applied, if any. - """ - async with ctx.typing(): - failed_assets = await self.apply() - if failed_assets: - raise _errors.BrandingError( - f"Failed to apply following assets: {', '.join(failed_assets)}" - ) - - response = discord.Embed(description=f"All assets applied {Emojis.ok_hand}", colour=Colours.soft_green) - await ctx.send(embed=response) - - @branding_cmds.command(name="cycle") - async def branding_cycle(self, ctx: commands.Context) -> None: - """ - Apply the next-up guild icon, if multiple are available. - - The order is random. - """ - async with ctx.typing(): - success = await self.cycle() - if not success: - raise _errors.BrandingError("Failed to cycle icon") - - response = discord.Embed(description=f"Success {Emojis.ok_hand}", colour=Colours.soft_green) - await ctx.send(embed=response) - - @branding_cmds.group(name="daemon", aliases=["d", "task"]) - async def daemon_group(self, ctx: commands.Context) -> None: - """Control the background daemon.""" - if not ctx.invoked_subcommand: - await ctx.send_help(ctx.command) - - @daemon_group.command(name="status") - async def daemon_status(self, ctx: commands.Context) -> None: - """Check whether daemon is currently active.""" - if self._daemon_running: - remaining_time = (arrow.utcnow() + time_until_midnight()).humanize() - response = discord.Embed(description=f"Daemon running {Emojis.ok_hand}", colour=Colours.soft_green) - response.set_footer(text=f"Next refresh {remaining_time}") - else: - response = discord.Embed(description="Daemon not running", colour=Colours.soft_red) - - await ctx.send(embed=response) - - @daemon_group.command(name="start") - async def daemon_start(self, ctx: commands.Context) -> None: - """If the daemon isn't running, start it.""" - if self._daemon_running: - raise _errors.BrandingError("Daemon already running!") - - self.daemon = self.bot.loop.create_task(self._daemon_func()) - await self.branding_configuration.set("daemon_active", True) - - response = discord.Embed(description=f"Daemon started {Emojis.ok_hand}", colour=Colours.soft_green) - await ctx.send(embed=response) - - @daemon_group.command(name="stop") - async def daemon_stop(self, ctx: commands.Context) -> None: - """If the daemon is running, stop it.""" - if not self._daemon_running: - raise _errors.BrandingError("Daemon not running!") - - self.daemon.cancel() - await self.branding_configuration.set("daemon_active", False) - - response = discord.Embed(description=f"Daemon stopped {Emojis.ok_hand}", colour=Colours.soft_green) - await ctx.send(embed=response) - - async def _fetch_image(self, url: str) -> bytes: - """Retrieve and read image from `url`.""" - log.debug(f"Getting image from: {url}") - async with self.bot.http_session.get(url) as resp: - return await resp.read() - - async def _apply_asset(self, target: discord.Guild, asset: _constants.AssetType, url: str) -> bool: - """ - Internal method for applying media assets to the guild. - - This shouldn't be called directly. 
The purpose of this method is mainly generic - error handling to reduce needless code repetition. - - Return True if upload was successful, False otherwise. - """ - log.info(f"Attempting to set {asset.name}: {url}") - - kwargs = {asset.value: await self._fetch_image(url)} - try: - async with async_timeout.timeout(5): - await target.edit(**kwargs) - - except asyncio.TimeoutError: - log.info("Asset upload timed out") - return False - - except discord.HTTPException as discord_error: - log.exception("Asset upload failed", exc_info=discord_error) - return False - - else: - log.info("Asset successfully applied") - return True - - @_decorators.mock_in_debug(return_value=True) - async def set_banner(self, url: str) -> bool: - """Set the guild's banner to image at `url`.""" - guild = self.bot.get_guild(Guild.id) - if guild is None: - log.info("Failed to get guild instance, aborting asset upload") - return False - - return await self._apply_asset(guild, _constants.AssetType.BANNER, url) - - @_decorators.mock_in_debug(return_value=True) - async def set_icon(self, url: str) -> bool: - """Sets the guild's icon to image at `url`.""" - guild = self.bot.get_guild(Guild.id) - if guild is None: - log.info("Failed to get guild instance, aborting asset upload") - return False - - return await self._apply_asset(guild, _constants.AssetType.SERVER_ICON, url) - - def cog_unload(self) -> None: - """Cancels startup and daemon task.""" - self._startup_task.cancel() - if self.daemon is not None: - self.daemon.cancel() diff --git a/bot/exts/backend/branding/_constants.py b/bot/exts/backend/branding/_constants.py index dbc7615f2..8afac6538 100644 --- a/bot/exts/backend/branding/_constants.py +++ b/bot/exts/backend/branding/_constants.py @@ -1,51 +1,11 @@ -from enum import Enum, IntEnum - from bot.constants import Keys +# Base URL for requests into the branding repository +BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents" -class Month(IntEnum): - """All month constants for seasons.""" - - JANUARY = 1 - FEBRUARY = 2 - MARCH = 3 - APRIL = 4 - MAY = 5 - JUNE = 6 - JULY = 7 - AUGUST = 8 - SEPTEMBER = 9 - OCTOBER = 10 - NOVEMBER = 11 - DECEMBER = 12 - - def __str__(self) -> str: - return self.name.title() - - -class AssetType(Enum): - """ - Discord media assets. - - The values match exactly the kwarg keys that can be passed to `Guild.edit`. 
- """ - - BANNER = "banner" - SERVER_ICON = "icon" - - -STATUS_OK = 200 # HTTP status code - -FILE_BANNER = "banner.png" -FILE_AVATAR = "avatar.png" -SERVER_ICONS = "server_icons" - -BRANDING_URL = "https://api.github.com/repos/python-discord/branding/contents" - -PARAMS = {"ref": "master"} # Target branch +PARAMS = {"ref": "kwzrd/events-rework"} # Target branch HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3 -# A GitHub token is not necessary for the cog to operate, -# unauthorized requests are however limited to 60 per hour +# A GitHub token is not necessary for the cog to operate, unauthorized requests are however limited to 60 per hour if Keys.github: HEADERS["Authorization"] = f"token {Keys.github}" diff --git a/bot/exts/backend/branding/_decorators.py b/bot/exts/backend/branding/_decorators.py deleted file mode 100644 index 6a1e7e869..000000000 --- a/bot/exts/backend/branding/_decorators.py +++ /dev/null @@ -1,27 +0,0 @@ -import functools -import logging -import typing as t - -from bot.constants import DEBUG_MODE - -log = logging.getLogger(__name__) - - -def mock_in_debug(return_value: t.Any) -> t.Callable: - """ - Short-circuit function execution if in debug mode and return `return_value`. - - The original function name, and the incoming args and kwargs are DEBUG level logged - upon each call. This is useful for expensive operations, i.e. media asset uploads - that are prone to rate-limits but need to be tested extensively. - """ - def decorator(func: t.Callable) -> t.Callable: - @functools.wraps(func) - async def wrapped(*args, **kwargs) -> t.Any: - """Short-circuit and log if in debug mode.""" - if DEBUG_MODE: - log.debug(f"Function {func.__name__} called with args: {args}, kwargs: {kwargs}") - return return_value - return await func(*args, **kwargs) - return wrapped - return decorator diff --git a/bot/exts/backend/branding/_errors.py b/bot/exts/backend/branding/_errors.py deleted file mode 100644 index 7cd271af3..000000000 --- a/bot/exts/backend/branding/_errors.py +++ /dev/null @@ -1,2 +0,0 @@ -class BrandingError(Exception): - """Exception raised by the BrandingManager cog.""" diff --git a/bot/exts/backend/branding/_seasons.py b/bot/exts/backend/branding/_seasons.py deleted file mode 100644 index 5f6256b30..000000000 --- a/bot/exts/backend/branding/_seasons.py +++ /dev/null @@ -1,175 +0,0 @@ -import logging -import typing as t -from datetime import datetime - -from bot.constants import Colours -from bot.exts.backend.branding._constants import Month -from bot.exts.backend.branding._errors import BrandingError - -log = logging.getLogger(__name__) - - -class SeasonBase: - """ - Base for Seasonal classes. - - This serves as the off-season fallback for when no specific - seasons are active. - - Seasons are 'registered' simply by inheriting from `SeasonBase`. - We discover them by calling `__subclasses__`. - """ - - season_name: str = "Evergreen" - - colour: str = Colours.soft_green - description: str = "The default season!" - - branding_path: str = "seasonal/evergreen" - - months: t.Set[Month] = set(Month) - - -class Christmas(SeasonBase): - """Branding for December.""" - - season_name = "Festive season" - - colour = Colours.soft_red - description = ( - "The time is here to get into the festive spirit! No matter who you are, where you are, " - "or what beliefs you may follow, we hope every one of you enjoy this festive season!" 
- ) - - branding_path = "seasonal/christmas" - - months = {Month.DECEMBER} - - -class Easter(SeasonBase): - """Branding for April.""" - - season_name = "Easter" - - colour = Colours.bright_green - description = ( - "Bunny here, bunny there, bunny everywhere! Here at Python Discord, we celebrate " - "our version of Easter during the entire month of April." - ) - - branding_path = "seasonal/easter" - - months = {Month.APRIL} - - -class Halloween(SeasonBase): - """Branding for October.""" - - season_name = "Halloween" - - colour = Colours.orange - description = "Trick or treat?!" - - branding_path = "seasonal/halloween" - - months = {Month.OCTOBER} - - -class Pride(SeasonBase): - """Branding for June.""" - - season_name = "Pride" - - colour = Colours.pink - description = ( - "The month of June is a special month for us at Python Discord. It is very important to us " - "that everyone feels welcome here, no matter their origin, identity or sexuality. During the " - "month of June, while some of you are participating in Pride festivals across the world, " - "we will be celebrating individuality and commemorating the history and challenges " - "of the LGBTQ+ community with a Pride event of our own!" - ) - - branding_path = "seasonal/pride" - - months = {Month.JUNE} - - -class Valentines(SeasonBase): - """Branding for February.""" - - season_name = "Valentines" - - colour = Colours.pink - description = "Love is in the air!" - - branding_path = "seasonal/valentines" - - months = {Month.FEBRUARY} - - -class Wildcard(SeasonBase): - """Branding for August.""" - - season_name = "Wildcard" - - colour = Colours.purple - description = "A season full of surprises!" - - months = {Month.AUGUST} - - -def get_all_seasons() -> t.List[t.Type[SeasonBase]]: - """Give all available season classes.""" - return [SeasonBase] + SeasonBase.__subclasses__() - - -def get_current_season() -> t.Type[SeasonBase]: - """Give active season, based on current UTC month.""" - current_month = Month(datetime.utcnow().month) - - active_seasons = tuple( - season - for season in SeasonBase.__subclasses__() - if current_month in season.months - ) - - if not active_seasons: - return SeasonBase - - return active_seasons[0] - - -def get_season(name: str) -> t.Optional[t.Type[SeasonBase]]: - """ - Give season such that its class name or its `season_name` attr match `name` (caseless). - - If no such season exists, return None. - """ - name = name.casefold() - - for season in get_all_seasons(): - matches = (season.__name__.casefold(), season.season_name.casefold()) - - if name in matches: - return season - - -def _validate_season_overlap() -> None: - """ - Raise BrandingError if there are any colliding seasons. - - This serves as a local test to ensure that seasons haven't been misconfigured. 
- """ - month_to_season = {} - - for season in SeasonBase.__subclasses__(): - for month in season.months: - colliding_season = month_to_season.get(month) - - if colliding_season: - raise BrandingError(f"Season {season} collides with {colliding_season} in {month.name}") - else: - month_to_season[month] = season - - -_validate_season_overlap() diff --git a/bot/exts/backend/error_handler.py b/bot/exts/backend/error_handler.py index 9cb54cdab..76ab7dfc2 100644 --- a/bot/exts/backend/error_handler.py +++ b/bot/exts/backend/error_handler.py @@ -1,7 +1,6 @@ import contextlib import difflib import logging -import random import typing as t from discord import Embed @@ -10,10 +9,9 @@ from sentry_sdk import push_scope from bot.api import ResponseCodeError from bot.bot import Bot -from bot.constants import Colours, ERROR_REPLIES, Icons, MODERATION_ROLES +from bot.constants import Colours, Icons, MODERATION_ROLES from bot.converters import TagNameConverter from bot.errors import InvalidInfractedUser, LockedResourceError -from bot.exts.backend.branding._errors import BrandingError from bot.utils.checks import InWhitelistCheckFailure log = logging.getLogger(__name__) @@ -79,9 +77,6 @@ class ErrorHandler(Cog): await self.handle_api_error(ctx, e.original) elif isinstance(e.original, LockedResourceError): await ctx.send(f"{e.original} Please wait for it to finish and try again later.") - elif isinstance(e.original, BrandingError): - await ctx.send(embed=self._get_error_embed(random.choice(ERROR_REPLIES), str(e.original))) - return elif isinstance(e.original, InvalidInfractedUser): await ctx.send(f"Cannot infract that user. {e.original.reason}") else: -- cgit v1.2.3 From 65f3dd35ec7eca6160691c5cc339ba9462941c47 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 20:27:52 +0100 Subject: Branding: initiate repository abstraction --- bot/exts/backend/branding/_cog.py | 3 +++ bot/exts/backend/branding/_repository.py | 12 ++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 bot/exts/backend/branding/_repository.py diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index afe575e1a..cef17a614 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -3,6 +3,7 @@ import logging from discord.ext import commands from bot.bot import Bot +from bot.exts.backend.branding._repository import BrandingRepository log = logging.getLogger(__name__) @@ -11,4 +12,6 @@ class Branding(commands.Cog): """Guild branding management.""" def __init__(self, bot: Bot) -> None: + """Instantiate repository abstraction.""" self.bot = bot + self.repository = BrandingRepository(bot) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py new file mode 100644 index 000000000..de47fcd36 --- /dev/null +++ b/bot/exts/backend/branding/_repository.py @@ -0,0 +1,12 @@ +import logging + +from bot.bot import Bot + +log = logging.getLogger(__name__) + + +class BrandingRepository: + """Abstraction exposing the branding repository via convenient methods.""" + + def __init__(self, bot: Bot) -> None: + self.bot = bot -- cgit v1.2.3 From bcddc5cdaaa021af3fef9b0b3c2b30d11960083a Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 20:37:19 +0100 Subject: Branding: migrate constants Constants will only be used in one place and there's not enough of them to warrant a separate module. 
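For orientation, these module-level constants are consumed by the repository abstraction's HTTP calls in the commits that follow. A minimal, self-contained sketch of how a GitHub contents API request combines them (using a throwaway aiohttp session purely for illustration rather than the bot's shared http_session, and omitting the optional Authorization token header):

    import asyncio

    import aiohttp

    BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents"
    PARAMS = {"ref": "kwzrd/events-rework"}  # Target branch
    HEADERS = {"Accept": "application/vnd.github.v3+json"}  # Ensure we use API v3

    async def list_event_directories() -> list:
        """List names of sub-directories under 'events' in the branding repository."""
        async with aiohttp.ClientSession() as session:
            async with session.get(f"{BRANDING_URL}/events", params=PARAMS, headers=HEADERS) as response:
                if response.status != 200:
                    return []  # Mirror the cog's convention of treating a non-200 response as an empty result
                listing = await response.json()
        return [item["name"] for item in listing if item["type"] == "dir"]

    # Example usage: asyncio.run(list_event_directories())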
--- bot/exts/backend/branding/_constants.py | 11 ----------- bot/exts/backend/branding/_repository.py | 11 +++++++++++ 2 files changed, 11 insertions(+), 11 deletions(-) delete mode 100644 bot/exts/backend/branding/_constants.py diff --git a/bot/exts/backend/branding/_constants.py b/bot/exts/backend/branding/_constants.py deleted file mode 100644 index 8afac6538..000000000 --- a/bot/exts/backend/branding/_constants.py +++ /dev/null @@ -1,11 +0,0 @@ -from bot.constants import Keys - -# Base URL for requests into the branding repository -BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents" - -PARAMS = {"ref": "kwzrd/events-rework"} # Target branch -HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3 - -# A GitHub token is not necessary for the cog to operate, unauthorized requests are however limited to 60 per hour -if Keys.github: - HEADERS["Authorization"] = f"token {Keys.github}" diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index de47fcd36..3bdb632f8 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -1,6 +1,17 @@ import logging from bot.bot import Bot +from bot.constants import Keys + +# Base URL for requests into the branding repository +BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents" + +PARAMS = {"ref": "kwzrd/events-rework"} # Target branch +HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3 + +# A GitHub token is not necessary for the cog to operate, unauthorized requests are however limited to 60 per hour +if Keys.github: + HEADERS["Authorization"] = f"token {Keys.github}" log = logging.getLogger(__name__) -- cgit v1.2.3 From ff8193ed19c2c5e5106fa2afee264f5ea700275f Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 20:47:07 +0100 Subject: Branding: add HTTP fetch helper methods --- bot/exts/backend/branding/_repository.py | 54 ++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 3bdb632f8..bf38fccad 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -1,4 +1,5 @@ import logging +import typing as t from bot.bot import Bot from bot.constants import Keys @@ -16,8 +17,61 @@ if Keys.github: log = logging.getLogger(__name__) +class RemoteObject: + """ + Represent a remote file or directory on GitHub. + + The annotations match keys in the response JSON that we're interested in. + """ + + name: str # Filename + path: str # Path from repo root + type: str # Either 'file' or 'dir' + download_url: str + + def __init__(self, dictionary: t.Dict[str, t.Any]) -> None: + """Initialize by grabbing annotated attributes from `dictionary`.""" + for annotation in self.__annotations__: + setattr(self, annotation, dictionary[annotation]) + + class BrandingRepository: """Abstraction exposing the branding repository via convenient methods.""" def __init__(self, bot: Bot) -> None: self.bot = bot + + async def fetch_directory(self, path: str, types: t.Container[str] = ("file", "dir")) -> t.Dict[str, RemoteObject]: + """ + Fetch directory found at `path` in the branding repository. + + The directory will be represented by a mapping from file or sub-directory names to their corresponding + instances of `RemoteObject`. Passing a custom `types` value allows only getting files or directories. 
+ + If the request fails, returns an empty dictionary. + """ + full_url = f"{BRANDING_URL}/{path}" + log.debug(f"Fetching directory from branding repository: {full_url}") + + async with self.bot.http_session.get(full_url, params=PARAMS, headers=HEADERS) as response: + if response.status == 200: + json_directory = await response.json() + else: + log.warning(f"Received non-200 response status: {response.status}") + return {} + + return {file["name"]: RemoteObject(file) for file in json_directory if file["type"] in types} + + async def fetch_file(self, file: RemoteObject) -> t.Optional[bytes]: + """ + Fetch `file` using its download URL. + + Returns the file as bytes unless the request fails, in which case None is given. + """ + log.debug(f"Fetching file from branding repository: {file.download_url}") + + async with self.bot.http_session.get(file.download_url, params=PARAMS, headers=HEADERS) as response: + if response.status == 200: + return await response.read() + else: + log.warning(f"Received non-200 response status: {response.status}") -- cgit v1.2.3 From cb3b80788bde2aba280de5370ee78abcaa39f613 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 22:41:25 +0100 Subject: Branding: define event construction methodology --- bot/errors.py | 6 +++ bot/exts/backend/branding/_repository.py | 84 ++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) diff --git a/bot/errors.py b/bot/errors.py index ab0adcd42..3544c6320 100644 --- a/bot/errors.py +++ b/bot/errors.py @@ -35,3 +35,9 @@ class InvalidInfractedUser(Exception): self.reason = reason super().__init__(reason) + + +class BrandingMisconfiguration(RuntimeError): + """Raised by the Branding cog when a misconfigured event is encountered.""" + + pass diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index bf38fccad..9d32fdfb1 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -1,8 +1,12 @@ import logging import typing as t +from datetime import date, datetime + +import frontmatter from bot.bot import Bot from bot.constants import Keys +from bot.errors import BrandingMisconfiguration # Base URL for requests into the branding repository BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents" @@ -14,6 +18,13 @@ HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3 if Keys.github: HEADERS["Authorization"] = f"token {Keys.github}" +# Since event periods are year-agnostic, we parse them into `datetime` objects with a manually inserted year +# Please note that this is intentionally a leap year in order to allow Feb 29 to be valid +ARBITRARY_YEAR = 2020 + +# Format used to parse date strings after we inject `ARBITRARY_YEAR` at the end +DATE_FMT = "%B %d %Y" # Ex: July 10 2020 + log = logging.getLogger(__name__) @@ -35,6 +46,23 @@ class RemoteObject: setattr(self, annotation, dictionary[annotation]) +class MetaFile(t.NamedTuple): + """Composition of attributes defined in a 'meta.md' file.""" + + is_fallback: bool + start_date: t.Optional[date] + end_date: t.Optional[date] + description: str # Markdown event description + + +class Event(t.NamedTuple): + """Represent an event defined in the branding repository.""" + + banner: RemoteObject + icons: t.List[RemoteObject] + meta: MetaFile + + class BrandingRepository: """Abstraction exposing the branding repository via convenient methods.""" @@ -75,3 +103,59 @@ class BrandingRepository: return await response.read() else: log.warning(f"Received non-200 
response status: {response.status}") + + async def parse_meta_file(self, raw_file: bytes) -> MetaFile: + """ + Parse a 'meta.md' file from raw bytes. + + The caller is responsible for handling errors caused by misconfiguration. + """ + attrs, description = frontmatter.parse(raw_file) # Library automatically decodes using UTF-8 + + if not description: + raise BrandingMisconfiguration("No description found in 'meta.md'!") + + if attrs.get("fallback", False): + return MetaFile(is_fallback=True, start_date=None, end_date=None, description=description) + + start_date_raw = attrs.get("start_date") + end_date_raw = attrs.get("end_date") + + if None in (start_date_raw, end_date_raw): + raise BrandingMisconfiguration("Non-fallback event doesn't have start and end dates defined!") + + # We extend the configured month & day with an arbitrary leap year to allow a `datetime` repr to exist + # This may raise errors if configured in a wrong format ~ we let the caller handle such cases + start_date = datetime.strptime(f"{start_date_raw} {ARBITRARY_YEAR}", DATE_FMT).date() + end_date = datetime.strptime(f"{end_date_raw} {ARBITRARY_YEAR}", DATE_FMT).date() + + return MetaFile(is_fallback=False, start_date=start_date, end_date=end_date, description=description) + + async def construct_event(self, directory: RemoteObject) -> Event: + """ + Construct an `Event` instance from an event `directory`. + + The caller is responsible for handling errors caused by misconfiguration. + """ + contents = await self.fetch_directory(directory.path) + + missing_assets = {"meta.md", "banner.png", "server_icons"} - contents.keys() + + if missing_assets: + raise BrandingMisconfiguration(f"Directory is missing following assets: {missing_assets}") + + server_icons = await self.fetch_directory(contents["server_icons"].path, types=("file",)) + + if server_icons is None: + raise BrandingMisconfiguration("Failed to fetch server icons!") + if len(server_icons) == 0: + raise BrandingMisconfiguration("Found no server icons!") + + meta_bytes = await self.fetch_file(contents["meta.md"]) + + if meta_bytes is None: + raise BrandingMisconfiguration("Failed to fetch 'meta.md' file!") + + meta_file = await self.parse_meta_file(meta_bytes) + + return Event(contents["banner.png"], list(server_icons.values()), meta_file) -- cgit v1.2.3 From 305046418b4cb66b59d4592a02ad2613e75718aa Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 23:06:00 +0100 Subject: Branding: add event getters These methods form the API to the repository abstraction. --- bot/exts/backend/branding/_repository.py | 51 ++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 9d32fdfb1..20e287504 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -159,3 +159,54 @@ class BrandingRepository: meta_file = await self.parse_meta_file(meta_bytes) return Event(contents["banner.png"], list(server_icons.values()), meta_file) + + async def get_events(self) -> t.List[Event]: + """ + Discover available events in the branding repository. + + Misconfigured events are skipped, the return value may therefore not contain a representation of each + directory in the repository. May return an empty list in the catastrophic case. 
+ """ + log.debug("Discovering events in branding repository") + + event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files + instances: t.List[Event] = [] + + for event_directory in event_directories.values(): + log.trace(f"Attempting to construct event from directory: {event_directory.path}") + try: + instance = await self.construct_event(event_directory) + except Exception as exc: + log.warning(f"Could not construct event: {exc}") + else: + instances.append(instance) + + log.trace(f"Found {len(instances)} correctly configured events") + return instances + + async def get_current_event(self) -> t.Optional[Event]: + """ + Get the currently active event, or the fallback event. + + Returns None in the case that no event is active, and no fallback event is found. + """ + utc_now = datetime.utcnow() + log.debug(f"Finding active event for: {utc_now}") + + # As all events exist in the arbitrary year, we construct a separate object for the purposes of comparison + lookup_now = date(year=ARBITRARY_YEAR, month=utc_now.month, day=utc_now.day) + + events = await self.get_events() + + for event in events: + meta = event.meta + if not meta.is_fallback and (meta.start_date <= lookup_now <= meta.end_date): + return event + + log.debug("No active event found, looking for fallback") + + for event in events: + if event.meta.is_fallback: + return event + + log.warning("No event is currently active and no fallback event was found!") -- cgit v1.2.3 From d831086f0f5b21138283e6165f3efe0c42ba2530 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 23:23:44 +0100 Subject: Branding: make event instances aware of their location This allows us to add a neat string representation. --- bot/exts/backend/branding/_repository.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 20e287504..e9d44417f 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -58,9 +58,13 @@ class MetaFile(t.NamedTuple): class Event(t.NamedTuple): """Represent an event defined in the branding repository.""" + path: str # Path from repo root where event lives + meta: MetaFile banner: RemoteObject icons: t.List[RemoteObject] - meta: MetaFile + + def __str__(self) -> str: + return f"" class BrandingRepository: @@ -158,7 +162,7 @@ class BrandingRepository: meta_file = await self.parse_meta_file(meta_bytes) - return Event(contents["banner.png"], list(server_icons.values()), meta_file) + return Event(directory.path, meta_file, contents["banner.png"], list(server_icons.values())) async def get_events(self) -> t.List[Event]: """ -- cgit v1.2.3 From d3f3ba9d999091bae1d455afa9f1b94eea47f778 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Tue, 9 Mar 2021 23:42:51 +0100 Subject: Branding: correctly annotate optional attribute --- bot/exts/backend/branding/_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index e9d44417f..133231968 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -38,7 +38,7 @@ class RemoteObject: name: str # Filename path: str # Path from repo root type: str # Either 'file' or 'dir' - download_url: str + download_url: t.Optional[str] # If type is 'dir', this is None! 
def __init__(self, dictionary: t.Dict[str, t.Any]) -> None: """Initialize by grabbing annotated attributes from `dictionary`.""" -- cgit v1.2.3 From 0810b76e448f8b13b760eb29c080a7d576959821 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 10 Mar 2021 22:03:59 +0100 Subject: Branding: do not require 'RemoteObject' instance to fetch file --- bot/exts/backend/branding/_repository.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 133231968..1a6b13c8b 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -94,15 +94,15 @@ class BrandingRepository: return {file["name"]: RemoteObject(file) for file in json_directory if file["type"] in types} - async def fetch_file(self, file: RemoteObject) -> t.Optional[bytes]: + async def fetch_file(self, download_url: str) -> t.Optional[bytes]: """ - Fetch `file` using its download URL. + Fetch file from `download_url`. Returns the file as bytes unless the request fails, in which case None is given. """ - log.debug(f"Fetching file from branding repository: {file.download_url}") + log.debug(f"Fetching file from branding repository: {download_url}") - async with self.bot.http_session.get(file.download_url, params=PARAMS, headers=HEADERS) as response: + async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response: if response.status == 200: return await response.read() else: @@ -155,7 +155,7 @@ class BrandingRepository: if len(server_icons) == 0: raise BrandingMisconfiguration("Found no server icons!") - meta_bytes = await self.fetch_file(contents["meta.md"]) + meta_bytes = await self.fetch_file(contents["meta.md"].download_url) if meta_bytes is None: raise BrandingMisconfiguration("Failed to fetch 'meta.md' file!") -- cgit v1.2.3 From 53db28b0a2c126efd1ead201d9053eac81d95758 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 10 Mar 2021 22:27:30 +0100 Subject: Branding: implement asset application logic --- bot/exts/backend/branding/_cog.py | 51 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index cef17a614..79106d694 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -1,13 +1,30 @@ +import asyncio import logging +from enum import Enum +import async_timeout +import discord from discord.ext import commands from bot.bot import Bot +from bot.constants import Guild +from bot.decorators import mock_in_debug from bot.exts.backend.branding._repository import BrandingRepository log = logging.getLogger(__name__) +class AssetType(Enum): + """ + Recognised Discord guild asset types. + + The value of each member corresponds exactly to a kwarg that can be passed to `Guild.edit`. + """ + + BANNER = "banner" + ICON = "icon" + + class Branding(commands.Cog): """Guild branding management.""" @@ -15,3 +32,37 @@ class Branding(commands.Cog): """Instantiate repository abstraction.""" self.bot = bot self.repository = BrandingRepository(bot) + + # region: Internal utility + + @mock_in_debug(return_value=None) + async def apply_asset(self, asset_type: AssetType, download_url: str) -> None: + """ + Download asset from `download_url` and apply it to PyDis as `asset_type`. + + This function is mocked in the development environment in order to prevent API spam during testing. 
+ Decorator should be temporarily removed in order to test internal methodology. + """ + log.info(f"Applying {asset_type.value} asset to the guild") + + file = await self.repository.fetch_file(download_url) + + if file is None: + log.error(f"Failed to download {asset_type.value} from branding repository!") + return + + await self.bot.wait_until_guild_available() + pydis: discord.Guild = self.bot.get_guild(Guild.id) + + timeout = 10 # Seconds + try: + with async_timeout.timeout(timeout): + await pydis.edit(**{asset_type.value: file}) + except discord.HTTPException as http_exc: + log.error(f"Asset upload to Discord failed: {http_exc}") + except asyncio.TimeoutError: + log.error(f"Asset upload to Discord timed out after {timeout} seconds!") + else: + log.debug("Asset uploaded successfully!") + + # endregion -- cgit v1.2.3 From 83dfb2a7a0389e53d61a30bd93aba2c749a4aa48 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Thu, 11 Mar 2021 21:51:13 +0100 Subject: Branding: expose SHA on remote objects --- bot/exts/backend/branding/_repository.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 1a6b13c8b..ef292619e 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -35,6 +35,7 @@ class RemoteObject: The annotations match keys in the response JSON that we're interested in. """ + sha: str # Hash helps us detect asset change name: str # Filename path: str # Path from repo root type: str # Either 'file' or 'dir' -- cgit v1.2.3 From ac4399a4b19dfa5ae0e9856c8df546d00a7d473e Mon Sep 17 00:00:00 2001 From: kwzrd Date: Thu, 11 Mar 2021 22:09:25 +0100 Subject: Branding: implement internal utility This adds the core logic of branding management. In comparison with the previous version, we now maintain all state in Redis, which allows the bot to seamlessly restart without losing any information. The 'send_info_embed' function is intentionally implemented with the consideration of allowing users to invoke it on-demand. It always reads information from the cache, even if the caller could pass a 'MetaFile' instance. So while this may look needlessly indirect right now, it should begin to make sense once the command API is implemented. 
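
The cache-as-single-source-of-truth pattern described in the message above can be illustrated with a minimal, self-contained sketch. It deliberately swaps the real async_rediscache.RedisCache and discord.Embed for an in-memory stand-in and a plain dict, and the helper names (FakeCache, store_event_info, build_info_embed) are hypothetical; only the cache key names mirror the ones listed in the cog.

import asyncio
import typing as t


class FakeCache:
    """In-memory stand-in for the async Redis cache used by the cog (illustration only)."""

    def __init__(self) -> None:
        self._data: t.Dict[str, str] = {}

    async def set(self, key: str, value: str) -> None:
        self._data[key] = value

    async def get(self, key: str) -> t.Optional[str]:
        return self._data.get(key)


async def store_event_info(cache: FakeCache, description: str, duration: str) -> None:
    # Writer side: persist everything needed to describe the event later.
    await cache.set("event_description", description)
    await cache.set("event_duration", duration)


async def build_info_embed(cache: FakeCache) -> t.Dict[str, str]:
    # Reader side: takes no event object ~ it always reads the cache, so it can be
    # invoked on-demand (e.g. by a command) long after the event was entered.
    return {
        "description": await cache.get("event_description") or "No event cached",
        "footer": await cache.get("event_duration") or "",
    }


async def demo() -> None:
    cache = FakeCache()
    await store_event_info(cache, "It is Pride Month!", "June 01 - June 30")
    print(await build_info_embed(cache))


asyncio.run(demo())

The point of the indirection is that a restart (or an on-demand command) only needs the cache to rebuild the embed; nothing has to re-query the branding repository.
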
--- bot/exts/backend/branding/_cog.py | 176 +++++++++++++++++++++++++++++++++++++- 1 file changed, 174 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 79106d694..ddd91b5f8 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -1,15 +1,19 @@ import asyncio import logging +import random +import typing as t +from datetime import datetime, timedelta from enum import Enum import async_timeout import discord +from async_rediscache import RedisCache from discord.ext import commands from bot.bot import Bot -from bot.constants import Guild +from bot.constants import Branding as BrandingConfig, Channels, Guild from bot.decorators import mock_in_debug -from bot.exts.backend.branding._repository import BrandingRepository +from bot.exts.backend.branding._repository import BrandingRepository, Event, RemoteObject log = logging.getLogger(__name__) @@ -25,9 +29,28 @@ class AssetType(Enum): ICON = "icon" +def compound_hash(objects: t.Iterable[RemoteObject]) -> str: + """Compound hashes are cached to check for change in any of the member `objects`.""" + return "-".join(item.sha for item in objects) + + class Branding(commands.Cog): """Guild branding management.""" + # RedisCache[ + # "event_path": Path from root in the branding repo (str) + # "event_description": Markdown description (str) + # "event_duration": Human-readable date range or 'Fallback' (str) + # "banner_hash": Hash of the last applied banner (str) + # "icons_hash": Compound hash of icons in rotation (str) + # "last_rotation_timestamp": POSIX timestamp (float) + # ] + cache_information = RedisCache() + + # Cache holding icons in current rotation ~ the keys are download URLs (str) and the values are integers + # corresponding to the amount of times each icon has been used in the current rotation + cache_icons = RedisCache() + def __init__(self, bot: Bot) -> None: """Instantiate repository abstraction.""" self.bot = bot @@ -65,4 +88,153 @@ class Branding(commands.Cog): else: log.debug("Asset uploaded successfully!") + async def apply_banner(self, banner: RemoteObject) -> None: + """ + Apply `banner` to the guild and cache its hash. + + Banners should always be applied via this method in order to ensure that the last hash is cached. + """ + await self.apply_asset(AssetType.BANNER, banner.download_url) + await self.cache_information.set("banner_hash", banner.sha) + + async def rotate_icons(self) -> None: + """ + Choose and apply the next-up icon in rotation. + + We keep track of the amount of times each icon has been used. The values in `cache_icons` can be understood + to be iteration IDs. When an icon is chosen & applied, we bump its count, pushing it into the next iteration. + + Once the current iteration (lowest count in the cache) depletes, we move onto the next iteration. + + In the case that there is only 1 icon in the rotation and has already been applied, do nothing. 
+ """ + log.debug("Rotating icons") + + state = await self.cache_icons.to_dict() + log.trace(f"Total icons in rotation: {len(state)}") + + if len(state) == 1 and 1 in state.values(): + log.debug("Aborting icon rotation: only 1 icon is available and has already been applied") + return + + current_iteration = min(state.values()) # Choose iteration to draw from + options = [download_url for download_url, times_used in state.items() if times_used == current_iteration] + + log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}") + next_icon = random.choice(options) + + await self.apply_asset(AssetType.ICON, next_icon) + await self.cache_icons.increment(next_icon) # Push the icon into the next iteration + + timestamp = datetime.utcnow().timestamp() + await self.cache_information.set("last_rotation_timestamp", timestamp) + + async def maybe_rotate_icons(self) -> None: + """ + Call `rotate_icons` if the configured amount of time has passed since last rotation. + + We offset the calculated time difference into the future in order to avoid off-by-a-little-bit errors. + Because there is work to be done before the timestamp is read and written, the next read will likely + commence slightly under 24 hours after the last write. + """ + log.debug("Checking if icons should rotate") + + last_rotation_timestamp = await self.cache_information.get("last_rotation_timestamp") + + if last_rotation_timestamp is None: # Maiden case ~ never rotated + await self.rotate_icons() + + last_rotation = datetime.fromtimestamp(last_rotation_timestamp) + difference = (datetime.utcnow() - last_rotation) + timedelta(minutes=5) + + log.trace(f"Icons last rotated at {last_rotation} (difference: {difference})") + + if difference.days >= BrandingConfig.cycle_frequency: + await self.rotate_icons() + + async def initiate_icon_rotation(self, available_icons: t.List[RemoteObject]) -> None: + """ + Set up a new icon rotation. + + This function should be called whenever the set of `available_icons` changes. This is generally the case + when we enter a new event, but potentially also when the assets of an on-going event change. In such cases, + a reset of `cache_icons` is necessary, because it contains download URLs which may have gotten stale. + """ + log.debug("Initiating new icon rotation") + + await self.cache_icons.clear() + + new_state = {icon.download_url: 0 for icon in available_icons} + await self.cache_icons.update(new_state) + + log.trace(f"Icon rotation initiated for {len(new_state)} icons") + + await self.rotate_icons() + await self.cache_information.set("icons_hash", compound_hash(available_icons)) + + async def send_info_embed(self, channel_id: int) -> None: + """ + Send the currently cached event description to `channel_id`. + + This function is called when entering a new event with the destination being #changelog. However, it can + also be invoked on-demand by users. + + To support either case, we read information about the current event from `cache_information`. The caller + is therefore responsible for making sure that the cache is up-to-date before calling this function. 
+ """ + log.debug(f"Sending event information event to channel id: {channel_id}") + + await self.bot.wait_until_guild_available() + channel: t.Optional[discord.TextChannel] = self.bot.get_channel(channel_id) + + if channel is None: + log.warning(f"Cannot send event information: channel {channel_id} not found!") + return + + log.debug(f"Destination channel: #{channel.name}") + + embed = discord.Embed( + description=await self.cache_information.get("event_description"), + colour=discord.Colour.blurple(), + ) + embed.set_footer(text=await self.cache_information.get("event_duration")) + + await channel.send(embed=embed) + + async def enter_event(self, event: Event) -> None: + """ + Enter `event` and update information cache. + + From the outside, entering a new event is as simple as applying its branding to the guild and dispatching + a notification to #changelog. + + However, internally we cache information to ensure that we: + * Remember which event we're currently in across restarts + * Provide an on-demand information embed without re-querying the branding repository + + An event change should always be handled via this function, as it ensures that the cache is populated. + """ + log.debug(f"Entering new event: {event.path}") + + await self.apply_banner(event.banner) # Only one asset ~ apply directly + await self.initiate_icon_rotation(event.icons) # Extra layer of abstraction to handle multiple assets + + # Cache event identity to avoid re-entry in case of restart + await self.cache_information.set("event_path", event.path) + + # The following values are only stored for the purpose of presenting them to the users + if event.meta.is_fallback: + event_duration = "Fallback" + else: + fmt = "%B %d" # Ex: August 23 + start_date = event.meta.start_date.strftime(fmt) + end_date = event.meta.end_date.strftime(fmt) + event_duration = f"{start_date} - {end_date}" + + await self.cache_information.set("event_duration", event_duration) + await self.cache_information.set("event_description", event.meta.description) + + # Notify guild of new event ~ this reads the information that we cached above! 
+ await self.send_info_embed(Channels.change_log) + # endregion -- cgit v1.2.3 From 8c05cf3a2202fd06ba15636f998c222694e6085e Mon Sep 17 00:00:00 2001 From: kwzrd Date: Fri, 12 Mar 2021 16:52:49 +0100 Subject: Branding: implement daemon --- bot/exts/backend/branding/_cog.py | 112 +++++++++++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 3 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index ddd91b5f8..4387e68b4 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -2,13 +2,13 @@ import asyncio import logging import random import typing as t -from datetime import datetime, timedelta +from datetime import datetime, time, timedelta from enum import Enum import async_timeout import discord from async_rediscache import RedisCache -from discord.ext import commands +from discord.ext import commands, tasks from bot.bot import Bot from bot.constants import Branding as BrandingConfig, Channels, Guild @@ -38,6 +38,7 @@ class Branding(commands.Cog): """Guild branding management.""" # RedisCache[ + # "daemon_active": If True, daemon auto-starts; controlled via commands (bool) # "event_path": Path from root in the branding repo (str) # "event_description": Markdown description (str) # "event_duration": Human-readable date range or 'Fallback' (str) @@ -52,10 +53,12 @@ class Branding(commands.Cog): cache_icons = RedisCache() def __init__(self, bot: Bot) -> None: - """Instantiate repository abstraction.""" + """Instantiate repository abstraction & allow daemon to start.""" self.bot = bot self.repository = BrandingRepository(bot) + self.bot.loop.create_task(self.maybe_start_daemon()) # Start depending on cache + # region: Internal utility @mock_in_debug(return_value=None) @@ -238,3 +241,106 @@ class Branding(commands.Cog): await self.send_info_embed(Channels.change_log) # endregion + # region: Daemon + + async def maybe_start_daemon(self) -> None: + """ + Start the daemon depending on cache state. + + The daemon will only start if it's been previously explicitly enabled via a command. + """ + log.debug("Checking whether daemon is enabled") + + should_begin: t.Optional[bool] = await self.cache_information.get("daemon_active") # None if never set! + + if should_begin: + self.daemon_main.start() + + async def cog_unload(self) -> None: + """ + Cancel the daemon in case of cog unload. + + This is **not** done automatically! The daemon otherwise remains active in the background. + """ + log.debug("Cog unload: cancelling daemon") + + self.daemon_main.cancel() + + @tasks.loop(hours=24) + async def daemon_main(self) -> None: + """ + Periodically synchronise guild & caches with branding repository. + + This function executes every 24 hours at midnight. We pull the currently active event from the branding + repository and check whether it matches the currently active event. If not, we apply the new event. + + However, it is also possible that an event's assets change as it's active. To account for such cases, + we check the banner & icons hashes against the currently cached values. If there is a mismatch, the + specific asset is re-applied. + + As such, the guild should always remain synchronised with the branding repository. However, the #changelog + notification is only sent in the case of entering a new event ~ no change in an on-going event will trigger + a new notification to be sent. 
+ """ + log.debug("Daemon awakens: checking current event") + + new_event = await self.repository.get_current_event() + + if new_event is None: + log.warning("Failed to get current event from the branding repository, daemon will do nothing!") + return + + if new_event.path != await self.cache_information.get("event_path"): + log.debug("New event detected!") + await self.enter_event(new_event) + return + + log.debug("Event has not changed, checking for change in assets") + + if new_event.banner.sha != await self.cache_information.get("banner_hash"): + log.debug("Detected same-event banner change!") + await self.apply_banner(new_event.banner) + + if compound_hash(new_event.icons) != await self.cache_information.get("icons_hash"): + log.debug("Detected same-event icon change!") + await self.initiate_icon_rotation(new_event.icons) + else: + await self.maybe_rotate_icons() + + @daemon_main.before_loop + async def daemon_before(self) -> None: + """ + Wait until the next-up UTC midnight before letting `daemon_main` begin. + + This function allows the daemon to keep a consistent schedule across restarts. + + We check for a special case in which the cog's cache is empty. This indicates that we have never entered + an event (on first start-up), or that there was a cache loss. In either case, the current event gets + applied immediately, to avoid leaving the cog in an empty state. + """ + log.debug("Calculating time for daemon to sleep before first awakening") + + current_event = await self.cache_information.get("event_path") + + if current_event is None: # Maiden case ~ first start or cache loss + log.debug("Applying event immediately as cache is empty (indicating maiden case)") + + event = await self.repository.get_current_event() + + if event is None: + log.warning("Failed to fetch event ~ cache will remain empty!") + else: + await self.enter_event(event) + + now = datetime.utcnow() + + # The actual midnight moment is offset into the future in order to prevent issues with imprecise sleep + tomorrow = now + timedelta(days=1) + midnight = datetime.combine(tomorrow, time(minute=1)) + + sleep_secs = (midnight - now).total_seconds() + + log.debug(f"Sleeping {sleep_secs} seconds before next-up midnight at {midnight}") + await asyncio.sleep(sleep_secs) + + # endregion -- cgit v1.2.3 From 64469430d0a1a4ed6ca41c696622e5d6e46d52a4 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 12:11:29 +0100 Subject: Branding: gate sync via helper function Sync make also be invoked with a command; avoid logic duplication. --- bot/exts/backend/branding/_cog.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 4387e68b4..dce2b7bc0 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -240,6 +240,23 @@ class Branding(commands.Cog): # Notify guild of new event ~ this reads the information that we cached above! await self.send_info_embed(Channels.change_log) + async def synchronise(self) -> None: + """ + Fetch the current event and delegate to `enter_event`. + + This is a convenience wrapper to force synchronisation either via a command, or when the daemon starts + with an empty cache. It is generally only used in a recovery scenario. In the usual case, the daemon + already has an `Event` instance and can pass it to `enter_event` directly. 
+ """ + log.debug("Synchronise: fetching current event") + + event = await self.repository.get_current_event() + + if event is None: + log.error("Failed to fetch event ~ cannot synchronise!") + else: + await self.enter_event(event) + # endregion # region: Daemon @@ -323,14 +340,8 @@ class Branding(commands.Cog): current_event = await self.cache_information.get("event_path") if current_event is None: # Maiden case ~ first start or cache loss - log.debug("Applying event immediately as cache is empty (indicating maiden case)") - - event = await self.repository.get_current_event() - - if event is None: - log.warning("Failed to fetch event ~ cache will remain empty!") - else: - await self.enter_event(event) + log.debug("Event cache is empty (indicating maiden case), invoking synchronisation") + await self.synchronise() now = datetime.utcnow() -- cgit v1.2.3 From 9b3e5d4ec761c15c1fc6a261bbe8796c6ab0a50d Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 13:38:56 +0100 Subject: Branding: implement command interface --- bot/exts/backend/branding/_cog.py | 86 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index dce2b7bc0..0fd694bca 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -11,7 +11,7 @@ from async_rediscache import RedisCache from discord.ext import commands, tasks from bot.bot import Bot -from bot.constants import Branding as BrandingConfig, Channels, Guild +from bot.constants import Branding as BrandingConfig, Channels, Colours, Guild from bot.decorators import mock_in_debug from bot.exts.backend.branding._repository import BrandingRepository, Event, RemoteObject @@ -34,6 +34,18 @@ def compound_hash(objects: t.Iterable[RemoteObject]) -> str: return "-".join(item.sha for item in objects) +def make_embed(title: str, description: str, *, success: bool) -> discord.Embed: + """ + Construct simple response embed. + + If `success` is True, use green colour, otherwise red. + + For both `title` and `description`, empty string are valid values ~ fields will be empty. 
+ """ + colour = Colours.soft_green if success else Colours.soft_red + return discord.Embed(title=title, description=description, colour=colour) + + class Branding(commands.Cog): """Guild branding management.""" @@ -355,3 +367,75 @@ class Branding(commands.Cog): await asyncio.sleep(sleep_secs) # endregion + # region: Command interface (branding) + + @commands.group(name="branding") + async def branding_group(self, ctx: commands.Context) -> None: + """Control the branding cog.""" + if not ctx.invoked_subcommand: + await ctx.send_help(ctx.command) + + @branding_group.command(name="about") + async def branding_about_cmd(self, ctx: commands.Context) -> None: + """Show the current event description.""" + await self.send_info_embed(ctx.channel.id) + + @branding_group.command(name="sync") + async def branding_sync_cmd(self, ctx: commands.Context) -> None: + """Force branding synchronisation.""" + async with ctx.typing(): + await self.synchronise() + + resp = make_embed( + "Synchronisation complete", + "If something doesn't look right, check log for errors.", + success=True, + ) + await ctx.send(embed=resp) + + # endregion + # region: Command interface (branding daemon) + + @branding_group.group(name="daemon", aliases=("d",)) + async def branding_daemon_group(self, ctx: commands.Context) -> None: + """Control the branding cog's daemon.""" + if not ctx.invoked_subcommand: + await ctx.send_help(ctx.command) + + @branding_daemon_group.command(name="enable", aliases=("start", "on")) + async def branding_daemon_enable_cmd(self, ctx: commands.Context) -> None: + """Enable the branding daemon.""" + await self.cache_information.set("daemon_active", True) + + if self.daemon_main.is_running(): + resp = make_embed("Daemon is already enabled!", "", success=False) + else: + self.daemon_main.start() + resp = make_embed("Daemon enabled!", "It will now automatically awaken on start-up.", success=True) + + await ctx.send(embed=resp) + + @branding_daemon_group.command(name="disable", aliases=("stop", "off")) + async def branding_daemon_disable_cmd(self, ctx: commands.Context) -> None: + """Disable the branding daemon.""" + await self.cache_information.set("daemon_active", False) + + if self.daemon_main.is_running(): + self.daemon_main.cancel() + resp = make_embed("Daemon disabled!", "It will not awaken on start-up.", success=True) + else: + resp = make_embed("Daemon is already disabled!", "", success=False) + + await ctx.send(embed=resp) + + @branding_daemon_group.command(name="status") + async def branding_daemon_status_cmd(self, ctx: commands.Context) -> None: + """Check whether the daemon is currently enabled.""" + if self.daemon_main.is_running(): + resp = make_embed("Daemon is enabled", "Use `branding daemon disable` to stop.", success=True) + else: + resp = make_embed("Daemon is disabled", "Use `branding daemon enable` to start.", success=False) + + await ctx.send(embed=resp) + + # endregion -- cgit v1.2.3 From c7ecc27bb576f7fb717259702e8f7212eef92b29 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 13:48:07 +0100 Subject: Branding: lock commands to mods+ where necessary --- bot/exts/backend/branding/_cog.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 0fd694bca..61ae46923 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -11,7 +11,7 @@ from async_rediscache import RedisCache from discord.ext import commands, tasks from bot.bot import Bot -from bot.constants 
import Branding as BrandingConfig, Channels, Colours, Guild +from bot.constants import Branding as BrandingConfig, Channels, Colours, Guild, MODERATION_ROLES from bot.decorators import mock_in_debug from bot.exts.backend.branding._repository import BrandingRepository, Event, RemoteObject @@ -380,6 +380,7 @@ class Branding(commands.Cog): """Show the current event description.""" await self.send_info_embed(ctx.channel.id) + @commands.has_any_role(*MODERATION_ROLES) @branding_group.command(name="sync") async def branding_sync_cmd(self, ctx: commands.Context) -> None: """Force branding synchronisation.""" @@ -396,6 +397,7 @@ class Branding(commands.Cog): # endregion # region: Command interface (branding daemon) + @commands.has_any_role(*MODERATION_ROLES) @branding_group.group(name="daemon", aliases=("d",)) async def branding_daemon_group(self, ctx: commands.Context) -> None: """Control the branding cog's daemon.""" -- cgit v1.2.3 From 88111ccab42e6ff686cb1dc4ff2416f0b409c14a Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 14:08:11 +0100 Subject: Branding: add missing early exit --- bot/exts/backend/branding/_cog.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 61ae46923..f5bad21ac 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -158,6 +158,7 @@ class Branding(commands.Cog): if last_rotation_timestamp is None: # Maiden case ~ never rotated await self.rotate_icons() + return last_rotation = datetime.fromtimestamp(last_rotation_timestamp) difference = (datetime.utcnow() - last_rotation) + timedelta(minutes=5) -- cgit v1.2.3 From 80e037772150148c8aee9f49fd34a834e778b6c2 Mon Sep 17 00:00:00 2001 From: Matteo Bertucci Date: Sat, 13 Mar 2021 18:08:54 +0100 Subject: Add leads to the constants --- bot/constants.py | 2 ++ config-default.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/bot/constants.py b/bot/constants.py index 394d59a73..3918dfdc7 100644 --- a/bot/constants.py +++ b/bot/constants.py @@ -486,6 +486,8 @@ class Roles(metaclass=YAMLGetter): voice_verified: int admins: int + domain_leads: int + project_leads: int core_developers: int devops: int helpers: int diff --git a/config-default.yml b/config-default.yml index 3dbc7bd6b..a961df85b 100644 --- a/config-default.yml +++ b/config-default.yml @@ -262,6 +262,8 @@ guild: # Staff admins: &ADMINS_ROLE 267628507062992896 + domain_leads: 807415650778742785 + project_leads: 807415650778742785 core_developers: 587606783669829632 devops: 409416496733880320 helpers: &HELPERS_ROLE 267630620367257601 -- cgit v1.2.3 From 6724505ea01cf1bb345a8272643da00a6f28f272 Mon Sep 17 00:00:00 2001 From: Matteo Bertucci Date: Sat, 13 Mar 2021 18:22:44 +0100 Subject: Allow the !poll command to leads --- bot/exts/utils/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py index a5d6f69b9..c45f73b88 100644 --- a/bot/exts/utils/utils.py +++ b/bot/exts/utils/utils.py @@ -9,7 +9,7 @@ from discord.ext.commands import BadArgument, Cog, Context, clean_content, comma from discord.utils import snowflake_time from bot.bot import Bot -from bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES +from bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES, Roles from bot.converters import Snowflake from bot.decorators import in_whitelist from bot.pagination import LinePaginator @@ -175,7 +175,7 @@ class Utils(Cog): await ctx.send(embed=embed) 
@command(aliases=("poll",)) - @has_any_role(*MODERATION_ROLES) + @has_any_role(*MODERATION_ROLES, Roles.project_leads, Roles.domain_leads) async def vote(self, ctx: Context, title: clean_content(fix_channel_mentions=True), *options: str) -> None: """ Build a quick voting poll with matching reactions with the provided options. -- cgit v1.2.3 From 35e538ab9ab8ac0d479825aa8da9a42f885820a5 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 16:18:03 +0100 Subject: Branding: extract duration string in helper function --- bot/exts/backend/branding/_cog.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index f5bad21ac..332d4ad58 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -46,6 +46,22 @@ def make_embed(title: str, description: str, *, success: bool) -> discord.Embed: return discord.Embed(title=title, description=description, colour=colour) +def extract_event_duration(event: Event) -> str: + """ + Extract a human-readable, year-agnostic duration string from `event`. + + In the case that `event` is a fallback event, resolves to 'Fallback'. + """ + if event.meta.is_fallback: + return "Fallback" + + fmt = "%B %d" # Ex: August 23 + start_date = event.meta.start_date.strftime(fmt) + end_date = event.meta.end_date.strftime(fmt) + + return f"{start_date} - {end_date}" + + class Branding(commands.Cog): """Guild branding management.""" @@ -239,15 +255,7 @@ class Branding(commands.Cog): await self.cache_information.set("event_path", event.path) # The following values are only stored for the purpose of presenting them to the users - if event.meta.is_fallback: - event_duration = "Fallback" - else: - fmt = "%B %d" # Ex: August 23 - start_date = event.meta.start_date.strftime(fmt) - end_date = event.meta.end_date.strftime(fmt) - event_duration = f"{start_date} - {end_date}" - - await self.cache_information.set("event_duration", event_duration) + await self.cache_information.set("event_duration", extract_event_duration(event)) await self.cache_information.set("event_description", event.meta.description) # Notify guild of new event ~ this reads the information that we cached above! -- cgit v1.2.3 From 81e48983c7408e6a8dd4c6131eb5633be7c53825 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 17:58:25 +0100 Subject: Branding: cache all available events This is a prequel to adding a calendar command. To avoid re-querying the branding repo on command invocation, event information will be cached whenever we make requests. The command can then simply get an up-to-date event schedule from the cache, with the option of forcing an update via the 'populate_cache_events' function. Since we cannot easily serialize entire 'Event' instances, we simply store what's needed - the event name, and its duration. The author has verified that the cache maintains order; in this case chronological order based on event start date. 
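
As a rough, standalone sketch of the schedule cache described above (not the cog's actual code): event directory paths and start/end dates can be flattened into an ordered name -> duration mapping. The RawEvent tuple, the build_schedule helper and the sample dates are illustrative assumptions; only the snake-case-to-title-case naming and the '%B %d' duration format follow what the diffs in this series use.

import typing as t
from datetime import date
from operator import itemgetter

# Hypothetical stand-in for an event's metadata: (path, start_date, end_date).
RawEvent = t.Tuple[str, date, date]


def event_name(path: str) -> str:
    # 'events/black_history_month' -> 'Black History Month'
    return " ".join(word.title() for word in path.split("/")[-1].split("_"))


def event_duration(start: date, end: date) -> str:
    fmt = "%B %d"  # Ex: August 23
    return f"{start.strftime(fmt)} - {end.strftime(fmt)}"


def build_schedule(events: t.Iterable[RawEvent]) -> t.Dict[str, str]:
    # Sort chronologically by start date; regular dicts preserve insertion order,
    # which is what lets the calendar view read the schedule back in order.
    ordered = sorted(events, key=itemgetter(1))
    return {event_name(path): event_duration(start, end) for path, start, end in ordered}


print(build_schedule([
    ("events/pride_month", date(2020, 6, 1), date(2020, 6, 30)),
    ("events/black_history_month", date(2020, 2, 1), date(2020, 2, 28)),
]))

The sample year is arbitrary, matching the repository's convention of defining all events in a single placeholder year.
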
--- bot/exts/backend/branding/_cog.py | 49 +++++++++++++++++++++++++++++--- bot/exts/backend/branding/_repository.py | 18 +++++++----- 2 files changed, 56 insertions(+), 11 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 332d4ad58..50ae11b11 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -62,6 +62,18 @@ def extract_event_duration(event: Event) -> str: return f"{start_date} - {end_date}" +def extract_event_name(event: Event) -> str: + """ + Extract title-cased event name from the path of `event`. + + An event with a path of 'events/black_history_month' will resolve to 'Black History Month'. + """ + name = event.path.split("/")[-1] # Inner-most directory name + words = name.split("_") # Words from snake case + + return " ".join(word.title() for word in words) + + class Branding(commands.Cog): """Guild branding management.""" @@ -80,6 +92,10 @@ class Branding(commands.Cog): # corresponding to the amount of times each icon has been used in the current rotation cache_icons = RedisCache() + # Cache holding all available event names & their durations; this is cached by the daemon and read by + # the calendar command with the intention of preventing API spam; doesn't contain the fallback event + cache_events = RedisCache() + def __init__(self, bot: Bot) -> None: """Instantiate repository abstraction & allow daemon to start.""" self.bot = bot @@ -271,12 +287,35 @@ class Branding(commands.Cog): """ log.debug("Synchronise: fetching current event") - event = await self.repository.get_current_event() + current_event, available_events = await self.repository.get_current_event() - if event is None: + await self.populate_cache_events(available_events) + + if current_event is None: log.error("Failed to fetch event ~ cannot synchronise!") else: - await self.enter_event(event) + await self.enter_event(current_event) + + async def populate_cache_events(self, events: t.List[Event]) -> None: + """ + Clear `cache_events` and re-populate with names and durations of `events`. + + For each event, we store its name and duration string. This is the information presented to users in the + calendar command. If a format change is needed, it has to be done here. + + The cache does not store the fallback event, as it is not shown in the calendar. 
+ """ + log.debug(f"Populating events cache with {len(events)} events") + + await self.cache_events.clear() + + no_fallback = [event for event in events if not event.meta.is_fallback] + chronological_events = sorted(no_fallback, key=lambda event_: event_.meta.start_date) + + await self.cache_events.update({ + extract_event_name(event): extract_event_duration(event) + for event in chronological_events + }) # endregion # region: Daemon @@ -322,7 +361,9 @@ class Branding(commands.Cog): """ log.debug("Daemon awakens: checking current event") - new_event = await self.repository.get_current_event() + new_event, available_events = await self.repository.get_current_event() + + await self.populate_cache_events(available_events) if new_event is None: log.warning("Failed to get current event from the branding repository, daemon will do nothing!") diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index ef292619e..b1368c59e 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -189,11 +189,14 @@ class BrandingRepository: log.trace(f"Found {len(instances)} correctly configured events") return instances - async def get_current_event(self) -> t.Optional[Event]: + async def get_current_event(self) -> t.Tuple[t.Optional[Event], t.List[Event]]: """ Get the currently active event, or the fallback event. - Returns None in the case that no event is active, and no fallback event is found. + The second return value is a list of all available events. The caller may discard it, if not needed. + Returning all events alongside the current one prevents having to query the API twice in some cases. + + The current event may be None in the case that no event is active, and no fallback event is found. """ utc_now = datetime.utcnow() log.debug(f"Finding active event for: {utc_now}") @@ -201,17 +204,18 @@ class BrandingRepository: # As all events exist in the arbitrary year, we construct a separate object for the purposes of comparison lookup_now = date(year=ARBITRARY_YEAR, month=utc_now.month, day=utc_now.day) - events = await self.get_events() + available_events = await self.get_events() - for event in events: + for event in available_events: meta = event.meta if not meta.is_fallback and (meta.start_date <= lookup_now <= meta.end_date): - return event + return event, available_events log.debug("No active event found, looking for fallback") - for event in events: + for event in available_events: if event.meta.is_fallback: - return event + return event, available_events log.warning("No event is currently active and no fallback event was found!") + return None, available_events -- cgit v1.2.3 From c047af35cbd32104f94f8619b1e861393a2ad4ce Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 18:09:13 +0100 Subject: Branding: add calendar command group --- bot/exts/backend/branding/_cog.py | 63 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 50ae11b11..0640ca243 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -444,6 +444,69 @@ class Branding(commands.Cog): ) await ctx.send(embed=resp) + # endregion + # region: Command interface (branding calendar) + + @branding_group.group(name="calendar", aliases=("schedule",)) + async def branding_calendar_group(self, ctx: commands.Context) -> None: + """ + Show the current event calendar. 
+ + We draw event information from `cache_events` and use each key-value pair to create a field in the response + embed. As such, we do not need to query the API to get event information. The cache is automatically + re-populated by the daemon whenever it makes a request. A moderator+ can also explicitly request a cache + refresh using the 'refresh' subcommand. + + Due to Discord limitations, we only show up to 25 events. This is entirely sufficient at the time of writing. + In the case that we find ourselves with more than 25 events, a warning log will alert core devs. + + In the future, we may be interested in a field-paginating solution. + """ + if ctx.invoked_subcommand: + # If you're wondering why this works: when the 'refresh' subcommand eventually re-invokes + # this group, the attribute will be automatically set to None by the framework + return + + available_events = await self.cache_events.to_dict() + log.debug(f"Found {len(available_events)} cached events available for calendar view") + + if not available_events: + resp = make_embed("No events found!", "Cache may be empty, try `branding calendar refresh`.", success=False) + await ctx.send(embed=resp) + return + + embed = discord.Embed(title="Current event calendar", colour=discord.Colour.blurple()) + + # Because a Discord embed can only contain up to 25 fields, we only show the first 25 + first_25 = list(available_events.items())[:25] + + if len(first_25) != len(available_events): # Alert core devs that a paginating solution is now necessary + log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25!") + + for name, duration in first_25: + embed.add_field(name=name, value=duration) + + embed.set_footer(text="Otherwise, the fallback season is used.") + + await ctx.send(embed=embed) + + @commands.has_any_role(*MODERATION_ROLES) + @branding_calendar_group.command(name="refresh") + async def branding_calendar_refresh_cmd(self, ctx: commands.Context) -> None: + """ + Refresh event cache and show current event calendar. + + Supplementary subcommand allowing force-refreshing the event cache. Implemented as a subcommand because + unlike the supergroup, it requires moderator privileges. 
+ """ + log.debug("Performing command-requested event cache refresh") + + async with ctx.typing(): + available_events = await self.repository.get_events() + await self.populate_cache_events(available_events) + + await ctx.invoke(self.branding_calendar_group) + # endregion # region: Command interface (branding daemon) -- cgit v1.2.3 From eff113e5c93474d295079b92a1c28650b51f8e7a Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 13 Mar 2021 21:56:37 +0100 Subject: Branding: replace ugly lambda with 'attrgetter' --- bot/exts/backend/branding/_cog.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 0640ca243..df0ef2a5f 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -4,6 +4,7 @@ import random import typing as t from datetime import datetime, time, timedelta from enum import Enum +from operator import attrgetter import async_timeout import discord @@ -310,7 +311,7 @@ class Branding(commands.Cog): await self.cache_events.clear() no_fallback = [event for event in events if not event.meta.is_fallback] - chronological_events = sorted(no_fallback, key=lambda event_: event_.meta.start_date) + chronological_events = sorted(no_fallback, key=attrgetter("meta.start_date")) await self.cache_events.update({ extract_event_name(event): extract_event_duration(event) -- cgit v1.2.3 From a48e79a5ee108b47914029513a4d5cd1fa4b72a6 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 00:07:14 +0100 Subject: Branding: do not call 'rotate_icons' from rotation init It makes more sense for the init and the rotation to be separate operations. In a subsequent commit, the separation of responsibility will allow the `rotate_icons` function to have a meaningful return value. --- bot/exts/backend/branding/_cog.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index df0ef2a5f..cd645fba4 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -208,6 +208,8 @@ class Branding(commands.Cog): This function should be called whenever the set of `available_icons` changes. This is generally the case when we enter a new event, but potentially also when the assets of an on-going event change. In such cases, a reset of `cache_icons` is necessary, because it contains download URLs which may have gotten stale. + + This function does not upload a new icon! 
""" log.debug("Initiating new icon rotation") @@ -218,7 +220,6 @@ class Branding(commands.Cog): log.trace(f"Icon rotation initiated for {len(new_state)} icons") - await self.rotate_icons() await self.cache_information.set("icons_hash", compound_hash(available_icons)) async def send_info_embed(self, channel_id: int) -> None: @@ -266,7 +267,9 @@ class Branding(commands.Cog): log.debug(f"Entering new event: {event.path}") await self.apply_banner(event.banner) # Only one asset ~ apply directly - await self.initiate_icon_rotation(event.icons) # Extra layer of abstraction to handle multiple assets + + await self.initiate_icon_rotation(event.icons) # Prepare a new rotation + await self.rotate_icons() # Apply an icon from the new rotation # Cache event identity to avoid re-entry in case of restart await self.cache_information.set("event_path", event.path) @@ -384,6 +387,7 @@ class Branding(commands.Cog): if compound_hash(new_event.icons) != await self.cache_information.get("icons_hash"): log.debug("Detected same-event icon change!") await self.initiate_icon_rotation(new_event.icons) + await self.rotate_icons() else: await self.maybe_rotate_icons() -- cgit v1.2.3 From d7bd0c348d6dd8be18174bb67ecf210362070b20 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 00:32:30 +0100 Subject: Branding: propagate success-indicating boolean from 'apply_asset' The sync command will now be able to use present this information to the invoking user. This commit also prevents the cached banner & icon hash from being overwritten in the case of asset upload failure. As a result, the daemon will attempt to re-apply the assets the following day. --- bot/exts/backend/branding/_cog.py | 62 +++++++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index cd645fba4..dd19832af 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -106,13 +106,15 @@ class Branding(commands.Cog): # region: Internal utility - @mock_in_debug(return_value=None) - async def apply_asset(self, asset_type: AssetType, download_url: str) -> None: + @mock_in_debug(return_value=True) + async def apply_asset(self, asset_type: AssetType, download_url: str) -> bool: """ Download asset from `download_url` and apply it to PyDis as `asset_type`. This function is mocked in the development environment in order to prevent API spam during testing. Decorator should be temporarily removed in order to test internal methodology. + + Returns a boolean indicating whether the application was successful. """ log.info(f"Applying {asset_type.value} asset to the guild") @@ -120,7 +122,7 @@ class Branding(commands.Cog): if file is None: log.error(f"Failed to download {asset_type.value} from branding repository!") - return + return False await self.bot.wait_until_guild_available() pydis: discord.Guild = self.bot.get_guild(Guild.id) @@ -131,21 +133,30 @@ class Branding(commands.Cog): await pydis.edit(**{asset_type.value: file}) except discord.HTTPException as http_exc: log.error(f"Asset upload to Discord failed: {http_exc}") + return False except asyncio.TimeoutError: log.error(f"Asset upload to Discord timed out after {timeout} seconds!") + return False else: log.debug("Asset uploaded successfully!") + return True - async def apply_banner(self, banner: RemoteObject) -> None: + async def apply_banner(self, banner: RemoteObject) -> bool: """ - Apply `banner` to the guild and cache its hash. 
+ Apply `banner` to the guild and cache its hash if successful. Banners should always be applied via this method in order to ensure that the last hash is cached. + + Returns a boolean indicating whether the application was successful. """ - await self.apply_asset(AssetType.BANNER, banner.download_url) - await self.cache_information.set("banner_hash", banner.sha) + success = await self.apply_asset(AssetType.BANNER, banner.download_url) + + if success: + await self.cache_information.set("banner_hash", banner.sha) - async def rotate_icons(self) -> None: + return success + + async def rotate_icons(self) -> bool: """ Choose and apply the next-up icon in rotation. @@ -155,6 +166,8 @@ class Branding(commands.Cog): Once the current iteration (lowest count in the cache) depletes, we move onto the next iteration. In the case that there is only 1 icon in the rotation and has already been applied, do nothing. + + Returns a boolean indicating whether a new icon was applied successfully. """ log.debug("Rotating icons") @@ -163,7 +176,7 @@ class Branding(commands.Cog): if len(state) == 1 and 1 in state.values(): log.debug("Aborting icon rotation: only 1 icon is available and has already been applied") - return + return False current_iteration = min(state.values()) # Choose iteration to draw from options = [download_url for download_url, times_used in state.items() if times_used == current_iteration] @@ -171,11 +184,15 @@ class Branding(commands.Cog): log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}") next_icon = random.choice(options) - await self.apply_asset(AssetType.ICON, next_icon) - await self.cache_icons.increment(next_icon) # Push the icon into the next iteration + success = await self.apply_asset(AssetType.ICON, next_icon) - timestamp = datetime.utcnow().timestamp() - await self.cache_information.set("last_rotation_timestamp", timestamp) + if success: + await self.cache_icons.increment(next_icon) # Push the icon into the next iteration + + timestamp = datetime.utcnow().timestamp() + await self.cache_information.set("last_rotation_timestamp", timestamp) + + return success async def maybe_rotate_icons(self) -> None: """ @@ -251,7 +268,7 @@ class Branding(commands.Cog): await channel.send(embed=embed) - async def enter_event(self, event: Event) -> None: + async def enter_event(self, event: Event) -> t.Tuple[bool, bool]: """ Enter `event` and update information cache. @@ -263,13 +280,15 @@ class Branding(commands.Cog): * Provide an on-demand information embed without re-querying the branding repository An event change should always be handled via this function, as it ensures that the cache is populated. + + Returns a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ log.debug(f"Entering new event: {event.path}") - await self.apply_banner(event.banner) # Only one asset ~ apply directly + banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly await self.initiate_icon_rotation(event.icons) # Prepare a new rotation - await self.rotate_icons() # Apply an icon from the new rotation + icon_success = await self.rotate_icons() # Apply an icon from the new rotation # Cache event identity to avoid re-entry in case of restart await self.cache_information.set("event_path", event.path) @@ -281,13 +300,17 @@ class Branding(commands.Cog): # Notify guild of new event ~ this reads the information that we cached above! 
await self.send_info_embed(Channels.change_log) - async def synchronise(self) -> None: + return banner_success, icon_success + + async def synchronise(self) -> t.Tuple[bool, bool]: """ Fetch the current event and delegate to `enter_event`. This is a convenience wrapper to force synchronisation either via a command, or when the daemon starts with an empty cache. It is generally only used in a recovery scenario. In the usual case, the daemon already has an `Event` instance and can pass it to `enter_event` directly. + + Returns a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ log.debug("Synchronise: fetching current event") @@ -297,8 +320,9 @@ class Branding(commands.Cog): if current_event is None: log.error("Failed to fetch event ~ cannot synchronise!") - else: - await self.enter_event(current_event) + return False, False + + return await self.enter_event(current_event) async def populate_cache_events(self, events: t.List[Event]) -> None: """ -- cgit v1.2.3 From 7900c630b81c0800c2bd7cb9e205cab8eeeac4ed Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 09:49:55 +0100 Subject: Branding: show success information in 'sync' response Now that the boolean flags are propagating from 'apply_asset', we can present them to the user. --- bot/exts/backend/branding/_cog.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index dd19832af..0664a5c6c 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -462,15 +462,26 @@ class Branding(commands.Cog): @commands.has_any_role(*MODERATION_ROLES) @branding_group.command(name="sync") async def branding_sync_cmd(self, ctx: commands.Context) -> None: - """Force branding synchronisation.""" + """ + Force branding synchronisation. + + Shows which assets have failed to synchronise, if any. + """ async with ctx.typing(): - await self.synchronise() + banner_success, icon_success = await self.synchronise() - resp = make_embed( - "Synchronisation complete", - "If something doesn't look right, check log for errors.", - success=True, + failed_assets = ", ".join( + name + for name, status in [("banner", banner_success), ("icon", icon_success)] + if status is False ) + + if failed_assets: + resp = make_embed("Synchronisation unsuccessful", f"Failed to apply: {failed_assets}.", success=False) + resp.set_footer(text="Check log for details.") + else: + resp = make_embed("Synchronisation successful", "Assets have been applied.", success=True) + await ctx.send(embed=resp) # endregion -- cgit v1.2.3 From 49720c72df05703168756d8fcadd017e11dd6ece Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 09:52:25 +0100 Subject: Branding: make 'cog_unload' synchronous Discord.py doesn't await the return value. --- bot/exts/backend/branding/_cog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 0664a5c6c..269aa6ad2 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -361,7 +361,7 @@ class Branding(commands.Cog): if should_begin: self.daemon_main.start() - async def cog_unload(self) -> None: + def cog_unload(self) -> None: """ Cancel the daemon in case of cog unload. 
-- cgit v1.2.3 From 71839ab8b15450201db1dfd8a321dd84b4dd140f Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 10:05:08 +0100 Subject: Branding: avoid sending #changelog notification on resync The notification is now sent conditionally depending on whether we're entering a new event. This prevents sending a repeating notification in the case of a manual resynchronisation. A practical example of when this may trigger is when a staff member temporarily applies custom assets & then uses the sync command to reapply the current event. --- bot/exts/backend/branding/_cog.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 269aa6ad2..df1c2d61c 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -270,26 +270,28 @@ class Branding(commands.Cog): async def enter_event(self, event: Event) -> t.Tuple[bool, bool]: """ - Enter `event` and update information cache. + Apply `event` assets and update information cache. - From the outside, entering a new event is as simple as applying its branding to the guild and dispatching - a notification to #changelog. - - However, internally we cache information to ensure that we: + We cache `event` information to ensure that we: * Remember which event we're currently in across restarts * Provide an on-demand information embed without re-querying the branding repository An event change should always be handled via this function, as it ensures that the cache is populated. + The #changelog notification is sent only if `event` differs from the currently cached event. + Returns a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ - log.debug(f"Entering new event: {event.path}") + log.debug(f"Entering event: {event.path}") banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly await self.initiate_icon_rotation(event.icons) # Prepare a new rotation icon_success = await self.rotate_icons() # Apply an icon from the new rotation + # This will only be False in the case of a manual same-event re-synchronisation + event_changed = event.path != await self.cache_information.get("event_path") + # Cache event identity to avoid re-entry in case of restart await self.cache_information.set("event_path", event.path) @@ -298,7 +300,10 @@ class Branding(commands.Cog): await self.cache_information.set("event_description", event.meta.description) # Notify guild of new event ~ this reads the information that we cached above! - await self.send_info_embed(Channels.change_log) + if event_changed: + await self.send_info_embed(Channels.change_log) + else: + log.trace("Omitted #changelog notification as event has not changed (indicating manual re-sync)") return banner_success, icon_success -- cgit v1.2.3 From 8f61115cd9773997cb79b7df799318c83f6b7651 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 10:08:18 +0100 Subject: Branding: log event path alongside error Knowing which event failed would probably be quite useful. 
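
The 'avoid sending #changelog notification on resync' change earlier in this hunk comes down to comparing the incoming event against the cached identity before overwriting it. Below is a stripped-down sketch of that ordering, with a plain dict and a notify callback standing in for the Redis cache and the #changelog embed; enter_event_sketch and its parameters are assumptions for illustration, not the cog's API.

import asyncio
import typing as t


async def enter_event_sketch(
    cache: t.Dict[str, str],
    new_event_path: str,
    notify: t.Callable[[], t.Awaitable[None]],
) -> None:
    # Compare against the cached path *before* overwriting it: a manual re-sync of the
    # same event then skips the announcement, while a genuine event change still sends it.
    event_changed = new_event_path != cache.get("event_path")
    cache["event_path"] = new_event_path

    if event_changed:
        await notify()


async def announce() -> None:
    print("New event announced in #changelog")


async def demo() -> None:
    cache: t.Dict[str, str] = {}
    await enter_event_sketch(cache, "events/pride_month", announce)   # announces
    await enter_event_sketch(cache, "events/pride_month", announce)   # re-sync: silent
    await enter_event_sketch(cache, "events/halloween", announce)     # announces again


asyncio.run(demo())
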
--- bot/exts/backend/branding/_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index b1368c59e..2f96396c0 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -182,7 +182,7 @@ class BrandingRepository: try: instance = await self.construct_event(event_directory) except Exception as exc: - log.warning(f"Could not construct event: {exc}") + log.warning(f"Could not construct event '{event_directory.path}': {exc}") else: instances.append(instance) -- cgit v1.2.3 From 5baf7fea618cc486e5d380f95f645d75d5e2048a Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 10:10:16 +0100 Subject: Branding: log correct amount of cached events --- bot/exts/backend/branding/_cog.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index df1c2d61c..1c8ea1f05 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -338,13 +338,15 @@ class Branding(commands.Cog): The cache does not store the fallback event, as it is not shown in the calendar. """ - log.debug(f"Populating events cache with {len(events)} events") + log.debug("Populating events cache") await self.cache_events.clear() no_fallback = [event for event in events if not event.meta.is_fallback] chronological_events = sorted(no_fallback, key=attrgetter("meta.start_date")) + log.trace(f"Writing {len(chronological_events)} events (fallback omitted)") + await self.cache_events.update({ extract_event_name(event): extract_event_duration(event) for event in chronological_events -- cgit v1.2.3 From 6a5d14be71e3453f7040602e64bfe9bd77e3555d Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 15:21:29 +0100 Subject: Branding: provide class documentation --- bot/exts/backend/branding/_cog.py | 24 +++++++++++++++++++++++- bot/exts/backend/branding/_repository.py | 21 ++++++++++++++++++++- 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 1c8ea1f05..7519f029e 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -76,7 +76,29 @@ def extract_event_name(event: Event) -> str: class Branding(commands.Cog): - """Guild branding management.""" + """ + Guild branding management. + + This cog is responsible for automatic management of the guild's branding while sourcing assets directly from + the branding repository. + + We utilize multiple Redis caches to persist state. As a result, the cog should seamlessly transition across + restarts without having to query either the Discord or GitHub APIs, as it will always remember which + assets are currently applied. + + Additionally, the state of the icon rotation is persisted. As a result, the rotation doesn't reset unless + the current event or its icons change. + + The cog is designed to be autonomous. The daemon, unless disabled, will poll the branding repository at + midnight every day and respond to detected changes. Since we persist SHA hashes of tracked assets, + changes in an on-going event will trigger automatic resynchronisation. + + A #changelog notification is automatically sent when entering a new event. Changes in the branding of + an on-going event do not trigger a repeated notification. 
+ + The command interface allows moderators+ to control the daemon or request an asset synchronisation, + while regular users can see information about the current event and the overall event schedule. + """ # RedisCache[ # "daemon_active": If True, daemon auto-starts; controlled via commands (bool) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 2f96396c0..a612b6752 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -69,7 +69,26 @@ class Event(t.NamedTuple): class BrandingRepository: - """Abstraction exposing the branding repository via convenient methods.""" + """ + Branding repository abstraction. + + This class represents the branding repository's main branch and exposes available events and assets as objects. + + The API is primarily formed by the `get_current_event` function. It performs the necessary amount of validation + to ensure that a misconfigured event isn't returned. Such events are simply ignored, and will be substituted + with the fallback event, if available. + + Warning logs will inform core developers if a misconfigured event is encountered. + + Colliding events cause no special behaviour - in such cases, the first found active event is returned. + We work with the assumption that the branding repository checks for such conflicts and prevents them + from reaching the main branch. + + This class keeps no internal state. All `get_current_event` calls will result in GitHub API requests. + The caller is therefore responsible for being responsible and caching information to prevent API abuse. + + Requests are made using the HTTP session looked up on the bot instance. + """ def __init__(self, bot: Bot) -> None: self.bot = bot -- cgit v1.2.3 From ba01289d12f047bdd0465daaa95f12d4cf1eddb9 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 16:11:04 +0100 Subject: Branding: add embed length cut-offs for safety This should never do anything, but it's better to be safe. Values taken from Discord developer docs. --- bot/exts/backend/branding/_cog.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 7519f029e..75d912530 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -44,7 +44,7 @@ def make_embed(title: str, description: str, *, success: bool) -> discord.Embed: For both `title` and `description`, empty string are valid values ~ fields will be empty. 
""" colour = Colours.soft_green if success else Colours.soft_red - return discord.Embed(title=title, description=description, colour=colour) + return discord.Embed(title=title[:256], description=description[:2048], colour=colour) def extract_event_duration(event: Event) -> str: @@ -282,11 +282,14 @@ class Branding(commands.Cog): log.debug(f"Destination channel: #{channel.name}") - embed = discord.Embed( - description=await self.cache_information.get("event_description"), - colour=discord.Colour.blurple(), - ) - embed.set_footer(text=await self.cache_information.get("event_duration")) + description = await self.cache_information.get("event_description") + duration = await self.cache_information.get("event_duration") + + if None in (description, duration): + embed = make_embed("No event in cache", "Is the daemon enabled?", success=False) + else: + embed = discord.Embed(description=description[:2048], colour=discord.Colour.blurple()) + embed.set_footer(text=duration[:2048]) await channel.send(embed=embed) @@ -553,7 +556,7 @@ class Branding(commands.Cog): log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25!") for name, duration in first_25: - embed.add_field(name=name, value=duration) + embed.add_field(name=name[:256], value=duration[:1024]) embed.set_footer(text="Otherwise, the fallback season is used.") -- cgit v1.2.3 From 8aacf079cf44259b88562bda0b9e78d43ba3fd68 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 16:30:28 +0100 Subject: Branding: check for empty icon cache --- bot/exts/backend/branding/_cog.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 75d912530..43f0d742b 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -196,6 +196,10 @@ class Branding(commands.Cog): state = await self.cache_icons.to_dict() log.trace(f"Total icons in rotation: {len(state)}") + if not state: # This would only happen if rotation not initiated, but we can handle gracefully + log.warning("Attempted icon rotation with an empty icon cache!") + return False + if len(state) == 1 and 1 in state.values(): log.debug("Aborting icon rotation: only 1 icon is available and has already been applied") return False -- cgit v1.2.3 From 081169a91bde21700195a51ca1f6fec7dcda76ba Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sun, 14 Mar 2021 21:54:26 +0100 Subject: Branding: suppress 'ValueError' on empty-dict update --- bot/exts/backend/branding/_cog.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 43f0d742b..025a609b5 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -1,4 +1,5 @@ import asyncio +import contextlib import logging import random import typing as t @@ -376,10 +377,11 @@ class Branding(commands.Cog): log.trace(f"Writing {len(chronological_events)} events (fallback omitted)") - await self.cache_events.update({ - extract_event_name(event): extract_event_duration(event) - for event in chronological_events - }) + with contextlib.suppress(ValueError): # Cache raises when updated with an empty dict + await self.cache_events.update({ + extract_event_name(event): extract_event_duration(event) + for event in chronological_events + }) # endregion # region: Daemon -- cgit v1.2.3 From 290a082207faa94dea0f468ef0cab793e1e2cae9 Mon Sep 17 00:00:00 2001 From: vcokltfre Date: Sun, 14 Mar 2021 21:13:36 +0000 
Subject: feat: add new discord.py tags --- bot/resources/tags/customhelp.md | 3 +++ bot/resources/tags/intents.md | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 bot/resources/tags/customhelp.md create mode 100644 bot/resources/tags/intents.md diff --git a/bot/resources/tags/customhelp.md b/bot/resources/tags/customhelp.md new file mode 100644 index 000000000..b787fe673 --- /dev/null +++ b/bot/resources/tags/customhelp.md @@ -0,0 +1,3 @@ +**Custom help commands in discord.py** + +To learn more about how to create custom help commands in discord.py by subclassing the help command, please see [this tutorial](https://gist.github.com/InterStella0/b78488fb28cadf279dfd3164b9f0cf96#embed-minimalhelpcommand) by Stella#2000 \ No newline at end of file diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md new file mode 100644 index 000000000..642e65764 --- /dev/null +++ b/bot/resources/tags/intents.md @@ -0,0 +1,19 @@ +**Using intents in discord.py** + +Intents are a feature of Discord that tells the gateway exactly which events to send your bot. By default discord.py has all intents enabled, except for the `Members` and `Presences` intents, which are needed for events such as `on_member` and to get members' statuses. + +To enable one of these intents you need to first to to the [Discord developer portal](https://discord.com/developers/applications), then to the bot page of your bot's application. Scroll down to the `Privileged Gateway Intents` section, and enable the intents that you need. + +Next, in your bot you need to set the intents you want to connect with in the bot's constructor using the `intents` keyword argument, like this: + +```py +from discord import Intents +from discord.ext import commands + +intents = Intents.default() +intents.members = True + +bot = commands.Bot(command_prefix="!", intents=intents) +``` + +For more info about using intents, see the [discord.py docs on intents.](https://discordpy.readthedocs.io/en/latest/intents.html) \ No newline at end of file -- cgit v1.2.3 From 39b4da6a242a96ac298119d60f89bf2af69a952f Mon Sep 17 00:00:00 2001 From: vcokltfre Date: Sun, 14 Mar 2021 21:16:42 +0000 Subject: fix: add newline file endings --- bot/resources/tags/customhelp.md | 2 +- bot/resources/tags/intents.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/resources/tags/customhelp.md b/bot/resources/tags/customhelp.md index b787fe673..6f0b17642 100644 --- a/bot/resources/tags/customhelp.md +++ b/bot/resources/tags/customhelp.md @@ -1,3 +1,3 @@ **Custom help commands in discord.py** -To learn more about how to create custom help commands in discord.py by subclassing the help command, please see [this tutorial](https://gist.github.com/InterStella0/b78488fb28cadf279dfd3164b9f0cf96#embed-minimalhelpcommand) by Stella#2000 \ No newline at end of file +To learn more about how to create custom help commands in discord.py by subclassing the help command, please see [this tutorial](https://gist.github.com/InterStella0/b78488fb28cadf279dfd3164b9f0cf96#embed-minimalhelpcommand) by Stella#2000 diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md index 642e65764..9171b2314 100644 --- a/bot/resources/tags/intents.md +++ b/bot/resources/tags/intents.md @@ -16,4 +16,4 @@ intents.members = True bot = commands.Bot(command_prefix="!", intents=intents) ``` -For more info about using intents, see the [discord.py docs on intents.](https://discordpy.readthedocs.io/en/latest/intents.html) \ No newline at 
end of file +For more info about using intents, see the [discord.py docs on intents.](https://discordpy.readthedocs.io/en/latest/intents.html) -- cgit v1.2.3 From b8a74372c6f37c2eda28272195a96668d324844d Mon Sep 17 00:00:00 2001 From: vcokltfre Date: Sun, 14 Mar 2021 21:44:13 +0000 Subject: fix: minor spelling correction --- bot/resources/tags/intents.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md index 9171b2314..0e94520a8 100644 --- a/bot/resources/tags/intents.md +++ b/bot/resources/tags/intents.md @@ -2,7 +2,7 @@ Intents are a feature of Discord that tells the gateway exactly which events to send your bot. By default discord.py has all intents enabled, except for the `Members` and `Presences` intents, which are needed for events such as `on_member` and to get members' statuses. -To enable one of these intents you need to first to to the [Discord developer portal](https://discord.com/developers/applications), then to the bot page of your bot's application. Scroll down to the `Privileged Gateway Intents` section, and enable the intents that you need. +To enable one of these intents you need to first go to the [Discord developer portal](https://discord.com/developers/applications), then to the bot page of your bot's application. Scroll down to the `Privileged Gateway Intents` section, and enable the intents that you need. Next, in your bot you need to set the intents you want to connect with in the bot's constructor using the `intents` keyword argument, like this: -- cgit v1.2.3 From 4fc4d1c0d0303ec7c207165bd812aeb1387e58ac Mon Sep 17 00:00:00 2001 From: vcokltfre Date: Sun, 14 Mar 2021 21:51:38 +0000 Subject: fix: more minor spelling/grammar corrections --- bot/resources/tags/intents.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md index 0e94520a8..6a282bc17 100644 --- a/bot/resources/tags/intents.md +++ b/bot/resources/tags/intents.md @@ -1,8 +1,8 @@ **Using intents in discord.py** -Intents are a feature of Discord that tells the gateway exactly which events to send your bot. By default discord.py has all intents enabled, except for the `Members` and `Presences` intents, which are needed for events such as `on_member` and to get members' statuses. +Intents are a feature of Discord that tells the gateway exactly which events to send your bot. By default, discord.py has all intents enabled, except for the `Members` and `Presences` intents, which are needed for events such as `on_member` and to get members' statuses. -To enable one of these intents you need to first go to the [Discord developer portal](https://discord.com/developers/applications), then to the bot page of your bot's application. Scroll down to the `Privileged Gateway Intents` section, and enable the intents that you need. +To enable one of these intents, you need to first go to the [Discord developer portal](https://discord.com/developers/applications), then to the bot page of your bot's application. Scroll down to the `Privileged Gateway Intents` section, then enable the intents that you need. 
Next, in your bot you need to set the intents you want to connect with in the bot's constructor using the `intents` keyword argument, like this: -- cgit v1.2.3 From e475181b5c51d6363c74e0e36f51f174bfa6ba2c Mon Sep 17 00:00:00 2001 From: kwzrd Date: Mon, 15 Mar 2021 18:40:35 +0100 Subject: Branding: decode 'meta.md' using UTF-8 --- bot/exts/backend/branding/_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index a612b6752..e14ff4226 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -134,7 +134,7 @@ class BrandingRepository: The caller is responsible for handling errors caused by misconfiguration. """ - attrs, description = frontmatter.parse(raw_file) # Library automatically decodes using UTF-8 + attrs, description = frontmatter.parse(raw_file, encoding="UTF-8") if not description: raise BrandingMisconfiguration("No description found in 'meta.md'!") -- cgit v1.2.3 From 10ce75f0d0af36e2ebf87c95d9d0b2095b541d2d Mon Sep 17 00:00:00 2001 From: kwzrd Date: Mon, 15 Mar 2021 22:30:44 +0100 Subject: Pipenv: bump 'python-frontmatter' pin & re-lock Fresh stable release, just in time! --- Pipfile | 2 +- Pipfile.lock | 52 ++++++++++++++++++++++++++++++++++------------------ 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/Pipfile b/Pipfile index e222a2108..56ece7611 100644 --- a/Pipfile +++ b/Pipfile @@ -21,7 +21,7 @@ lxml = "~=4.4" markdownify = "==0.5.3" more_itertools = "~=8.2" python-dateutil = "~=2.8" -python-frontmatter = "~=0.5.0" +python-frontmatter = "~=1.0.0" pyyaml = "~=5.1" requests = "~=2.22" sentry-sdk = "~=0.19" diff --git a/Pipfile.lock b/Pipfile.lock index 01a78af9b..589af71b7 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "e8b1d8e8a3b258f482c25fe396aaa3255c749fdeae26770fccd7ce1a35f41180" + "sha256": "bf99bc953819e8c890d5482834bf243b3c3bc4f5f637fcff4a94b0a046e7593e" }, "pipfile-spec": 6, "requires": { @@ -621,11 +621,11 @@ }, "python-frontmatter": { "hashes": [ - "sha256:a7dcdfdaf498d488dce98bfa9452f8b70f803a923760ceab1ebd99291d98d28a", - "sha256:a9c2e90fc38e9f0c68d8b82299040f331ca3b8525ac7fa5f6beffef52b26c426" + "sha256:766ae75f1b301ffc5fe3494339147e0fd80bc3deff3d7590a93991978b579b08", + "sha256:e98152e977225ddafea6f01f40b4b0f1de175766322004c826ca99842d19a7cd" ], "index": "pypi", - "version": "==0.5.0" + "version": "==1.0.0" }, "python-json-logger": { "hashes": [ @@ -655,15 +655,23 @@ "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", + "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347", "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", + "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541", "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", + "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc", "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", + "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa", "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", + "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122", 
"sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", - "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc" + "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc", + "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247", + "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6", + "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0" ], "index": "pypi", "version": "==5.4.1" @@ -796,11 +804,11 @@ }, "urllib3": { "hashes": [ - "sha256:1b465e494e3e0d8939b50680403e3aedaa2bc434b7d5af64dfd3c958d7f5ae80", - "sha256:de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73" + "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df", + "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", - "version": "==1.26.3" + "version": "==1.26.4" }, "yarl": { "hashes": [ @@ -1043,11 +1051,11 @@ }, "identify": { "hashes": [ - "sha256:2179e7359471ab55729f201b3fdf7dc2778e221f868410fedcb0987b791ba552", - "sha256:2a5fdf2f5319cc357eda2550bea713a404392495961022cf2462624ce62f0f46" + "sha256:46d1816c6a4fc2d1e8758f293a5dcc1ae6404ab344179d7c1e73637bf283beb1", + "sha256:ed4a05fb80e3cbd12e83c959f9ff7f729ba6b66ab8d6178850fd5cb4c1cf6c5d" ], "markers": "python_full_version >= '3.6.1'", - "version": "==2.1.0" + "version": "==2.1.3" }, "idna": { "hashes": [ @@ -1081,11 +1089,11 @@ }, "pre-commit": { "hashes": [ - "sha256:16212d1fde2bed88159287da88ff03796863854b04dc9f838a55979325a3d20e", - "sha256:399baf78f13f4de82a29b649afd74bef2c4e28eb4f021661fc7f29246e8c7a3a" + "sha256:94c82f1bf5899d56edb1d926732f4e75a7df29a0c8c092559c77420c9d62428b", + "sha256:de55c5c72ce80d79106e48beb1b54104d16495ce7f95b0c7b13d4784193a00af" ], "index": "pypi", - "version": "==2.10.1" + "version": "==2.11.1" }, "pycodestyle": { "hashes": [ @@ -1125,15 +1133,23 @@ "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", + "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347", "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", + "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541", "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", + "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc", "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", + "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa", "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", + "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122", "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", - "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc" + "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc", + 
"sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247", + "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6", + "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0" ], "index": "pypi", "version": "==5.4.1" @@ -1171,11 +1187,11 @@ }, "urllib3": { "hashes": [ - "sha256:1b465e494e3e0d8939b50680403e3aedaa2bc434b7d5af64dfd3c958d7f5ae80", - "sha256:de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73" + "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df", + "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", - "version": "==1.26.3" + "version": "==1.26.4" }, "virtualenv": { "hashes": [ -- cgit v1.2.3 From 6b9c95458f5f89496741f187d187c2b93561c839 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 16 Mar 2021 17:21:28 +0000 Subject: Split out help channel idle time constants This allows us to configure the idle time allowed for claiments seperate from tohers. --- bot/constants.py | 3 ++- config-default.yml | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/bot/constants.py b/bot/constants.py index 394d59a73..b4d702e1d 100644 --- a/bot/constants.py +++ b/bot/constants.py @@ -592,7 +592,8 @@ class HelpChannels(metaclass=YAMLGetter): enable: bool claim_minutes: int cmd_whitelist: List[int] - idle_minutes: int + idle_minutes_claimant: int + idle_minutes_others: int deleted_idle_minutes: int max_available: int max_total_channels: int diff --git a/config-default.yml b/config-default.yml index 49d7f84ac..9388ecb99 100644 --- a/config-default.yml +++ b/config-default.yml @@ -468,8 +468,12 @@ help_channels: cmd_whitelist: - *HELPERS_ROLE - # Allowed duration of inactivity before making a channel dormant - idle_minutes: 30 + # Allowed duration of inactivity by claimant before making a channel dormant + idle_minutes_claimant: 30 + + # Allowed duration of inactivity by others before making a channel dormant + # `idle_minutes_claimant` must also be met, before a channel is closed + idle_minutes_others: 30 # Allowed duration of inactivity when channel is empty (due to deleted messages) # before message making a channel dormant -- cgit v1.2.3 From ce8616988d2e499f8dc79b22e8287c5a0ed50c19 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 16 Mar 2021 20:06:25 +0000 Subject: Set a reasonable default for `idle_minutes_others`. --- config-default.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config-default.yml b/config-default.yml index 9388ecb99..38144c90c 100644 --- a/config-default.yml +++ b/config-default.yml @@ -473,7 +473,7 @@ help_channels: # Allowed duration of inactivity by others before making a channel dormant # `idle_minutes_claimant` must also be met, before a channel is closed - idle_minutes_others: 30 + idle_minutes_others: 10 # Allowed duration of inactivity when channel is empty (due to deleted messages) # before message making a channel dormant -- cgit v1.2.3 From 243d4657bfeafe31bc3ba9666b35a88eeef74a92 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 16 Mar 2021 20:07:34 +0000 Subject: Update help availible footer As we have complicated this logic, we now don't specify exactly how long until the channel will close. 
--- bot/exts/help_channels/_message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index 36388f9bd..011b4cdbe 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -28,7 +28,7 @@ For more tips, check out our guide on **[asking good questions]({ASKING_GUIDE_UR AVAILABLE_TITLE = "Available help channel" -AVAILABLE_FOOTER = f"Closes after {constants.HelpChannels.idle_minutes} minutes of inactivity or when you send !close." +AVAILABLE_FOOTER = "Closes after being idle for some time, or when you send !close." DORMANT_MSG = f""" This help channel has been marked as **dormant**, and has been moved into the **Help: Dormant** \ -- cgit v1.2.3 From b3c66a6fb07ebc92c0b53d946cf10df6c1107303 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 16 Mar 2021 20:08:53 +0000 Subject: Extend close time logic to differentiate between the claimant and other users. --- bot/exts/help_channels/_caches.py | 8 ++++++ bot/exts/help_channels/_channel.py | 54 ++++++++++++++++++++++++++------------ bot/exts/help_channels/_cog.py | 42 ++++++++++++++++++++--------- 3 files changed, 75 insertions(+), 29 deletions(-) diff --git a/bot/exts/help_channels/_caches.py b/bot/exts/help_channels/_caches.py index 4cea385b7..c790a37b1 100644 --- a/bot/exts/help_channels/_caches.py +++ b/bot/exts/help_channels/_caches.py @@ -8,6 +8,14 @@ claim_times = RedisCache(namespace="HelpChannels.claim_times") # RedisCache[discord.TextChannel.id, t.Union[discord.User.id, discord.Member.id]] claimants = RedisCache(namespace="HelpChannels.help_channel_claimants") +# Stores the timestamp of the last message in a help channel +# RedisCache[discord.TextChannel.id, UtcPosixTimestamp] +last_message = RedisCache(namespace="HelpChannels.last_message") + +# Stores the timestamp of the last message from the claimant of a help channel +# RedisCache[discord.TextChannel.id, UtcPosixTimestamp] +claimant_last_message = RedisCache(namespace="HelpChannels.claimant_last_message") + # This cache maps a help channel to original question message in same channel. # RedisCache[discord.TextChannel.id, discord.Message.id] question_messages = RedisCache(namespace="HelpChannels.question_messages") diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 224214b00..95b8cdc1f 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -25,23 +25,43 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco yield channel -async def get_idle_time(channel: discord.TextChannel) -> t.Optional[int]: - """ - Return the time elapsed, in seconds, since the last message sent in the `channel`. - - Return None if the channel has no messages. 
- """ - log.trace(f"Getting the idle time for #{channel} ({channel.id}).") - - msg = await _message.get_last_message(channel) - if not msg: - log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages.") - return None - - idle_time = (datetime.utcnow() - msg.created_at).seconds - - log.trace(f"#{channel} ({channel.id}) has been idle for {idle_time} seconds.") - return idle_time +async def get_closing_time(channel: discord.TextChannel) -> t.Optional[int]: + """Return the timestamp at which the given help `channel` should be closed.""" + log.trace(f"Getting the closing time for #{channel} ({channel.id}).") + + if await _message.is_empty(channel): + idle_minutes = constants.HelpChannels.deleted_idle_minutes + else: + idle_minutes = constants.HelpChannels.idle_minutes_others + + last_message = await _caches.last_message.get(channel.id) + claimant_last_message = await _caches.claimant_last_message.get(channel.id) + + if not (last_message or claimant_last_message): + # Using the old method if we can't get cached info. + msg = await _message.get_last_message(channel) + if not msg: + log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages.") + return datetime.min + + # We want to get the time at which a channel should be closed. + closing_time = msg.created_at + closing_time += timedelta(minutes=idle_minutes) + + return closing_time + + # We want to get the time at which a channel should be closed. + last_message = datetime.fromtimestamp(last_message) + claimant = datetime.fromtimestamp(claimant_last_message) + + last_message += timedelta(minutes=idle_minutes) + claimant += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) + + # The further away closing time is what we should use. + closing_time = max(claimant, last_message) + log.trace(f"claimant: {claimant}, last_message: {last_message}") + log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time}.") + return closing_time async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 1c730dce9..db14ce0ef 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -43,7 +43,9 @@ class HelpChannels(commands.Cog): In Use Category * Contains all channels which are occupied by someone needing help - * Channel moves to dormant category after `constants.HelpChannels.idle_minutes` of being idle + * Channel moves to dormant category after `constants.HelpChannels.idle_minutes_other` minutes + since the last user message, or `constants.HelpChannels.idle_minutes_claimant` minutes + since the last claimant message. * Command can prematurely mark a channel as dormant * Channel claimant is allowed to use the command * Allowed roles for the command are configurable with `constants.HelpChannels.cmd_whitelist` @@ -293,16 +295,12 @@ class HelpChannels(commands.Cog): """ log.trace(f"Handling in-use channel #{channel} ({channel.id}).") - if not await _message.is_empty(channel): - idle_seconds = constants.HelpChannels.idle_minutes * 60 - else: - idle_seconds = constants.HelpChannels.deleted_idle_minutes * 60 - - time_elapsed = await _channel.get_idle_time(channel) + closing_time = await _channel.get_closing_time(channel) + # The time at which the channel should be closed, based on messages sent. 
+ if closing_time < datetime.utcnow(): - if time_elapsed is None or time_elapsed >= idle_seconds: log.info( - f"#{channel} ({channel.id}) is idle longer than {idle_seconds} seconds " + f"#{channel} ({channel.id}) is idle past {closing_time} " f"and will be made dormant." ) @@ -312,7 +310,7 @@ class HelpChannels(commands.Cog): if has_task: self.scheduler.cancel(channel.id) - delay = idle_seconds - time_elapsed + delay = (closing_time - datetime.utcnow()).seconds log.info( f"#{channel} ({channel.id}) is still active; " f"scheduling it to be moved after {delay} seconds." @@ -410,7 +408,7 @@ class HelpChannels(commands.Cog): category_id=constants.Categories.help_in_use, ) - timeout = constants.HelpChannels.idle_minutes * 60 + timeout = constants.HelpChannels.idle_minutes_others * 60 log.trace(f"Scheduling #{channel} ({channel.id}) to become dormant in {timeout} sec.") self.scheduler.schedule_later(timeout, channel.id, self.move_idle_channel(channel)) @@ -418,7 +416,12 @@ class HelpChannels(commands.Cog): @commands.Cog.listener() async def on_message(self, message: discord.Message) -> None: - """Move an available channel to the In Use category and replace it with a dormant one.""" + """ + Move an available channel to the In Use category and replace it with a dormant one. + + Also updates the `message_times` cache based on the current timestamp. If the message + author is the claimant of this channel, also update the claimant_last_message. + """ if message.author.bot: return # Ignore messages sent by bots. @@ -427,6 +430,21 @@ class HelpChannels(commands.Cog): if channel_utils.is_in_category(message.channel, constants.Categories.help_available): if not _channel.is_excluded_channel(message.channel): await self.claim_channel(message) + # Initialise the cache for this channel + await _caches.claimant_last_message.set( + message.channel.id, + message.created_at.timestamp() + ) + await _caches.last_message.set( + message.channel.id, + message.created_at.timestamp() + ) + elif channel_utils.is_in_category(message.channel, constants.Categories.help_in_use): + # Overwrite the claimant message time, if its from the claimant. + if message.author == await _caches.claimants.get(message.channel.id): + await _caches.claimant_last_message(message.channel.id, message.created_at.timestamp()) + + await _caches.last_message.set(message.channel.id, message.created_at.timestamp()) else: await _message.check_for_answer(message) -- cgit v1.2.3 From 9503a2edc7b9609da6f96a65c9742259f106bbb1 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 16 Mar 2021 20:33:35 +0000 Subject: Improve trace message. --- bot/exts/help_channels/_channel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 95b8cdc1f..8db6e7617 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -59,7 +59,7 @@ async def get_closing_time(channel: discord.TextChannel) -> t.Optional[int]: # The further away closing time is what we should use. 
closing_time = max(claimant, last_message) - log.trace(f"claimant: {claimant}, last_message: {last_message}") + log.trace(f"claimant closing time: {claimant}, last_message closing time: {last_message}") log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time}.") return closing_time -- cgit v1.2.3 From 61e17c893d2bfa8969396b6212c2b3a0190bd636 Mon Sep 17 00:00:00 2001 From: Chris Date: Wed, 17 Mar 2021 18:33:07 +0000 Subject: Improve availible channel embed footer --- bot/exts/help_channels/_message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index 011b4cdbe..ec2daab45 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -28,7 +28,7 @@ For more tips, check out our guide on **[asking good questions]({ASKING_GUIDE_UR AVAILABLE_TITLE = "Available help channel" -AVAILABLE_FOOTER = "Closes after being idle for some time, or when you send !close." +AVAILABLE_FOOTER = "Closes after a period of inactivity, or when you send !close." DORMANT_MSG = f""" This help channel has been marked as **dormant**, and has been moved into the **Help: Dormant** \ -- cgit v1.2.3 From 12a73f2b29ac2b2f43b488109fc66216a739f056 Mon Sep 17 00:00:00 2001 From: Chris Date: Wed, 17 Mar 2021 18:45:51 +0000 Subject: Improve docs strings and variable names within the help channel system --- bot/exts/help_channels/_caches.py | 4 ++-- bot/exts/help_channels/_channel.py | 18 +++++++++--------- bot/exts/help_channels/_cog.py | 18 +++++++++--------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/bot/exts/help_channels/_caches.py b/bot/exts/help_channels/_caches.py index c790a37b1..d4676df87 100644 --- a/bot/exts/help_channels/_caches.py +++ b/bot/exts/help_channels/_caches.py @@ -10,11 +10,11 @@ claimants = RedisCache(namespace="HelpChannels.help_channel_claimants") # Stores the timestamp of the last message in a help channel # RedisCache[discord.TextChannel.id, UtcPosixTimestamp] -last_message = RedisCache(namespace="HelpChannels.last_message") +last_message_times = RedisCache(namespace="HelpChannels.last_message_times") # Stores the timestamp of the last message from the claimant of a help channel # RedisCache[discord.TextChannel.id, UtcPosixTimestamp] -claimant_last_message = RedisCache(namespace="HelpChannels.claimant_last_message") +claimant_last_message_times = RedisCache(namespace="HelpChannels.claimant_last_message_times") # This cache maps a help channel to original question message in same channel. # RedisCache[discord.TextChannel.id, discord.Message.id] diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 8db6e7617..6c17a26e0 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -34,10 +34,10 @@ async def get_closing_time(channel: discord.TextChannel) -> t.Optional[int]: else: idle_minutes = constants.HelpChannels.idle_minutes_others - last_message = await _caches.last_message.get(channel.id) - claimant_last_message = await _caches.claimant_last_message.get(channel.id) + last_message_time = await _caches.last_message_times.get(channel.id) + claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - if not (last_message or claimant_last_message): + if not (last_message_time or claimant_last_message_time): # Using the old method if we can't get cached info. 
msg = await _message.get_last_message(channel) if not msg: @@ -51,15 +51,15 @@ async def get_closing_time(channel: discord.TextChannel) -> t.Optional[int]: return closing_time # We want to get the time at which a channel should be closed. - last_message = datetime.fromtimestamp(last_message) - claimant = datetime.fromtimestamp(claimant_last_message) + last_message_time = datetime.fromtimestamp(last_message_time) + claimant_last_message_time = datetime.fromtimestamp(claimant_last_message_time) - last_message += timedelta(minutes=idle_minutes) - claimant += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) + last_message_time += timedelta(minutes=idle_minutes) + claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) # The further away closing time is what we should use. - closing_time = max(claimant, last_message) - log.trace(f"claimant closing time: {claimant}, last_message closing time: {last_message}") + closing_time = max(claimant_last_message_time, last_message_time) + log.trace(f"claimant closing time: {claimant_last_message_time}, last_message closing time: {last_message_time}") log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time}.") return closing_time diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index db14ce0ef..bac9aa9dd 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -43,9 +43,9 @@ class HelpChannels(commands.Cog): In Use Category * Contains all channels which are occupied by someone needing help - * Channel moves to dormant category after `constants.HelpChannels.idle_minutes_other` minutes - since the last user message, or `constants.HelpChannels.idle_minutes_claimant` minutes - since the last claimant message. + * Channel moves to dormant category after + - `constants.HelpChannels.idle_minutes_other` minutes since the last user message, or + - `constants.HelpChannels.idle_minutes_claimant` minutes since the last claimant message. * Command can prematurely mark a channel as dormant * Channel claimant is allowed to use the command * Allowed roles for the command are configurable with `constants.HelpChannels.cmd_whitelist` @@ -419,8 +419,8 @@ class HelpChannels(commands.Cog): """ Move an available channel to the In Use category and replace it with a dormant one. - Also updates the `message_times` cache based on the current timestamp. If the message - author is the claimant of this channel, also update the claimant_last_message. + Also updates the `last_message_times` cache based on the current timestamp. If the message + author is the claimant of this channel, also update the `claimant_last_message_times` cache. """ if message.author.bot: return # Ignore messages sent by bots. @@ -431,20 +431,20 @@ class HelpChannels(commands.Cog): if not _channel.is_excluded_channel(message.channel): await self.claim_channel(message) # Initialise the cache for this channel - await _caches.claimant_last_message.set( + await _caches.claimant_last_message_times.set( message.channel.id, message.created_at.timestamp() ) - await _caches.last_message.set( + await _caches.last_message_times.set( message.channel.id, message.created_at.timestamp() ) elif channel_utils.is_in_category(message.channel, constants.Categories.help_in_use): # Overwrite the claimant message time, if its from the claimant. 
if message.author == await _caches.claimants.get(message.channel.id): - await _caches.claimant_last_message(message.channel.id, message.created_at.timestamp()) + await _caches.claimant_last_message_times(message.channel.id, message.created_at.timestamp()) - await _caches.last_message.set(message.channel.id, message.created_at.timestamp()) + await _caches.last_message_times.set(message.channel.id, message.created_at.timestamp()) else: await _message.check_for_answer(message) -- cgit v1.2.3 From 1d5625a2f47a1d4d050f9eb0eb7a18e7d6fe171b Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 17 Mar 2021 22:19:55 +0100 Subject: Branding: adjust daemon start-up behaviour The daemon will now perform a sync iteration immediately when started, and then every UTC midnight. Previously, it would only perform the initial iteration when started for the first time, which is odd. It is also believed that splitting the daemon's logic into three separate functions is beneficial: before, loop, and main. This commit makes log and doc adjustments where appropriate. --- bot/exts/backend/branding/_cog.py | 71 ++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 025a609b5..cbd61a751 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -397,7 +397,7 @@ class Branding(commands.Cog): should_begin: t.Optional[bool] = await self.cache_information.get("daemon_active") # None if never set! if should_begin: - self.daemon_main.start() + self.daemon_loop.start() def cog_unload(self) -> None: """ @@ -407,71 +407,72 @@ class Branding(commands.Cog): """ log.debug("Cog unload: cancelling daemon") - self.daemon_main.cancel() + self.daemon_loop.cancel() - @tasks.loop(hours=24) async def daemon_main(self) -> None: """ - Periodically synchronise guild & caches with branding repository. + Synchronise guild & caches with branding repository. - This function executes every 24 hours at midnight. We pull the currently active event from the branding - repository and check whether it matches the currently active event. If not, we apply the new event. + Pull the currently active event from the branding repository and check whether it matches the currently + active event in the cache. If not, apply the new event. However, it is also possible that an event's assets change as it's active. To account for such cases, - we check the banner & icons hashes against the currently cached values. If there is a mismatch, the + we check the banner & icons hashes against the currently cached values. If there is a mismatch, each specific asset is re-applied. - - As such, the guild should always remain synchronised with the branding repository. However, the #changelog - notification is only sent in the case of entering a new event ~ no change in an on-going event will trigger - a new notification to be sent. 
""" - log.debug("Daemon awakens: checking current event") + log.trace("Daemon main: checking current event") new_event, available_events = await self.repository.get_current_event() await self.populate_cache_events(available_events) if new_event is None: - log.warning("Failed to get current event from the branding repository, daemon will do nothing!") + log.warning("Daemon main: failed to get current event from branding repository, will do nothing") return if new_event.path != await self.cache_information.get("event_path"): - log.debug("New event detected!") + log.debug("Daemon main: new event detected!") await self.enter_event(new_event) return - log.debug("Event has not changed, checking for change in assets") + log.trace("Daemon main: event has not changed, checking for change in assets") if new_event.banner.sha != await self.cache_information.get("banner_hash"): - log.debug("Detected same-event banner change!") + log.debug("Daemon main: detected banner change!") await self.apply_banner(new_event.banner) if compound_hash(new_event.icons) != await self.cache_information.get("icons_hash"): - log.debug("Detected same-event icon change!") + log.debug("Daemon main: detected icon change!") await self.initiate_icon_rotation(new_event.icons) await self.rotate_icons() else: await self.maybe_rotate_icons() - @daemon_main.before_loop - async def daemon_before(self) -> None: + @tasks.loop(hours=24) + async def daemon_loop(self) -> None: """ - Wait until the next-up UTC midnight before letting `daemon_main` begin. + Call `daemon_main` every 24 hours. - This function allows the daemon to keep a consistent schedule across restarts. + The scheduler maintains an exact 24-hour frequency even if this coroutine takes time to complete. If the + coroutine is started at 00:01 and completes at 00:05, it will still be started at 00:01 the next day. + """ + log.trace("Daemon loop: calling daemon main") + + await self.daemon_main() - We check for a special case in which the cog's cache is empty. This indicates that we have never entered - an event (on first start-up), or that there was a cache loss. In either case, the current event gets - applied immediately, to avoid leaving the cog in an empty state. + @daemon_loop.before_loop + async def daemon_before(self) -> None: """ - log.debug("Calculating time for daemon to sleep before first awakening") + Call `daemon_main` immediately, then block `daemon_loop` until the next-up UTC midnight. - current_event = await self.cache_information.get("event_path") + The first iteration will be invoked manually such that synchronisation happens immediately after daemon start. + We then calculate the time until the next-up midnight and sleep before letting `daemon_loop` begin. 
+ """ + log.info("Daemon before: synchronising guild") - if current_event is None: # Maiden case ~ first start or cache loss - log.debug("Event cache is empty (indicating maiden case), invoking synchronisation") - await self.synchronise() + await self.daemon_main() + log.trace("Daemon before: calculating time to sleep before loop begins") now = datetime.utcnow() # The actual midnight moment is offset into the future in order to prevent issues with imprecise sleep @@ -479,8 +480,8 @@ class Branding(commands.Cog): midnight = datetime.combine(tomorrow, time(minute=1)) sleep_secs = (midnight - now).total_seconds() + log.trace(f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}") - log.debug(f"Sleeping {sleep_secs} seconds before next-up midnight at {midnight}") await asyncio.sleep(sleep_secs) # endregion @@ -600,10 +601,10 @@ class Branding(commands.Cog): """Enable the branding daemon.""" await self.cache_information.set("daemon_active", True) - if self.daemon_main.is_running(): + if self.daemon_loop.is_running(): resp = make_embed("Daemon is already enabled!", "", success=False) else: - self.daemon_main.start() + self.daemon_loop.start() resp = make_embed("Daemon enabled!", "It will now automatically awaken on start-up.", success=True) await ctx.send(embed=resp) @@ -613,8 +614,8 @@ class Branding(commands.Cog): """Disable the branding daemon.""" await self.cache_information.set("daemon_active", False) - if self.daemon_main.is_running(): - self.daemon_main.cancel() + if self.daemon_loop.is_running(): + self.daemon_loop.cancel() resp = make_embed("Daemon disabled!", "It will not awaken on start-up.", success=True) else: resp = make_embed("Daemon is already disabled!", "", success=False) @@ -624,7 +625,7 @@ class Branding(commands.Cog): @branding_daemon_group.command(name="status") async def branding_daemon_status_cmd(self, ctx: commands.Context) -> None: """Check whether the daemon is currently enabled.""" - if self.daemon_main.is_running(): + if self.daemon_loop.is_running(): resp = make_embed("Daemon is enabled", "Use `branding daemon disable` to stop.", success=True) else: resp = make_embed("Daemon is disabled", "Use `branding daemon enable` to start.", success=False) -- cgit v1.2.3 From 9f65bfd0985331b5974011feca30b02f01548de5 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 17 Mar 2021 22:36:20 +0100 Subject: Branding: extend command aliases --- bot/exts/backend/branding/_cog.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index cbd61a751..57b762d1f 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -493,7 +493,7 @@ class Branding(commands.Cog): if not ctx.invoked_subcommand: await ctx.send_help(ctx.command) - @branding_group.command(name="about") + @branding_group.command(name="about", aliases=("current", "event")) async def branding_about_cmd(self, ctx: commands.Context) -> None: """Show the current event description.""" await self.send_info_embed(ctx.channel.id) @@ -526,7 +526,7 @@ class Branding(commands.Cog): # endregion # region: Command interface (branding calendar) - @branding_group.group(name="calendar", aliases=("schedule",)) + @branding_group.group(name="calendar", aliases=("schedule", "events")) async def branding_calendar_group(self, ctx: commands.Context) -> None: """ Show the current event calendar. 
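A minimal asyncio-only sketch of the schedule described in the daemon patch above: one immediate synchronisation, then a run shortly after each UTC midnight. This approximates the `tasks.loop` plus `before_loop` arrangement and is not the cog's implementation.

```py
import asyncio
from datetime import datetime, time, timedelta


async def daemon_main() -> None:
    # Placeholder for the actual repository synchronisation.
    print(f"Synchronising at {datetime.utcnow()} UTC")


async def daemon() -> None:
    """Synchronise once immediately, then shortly after every UTC midnight."""
    await daemon_main()

    while True:
        now = datetime.utcnow()
        # Aim slightly past midnight to guard against imprecise sleeps.
        next_run = datetime.combine(now.date() + timedelta(days=1), time(minute=1))
        await asyncio.sleep((next_run - now).total_seconds())
        await daemon_main()


# asyncio.run(daemon())  # Runs indefinitely; left commented out on purpose.
```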
-- cgit v1.2.3 From 1b3c23a7d89d64a899a67fbfd0e69e2fa3acd911 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Thu, 18 Mar 2021 01:11:59 +0100 Subject: Branding: update 'synchronise' docs After previous changes, the docstring was no longer accurate. See: 1d5625a2f47a1d4d050f9eb0eb7a18e7d6fe171b --- bot/exts/backend/branding/_cog.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 57b762d1f..0723458c2 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -341,9 +341,9 @@ class Branding(commands.Cog): """ Fetch the current event and delegate to `enter_event`. - This is a convenience wrapper to force synchronisation either via a command, or when the daemon starts - with an empty cache. It is generally only used in a recovery scenario. In the usual case, the daemon - already has an `Event` instance and can pass it to `enter_event` directly. + This is a convenience function to force synchronisation via a command. It should generally only be used + in a recovery scenario. In the usual case, the daemon already has an `Event` instance and can pass it + to `enter_event` directly. Returns a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ -- cgit v1.2.3 From fce7fb99e810f92187f5f10d1e681b8367336c6d Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 20:08:54 +0000 Subject: Repurpose unanswered cache for storing non-claimant last message times The unanswered cache was previously just a boolen of whether a non-claimant every replied to a help channel. With us now needing to know the time at which a non-claimant messaged in a given channel, we infer the answered status from this cache instead. --- bot/exts/help_channels/_caches.py | 16 ++++++---------- bot/exts/help_channels/_channel.py | 33 +++++++++++++++++++-------------- bot/exts/help_channels/_cog.py | 21 +++------------------ bot/exts/help_channels/_message.py | 31 +++++++++++++++++-------------- bot/exts/help_channels/_stats.py | 6 +++--- 5 files changed, 48 insertions(+), 59 deletions(-) diff --git a/bot/exts/help_channels/_caches.py b/bot/exts/help_channels/_caches.py index d4676df87..834c5f4c2 100644 --- a/bot/exts/help_channels/_caches.py +++ b/bot/exts/help_channels/_caches.py @@ -8,20 +8,16 @@ claim_times = RedisCache(namespace="HelpChannels.claim_times") # RedisCache[discord.TextChannel.id, t.Union[discord.User.id, discord.Member.id]] claimants = RedisCache(namespace="HelpChannels.help_channel_claimants") -# Stores the timestamp of the last message in a help channel -# RedisCache[discord.TextChannel.id, UtcPosixTimestamp] -last_message_times = RedisCache(namespace="HelpChannels.last_message_times") - # Stores the timestamp of the last message from the claimant of a help channel # RedisCache[discord.TextChannel.id, UtcPosixTimestamp] claimant_last_message_times = RedisCache(namespace="HelpChannels.claimant_last_message_times") +# This cache maps a help channel to the timestamp of the last, non-claimant, +# message. This cache being empty for a given help channel indicates the +# question is unanswered. +# RedisCache[discord.TextChannel.id, UtcPosixTimestamp] +non_claimant_last_message_times = RedisCache(namespace="HelpChannels.non_claimant_last_message_times") + # This cache maps a help channel to original question message in same channel. 
# RedisCache[discord.TextChannel.id, discord.Message.id] question_messages = RedisCache(namespace="HelpChannels.question_messages") - -# This cache maps a help channel to whether it has had any -# activity other than the original claimant. True being no other -# activity and False being other activity. -# RedisCache[discord.TextChannel.id, bool] -unanswered = RedisCache(namespace="HelpChannels.unanswered") diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 6c17a26e0..3e3749041 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -25,8 +25,13 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco yield channel -async def get_closing_time(channel: discord.TextChannel) -> t.Optional[int]: - """Return the timestamp at which the given help `channel` should be closed.""" +async def get_closing_time(channel: discord.TextChannel) -> datetime: + """ + Return the timestamp at which the given help `channel` should be closed. + + If either cache is empty, use the last message in the channel to determine closign time. + If the last message connt be retreived, return datetime.min, I.E close right now. + """ log.trace(f"Getting the closing time for #{channel} ({channel.id}).") if await _message.is_empty(channel): @@ -34,32 +39,32 @@ async def get_closing_time(channel: discord.TextChannel) -> t.Optional[int]: else: idle_minutes = constants.HelpChannels.idle_minutes_others - last_message_time = await _caches.last_message_times.get(channel.id) + non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - if not (last_message_time or claimant_last_message_time): + if not (non_claimant_last_message_time or claimant_last_message_time): # Using the old method if we can't get cached info. msg = await _message.get_last_message(channel) if not msg: log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages.") return datetime.min - # We want to get the time at which a channel should be closed. - closing_time = msg.created_at - closing_time += timedelta(minutes=idle_minutes) + # The time at which a channel should be closed. + return msg.created_at + timedelta(minutes=idle_minutes) - return closing_time - - # We want to get the time at which a channel should be closed. - last_message_time = datetime.fromtimestamp(last_message_time) + # Get the later time at which a channel should be closed + non_claimant_last_message_time = datetime.fromtimestamp(non_claimant_last_message_time) claimant_last_message_time = datetime.fromtimestamp(claimant_last_message_time) - last_message_time += timedelta(minutes=idle_minutes) + non_claimant_last_message_time += timedelta(minutes=idle_minutes) claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) # The further away closing time is what we should use. 
- closing_time = max(claimant_last_message_time, last_message_time) - log.trace(f"claimant closing time: {claimant_last_message_time}, last_message closing time: {last_message_time}") + closing_time = max(claimant_last_message_time, non_claimant_last_message_time) + log.trace( + f"Claimant closing time: {claimant_last_message_time}, " + f"last_message closing time: {non_claimant_last_message_time}" + ) log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time}.") return closing_time diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index bac9aa9dd..8c97ef2d0 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -117,8 +117,7 @@ class HelpChannels(commands.Cog): # Must use a timezone-aware datetime to ensure a correct POSIX timestamp. timestamp = datetime.now(timezone.utc).timestamp() await _caches.claim_times.set(message.channel.id, timestamp) - - await _caches.unanswered.set(message.channel.id, True) + await _caches.claimant_last_message_times.set(message.channel.id, timestamp) # Not awaited because it may indefinitely hold the lock while waiting for a channel. scheduling.create_task(self.move_to_available(), name=f"help_claim_{message.id}") @@ -378,6 +377,7 @@ class HelpChannels(commands.Cog): async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, is_auto: bool) -> None: """Actual implementation of `unclaim_channel`. See that for full documentation.""" await _caches.claimants.delete(channel.id) + await _caches.non_claimant_last_message_times.delete(channel.id) # Ignore missing tasks because a channel may still be dormant after the cooldown expires. if claimant_id in self.scheduler: @@ -419,7 +419,7 @@ class HelpChannels(commands.Cog): """ Move an available channel to the In Use category and replace it with a dormant one. - Also updates the `last_message_times` cache based on the current timestamp. If the message + Update the `last_message_times` cache based on the current timestamp. If the message author is the claimant of this channel, also update the `claimant_last_message_times` cache. """ if message.author.bot: @@ -430,21 +430,6 @@ class HelpChannels(commands.Cog): if channel_utils.is_in_category(message.channel, constants.Categories.help_available): if not _channel.is_excluded_channel(message.channel): await self.claim_channel(message) - # Initialise the cache for this channel - await _caches.claimant_last_message_times.set( - message.channel.id, - message.created_at.timestamp() - ) - await _caches.last_message_times.set( - message.channel.id, - message.created_at.timestamp() - ) - elif channel_utils.is_in_category(message.channel, constants.Categories.help_in_use): - # Overwrite the claimant message time, if its from the claimant. 
- if message.author == await _caches.claimants.get(message.channel.id): - await _caches.claimant_last_message_times(message.channel.id, message.created_at.timestamp()) - - await _caches.last_message_times.set(message.channel.id, message.created_at.timestamp()) else: await _message.check_for_answer(message) diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index ec2daab45..9ba019550 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -1,7 +1,7 @@ import logging import textwrap import typing as t -from datetime import datetime +from datetime import datetime, timezone import discord @@ -48,19 +48,22 @@ async def check_for_answer(message: discord.Message) -> None: # Confirm the channel is an in use help channel if is_in_category(channel, constants.Categories.help_in_use): - log.trace(f"Checking if #{channel} ({channel.id}) has been answered.") - - # Check if there is an entry in unanswered - if await _caches.unanswered.contains(channel.id): - claimant_id = await _caches.claimants.get(channel.id) - if not claimant_id: - # The mapping for this channel doesn't exist, we can't do anything. - return - - # Check the message did not come from the claimant - if claimant_id != message.author.id: - # Mark the channel as answered - await _caches.unanswered.set(channel.id, False) + log.trace(f"Checking if #{channel} ({channel.id}) has had a reply.") + # Must use a timezone-aware datetime to ensure a correct POSIX timestamp. + timestamp = datetime.now(timezone.utc).timestamp() + + # Overwrite the claimant message time, if its from the claimant. + if message.author == await _caches.claimants.get(channel.id): + await _caches.claimant_last_message_times.set(channel.id, timestamp) + return + + claimant_id = await _caches.claimants.get(channel.id) + if not claimant_id: + # The mapping for this channel doesn't exist, we can't do anything. + return + + # Cache the timestamp of the non-claimants message + await _caches.non_claimant_last_message_times.set(channel.id, timestamp) async def get_last_message(channel: discord.TextChannel) -> t.Optional[discord.Message]: diff --git a/bot/exts/help_channels/_stats.py b/bot/exts/help_channels/_stats.py index b8778e7d9..e212c495d 100644 --- a/bot/exts/help_channels/_stats.py +++ b/bot/exts/help_channels/_stats.py @@ -35,8 +35,8 @@ async def report_complete_session(channel_id: int, is_auto: bool) -> None: if in_use_time: bot.instance.stats.timing("help.in_use_time", in_use_time) - unanswered = await _caches.unanswered.get(channel_id) - if unanswered: + non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel_id) + if non_claimant_last_message_time is None: bot.instance.stats.incr("help.sessions.unanswered") - elif unanswered is not None: + else: bot.instance.stats.incr("help.sessions.answered") -- cgit v1.2.3 From 50030597819e1be2787ecc52db3d41c8155ede40 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 20:12:25 +0000 Subject: Update function name to describe new behaviour. Also updates the doc string to reflect this new behaviour. 
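Editor's note: the stats change above reduces the answered/unanswered decision to a presence
check on the non-claimant cache entry. A minimal sketch of that decision, with a plain optional
float standing in for the Redis lookup and no statsd involved:

import typing as t

def session_outcome(non_claimant_last_message_time: t.Optional[float]) -> str:
    # A session counts as answered exactly when someone other than the claimant has spoken,
    # i.e. when a non-claimant timestamp was cached at some point during the session.
    return "unanswered" if non_claimant_last_message_time is None else "answered"

assert session_outcome(None) == "unanswered"
assert session_outcome(1616700000.0) == "answered"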
--- bot/exts/help_channels/_cog.py | 2 +- bot/exts/help_channels/_message.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 8c97ef2d0..3f453edd3 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -431,7 +431,7 @@ class HelpChannels(commands.Cog): if not _channel.is_excluded_channel(message.channel): await self.claim_channel(message) else: - await _message.check_for_answer(message) + await _message.update_message_caches(message) @commands.Cog.listener() async def on_message_delete(self, msg: discord.Message) -> None: diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index 9ba019550..c20af2946 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -42,8 +42,8 @@ through our guide for **[asking a good question]({ASKING_GUIDE_URL})**. """ -async def check_for_answer(message: discord.Message) -> None: - """Checks for whether new content in a help channel comes from non-claimants.""" +async def update_message_caches(message: discord.Message) -> None: + """Checks the source of new content in a help channel and updates the appropriate cache.""" channel = message.channel # Confirm the channel is an in use help channel -- cgit v1.2.3 From 9a3c946d5fd390c422cc83b57fdf29afb0e635b7 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 20:13:59 +0000 Subject: Delete cached claimant message time. --- bot/exts/help_channels/_cog.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 3f453edd3..a372fa868 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -377,6 +377,7 @@ class HelpChannels(commands.Cog): async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, is_auto: bool) -> None: """Actual implementation of `unclaim_channel`. See that for full documentation.""" await _caches.claimants.delete(channel.id) + await _caches.claimant_last_message_times.delete(channel.id) await _caches.non_claimant_last_message_times.delete(channel.id) # Ignore missing tasks because a channel may still be dormant after the cooldown expires. -- cgit v1.2.3 From 6a954de33cbfa36be45dfb61b05141c0ced23256 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 20:26:12 +0000 Subject: Move return behaviour comments to in-line rather than docstring. --- bot/exts/help_channels/_channel.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 3e3749041..13726d234 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -26,12 +26,7 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco async def get_closing_time(channel: discord.TextChannel) -> datetime: - """ - Return the timestamp at which the given help `channel` should be closed. - - If either cache is empty, use the last message in the channel to determine closign time. - If the last message connt be retreived, return datetime.min, I.E close right now. 
- """ + """Return the timestamp at which the given help `channel` should be closed.""" log.trace(f"Getting the closing time for #{channel} ({channel.id}).") if await _message.is_empty(channel): @@ -43,10 +38,11 @@ async def get_closing_time(channel: discord.TextChannel) -> datetime: claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) if not (non_claimant_last_message_time or claimant_last_message_time): - # Using the old method if we can't get cached info. + # One of the caches is empty, use the last message in the channel to determine closing time instead. msg = await _message.get_last_message(channel) if not msg: - log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages.") + # last message can't be retreived, return datetime.min so channel closes right now. + log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages, closing now.") return datetime.min # The time at which a channel should be closed. -- cgit v1.2.3 From e962aa5d10e990ccaba6640aeca1c8b84bbb7ce4 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 21:17:54 +0000 Subject: If the channel is empty, determine closing time based on last message. --- bot/exts/help_channels/_channel.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 13726d234..454e41157 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -26,10 +26,10 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco async def get_closing_time(channel: discord.TextChannel) -> datetime: - """Return the timestamp at which the given help `channel` should be closed.""" + """Return the timestamp at which the given help channel, `channel`, should be closed.""" log.trace(f"Getting the closing time for #{channel} ({channel.id}).") - if await _message.is_empty(channel): + if is_empty := await _message.is_empty(channel): idle_minutes = constants.HelpChannels.deleted_idle_minutes else: idle_minutes = constants.HelpChannels.idle_minutes_others @@ -37,8 +37,9 @@ async def get_closing_time(channel: discord.TextChannel) -> datetime: non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - if not (non_claimant_last_message_time or claimant_last_message_time): - # One of the caches is empty, use the last message in the channel to determine closing time instead. + if is_empty or not (non_claimant_last_message_time or claimant_last_message_time): + # Current help session has no messages, or one of the caches is empty. + # Use the last message in the channel to determine closing time instead. msg = await _message.get_last_message(channel) if not msg: # last message can't be retreived, return datetime.min so channel closes right now. 
-- cgit v1.2.3 From 3fe9a39a3d262f32bc5dc8d18de5075aad8b0e5c Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 21:19:34 +0000 Subject: Determine closing time from last message if either cache is empty, rather than if both are empty --- bot/exts/help_channels/_channel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 454e41157..c65b87986 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -26,7 +26,7 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco async def get_closing_time(channel: discord.TextChannel) -> datetime: - """Return the timestamp at which the given help channel, `channel`, should be closed.""" + """Return the timestamp at which the given help `channel` should be closed.""" log.trace(f"Getting the closing time for #{channel} ({channel.id}).") if is_empty := await _message.is_empty(channel): @@ -37,7 +37,7 @@ async def get_closing_time(channel: discord.TextChannel) -> datetime: non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - if is_empty or not (non_claimant_last_message_time or claimant_last_message_time): + if is_empty or not (non_claimant_last_message_time and claimant_last_message_time): # Current help session has no messages, or one of the caches is empty. # Use the last message in the channel to determine closing time instead. msg = await _message.get_last_message(channel) -- cgit v1.2.3 From 72eb71d073e19d34d5a31c6e7c7af26a3be5f746 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 18 Mar 2021 21:20:36 +0000 Subject: More descriptive comment for when an if block is entered --- bot/exts/help_channels/_channel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index c65b87986..ad683b218 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -38,7 +38,7 @@ async def get_closing_time(channel: discord.TextChannel) -> datetime: claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) if is_empty or not (non_claimant_last_message_time and claimant_last_message_time): - # Current help session has no messages, or one of the caches is empty. + # Current help session has no messages, or at least one of the caches is empty. # Use the last message in the channel to determine closing time instead. 
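# [Editor's note] Regarding the "or" -> "and" swap inside the negation a few lines up: by
# De Morgan's laws, "not (a or b)" holds only when *both* cached times are missing, while
# "not (a and b)" holds when *either* is missing, which is the intended condition for falling
# back to the channel's last message. A quick self-contained check of the equivalence:
for a, b in [(True, True), (True, False), (False, True), (False, False)]:
    assert (not (a and b)) == ((not a) or (not b))   # fall back when either value is missing
    assert (not (a or b)) == ((not a) and (not b))   # fall back only when both are missing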
msg = await _message.get_last_message(channel) if not msg: -- cgit v1.2.3 From 457cb14deade3d023d01ee0342bf3c84911f6d0b Mon Sep 17 00:00:00 2001 From: laundmo Date: Fri, 19 Mar 2021 22:56:50 +0100 Subject: reset cache on bot start, stats for different close reasons --- bot/exts/help_channels/_channel.py | 28 +++++++++++++++------------- bot/exts/help_channels/_cog.py | 35 ++++++++++++++++++++++------------- bot/exts/help_channels/_stats.py | 4 ++-- 3 files changed, 39 insertions(+), 28 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index ad683b218..1e152eaa3 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -25,11 +25,13 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco yield channel -async def get_closing_time(channel: discord.TextChannel) -> datetime: - """Return the timestamp at which the given help `channel` should be closed.""" +async def get_closing_time(channel: discord.TextChannel) -> t.Tuple[datetime, str]: + """Return the timestamp at which the given help `channel` should be closed along with the reason.""" log.trace(f"Getting the closing time for #{channel} ({channel.id}).") - if is_empty := await _message.is_empty(channel): + is_empty = await _message.is_empty(channel) + + if is_empty: idle_minutes = constants.HelpChannels.deleted_idle_minutes else: idle_minutes = constants.HelpChannels.idle_minutes_others @@ -40,14 +42,16 @@ async def get_closing_time(channel: discord.TextChannel) -> datetime: if is_empty or not (non_claimant_last_message_time and claimant_last_message_time): # Current help session has no messages, or at least one of the caches is empty. # Use the last message in the channel to determine closing time instead. + msg = await _message.get_last_message(channel) + if not msg: # last message can't be retreived, return datetime.min so channel closes right now. log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages, closing now.") - return datetime.min + return datetime.min, "deleted" # The time at which a channel should be closed. - return msg.created_at + timedelta(minutes=idle_minutes) + return msg.created_at + timedelta(minutes=idle_minutes), "latest_message" # Get the later time at which a channel should be closed non_claimant_last_message_time = datetime.fromtimestamp(non_claimant_last_message_time) @@ -57,14 +61,12 @@ async def get_closing_time(channel: discord.TextChannel) -> datetime: claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) # The further away closing time is what we should use. - closing_time = max(claimant_last_message_time, non_claimant_last_message_time) - log.trace( - f"Claimant closing time: {claimant_last_message_time}, " - f"last_message closing time: {non_claimant_last_message_time}" - ) - log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time}.") - return closing_time - + if claimant_last_message_time >= non_claimant_last_message_time: + log.trace(f"#{channel} ({channel.id}) should be closed at {claimant_last_message_time} due to claimant timeout.") + return claimant_last_message_time, "claimant_timeout" + else: + log.trace(f"#{channel} ({channel.id}) should be closed at {non_claimant_last_message_time} due to others timeout.") + return non_claimant_last_message_time, "others_timeout" async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: """Return the duration `channel_id` has been in use. 
Return None if it's not in use.""" diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index a372fa868..ef6a286d6 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -118,6 +118,9 @@ class HelpChannels(commands.Cog): timestamp = datetime.now(timezone.utc).timestamp() await _caches.claim_times.set(message.channel.id, timestamp) await _caches.claimant_last_message_times.set(message.channel.id, timestamp) + # non_claimant needs to be set too, to satisfy the condition in `_channel.get_closing_time` the first time. + # Otherwise it will fall back to the old method if no other messages are sent. + await _caches.non_claimant_last_message_times.set(message.channel.id, timestamp) # Not awaited because it may indefinitely hold the lock while waiting for a channel. scheduling.create_task(self.move_to_available(), name=f"help_claim_{message.id}") @@ -188,7 +191,7 @@ class HelpChannels(commands.Cog): # Don't use a discord.py check because the check needs to fail silently. if await self.close_check(ctx): log.info(f"Close command invoked by {ctx.author} in #{ctx.channel}.") - await self.unclaim_channel(ctx.channel, is_auto=False) + await self.unclaim_channel(ctx.channel, closed_on="command") async def get_available_candidate(self) -> discord.TextChannel: """ @@ -234,7 +237,7 @@ class HelpChannels(commands.Cog): elif missing < 0: log.trace(f"Moving {abs(missing)} superfluous available channels over to the Dormant category.") for channel in channels[:abs(missing)]: - await self.unclaim_channel(channel) + await self.unclaim_channel(channel, closed_on="cleanup") async def init_categories(self) -> None: """Get the help category objects. Remove the cog if retrieval fails.""" @@ -272,6 +275,8 @@ class HelpChannels(commands.Cog): log.trace("Moving or rescheduling in-use channels.") for channel in _channel.get_category_channels(self.in_use_category): + # clear the cache here so moving doesn't rely on old cached messages. + await self._delete_message_time_caches(channel) await self.move_idle_channel(channel, has_task=False) # Prevent the command from being used until ready. @@ -294,16 +299,16 @@ class HelpChannels(commands.Cog): """ log.trace(f"Handling in-use channel #{channel} ({channel.id}).") - closing_time = await _channel.get_closing_time(channel) + closing_time, closed_on = await _channel.get_closing_time(channel) # The time at which the channel should be closed, based on messages sent. if closing_time < datetime.utcnow(): log.info( f"#{channel} ({channel.id}) is idle past {closing_time} " - f"and will be made dormant." + f"and will be made dormant. Reason: {closed_on}" ) - await self.unclaim_channel(channel) + await self.unclaim_channel(channel, closed_on=closed_on) else: # Cancel the existing task, if any. if has_task: @@ -353,7 +358,7 @@ class HelpChannels(commands.Cog): _stats.report_counts() @lock.lock_arg(f"{NAMESPACE}.unclaim", "channel") - async def unclaim_channel(self, channel: discord.TextChannel, *, is_auto: bool = True) -> None: + async def unclaim_channel(self, channel: discord.TextChannel, *, closed_on: str) -> None: """ Unclaim an in-use help `channel` to make it dormant. @@ -361,7 +366,7 @@ class HelpChannels(commands.Cog): Remove the cooldown role from the channel claimant if they have no other channels claimed. Cancel the scheduled cooldown role removal task. - Set `is_auto` to True if the channel was automatically closed or False if manually closed. + `closed_on` is the reason that the channel was closed for. 
Examples: "cleanup", "command", "claimant_timeout" """ claimant_id = await _caches.claimants.get(channel.id) _unclaim_channel = self._unclaim_channel @@ -372,13 +377,17 @@ class HelpChannels(commands.Cog): decorator = lock.lock_arg(f"{NAMESPACE}.unclaim", "claimant_id", wait=True) _unclaim_channel = decorator(_unclaim_channel) - return await _unclaim_channel(channel, claimant_id, is_auto) + return await _unclaim_channel(channel, claimant_id, closed_on) + + async def _delete_message_time_caches(self, channel: discord.TextChannel) -> None: + """Delete message time caches """ + await _caches.claimant_last_message_times.delete(channel.id) + await _caches.non_claimant_last_message_times.delete(channel.id) - async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, is_auto: bool) -> None: + async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, closed_on: str) -> None: """Actual implementation of `unclaim_channel`. See that for full documentation.""" await _caches.claimants.delete(channel.id) - await _caches.claimant_last_message_times.delete(channel.id) - await _caches.non_claimant_last_message_times.delete(channel.id) + await self._delete_message_time_caches(channel) # Ignore missing tasks because a channel may still be dormant after the cooldown expires. if claimant_id in self.scheduler: @@ -392,12 +401,12 @@ class HelpChannels(commands.Cog): await _cooldown.remove_cooldown_role(claimant) await _message.unpin(channel) - await _stats.report_complete_session(channel.id, is_auto) + await _stats.report_complete_session(channel.id, closed_on) await self.move_to_dormant(channel) # Cancel the task that makes the channel dormant only if called by the close command. # In other cases, the task is either already done or not-existent. - if not is_auto: + if not closed_on: self.scheduler.cancel(channel.id) async def move_to_in_use(self, channel: discord.TextChannel) -> None: diff --git a/bot/exts/help_channels/_stats.py b/bot/exts/help_channels/_stats.py index e212c495d..cc9a053c4 100644 --- a/bot/exts/help_channels/_stats.py +++ b/bot/exts/help_channels/_stats.py @@ -22,13 +22,13 @@ def report_counts() -> None: log.warning(f"Couldn't find category {name!r} to track channel count stats.") -async def report_complete_session(channel_id: int, is_auto: bool) -> None: +async def report_complete_session(channel_id: int, closed_on: str) -> None: """ Report stats for a completed help session channel `channel_id`. Set `is_auto` to True if the channel was automatically closed or False if manually closed. """ - caller = "auto" if is_auto else "command" + caller = f"auto.{closed_on}" if closed_on else "command" bot.instance.stats.incr(f"help.dormant_calls.{caller}") in_use_time = await _channel.get_in_use_time(channel_id) -- cgit v1.2.3 From cfdff9e9268b599dcc476800df64120b371604a9 Mon Sep 17 00:00:00 2001 From: laundmo Date: Sat, 20 Mar 2021 19:20:17 +0100 Subject: passing pre-commit --- bot/exts/help_channels/_channel.py | 11 +++++++++-- bot/exts/help_channels/_cog.py | 4 ++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 1e152eaa3..986d3f28b 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -62,12 +62,19 @@ async def get_closing_time(channel: discord.TextChannel) -> t.Tuple[datetime, st # The further away closing time is what we should use. 
if claimant_last_message_time >= non_claimant_last_message_time: - log.trace(f"#{channel} ({channel.id}) should be closed at {claimant_last_message_time} due to claimant timeout.") + log.trace( + f"#{channel} ({channel.id}) should be closed at " + f"{claimant_last_message_time} due to claimant timeout." + ) return claimant_last_message_time, "claimant_timeout" else: - log.trace(f"#{channel} ({channel.id}) should be closed at {non_claimant_last_message_time} due to others timeout.") + log.trace( + f"#{channel} ({channel.id}) should be closed at " + f"{non_claimant_last_message_time} due to others timeout." + ) return non_claimant_last_message_time, "others_timeout" + async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: """Return the duration `channel_id` has been in use. Return None if it's not in use.""" log.trace(f"Calculating in use time for channel {channel_id}.") diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index ef6a286d6..1e9332323 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -119,7 +119,7 @@ class HelpChannels(commands.Cog): await _caches.claim_times.set(message.channel.id, timestamp) await _caches.claimant_last_message_times.set(message.channel.id, timestamp) # non_claimant needs to be set too, to satisfy the condition in `_channel.get_closing_time` the first time. - # Otherwise it will fall back to the old method if no other messages are sent. + # Otherwise it will fall back to the old method if no other messages are sent. await _caches.non_claimant_last_message_times.set(message.channel.id, timestamp) # Not awaited because it may indefinitely hold the lock while waiting for a channel. @@ -378,7 +378,7 @@ class HelpChannels(commands.Cog): _unclaim_channel = decorator(_unclaim_channel) return await _unclaim_channel(channel, claimant_id, closed_on) - + async def _delete_message_time_caches(self, channel: discord.TextChannel) -> None: """Delete message time caches """ await _caches.claimant_last_message_times.delete(channel.id) -- cgit v1.2.3 From 1eaf20181ba5bf80b673e7a6e2f73d8ca1c1d2b0 Mon Sep 17 00:00:00 2001 From: laundmo Date: Sat, 20 Mar 2021 19:55:45 +0100 Subject: run precommit with all deps --- bot/exts/help_channels/_cog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 1e9332323..af106e92f 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -380,7 +380,7 @@ class HelpChannels(commands.Cog): return await _unclaim_channel(channel, claimant_id, closed_on) async def _delete_message_time_caches(self, channel: discord.TextChannel) -> None: - """Delete message time caches """ + """Delete message time caches.""" await _caches.claimant_last_message_times.delete(channel.id) await _caches.non_claimant_last_message_times.delete(channel.id) -- cgit v1.2.3 From afe7aff6948c795dd13f78ec31b1662e0a87493e Mon Sep 17 00:00:00 2001 From: Chris Date: Wed, 24 Mar 2021 19:25:10 +0000 Subject: Use fallback behaviour during cog init --- bot/exts/help_channels/_channel.py | 10 +++++++--- bot/exts/help_channels/_cog.py | 3 +-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 986d3f28b..029f55217 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -25,7 +25,7 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco yield channel -async def 
get_closing_time(channel: discord.TextChannel) -> t.Tuple[datetime, str]: +async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[datetime, str]: """Return the timestamp at which the given help `channel` should be closed along with the reason.""" log.trace(f"Getting the closing time for #{channel} ({channel.id}).") @@ -39,8 +39,12 @@ async def get_closing_time(channel: discord.TextChannel) -> t.Tuple[datetime, st non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - if is_empty or not (non_claimant_last_message_time and claimant_last_message_time): - # Current help session has no messages, or at least one of the caches is empty. + if is_empty or not all( + init_done, + non_claimant_last_message_time, + claimant_last_message_time, + ): + # Current help channel has no messages, at least one of the caches is empty or the help system cog is starting. # Use the last message in the channel to determine closing time instead. msg = await _message.get_last_message(channel) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index af106e92f..78ef8e89f 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -298,8 +298,7 @@ class HelpChannels(commands.Cog): dormant will first be cancelled. """ log.trace(f"Handling in-use channel #{channel} ({channel.id}).") - - closing_time, closed_on = await _channel.get_closing_time(channel) + closing_time, closed_on = await _channel.get_closing_time(channel, self.init_task.done()) # The time at which the channel should be closed, based on messages sent. if closing_time < datetime.utcnow(): -- cgit v1.2.3 From 238142855b1a4fc0f88aed5c897b94fcf5a7f53a Mon Sep 17 00:00:00 2001 From: Chris Date: Wed, 24 Mar 2021 19:25:58 +0000 Subject: Use claiment idle time as default idle minutes. --- bot/exts/help_channels/_channel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 029f55217..9fbeeff17 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -34,7 +34,7 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T if is_empty: idle_minutes = constants.HelpChannels.deleted_idle_minutes else: - idle_minutes = constants.HelpChannels.idle_minutes_others + idle_minutes = constants.HelpChannels.idle_minutes_claimant non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) -- cgit v1.2.3 From 2ac3e1f9f24033c77145fa143350814e15542fd2 Mon Sep 17 00:00:00 2001 From: Chris Date: Wed, 24 Mar 2021 19:28:16 +0000 Subject: Add back seperation whitespace --- bot/exts/help_channels/_cog.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 78ef8e89f..06adff397 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -298,6 +298,7 @@ class HelpChannels(commands.Cog): dormant will first be cancelled. """ log.trace(f"Handling in-use channel #{channel} ({channel.id}).") + closing_time, closed_on = await _channel.get_closing_time(channel, self.init_task.done()) # The time at which the channel should be closed, based on messages sent. 
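# [Editor's note] A compact sketch of the decision made just below: close immediately when the
# computed closing time is already in the past, otherwise re-run the check once the deadline is
# reached. "close" and "schedule_later" are placeholders for the cog's unclaim call and its
# scheduler, not real APIs:
from datetime import datetime

def idle_check_sketch(closing_time: datetime, close, schedule_later) -> None:
    now = datetime.utcnow()
    if closing_time < now:
        close()  # idle past the deadline: the channel is made dormant
    else:
        schedule_later((closing_time - now).total_seconds())  # check again at the deadline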
if closing_time < datetime.utcnow(): -- cgit v1.2.3 From 734573baa812dc697c4cbc40c39cd1899b2185ab Mon Sep 17 00:00:00 2001 From: kwzrd Date: Thu, 25 Mar 2021 11:25:51 +0100 Subject: Branding: improve 'compound_hash' docstring --- bot/exts/backend/branding/_cog.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 0723458c2..1f9602401 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -32,7 +32,11 @@ class AssetType(Enum): def compound_hash(objects: t.Iterable[RemoteObject]) -> str: - """Compound hashes are cached to check for change in any of the member `objects`.""" + """ + Join SHA attributes of `objects` into a single string. + + Compound hashes are cached to check for change in any of the member `objects`. + """ return "-".join(item.sha for item in objects) -- cgit v1.2.3 From 8a58d04c277265f2009e46d6fcc9a85f6ddd5896 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Thu, 25 Mar 2021 20:16:25 +0100 Subject: Branding: cache fresh event description in daemon Previously, the event description & duration strings were only stored on event entry. In the case that the description or duration change for an on-going event, the cached values wouldn't be updated. After this commit, the cache is refreshed daily by the daemon. --- bot/exts/backend/branding/_cog.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 1f9602401..7d4f80f13 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -329,9 +329,8 @@ class Branding(commands.Cog): # Cache event identity to avoid re-entry in case of restart await self.cache_information.set("event_path", event.path) - # The following values are only stored for the purpose of presenting them to the users - await self.cache_information.set("event_duration", extract_event_duration(event)) - await self.cache_information.set("event_description", event.meta.description) + # Cache information shown in the 'about' embed + await self.populate_cache_event_description(event) # Notify guild of new event ~ this reads the information that we cached above! if event_changed: @@ -387,6 +386,20 @@ class Branding(commands.Cog): for event in chronological_events }) + async def populate_cache_event_description(self, event: Event) -> None: + """ + Cache `event` description & duration. + + This should be called when entering a new event, and can be called periodically to ensure that the cache + holds fresh information in the case that the event remains the same, but its description changes. + + The duration is stored formatted for the frontend. It is not intended to be used programmatically. 
+ """ + log.trace("Caching event description & duration") + + await self.cache_information.set("event_description", event.meta.description) + await self.cache_information.set("event_duration", extract_event_duration(event)) + # endregion # region: Daemon @@ -439,6 +452,8 @@ class Branding(commands.Cog): await self.enter_event(new_event) return + await self.populate_cache_event_description(new_event) # Cache fresh frontend info in case of change + log.trace("Daemon main: event has not changed, checking for change in assets") if new_event.banner.sha != await self.cache_information.get("banner_hash"): -- cgit v1.2.3 From 65c009736507974ebce20a1f36b7e24a4ecc349b Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 19:45:32 +0000 Subject: Don't prepend command closes with auto --- bot/exts/help_channels/_stats.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bot/exts/help_channels/_stats.py b/bot/exts/help_channels/_stats.py index cc9a053c4..123604945 100644 --- a/bot/exts/help_channels/_stats.py +++ b/bot/exts/help_channels/_stats.py @@ -28,8 +28,9 @@ async def report_complete_session(channel_id: int, closed_on: str) -> None: Set `is_auto` to True if the channel was automatically closed or False if manually closed. """ - caller = f"auto.{closed_on}" if closed_on else "command" - bot.instance.stats.incr(f"help.dormant_calls.{caller}") + if closed_on != "command": + closed_on = f"auto.{closed_on}" + bot.instance.stats.incr(f"help.dormant_calls.{closed_on}") in_use_time = await _channel.get_in_use_time(channel_id) if in_use_time: -- cgit v1.2.3 From 9f576bcf28aa228b6c7f7d57072bba3135047200 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:25:18 +0000 Subject: Change help channel logic to use timezone naive stamps --- bot/exts/help_channels/_channel.py | 4 ++-- bot/exts/help_channels/_cog.py | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 9fbeeff17..9f6bc00a4 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -85,8 +85,8 @@ async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: claimed_timestamp = await _caches.claim_times.get(channel_id) if claimed_timestamp: - claimed = datetime.utcfromtimestamp(claimed_timestamp) - return datetime.utcnow() - claimed + claimed = datetime.fromtimestamp(claimed_timestamp) + return datetime.now() - claimed def is_excluded_channel(channel: discord.abc.GuildChannel) -> bool: diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 06adff397..f60e1a0c2 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -2,7 +2,7 @@ import asyncio import logging import random import typing as t -from datetime import datetime, timezone +from datetime import datetime from operator import attrgetter import discord @@ -114,13 +114,11 @@ class HelpChannels(commands.Cog): self.bot.stats.incr("help.claimed") - # Must use a timezone-aware datetime to ensure a correct POSIX timestamp. 
- timestamp = datetime.now(timezone.utc).timestamp() - await _caches.claim_times.set(message.channel.id, timestamp) - await _caches.claimant_last_message_times.set(message.channel.id, timestamp) + await _caches.claim_times.set(message.channel.id, message.created_at) + await _caches.claimant_last_message_times.set(message.channel.id, message.created_at) # non_claimant needs to be set too, to satisfy the condition in `_channel.get_closing_time` the first time. # Otherwise it will fall back to the old method if no other messages are sent. - await _caches.non_claimant_last_message_times.set(message.channel.id, timestamp) + await _caches.non_claimant_last_message_times.set(message.channel.id, message.created_at) # Not awaited because it may indefinitely hold the lock while waiting for a channel. scheduling.create_task(self.move_to_available(), name=f"help_claim_{message.id}") -- cgit v1.2.3 From 258086ff6831fc0665aeda07d379cd613979dfbc Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:27:42 +0000 Subject: Remove unneeded cache clearing --- bot/exts/help_channels/_cog.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index f60e1a0c2..4031cf3c1 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -273,8 +273,6 @@ class HelpChannels(commands.Cog): log.trace("Moving or rescheduling in-use channels.") for channel in _channel.get_category_channels(self.in_use_category): - # clear the cache here so moving doesn't rely on old cached messages. - await self._delete_message_time_caches(channel) await self.move_idle_channel(channel, has_task=False) # Prevent the command from being used until ready. @@ -377,15 +375,9 @@ class HelpChannels(commands.Cog): return await _unclaim_channel(channel, claimant_id, closed_on) - async def _delete_message_time_caches(self, channel: discord.TextChannel) -> None: - """Delete message time caches.""" - await _caches.claimant_last_message_times.delete(channel.id) - await _caches.non_claimant_last_message_times.delete(channel.id) - async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, closed_on: str) -> None: """Actual implementation of `unclaim_channel`. See that for full documentation.""" await _caches.claimants.delete(channel.id) - await self._delete_message_time_caches(channel) # Ignore missing tasks because a channel may still be dormant after the cooldown expires. if claimant_id in self.scheduler: -- cgit v1.2.3 From b1b105ad71ad53c94a1610e4e076bd0c0a4e466d Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:28:29 +0000 Subject: Check for close on command explictly. --- bot/exts/help_channels/_cog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 4031cf3c1..7dde204d0 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -396,7 +396,7 @@ class HelpChannels(commands.Cog): # Cancel the task that makes the channel dormant only if called by the close command. # In other cases, the task is either already done or not-existent. 
- if not closed_on: + if closed_on == "command": self.scheduler.cancel(channel.id) async def move_to_in_use(self, channel: discord.TextChannel) -> None: -- cgit v1.2.3 From b8eef953511a283da912abdd3b6c673788f3652d Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:29:46 +0000 Subject: Schedule channels just opened using claimant configured idle time --- bot/exts/help_channels/_cog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 7dde204d0..bac17fb2d 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -408,7 +408,7 @@ class HelpChannels(commands.Cog): category_id=constants.Categories.help_in_use, ) - timeout = constants.HelpChannels.idle_minutes_others * 60 + timeout = constants.HelpChannels.idle_minutes_claimant * 60 log.trace(f"Scheduling #{channel} ({channel.id}) to become dormant in {timeout} sec.") self.scheduler.schedule_later(timeout, channel.id, self.move_idle_channel(channel)) -- cgit v1.2.3 From ef38220761fa7f7299b2bd853dcd1f6f7cab9646 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:30:37 +0000 Subject: update help channel on_message docstring to reflect unchanged responsibility --- bot/exts/help_channels/_cog.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index bac17fb2d..a8828348c 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -416,12 +416,7 @@ class HelpChannels(commands.Cog): @commands.Cog.listener() async def on_message(self, message: discord.Message) -> None: - """ - Move an available channel to the In Use category and replace it with a dormant one. - - Update the `last_message_times` cache based on the current timestamp. If the message - author is the claimant of this channel, also update the `claimant_last_message_times` cache. - """ + """Move an available channel to the In Use category and replace it with a dormant one.""" if message.author.bot: return # Ignore messages sent by bots. -- cgit v1.2.3 From 93b9d410ce03a356b7c5edea87b389faeee36168 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:32:07 +0000 Subject: Only fetch claimant id once --- bot/exts/help_channels/_message.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index c20af2946..074fd01da 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -52,12 +52,13 @@ async def update_message_caches(message: discord.Message) -> None: # Must use a timezone-aware datetime to ensure a correct POSIX timestamp. timestamp = datetime.now(timezone.utc).timestamp() + claimant_id = await _caches.claimants.get(channel.id) + # Overwrite the claimant message time, if its from the claimant. - if message.author == await _caches.claimants.get(channel.id): + if message.author.id == claimant_id: await _caches.claimant_last_message_times.set(channel.id, timestamp) return - claimant_id = await _caches.claimants.get(channel.id) if not claimant_id: # The mapping for this channel doesn't exist, we can't do anything. 
return -- cgit v1.2.3 From 3114b48aa4f943726b6afbbc34bc2b02ebec0930 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 20:37:57 +0000 Subject: Change help channel caching logic to use timezone naive stamps --- bot/exts/help_channels/_message.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index 074fd01da..9506f7edd 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -1,7 +1,7 @@ import logging import textwrap import typing as t -from datetime import datetime, timezone +from datetime import datetime import discord @@ -49,8 +49,8 @@ async def update_message_caches(message: discord.Message) -> None: # Confirm the channel is an in use help channel if is_in_category(channel, constants.Categories.help_in_use): log.trace(f"Checking if #{channel} ({channel.id}) has had a reply.") - # Must use a timezone-aware datetime to ensure a correct POSIX timestamp. - timestamp = datetime.now(timezone.utc).timestamp() + # Use datetime naive time stamp to be consistant with timestamps from discord. + timestamp = datetime.now().timestamp() claimant_id = await _caches.claimants.get(channel.id) -- cgit v1.2.3 From bae3d6a75337cf7df58505fd5759ff3bd8df723e Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 22:15:10 +0000 Subject: Create utc datetime objects from timestamps --- bot/exts/help_channels/_channel.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 9f6bc00a4..76cce1249 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -58,8 +58,8 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T return msg.created_at + timedelta(minutes=idle_minutes), "latest_message" # Get the later time at which a channel should be closed - non_claimant_last_message_time = datetime.fromtimestamp(non_claimant_last_message_time) - claimant_last_message_time = datetime.fromtimestamp(claimant_last_message_time) + non_claimant_last_message_time = datetime.utcfromtimestamp(non_claimant_last_message_time) + claimant_last_message_time = datetime.utcfromtimestamp(claimant_last_message_time) non_claimant_last_message_time += timedelta(minutes=idle_minutes) claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) @@ -85,8 +85,8 @@ async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: claimed_timestamp = await _caches.claim_times.get(channel_id) if claimed_timestamp: - claimed = datetime.fromtimestamp(claimed_timestamp) - return datetime.now() - claimed + claimed = datetime.utcfromtimestamp(claimed_timestamp) + return datetime.utcnow() - claimed def is_excluded_channel(channel: discord.abc.GuildChannel) -> bool: -- cgit v1.2.3 From f22865931b9cc40b406d8af488d3e65321626f57 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 22:26:30 +0000 Subject: Refactor help channel update message cache function for clearer flow --- bot/exts/help_channels/_message.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index 9506f7edd..d60b31dea 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -49,22 +49,21 @@ async def update_message_caches(message: discord.Message) -> None: # Confirm the channel is an in use help channel if 
is_in_category(channel, constants.Categories.help_in_use): log.trace(f"Checking if #{channel} ({channel.id}) has had a reply.") - # Use datetime naive time stamp to be consistant with timestamps from discord. - timestamp = datetime.now().timestamp() claimant_id = await _caches.claimants.get(channel.id) - # Overwrite the claimant message time, if its from the claimant. - if message.author.id == claimant_id: - await _caches.claimant_last_message_times.set(channel.id, timestamp) - return - if not claimant_id: # The mapping for this channel doesn't exist, we can't do anything. return - # Cache the timestamp of the non-claimants message - await _caches.non_claimant_last_message_times.set(channel.id, timestamp) + # Use datetime naive time stamp to be consistant with timestamps from discord. + timestamp = message.created_at.timestamp() + + # Overwrite the appropriate last message cache depending on the author of the message + if message.author.id == claimant_id: + await _caches.claimant_last_message_times.set(channel.id, timestamp) + else: + await _caches.non_claimant_last_message_times.set(channel.id, timestamp) async def get_last_message(channel: discord.TextChannel) -> t.Optional[discord.Message]: -- cgit v1.2.3 From d7eb48aef4035494d078e489fd926be4eae48e64 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 22:39:17 +0000 Subject: Refactor if block within help channel system to be more readable --- bot/exts/help_channels/_channel.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 76cce1249..9497cb4fb 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -39,13 +39,14 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - if is_empty or not all( - init_done, - non_claimant_last_message_time, - claimant_last_message_time, + if ( + is_empty + or not init_done + or non_claimant_last_message_time is None + or claimant_last_message_time is None ): - # Current help channel has no messages, at least one of the caches is empty or the help system cog is starting. - # Use the last message in the channel to determine closing time instead. + # if the current help channel has no messages, the help system cog is starting or + # at least one of the caches is empty use the last message in the channel to determine closing time instead. msg = await _message.get_last_message(channel) -- cgit v1.2.3 From a3a5fc491a6fe47791f6a46ceda733f2b01442d7 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 23:24:32 +0000 Subject: Reset a channel's non-claimant cache on claim, to indicate that the session has yet to be answered. 
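Editor's note: taken together with the stats logic earlier in the series, the claim-time
bookkeeping in this commit amounts to the following. A minimal sketch with a plain dict standing
in for the Redis caches (the key names mirror the cog's caches, but nothing here touches Redis):

def on_claim_sketch(caches: dict, channel_id: int, claimed_at_ts: float) -> None:
    # Record when the session started and treat the claim message as the claimant's last message.
    caches.setdefault("claim_times", {})[channel_id] = claimed_at_ts
    caches.setdefault("claimant_last_message_times", {})[channel_id] = claimed_at_ts
    # Dropping the non-claimant entry marks the fresh session as not yet answered; the entry's
    # mere presence later on is what counts the session as answered.
    caches.setdefault("non_claimant_last_message_times", {}).pop(channel_id, None)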
---
 bot/exts/help_channels/_channel.py | 11 +++++++----
 bot/exts/help_channels/_cog.py | 5 ++---
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py
index 9497cb4fb..22966dbe0 100644
--- a/bot/exts/help_channels/_channel.py
+++ b/bot/exts/help_channels/_channel.py
@@ -36,17 +36,20 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T
     else:
         idle_minutes = constants.HelpChannels.idle_minutes_claimant
-    non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id)
     claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id)
+    non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id)
+    if non_claimant_last_message_time is None:
+        # A non-claimant hasn't messaged since session start, set to min timestamp so only claimant
+        # idle period is considered when getting the closing time.
+        non_claimant_last_message_time = datetime.min.timestamp()
     if (
         is_empty
         or not init_done
-        or non_claimant_last_message_time is None
         or claimant_last_message_time is None
     ):
-        # if the current help channel has no messages, the help system cog is starting or
-        # at least one of the caches is empty use the last message in the channel to determine closing time instead.
+        # If the current help channel has no messages, the help system cog is starting or
+        # the claimant cache is empty, use the last message in the channel to determine closing time instead.
         msg = await _message.get_last_message(channel)
diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py
index a8828348c..d9b288280 100644
--- a/bot/exts/help_channels/_cog.py
+++ b/bot/exts/help_channels/_cog.py
@@ -116,9 +116,8 @@ class HelpChannels(commands.Cog):
         await _caches.claim_times.set(message.channel.id, message.created_at)
         await _caches.claimant_last_message_times.set(message.channel.id, message.created_at)
-        # non_claimant needs to be set too, to satisfy the condition in `_channel.get_closing_time` the first time.
-        # Otherwise it will fall back to the old method if no other messages are sent.
-        await _caches.non_claimant_last_message_times.set(message.channel.id, message.created_at)
+        # Reset the non_claimant cache for this channel to indicate that this session has yet to be answered.
+        await _caches.non_claimant_last_message_times.delete(message.channel.id)
         # Not awaited because it may indefinitely hold the lock while waiting for a channel.
scheduling.create_task(self.move_to_available(), name=f"help_claim_{message.id}") -- cgit v1.2.3 From a95178989a9a5d2e2afd40256da8e672de8b2325 Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 25 Mar 2021 23:32:20 +0000 Subject: Convert to timestamp before posting to redis --- bot/exts/help_channels/_cog.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index d9b288280..46817218f 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -114,8 +114,8 @@ class HelpChannels(commands.Cog): self.bot.stats.incr("help.claimed") - await _caches.claim_times.set(message.channel.id, message.created_at) - await _caches.claimant_last_message_times.set(message.channel.id, message.created_at) + await _caches.claim_times.set(message.channel.id, message.created_at.timestamp()) + await _caches.claimant_last_message_times.set(message.channel.id, message.created_at.timestamp()) # Reset thie non_claimant cache for this channel to indicate that this session has yet to be answered. await _caches.non_claimant_last_message_times.delete(message.channel.id) -- cgit v1.2.3 From fea068ff529aa0e2b2f92d0f8e58f5954a49237d Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 26 Mar 2021 00:07:02 +0000 Subject: Fix error when converting non claimant message datetime.min cannot be converted to a timestamp as it's pre-epoch. Instead wait until we actuall need it and then create the correct datetime object depending on teh cache contents. --- bot/exts/help_channels/_channel.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 22966dbe0..5845e7087 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -37,11 +37,6 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T idle_minutes = constants.HelpChannels.idle_minutes_claimant claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) - if non_claimant_last_message_time is None: - # A non-claimant hasn't messaged since session start, set to min timestamp so only claimant - # idle period is considered when getting the closing time. - non_claimant_last_message_time = datetime.min.timestamp() if ( is_empty @@ -61,10 +56,18 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T # The time at which a channel should be closed. return msg.created_at + timedelta(minutes=idle_minutes), "latest_message" - # Get the later time at which a channel should be closed - non_claimant_last_message_time = datetime.utcfromtimestamp(non_claimant_last_message_time) + # Switch to datetime objects so we can use time deltas claimant_last_message_time = datetime.utcfromtimestamp(claimant_last_message_time) + non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) + if non_claimant_last_message_time: + non_claimant_last_message_time = datetime.utcfromtimestamp(non_claimant_last_message_time) + else: + # If it's falsey, then it indicates a non-claimant has yet to reply to this session. + # Set to min date time so it isn't considered when calculating the closing time. 
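# [Editor's note] Context for the fix in this commit: datetime.min (year 1) lies far before the
# Unix epoch, so converting it with .timestamp() raises rather than producing a large negative
# number (the exact exception type, OverflowError, OSError or ValueError, depends on the
# platform). Keeping the sentinel as a datetime and only ever comparing it sidesteps the
# conversion entirely. A small self-contained demonstration:
from datetime import datetime

try:
    datetime.min.timestamp()
except (OverflowError, OSError, ValueError) as exc:
    print(f"datetime.min has no POSIX timestamp: {exc!r}")

# Comparison still works, so datetime.min is a safe "never messaged" placeholder under max().
print(max(datetime.min, datetime(2021, 3, 26)))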
+ non_claimant_last_message_time = datetime.min + + # Get the later time at which a channel should be closed non_claimant_last_message_time += timedelta(minutes=idle_minutes) claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) -- cgit v1.2.3 From 519398bac8cb04ab296e43cc707e466a8a501f12 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 26 Mar 2021 00:09:00 +0000 Subject: Add 1 second due to POSIX timestamps being lower resolution than datetime objects. --- bot/exts/help_channels/_cog.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 46817218f..0e71661ac 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -2,7 +2,7 @@ import asyncio import logging import random import typing as t -from datetime import datetime +from datetime import datetime, timedelta from operator import attrgetter import discord @@ -295,8 +295,10 @@ class HelpChannels(commands.Cog): log.trace(f"Handling in-use channel #{channel} ({channel.id}).") closing_time, closed_on = await _channel.get_closing_time(channel, self.init_task.done()) - # The time at which the channel should be closed, based on messages sent. - if closing_time < datetime.utcnow(): + + # Closing time is in the past. + # Add 1 second due to POSIX timestamps being lower resolution than datetime objects. + if closing_time < (datetime.utcnow() + timedelta(seconds=1)): log.info( f"#{channel} ({channel.id}) is idle past {closing_time} " -- cgit v1.2.3 From c14d9ea78a64b90ccf7815a71206c906c81af710 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Fri, 26 Mar 2021 14:44:46 +0100 Subject: Branding: raise on non-200 responses The fetch helpers will now raise when the request fails rather than logging a warning and returning a fallback value. This allows better error logging as the caller is able to log the propagated exception while adding its own context. Additionally, the caller in some cases no longer needs to check for the None return and raise its own exception. --- bot/exts/backend/branding/_cog.py | 8 ++++---- bot/exts/backend/branding/_repository.py | 35 +++++++++++++++----------------- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 7d4f80f13..d6c5b159b 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -145,10 +145,10 @@ class Branding(commands.Cog): """ log.info(f"Applying {asset_type.value} asset to the guild") - file = await self.repository.fetch_file(download_url) - - if file is None: - log.error(f"Failed to download {asset_type.value} from branding repository!") + try: + file = await self.repository.fetch_file(download_url) + except Exception as fetch_exc: + log.error(f"Failed to fetch '{asset_type.value}' asset: {fetch_exc}") return False await self.bot.wait_until_guild_available() diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index e14ff4226..715361c5d 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -100,33 +100,30 @@ class BrandingRepository: The directory will be represented by a mapping from file or sub-directory names to their corresponding instances of `RemoteObject`. Passing a custom `types` value allows only getting files or directories. - If the request fails, returns an empty dictionary. 
+ An exception will be raised if the request fails, or if the response lacks the expected keys. """ full_url = f"{BRANDING_URL}/{path}" log.debug(f"Fetching directory from branding repository: {full_url}") async with self.bot.http_session.get(full_url, params=PARAMS, headers=HEADERS) as response: - if response.status == 200: - json_directory = await response.json() - else: - log.warning(f"Received non-200 response status: {response.status}") - return {} + if response.status != 200: + raise RuntimeError(f"Failed to fetch directory due to status: {response.status}") + json_directory = await response.json() return {file["name"]: RemoteObject(file) for file in json_directory if file["type"] in types} - async def fetch_file(self, download_url: str) -> t.Optional[bytes]: + async def fetch_file(self, download_url: str) -> bytes: """ - Fetch file from `download_url`. + Fetch file as bytes from `download_url`. - Returns the file as bytes unless the request fails, in which case None is given. + Raise an exception if the request does not succeed. """ log.debug(f"Fetching file from branding repository: {download_url}") async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response: - if response.status == 200: - return await response.read() - else: - log.warning(f"Received non-200 response status: {response.status}") + if response.status != 200: + raise RuntimeError(f"Failed to fetch file due to status: {response.status}") + return await response.read() async def parse_meta_file(self, raw_file: bytes) -> MetaFile: """ @@ -170,16 +167,11 @@ class BrandingRepository: server_icons = await self.fetch_directory(contents["server_icons"].path, types=("file",)) - if server_icons is None: - raise BrandingMisconfiguration("Failed to fetch server icons!") if len(server_icons) == 0: raise BrandingMisconfiguration("Found no server icons!") meta_bytes = await self.fetch_file(contents["meta.md"].download_url) - if meta_bytes is None: - raise BrandingMisconfiguration("Failed to fetch 'meta.md' file!") - meta_file = await self.parse_meta_file(meta_bytes) return Event(directory.path, meta_file, contents["banner.png"], list(server_icons.values())) @@ -193,7 +185,12 @@ class BrandingRepository: """ log.debug("Discovering events in branding repository") - event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files + try: + event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files + except Exception as fetch_exc: + log.error(f"Failed to fetch 'events' directory: {fetch_exc}") + return [] + instances: t.List[Event] = [] for event_directory in event_directories.values(): -- cgit v1.2.3 From c619a98e6ce16298e999d18667b86ee9f094b550 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Fri, 26 Mar 2021 15:03:56 +0100 Subject: Branding: raise custom error when constructing remote objects The default KeyError message from dict lookup is just the missing key. In order to give more context in the log message, we raise our own. 
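For illustration, a minimal hypothetical sketch of the motivation (the `payload` dict and its keys below are made up, not the real GitHub API response): a plain dict lookup raises a KeyError whose message is only the missing key name, while checking against the expected annotations up front produces one error that names every missing key with context.

    payload = {"name": "banner.png", "path": "events/foo/banner.png"}  # hypothetical fetched object; several keys missing
    expected = {"sha", "name", "path", "type", "download_url"}

    try:
        payload["sha"]
    except KeyError as exc:
        print(exc)  # prints: 'sha' -- the default message is only the missing key name

    missing = expected - payload.keys()
    if missing:
        # Mirrors the check added below: a single error naming all missing keys, with context.
        raise KeyError(f"Fetched object lacks expected keys: {missing}")
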
--- bot/exts/backend/branding/_repository.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 715361c5d..91a95ae3a 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -43,6 +43,9 @@ class RemoteObject: def __init__(self, dictionary: t.Dict[str, t.Any]) -> None: """Initialize by grabbing annotated attributes from `dictionary`.""" + missing_keys = self.__annotations__.keys() - dictionary.keys() + if missing_keys: + raise KeyError(f"Fetched object lacks expected keys: {missing_keys}") for annotation in self.__annotations__: setattr(self, annotation, dictionary[annotation]) -- cgit v1.2.3 From fc6a160983cc9548f51e385033ab3755c9d121b1 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Fri, 26 Mar 2021 15:06:03 +0100 Subject: Branding: make 'meta.md' parser synchronous No reason for this to be async. --- bot/exts/backend/branding/_repository.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 91a95ae3a..420cfb9ea 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -128,7 +128,7 @@ class BrandingRepository: raise RuntimeError(f"Failed to fetch file due to status: {response.status}") return await response.read() - async def parse_meta_file(self, raw_file: bytes) -> MetaFile: + def parse_meta_file(self, raw_file: bytes) -> MetaFile: """ Parse a 'meta.md' file from raw bytes. @@ -175,7 +175,7 @@ class BrandingRepository: meta_bytes = await self.fetch_file(contents["meta.md"].download_url) - meta_file = await self.parse_meta_file(meta_bytes) + meta_file = self.parse_meta_file(meta_bytes) return Event(directory.path, meta_file, contents["banner.png"], list(server_icons.values())) -- cgit v1.2.3 From bd64acac079c564d3fca64519463518f7056dfe2 Mon Sep 17 00:00:00 2001 From: vcokltfre Date: Fri, 26 Mar 2021 17:49:15 +0000 Subject: fix: remove . from the hyperlink Co-authored-by: Joe Banks <20439493+jb3@users.noreply.github.com> --- bot/resources/tags/intents.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md index 6a282bc17..e08fd1c33 100644 --- a/bot/resources/tags/intents.md +++ b/bot/resources/tags/intents.md @@ -16,4 +16,4 @@ intents.members = True bot = commands.Bot(command_prefix="!", intents=intents) ``` -For more info about using intents, see the [discord.py docs on intents.](https://discordpy.readthedocs.io/en/latest/intents.html) +For more info about using intents, see the [discord.py docs on intents](https://discordpy.readthedocs.io/en/latest/intents.html). -- cgit v1.2.3 From 2cf2402ea51e3a61d319706a95bc4ab633d6b8fc Mon Sep 17 00:00:00 2001 From: vcokltfre Date: Fri, 26 Mar 2021 17:52:07 +0000 Subject: feat: add link to discord dev portal intents section --- bot/resources/tags/intents.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md index e08fd1c33..464caf0ba 100644 --- a/bot/resources/tags/intents.md +++ b/bot/resources/tags/intents.md @@ -16,4 +16,4 @@ intents.members = True bot = commands.Bot(command_prefix="!", intents=intents) ``` -For more info about using intents, see the [discord.py docs on intents](https://discordpy.readthedocs.io/en/latest/intents.html). 
+For more info about using intents, see the [discord.py docs on intents](https://discordpy.readthedocs.io/en/latest/intents.html), and for general information about them, see the [Discord developer documentation on intents](https://discord.com/developers/docs/topics/gateway#gateway-intents). -- cgit v1.2.3 From a732e0a412e72233af98b4954c2fa001a06bd8c7 Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 26 Mar 2021 21:57:59 +0000 Subject: Use correct constant for each type of help session user --- bot/exts/help_channels/_channel.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 5845e7087..d46969d4f 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -32,9 +32,9 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T is_empty = await _message.is_empty(channel) if is_empty: - idle_minutes = constants.HelpChannels.deleted_idle_minutes + idle_minutes_claimant = constants.HelpChannels.deleted_idle_minutes else: - idle_minutes = constants.HelpChannels.idle_minutes_claimant + idle_minutes_claimant = constants.HelpChannels.idle_minutes_claimant claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) @@ -54,7 +54,7 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T return datetime.min, "deleted" # The time at which a channel should be closed. - return msg.created_at + timedelta(minutes=idle_minutes), "latest_message" + return msg.created_at + timedelta(minutes=idle_minutes_claimant), "latest_message" # Switch to datetime objects so we can use time deltas claimant_last_message_time = datetime.utcfromtimestamp(claimant_last_message_time) @@ -68,8 +68,8 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T non_claimant_last_message_time = datetime.min # Get the later time at which a channel should be closed - non_claimant_last_message_time += timedelta(minutes=idle_minutes) - claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_claimant) + non_claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_others) + claimant_last_message_time += timedelta(minutes=idle_minutes_claimant) # The further away closing time is what we should use. if claimant_last_message_time >= non_claimant_last_message_time: -- cgit v1.2.3 From cc3c47463d4317b072810deb622d54dddf9a132c Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 26 Mar 2021 15:19:25 -0700 Subject: Switch to datetime.fromtimestamp() to avoid over-compensation Previously we were using `utcfromtimestamp()` which would compensate the timestamp when converting to UTC even though the timestamp itself was in UTC: >>> datetime.utcnow() datetime.datetime(2021, 3, 26, 22, 8, 47, 441603) >>> a = datetime.utcnow().timestamp() 1616821624.207364 >>> a = datetime.utcfromtimestamp(a) datetime.datetime(2021, 3, 27, 5, 7, 4, 207364) By switching to `fromtimestamp()` this avoids that behaviour. 
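A minimal, self-contained version of the round-trip described above (results depend on the machine's local timezone; on a host configured for UTC the two calls agree):

    from datetime import datetime

    naive_utc = datetime.utcnow()          # naive datetime holding a UTC wall-clock time
    ts = naive_utc.timestamp()             # .timestamp() treats a naive datetime as *local* time

    print(datetime.utcfromtimestamp(ts))   # shifted from naive_utc by the local UTC offset
    print(datetime.fromtimestamp(ts))      # matches naive_utc again, since the local-time assumption cancels out
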
--- bot/exts/help_channels/_channel.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index d46969d4f..b1960531d 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -57,11 +57,11 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T return msg.created_at + timedelta(minutes=idle_minutes_claimant), "latest_message" # Switch to datetime objects so we can use time deltas - claimant_last_message_time = datetime.utcfromtimestamp(claimant_last_message_time) + claimant_last_message_time = datetime.fromtimestamp(claimant_last_message_time) non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) if non_claimant_last_message_time: - non_claimant_last_message_time = datetime.utcfromtimestamp(non_claimant_last_message_time) + non_claimant_last_message_time = datetime.fromtimestamp(non_claimant_last_message_time) else: # If it's falsey, then it indicates a non-claimant has yet to reply to this session. # Set to min date time so it isn't considered when calculating the closing time. @@ -92,7 +92,7 @@ async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: claimed_timestamp = await _caches.claim_times.get(channel_id) if claimed_timestamp: - claimed = datetime.utcfromtimestamp(claimed_timestamp) + claimed = datetime.fromtimestamp(claimed_timestamp) return datetime.utcnow() - claimed -- cgit v1.2.3 From 288cdac3e826a4fb67d8ba2c1fe98ea52f9fe658 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 27 Mar 2021 11:59:22 +0100 Subject: Branding: ensure daemon logs exceptions --- bot/exts/backend/branding/_cog.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index d6c5b159b..57347b60e 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -477,19 +477,22 @@ class Branding(commands.Cog): """ log.trace("Daemon loop: calling daemon main") - await self.daemon_main() + try: + await self.daemon_main() + except Exception: + log.exception("Daemon loop: failed with an unhandled exception!") @daemon_loop.before_loop async def daemon_before(self) -> None: """ - Call `daemon_main` immediately, then block `daemon_loop` until the next-up UTC midnight. + Call `daemon_loop` immediately, then block the loop until the next-up UTC midnight. - The first iteration will be invoked manually such that synchronisation happens immediately after daemon start. + The first iteration is invoked directly such that synchronisation happens immediately after daemon start. We then calculate the time until the next-up midnight and sleep before letting `daemon_loop` begin. 
""" - log.info("Daemon before: synchronising guild") + log.trace("Daemon before: performing start-up iteration") - await self.daemon_main() + await self.daemon_loop() log.trace("Daemon before: calculating time to sleep before loop begins") now = datetime.utcnow() -- cgit v1.2.3 From 3a0ddbb3709bd36f2e15bb77c5de7f157ed64425 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 27 Mar 2021 13:24:31 +0100 Subject: Branding: revise documentation --- bot/exts/backend/branding/_cog.py | 116 ++++++++++++++----------------- bot/exts/backend/branding/_repository.py | 61 ++++++++-------- 2 files changed, 80 insertions(+), 97 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 57347b60e..c7d326da3 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -74,8 +74,8 @@ def extract_event_name(event: Event) -> str: An event with a path of 'events/black_history_month' will resolve to 'Black History Month'. """ - name = event.path.split("/")[-1] # Inner-most directory name - words = name.split("_") # Words from snake case + name = event.path.split("/")[-1] # Inner-most directory name. + words = name.split("_") # Words from snake case. return " ".join(word.title() for word in words) @@ -84,44 +84,35 @@ class Branding(commands.Cog): """ Guild branding management. - This cog is responsible for automatic management of the guild's branding while sourcing assets directly from - the branding repository. + Extension responsible for automatic synchronisation of the guild's branding with the branding repository. + Event definitions and assets are automatically discovered and applied as appropriate. - We utilize multiple Redis caches to persist state. As a result, the cog should seamlessly transition across - restarts without having to query either the Discord or GitHub APIs, as it will always remember which - assets are currently applied. + All state is stored in Redis. The cog should therefore seamlessly transition across restarts and maintain + a consistent icon rotation schedule for events with multiple icon assets. - Additionally, the state of the icon rotation is persisted. As a result, the rotation doesn't reset unless - the current event or its icons change. + By caching hashes of banner & icon assets, we discover changes in currently applied assets and always keep + the latest version applied. - The cog is designed to be autonomous. The daemon, unless disabled, will poll the branding repository at - midnight every day and respond to detected changes. Since we persist SHA hashes of tracked assets, - changes in an on-going event will trigger automatic resynchronisation. - - A #changelog notification is automatically sent when entering a new event. Changes in the branding of - an on-going event do not trigger a repeated notification. - - The command interface allows moderators+ to control the daemon or request an asset synchronisation, - while regular users can see information about the current event and the overall event schedule. + The command interface allows moderators+ to control the daemon or request asset synchronisation, while + regular users can see information about the current event and the overall event schedule. 
""" # RedisCache[ - # "daemon_active": If True, daemon auto-starts; controlled via commands (bool) - # "event_path": Path from root in the branding repo (str) - # "event_description": Markdown description (str) - # "event_duration": Human-readable date range or 'Fallback' (str) - # "banner_hash": Hash of the last applied banner (str) - # "icons_hash": Compound hash of icons in rotation (str) - # "last_rotation_timestamp": POSIX timestamp (float) + # "daemon_active": bool | If True, daemon starts on start-up. Controlled via commands. + # "event_path": str | Current event's path in the branding repo. + # "event_description": str | Current event's Markdown description. + # "event_duration": str | Current event's human-readable date range. + # "banner_hash": str | SHA of the currently applied banner. + # "icons_hash": str | Compound SHA of all icons in current rotation. + # "last_rotation_timestamp": float | POSIX UTC timestamp. # ] cache_information = RedisCache() - # Cache holding icons in current rotation ~ the keys are download URLs (str) and the values are integers - # corresponding to the amount of times each icon has been used in the current rotation + # Icons in current rotation. Keys (str) are download URLs, values (int) track the amount of times each + # icon has been used in the current rotation. cache_icons = RedisCache() - # Cache holding all available event names & their durations; this is cached by the daemon and read by - # the calendar command with the intention of preventing API spam; doesn't contain the fallback event + # All available event names & durations. Cached by the daemon nightly; read by the calendar command. cache_events = RedisCache() def __init__(self, bot: Bot) -> None: @@ -129,19 +120,16 @@ class Branding(commands.Cog): self.bot = bot self.repository = BrandingRepository(bot) - self.bot.loop.create_task(self.maybe_start_daemon()) # Start depending on cache + self.bot.loop.create_task(self.maybe_start_daemon()) # Start depending on cache. - # region: Internal utility + # region: Internal logic & state management - @mock_in_debug(return_value=True) + @mock_in_debug(return_value=True) # Mocked in development environment to prevent API spam. async def apply_asset(self, asset_type: AssetType, download_url: str) -> bool: """ Download asset from `download_url` and apply it to PyDis as `asset_type`. - This function is mocked in the development environment in order to prevent API spam during testing. - Decorator should be temporarily removed in order to test internal methodology. - - Returns a boolean indicating whether the application was successful. + Return a boolean indicating whether the application was successful. """ log.info(f"Applying {asset_type.value} asset to the guild") @@ -154,7 +142,7 @@ class Branding(commands.Cog): await self.bot.wait_until_guild_available() pydis: discord.Guild = self.bot.get_guild(Guild.id) - timeout = 10 # Seconds + timeout = 10 # Seconds. try: with async_timeout.timeout(timeout): await pydis.edit(**{asset_type.value: file}) @@ -174,7 +162,7 @@ class Branding(commands.Cog): Banners should always be applied via this method in order to ensure that the last hash is cached. - Returns a boolean indicating whether the application was successful. + Return a boolean indicating whether the application was successful. """ success = await self.apply_asset(AssetType.BANNER, banner.download_url) @@ -194,14 +182,14 @@ class Branding(commands.Cog): In the case that there is only 1 icon in the rotation and has already been applied, do nothing. 
- Returns a boolean indicating whether a new icon was applied successfully. + Return a boolean indicating whether a new icon was applied successfully. """ log.debug("Rotating icons") state = await self.cache_icons.to_dict() log.trace(f"Total icons in rotation: {len(state)}") - if not state: # This would only happen if rotation not initiated, but we can handle gracefully + if not state: # This would only happen if rotation not initiated, but we can handle gracefully. log.warning("Attempted icon rotation with an empty icon cache!") return False @@ -209,7 +197,7 @@ class Branding(commands.Cog): log.debug("Aborting icon rotation: only 1 icon is available and has already been applied") return False - current_iteration = min(state.values()) # Choose iteration to draw from + current_iteration = min(state.values()) # Choose iteration to draw from. options = [download_url for download_url, times_used in state.items() if times_used == current_iteration] log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}") @@ -218,7 +206,7 @@ class Branding(commands.Cog): success = await self.apply_asset(AssetType.ICON, next_icon) if success: - await self.cache_icons.increment(next_icon) # Push the icon into the next iteration + await self.cache_icons.increment(next_icon) # Push the icon into the next iteration. timestamp = datetime.utcnow().timestamp() await self.cache_information.set("last_rotation_timestamp", timestamp) @@ -237,7 +225,7 @@ class Branding(commands.Cog): last_rotation_timestamp = await self.cache_information.get("last_rotation_timestamp") - if last_rotation_timestamp is None: # Maiden case ~ never rotated + if last_rotation_timestamp is None: # Maiden case ~ never rotated. await self.rotate_icons() return @@ -253,9 +241,9 @@ class Branding(commands.Cog): """ Set up a new icon rotation. - This function should be called whenever the set of `available_icons` changes. This is generally the case - when we enter a new event, but potentially also when the assets of an on-going event change. In such cases, - a reset of `cache_icons` is necessary, because it contains download URLs which may have gotten stale. + This function should be called whenever available icons change. This is generally the case when we enter + a new event, but potentially also when the assets of an on-going event change. In such cases, a reset + of `cache_icons` is necessary, because it contains download URLs which may have gotten stale. This function does not upload a new icon! """ @@ -314,25 +302,25 @@ class Branding(commands.Cog): The #changelog notification is sent only if `event` differs from the currently cached event. - Returns a 2-tuple indicating whether the banner, and the icon, were applied successfully. + Return a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ log.debug(f"Entering event: {event.path}") - banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly + banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly. - await self.initiate_icon_rotation(event.icons) # Prepare a new rotation - icon_success = await self.rotate_icons() # Apply an icon from the new rotation + await self.initiate_icon_rotation(event.icons) # Prepare a new rotation. + icon_success = await self.rotate_icons() # Apply an icon from the new rotation. - # This will only be False in the case of a manual same-event re-synchronisation + # This will only be False in the case of a manual same-event re-synchronisation. 
event_changed = event.path != await self.cache_information.get("event_path") - # Cache event identity to avoid re-entry in case of restart + # Cache event identity to avoid re-entry in case of restart. await self.cache_information.set("event_path", event.path) - # Cache information shown in the 'about' embed + # Cache information shown in the 'about' embed. await self.populate_cache_event_description(event) - # Notify guild of new event ~ this reads the information that we cached above! + # Notify guild of new event ~ this reads the information that we cached above. if event_changed: await self.send_info_embed(Channels.change_log) else: @@ -348,7 +336,7 @@ class Branding(commands.Cog): in a recovery scenario. In the usual case, the daemon already has an `Event` instance and can pass it to `enter_event` directly. - Returns a 2-tuple indicating whether the banner, and the icon, were applied successfully. + Return a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ log.debug("Synchronise: fetching current event") @@ -380,7 +368,7 @@ class Branding(commands.Cog): log.trace(f"Writing {len(chronological_events)} events (fallback omitted)") - with contextlib.suppress(ValueError): # Cache raises when updated with an empty dict + with contextlib.suppress(ValueError): # Cache raises when updated with an empty dict. await self.cache_events.update({ extract_event_name(event): extract_event_duration(event) for event in chronological_events @@ -407,7 +395,7 @@ class Branding(commands.Cog): """ Start the daemon depending on cache state. - The daemon will only start if it's been previously explicitly enabled via a command. + The daemon will only start if it has been explicitly enabled via a command. """ log.debug("Checking whether daemon is enabled") @@ -452,7 +440,7 @@ class Branding(commands.Cog): await self.enter_event(new_event) return - await self.populate_cache_event_description(new_event) # Cache fresh frontend info in case of change + await self.populate_cache_event_description(new_event) # Cache fresh frontend info in case of change. log.trace("Daemon main: event has not changed, checking for change in assets") @@ -497,7 +485,7 @@ class Branding(commands.Cog): log.trace("Daemon before: calculating time to sleep before loop begins") now = datetime.utcnow() - # The actual midnight moment is offset into the future in order to prevent issues with imprecise sleep + # The actual midnight moment is offset into the future in order to prevent issues with imprecise sleep. tomorrow = now + timedelta(days=1) midnight = datetime.combine(tomorrow, time(minute=1)) @@ -517,7 +505,7 @@ class Branding(commands.Cog): @branding_group.command(name="about", aliases=("current", "event")) async def branding_about_cmd(self, ctx: commands.Context) -> None: - """Show the current event description.""" + """Show the current event's description and duration.""" await self.send_info_embed(ctx.channel.id) @commands.has_any_role(*MODERATION_ROLES) @@ -526,7 +514,7 @@ class Branding(commands.Cog): """ Force branding synchronisation. - Shows which assets have failed to synchronise, if any. + Show which assets have failed to synchronise, if any. 
""" async with ctx.typing(): banner_success, icon_success = await self.synchronise() @@ -565,7 +553,7 @@ class Branding(commands.Cog): """ if ctx.invoked_subcommand: # If you're wondering why this works: when the 'refresh' subcommand eventually re-invokes - # this group, the attribute will be automatically set to None by the framework + # this group, the attribute will be automatically set to None by the framework. return available_events = await self.cache_events.to_dict() @@ -578,10 +566,10 @@ class Branding(commands.Cog): embed = discord.Embed(title="Current event calendar", colour=discord.Colour.blurple()) - # Because a Discord embed can only contain up to 25 fields, we only show the first 25 + # Because Discord embeds can only contain up to 25 fields, we only show the first 25. first_25 = list(available_events.items())[:25] - if len(first_25) != len(available_events): # Alert core devs that a paginating solution is now necessary + if len(first_25) != len(available_events): # Alert core devs that a paginating solution is now necessary. log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25!") for name, duration in first_25: diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 420cfb9ea..694e79b51 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -8,21 +8,21 @@ from bot.bot import Bot from bot.constants import Keys from bot.errors import BrandingMisconfiguration -# Base URL for requests into the branding repository +# Base URL for requests into the branding repository. BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents" -PARAMS = {"ref": "kwzrd/events-rework"} # Target branch -HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3 +PARAMS = {"ref": "kwzrd/events-rework"} # Target branch. +HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3. -# A GitHub token is not necessary for the cog to operate, unauthorized requests are however limited to 60 per hour +# A GitHub token is not necessary. However, unauthorized requests are limited to 60 per hour. if Keys.github: HEADERS["Authorization"] = f"token {Keys.github}" -# Since event periods are year-agnostic, we parse them into `datetime` objects with a manually inserted year -# Please note that this is intentionally a leap year in order to allow Feb 29 to be valid +# Since event periods are year-agnostic, we parse them into `datetime` objects with a manually inserted year. +# Please note that this is intentionally a leap year in order to allow Feb 29 to be valid. ARBITRARY_YEAR = 2020 -# Format used to parse date strings after we inject `ARBITRARY_YEAR` at the end +# Format used to parse date strings after we inject `ARBITRARY_YEAR` at the end. DATE_FMT = "%B %d %Y" # Ex: July 10 2020 log = logging.getLogger(__name__) @@ -30,15 +30,15 @@ log = logging.getLogger(__name__) class RemoteObject: """ - Represent a remote file or directory on GitHub. + Remote file or directory on GitHub. The annotations match keys in the response JSON that we're interested in. """ - sha: str # Hash helps us detect asset change - name: str # Filename - path: str # Path from repo root - type: str # Either 'file' or 'dir' + sha: str # Hash helps us detect asset change. + name: str # Filename. + path: str # Path from repo root. + type: str # Either 'file' or 'dir'. download_url: t.Optional[str] # If type is 'dir', this is None! 
def __init__(self, dictionary: t.Dict[str, t.Any]) -> None: @@ -51,18 +51,18 @@ class RemoteObject: class MetaFile(t.NamedTuple): - """Composition of attributes defined in a 'meta.md' file.""" + """Attributes defined in a 'meta.md' file.""" is_fallback: bool start_date: t.Optional[date] end_date: t.Optional[date] - description: str # Markdown event description + description: str # Markdown event description. class Event(t.NamedTuple): - """Represent an event defined in the branding repository.""" + """Event defined in the branding repository.""" - path: str # Path from repo root where event lives + path: str # Path from repo root where event lives. This is the event's identity. meta: MetaFile banner: RemoteObject icons: t.List[RemoteObject] @@ -75,15 +75,12 @@ class BrandingRepository: """ Branding repository abstraction. - This class represents the branding repository's main branch and exposes available events and assets as objects. + This class represents the branding repository's main branch and exposes available events and assets + as objects. It performs the necessary amount of validation to ensure that a misconfigured event + isn't returned. Such events are simply ignored, and will be substituted with the fallback event, + if available. Warning logs will inform core developers if a misconfigured event is encountered. - The API is primarily formed by the `get_current_event` function. It performs the necessary amount of validation - to ensure that a misconfigured event isn't returned. Such events are simply ignored, and will be substituted - with the fallback event, if available. - - Warning logs will inform core developers if a misconfigured event is encountered. - - Colliding events cause no special behaviour - in such cases, the first found active event is returned. + Colliding events cause no special behaviour. In such cases, the first found active event is returned. We work with the assumption that the branding repository checks for such conflicts and prevents them from reaching the main branch. @@ -100,10 +97,9 @@ class BrandingRepository: """ Fetch directory found at `path` in the branding repository. - The directory will be represented by a mapping from file or sub-directory names to their corresponding - instances of `RemoteObject`. Passing a custom `types` value allows only getting files or directories. + Raise an exception if the request fails, or if the response lacks the expected keys. - An exception will be raised if the request fails, or if the response lacks the expected keys. + Passing custom `types` allows getting only files or directories. By default, both are included. """ full_url = f"{BRANDING_URL}/{path}" log.debug(f"Fetching directory from branding repository: {full_url}") @@ -148,8 +144,8 @@ class BrandingRepository: if None in (start_date_raw, end_date_raw): raise BrandingMisconfiguration("Non-fallback event doesn't have start and end dates defined!") - # We extend the configured month & day with an arbitrary leap year to allow a `datetime` repr to exist - # This may raise errors if configured in a wrong format ~ we let the caller handle such cases + # We extend the configured month & day with an arbitrary leap year, allowing a datetime object to exist. + # This may raise errors if misconfigured. We let the caller handle such cases. 
start_date = datetime.strptime(f"{start_date_raw} {ARBITRARY_YEAR}", DATE_FMT).date() end_date = datetime.strptime(f"{end_date_raw} {ARBITRARY_YEAR}", DATE_FMT).date() @@ -183,13 +179,12 @@ class BrandingRepository: """ Discover available events in the branding repository. - Misconfigured events are skipped, the return value may therefore not contain a representation of each - directory in the repository. May return an empty list in the catastrophic case. + Misconfigured events are skipped. May return an empty list in the catastrophic case. """ log.debug("Discovering events in branding repository") try: - event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files + event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files. except Exception as fetch_exc: log.error(f"Failed to fetch 'events' directory: {fetch_exc}") return [] @@ -220,7 +215,7 @@ class BrandingRepository: utc_now = datetime.utcnow() log.debug(f"Finding active event for: {utc_now}") - # As all events exist in the arbitrary year, we construct a separate object for the purposes of comparison + # Construct an object in the arbitrary year for the purpose of comparison. lookup_now = date(year=ARBITRARY_YEAR, month=utc_now.month, day=utc_now.day) available_events = await self.get_events() -- cgit v1.2.3 From 0e8ba3462050e79020ffce0bb6e0bbaf788ecc07 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 27 Mar 2021 14:17:21 +0100 Subject: Branding: revise log messages & levels Logs are now proper sentences ended with full stops. Exceptions are logged with full tracebacks, and log level are revised to be more sensible and consistent across the extension. --- bot/exts/backend/branding/_cog.py | 78 ++++++++++++++++---------------- bot/exts/backend/branding/_repository.py | 21 +++++---- 2 files changed, 50 insertions(+), 49 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index c7d326da3..38ec279cd 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -131,12 +131,12 @@ class Branding(commands.Cog): Return a boolean indicating whether the application was successful. """ - log.info(f"Applying {asset_type.value} asset to the guild") + log.info(f"Applying '{asset_type.value}' asset to the guild.") try: file = await self.repository.fetch_file(download_url) - except Exception as fetch_exc: - log.error(f"Failed to fetch '{asset_type.value}' asset: {fetch_exc}") + except Exception: + log.exception(f"Failed to fetch '{asset_type.value}' asset.") return False await self.bot.wait_until_guild_available() @@ -146,14 +146,14 @@ class Branding(commands.Cog): try: with async_timeout.timeout(timeout): await pydis.edit(**{asset_type.value: file}) - except discord.HTTPException as http_exc: - log.error(f"Asset upload to Discord failed: {http_exc}") + except discord.HTTPException: + log.exception("Asset upload to Discord failed.") return False except asyncio.TimeoutError: - log.error(f"Asset upload to Discord timed out after {timeout} seconds!") + log.error(f"Asset upload to Discord timed out after {timeout} seconds.") return False else: - log.debug("Asset uploaded successfully!") + log.trace("Asset uploaded successfully.") return True async def apply_banner(self, banner: RemoteObject) -> bool: @@ -184,23 +184,23 @@ class Branding(commands.Cog): Return a boolean indicating whether a new icon was applied successfully. 
""" - log.debug("Rotating icons") + log.debug("Rotating icons.") state = await self.cache_icons.to_dict() - log.trace(f"Total icons in rotation: {len(state)}") + log.trace(f"Total icons in rotation: {len(state)}.") if not state: # This would only happen if rotation not initiated, but we can handle gracefully. - log.warning("Attempted icon rotation with an empty icon cache!") + log.warning("Attempted icon rotation with an empty icon cache. This indicates wrong logic.") return False if len(state) == 1 and 1 in state.values(): - log.debug("Aborting icon rotation: only 1 icon is available and has already been applied") + log.debug("Aborting icon rotation: only 1 icon is available and has already been applied.") return False current_iteration = min(state.values()) # Choose iteration to draw from. options = [download_url for download_url, times_used in state.items() if times_used == current_iteration] - log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}") + log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}.") next_icon = random.choice(options) success = await self.apply_asset(AssetType.ICON, next_icon) @@ -221,7 +221,7 @@ class Branding(commands.Cog): Because there is work to be done before the timestamp is read and written, the next read will likely commence slightly under 24 hours after the last write. """ - log.debug("Checking if icons should rotate") + log.debug("Checking whether it's time for icons to rotate.") last_rotation_timestamp = await self.cache_information.get("last_rotation_timestamp") @@ -232,7 +232,7 @@ class Branding(commands.Cog): last_rotation = datetime.fromtimestamp(last_rotation_timestamp) difference = (datetime.utcnow() - last_rotation) + timedelta(minutes=5) - log.trace(f"Icons last rotated at {last_rotation} (difference: {difference})") + log.trace(f"Icons last rotated at {last_rotation} (difference: {difference}).") if difference.days >= BrandingConfig.cycle_frequency: await self.rotate_icons() @@ -247,14 +247,14 @@ class Branding(commands.Cog): This function does not upload a new icon! """ - log.debug("Initiating new icon rotation") + log.debug("Initiating new icon rotation.") await self.cache_icons.clear() new_state = {icon.download_url: 0 for icon in available_icons} await self.cache_icons.update(new_state) - log.trace(f"Icon rotation initiated for {len(new_state)} icons") + log.trace(f"Icon rotation initiated for {len(new_state)} icons.") await self.cache_information.set("icons_hash", compound_hash(available_icons)) @@ -268,7 +268,7 @@ class Branding(commands.Cog): To support either case, we read information about the current event from `cache_information`. The caller is therefore responsible for making sure that the cache is up-to-date before calling this function. 
""" - log.debug(f"Sending event information event to channel id: {channel_id}") + log.debug(f"Sending event information event to channel id: {channel_id}.") await self.bot.wait_until_guild_available() channel: t.Optional[discord.TextChannel] = self.bot.get_channel(channel_id) @@ -277,7 +277,7 @@ class Branding(commands.Cog): log.warning(f"Cannot send event information: channel {channel_id} not found!") return - log.debug(f"Destination channel: #{channel.name}") + log.trace(f"Destination channel: #{channel.name}.") description = await self.cache_information.get("event_description") duration = await self.cache_information.get("event_duration") @@ -304,7 +304,7 @@ class Branding(commands.Cog): Return a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ - log.debug(f"Entering event: {event.path}") + log.info(f"Entering event: '{event.path}'.") banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly. @@ -324,7 +324,7 @@ class Branding(commands.Cog): if event_changed: await self.send_info_embed(Channels.change_log) else: - log.trace("Omitted #changelog notification as event has not changed (indicating manual re-sync)") + log.trace("Omitted #changelog notification as event has not changed. Assuming manual re-sync.") return banner_success, icon_success @@ -338,14 +338,14 @@ class Branding(commands.Cog): Return a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ - log.debug("Synchronise: fetching current event") + log.debug("Synchronise: fetching current event.") current_event, available_events = await self.repository.get_current_event() await self.populate_cache_events(available_events) if current_event is None: - log.error("Failed to fetch event ~ cannot synchronise!") + log.error("Failed to fetch event. Cannot synchronise!") return False, False return await self.enter_event(current_event) @@ -359,14 +359,14 @@ class Branding(commands.Cog): The cache does not store the fallback event, as it is not shown in the calendar. """ - log.debug("Populating events cache") + log.debug("Populating events cache.") await self.cache_events.clear() no_fallback = [event for event in events if not event.meta.is_fallback] chronological_events = sorted(no_fallback, key=attrgetter("meta.start_date")) - log.trace(f"Writing {len(chronological_events)} events (fallback omitted)") + log.trace(f"Writing {len(chronological_events)} events (fallback omitted).") with contextlib.suppress(ValueError): # Cache raises when updated with an empty dict. await self.cache_events.update({ @@ -383,7 +383,7 @@ class Branding(commands.Cog): The duration is stored formatted for the frontend. It is not intended to be used programmatically. """ - log.trace("Caching event description & duration") + log.debug("Caching event description & duration.") await self.cache_information.set("event_description", event.meta.description) await self.cache_information.set("event_duration", extract_event_duration(event)) @@ -397,7 +397,7 @@ class Branding(commands.Cog): The daemon will only start if it has been explicitly enabled via a command. """ - log.debug("Checking whether daemon is enabled") + log.debug("Checking whether daemon should start.") should_begin: t.Optional[bool] = await self.cache_information.get("daemon_active") # None if never set! @@ -410,7 +410,7 @@ class Branding(commands.Cog): This is **not** done automatically! The daemon otherwise remains active in the background. 
""" - log.debug("Cog unload: cancelling daemon") + log.debug("Cog unload: cancelling daemon.") self.daemon_loop.cancel() @@ -425,14 +425,14 @@ class Branding(commands.Cog): we check the banner & icons hashes against the currently cached values. If there is a mismatch, each specific asset is re-applied. """ - log.trace("Daemon main: checking current event") + log.info("Daemon main: checking current event.") new_event, available_events = await self.repository.get_current_event() await self.populate_cache_events(available_events) if new_event is None: - log.warning("Daemon main: failed to get current event from branding repository, will do nothing") + log.warning("Daemon main: failed to get current event from branding repository, will do nothing.") return if new_event.path != await self.cache_information.get("event_path"): @@ -442,14 +442,14 @@ class Branding(commands.Cog): await self.populate_cache_event_description(new_event) # Cache fresh frontend info in case of change. - log.trace("Daemon main: event has not changed, checking for change in assets") + log.trace("Daemon main: event has not changed, checking for change in assets.") if new_event.banner.sha != await self.cache_information.get("banner_hash"): - log.debug("Daemon main: detected banner change!") + log.debug("Daemon main: detected banner change.") await self.apply_banner(new_event.banner) if compound_hash(new_event.icons) != await self.cache_information.get("icons_hash"): - log.debug("Daemon main: detected icon change!") + log.debug("Daemon main: detected icon change.") await self.initiate_icon_rotation(new_event.icons) await self.rotate_icons() else: @@ -463,7 +463,7 @@ class Branding(commands.Cog): The scheduler maintains an exact 24-hour frequency even if this coroutine takes time to complete. If the coroutine is started at 00:01 and completes at 00:05, it will still be started at 00:01 the next day. """ - log.trace("Daemon loop: calling daemon main") + log.trace("Daemon loop: calling daemon main.") try: await self.daemon_main() @@ -478,11 +478,11 @@ class Branding(commands.Cog): The first iteration is invoked directly such that synchronisation happens immediately after daemon start. We then calculate the time until the next-up midnight and sleep before letting `daemon_loop` begin. """ - log.trace("Daemon before: performing start-up iteration") + log.trace("Daemon before: performing start-up iteration.") await self.daemon_loop() - log.trace("Daemon before: calculating time to sleep before loop begins") + log.trace("Daemon before: calculating time to sleep before loop begins.") now = datetime.utcnow() # The actual midnight moment is offset into the future in order to prevent issues with imprecise sleep. 
@@ -490,7 +490,7 @@ class Branding(commands.Cog): midnight = datetime.combine(tomorrow, time(minute=1)) sleep_secs = (midnight - now).total_seconds() - log.trace(f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}") + log.trace(f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}.") await asyncio.sleep(sleep_secs) @@ -557,7 +557,7 @@ class Branding(commands.Cog): return available_events = await self.cache_events.to_dict() - log.debug(f"Found {len(available_events)} cached events available for calendar view") + log.trace(f"Found {len(available_events)} cached events available for calendar view.") if not available_events: resp = make_embed("No events found!", "Cache may be empty, try `branding calendar refresh`.", success=False) @@ -570,7 +570,7 @@ class Branding(commands.Cog): first_25 = list(available_events.items())[:25] if len(first_25) != len(available_events): # Alert core devs that a paginating solution is now necessary. - log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25!") + log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25.") for name, duration in first_25: embed.add_field(name=name[:256], value=duration[:1024]) @@ -588,7 +588,7 @@ class Branding(commands.Cog): Supplementary subcommand allowing force-refreshing the event cache. Implemented as a subcommand because unlike the supergroup, it requires moderator privileges. """ - log.debug("Performing command-requested event cache refresh") + log.info("Performing command-requested event cache refresh.") async with ctx.typing(): available_events = await self.repository.get_events() diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 694e79b51..3a9745ed5 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -102,7 +102,7 @@ class BrandingRepository: Passing custom `types` allows getting only files or directories. By default, both are included. """ full_url = f"{BRANDING_URL}/{path}" - log.debug(f"Fetching directory from branding repository: {full_url}") + log.debug(f"Fetching directory from branding repository: '{full_url}'.") async with self.bot.http_session.get(full_url, params=PARAMS, headers=HEADERS) as response: if response.status != 200: @@ -117,7 +117,7 @@ class BrandingRepository: Raise an exception if the request does not succeed. """ - log.debug(f"Fetching file from branding repository: {download_url}") + log.debug(f"Fetching file from branding repository: '{download_url}'.") async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response: if response.status != 200: @@ -181,26 +181,25 @@ class BrandingRepository: Misconfigured events are skipped. May return an empty list in the catastrophic case. """ - log.debug("Discovering events in branding repository") + log.debug("Discovering events in branding repository.") try: event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files. 
- except Exception as fetch_exc: - log.error(f"Failed to fetch 'events' directory: {fetch_exc}") + except Exception: + log.exception("Failed to fetch 'events' directory.") return [] instances: t.List[Event] = [] for event_directory in event_directories.values(): - log.trace(f"Attempting to construct event from directory: {event_directory.path}") + log.trace(f"Attempting to construct event from directory: '{event_directory.path}'.") try: instance = await self.construct_event(event_directory) except Exception as exc: - log.warning(f"Could not construct event '{event_directory.path}': {exc}") + log.warning(f"Could not construct event '{event_directory.path}'.", exc_info=exc) else: instances.append(instance) - log.trace(f"Found {len(instances)} correctly configured events") return instances async def get_current_event(self) -> t.Tuple[t.Optional[Event], t.List[Event]]: @@ -213,19 +212,21 @@ class BrandingRepository: The current event may be None in the case that no event is active, and no fallback event is found. """ utc_now = datetime.utcnow() - log.debug(f"Finding active event for: {utc_now}") + log.debug(f"Finding active event for: {utc_now}.") # Construct an object in the arbitrary year for the purpose of comparison. lookup_now = date(year=ARBITRARY_YEAR, month=utc_now.month, day=utc_now.day) + log.trace(f"Lookup object in arbitrary year: {lookup_now}.") available_events = await self.get_events() + log.trace(f"Found {len(available_events)} available events.") for event in available_events: meta = event.meta if not meta.is_fallback and (meta.start_date <= lookup_now <= meta.end_date): return event, available_events - log.debug("No active event found, looking for fallback") + log.trace("No active event found. Looking for fallback event.") for event in available_events: if event.meta.is_fallback: -- cgit v1.2.3 From f9f9ced5320a593bb59836086d0d5983b4df58df Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sat, 27 Mar 2021 13:17:12 -0400 Subject: Restrict redirects for paste uploads with an extension that is not `.py`. --- bot/utils/services.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bot/utils/services.py b/bot/utils/services.py index 5949c9e48..68cbd896e 100644 --- a/bot/utils/services.py +++ b/bot/utils/services.py @@ -47,7 +47,13 @@ async def send_to_paste_service(contents: str, *, extension: str = "") -> Option continue elif "key" in response_json: log.info(f"Successfully uploaded contents to paste service behind key {response_json['key']}.") - return URLs.paste_service.format(key=response_json['key']) + extension + + paste_link = URLs.paste_service.format(key=response_json['key']) + extension + + if extension == '.py': + return paste_link + return paste_link + "?noredirect" + log.warning( f"Got unexpected JSON response from paste service: {response_json}\n" f"trying again ({attempt}/{FAILED_REQUEST_ATTEMPTS})." -- cgit v1.2.3 From 2abbcc8a63d0378392eba52593a65249d9204e9e Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sat, 27 Mar 2021 13:26:25 -0400 Subject: Remove the old DMRelay cog. The moderation team has decided that this may come in conflict with Discord's ToS, and it does not serve too much of a purpose anymore. It was fun while it lasted! 
--- bot/exts/moderation/dm_relay.py | 132 ---------------------------------------- 1 file changed, 132 deletions(-) delete mode 100644 bot/exts/moderation/dm_relay.py diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py deleted file mode 100644 index 6d081741c..000000000 --- a/bot/exts/moderation/dm_relay.py +++ /dev/null @@ -1,132 +0,0 @@ -import logging -from typing import Optional - -import discord -from async_rediscache import RedisCache -from discord import Color -from discord.ext import commands -from discord.ext.commands import Cog - -from bot import constants -from bot.bot import Bot -from bot.converters import UserMentionOrID -from bot.utils.checks import in_whitelist_check -from bot.utils.messages import send_attachments -from bot.utils.webhooks import send_webhook - -log = logging.getLogger(__name__) - - -class DMRelay(Cog): - """Relay direct messages to and from the bot.""" - - # RedisCache[str, t.Union[discord.User.id, discord.Member.id]] - dm_cache = RedisCache() - - def __init__(self, bot: Bot): - self.bot = bot - self.webhook_id = constants.Webhooks.dm_log - self.webhook = None - self.bot.loop.create_task(self.fetch_webhook()) - - @commands.command(aliases=("reply",)) - async def send_dm(self, ctx: commands.Context, member: Optional[UserMentionOrID], *, message: str) -> None: - """ - Allows you to send a DM to a user from the bot. - - If `member` is not provided, it will send to the last user who DM'd the bot. - - This feature should be used extremely sparingly. Use ModMail if you need to have a serious - conversation with a user. This is just for responding to extraordinary DMs, having a little - fun with users, and telling people they are DMing the wrong bot. - - NOTE: This feature will be removed if it is overused. 
- """ - if not member: - user_id = await self.dm_cache.get("last_user") - member = ctx.guild.get_member(user_id) if user_id else None - - # If we still don't have a Member at this point, give up - if not member: - log.debug("This bot has never gotten a DM, or the RedisCache has been cleared.") - await ctx.message.add_reaction("❌") - return - - if member.id == self.bot.user.id: - log.debug("Not sending message to bot user") - return await ctx.send("🚫 I can't send messages to myself!") - - try: - await member.send(message) - except discord.errors.Forbidden: - log.debug("User has disabled DMs.") - await ctx.message.add_reaction("❌") - else: - await ctx.message.add_reaction("✅") - self.bot.stats.incr("dm_relay.dm_sent") - - async def fetch_webhook(self) -> None: - """Fetches the webhook object, so we can post to it.""" - await self.bot.wait_until_guild_available() - - try: - self.webhook = await self.bot.fetch_webhook(self.webhook_id) - except discord.HTTPException: - log.exception(f"Failed to fetch webhook with id `{self.webhook_id}`") - - @Cog.listener() - async def on_message(self, message: discord.Message) -> None: - """Relays the message's content and attachments to the dm_log channel.""" - # Only relay DMs from humans - if message.author.bot or message.guild or self.webhook is None: - return - - if message.clean_content: - await send_webhook( - webhook=self.webhook, - content=message.clean_content, - username=f"{message.author.display_name} ({message.author.id})", - avatar_url=message.author.avatar_url - ) - await self.dm_cache.set("last_user", message.author.id) - self.bot.stats.incr("dm_relay.dm_received") - - # Handle any attachments - if message.attachments: - try: - await send_attachments( - message, - self.webhook, - username=f"{message.author.display_name} ({message.author.id})" - ) - except (discord.errors.Forbidden, discord.errors.NotFound): - e = discord.Embed( - description=":x: **This message contained an attachment, but it could not be retrieved**", - color=Color.red() - ) - await send_webhook( - webhook=self.webhook, - embed=e, - username=f"{message.author.display_name} ({message.author.id})", - avatar_url=message.author.avatar_url - ) - except discord.HTTPException: - log.exception("Failed to send an attachment to the webhook") - - async def cog_check(self, ctx: commands.Context) -> bool: - """Only allow moderators to invoke the commands in this cog.""" - checks = [ - await commands.has_any_role(*constants.MODERATION_ROLES).predicate(ctx), - in_whitelist_check( - ctx, - channels=[constants.Channels.dm_log], - redirect=None, - fail_silently=True, - ) - ] - return all(checks) - - -def setup(bot: Bot) -> None: - """Load the DMRelay cog.""" - bot.add_cog(DMRelay(bot)) -- cgit v1.2.3 From 2759409123d458a4a0a274b835bebb3cc728b83a Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sat, 27 Mar 2021 13:49:22 -0400 Subject: Fix tests for paste uploads. Accounts for no redirects on extensions that are not `.py`. 
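For reference, a tiny hypothetical stand-in (the function below is made up for illustration; the real helper is `send_to_paste_service` in `bot/utils/services.py`) showing the link shapes the updated tests assert:

    def expected_paste_link(key: str, suffix: str = "") -> str:
        link = f"https://paste_service.com/{key}{suffix}"
        return link if suffix == ".py" else link + "?noredirect"

    assert expected_paste_link("paste_key", ".py") == "https://paste_service.com/paste_key.py"
    assert expected_paste_link("paste_key", ".txt") == "https://paste_service.com/paste_key.txt?noredirect"
    assert expected_paste_link("paste_key") == "https://paste_service.com/paste_key?noredirect"
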
--- tests/bot/utils/test_services.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/bot/utils/test_services.py b/tests/bot/utils/test_services.py index 1b48f6560..3b71022db 100644 --- a/tests/bot/utils/test_services.py +++ b/tests/bot/utils/test_services.py @@ -30,9 +30,9 @@ class PasteTests(unittest.IsolatedAsyncioTestCase): """Url with specified extension is returned on successful requests.""" key = "paste_key" test_cases = ( - (f"https://paste_service.com/{key}.txt", "txt"), + (f"https://paste_service.com/{key}.txt?noredirect", "txt"), (f"https://paste_service.com/{key}.py", "py"), - (f"https://paste_service.com/{key}", ""), + (f"https://paste_service.com/{key}?noredirect", ""), ) response = MagicMock( json=AsyncMock(return_value={"key": key}) -- cgit v1.2.3 From e85988e8d63af2a30835a72ec363895b60f22260 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sat, 27 Mar 2021 13:51:12 -0400 Subject: Create the new DMRelay cog. Includes the `!dmrelay` command, allowing moderators to relay direct messages between the bot and other users. --- bot/exts/moderation/dm_relay.py | 59 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 bot/exts/moderation/dm_relay.py diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py new file mode 100644 index 000000000..2bf2391a4 --- /dev/null +++ b/bot/exts/moderation/dm_relay.py @@ -0,0 +1,59 @@ +import logging +import textwrap + +import discord +from discord.ext.commands import Cog, Context, command + +from bot.bot import Bot +from bot.constants import Emojis +from bot.utils.services import send_to_paste_service + +log = logging.getLogger(__name__) + + +class DMRelay(Cog): + """Relay direct messages from the bot.""" + + def __init__(self, bot: Bot): + self.bot = bot + + @command(aliases=("relay", "dr")) + async def dmrelay(self, ctx: Context, user: discord.User, limit: int = 100) -> None: + """Relays the direct message history between the bot and given user.""" + log.trace(f"Relaying DMs with {user.name} ({user.id})") + + if not user.dm_channel: + await ctx.send(f"{Emojis.cross_mark} No direct message history with {user.mention}.") + return + + output = textwrap.dedent(f"""\ + User: {user} ({user.id}) + Channel ID: {user.dm_channel.id}\n + """) + + async for msg in user.history(limit=limit, oldest_first=True): + created_at = msg.created_at.strftime(r"%Y-%m-%d %H:%M") + + # Metadata (author, created_at, id) + output += f"{msg.author} [{created_at}] ({msg.id}): " + + # Content + if msg.content: + output += msg.content + "\n" + + # Embeds + if (embeds := len(msg.embeds)) > 0: + output += f"<{embeds} embed{'s' if embeds > 1 else ''}>\n" + + # Attachments + attachments = "\n".join(a.url for a in msg.attachments) + if attachments: + output += attachments + "\n" + + paste_link = await send_to_paste_service(output, extension="txt") + await ctx.send(paste_link) + + +def setup(bot: Bot) -> None: + """Load the DMRelay cog.""" + bot.add_cog(DMRelay(bot)) -- cgit v1.2.3 From 4be90b3c454138e3548c7394fcb2a1182b05b7d7 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sat, 27 Mar 2021 13:52:43 -0400 Subject: Restrict DMRelay cog to moderators only. 
--- bot/exts/moderation/dm_relay.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index 2bf2391a4..1d57862d9 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -2,10 +2,10 @@ import logging import textwrap import discord -from discord.ext.commands import Cog, Context, command +from discord.ext.commands import Cog, Context, command, has_any_role from bot.bot import Bot -from bot.constants import Emojis +from bot.constants import Emojis, MODERATION_ROLES from bot.utils.services import send_to_paste_service log = logging.getLogger(__name__) @@ -53,6 +53,10 @@ class DMRelay(Cog): paste_link = await send_to_paste_service(output, extension="txt") await ctx.send(paste_link) + async def cog_check(self, ctx: Context) -> bool: + """Only allow moderators to invoke the commands in this cog.""" + return await has_any_role(*MODERATION_ROLES).predicate(ctx) + def setup(bot: Bot) -> None: """Load the DMRelay cog.""" -- cgit v1.2.3 From 721068e77cb0888feba465d0ba39a58aab12a7bf Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 27 Mar 2021 19:40:08 +0100 Subject: Branding: omit notification when entering evergreen The fallback event should not produce a notification. --- bot/exts/backend/branding/_cog.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index 38ec279cd..e12706c32 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -300,7 +300,7 @@ class Branding(commands.Cog): An event change should always be handled via this function, as it ensures that the cache is populated. - The #changelog notification is sent only if `event` differs from the currently cached event. + The #changelog notification is omitted when `event` is fallback, or already applied. Return a 2-tuple indicating whether the banner, and the icon, were applied successfully. """ @@ -321,10 +321,10 @@ class Branding(commands.Cog): await self.populate_cache_event_description(event) # Notify guild of new event ~ this reads the information that we cached above. - if event_changed: + if event_changed and not event.meta.is_fallback: await self.send_info_embed(Channels.change_log) else: - log.trace("Omitted #changelog notification as event has not changed. Assuming manual re-sync.") + log.trace("Omitting #changelog notification. Event has not changed, or new event is fallback.") return banner_success, icon_success -- cgit v1.2.3 From 1e8d5977084085f37395f10e320931305c3b7cca Mon Sep 17 00:00:00 2001 From: kwzrd Date: Sat, 27 Mar 2021 19:46:49 +0100 Subject: Branding: add contextual message to #changelog notifications It would be strange to just send the embed with no explanation of what it means. --- bot/exts/backend/branding/_cog.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index e12706c32..b07edbffd 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -258,17 +258,16 @@ class Branding(commands.Cog): await self.cache_information.set("icons_hash", compound_hash(available_icons)) - async def send_info_embed(self, channel_id: int) -> None: + async def send_info_embed(self, channel_id: int, *, is_notification: bool) -> None: """ Send the currently cached event description to `channel_id`. 
- This function is called when entering a new event with the destination being #changelog. However, it can - also be invoked on-demand by users. + When `is_notification` holds, a short contextual message for the #changelog channel is added. - To support either case, we read information about the current event from `cache_information`. The caller - is therefore responsible for making sure that the cache is up-to-date before calling this function. + We read event information from `cache_information`. The caller is therefore responsible for making + sure that the cache is up-to-date before calling this function. """ - log.debug(f"Sending event information event to channel id: {channel_id}.") + log.debug(f"Sending event information event to channel: {channel_id} ({is_notification=}).") await self.bot.wait_until_guild_available() channel: t.Optional[discord.TextChannel] = self.bot.get_channel(channel_id) @@ -283,12 +282,15 @@ class Branding(commands.Cog): duration = await self.cache_information.get("event_duration") if None in (description, duration): + content = None embed = make_embed("No event in cache", "Is the daemon enabled?", success=False) + else: + content = "Python Discord is entering a new event!" if is_notification else None embed = discord.Embed(description=description[:2048], colour=discord.Colour.blurple()) embed.set_footer(text=duration[:2048]) - await channel.send(embed=embed) + await channel.send(content=content, embed=embed) async def enter_event(self, event: Event) -> t.Tuple[bool, bool]: """ @@ -322,7 +324,7 @@ class Branding(commands.Cog): # Notify guild of new event ~ this reads the information that we cached above. if event_changed and not event.meta.is_fallback: - await self.send_info_embed(Channels.change_log) + await self.send_info_embed(Channels.change_log, is_notification=True) else: log.trace("Omitting #changelog notification. Event has not changed, or new event is fallback.") @@ -506,7 +508,7 @@ class Branding(commands.Cog): @branding_group.command(name="about", aliases=("current", "event")) async def branding_about_cmd(self, ctx: commands.Context) -> None: """Show the current event's description and duration.""" - await self.send_info_embed(ctx.channel.id) + await self.send_info_embed(ctx.channel.id, is_notification=False) @commands.has_any_role(*MODERATION_ROLES) @branding_group.command(name="sync") -- cgit v1.2.3 From 459ce9220ab8659d545e32f4ef1532da50789ca7 Mon Sep 17 00:00:00 2001 From: Dennis Pham Date: Sun, 28 Mar 2021 11:52:41 -0400 Subject: Added a newline to space out some code. Co-authored-by: Joe Banks <20439493+jb3@users.noreply.github.com> --- bot/utils/services.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bot/utils/services.py b/bot/utils/services.py index 68cbd896e..db9c93d0f 100644 --- a/bot/utils/services.py +++ b/bot/utils/services.py @@ -52,6 +52,7 @@ async def send_to_paste_service(contents: str, *, extension: str = "") -> Option if extension == '.py': return paste_link + return paste_link + "?noredirect" log.warning( -- cgit v1.2.3 From 8cc27e52735d03273267012c0344acc54c602ea9 Mon Sep 17 00:00:00 2001 From: Dennis Pham Date: Sun, 28 Mar 2021 11:53:41 -0400 Subject: Improve `DMRelay` cog description. 
Co-authored-by: Joe Banks <20439493+jb3@users.noreply.github.com> --- bot/exts/moderation/dm_relay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index 1d57862d9..2b897c0ed 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -12,7 +12,7 @@ log = logging.getLogger(__name__) class DMRelay(Cog): - """Relay direct messages from the bot.""" + """Inspect messages sent to the bot.""" def __init__(self, bot: Bot): self.bot = bot -- cgit v1.2.3 From 14514d1ed15e9155ef8700473cd0953126421177 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sun, 28 Mar 2021 11:58:01 -0400 Subject: Account for requesting the bot's DMs with itself. --- bot/exts/moderation/dm_relay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index 1d57862d9..ed1c45292 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -22,7 +22,7 @@ class DMRelay(Cog): """Relays the direct message history between the bot and given user.""" log.trace(f"Relaying DMs with {user.name} ({user.id})") - if not user.dm_channel: + if self.bot.user == user or not user.dm_channel: await ctx.send(f"{Emojis.cross_mark} No direct message history with {user.mention}.") return -- cgit v1.2.3 From d36e179912242ea6c21a1d5e1a4485034a1b5343 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sun, 28 Mar 2021 12:12:41 -0400 Subject: Force cache to update for user history. Before, the user would have to send a DM to the bot after startup for the bot to realize there is DM history with that specific user. Now, we force a cache refresh when a moderator invokes `!dmrelay`, so this shouldn't be an issue anymore. --- bot/exts/moderation/dm_relay.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index ed1c45292..17316ff48 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -22,7 +22,14 @@ class DMRelay(Cog): """Relays the direct message history between the bot and given user.""" log.trace(f"Relaying DMs with {user.name} ({user.id})") - if self.bot.user == user or not user.dm_channel: + if self.bot.user == user: + await ctx.send(f"{Emojis.cross_mark} No direct message history with myself.") + return + + # Force cache to update + await user.history(limit=1).flatten() + + if not user.dm_channel: await ctx.send(f"{Emojis.cross_mark} No direct message history with {user.mention}.") return -- cgit v1.2.3 From 39d71b578b5f1cfaae2acd01743f8b7522e2c490 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Sun, 28 Mar 2021 13:19:40 -0400 Subject: Reduce API calls in `!dmrelay`. 
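Roughly: the command drops the cache-priming history fetch and the dm_channel
pre-check, builds the transcript in a single history() pass, and only talks to
the paste service when that pass produced output. The sketch below is
illustrative only (the real command also prepends user metadata and uses the
cog's existing send_to_paste_service helper):

    async def relay_dms(ctx, user, limit: int = 100) -> None:
        """Illustrative shape only, not the exact diff below."""
        output = ""
        async for msg in user.history(limit=limit, oldest_first=True):
            output += f"{msg.author} ({msg.id}): {msg.content}\n"

        if not output:
            # Nothing to upload, so the paste-service round trip is skipped.
            await ctx.send("No direct message history.")
            return

        await ctx.send(await send_to_paste_service(output, extension="txt"))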
--- bot/exts/moderation/dm_relay.py | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index cc63a80fe..a03230b3d 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -22,22 +22,11 @@ class DMRelay(Cog): """Relays the direct message history between the bot and given user.""" log.trace(f"Relaying DMs with {user.name} ({user.id})") - if self.bot.user == user: - await ctx.send(f"{Emojis.cross_mark} No direct message history with myself.") + if user.bot: + await ctx.send(f"{Emojis.cross_mark} No direct message history with bots.") return - # Force cache to update - await user.history(limit=1).flatten() - - if not user.dm_channel: - await ctx.send(f"{Emojis.cross_mark} No direct message history with {user.mention}.") - return - - output = textwrap.dedent(f"""\ - User: {user} ({user.id}) - Channel ID: {user.dm_channel.id}\n - """) - + output = "" async for msg in user.history(limit=limit, oldest_first=True): created_at = msg.created_at.strftime(r"%Y-%m-%d %H:%M") @@ -57,7 +46,16 @@ class DMRelay(Cog): if attachments: output += attachments + "\n" - paste_link = await send_to_paste_service(output, extension="txt") + if not output: + await ctx.send(f"{Emojis.cross_mark} No direct message history with {user.mention}.") + return + + metadata = textwrap.dedent(f"""\ + User: {user} ({user.id}) + Channel ID: {user.dm_channel.id}\n + """) + + paste_link = await send_to_paste_service(metadata + output, extension="txt") await ctx.send(paste_link) async def cog_check(self, ctx: Context) -> bool: -- cgit v1.2.3 From 00dd501b99d31b8ca4e6b1edb1638e64abe95ae3 Mon Sep 17 00:00:00 2001 From: ToxicKidz <78174417+ToxicKidz@users.noreply.github.com> Date: Sun, 28 Mar 2021 15:15:35 -0400 Subject: Filter codeblick escapes and allow no mentions for !raw command --- bot/exts/info/information.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bot/exts/info/information.py b/bot/exts/info/information.py index c54ca96bf..ce35df470 100644 --- a/bot/exts/info/information.py +++ b/bot/exts/info/information.py @@ -6,7 +6,7 @@ from collections import defaultdict from typing import Any, DefaultDict, Dict, Mapping, Optional, Tuple, Union import fuzzywuzzy -from discord import Colour, Embed, Guild, Message, Role +from discord import AllowedMentions, Colour, Embed, Guild, Message, Role from discord.ext.commands import BucketType, Cog, Context, Paginator, command, group, has_any_role from bot import constants @@ -447,9 +447,9 @@ class Information(Cog): def add_content(title: str, content: str) -> None: paginator.add_line(f'== {title} ==\n') - # replace backticks as it breaks out of code blocks. Spaces seemed to be the most reasonable solution. - # we hope it's not close to 2000 - paginator.add_line(content.replace('```', '`` `')) + # Replace backticks as it breaks out of code blocks. + # An invisble character seemed to be the most reasonable solution. We hope it's not close to 2000. 
+ paginator.add_line(content.replace('`', '`\u200b')) paginator.close_page() if message.content: @@ -468,7 +468,7 @@ class Information(Cog): add_content(title, transformer(item)) for page in paginator.pages: - await ctx.send(page) + await ctx.send(page, allowed_mentions=AllowedMentions.none()) @raw.command() async def json(self, ctx: Context, message: Message) -> None: -- cgit v1.2.3 From 59dd861ca822f8dcef4c73732300e0f737b3bfa1 Mon Sep 17 00:00:00 2001 From: ToxicKidz <78174417+ToxicKidz@users.noreply.github.com> Date: Sun, 28 Mar 2021 18:12:20 -0400 Subject: Update bot/exts/info/information.py Co-authored-by: Joe Banks <20439493+jb3@users.noreply.github.com> --- bot/exts/info/information.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/info/information.py b/bot/exts/info/information.py index ce35df470..0555544ce 100644 --- a/bot/exts/info/information.py +++ b/bot/exts/info/information.py @@ -448,7 +448,7 @@ class Information(Cog): def add_content(title: str, content: str) -> None: paginator.add_line(f'== {title} ==\n') # Replace backticks as it breaks out of code blocks. - # An invisble character seemed to be the most reasonable solution. We hope it's not close to 2000. + # An invisible character seemed to be the most reasonable solution. We hope it's not close to 2000. paginator.add_line(content.replace('`', '`\u200b')) paginator.close_page() -- cgit v1.2.3 From 450a205e6115bfa296427d4bf15b839433627878 Mon Sep 17 00:00:00 2001 From: Joe Banks <20439493+jb3@users.noreply.github.com> Date: Mon, 29 Mar 2021 00:04:51 +0100 Subject: Add myself to CODEOWNERS (#1489) --- .github/CODEOWNERS | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 634bb4bca..1df05e990 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,14 +4,14 @@ **/bot/exts/moderation/*silence.py @MarkKoz bot/exts/info/codeblock/** @MarkKoz bot/exts/utils/extensions.py @MarkKoz -bot/exts/utils/snekbox.py @MarkKoz @Akarys42 +bot/exts/utils/snekbox.py @MarkKoz @Akarys42 @jb3 bot/exts/help_channels/** @MarkKoz @Akarys42 -bot/exts/moderation/** @Akarys42 @mbaruh @Den4200 @ks129 -bot/exts/info/** @Akarys42 @Den4200 -bot/exts/info/information.py @mbaruh -bot/exts/filters/** @mbaruh +bot/exts/moderation/** @Akarys42 @mbaruh @Den4200 @ks129 @jb3 +bot/exts/info/** @Akarys42 @Den4200 @jb3 +bot/exts/info/information.py @mbaruh @jb3 +bot/exts/filters/** @mbaruh @jb3 bot/exts/fun/** @ks129 -bot/exts/utils/** @ks129 +bot/exts/utils/** @ks129 @jb3 bot/exts/recruitment/** @wookie184 # Rules @@ -30,9 +30,9 @@ tests/bot/exts/test_cogs.py @MarkKoz tests/** @Akarys42 # CI & Docker -.github/workflows/** @MarkKoz @Akarys42 @SebastiaanZ @Den4200 -Dockerfile @MarkKoz @Akarys42 @Den4200 -docker-compose.yml @MarkKoz @Akarys42 @Den4200 +.github/workflows/** @MarkKoz @Akarys42 @SebastiaanZ @Den4200 @jb3 +Dockerfile @MarkKoz @Akarys42 @Den4200 @jb3 +docker-compose.yml @MarkKoz @Akarys42 @Den4200 @jb3 # Tools Pipfile* @Akarys42 -- cgit v1.2.3 From 58fbc2ebad75f4f86c47d23128e0c8421e68446e Mon Sep 17 00:00:00 2001 From: Matteo Bertucci Date: Tue, 30 Mar 2021 15:10:00 +0000 Subject: Alphabetize configuration --- bot/constants.py | 4 ++-- bot/exts/utils/utils.py | 2 +- config-default.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bot/constants.py b/bot/constants.py index 787e8b12e..7be0b9f6d 100644 --- a/bot/constants.py +++ b/bot/constants.py @@ -487,13 +487,13 @@ class Roles(metaclass=YAMLGetter): voice_verified: int 
admins: int - domain_leads: int - project_leads: int core_developers: int devops: int + domain_leads: int helpers: int moderators: int owners: int + project_leads: int jammers: int team_leaders: int diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py index c45f73b88..cae7f2593 100644 --- a/bot/exts/utils/utils.py +++ b/bot/exts/utils/utils.py @@ -9,7 +9,7 @@ from discord.ext.commands import BadArgument, Cog, Context, clean_content, comma from discord.utils import snowflake_time from bot.bot import Bot -from bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES, Roles +from bot.constants import Channels, MODERATION_ROLES, Roles, STAFF_ROLES from bot.converters import Snowflake from bot.decorators import in_whitelist from bot.pagination import LinePaginator diff --git a/config-default.yml b/config-default.yml index dde7e48e1..59e6f4333 100644 --- a/config-default.yml +++ b/config-default.yml @@ -263,13 +263,13 @@ guild: # Staff admins: &ADMINS_ROLE 267628507062992896 - domain_leads: 807415650778742785 - project_leads: 807415650778742785 core_developers: 587606783669829632 devops: 409416496733880320 + domain_leads: 807415650778742785 helpers: &HELPERS_ROLE 267630620367257601 moderators: &MODS_ROLE 267629731250176001 owners: &OWNERS_ROLE 267627879762755584 + project_leads: 815701647526330398 # Code Jam jammers: 737249140966162473 -- cgit v1.2.3 From fb89ef5b677200596d2a71a8f73c4f9a33608203 Mon Sep 17 00:00:00 2001 From: Sebastiaan Zeeff <33516116+SebastiaanZ@users.noreply.github.com> Date: Tue, 30 Mar 2021 20:14:29 +0200 Subject: Increase the total number of help channels to 42 We've seen an increase in help channel activity and we're running out of help channels frequently. That's why we're increasing the number of help channels from 38 to 42. Note that the old configuration said 32, but we had more channels in actual rotation due to a race condition we had in the past. The system will never delete channels that were already in rotation, meaning that those that were added over the limit in the past still existed. --- config-default.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config-default.yml b/config-default.yml index 502f0f861..4e7060ea4 100644 --- a/config-default.yml +++ b/config-default.yml @@ -481,7 +481,7 @@ help_channels: # Maximum number of channels across all 3 categories # Note Discord has a hard limit of 50 channels per category, so this shouldn't be > 50 - max_total_channels: 32 + max_total_channels: 42 # Prefix for help channel names name_prefix: 'help-' -- cgit v1.2.3 From 07e3b4573a43099351978543569b47dce3ba7b7c Mon Sep 17 00:00:00 2001 From: MarkKoz Date: Tue, 30 Mar 2021 11:18:11 -0700 Subject: Update arrow to 1.0.3 It has some API changes, so it's best to update now before the project starts using the library more. 
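One example of the kind of change meant here (from memory of the 1.0 release
notes, so worth double-checking upstream): the timestamp property became a
method, with int_timestamp available as the integer shortcut.

    import arrow

    now = arrow.utcnow()

    # arrow < 1.0:  now.timestamp       (property, float)
    # arrow >= 1.0: now.timestamp()     (method, float)
    #               now.int_timestamp   (property, int)
    print(now.timestamp())
    print(now.int_timestamp)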
--- Pipfile | 6 +- Pipfile.lock | 597 +++++++++++++++++++++++++++++++++-------------------------- 2 files changed, 336 insertions(+), 267 deletions(-) diff --git a/Pipfile b/Pipfile index 0a94fb888..99e480278 100644 --- a/Pipfile +++ b/Pipfile @@ -9,12 +9,14 @@ aiodns = "~=2.0" aiohttp = "~=3.7" aioping = "~=0.3.1" aioredis = "~=1.3.1" +arrow = "~=1.0.3" "async-rediscache[fakeredis]" = "~=0.1.2" beautifulsoup4 = "~=4.9" colorama = {version = "~=0.4.3",sys_platform = "== 'win32'"} coloredlogs = "~=14.0" deepdiff = "~=4.0" "discord.py" = "~=1.6.0" +emoji = "~=0.6" feedparser = "~=5.2" fuzzywuzzy = "~=0.17" lxml = "~=4.4" @@ -26,11 +28,10 @@ requests = "~=2.22" sentry-sdk = "~=0.19" sphinx = "~=2.2" statsd = "~=3.3" -arrow = "~=0.17" -emoji = "~=0.6" [dev-packages] coverage = "~=5.0" +coveralls = "~=2.1" flake8 = "~=3.8" flake8-annotations = "~=2.0" flake8-bugbear = "~=20.1" @@ -41,7 +42,6 @@ flake8-tidy-imports = "~=4.0" flake8-todo = "~=0.7" pep8-naming = "~=0.9" pre-commit = "~=2.1" -coveralls = "~=2.1" [requires] python_version = "3.8" diff --git a/Pipfile.lock b/Pipfile.lock index f8cedb08f..d16cef2a8 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "228ae55fe5700ac3827ba6b661933b60b1d06f44fea8bcbe8c5a769fa10ab2fd" + "sha256": "e5b57ca7276af4709b345055d4b3705c4142c61c4669c796b79a73379ec37a9a" }, "pipfile-spec": 6, "requires": { @@ -18,11 +18,11 @@ "default": { "aio-pika": { "hashes": [ - "sha256:9773440a89840941ac3099a7720bf9d51e8764a484066b82ede4d395660ff430", - "sha256:a8065be3c722eb8f9fff8c0e7590729e7782202cdb9363d9830d7d5d47b45c7c" + "sha256:1d4305a5f78af3857310b4fe48348cdcf6c097e0e275ea88c2cd08570531a369", + "sha256:e69afef8695f47c5d107bbdba21bdb845d5c249acb3be53ef5c2d497b02657c0" ], "index": "pypi", - "version": "==6.7.1" + "version": "==6.8.0" }, "aiodns": { "hashes": [ @@ -34,46 +34,46 @@ }, "aiohttp": { "hashes": [ - "sha256:119feb2bd551e58d83d1b38bfa4cb921af8ddedec9fad7183132db334c3133e0", - "sha256:16d0683ef8a6d803207f02b899c928223eb219111bd52420ef3d7a8aa76227b6", - "sha256:2eb3efe243e0f4ecbb654b08444ae6ffab37ac0ef8f69d3a2ffb958905379daf", - "sha256:2ffea7904e70350da429568113ae422c88d2234ae776519549513c8f217f58a9", - "sha256:40bd1b101b71a18a528ffce812cc14ff77d4a2a1272dfb8b11b200967489ef3e", - "sha256:418597633b5cd9639e514b1d748f358832c08cd5d9ef0870026535bd5eaefdd0", - "sha256:481d4b96969fbfdcc3ff35eea5305d8565a8300410d3d269ccac69e7256b1329", - "sha256:4c1bdbfdd231a20eee3e56bd0ac1cd88c4ff41b64ab679ed65b75c9c74b6c5c2", - "sha256:5563ad7fde451b1986d42b9bb9140e2599ecf4f8e42241f6da0d3d624b776f40", - "sha256:58c62152c4c8731a3152e7e650b29ace18304d086cb5552d317a54ff2749d32a", - "sha256:5b50e0b9460100fe05d7472264d1975f21ac007b35dcd6fd50279b72925a27f4", - "sha256:5d84ecc73141d0a0d61ece0742bb7ff5751b0657dab8405f899d3ceb104cc7de", - "sha256:5dde6d24bacac480be03f4f864e9a67faac5032e28841b00533cd168ab39cad9", - "sha256:5e91e927003d1ed9283dee9abcb989334fc8e72cf89ebe94dc3e07e3ff0b11e9", - "sha256:62bc216eafac3204877241569209d9ba6226185aa6d561c19159f2e1cbb6abfb", - "sha256:6c8200abc9dc5f27203986100579fc19ccad7a832c07d2bc151ce4ff17190076", - "sha256:6ca56bdfaf825f4439e9e3673775e1032d8b6ea63b8953d3812c71bd6a8b81de", - "sha256:71680321a8a7176a58dfbc230789790639db78dad61a6e120b39f314f43f1907", - "sha256:7c7820099e8b3171e54e7eedc33e9450afe7cd08172632d32128bd527f8cb77d", - "sha256:7dbd087ff2f4046b9b37ba28ed73f15fd0bc9f4fdc8ef6781913da7f808d9536", - "sha256:822bd4fd21abaa7b28d65fc9871ecabaddc42767884a626317ef5b75c20e8a2d", - 
"sha256:8ec1a38074f68d66ccb467ed9a673a726bb397142c273f90d4ba954666e87d54", - "sha256:950b7ef08b2afdab2488ee2edaff92a03ca500a48f1e1aaa5900e73d6cf992bc", - "sha256:99c5a5bf7135607959441b7d720d96c8e5c46a1f96e9d6d4c9498be8d5f24212", - "sha256:b84ad94868e1e6a5e30d30ec419956042815dfaea1b1df1cef623e4564c374d9", - "sha256:bc3d14bf71a3fb94e5acf5bbf67331ab335467129af6416a437bd6024e4f743d", - "sha256:c2a80fd9a8d7e41b4e38ea9fe149deed0d6aaede255c497e66b8213274d6d61b", - "sha256:c44d3c82a933c6cbc21039326767e778eface44fca55c65719921c4b9661a3f7", - "sha256:cc31e906be1cc121ee201adbdf844522ea3349600dd0a40366611ca18cd40e81", - "sha256:d5d102e945ecca93bcd9801a7bb2fa703e37ad188a2f81b1e65e4abe4b51b00c", - "sha256:dd7936f2a6daa861143e376b3a1fb56e9b802f4980923594edd9ca5670974895", - "sha256:dee68ec462ff10c1d836c0ea2642116aba6151c6880b688e56b4c0246770f297", - "sha256:e76e78863a4eaec3aee5722d85d04dcbd9844bc6cd3bfa6aa880ff46ad16bfcb", - "sha256:eab51036cac2da8a50d7ff0ea30be47750547c9aa1aa2cf1a1b710a1827e7dbe", - "sha256:f4496d8d04da2e98cc9133e238ccebf6a13ef39a93da2e87146c8c8ac9768242", - "sha256:fbd3b5e18d34683decc00d9a360179ac1e7a320a5fee10ab8053ffd6deab76e0", - "sha256:feb24ff1226beeb056e247cf2e24bba5232519efb5645121c4aea5b6ad74c1f2" + "sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe", + "sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe", + "sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5", + "sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8", + "sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd", + "sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb", + "sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c", + "sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87", + "sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0", + "sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290", + "sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5", + "sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287", + "sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde", + "sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf", + "sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8", + "sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16", + "sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf", + "sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809", + "sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213", + "sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f", + "sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013", + "sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b", + "sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9", + "sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5", + "sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb", + "sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df", + "sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4", + "sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439", + "sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f", + "sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22", + 
"sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f", + "sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5", + "sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970", + "sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009", + "sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc", + "sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a", + "sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95" ], "index": "pypi", - "version": "==3.7.4" + "version": "==3.7.4.post0" }, "aioping": { "hashes": [ @@ -96,6 +96,7 @@ "sha256:8218dd9f7198d6e7935855468326bbacf0089f926c70baa8dd92944cb2496573", "sha256:e584dac13a242589aaf42470fd3006cb0dc5aed6506cbd20357c7ec8bbe4a89e" ], + "markers": "python_version >= '3.6'", "version": "==3.3.1" }, "alabaster": { @@ -107,11 +108,11 @@ }, "arrow": { "hashes": [ - "sha256:e098abbd9af3665aea81bdd6c869e93af4feb078e98468dd351c383af187aac5", - "sha256:ff08d10cda1d36c68657d6ad20d74fbea493d980f8b2d45344e00d6ed2bf6ed4" + "sha256:3515630f11a15c61dcb4cdd245883270dd334c83f3e639824e65a4b79cc48543", + "sha256:399c9c8ae732270e1aa58ead835a79a40d7be8aa109c579898eb41029b5a231d" ], "index": "pypi", - "version": "==0.17.0" + "version": "==1.0.3" }, "async-rediscache": { "extras": [ @@ -122,6 +123,7 @@ "sha256:c25e4fff73f64d20645254783c3224a4c49e083e3fab67c44f17af944c5e26af" ], "index": "pypi", + "markers": "python_version ~= '3.7'", "version": "==0.1.4" }, "async-timeout": { @@ -129,6 +131,7 @@ "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f", "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3" ], + "markers": "python_full_version >= '3.5.3'", "version": "==3.0.1" }, "attrs": { @@ -136,6 +139,7 @@ "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.3.0" }, "babel": { @@ -143,6 +147,7 @@ "sha256:9d35c22fcc79893c3ecc85ac4a56cde1ecf3f19c540bba0922308a6c06ca6fa5", "sha256:da031ab54472314f210b0adcff1588ee5d1d1d0ba4dbd07b94dba82bde791e05" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.9.0" }, "beautifulsoup4": { @@ -205,17 +210,17 @@ }, "chardet": { "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], - "version": "==3.0.4" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.0.0" }, "colorama": { "hashes": [ "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" ], - "index": "pypi", "markers": "sys_platform == 'win32'", "version": "==0.4.4" }, @@ -248,6 +253,7 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "emoji": { @@ -259,10 +265,10 @@ }, "fakeredis": { "hashes": [ - 
"sha256:01cb47d2286825a171fb49c0e445b1fa9307087e07cbb3d027ea10dbff108b6a", - "sha256:2c6041cf0225889bc403f3949838b2c53470a95a9e2d4272422937786f5f8f73" + "sha256:1ac0cef767c37f51718874a33afb5413e69d132988cb6a80c6e6dbeddf8c7623", + "sha256:e0416e4941cecd3089b0d901e60c8dc3c944f6384f5e29e2261c0d3c5fa99669" ], - "version": "==1.4.5" + "version": "==1.5.0" }, "feedparser": { "hashes": [ @@ -283,60 +289,57 @@ }, "hiredis": { "hashes": [ - "sha256:06a039208f83744a702279b894c8cf24c14fd63c59cd917dcde168b79eef0680", - "sha256:0a909bf501459062aa1552be1461456518f367379fdc9fdb1f2ca5e4a1fdd7c0", - "sha256:18402d9e54fb278cb9a8c638df6f1550aca36a009d47ecf5aa263a38600f35b0", - "sha256:1e4cbbc3858ec7e680006e5ca590d89a5e083235988f26a004acf7244389ac01", - "sha256:23344e3c2177baf6975fbfa361ed92eb7d36d08f454636e5054b3faa7c2aff8a", - "sha256:289b31885b4996ce04cadfd5fc03d034dce8e2a8234479f7c9e23b9e245db06b", - "sha256:2c1c570ae7bf1bab304f29427e2475fe1856814312c4a1cf1cd0ee133f07a3c6", - "sha256:2c227c0ed371771ffda256034427320870e8ea2e4fd0c0a618c766e7c49aad73", - "sha256:3bb9b63d319402cead8bbd9dd55dca3b667d2997e9a0d8a1f9b6cc274db4baee", - "sha256:3ef2183de67b59930d2db8b8e8d4d58e00a50fcc5e92f4f678f6eed7a1c72d55", - "sha256:43b8ed3dbfd9171e44c554cb4acf4ee4505caa84c5e341858b50ea27dd2b6e12", - "sha256:47bcf3c5e6c1e87ceb86cdda2ee983fa0fe56a999e6185099b3c93a223f2fa9b", - "sha256:5263db1e2e1e8ae30500cdd75a979ff99dcc184201e6b4b820d0de74834d2323", - "sha256:5b1451727f02e7acbdf6aae4e06d75f66ee82966ff9114550381c3271a90f56c", - "sha256:6996883a8a6ff9117cbb3d6f5b0dcbbae6fb9e31e1a3e4e2f95e0214d9a1c655", - "sha256:6c96f64a54f030366657a54bb90b3093afc9c16c8e0dfa29fc0d6dbe169103a5", - "sha256:7332d5c3e35154cd234fd79573736ddcf7a0ade7a986db35b6196b9171493e75", - "sha256:7885b6f32c4a898e825bb7f56f36a02781ac4a951c63e4169f0afcf9c8c30dfb", - "sha256:7b0f63f10a166583ab744a58baad04e0f52cfea1ac27bfa1b0c21a48d1003c23", - "sha256:819f95d4eba3f9e484dd115ab7ab72845cf766b84286a00d4ecf76d33f1edca1", - "sha256:8968eeaa4d37a38f8ca1f9dbe53526b69628edc9c42229a5b2f56d98bb828c1f", - "sha256:89ebf69cb19a33d625db72d2ac589d26e936b8f7628531269accf4a3196e7872", - "sha256:8daecd778c1da45b8bd54fd41ffcd471a86beed3d8e57a43acf7a8d63bba4058", - "sha256:955ba8ea73cf3ed8bd2f963b4cb9f8f0dcb27becd2f4b3dd536fd24c45533454", - "sha256:964f18a59f5a64c0170f684c417f4fe3e695a536612e13074c4dd5d1c6d7c882", - "sha256:969843fbdfbf56cdb71da6f0bdf50f9985b8b8aeb630102945306cf10a9c6af2", - "sha256:996021ef33e0f50b97ff2d6b5f422a0fe5577de21a8873b58a779a5ddd1c3132", - "sha256:9e9c9078a7ce07e6fce366bd818be89365a35d2e4b163268f0ca9ba7e13bb2f6", - "sha256:a04901757cb0fb0f5602ac11dda48f5510f94372144d06c2563ba56c480b467c", - "sha256:a7bf1492429f18d205f3a818da3ff1f242f60aa59006e53dee00b4ef592a3363", - "sha256:aa0af2deb166a5e26e0d554b824605e660039b161e37ed4f01b8d04beec184f3", - "sha256:abfb15a6a7822f0fae681785cb38860e7a2cb1616a708d53df557b3d76c5bfd4", - "sha256:b253fe4df2afea4dfa6b1fa8c5fef212aff8bcaaeb4207e81eed05cb5e4a7919", - "sha256:b27f082f47d23cffc4cf1388b84fdc45c4ef6015f906cd7e0d988d9e35d36349", - "sha256:b33aea449e7f46738811fbc6f0b3177c6777a572207412bbbf6f525ffed001ae", - "sha256:b44f9421c4505c548435244d74037618f452844c5d3c67719d8a55e2613549da", - "sha256:bcc371151d1512201d0214c36c0c150b1dc64f19c2b1a8c9cb1d7c7c15ebd93f", - "sha256:c2851deeabd96d3f6283e9c6b26e0bfed4de2dc6fb15edf913e78b79fc5909ed", - "sha256:cdfd501c7ac5b198c15df800a3a34c38345f5182e5f80770caf362bccca65628", - "sha256:d2c0caffa47606d6d7c8af94ba42547bd2a441f06c74fd90a1ffe328524a6c64", - 
"sha256:dcb2db95e629962db5a355047fb8aefb012df6c8ae608930d391619dbd96fd86", - "sha256:e0eeb9c112fec2031927a1745788a181d0eecbacbed941fc5c4f7bc3f7b273bf", - "sha256:e154891263306200260d7f3051982774d7b9ef35af3509d5adbbe539afd2610c", - "sha256:e2e023a42dcbab8ed31f97c2bcdb980b7fbe0ada34037d87ba9d799664b58ded", - "sha256:e64be68255234bb489a574c4f2f8df7029c98c81ec4d160d6cd836e7f0679390", - "sha256:e82d6b930e02e80e5109b678c663a9ed210680ded81c1abaf54635d88d1da298" - ], - "version": "==1.1.0" + "sha256:04026461eae67fdefa1949b7332e488224eac9e8f2b5c58c98b54d29af22093e", + "sha256:04927a4c651a0e9ec11c68e4427d917e44ff101f761cd3b5bc76f86aaa431d27", + "sha256:07bbf9bdcb82239f319b1f09e8ef4bdfaec50ed7d7ea51a56438f39193271163", + "sha256:09004096e953d7ebd508cded79f6b21e05dff5d7361771f59269425108e703bc", + "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26", + "sha256:0b39ec237459922c6544d071cdcf92cbb5bc6685a30e7c6d985d8a3e3a75326e", + "sha256:0d5109337e1db373a892fdcf78eb145ffb6bbd66bb51989ec36117b9f7f9b579", + "sha256:0f41827028901814c709e744060843c77e78a3aca1e0d6875d2562372fcb405a", + "sha256:11d119507bb54e81f375e638225a2c057dda748f2b1deef05c2b1a5d42686048", + "sha256:1233e303645f468e399ec906b6b48ab7cd8391aae2d08daadbb5cad6ace4bd87", + "sha256:139705ce59d94eef2ceae9fd2ad58710b02aee91e7fa0ccb485665ca0ecbec63", + "sha256:1f03d4dadd595f7a69a75709bc81902673fa31964c75f93af74feac2f134cc54", + "sha256:240ce6dc19835971f38caf94b5738092cb1e641f8150a9ef9251b7825506cb05", + "sha256:294a6697dfa41a8cba4c365dd3715abc54d29a86a40ec6405d677ca853307cfb", + "sha256:3d55e36715ff06cdc0ab62f9591607c4324297b6b6ce5b58cb9928b3defe30ea", + "sha256:3dddf681284fe16d047d3ad37415b2e9ccdc6c8986c8062dbe51ab9a358b50a5", + "sha256:3f5f7e3a4ab824e3de1e1700f05ad76ee465f5f11f5db61c4b297ec29e692b2e", + "sha256:508999bec4422e646b05c95c598b64bdbef1edf0d2b715450a078ba21b385bcc", + "sha256:5d2a48c80cf5a338d58aae3c16872f4d452345e18350143b3bf7216d33ba7b99", + "sha256:5dc7a94bb11096bc4bffd41a3c4f2b958257085c01522aa81140c68b8bf1630a", + "sha256:65d653df249a2f95673976e4e9dd7ce10de61cfc6e64fa7eeaa6891a9559c581", + "sha256:7492af15f71f75ee93d2a618ca53fea8be85e7b625e323315169977fae752426", + "sha256:7f0055f1809b911ab347a25d786deff5e10e9cf083c3c3fd2dd04e8612e8d9db", + "sha256:807b3096205c7cec861c8803a6738e33ed86c9aae76cac0e19454245a6bbbc0a", + "sha256:81d6d8e39695f2c37954d1011c0480ef7cf444d4e3ae24bc5e89ee5de360139a", + "sha256:87c7c10d186f1743a8fd6a971ab6525d60abd5d5d200f31e073cd5e94d7e7a9d", + "sha256:8b42c0dc927b8d7c0eb59f97e6e34408e53bc489f9f90e66e568f329bff3e443", + "sha256:a00514362df15af041cc06e97aebabf2895e0a7c42c83c21894be12b84402d79", + "sha256:a39efc3ade8c1fb27c097fd112baf09d7fd70b8cb10ef1de4da6efbe066d381d", + "sha256:a4ee8000454ad4486fb9f28b0cab7fa1cd796fc36d639882d0b34109b5b3aec9", + "sha256:a7928283143a401e72a4fad43ecc85b35c27ae699cf5d54d39e1e72d97460e1d", + "sha256:adf4dd19d8875ac147bf926c727215a0faf21490b22c053db464e0bf0deb0485", + "sha256:ae8427a5e9062ba66fc2c62fb19a72276cf12c780e8db2b0956ea909c48acff5", + "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048", + "sha256:b84f29971f0ad4adaee391c6364e6f780d5aae7e9226d41964b26b49376071d0", + "sha256:c39c46d9e44447181cd502a35aad2bb178dbf1b1f86cf4db639d7b9614f837c6", + "sha256:cb2126603091902767d96bcb74093bd8b14982f41809f85c9b96e519c7e1dc41", + "sha256:dcef843f8de4e2ff5e35e96ec2a4abbdf403bd0f732ead127bd27e51f38ac298", + "sha256:e3447d9e074abf0e3cd85aef8131e01ab93f9f0e86654db7ac8a3f73c63706ce", + 
"sha256:f52010e0a44e3d8530437e7da38d11fb822acfb0d5b12e9cd5ba655509937ca0", + "sha256:f8196f739092a78e4f6b1b2172679ed3343c39c61a3e9d722ce6fcf1dac2824a" + ], + "markers": "python_version >= '3.6'", + "version": "==2.0.0" }, "humanfriendly": { "hashes": [ "sha256:066562956639ab21ff2676d1fda0b5987e985c534fc76700a19bd54bcb81121d", "sha256:d5c731705114b9ad673754f3317d9fa4c23212f36b29bdc4272a892eafc9bc72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==9.1" }, "idna": { @@ -344,6 +347,7 @@ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "imagesize": { @@ -351,6 +355,7 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "jinja2": { @@ -358,50 +363,50 @@ "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419", "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.3" }, "lxml": { "hashes": [ - "sha256:0448576c148c129594d890265b1a83b9cd76fd1f0a6a04620753d9a6bcfd0a4d", - "sha256:127f76864468d6630e1b453d3ffbbd04b024c674f55cf0a30dc2595137892d37", - "sha256:1471cee35eba321827d7d53d104e7b8c593ea3ad376aa2df89533ce8e1b24a01", - "sha256:2363c35637d2d9d6f26f60a208819e7eafc4305ce39dc1d5005eccc4593331c2", - "sha256:2e5cc908fe43fe1aa299e58046ad66981131a66aea3129aac7770c37f590a644", - "sha256:2e6fd1b8acd005bd71e6c94f30c055594bbd0aa02ef51a22bbfa961ab63b2d75", - "sha256:366cb750140f221523fa062d641393092813b81e15d0e25d9f7c6025f910ee80", - "sha256:42ebca24ba2a21065fb546f3e6bd0c58c3fe9ac298f3a320147029a4850f51a2", - "sha256:4e751e77006da34643ab782e4a5cc21ea7b755551db202bc4d3a423b307db780", - "sha256:4fb85c447e288df535b17ebdebf0ec1cf3a3f1a8eba7e79169f4f37af43c6b98", - "sha256:50c348995b47b5a4e330362cf39fc503b4a43b14a91c34c83b955e1805c8e308", - "sha256:535332fe9d00c3cd455bd3dd7d4bacab86e2d564bdf7606079160fa6251caacf", - "sha256:535f067002b0fd1a4e5296a8f1bf88193080ff992a195e66964ef2a6cfec5388", - "sha256:5be4a2e212bb6aa045e37f7d48e3e1e4b6fd259882ed5a00786f82e8c37ce77d", - "sha256:60a20bfc3bd234d54d49c388950195d23a5583d4108e1a1d47c9eef8d8c042b3", - "sha256:648914abafe67f11be7d93c1a546068f8eff3c5fa938e1f94509e4a5d682b2d8", - "sha256:681d75e1a38a69f1e64ab82fe4b1ed3fd758717bed735fb9aeaa124143f051af", - "sha256:68a5d77e440df94011214b7db907ec8f19e439507a70c958f750c18d88f995d2", - "sha256:69a63f83e88138ab7642d8f61418cf3180a4d8cd13995df87725cb8b893e950e", - "sha256:6e4183800f16f3679076dfa8abf2db3083919d7e30764a069fb66b2b9eff9939", - "sha256:6fd8d5903c2e53f49e99359b063df27fdf7acb89a52b6a12494208bf61345a03", - "sha256:791394449e98243839fa822a637177dd42a95f4883ad3dec2a0ce6ac99fb0a9d", - "sha256:7a7669ff50f41225ca5d6ee0a1ec8413f3a0d8aa2b109f86d540887b7ec0d72a", - "sha256:7e9eac1e526386df7c70ef253b792a0a12dd86d833b1d329e038c7a235dfceb5", - "sha256:7ee8af0b9f7de635c61cdd5b8534b76c52cd03536f29f51151b377f76e214a1a", - "sha256:8246f30ca34dc712ab07e51dc34fea883c00b7ccb0e614651e49da2c49a30711", - "sha256:8c88b599e226994ad4db29d93bc149aa1aff3dc3a4355dd5757569ba78632bdf", - 
"sha256:923963e989ffbceaa210ac37afc9b906acebe945d2723e9679b643513837b089", - "sha256:94d55bd03d8671686e3f012577d9caa5421a07286dd351dfef64791cf7c6c505", - "sha256:97db258793d193c7b62d4e2586c6ed98d51086e93f9a3af2b2034af01450a74b", - "sha256:a9d6bc8642e2c67db33f1247a77c53476f3a166e09067c0474facb045756087f", - "sha256:cd11c7e8d21af997ee8079037fff88f16fda188a9776eb4b81c7e4c9c0a7d7fc", - "sha256:d8d3d4713f0c28bdc6c806a278d998546e8efc3498949e3ace6e117462ac0a5e", - "sha256:e0bfe9bb028974a481410432dbe1b182e8191d5d40382e5b8ff39cdd2e5c5931", - "sha256:f4822c0660c3754f1a41a655e37cb4dbbc9be3d35b125a37fab6f82d47674ebc", - "sha256:f83d281bb2a6217cd806f4cf0ddded436790e66f393e124dfe9731f6b3fb9afe", - "sha256:fc37870d6716b137e80d19241d0e2cff7a7643b925dfa49b4c8ebd1295eb506e" + "sha256:079f3ae844f38982d156efce585bc540c16a926d4436712cf4baee0cce487a3d", + "sha256:0fbcf5565ac01dff87cbfc0ff323515c823081c5777a9fc7703ff58388c258c3", + "sha256:122fba10466c7bd4178b07dba427aa516286b846b2cbd6f6169141917283aae2", + "sha256:1b7584d421d254ab86d4f0b13ec662a9014397678a7c4265a02a6d7c2b18a75f", + "sha256:26e761ab5b07adf5f555ee82fb4bfc35bf93750499c6c7614bd64d12aaa67927", + "sha256:289e9ca1a9287f08daaf796d96e06cb2bc2958891d7911ac7cae1c5f9e1e0ee3", + "sha256:2a9d50e69aac3ebee695424f7dbd7b8c6d6eb7de2a2eb6b0f6c7db6aa41e02b7", + "sha256:33bb934a044cf32157c12bfcfbb6649807da20aa92c062ef51903415c704704f", + "sha256:3439c71103ef0e904ea0a1901611863e51f50b5cd5e8654a151740fde5e1cade", + "sha256:39b78571b3b30645ac77b95f7c69d1bffc4cf8c3b157c435a34da72e78c82468", + "sha256:4289728b5e2000a4ad4ab8da6e1db2e093c63c08bdc0414799ee776a3f78da4b", + "sha256:4bff24dfeea62f2e56f5bab929b4428ae6caba2d1eea0c2d6eb618e30a71e6d4", + "sha256:542d454665a3e277f76954418124d67516c5f88e51a900365ed54a9806122b83", + "sha256:5a0a14e264069c03e46f926be0d8919f4105c1623d620e7ec0e612a2e9bf1c04", + "sha256:66e575c62792c3f9ca47cb8b6fab9e35bab91360c783d1606f758761810c9791", + "sha256:74f7d8d439b18fa4c385f3f5dfd11144bb87c1da034a466c5b5577d23a1d9b51", + "sha256:7610b8c31688f0b1be0ef882889817939490a36d0ee880ea562a4e1399c447a1", + "sha256:76fa7b1362d19f8fbd3e75fe2fb7c79359b0af8747e6f7141c338f0bee2f871a", + "sha256:7728e05c35412ba36d3e9795ae8995e3c86958179c9770e65558ec3fdfd3724f", + "sha256:8157dadbb09a34a6bd95a50690595e1fa0af1a99445e2744110e3dca7831c4ee", + "sha256:820628b7b3135403540202e60551e741f9b6d3304371712521be939470b454ec", + "sha256:884ab9b29feaca361f7f88d811b1eea9bfca36cf3da27768d28ad45c3ee6f969", + "sha256:89b8b22a5ff72d89d48d0e62abb14340d9e99fd637d046c27b8b257a01ffbe28", + "sha256:92e821e43ad382332eade6812e298dc9701c75fe289f2a2d39c7960b43d1e92a", + "sha256:b007cbb845b28db4fb8b6a5cdcbf65bacb16a8bd328b53cbc0698688a68e1caa", + "sha256:bc4313cbeb0e7a416a488d72f9680fffffc645f8a838bd2193809881c67dd106", + "sha256:bccbfc27563652de7dc9bdc595cb25e90b59c5f8e23e806ed0fd623755b6565d", + "sha256:c4f05c5a7c49d2fb70223d0d5bcfbe474cf928310ac9fa6a7c6dddc831d0b1d4", + "sha256:ce256aaa50f6cc9a649c51be3cd4ff142d67295bfc4f490c9134d0f9f6d58ef0", + "sha256:d2e35d7bf1c1ac8c538f88d26b396e73dd81440d59c1ef8522e1ea77b345ede4", + "sha256:df7c53783a46febb0e70f6b05df2ba104610f2fb0d27023409734a3ecbb78fb2", + "sha256:efac139c3f0bf4f0939f9375af4b02c5ad83a622de52d6dfa8e438e8e01d0eb0", + "sha256:efd7a09678fd8b53117f6bae4fa3825e0a22b03ef0a932e070c0bdbb3a35e654", + "sha256:f2380a6376dfa090227b663f9678150ef27543483055cc327555fb592c5967e2", + "sha256:f8380c03e45cf09f8557bdaa41e1fa7c81f3ae22828e1db470ab2a6c96d8bc23", + "sha256:f90ba11136bfdd25cae3951af8da2e95121c9b9b93727b1b896e3fa105b2f586" ], "index": 
"pypi", - "version": "==4.6.2" + "version": "==4.6.3" }, "markdownify": { "hashes": [ @@ -466,15 +471,16 @@ "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be", "sha256:feb7b34d6325451ef96bc0e36e1a6c0c1c64bc1fbec4b854f4529e51887b1621" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "more-itertools": { "hashes": [ - "sha256:8e1a2a43b2f2727425f2b5839587ae37093f19153dc26c0927d1048ff6557330", - "sha256:b3a9005928e5bed54076e6e549c792b306fddfe72b2d1d22dd63d42d5d3899cf" + "sha256:5652a9ac72209ed7df8d9c15daf4e1aa0e3d2ccd3c87f8265a0673cd9cbc9ced", + "sha256:c5d6da9ca3ff65220c3bfd2a8db06d698f05d4d2b9be57e1deb2be5a45019713" ], "index": "pypi", - "version": "==8.6.0" + "version": "==8.7.0" }, "multidict": { "hashes": [ @@ -516,12 +522,14 @@ "sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281", "sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80" ], + "markers": "python_version >= '3.6'", "version": "==5.1.0" }, "ordered-set": { "hashes": [ "sha256:ba93b2df055bca202116ec44b9bead3df33ea63a7d5827ff8e16738b97f33a95" ], + "markers": "python_version >= '3.5'", "version": "==4.0.2" }, "packaging": { @@ -529,6 +537,7 @@ "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5", "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.9" }, "pamqp": { @@ -577,20 +586,23 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { "hashes": [ - "sha256:37a13ba168a02ac54cc5891a42b1caec333e59b66addb7fa633ea8a6d73445c0", - "sha256:b21b072d0ccdf29297a82a2363359d99623597b8a265b8081760e4d0f7153c88" + "sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94", + "sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8" ], - "version": "==2.8.0" + "markers": "python_version >= '3.5'", + "version": "==2.8.1" }, "pyparsing": { "hashes": [ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "python-dateutil": { @@ -610,28 +622,45 @@ }, "pyyaml": { "hashes": [ - "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97", - "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76", - "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2", - "sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e", - "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648", - "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf", - "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f", - "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2", - "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee", - "sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a", - "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d", - "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c", - 
"sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a" + "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf", + "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696", + "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393", + "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77", + "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922", + "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5", + "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8", + "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10", + "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc", + "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", + "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", + "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", + "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347", + "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", + "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541", + "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", + "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", + "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc", + "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", + "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa", + "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", + "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122", + "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", + "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", + "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", + "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc", + "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247", + "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6", + "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0" ], "index": "pypi", - "version": "==5.3.1" + "version": "==5.4.1" }, "redis": { "hashes": [ "sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2", "sha256:432b788c4530cfe16d8d943a09d40ca6c16149727e4afe8c2c9d5580c59d9f24" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==3.5.3" }, "requests": { @@ -644,17 +673,18 @@ }, "sentry-sdk": { "hashes": [ - "sha256:0a711ec952441c2ec89b8f5d226c33bc697914f46e876b44a4edd3e7864cf4d0", - "sha256:737a094e49a529dd0fdcaafa9e97cf7c3d5eb964bd229821d640bc77f3502b3f" + "sha256:4ae8d1ced6c67f1c8ea51d82a16721c166c489b76876c9f2c202b8a50334b237", + "sha256:e75c8c58932bda8cd293ea8e4b242527129e1caaec91433d21b8b2f20fee030b" ], "index": "pypi", - "version": "==0.19.5" + "version": "==0.20.3" }, "six": { "hashes": [ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -673,11 +703,11 @@ }, "soupsieve": { "hashes": [ - "sha256:407fa1e8eb3458d1b5614df51d9651a1180ea5fedf07feb46e45d7e25e6d6cdd", - 
"sha256:d3a5ea5b350423f47d07639f74475afedad48cf41c0ad7a82ca13a3928af34f6" + "sha256:052774848f448cf19c7e959adf5566904d525f33a3f8b6ba6f6f8f26ec7de0cc", + "sha256:c2c1c2d44f158cdbddab7824a9af8c4f83c76b1e23e049479aa432feb6c4c23b" ], "markers": "python_version >= '3.0'", - "version": "==2.2" + "version": "==2.2.1" }, "sphinx": { "hashes": [ @@ -692,6 +722,7 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -699,6 +730,7 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -706,6 +738,7 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -713,6 +746,7 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -720,6 +754,7 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -727,6 +762,7 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], + "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "statsd": { @@ -747,10 +783,11 @@ }, "urllib3": { "hashes": [ - "sha256:1b465e494e3e0d8939b50680403e3aedaa2bc434b7d5af64dfd3c958d7f5ae80", - "sha256:de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73" + "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df", + "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" ], - "version": "==1.26.3" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", + "version": "==1.26.4" }, "yarl": { "hashes": [ @@ -792,6 +829,7 @@ "sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a", "sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71" ], + "markers": "python_version >= '3.6'", "version": "==1.6.3" } }, @@ -808,6 +846,7 @@ "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.3.0" }, "certifi": { @@ -822,69 +861,74 @@ "sha256:32e43d604bbe7896fe7c248a9c2276447dbef840feb28fe20494f62af110211d", "sha256:cf22deb93d4bcf92f345a5c3cd39d3d41d6340adc60c78bbbd6588c384fda6a1" ], + "markers": "python_full_version >= '3.6.1'", "version": "==3.2.0" }, "chardet": { "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + 
"sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], - "version": "==3.0.4" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.0.0" }, "coverage": { "hashes": [ - "sha256:08b3ba72bd981531fd557f67beee376d6700fba183b167857038997ba30dd297", - "sha256:2757fa64e11ec12220968f65d086b7a29b6583d16e9a544c889b22ba98555ef1", - "sha256:3102bb2c206700a7d28181dbe04d66b30780cde1d1c02c5f3c165cf3d2489497", - "sha256:3498b27d8236057def41de3585f317abae235dd3a11d33e01736ffedb2ef8606", - "sha256:378ac77af41350a8c6b8801a66021b52da8a05fd77e578b7380e876c0ce4f528", - "sha256:38f16b1317b8dd82df67ed5daa5f5e7c959e46579840d77a67a4ceb9cef0a50b", - "sha256:3911c2ef96e5ddc748a3c8b4702c61986628bb719b8378bf1e4a6184bbd48fe4", - "sha256:3a3c3f8863255f3c31db3889f8055989527173ef6192a283eb6f4db3c579d830", - "sha256:3b14b1da110ea50c8bcbadc3b82c3933974dbeea1832e814aab93ca1163cd4c1", - "sha256:535dc1e6e68fad5355f9984d5637c33badbdc987b0c0d303ee95a6c979c9516f", - "sha256:6f61319e33222591f885c598e3e24f6a4be3533c1d70c19e0dc59e83a71ce27d", - "sha256:723d22d324e7997a651478e9c5a3120a0ecbc9a7e94071f7e1954562a8806cf3", - "sha256:76b2775dda7e78680d688daabcb485dc87cf5e3184a0b3e012e1d40e38527cc8", - "sha256:782a5c7df9f91979a7a21792e09b34a658058896628217ae6362088b123c8500", - "sha256:7e4d159021c2029b958b2363abec4a11db0ce8cd43abb0d9ce44284cb97217e7", - "sha256:8dacc4073c359f40fcf73aede8428c35f84639baad7e1b46fce5ab7a8a7be4bb", - "sha256:8f33d1156241c43755137288dea619105477961cfa7e47f48dbf96bc2c30720b", - "sha256:8ffd4b204d7de77b5dd558cdff986a8274796a1e57813ed005b33fd97e29f059", - "sha256:93a280c9eb736a0dcca19296f3c30c720cb41a71b1f9e617f341f0a8e791a69b", - "sha256:9a4f66259bdd6964d8cf26142733c81fb562252db74ea367d9beb4f815478e72", - "sha256:9a9d4ff06804920388aab69c5ea8a77525cf165356db70131616acd269e19b36", - "sha256:a2070c5affdb3a5e751f24208c5c4f3d5f008fa04d28731416e023c93b275277", - "sha256:a4857f7e2bc6921dbd487c5c88b84f5633de3e7d416c4dc0bb70256775551a6c", - "sha256:a607ae05b6c96057ba86c811d9c43423f35e03874ffb03fbdcd45e0637e8b631", - "sha256:a66ca3bdf21c653e47f726ca57f46ba7fc1f260ad99ba783acc3e58e3ebdb9ff", - "sha256:ab110c48bc3d97b4d19af41865e14531f300b482da21783fdaacd159251890e8", - "sha256:b239711e774c8eb910e9b1ac719f02f5ae4bf35fa0420f438cdc3a7e4e7dd6ec", - "sha256:be0416074d7f253865bb67630cf7210cbc14eb05f4099cc0f82430135aaa7a3b", - "sha256:c46643970dff9f5c976c6512fd35768c4a3819f01f61169d8cdac3f9290903b7", - "sha256:c5ec71fd4a43b6d84ddb88c1df94572479d9a26ef3f150cef3dacefecf888105", - "sha256:c6e5174f8ca585755988bc278c8bb5d02d9dc2e971591ef4a1baabdf2d99589b", - "sha256:c89b558f8a9a5a6f2cfc923c304d49f0ce629c3bd85cb442ca258ec20366394c", - "sha256:cc44e3545d908ecf3e5773266c487ad1877be718d9dc65fc7eb6e7d14960985b", - "sha256:cc6f8246e74dd210d7e2b56c76ceaba1cc52b025cd75dbe96eb48791e0250e98", - "sha256:cd556c79ad665faeae28020a0ab3bda6cd47d94bec48e36970719b0b86e4dcf4", - "sha256:ce6f3a147b4b1a8b09aae48517ae91139b1b010c5f36423fa2b866a8b23df879", - "sha256:ceb499d2b3d1d7b7ba23abe8bf26df5f06ba8c71127f188333dddcf356b4b63f", - "sha256:cef06fb382557f66d81d804230c11ab292d94b840b3cb7bf4450778377b592f4", - "sha256:e448f56cfeae7b1b3b5bcd99bb377cde7c4eb1970a525c770720a352bc4c8044", - "sha256:e52d3d95df81c8f6b2a1685aabffadf2d2d9ad97203a40f8d61e51b70f191e4e", - "sha256:ee2f1d1c223c3d2c24e3afbb2dd38be3f03b1a8d6a83ee3d9eb8c36a52bee899", - "sha256:f2c6888eada180814b8583c3e793f3f343a692fc802546eed45f40a001b1169f", - "sha256:f51dbba78d68a44e99d484ca8c8f604f17e957c1ca09c3ebc2c7e3bbd9ba0448", 
- "sha256:f54de00baf200b4539a5a092a759f000b5f45fd226d6d25a76b0dff71177a714", - "sha256:fa10fee7e32213f5c7b0d6428ea92e3a3fdd6d725590238a3f92c0de1c78b9d2", - "sha256:fabeeb121735d47d8eab8671b6b031ce08514c86b7ad8f7d5490a7b6dcd6267d", - "sha256:fac3c432851038b3e6afe086f777732bcf7f6ebbfd90951fa04ee53db6d0bcdd", - "sha256:fda29412a66099af6d6de0baa6bd7c52674de177ec2ad2630ca264142d69c6c7", - "sha256:ff1330e8bc996570221b450e2d539134baa9465f5cb98aff0e0f73f34172e0ae" + "sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c", + "sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6", + "sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45", + "sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a", + "sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03", + "sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529", + "sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a", + "sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a", + "sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2", + "sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6", + "sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759", + "sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53", + "sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a", + "sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4", + "sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff", + "sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502", + "sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793", + "sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb", + "sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905", + "sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821", + "sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b", + "sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81", + "sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0", + "sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b", + "sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3", + "sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184", + "sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701", + "sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a", + "sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82", + "sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638", + "sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5", + "sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083", + "sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6", + "sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90", + "sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465", + "sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a", + "sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3", + "sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e", + "sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066", + "sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf", + 
"sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b", + "sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae", + "sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669", + "sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873", + "sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b", + "sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6", + "sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb", + "sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160", + "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c", + "sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079", + "sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d", + "sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6" ], "index": "pypi", - "version": "==5.3.1" + "version": "==5.5" }, "coveralls": { "hashes": [ @@ -916,19 +960,19 @@ }, "flake8": { "hashes": [ - "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839", - "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b" + "sha256:12d05ab02614b6aee8df7c36b97d1a3b2372761222b19b58621355e82acddcff", + "sha256:78873e372b12b093da7b5e5ed302e8ad9e988b38b063b61ad937f26ca58fc5f0" ], "index": "pypi", - "version": "==3.8.4" + "version": "==3.9.0" }, "flake8-annotations": { "hashes": [ - "sha256:3a377140556aecf11fa9f3bb18c10db01f5ea56dc79a730e2ec9b4f1f49e2055", - "sha256:e17947a48a5b9f632fe0c72682fc797c385e451048e7dfb20139f448a074cb3e" + "sha256:40a4d504cdf64126ea0bdca39edab1608bc6d515e96569b7e7c3c59c84f66c36", + "sha256:eabbfb2dd59ae0e9835f509f930e79cd99fa4ff1026fe6ca073503a57407037c" ], "index": "pypi", - "version": "==2.5.0" + "version": "==2.6.1" }, "flake8-bugbear": { "hashes": [ @@ -940,11 +984,11 @@ }, "flake8-docstrings": { "hashes": [ - "sha256:3d5a31c7ec6b7367ea6506a87ec293b94a0a46c0bce2bb4975b7f1d09b6f3717", - "sha256:a256ba91bc52307bef1de59e2a009c3cf61c3d0952dbe035d6ff7208940c2edc" + "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde", + "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b" ], "index": "pypi", - "version": "==1.5.0" + "version": "==1.6.0" }, "flake8-import-order": { "hashes": [ @@ -986,16 +1030,18 @@ }, "identify": { "hashes": [ - "sha256:de7129142a5c86d75a52b96f394d94d96d497881d2aaf8eafe320cdbe8ac4bcc", - "sha256:e0dae57c0397629ce13c289f6ddde0204edf518f557bfdb1e56474aa143e77c3" + "sha256:43cb1965e84cdd247e875dec6d13332ef5be355ddc16776396d98089b9053d87", + "sha256:c7c0f590526008911ccc5ceee6ed7b085cbc92f7b6591d0ee5913a130ad64034" ], - "version": "==1.5.14" + "markers": "python_full_version >= '3.6.1'", + "version": "==2.2.2" }, "idna": { "hashes": [ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "mccabe": { @@ -1022,51 +1068,70 @@ }, "pre-commit": { "hashes": [ - "sha256:6c86d977d00ddc8a60d68eec19f51ef212d9462937acf3ea37c7adec32284ac0", - "sha256:ee784c11953e6d8badb97d19bc46b997a3a9eded849881ec587accd8608d74a4" + "sha256:94c82f1bf5899d56edb1d926732f4e75a7df29a0c8c092559c77420c9d62428b", + "sha256:de55c5c72ce80d79106e48beb1b54104d16495ce7f95b0c7b13d4784193a00af" ], "index": "pypi", - "version": "==2.9.3" + "version": "==2.11.1" }, "pycodestyle": 
{ "hashes": [ - "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367", - "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e" + "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068", + "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef" ], - "version": "==2.6.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.7.0" }, "pydocstyle": { "hashes": [ - "sha256:19b86fa8617ed916776a11cd8bc0197e5b9856d5433b777f51a3defe13075325", - "sha256:aca749e190a01726a4fb472dd4ef23b5c9da7b9205c0a7857c06533de13fd678" + "sha256:164befb520d851dbcf0e029681b91f4f599c62c5cd8933fd54b1bfbd50e89e1f", + "sha256:d4449cf16d7e6709f63192146706933c7a334af7c0f083904799ccb851c50f6d" ], - "version": "==5.1.1" + "markers": "python_version >= '3.6'", + "version": "==6.0.0" }, "pyflakes": { "hashes": [ - "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92", - "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8" + "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3", + "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db" ], - "version": "==2.2.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.3.1" }, "pyyaml": { "hashes": [ - "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97", - "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76", - "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2", - "sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e", - "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648", - "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf", - "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f", - "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2", - "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee", - "sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a", - "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d", - "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c", - "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a" + "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf", + "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696", + "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393", + "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77", + "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922", + "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5", + "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8", + "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10", + "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc", + "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", + "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", + "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", + "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347", + "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", + "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541", + 
"sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", + "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", + "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc", + "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", + "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa", + "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", + "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122", + "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", + "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", + "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", + "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc", + "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247", + "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6", + "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0" ], "index": "pypi", - "version": "==5.3.1" + "version": "==5.4.1" }, "requests": { "hashes": [ @@ -1081,6 +1146,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -1095,21 +1161,24 @@ "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.10.2" }, "urllib3": { "hashes": [ - "sha256:1b465e494e3e0d8939b50680403e3aedaa2bc434b7d5af64dfd3c958d7f5ae80", - "sha256:de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73" + "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df", + "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" ], - "version": "==1.26.3" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", + "version": "==1.26.4" }, "virtualenv": { "hashes": [ - "sha256:147b43894e51dd6bba882cf9c282447f780e2251cd35172403745fc381a0a80d", - "sha256:2be72df684b74df0ea47679a7df93fd0e04e72520022c57b479d8f881485dbe3" + "sha256:49ec4eb4c224c6f7dd81bb6d0a28a09ecae5894f4e593c89b0db0885f565a107", + "sha256:83f95875d382c7abafe06bd2a4cdd1b363e1bb77e02f155ebe8ac082a916b37c" ], - "version": "==20.4.2" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==20.4.3" } } } -- cgit v1.2.3 From b71a1e5d595d0775ffc1b4f502b9fc5efc3ca18d Mon Sep 17 00:00:00 2001 From: MarkKoz Date: Tue, 30 Mar 2021 11:32:11 -0700 Subject: HelpChannels: use aware datetimes everywhere MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix issues converting timestamps to datetimes and vice-versa. The main culprit id `datetime.timestamp()`, which always assumes naïve objects are in local time. That behaviour conflicts with discord.py, which returns naïve objects in UTC rather than local time. Switching from `utcfromtimestamp` to `fromtimestamp` was incorrect since the latter also assumes the timestamp is in local time. 
--- bot/exts/help_channels/_channel.py | 25 ++++++++++++++----------- bot/exts/help_channels/_cog.py | 18 +++++++++++------- bot/exts/help_channels/_message.py | 18 +++++++++--------- 3 files changed, 34 insertions(+), 27 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index b1960531d..719d341bd 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -1,8 +1,10 @@ import logging import typing as t -from datetime import datetime, timedelta +from datetime import timedelta +import arrow import discord +from arrow import Arrow import bot from bot import constants @@ -25,8 +27,8 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco yield channel -async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[datetime, str]: - """Return the timestamp at which the given help `channel` should be closed along with the reason.""" +async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[Arrow, str]: + """Return the time at which the given help `channel` should be closed along with the reason.""" log.trace(f"Getting the closing time for #{channel} ({channel.id}).") is_empty = await _message.is_empty(channel) @@ -49,23 +51,24 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T msg = await _message.get_last_message(channel) if not msg: - # last message can't be retreived, return datetime.min so channel closes right now. + # Last message can't be retrieved, return datetime.min so channel closes right now. log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages, closing now.") - return datetime.min, "deleted" + return Arrow.min, "deleted" # The time at which a channel should be closed. - return msg.created_at + timedelta(minutes=idle_minutes_claimant), "latest_message" + time = Arrow.fromdatetime(msg.created_at) + timedelta(minutes=idle_minutes_claimant) + return time, "latest_message" # Switch to datetime objects so we can use time deltas - claimant_last_message_time = datetime.fromtimestamp(claimant_last_message_time) + claimant_last_message_time = Arrow.utcfromtimestamp(claimant_last_message_time) non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) if non_claimant_last_message_time: - non_claimant_last_message_time = datetime.fromtimestamp(non_claimant_last_message_time) + non_claimant_last_message_time = Arrow.utcfromtimestamp(non_claimant_last_message_time) else: # If it's falsey, then it indicates a non-claimant has yet to reply to this session. # Set to min date time so it isn't considered when calculating the closing time. 
- non_claimant_last_message_time = datetime.min + non_claimant_last_message_time = Arrow.min # Get the later time at which a channel should be closed non_claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_others) @@ -92,8 +95,8 @@ async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: claimed_timestamp = await _caches.claim_times.get(channel_id) if claimed_timestamp: - claimed = datetime.fromtimestamp(claimed_timestamp) - return datetime.utcnow() - claimed + claimed = Arrow.utcfromtimestamp(claimed_timestamp) + return arrow.utcnow() - claimed def is_excluded_channel(channel: discord.abc.GuildChannel) -> bool: diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 0e71661ac..832c9cd84 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -2,9 +2,10 @@ import asyncio import logging import random import typing as t -from datetime import datetime, timedelta +from datetime import timedelta from operator import attrgetter +import arrow import discord import discord.abc from discord.ext import commands @@ -72,7 +73,7 @@ class HelpChannels(commands.Cog): self.channel_queue: asyncio.Queue[discord.TextChannel] = None self.name_queue: t.Deque[str] = None - self.last_notification: t.Optional[datetime] = None + self.last_notification: t.Optional[arrow.Arrow] = None # Asyncio stuff self.queue_tasks: t.List[asyncio.Task] = [] @@ -114,9 +115,12 @@ class HelpChannels(commands.Cog): self.bot.stats.incr("help.claimed") - await _caches.claim_times.set(message.channel.id, message.created_at.timestamp()) - await _caches.claimant_last_message_times.set(message.channel.id, message.created_at.timestamp()) - # Reset thie non_claimant cache for this channel to indicate that this session has yet to be answered. + # datetime.timestamp() would assume it's local, despite d.py giving a (naïve) UTC time. + timestamp = arrow.Arrow.fromdatetime(message.created_at).timestamp() + + await _caches.claim_times.set(message.channel.id, timestamp) + await _caches.claimant_last_message_times.set(message.channel.id, timestamp) + # Delete to indicate that the help session has yet to receive an answer. await _caches.non_claimant_last_message_times.delete(message.channel.id) # Not awaited because it may indefinitely hold the lock while waiting for a channel. @@ -298,7 +302,7 @@ class HelpChannels(commands.Cog): # Closing time is in the past. # Add 1 second due to POSIX timestamps being lower resolution than datetime objects. - if closing_time < (datetime.utcnow() + timedelta(seconds=1)): + if closing_time < (arrow.utcnow() + timedelta(seconds=1)): log.info( f"#{channel} ({channel.id}) is idle past {closing_time} " @@ -311,7 +315,7 @@ class HelpChannels(commands.Cog): if has_task: self.scheduler.cancel(channel.id) - delay = (closing_time - datetime.utcnow()).seconds + delay = (closing_time - arrow.utcnow()).seconds log.info( f"#{channel} ({channel.id}) is still active; " f"scheduling it to be moved after {delay} seconds." 
diff --git a/bot/exts/help_channels/_message.py b/bot/exts/help_channels/_message.py index d60b31dea..afd698ffe 100644 --- a/bot/exts/help_channels/_message.py +++ b/bot/exts/help_channels/_message.py @@ -1,9 +1,10 @@ import logging import textwrap import typing as t -from datetime import datetime +import arrow import discord +from arrow import Arrow import bot from bot import constants @@ -51,13 +52,12 @@ async def update_message_caches(message: discord.Message) -> None: log.trace(f"Checking if #{channel} ({channel.id}) has had a reply.") claimant_id = await _caches.claimants.get(channel.id) - if not claimant_id: # The mapping for this channel doesn't exist, we can't do anything. return - # Use datetime naive time stamp to be consistant with timestamps from discord. - timestamp = message.created_at.timestamp() + # datetime.timestamp() would assume it's local, despite d.py giving a (naïve) UTC time. + timestamp = Arrow.fromdatetime(message.created_at).timestamp() # Overwrite the appropriate last message cache depending on the author of the message if message.author.id == claimant_id: @@ -128,12 +128,12 @@ async def dm_on_open(message: discord.Message) -> None: ) -async def notify(channel: discord.TextChannel, last_notification: t.Optional[datetime]) -> t.Optional[datetime]: +async def notify(channel: discord.TextChannel, last_notification: t.Optional[Arrow]) -> t.Optional[Arrow]: """ Send a message in `channel` notifying about a lack of available help channels. - If a notification was sent, return the `datetime` at which the message was sent. Otherwise, - return None. + If a notification was sent, return the time at which the message was sent. + Otherwise, return None. Configuration: @@ -147,7 +147,7 @@ async def notify(channel: discord.TextChannel, last_notification: t.Optional[dat log.trace("Notifying about lack of channels.") if last_notification: - elapsed = (datetime.utcnow() - last_notification).seconds + elapsed = (arrow.utcnow() - last_notification).seconds minimum_interval = constants.HelpChannels.notify_minutes * 60 should_send = elapsed >= minimum_interval else: @@ -170,7 +170,7 @@ async def notify(channel: discord.TextChannel, last_notification: t.Optional[dat allowed_mentions=discord.AllowedMentions(everyone=False, roles=allowed_roles) ) - return message.created_at + return Arrow.fromdatetime(message.created_at) except Exception: # Handle it here cause this feature isn't critical for the functionality of the system. log.exception("Failed to send notification about lack of dormant channels!") -- cgit v1.2.3 From feb9b22a40e311807eca5be58de04d0d95e85554 Mon Sep 17 00:00:00 2001 From: MarkKoz Date: Tue, 30 Mar 2021 11:47:36 -0700 Subject: HelpChannels: refactor get_closing_time --- bot/exts/help_channels/_caches.py | 5 +-- bot/exts/help_channels/_channel.py | 80 +++++++++++++++++++------------------- bot/exts/help_channels/_cog.py | 1 - 3 files changed, 43 insertions(+), 43 deletions(-) diff --git a/bot/exts/help_channels/_caches.py b/bot/exts/help_channels/_caches.py index 834c5f4c2..e741fd20f 100644 --- a/bot/exts/help_channels/_caches.py +++ b/bot/exts/help_channels/_caches.py @@ -12,9 +12,8 @@ claimants = RedisCache(namespace="HelpChannels.help_channel_claimants") # RedisCache[discord.TextChannel.id, UtcPosixTimestamp] claimant_last_message_times = RedisCache(namespace="HelpChannels.claimant_last_message_times") -# This cache maps a help channel to the timestamp of the last, non-claimant, -# message. 
This cache being empty for a given help channel indicates the -# question is unanswered. +# This cache maps a help channel to the timestamp of the last non-claimant message. +# This cache being empty for a given help channel indicates the question is unanswered. # RedisCache[discord.TextChannel.id, UtcPosixTimestamp] non_claimant_last_message_times = RedisCache(namespace="HelpChannels.non_claimant_last_message_times") diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 719d341bd..8af059830 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -28,65 +28,67 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[Arrow, str]: - """Return the time at which the given help `channel` should be closed along with the reason.""" + """ + Return the time at which the given help `channel` should be closed along with the reason. + + `init_done` is True if the cog has finished loading and False otherwise. + + The time is calculated as follows: + + * If `init_done` is True or the cached time for the claimant's last message is unavailable, + add the configured `idle_minutes_claimant` to the time the most recent message was sent. + * If the help session is empty (see `is_empty`), do the above but with `deleted_idle_minutes`. + * If either of the above is attempted but the channel is completely empty, close the channel + immediately. + * Otherwise, retrieve the times of the claimant's and non-claimant's last messages from the + cache. Add the configured `idle_minutes_claimant` and idle_minutes_others`, respectively, and + choose the time which is furthest in the future. + """ log.trace(f"Getting the closing time for #{channel} ({channel.id}).") is_empty = await _message.is_empty(channel) - if is_empty: idle_minutes_claimant = constants.HelpChannels.deleted_idle_minutes else: idle_minutes_claimant = constants.HelpChannels.idle_minutes_claimant - claimant_last_message_time = await _caches.claimant_last_message_times.get(channel.id) - - if ( - is_empty - or not init_done - or claimant_last_message_time is None - ): - # If the current help channel has no messages, the help system cog is starting or - # the claimant cache is empty, use the last message in the channel to determine closing time instead. + claimant_time = await _caches.claimant_last_message_times.get(channel.id) + # The current session lacks messages, the cog is still starting, or the cache is empty. + if is_empty or not init_done or claimant_time is None: msg = await _message.get_last_message(channel) - if not msg: - # Last message can't be retrieved, return datetime.min so channel closes right now. log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages, closing now.") return Arrow.min, "deleted" - # The time at which a channel should be closed. + # Use the greatest offset to avoid the possibility of prematurely closing the channel. 
time = Arrow.fromdatetime(msg.created_at) + timedelta(minutes=idle_minutes_claimant) return time, "latest_message" - # Switch to datetime objects so we can use time deltas - claimant_last_message_time = Arrow.utcfromtimestamp(claimant_last_message_time) - non_claimant_last_message_time = await _caches.non_claimant_last_message_times.get(channel.id) + claimant_time = Arrow.utcfromtimestamp(claimant_time) + others_time = await _caches.non_claimant_last_message_times.get(channel.id) - if non_claimant_last_message_time: - non_claimant_last_message_time = Arrow.utcfromtimestamp(non_claimant_last_message_time) + if others_time: + others_time = Arrow.utcfromtimestamp(others_time) else: - # If it's falsey, then it indicates a non-claimant has yet to reply to this session. - # Set to min date time so it isn't considered when calculating the closing time. - non_claimant_last_message_time = Arrow.min - - # Get the later time at which a channel should be closed - non_claimant_last_message_time += timedelta(minutes=constants.HelpChannels.idle_minutes_others) - claimant_last_message_time += timedelta(minutes=idle_minutes_claimant) - - # The further away closing time is what we should use. - if claimant_last_message_time >= non_claimant_last_message_time: - log.trace( - f"#{channel} ({channel.id}) should be closed at " - f"{claimant_last_message_time} due to claimant timeout." - ) - return claimant_last_message_time, "claimant_timeout" + # The help session hasn't received any answers (messages from non-claimants) yet. + # Set to min value so it isn't considered when calculating the closing time. + others_time = Arrow.min + + # Offset the cached times by the configured values. + others_time += timedelta(minutes=constants.HelpChannels.idle_minutes_others) + claimant_time += timedelta(minutes=idle_minutes_claimant) + + # Use the time which is the furthest into the future. + if claimant_time >= others_time: + closing_time = claimant_time + reason = "claimant_timeout" else: - log.trace( - f"#{channel} ({channel.id}) should be closed at " - f"{non_claimant_last_message_time} due to others timeout." - ) - return non_claimant_last_message_time, "others_timeout" + closing_time = others_time + reason = "others_timeout" + + log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time} due to {reason}.") + return closing_time, reason async def get_in_use_time(channel_id: int) -> t.Optional[timedelta]: diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 832c9cd84..183ee8a9b 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -303,7 +303,6 @@ class HelpChannels(commands.Cog): # Closing time is in the past. # Add 1 second due to POSIX timestamps being lower resolution than datetime objects. if closing_time < (arrow.utcnow() + timedelta(seconds=1)): - log.info( f"#{channel} ({channel.id}) is idle past {closing_time} " f"and will be made dormant. 
Reason: {closed_on}" -- cgit v1.2.3 From 96037aa80717420d3a3b077b38e8cff571488ba8 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 30 Mar 2021 20:48:37 +0100 Subject: Enumerate all possible values for closed_on in docstring --- bot/exts/help_channels/_cog.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 183ee8a9b..658e50201 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -366,7 +366,10 @@ class HelpChannels(commands.Cog): Remove the cooldown role from the channel claimant if they have no other channels claimed. Cancel the scheduled cooldown role removal task. - `closed_on` is the reason that the channel was closed for. Examples: "cleanup", "command", "claimant_timeout" + `closed_on` is the reason that the channel was closed for. Possible values for this are: + "cleanup", "command", "claimant_timeout", "others_timeout", "deleted". + All values, except for "command", get prefixed with "auto." within `_stats.report_complete_session()` + before being added to the bot's stats. """ claimant_id = await _caches.claimants.get(channel.id) _unclaim_channel = self._unclaim_channel -- cgit v1.2.3 From b030711b69a50c2b2c068865b1987d61f8267f1c Mon Sep 17 00:00:00 2001 From: Den4200 Date: Tue, 30 Mar 2021 15:58:05 -0400 Subject: Remove `dm_log` from channels and webhooks in the config. --- bot/constants.py | 2 -- config-default.yml | 2 -- 2 files changed, 4 deletions(-) diff --git a/bot/constants.py b/bot/constants.py index 467a4a2c4..4040d757e 100644 --- a/bot/constants.py +++ b/bot/constants.py @@ -414,7 +414,6 @@ class Channels(metaclass=YAMLGetter): cooldown: int attachment_log: int - dm_log: int message_log: int mod_log: int user_log: int @@ -466,7 +465,6 @@ class Webhooks(metaclass=YAMLGetter): big_brother: int dev_log: int - dm_log: int duck_pond: int incidents_archive: int reddit: int diff --git a/config-default.yml b/config-default.yml index 4e7060ea4..39b33ca10 100644 --- a/config-default.yml +++ b/config-default.yml @@ -169,7 +169,6 @@ guild: # Logs attachment_log: &ATTACH_LOG 649243850006855680 - dm_log: 653713721625018428 message_log: &MESSAGE_LOG 467752170159079424 mod_log: &MOD_LOG 282638479504965634 user_log: 528976905546760203 @@ -287,7 +286,6 @@ guild: webhooks: big_brother: 569133704568373283 dev_log: 680501655111729222 - dm_log: 654567640664244225 duck_pond: 637821475327311927 incidents_archive: 720671599790915702 python_news: &PYNEWS_WEBHOOK 704381182279942324 -- cgit v1.2.3 From d77af18516a030dd039014e2fcf5bb56e09e8358 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Tue, 30 Mar 2021 16:00:21 -0400 Subject: Replace usage of `textwrap.dedent` with a cleaner method. 
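The "cleaner method" in the diff below is plain adjacent f-string literals in place of a dedented triple-quoted string. A small sketch with made-up placeholder values (standing in for `user`, `user.id` and `user.dm_channel.id`) shows that both forms build the same text:

```py
import textwrap

user = "lemon#0001"       # placeholder for the discord.User
user_id = 1234567890      # placeholder for user.id
channel_id = 9876543210   # placeholder for user.dm_channel.id

# Old approach: triple-quoted f-string plus textwrap.dedent() to strip the indentation.
dedented = textwrap.dedent(f"""\
    User: {user} ({user_id})
    Channel ID: {channel_id}

""")

# New approach: adjacent string literals are concatenated at compile time, so the
# intended text (newlines included) can be written out directly.
concatenated = (
    f"User: {user} ({user_id})\n"
    f"Channel ID: {channel_id}\n\n"
)

assert dedented == concatenated
```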
--- bot/exts/moderation/dm_relay.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index a03230b3d..68a48c67c 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -1,5 +1,4 @@ import logging -import textwrap import discord from discord.ext.commands import Cog, Context, command, has_any_role @@ -50,10 +49,10 @@ class DMRelay(Cog): await ctx.send(f"{Emojis.cross_mark} No direct message history with {user.mention}.") return - metadata = textwrap.dedent(f"""\ - User: {user} ({user.id}) - Channel ID: {user.dm_channel.id}\n - """) + metadata = ( + f"User: {user} ({user.id})\n" + f"Channel ID: {user.dm_channel.id}\n\n" + ) paste_link = await send_to_paste_service(metadata + output, extension="txt") await ctx.send(paste_link) -- cgit v1.2.3 From 2334c95d8781df829ac2ec1a1c5abb2b4d776586 Mon Sep 17 00:00:00 2001 From: Den4200 Date: Tue, 30 Mar 2021 16:06:23 -0400 Subject: Gracefully handle failure to upload to hastebin in `!dmrelay`. --- bot/exts/moderation/dm_relay.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bot/exts/moderation/dm_relay.py b/bot/exts/moderation/dm_relay.py index 68a48c67c..1d2206e27 100644 --- a/bot/exts/moderation/dm_relay.py +++ b/bot/exts/moderation/dm_relay.py @@ -55,6 +55,11 @@ class DMRelay(Cog): ) paste_link = await send_to_paste_service(metadata + output, extension="txt") + + if paste_link is None: + await ctx.send(f"{Emojis.cross_mark} Failed to upload output to hastebin.") + return + await ctx.send(paste_link) async def cog_check(self, ctx: Context) -> bool: -- cgit v1.2.3 From 2621f2488409ae4a1cc4b036249861e1bd10b1c7 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 30 Mar 2021 21:20:01 +0100 Subject: Change to an Enum for possible closing reasons --- bot/exts/help_channels/_channel.py | 22 +++++++++++++++++----- bot/exts/help_channels/_cog.py | 22 ++++++++++++---------- bot/exts/help_channels/_stats.py | 8 +++----- 3 files changed, 32 insertions(+), 20 deletions(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 8af059830..b8db337fc 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -1,6 +1,7 @@ import logging import typing as t from datetime import timedelta +from enum import Enum import arrow import discord @@ -17,6 +18,17 @@ MAX_CHANNELS_PER_CATEGORY = 50 EXCLUDED_CHANNELS = (constants.Channels.cooldown,) +class ClosingReason(Enum): + """All possible closing reasons for help channels.""" + + COMMAND = "command" + LATEST_MESSSAGE = "auto.latest_message" + CLAIMANT_TIMEOUT = "auto.claimant_timeout" + OTHER_TIMEOUT = "auto.other_timeout" + DELETED = "auto.deleted" + CLEANUP = "auto.deleted" + + def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[discord.TextChannel]: """Yield the text channels of the `category` in an unsorted manner.""" log.trace(f"Getting text channels in the category '{category}' ({category.id}).") @@ -27,7 +39,7 @@ def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[disco yield channel -async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[Arrow, str]: +async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.Tuple[Arrow, ClosingReason]: """ Return the time at which the given help `channel` should be closed along with the reason. 
@@ -59,11 +71,11 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T msg = await _message.get_last_message(channel) if not msg: log.debug(f"No idle time available; #{channel} ({channel.id}) has no messages, closing now.") - return Arrow.min, "deleted" + return Arrow.min, ClosingReason.DELETED # Use the greatest offset to avoid the possibility of prematurely closing the channel. time = Arrow.fromdatetime(msg.created_at) + timedelta(minutes=idle_minutes_claimant) - return time, "latest_message" + return time, ClosingReason.LATEST_MESSSAGE claimant_time = Arrow.utcfromtimestamp(claimant_time) others_time = await _caches.non_claimant_last_message_times.get(channel.id) @@ -82,10 +94,10 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T # Use the time which is the furthest into the future. if claimant_time >= others_time: closing_time = claimant_time - reason = "claimant_timeout" + reason = ClosingReason.CLAIMANT_TIMEOUT else: closing_time = others_time - reason = "others_timeout" + reason = ClosingReason.OTHER_TIMEOUT log.trace(f"#{channel} ({channel.id}) should be closed at {closing_time} due to {reason}.") return closing_time, reason diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py index 658e50201..18457f6a5 100644 --- a/bot/exts/help_channels/_cog.py +++ b/bot/exts/help_channels/_cog.py @@ -192,7 +192,7 @@ class HelpChannels(commands.Cog): # Don't use a discord.py check because the check needs to fail silently. if await self.close_check(ctx): log.info(f"Close command invoked by {ctx.author} in #{ctx.channel}.") - await self.unclaim_channel(ctx.channel, closed_on="command") + await self.unclaim_channel(ctx.channel, closed_on=_channel.ClosingReason.COMMAND) async def get_available_candidate(self) -> discord.TextChannel: """ @@ -238,7 +238,7 @@ class HelpChannels(commands.Cog): elif missing < 0: log.trace(f"Moving {abs(missing)} superfluous available channels over to the Dormant category.") for channel in channels[:abs(missing)]: - await self.unclaim_channel(channel, closed_on="cleanup") + await self.unclaim_channel(channel, closed_on=_channel.ClosingReason.CLEANUP) async def init_categories(self) -> None: """Get the help category objects. Remove the cog if retrieval fails.""" @@ -305,7 +305,7 @@ class HelpChannels(commands.Cog): if closing_time < (arrow.utcnow() + timedelta(seconds=1)): log.info( f"#{channel} ({channel.id}) is idle past {closing_time} " - f"and will be made dormant. Reason: {closed_on}" + f"and will be made dormant. Reason: {closed_on.value}" ) await self.unclaim_channel(channel, closed_on=closed_on) @@ -358,7 +358,7 @@ class HelpChannels(commands.Cog): _stats.report_counts() @lock.lock_arg(f"{NAMESPACE}.unclaim", "channel") - async def unclaim_channel(self, channel: discord.TextChannel, *, closed_on: str) -> None: + async def unclaim_channel(self, channel: discord.TextChannel, *, closed_on: _channel.ClosingReason) -> None: """ Unclaim an in-use help `channel` to make it dormant. @@ -366,10 +366,7 @@ class HelpChannels(commands.Cog): Remove the cooldown role from the channel claimant if they have no other channels claimed. Cancel the scheduled cooldown role removal task. - `closed_on` is the reason that the channel was closed for. Possible values for this are: - "cleanup", "command", "claimant_timeout", "others_timeout", "deleted". - All values, except for "command", get prefixed with "auto." within `_stats.report_complete_session()` - before being added to the bot's stats. 
+ `closed_on` is the reason that the channel was closed. See _channel.ClosingReason for possible values. """ claimant_id = await _caches.claimants.get(channel.id) _unclaim_channel = self._unclaim_channel @@ -382,7 +379,12 @@ class HelpChannels(commands.Cog): return await _unclaim_channel(channel, claimant_id, closed_on) - async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, closed_on: str) -> None: + async def _unclaim_channel( + self, + channel: discord.TextChannel, + claimant_id: int, + closed_on: _channel.ClosingReason + ) -> None: """Actual implementation of `unclaim_channel`. See that for full documentation.""" await _caches.claimants.delete(channel.id) @@ -403,7 +405,7 @@ class HelpChannels(commands.Cog): # Cancel the task that makes the channel dormant only if called by the close command. # In other cases, the task is either already done or not-existent. - if closed_on == "command": + if closed_on == _channel.ClosingReason.COMMAND: self.scheduler.cancel(channel.id) async def move_to_in_use(self, channel: discord.TextChannel) -> None: diff --git a/bot/exts/help_channels/_stats.py b/bot/exts/help_channels/_stats.py index 123604945..eb34e75e1 100644 --- a/bot/exts/help_channels/_stats.py +++ b/bot/exts/help_channels/_stats.py @@ -22,15 +22,13 @@ def report_counts() -> None: log.warning(f"Couldn't find category {name!r} to track channel count stats.") -async def report_complete_session(channel_id: int, closed_on: str) -> None: +async def report_complete_session(channel_id: int, closed_on: _channel.ClosingReason) -> None: """ Report stats for a completed help session channel `channel_id`. - Set `is_auto` to True if the channel was automatically closed or False if manually closed. + `closed_on` is the reason why the channel was closed. See `_channel.ClosingReason` for possible reasons. """ - if closed_on != "command": - closed_on = f"auto.{closed_on}" - bot.instance.stats.incr(f"help.dormant_calls.{closed_on}") + bot.instance.stats.incr(f"help.dormant_calls.{closed_on.value}") in_use_time = await _channel.get_in_use_time(channel_id) if in_use_time: -- cgit v1.2.3 From 0b4e48883949213260733c3263b4067531f785ea Mon Sep 17 00:00:00 2001 From: Den4200 Date: Tue, 30 Mar 2021 16:21:16 -0400 Subject: Removed user event announcements from the config. 
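The channel is defined in `config-default.yml` with a YAML anchor (`&USER_EVENT_A`) and referenced elsewhere through aliases (`*USER_EVENT_A`), so the definition and every alias have to go in the same change. A minimal sketch using PyYAML (already a project dependency; the keys are simplified stand-ins for the real file's layout) shows why a leftover alias would make the config unloadable, as the diff below avoids:

```py
import yaml

# An alias resolves to whatever value its anchor was bound to when the file loads.
intact = """
channels:
  user_event_announcements: &USER_EVENT_A 592000283102674944
duck_pond:
  channel_blacklist:
    - *USER_EVENT_A
"""
print(yaml.safe_load(intact)["duck_pond"]["channel_blacklist"])  # [592000283102674944]

# Removing the anchored entry while an alias still points at it breaks loading,
# which is why the filter and duck_pond lists are pruned in the same commit.
dangling = """
duck_pond:
  channel_blacklist:
    - *USER_EVENT_A
"""
try:
    yaml.safe_load(dangling)
except yaml.YAMLError as exc:
    print(f"{type(exc).__name__}: {exc}")  # ComposerError: found undefined alias ...
```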
--- bot/constants.py | 1 - config-default.yml | 3 --- 2 files changed, 4 deletions(-) diff --git a/bot/constants.py b/bot/constants.py index 4040d757e..91b60b9a6 100644 --- a/bot/constants.py +++ b/bot/constants.py @@ -402,7 +402,6 @@ class Channels(metaclass=YAMLGetter): python_events: int python_news: int reddit: int - user_event_announcements: int dev_contrib: int dev_core: int diff --git a/config-default.yml b/config-default.yml index 39b33ca10..e3fd98ac0 100644 --- a/config-default.yml +++ b/config-default.yml @@ -150,7 +150,6 @@ guild: python_events: &PYEVENTS_CHANNEL 729674110270963822 python_news: &PYNEWS_CHANNEL 704372456592506880 reddit: &REDDIT_CHANNEL 458224812528238616 - user_event_announcements: &USER_EVENT_A 592000283102674944 # Development dev_contrib: &DEV_CONTRIB 635950537262759947 @@ -322,7 +321,6 @@ filter: - *MOD_LOG - *STAFF_LOUNGE - *TALENT_POOL - - *USER_EVENT_A role_whitelist: - *ADMINS_ROLE @@ -511,7 +509,6 @@ duck_pond: - *PYEVENTS_CHANNEL - *MAILING_LISTS - *REDDIT_CHANNEL - - *USER_EVENT_A - *DUCK_POND - *CHANGE_LOG - *STAFF_ANNOUNCEMENTS -- cgit v1.2.3 From 57fb66d5b01dabee9d57efdeee419263b7ecd983 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 30 Mar 2021 21:27:44 +0100 Subject: Fix copy & paste error in closing reason enum --- bot/exts/help_channels/_channel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index b8db337fc..2837bc7c5 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -26,7 +26,7 @@ class ClosingReason(Enum): CLAIMANT_TIMEOUT = "auto.claimant_timeout" OTHER_TIMEOUT = "auto.other_timeout" DELETED = "auto.deleted" - CLEANUP = "auto.deleted" + CLEANUP = "auto.cleanup" def get_category_channels(category: discord.CategoryChannel) -> t.Iterable[discord.TextChannel]: -- cgit v1.2.3 From 74eed3e9d39a203b6abfa2ea45ef00286f3639b1 Mon Sep 17 00:00:00 2001 From: Chris Date: Tue, 30 Mar 2021 22:26:37 +0100 Subject: Use deleted reason if help channel is closed due to being empty --- bot/exts/help_channels/_channel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bot/exts/help_channels/_channel.py b/bot/exts/help_channels/_channel.py index 2837bc7c5..0846b28c8 100644 --- a/bot/exts/help_channels/_channel.py +++ b/bot/exts/help_channels/_channel.py @@ -75,7 +75,8 @@ async def get_closing_time(channel: discord.TextChannel, init_done: bool) -> t.T # Use the greatest offset to avoid the possibility of prematurely closing the channel. time = Arrow.fromdatetime(msg.created_at) + timedelta(minutes=idle_minutes_claimant) - return time, ClosingReason.LATEST_MESSSAGE + reason = ClosingReason.DELETED if is_empty else ClosingReason.LATEST_MESSSAGE + return time, reason claimant_time = Arrow.utcfromtimestamp(claimant_time) others_time = await _caches.non_claimant_last_message_times.get(channel.id) -- cgit v1.2.3 From a14dd82dd672b123f4cf00b6324be5bb79528cdd Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 31 Mar 2021 19:59:18 +0200 Subject: Branding: target 'main' branch With the branding-side PR merged, we can now target the production branch. 
--- bot/exts/backend/branding/_repository.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 3a9745ed5..e6c2396b1 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -9,9 +9,9 @@ from bot.constants import Keys from bot.errors import BrandingMisconfiguration # Base URL for requests into the branding repository. -BRANDING_URL = "https://api.github.com/repos/kwzrd/pydis-branding/contents" +BRANDING_URL = "https://api.github.com/repos/python-discord/branding/contents" -PARAMS = {"ref": "kwzrd/events-rework"} # Target branch. +PARAMS = {"ref": "main"} # Target branch. HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3. # A GitHub token is not necessary. However, unauthorized requests are limited to 60 per hour. -- cgit v1.2.3 From 220590c7bb6593b06d1796f0807568e06fefa99e Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 31 Mar 2021 20:17:25 +0200 Subject: Branding: apply documentation improvements after review No code changes in this commit. Co-authored-by: Shivansh-007 Co-authored-by: Joe Banks --- bot/exts/backend/branding/_cog.py | 14 +++++++------- bot/exts/backend/branding/_repository.py | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py index b07edbffd..0a4ddcc88 100644 --- a/bot/exts/backend/branding/_cog.py +++ b/bot/exts/backend/branding/_cog.py @@ -144,7 +144,7 @@ class Branding(commands.Cog): timeout = 10 # Seconds. try: - with async_timeout.timeout(timeout): + with async_timeout.timeout(timeout): # Raise after `timeout` seconds. await pydis.edit(**{asset_type.value: file}) except discord.HTTPException: log.exception("Asset upload to Discord failed.") @@ -160,7 +160,7 @@ class Branding(commands.Cog): """ Apply `banner` to the guild and cache its hash if successful. - Banners should always be applied via this method in order to ensure that the last hash is cached. + Banners should always be applied via this method to ensure that the last hash is cached. Return a boolean indicating whether the application was successful. """ @@ -217,9 +217,9 @@ class Branding(commands.Cog): """ Call `rotate_icons` if the configured amount of time has passed since last rotation. - We offset the calculated time difference into the future in order to avoid off-by-a-little-bit errors. - Because there is work to be done before the timestamp is read and written, the next read will likely - commence slightly under 24 hours after the last write. + We offset the calculated time difference into the future to avoid off-by-a-little-bit errors. Because there + is work to be done before the timestamp is read and written, the next read will likely commence slightly + under 24 hours after the last write. """ log.debug("Checking whether it's time for icons to rotate.") @@ -298,7 +298,7 @@ class Branding(commands.Cog): We cache `event` information to ensure that we: * Remember which event we're currently in across restarts - * Provide an on-demand information embed without re-querying the branding repository + * Provide an on-demand informational embed without re-querying the branding repository An event change should always be handled via this function, as it ensures that the cache is populated. 
@@ -487,7 +487,7 @@ class Branding(commands.Cog): log.trace("Daemon before: calculating time to sleep before loop begins.") now = datetime.utcnow() - # The actual midnight moment is offset into the future in order to prevent issues with imprecise sleep. + # The actual midnight moment is offset into the future to prevent issues with imprecise sleep. tomorrow = now + timedelta(days=1) midnight = datetime.combine(tomorrow, time(minute=1)) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index e6c2396b1..740a4a083 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -19,7 +19,7 @@ if Keys.github: HEADERS["Authorization"] = f"token {Keys.github}" # Since event periods are year-agnostic, we parse them into `datetime` objects with a manually inserted year. -# Please note that this is intentionally a leap year in order to allow Feb 29 to be valid. +# Please note that this is intentionally a leap year to allow Feb 29 to be valid. ARBITRARY_YEAR = 2020 # Format used to parse date strings after we inject `ARBITRARY_YEAR` at the end. -- cgit v1.2.3 From b778c25427108f4ffb20328c5977618e3f97c523 Mon Sep 17 00:00:00 2001 From: kwzrd Date: Wed, 31 Mar 2021 20:19:28 +0200 Subject: Branding: log after successful fetch Co-authored-by: Shivansh-007 Co-authored-by: Joe Banks --- bot/exts/backend/branding/_repository.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py index 740a4a083..7b09d4641 100644 --- a/bot/exts/backend/branding/_repository.py +++ b/bot/exts/backend/branding/_repository.py @@ -107,6 +107,8 @@ class BrandingRepository: async with self.bot.http_session.get(full_url, params=PARAMS, headers=HEADERS) as response: if response.status != 200: raise RuntimeError(f"Failed to fetch directory due to status: {response.status}") + + log.debug("Fetch successful, reading JSON response.") json_directory = await response.json() return {file["name"]: RemoteObject(file) for file in json_directory if file["type"] in types} @@ -122,6 +124,8 @@ class BrandingRepository: async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response: if response.status != 200: raise RuntimeError(f"Failed to fetch file due to status: {response.status}") + + log.debug("Fetch successful, reading payload.") return await response.read() def parse_meta_file(self, raw_file: bytes) -> MetaFile: -- cgit v1.2.3 From 5da727234595fe7fb19fc04efa5ba1984328e06a Mon Sep 17 00:00:00 2001 From: Matteo Bertucci Date: Thu, 1 Apr 2021 08:37:22 +0000 Subject: Update policy documents --- CODE_OF_CONDUCT.md | 3 ++ CONTRIBUTING.md | 124 +---------------------------------------------------- SECURITY.md | 3 ++ 3 files changed, 8 insertions(+), 122 deletions(-) create mode 100644 CODE_OF_CONDUCT.md create mode 100644 SECURITY.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..57ccd80e7 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Code of Conduct + +The Python Discord Code of Conduct can be found [on our website](https://pydis.com/coc). 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index addab32ff..f20b53162 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,123 +1,3 @@ -# Contributing to one of Our Projects +# Contributing Guidelines -Our projects are open-source and are automatically deployed whenever commits are pushed to the `main` branch on each repository, so we've created a set of guidelines in order to keep everything clean and in working order. - -Note that contributions may be rejected on the basis of a contributor failing to follow these guidelines. - -## Rules - -1. **No force-pushes** or modifying the Git history in any way. -2. If you have direct access to the repository, **create a branch for your changes** and create a pull request for that branch. If not, create a branch on a fork of the repository and create a pull request from there. - * It's common practice for a repository to reject direct pushes to `main`, so make branching a habit! - * If PRing from your own fork, **ensure that "Allow edits from maintainers" is checked**. This gives permission for maintainers to commit changes directly to your fork, speeding up the review process. -3. **Adhere to the prevailing code style**, which we enforce using [`flake8`](http://flake8.pycqa.org/en/latest/index.html) and [`pre-commit`](https://pre-commit.com/). - * Run `flake8` and `pre-commit` against your code [**before** you push it](https://soundcloud.com/lemonsaurusrex/lint-before-you-push). Your commit will be rejected by the build server if it fails to lint. - * [Git Hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) are a powerful git feature for executing custom scripts when certain important git actions occur. The pre-commit hook is the first hook executed during the commit process and can be used to check the code being committed & abort the commit if issues, such as linting failures, are detected. While git hooks can seem daunting to configure, the `pre-commit` framework abstracts this process away from you and is provided as a dev dependency for this project. Run `pipenv run precommit` when setting up the project and you'll never have to worry about committing code that fails linting. -4. **Make great commits**. A well structured git log is key to a project's maintainability; it efficiently provides insight into when and *why* things were done for future maintainers of the project. - * Commits should be as narrow in scope as possible. Commits that span hundreds of lines across multiple unrelated functions and/or files are very hard for maintainers to follow. After about a week they'll probably be hard for you to follow too. - * Avoid making minor commits for fixing typos or linting errors. Since you've already set up a `pre-commit` hook to run the linting pipeline before a commit, you shouldn't be committing linting issues anyway. - * A more in-depth guide to writing great commit messages can be found in Chris Beam's [*How to Write a Git Commit Message*](https://chris.beams.io/posts/git-commit/) -5. **Avoid frequent pushes to the main repository**. This goes for PRs opened against your fork as well. Our test build pipelines are triggered every time a push to the repository (or PR) is made. Try to batch your commits until you've finished working for that session, or you've reached a point where collaborators need your commits to continue their own work. This also provides you the opportunity to amend commits for minor changes rather than having to commit them on their own because you've already pushed. 
- * This includes merging main into your branch. Try to leave merging from main for after your PR passes review; a maintainer will bring your PR up to date before merging. Exceptions to this include: resolving merge conflicts, needing something that was pushed to main for your branch, or something was pushed to main that could potentionally affect the functionality of what you're writing. -6. **Don't fight the framework**. Every framework has its flaws, but the frameworks we've picked out have been carefully chosen for their particular merits. If you can avoid it, please resist reimplementing swathes of framework logic - the work has already been done for you! -7. If someone is working on an issue or pull request, **do not open your own pull request for the same task**. Instead, collaborate with the author(s) of the existing pull request. Duplicate PRs opened without communicating with the other author(s) and/or PyDis staff will be closed. Communication is key, and there's no point in two separate implementations of the same thing. - * One option is to fork the other contributor's repository and submit your changes to their branch with your own pull request. We suggest following these guidelines when interacting with their repository as well. - * The author(s) of inactive PRs and claimed issues will be be pinged after a week of inactivity for an update. Continued inactivity may result in the issue being released back to the community and/or PR closure. -8. **Work as a team** and collaborate wherever possible. Keep things friendly and help each other out - these are shared projects and nobody likes to have their feet trodden on. -9. All static content, such as images or audio, **must be licensed for open public use**. - * Static content must be hosted by a service designed to do so. Failing to do so is known as "leeching" and is frowned upon, as it generates extra bandwidth costs to the host without providing benefit. It would be best if appropriately licensed content is added to the repository itself so it can be served by PyDis' infrastructure. - -Above all, the needs of our community should come before the wants of an individual. Work together, build solutions to problems and try to do so in a way that people can learn from easily. Abuse of our trust may result in the loss of your Contributor role. - -## Changes to this Arrangement - -All projects evolve over time, and this contribution guide is no different. This document is open to pull requests or changes by contributors. If you believe you have something valuable to add or change, please don't hesitate to do so in a PR. - -## Supplemental Information -### Developer Environment -Instructions for setting the bot developer environment can be found on the [PyDis wiki](https://pythondiscord.com/pages/contributing/bot/) - -To provide a standalone development environment for this project, docker compose is utilized to pull the current version of the [site backend](https://github.com/python-discord/site). While appropriate for bot-only contributions, any contributions that necessitate backend changes will require the site repository to be appropriately configured as well. Instructions for setting up the site environment can be found on the [PyDis site](https://pythondiscord.com/pages/contributing/site/). - -When pulling down changes from GitHub, remember to sync your environment using `pipenv sync --dev` to ensure you're using the most up-to-date versions the project's dependencies. 
-
-### Type Hinting
-[PEP 484](https://www.python.org/dev/peps/pep-0484/) formally specifies type hints for Python functions, added to the Python Standard Library in version 3.5. Type hints are recognized by most modern code editing tools and provide useful insight into both the input and output types of a function, preventing the user from having to go through the codebase to determine these types.
-
-For example:
-
-```py
-import typing as t
-
-
-def foo(input_1: int, input_2: t.Dict[str, str]) -> bool:
-    ...
-```
-
-This tells us that `foo` accepts an `int` and a `dict`, with `str` keys and values, and returns a `bool`.
-
-All function declarations should be type hinted in code contributed to the PyDis organization.
-
-For more information, see *[PEP 483](https://www.python.org/dev/peps/pep-0483/) - The Theory of Type Hints* and Python's documentation for the [`typing`](https://docs.python.org/3/library/typing.html) module.
-
-### AutoDoc Formatting Directives
-Many documentation packages provide support for automatic documentation generation from the codebase's docstrings. These tools utilize special formatting directives to enable richer formatting in the generated documentation.
-
-For example:
-
-```py
-import typing as t
-
-
-def foo(bar: int, baz: t.Optional[t.Dict[str, str]] = None) -> bool:
-    """
-    Does some things with some stuff.
-
-    :param bar: Some input
-    :param baz: Optional, some dictionary with string keys and values
-
-    :return: Some boolean
-    """
-    ...
-```
-
-Since PyDis does not utilize automatic documentation generation, this syntax should not be used in code contributed to the organization. Should the purpose and type of the input variables not be easily discernible from the variable name and type annotation, a prose explanation can be used instead. Explicit references to variables, functions, classes, etc. should be wrapped with backticks (`` ` ``).
-
-For example, the above docstring would become:
-
-```py
-import typing as t
-
-
-def foo(bar: int, baz: t.Optional[t.Dict[str, str]] = None) -> bool:
-    """
-    Does some things with some stuff.
-
-    This function takes an index, `bar`, and checks for its presence in the database `baz`, passed as a dictionary. Returns `False` if `baz` is not passed.
-    """
-    ...
-```
-
-### Logging Levels
-The project currently defines [`logging`](https://docs.python.org/3/library/logging.html) levels as follows, from lowest to highest severity:
-* **TRACE:** These events should be used to provide a *verbose* trace of every step of a complex process. This is essentially the `logging` equivalent of sprinkling `print` statements throughout the code.
-    * **Note:** This is a PyDis-implemented logging level.
-* **DEBUG:** These events should add context to what's happening in a development setup to make it easier to follow what's going on while working on a project. This is in the same vein as **TRACE** logging but at a much lower level of verbosity.
-* **INFO:** These events are normal and don't need direct attention but are worth keeping track of in production, like checking which cogs were loaded during start-up.
-* **WARNING:** These events are out of the ordinary and should be fixed, but have not caused a failure.
-    * **NOTE:** Events at this logging level and higher should be reserved for events that require the attention of the DevOps team.
-* **ERROR:** These events have caused a failure in a specific part of the application and require urgent attention.
-* **CRITICAL:** These events have caused the whole application to fail and require immediate intervention.
-
-Ensure that log messages are succinct. Should you want to pass additional useful information that would otherwise make the log message overly verbose, the `logging` module accepts an `extra` kwarg, which can be used to pass a dictionary. This is used to populate the `__dict__` of the `LogRecord` created for the logging event with user-defined attributes that can be accessed by a log handler. Additional information and caveats may be found [in Python's `logging` documentation](https://docs.python.org/3/library/logging.html#logging.Logger.debug).
-
-### Work in Progress (WIP) PRs
-GitHub [provides a PR feature](https://github.blog/2019-02-14-introducing-draft-pull-requests/) that allows the author to mark a PR as a WIP. This provides both a visual and functional indicator that the contents of the PR are in a draft state and not yet ready for formal review.
-
-This feature should be utilized in place of the traditional method of prepending `[WIP]` to the PR title.
-
-As stated earlier, **ensure that "Allow edits from maintainers" is checked**. This gives permission for maintainers to commit changes directly to your fork, speeding up the review process.
-
-## Footnotes
-
-This document was inspired by the [Glowstone contribution guidelines](https://github.com/GlowstoneMC/Glowstone/blob/dev/docs/CONTRIBUTING.md).
+The Contributing Guidelines for Python Discord projects can be found [on our website](https://pydis.com/contributing.md).
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000..fa5a88a39
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Notice
+
+The Security Notice for Python Discord projects can be found [on our website](https://pydis.com/security.md).
-- cgit v1.2.3

From 7175b9d587eed22d65ca74fc3f455303daabbbd1 Mon Sep 17 00:00:00 2001
From: Boris Muratov <8bee278@gmail.com>
Date: Sun, 4 Apr 2021 08:24:45 +0300
Subject: Blacklist staff_info for duckpond

---
 config-default.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/config-default.yml b/config-default.yml
index 6dec75fdc..c5e830ec4 100644
--- a/config-default.yml
+++ b/config-default.yml
@@ -200,6 +200,7 @@ guild:
         nomination_voting: 822853512709931008
         organisation: &ORGANISATION 551789653284356126
         staff_lounge: &STAFF_LOUNGE 464905259261755392
+        staff_info: &STAFF_INFO 396684402404622347
 
         # Staff announcement channels
         admin_announcements: &ADMIN_ANNOUNCEMENTS 749736155569848370
@@ -520,6 +521,7 @@ duck_pond:
         - *STAFF_ANNOUNCEMENTS
         - *MOD_ANNOUNCEMENTS
         - *ADMIN_ANNOUNCEMENTS
+        - *STAFF_INFO
 
 python_news:
-- cgit v1.2.3
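The two-line patch above works because of YAML anchors and aliases: `&STAFF_INFO` attaches an anchor to the channel ID where it is defined under `guild`, and `*STAFF_INFO` reuses that same value inside `duck_pond.channel_blacklist`, so the ID is written only once. The sketch below is a minimal, hypothetical illustration of that mechanism, assuming PyYAML is available; its key layout is simplified and is not the real structure of `config-default.yml`.

```py
import yaml

# Illustrative fragment only: it mirrors the anchor/alias pattern from the
# patch above, not the actual layout of config-default.yml.
CONFIG = """
channels:
    staff_info: &STAFF_INFO 396684402404622347

duck_pond:
    channel_blacklist:
        - *STAFF_INFO
"""

config = yaml.safe_load(CONFIG)

# The alias resolves to the anchored value, so the blacklist entry and the
# channel definition stay in sync automatically.
assert config["duck_pond"]["channel_blacklist"] == [config["channels"]["staff_info"]]
print(config["duck_pond"]["channel_blacklist"])  # [396684402404622347]
```

In other words, adding `- *STAFF_INFO` to the blacklist is equivalent to pasting the raw channel ID there, which is why the commit only needs to define the anchor and reference it.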