aboutsummaryrefslogtreecommitdiffstats
path: root/bot/exts/backend
diff options
context:
space:
mode:
Diffstat (limited to 'bot/exts/backend')
-rw-r--r--bot/exts/backend/branding/__init__.py7
-rw-r--r--bot/exts/backend/branding/_cog.py647
-rw-r--r--bot/exts/backend/branding/_repository.py240
-rw-r--r--bot/exts/backend/error_handler.py63
-rw-r--r--bot/exts/backend/logging.py2
-rw-r--r--bot/exts/backend/sync/_syncers.py10
6 files changed, 957 insertions, 12 deletions
diff --git a/bot/exts/backend/branding/__init__.py b/bot/exts/backend/branding/__init__.py
new file mode 100644
index 000000000..20a747b7f
--- /dev/null
+++ b/bot/exts/backend/branding/__init__.py
@@ -0,0 +1,7 @@
+from bot.bot import Bot
+from bot.exts.backend.branding._cog import Branding
+
+
+def setup(bot: Bot) -> None:
+ """Load Branding cog."""
+ bot.add_cog(Branding(bot))
diff --git a/bot/exts/backend/branding/_cog.py b/bot/exts/backend/branding/_cog.py
new file mode 100644
index 000000000..0a4ddcc88
--- /dev/null
+++ b/bot/exts/backend/branding/_cog.py
@@ -0,0 +1,647 @@
+import asyncio
+import contextlib
+import logging
+import random
+import typing as t
+from datetime import datetime, time, timedelta
+from enum import Enum
+from operator import attrgetter
+
+import async_timeout
+import discord
+from async_rediscache import RedisCache
+from discord.ext import commands, tasks
+
+from bot.bot import Bot
+from bot.constants import Branding as BrandingConfig, Channels, Colours, Guild, MODERATION_ROLES
+from bot.decorators import mock_in_debug
+from bot.exts.backend.branding._repository import BrandingRepository, Event, RemoteObject
+
+log = logging.getLogger(__name__)
+
+
+class AssetType(Enum):
+ """
+ Recognised Discord guild asset types.
+
+ The value of each member corresponds exactly to a kwarg that can be passed to `Guild.edit`.
+ """
+
+ BANNER = "banner"
+ ICON = "icon"
+
+
+def compound_hash(objects: t.Iterable[RemoteObject]) -> str:
+ """
+ Join SHA attributes of `objects` into a single string.
+
+ Compound hashes are cached to check for change in any of the member `objects`.
+ """
+ return "-".join(item.sha for item in objects)
+
+
+def make_embed(title: str, description: str, *, success: bool) -> discord.Embed:
+ """
+ Construct simple response embed.
+
+ If `success` is True, use green colour, otherwise red.
+
+ For both `title` and `description`, empty strings are valid values ~ fields will be empty.
+ """
+ colour = Colours.soft_green if success else Colours.soft_red
+ return discord.Embed(title=title[:256], description=description[:2048], colour=colour)
+
+
+def extract_event_duration(event: Event) -> str:
+ """
+ Extract a human-readable, year-agnostic duration string from `event`.
+
+ In the case that `event` is a fallback event, resolves to 'Fallback'.
+ """
+ if event.meta.is_fallback:
+ return "Fallback"
+
+ fmt = "%B %d" # Ex: August 23
+ start_date = event.meta.start_date.strftime(fmt)
+ end_date = event.meta.end_date.strftime(fmt)
+
+ return f"{start_date} - {end_date}"
+
+
+def extract_event_name(event: Event) -> str:
+ """
+ Extract title-cased event name from the path of `event`.
+
+ An event with a path of 'events/black_history_month' will resolve to 'Black History Month'.
+ """
+ name = event.path.split("/")[-1] # Inner-most directory name.
+ words = name.split("_") # Words from snake case.
+
+ return " ".join(word.title() for word in words)
+
+
+class Branding(commands.Cog):
+ """
+ Guild branding management.
+
+ Extension responsible for automatic synchronisation of the guild's branding with the branding repository.
+ Event definitions and assets are automatically discovered and applied as appropriate.
+
+ All state is stored in Redis. The cog should therefore seamlessly transition across restarts and maintain
+ a consistent icon rotation schedule for events with multiple icon assets.
+
+ By caching hashes of banner & icon assets, we discover changes in currently applied assets and always keep
+ the latest version applied.
+
+ The command interface allows moderators+ to control the daemon or request asset synchronisation, while
+ regular users can see information about the current event and the overall event schedule.
+ """
+
+ # RedisCache[
+ # "daemon_active": bool | If True, daemon starts on start-up. Controlled via commands.
+ # "event_path": str | Current event's path in the branding repo.
+ # "event_description": str | Current event's Markdown description.
+ # "event_duration": str | Current event's human-readable date range.
+ # "banner_hash": str | SHA of the currently applied banner.
+ # "icons_hash": str | Compound SHA of all icons in current rotation.
+ # "last_rotation_timestamp": float | POSIX UTC timestamp.
+ # ]
+ cache_information = RedisCache()
+
+ # Icons in current rotation. Keys (str) are download URLs, values (int) track the amount of times each
+ # icon has been used in the current rotation.
+ cache_icons = RedisCache()
+
+ # All available event names & durations. Cached by the daemon nightly; read by the calendar command.
+ cache_events = RedisCache()
+
+ def __init__(self, bot: Bot) -> None:
+ """Instantiate repository abstraction & allow daemon to start."""
+ self.bot = bot
+ self.repository = BrandingRepository(bot)
+
+ self.bot.loop.create_task(self.maybe_start_daemon()) # Start depending on cache.
+
+ # region: Internal logic & state management
+
+ @mock_in_debug(return_value=True) # Mocked in development environment to prevent API spam.
+ async def apply_asset(self, asset_type: AssetType, download_url: str) -> bool:
+ """
+ Download asset from `download_url` and apply it to PyDis as `asset_type`.
+
+ Return a boolean indicating whether the application was successful.
+ """
+ log.info(f"Applying '{asset_type.value}' asset to the guild.")
+
+ try:
+ file = await self.repository.fetch_file(download_url)
+ except Exception:
+ log.exception(f"Failed to fetch '{asset_type.value}' asset.")
+ return False
+
+ await self.bot.wait_until_guild_available()
+ pydis: discord.Guild = self.bot.get_guild(Guild.id)
+
+ timeout = 10 # Seconds.
+ try:
+ with async_timeout.timeout(timeout): # Raise after `timeout` seconds.
+ await pydis.edit(**{asset_type.value: file})
+ except discord.HTTPException:
+ log.exception("Asset upload to Discord failed.")
+ return False
+ except asyncio.TimeoutError:
+ log.error(f"Asset upload to Discord timed out after {timeout} seconds.")
+ return False
+ else:
+ log.trace("Asset uploaded successfully.")
+ return True
+
+ async def apply_banner(self, banner: RemoteObject) -> bool:
+ """
+ Apply `banner` to the guild and cache its hash if successful.
+
+ Banners should always be applied via this method to ensure that the last hash is cached.
+
+ Return a boolean indicating whether the application was successful.
+ """
+ success = await self.apply_asset(AssetType.BANNER, banner.download_url)
+
+ if success:
+ await self.cache_information.set("banner_hash", banner.sha)
+
+ return success
+
+ async def rotate_icons(self) -> bool:
+ """
+ Choose and apply the next-up icon in rotation.
+
+ We keep track of the amount of times each icon has been used. The values in `cache_icons` can be understood
+ to be iteration IDs. When an icon is chosen & applied, we bump its count, pushing it into the next iteration.
+
+ Once the current iteration (lowest count in the cache) depletes, we move onto the next iteration.
+
+ In the case that there is only 1 icon in the rotation and it has already been applied, do nothing.
+
+ Return a boolean indicating whether a new icon was applied successfully.
+ """
+ log.debug("Rotating icons.")
+
+ state = await self.cache_icons.to_dict()
+ log.trace(f"Total icons in rotation: {len(state)}.")
+
+ if not state: # This would only happen if rotation not initiated, but we can handle gracefully.
+ log.warning("Attempted icon rotation with an empty icon cache. This indicates wrong logic.")
+ return False
+
+ if len(state) == 1 and 1 in state.values():
+ log.debug("Aborting icon rotation: only 1 icon is available and has already been applied.")
+ return False
+
+ current_iteration = min(state.values()) # Choose iteration to draw from.
+ options = [download_url for download_url, times_used in state.items() if times_used == current_iteration]
+
+ log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}.")
+ next_icon = random.choice(options)
+
+ success = await self.apply_asset(AssetType.ICON, next_icon)
+
+ if success:
+ await self.cache_icons.increment(next_icon) # Push the icon into the next iteration.
+
+ timestamp = datetime.utcnow().timestamp()
+ await self.cache_information.set("last_rotation_timestamp", timestamp)
+
+ return success
+
+ async def maybe_rotate_icons(self) -> None:
+ """
+ Call `rotate_icons` if the configured amount of time has passed since last rotation.
+
+ We offset the calculated time difference into the future to avoid off-by-a-little-bit errors. Because there
+ is work to be done before the timestamp is read and written, the next read will likely commence slightly
+ under 24 hours after the last write.
+ """
+ log.debug("Checking whether it's time for icons to rotate.")
+
+ last_rotation_timestamp = await self.cache_information.get("last_rotation_timestamp")
+
+ if last_rotation_timestamp is None: # Maiden case ~ never rotated.
+ await self.rotate_icons()
+ return
+
+ last_rotation = datetime.fromtimestamp(last_rotation_timestamp)
+ difference = (datetime.utcnow() - last_rotation) + timedelta(minutes=5)
+
+ log.trace(f"Icons last rotated at {last_rotation} (difference: {difference}).")
+
+ if difference.days >= BrandingConfig.cycle_frequency:
+ await self.rotate_icons()
+
+ async def initiate_icon_rotation(self, available_icons: t.List[RemoteObject]) -> None:
+ """
+ Set up a new icon rotation.
+
+ This function should be called whenever available icons change. This is generally the case when we enter
+ a new event, but potentially also when the assets of an on-going event change. In such cases, a reset
+ of `cache_icons` is necessary, because it contains download URLs which may have gotten stale.
+
+ This function does not upload a new icon!
+ """
+ log.debug("Initiating new icon rotation.")
+
+ await self.cache_icons.clear()
+
+ new_state = {icon.download_url: 0 for icon in available_icons}
+ await self.cache_icons.update(new_state)
+
+ log.trace(f"Icon rotation initiated for {len(new_state)} icons.")
+
+ await self.cache_information.set("icons_hash", compound_hash(available_icons))
+
+ async def send_info_embed(self, channel_id: int, *, is_notification: bool) -> None:
+ """
+ Send the currently cached event description to `channel_id`.
+
+ When `is_notification` holds, a short contextual message for the #changelog channel is added.
+
+ We read event information from `cache_information`. The caller is therefore responsible for making
+ sure that the cache is up-to-date before calling this function.
+ """
+ log.debug(f"Sending event information event to channel: {channel_id} ({is_notification=}).")
+
+ await self.bot.wait_until_guild_available()
+ channel: t.Optional[discord.TextChannel] = self.bot.get_channel(channel_id)
+
+ if channel is None:
+ log.warning(f"Cannot send event information: channel {channel_id} not found!")
+ return
+
+ log.trace(f"Destination channel: #{channel.name}.")
+
+ description = await self.cache_information.get("event_description")
+ duration = await self.cache_information.get("event_duration")
+
+ if None in (description, duration):
+ content = None
+ embed = make_embed("No event in cache", "Is the daemon enabled?", success=False)
+
+ else:
+ content = "Python Discord is entering a new event!" if is_notification else None
+ embed = discord.Embed(description=description[:2048], colour=discord.Colour.blurple())
+ embed.set_footer(text=duration[:2048])
+
+ await channel.send(content=content, embed=embed)
+
+ async def enter_event(self, event: Event) -> t.Tuple[bool, bool]:
+ """
+ Apply `event` assets and update information cache.
+
+ We cache `event` information to ensure that we:
+ * Remember which event we're currently in across restarts
+ * Provide an on-demand informational embed without re-querying the branding repository
+
+ An event change should always be handled via this function, as it ensures that the cache is populated.
+
+ The #changelog notification is omitted when `event` is fallback, or already applied.
+
+ Return a 2-tuple indicating whether the banner, and the icon, were applied successfully.
+ """
+ log.info(f"Entering event: '{event.path}'.")
+
+ banner_success = await self.apply_banner(event.banner) # Only one asset ~ apply directly.
+
+ await self.initiate_icon_rotation(event.icons) # Prepare a new rotation.
+ icon_success = await self.rotate_icons() # Apply an icon from the new rotation.
+
+ # This will only be False in the case of a manual same-event re-synchronisation.
+ event_changed = event.path != await self.cache_information.get("event_path")
+
+ # Cache event identity to avoid re-entry in case of restart.
+ await self.cache_information.set("event_path", event.path)
+
+ # Cache information shown in the 'about' embed.
+ await self.populate_cache_event_description(event)
+
+ # Notify guild of new event ~ this reads the information that we cached above.
+ if event_changed and not event.meta.is_fallback:
+ await self.send_info_embed(Channels.change_log, is_notification=True)
+ else:
+ log.trace("Omitting #changelog notification. Event has not changed, or new event is fallback.")
+
+ return banner_success, icon_success
+
+ async def synchronise(self) -> t.Tuple[bool, bool]:
+ """
+ Fetch the current event and delegate to `enter_event`.
+
+ This is a convenience function to force synchronisation via a command. It should generally only be used
+ in a recovery scenario. In the usual case, the daemon already has an `Event` instance and can pass it
+ to `enter_event` directly.
+
+ Return a 2-tuple indicating whether the banner, and the icon, were applied successfully.
+ """
+ log.debug("Synchronise: fetching current event.")
+
+ current_event, available_events = await self.repository.get_current_event()
+
+ await self.populate_cache_events(available_events)
+
+ if current_event is None:
+ log.error("Failed to fetch event. Cannot synchronise!")
+ return False, False
+
+ return await self.enter_event(current_event)
+
+ async def populate_cache_events(self, events: t.List[Event]) -> None:
+ """
+ Clear `cache_events` and re-populate with names and durations of `events`.
+
+ For each event, we store its name and duration string. This is the information presented to users in the
+ calendar command. If a format change is needed, it has to be done here.
+
+ The cache does not store the fallback event, as it is not shown in the calendar.
+ """
+ log.debug("Populating events cache.")
+
+ await self.cache_events.clear()
+
+ no_fallback = [event for event in events if not event.meta.is_fallback]
+ chronological_events = sorted(no_fallback, key=attrgetter("meta.start_date"))
+
+ log.trace(f"Writing {len(chronological_events)} events (fallback omitted).")
+
+ with contextlib.suppress(ValueError): # Cache raises when updated with an empty dict.
+ await self.cache_events.update({
+ extract_event_name(event): extract_event_duration(event)
+ for event in chronological_events
+ })
+
+ async def populate_cache_event_description(self, event: Event) -> None:
+ """
+ Cache `event` description & duration.
+
+ This should be called when entering a new event, and can be called periodically to ensure that the cache
+ holds fresh information in the case that the event remains the same, but its description changes.
+
+ The duration is stored formatted for the frontend. It is not intended to be used programmatically.
+ """
+ log.debug("Caching event description & duration.")
+
+ await self.cache_information.set("event_description", event.meta.description)
+ await self.cache_information.set("event_duration", extract_event_duration(event))
+
+ # endregion
+ # region: Daemon
+
+ async def maybe_start_daemon(self) -> None:
+ """
+ Start the daemon depending on cache state.
+
+ The daemon will only start if it has been explicitly enabled via a command.
+ """
+ log.debug("Checking whether daemon should start.")
+
+ should_begin: t.Optional[bool] = await self.cache_information.get("daemon_active") # None if never set!
+
+ if should_begin:
+ self.daemon_loop.start()
+
+ def cog_unload(self) -> None:
+ """
+ Cancel the daemon in case of cog unload.
+
+ This is **not** done automatically! The daemon otherwise remains active in the background.
+ """
+ log.debug("Cog unload: cancelling daemon.")
+
+ self.daemon_loop.cancel()
+
+ async def daemon_main(self) -> None:
+ """
+ Synchronise guild & caches with branding repository.
+
+ Pull the currently active event from the branding repository and check whether it matches the currently
+ active event in the cache. If not, apply the new event.
+
+ However, it is also possible that an event's assets change as it's active. To account for such cases,
+ we check the banner & icons hashes against the currently cached values. If there is a mismatch, each
+ specific asset is re-applied.
+ """
+ log.info("Daemon main: checking current event.")
+
+ new_event, available_events = await self.repository.get_current_event()
+
+ await self.populate_cache_events(available_events)
+
+ if new_event is None:
+ log.warning("Daemon main: failed to get current event from branding repository, will do nothing.")
+ return
+
+ if new_event.path != await self.cache_information.get("event_path"):
+ log.debug("Daemon main: new event detected!")
+ await self.enter_event(new_event)
+ return
+
+ await self.populate_cache_event_description(new_event) # Cache fresh frontend info in case of change.
+
+ log.trace("Daemon main: event has not changed, checking for change in assets.")
+
+ if new_event.banner.sha != await self.cache_information.get("banner_hash"):
+ log.debug("Daemon main: detected banner change.")
+ await self.apply_banner(new_event.banner)
+
+ if compound_hash(new_event.icons) != await self.cache_information.get("icons_hash"):
+ log.debug("Daemon main: detected icon change.")
+ await self.initiate_icon_rotation(new_event.icons)
+ await self.rotate_icons()
+ else:
+ await self.maybe_rotate_icons()
+
+ @tasks.loop(hours=24)
+ async def daemon_loop(self) -> None:
+ """
+ Call `daemon_main` every 24 hours.
+
+ The scheduler maintains an exact 24-hour frequency even if this coroutine takes time to complete. If the
+ coroutine is started at 00:01 and completes at 00:05, it will still be started at 00:01 the next day.
+ """
+ log.trace("Daemon loop: calling daemon main.")
+
+ try:
+ await self.daemon_main()
+ except Exception:
+ log.exception("Daemon loop: failed with an unhandled exception!")
+
+ @daemon_loop.before_loop
+ async def daemon_before(self) -> None:
+ """
+ Call `daemon_loop` immediately, then block the loop until the next-up UTC midnight.
+
+ The first iteration is invoked directly such that synchronisation happens immediately after daemon start.
+ We then calculate the time until the next-up midnight and sleep before letting `daemon_loop` begin.
+ """
+ log.trace("Daemon before: performing start-up iteration.")
+
+ await self.daemon_loop()
+
+ log.trace("Daemon before: calculating time to sleep before loop begins.")
+ now = datetime.utcnow()
+
+ # The actual midnight moment is offset into the future to prevent issues with imprecise sleep.
+ tomorrow = now + timedelta(days=1)
+ midnight = datetime.combine(tomorrow, time(minute=1))
+
+ sleep_secs = (midnight - now).total_seconds()
+ log.trace(f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}.")
+
+ await asyncio.sleep(sleep_secs)
+
+ # endregion
+ # region: Command interface (branding)
+
+ @commands.group(name="branding")
+ async def branding_group(self, ctx: commands.Context) -> None:
+ """Control the branding cog."""
+ if not ctx.invoked_subcommand:
+ await ctx.send_help(ctx.command)
+
+ @branding_group.command(name="about", aliases=("current", "event"))
+ async def branding_about_cmd(self, ctx: commands.Context) -> None:
+ """Show the current event's description and duration."""
+ await self.send_info_embed(ctx.channel.id, is_notification=False)
+
+ @commands.has_any_role(*MODERATION_ROLES)
+ @branding_group.command(name="sync")
+ async def branding_sync_cmd(self, ctx: commands.Context) -> None:
+ """
+ Force branding synchronisation.
+
+ Show which assets have failed to synchronise, if any.
+ """
+ async with ctx.typing():
+ banner_success, icon_success = await self.synchronise()
+
+ failed_assets = ", ".join(
+ name
+ for name, status in [("banner", banner_success), ("icon", icon_success)]
+ if status is False
+ )
+
+ if failed_assets:
+ resp = make_embed("Synchronisation unsuccessful", f"Failed to apply: {failed_assets}.", success=False)
+ resp.set_footer(text="Check log for details.")
+ else:
+ resp = make_embed("Synchronisation successful", "Assets have been applied.", success=True)
+
+ await ctx.send(embed=resp)
+
+ # endregion
+ # region: Command interface (branding calendar)
+
+ @branding_group.group(name="calendar", aliases=("schedule", "events"))
+ async def branding_calendar_group(self, ctx: commands.Context) -> None:
+ """
+ Show the current event calendar.
+
+ We draw event information from `cache_events` and use each key-value pair to create a field in the response
+ embed. As such, we do not need to query the API to get event information. The cache is automatically
+ re-populated by the daemon whenever it makes a request. A moderator+ can also explicitly request a cache
+ refresh using the 'refresh' subcommand.
+
+ Due to Discord limitations, we only show up to 25 events. This is entirely sufficient at the time of writing.
+ In the case that we find ourselves with more than 25 events, a warning log will alert core devs.
+
+ In the future, we may be interested in a field-paginating solution.
+ """
+ if ctx.invoked_subcommand:
+ # If you're wondering why this works: when the 'refresh' subcommand eventually re-invokes
+ # this group, the attribute will be automatically set to None by the framework.
+ return
+
+ available_events = await self.cache_events.to_dict()
+ log.trace(f"Found {len(available_events)} cached events available for calendar view.")
+
+ if not available_events:
+ resp = make_embed("No events found!", "Cache may be empty, try `branding calendar refresh`.", success=False)
+ await ctx.send(embed=resp)
+ return
+
+ embed = discord.Embed(title="Current event calendar", colour=discord.Colour.blurple())
+
+ # Because Discord embeds can only contain up to 25 fields, we only show the first 25.
+ first_25 = list(available_events.items())[:25]
+
+ if len(first_25) != len(available_events): # Alert core devs that a paginating solution is now necessary.
+ log.warning(f"There are {len(available_events)} events, but the calendar view can only display 25.")
+
+ for name, duration in first_25:
+ embed.add_field(name=name[:256], value=duration[:1024])
+
+ embed.set_footer(text="Otherwise, the fallback season is used.")
+
+ await ctx.send(embed=embed)
+
+ @commands.has_any_role(*MODERATION_ROLES)
+ @branding_calendar_group.command(name="refresh")
+ async def branding_calendar_refresh_cmd(self, ctx: commands.Context) -> None:
+ """
+ Refresh event cache and show current event calendar.
+
+ Supplementary subcommand allowing force-refreshing the event cache. Implemented as a subcommand because
+ unlike the supergroup, it requires moderator privileges.
+ """
+ log.info("Performing command-requested event cache refresh.")
+
+ async with ctx.typing():
+ available_events = await self.repository.get_events()
+ await self.populate_cache_events(available_events)
+
+ await ctx.invoke(self.branding_calendar_group)
+
+ # endregion
+ # region: Command interface (branding daemon)
+
+ @commands.has_any_role(*MODERATION_ROLES)
+ @branding_group.group(name="daemon", aliases=("d",))
+ async def branding_daemon_group(self, ctx: commands.Context) -> None:
+ """Control the branding cog's daemon."""
+ if not ctx.invoked_subcommand:
+ await ctx.send_help(ctx.command)
+
+ @branding_daemon_group.command(name="enable", aliases=("start", "on"))
+ async def branding_daemon_enable_cmd(self, ctx: commands.Context) -> None:
+ """Enable the branding daemon."""
+ await self.cache_information.set("daemon_active", True)
+
+ if self.daemon_loop.is_running():
+ resp = make_embed("Daemon is already enabled!", "", success=False)
+ else:
+ self.daemon_loop.start()
+ resp = make_embed("Daemon enabled!", "It will now automatically awaken on start-up.", success=True)
+
+ await ctx.send(embed=resp)
+
+ @branding_daemon_group.command(name="disable", aliases=("stop", "off"))
+ async def branding_daemon_disable_cmd(self, ctx: commands.Context) -> None:
+ """Disable the branding daemon."""
+ await self.cache_information.set("daemon_active", False)
+
+ if self.daemon_loop.is_running():
+ self.daemon_loop.cancel()
+ resp = make_embed("Daemon disabled!", "It will not awaken on start-up.", success=True)
+ else:
+ resp = make_embed("Daemon is already disabled!", "", success=False)
+
+ await ctx.send(embed=resp)
+
+ @branding_daemon_group.command(name="status")
+ async def branding_daemon_status_cmd(self, ctx: commands.Context) -> None:
+ """Check whether the daemon is currently enabled."""
+ if self.daemon_loop.is_running():
+ resp = make_embed("Daemon is enabled", "Use `branding daemon disable` to stop.", success=True)
+ else:
+ resp = make_embed("Daemon is disabled", "Use `branding daemon enable` to start.", success=False)
+
+ await ctx.send(embed=resp)
+
+ # endregion
diff --git a/bot/exts/backend/branding/_repository.py b/bot/exts/backend/branding/_repository.py
new file mode 100644
index 000000000..7b09d4641
--- /dev/null
+++ b/bot/exts/backend/branding/_repository.py
@@ -0,0 +1,240 @@
+import logging
+import typing as t
+from datetime import date, datetime
+
+import frontmatter
+
+from bot.bot import Bot
+from bot.constants import Keys
+from bot.errors import BrandingMisconfiguration
+
+# Base URL for requests into the branding repository.
+BRANDING_URL = "https://api.github.com/repos/python-discord/branding/contents"
+
+PARAMS = {"ref": "main"} # Target branch.
+HEADERS = {"Accept": "application/vnd.github.v3+json"} # Ensure we use API v3.
+
+# A GitHub token is not necessary. However, unauthorized requests are limited to 60 per hour.
+if Keys.github:
+ HEADERS["Authorization"] = f"token {Keys.github}"
+
+# Since event periods are year-agnostic, we parse them into `datetime` objects with a manually inserted year.
+# Please note that this is intentionally a leap year to allow Feb 29 to be valid.
+ARBITRARY_YEAR = 2020
+
+# Format used to parse date strings after we inject `ARBITRARY_YEAR` at the end.
+DATE_FMT = "%B %d %Y" # Ex: July 10 2020
+
+log = logging.getLogger(__name__)
+
+
+class RemoteObject:
+ """
+ Remote file or directory on GitHub.
+
+ The annotations match keys in the response JSON that we're interested in.
+ """
+
+ sha: str # Hash helps us detect asset change.
+ name: str # Filename.
+ path: str # Path from repo root.
+ type: str # Either 'file' or 'dir'.
+ download_url: t.Optional[str] # If type is 'dir', this is None!
+
+ def __init__(self, dictionary: t.Dict[str, t.Any]) -> None:
+ """Initialize by grabbing annotated attributes from `dictionary`."""
+ missing_keys = self.__annotations__.keys() - dictionary.keys()
+ if missing_keys:
+ raise KeyError(f"Fetched object lacks expected keys: {missing_keys}")
+ for annotation in self.__annotations__:
+ setattr(self, annotation, dictionary[annotation])
+
+
+class MetaFile(t.NamedTuple):
+ """Attributes defined in a 'meta.md' file."""
+
+ is_fallback: bool
+ start_date: t.Optional[date]
+ end_date: t.Optional[date]
+ description: str # Markdown event description.
+
+
+class Event(t.NamedTuple):
+ """Event defined in the branding repository."""
+
+ path: str # Path from repo root where event lives. This is the event's identity.
+ meta: MetaFile
+ banner: RemoteObject
+ icons: t.List[RemoteObject]
+
+ def __str__(self) -> str:
+ return f"<Event at '{self.path}'>"
+
+
+class BrandingRepository:
+ """
+ Branding repository abstraction.
+
+ This class represents the branding repository's main branch and exposes available events and assets
+ as objects. It performs the necessary amount of validation to ensure that a misconfigured event
+ isn't returned. Such events are simply ignored, and will be substituted with the fallback event,
+ if available. Warning logs will inform core developers if a misconfigured event is encountered.
+
+ Colliding events cause no special behaviour. In such cases, the first found active event is returned.
+ We work with the assumption that the branding repository checks for such conflicts and prevents them
+ from reaching the main branch.
+
+ This class keeps no internal state. All `get_current_event` calls will result in GitHub API requests.
+ The caller is therefore responsible for being responsible and caching information to prevent API abuse.
+
+ Requests are made using the HTTP session looked up on the bot instance.
+ """
+
+ def __init__(self, bot: Bot) -> None:
+ self.bot = bot
+
+ async def fetch_directory(self, path: str, types: t.Container[str] = ("file", "dir")) -> t.Dict[str, RemoteObject]:
+ """
+ Fetch directory found at `path` in the branding repository.
+
+ Raise an exception if the request fails, or if the response lacks the expected keys.
+
+ Passing custom `types` allows getting only files or directories. By default, both are included.
+ """
+ full_url = f"{BRANDING_URL}/{path}"
+ log.debug(f"Fetching directory from branding repository: '{full_url}'.")
+
+ async with self.bot.http_session.get(full_url, params=PARAMS, headers=HEADERS) as response:
+ if response.status != 200:
+ raise RuntimeError(f"Failed to fetch directory due to status: {response.status}")
+
+ log.debug("Fetch successful, reading JSON response.")
+ json_directory = await response.json()
+
+ return {file["name"]: RemoteObject(file) for file in json_directory if file["type"] in types}
+
+ async def fetch_file(self, download_url: str) -> bytes:
+ """
+ Fetch file as bytes from `download_url`.
+
+ Raise an exception if the request does not succeed.
+ """
+ log.debug(f"Fetching file from branding repository: '{download_url}'.")
+
+ async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response:
+ if response.status != 200:
+ raise RuntimeError(f"Failed to fetch file due to status: {response.status}")
+
+ log.debug("Fetch successful, reading payload.")
+ return await response.read()
+
+ def parse_meta_file(self, raw_file: bytes) -> MetaFile:
+ """
+ Parse a 'meta.md' file from raw bytes.
+
+ The caller is responsible for handling errors caused by misconfiguration.
+ """
+ attrs, description = frontmatter.parse(raw_file, encoding="UTF-8")
+
+ if not description:
+ raise BrandingMisconfiguration("No description found in 'meta.md'!")
+
+ if attrs.get("fallback", False):
+ return MetaFile(is_fallback=True, start_date=None, end_date=None, description=description)
+
+ start_date_raw = attrs.get("start_date")
+ end_date_raw = attrs.get("end_date")
+
+ if None in (start_date_raw, end_date_raw):
+ raise BrandingMisconfiguration("Non-fallback event doesn't have start and end dates defined!")
+
+ # We extend the configured month & day with an arbitrary leap year, allowing a datetime object to exist.
+ # This may raise errors if misconfigured. We let the caller handle such cases.
+ start_date = datetime.strptime(f"{start_date_raw} {ARBITRARY_YEAR}", DATE_FMT).date()
+ end_date = datetime.strptime(f"{end_date_raw} {ARBITRARY_YEAR}", DATE_FMT).date()
+
+ return MetaFile(is_fallback=False, start_date=start_date, end_date=end_date, description=description)
+
+ async def construct_event(self, directory: RemoteObject) -> Event:
+ """
+ Construct an `Event` instance from an event `directory`.
+
+ The caller is responsible for handling errors caused by misconfiguration.
+ """
+ contents = await self.fetch_directory(directory.path)
+
+ missing_assets = {"meta.md", "banner.png", "server_icons"} - contents.keys()
+
+ if missing_assets:
+ raise BrandingMisconfiguration(f"Directory is missing following assets: {missing_assets}")
+
+ server_icons = await self.fetch_directory(contents["server_icons"].path, types=("file",))
+
+ if len(server_icons) == 0:
+ raise BrandingMisconfiguration("Found no server icons!")
+
+ meta_bytes = await self.fetch_file(contents["meta.md"].download_url)
+
+ meta_file = self.parse_meta_file(meta_bytes)
+
+ return Event(directory.path, meta_file, contents["banner.png"], list(server_icons.values()))
+
+ async def get_events(self) -> t.List[Event]:
+ """
+ Discover available events in the branding repository.
+
+ Misconfigured events are skipped. May return an empty list in the catastrophic case.
+ """
+ log.debug("Discovering events in branding repository.")
+
+ try:
+ event_directories = await self.fetch_directory("events", types=("dir",)) # Skip files.
+ except Exception:
+ log.exception("Failed to fetch 'events' directory.")
+ return []
+
+ instances: t.List[Event] = []
+
+ for event_directory in event_directories.values():
+ log.trace(f"Attempting to construct event from directory: '{event_directory.path}'.")
+ try:
+ instance = await self.construct_event(event_directory)
+ except Exception as exc:
+ log.warning(f"Could not construct event '{event_directory.path}'.", exc_info=exc)
+ else:
+ instances.append(instance)
+
+ return instances
+
+ async def get_current_event(self) -> t.Tuple[t.Optional[Event], t.List[Event]]:
+ """
+ Get the currently active event, or the fallback event.
+
+ The second return value is a list of all available events. The caller may discard it, if not needed.
+ Returning all events alongside the current one prevents having to query the API twice in some cases.
+
+ The current event may be None in the case that no event is active, and no fallback event is found.
+ """
+ utc_now = datetime.utcnow()
+ log.debug(f"Finding active event for: {utc_now}.")
+
+ # Construct an object in the arbitrary year for the purpose of comparison.
+ lookup_now = date(year=ARBITRARY_YEAR, month=utc_now.month, day=utc_now.day)
+ log.trace(f"Lookup object in arbitrary year: {lookup_now}.")
+
+ available_events = await self.get_events()
+ log.trace(f"Found {len(available_events)} available events.")
+
+ for event in available_events:
+ meta = event.meta
+ if not meta.is_fallback and (meta.start_date <= lookup_now <= meta.end_date):
+ return event, available_events
+
+ log.trace("No active event found. Looking for fallback event.")
+
+ for event in available_events:
+ if event.meta.is_fallback:
+ return event, available_events
+
+ log.warning("No event is currently active and no fallback event was found!")
+ return None, available_events
diff --git a/bot/exts/backend/error_handler.py b/bot/exts/backend/error_handler.py
index c643d346e..76ab7dfc2 100644
--- a/bot/exts/backend/error_handler.py
+++ b/bot/exts/backend/error_handler.py
@@ -1,4 +1,5 @@
import contextlib
+import difflib
import logging
import typing as t
@@ -8,9 +9,9 @@ from sentry_sdk import push_scope
from bot.api import ResponseCodeError
from bot.bot import Bot
-from bot.constants import Channels, Colours
+from bot.constants import Colours, Icons, MODERATION_ROLES
from bot.converters import TagNameConverter
-from bot.errors import LockedResourceError
+from bot.errors import InvalidInfractedUser, LockedResourceError
from bot.utils.checks import InWhitelistCheckFailure
log = logging.getLogger(__name__)
@@ -47,7 +48,6 @@ class ErrorHandler(Cog):
* If CommandNotFound is raised when invoking the tag (determined by the presence of the
`invoked_from_error_handler` attribute), this error is treated as being unexpected
and therefore sends an error message
- * Commands in the verification channel are ignored
2. UserInputError: see `handle_user_input_error`
3. CheckFailure: see `handle_check_failure`
4. CommandOnCooldown: send an error message in the invoking context
@@ -63,10 +63,9 @@ class ErrorHandler(Cog):
if isinstance(e, errors.CommandNotFound) and not hasattr(ctx, "invoked_from_error_handler"):
if await self.try_silence(ctx):
return
- if ctx.channel.id != Channels.verification:
- # Try to look for a tag with the command's name
- await self.try_get_tag(ctx)
- return # Exit early to avoid logging.
+ # Try to look for a tag with the command's name
+ await self.try_get_tag(ctx)
+ return # Exit early to avoid logging.
elif isinstance(e, errors.UserInputError):
await self.handle_user_input_error(ctx, e)
elif isinstance(e, errors.CheckFailure):
@@ -78,11 +77,19 @@ class ErrorHandler(Cog):
await self.handle_api_error(ctx, e.original)
elif isinstance(e.original, LockedResourceError):
await ctx.send(f"{e.original} Please wait for it to finish and try again later.")
+ elif isinstance(e.original, InvalidInfractedUser):
+ await ctx.send(f"Cannot infract that user. {e.original.reason}")
+ else:
+ await self.handle_unexpected_error(ctx, e.original)
+ return # Exit early to avoid logging.
+ elif isinstance(e, errors.ConversionError):
+ if isinstance(e.original, ResponseCodeError):
+ await self.handle_api_error(ctx, e.original)
else:
await self.handle_unexpected_error(ctx, e.original)
return # Exit early to avoid logging.
elif not isinstance(e, errors.DisabledCommand):
- # ConversionError, MaxConcurrencyReached, ExtensionError
+ # MaxConcurrencyReached, ExtensionError
await self.handle_unexpected_error(ctx, e)
return # Exit early to avoid logging.
@@ -156,10 +163,46 @@ class ErrorHandler(Cog):
)
else:
with contextlib.suppress(ResponseCodeError):
- await ctx.invoke(tags_get_command, tag_name=tag_name)
+ if await ctx.invoke(tags_get_command, tag_name=tag_name):
+ return
+
+ if not any(role.id in MODERATION_ROLES for role in ctx.author.roles):
+ await self.send_command_suggestion(ctx, ctx.invoked_with)
+
# Return to not raise the exception
return
async def send_command_suggestion(self, ctx: Context, command_name: str) -> None:
    """Sends user similar commands if any can be found."""
    # Gather the names and aliases of every non-hidden command as candidates
    # for the fuzzy match below.
    candidate_names = []
    for command in self.bot.walk_commands():
        if not command.hidden:
            candidate_names += (command.name, *command.aliases)

    matches = difflib.get_close_matches(command_name, candidate_names, 1)
    if not matches:
        return

    suggestion_name = matches[0]
    suggestion = self.bot.get_command(suggestion_name)
    if not suggestion:
        return

    # Don't suggest a command the invoker wouldn't be allowed to run.
    log_msg = "Cancelling attempt to suggest a command due to failed checks."
    try:
        runnable = await suggestion.can_run(ctx)
    except errors.CommandError as cmd_error:
        log.debug(log_msg)
        await self.on_command_error(ctx, cmd_error)
        return
    if not runnable:
        log.debug(log_msg)
        return

    # Echo the user's message back with only the misspelled command replaced.
    embed = Embed()
    embed.set_author(name="Did you mean:", icon_url=Icons.questionmark)
    embed.description = ctx.message.content.replace(command_name, suggestion_name, 1)
    await ctx.send(embed=embed, delete_after=10.0)
+
async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:
"""
Send an error message in `ctx` for UserInputError, sometimes invoking the help command too.
@@ -191,10 +234,12 @@ class ErrorHandler(Cog):
elif isinstance(e, errors.BadUnionArgument):
embed = self._get_error_embed("Bad argument", f"{e}\n{e.errors[-1]}")
await ctx.send(embed=embed)
+ await prepared_help_command
self.bot.stats.incr("errors.bad_union_argument")
elif isinstance(e, errors.ArgumentParsingError):
embed = self._get_error_embed("Argument parsing error", str(e))
await ctx.send(embed=embed)
+ prepared_help_command.close()
self.bot.stats.incr("errors.argument_parsing_error")
else:
embed = self._get_error_embed(
diff --git a/bot/exts/backend/logging.py b/bot/exts/backend/logging.py
index 94fa2b139..823f14ea4 100644
--- a/bot/exts/backend/logging.py
+++ b/bot/exts/backend/logging.py
@@ -29,7 +29,7 @@ class Logging(Cog):
url="https://github.com/python-discord/bot",
icon_url=(
"https://raw.githubusercontent.com/"
- "python-discord/branding/master/logos/logo_circle/logo_circle_large.png"
+ "python-discord/branding/main/logos/logo_circle/logo_circle_large.png"
)
)
diff --git a/bot/exts/backend/sync/_syncers.py b/bot/exts/backend/sync/_syncers.py
index 2eb9f9971..c9f2d2da8 100644
--- a/bot/exts/backend/sync/_syncers.py
+++ b/bot/exts/backend/sync/_syncers.py
@@ -5,12 +5,15 @@ from collections import namedtuple
from discord import Guild
from discord.ext.commands import Context
+from more_itertools import chunked
import bot
from bot.api import ResponseCodeError
log = logging.getLogger(__name__)
+CHUNK_SIZE = 1000
+
# These objects are declared as namedtuples because tuples are hashable,
# something that we make use of when diffing site roles against guild roles.
_Role = namedtuple('Role', ('id', 'name', 'colour', 'permissions', 'position'))
@@ -207,10 +210,13 @@ class UserSyncer(Syncer):
@staticmethod
async def _sync(diff: _Diff) -> None:
"""Synchronise the database with the user cache of `guild`."""
+ # Using asyncio.gather would still consume too many resources on the site.
log.trace("Syncing created users...")
if diff.created:
- await bot.instance.api_client.post("bot/users", json=diff.created)
+ for chunk in chunked(diff.created, CHUNK_SIZE):
+ await bot.instance.api_client.post("bot/users", json=chunk)
log.trace("Syncing updated users...")
if diff.updated:
- await bot.instance.api_client.patch("bot/users/bulk_patch", json=diff.updated)
+ for chunk in chunked(diff.updated, CHUNK_SIZE):
+ await bot.instance.api_client.patch("bot/users/bulk_patch", json=chunk)