author     2020-12-13 10:57:43 +0100
committer  2020-12-13 10:57:43 +0100
commit     dca6ec330694f8ff09a21a0f70dfc2b06feea674 (patch)
tree       d96964e01ff4310997671d69ef8ce7932902238d
parent     Update docstrings and fix grammar in comments (diff)
parent     Add callback to log errors in AoC background tasks (diff)
Merge branch 'master' into sebastiaan/advent-of-code/refactor-background-tasks
# Conflicts:
# bot/exts/christmas/advent_of_code/_cog.py
# bot/exts/christmas/advent_of_code/_helpers.py
-rw-r--r--  .github/review-policy.yml                       3
-rw-r--r--  .github/workflows/lint.yaml                     22
-rw-r--r--  .github/workflows/review-check.yaml             166
-rw-r--r--  .github/workflows/status_embed.yaml             73
-rw-r--r--  bot/constants.py                                 23
-rw-r--r--  bot/exts/christmas/advent_of_code/_cog.py        55
-rw-r--r--  bot/exts/christmas/advent_of_code/_helpers.py    105
-rw-r--r--  bot/exts/evergreen/error_handler.py              4
-rw-r--r--  bot/exts/evergreen/snakes/_snakes_cog.py         27
9 files changed, 252 insertions, 226 deletions
diff --git a/.github/review-policy.yml b/.github/review-policy.yml
new file mode 100644
index 00000000..421b30f8
--- /dev/null
+++ b/.github/review-policy.yml
@@ -0,0 +1,3 @@
+remote: python-discord/.github
+path: review-policies/core-developers.yml
+ref: main
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 063f406c..a5f45255 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -91,3 +91,25 @@ jobs:
       - name: Run flake8
         run: "flake8 \
           --format='::error file=%(path)s,line=%(row)d,col=%(col)d::[flake8] %(code)s: %(text)s'"
+
+      # Prepare the Pull Request Payload artifact. If this fails, we
+      # we fail silently using the `continue-on-error` option. It's
+      # nice if this succeeds, but if it fails for any reason, it
+      # does not mean that our lint checks failed.
+      - name: Prepare Pull Request Payload artifact
+        id: prepare-artifact
+        if: always() && github.event_name == 'pull_request'
+        continue-on-error: true
+        run: cat $GITHUB_EVENT_PATH | jq '.pull_request' > pull_request_payload.json
+
+      # This only makes sense if the previous step succeeded. To
+      # get the original outcome of the previous step before the
+      # `continue-on-error` conclusion is applied, we use the
+      # `.outcome` value. This step also fails silently.
+      - name: Upload a Build Artifact
+        if: always() && steps.prepare-artifact.outcome == 'success'
+        continue-on-error: true
+        uses: actions/upload-artifact@v2
+        with:
+          name: pull-request-payload
+          path: pull_request_payload.json
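Editor's note: the payload-preparation step above is a single jq one-liner. As a rough illustration of what it extracts, here is a minimal Python sketch of the same idea; the GITHUB_EVENT_PATH variable and the output filename come from the step above, while everything else is illustrative and not part of the commit.

import json
import os

# Roughly what `cat $GITHUB_EVENT_PATH | jq '.pull_request'` does:
# read the webhook event payload and keep only the pull_request object.
event_path = os.environ["GITHUB_EVENT_PATH"]

with open(event_path, encoding="utf-8") as event_file:
    event = json.load(event_file)

# `pull_request` is only present for pull request events; fall back to None otherwise.
pull_request = event.get("pull_request")

with open("pull_request_payload.json", "w", encoding="utf-8") as payload_file:
    json.dump(pull_request, payload_file)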
diff --git a/.github/workflows/review-check.yaml b/.github/workflows/review-check.yaml
deleted file mode 100644
index 3e45a4b5..00000000
--- a/.github/workflows/review-check.yaml
+++ /dev/null
@@ -1,166 +0,0 @@
-name: Review Check
-
-# This workflow needs to trigger in two situations:
-#
-# 1. When a pull request is opened, reopened, or synchronized (new commit)
-#    This is accomplished using the `pull_request_target` event that triggers in
-#    precisely those situations by default. I've opted for `pull_request_target`
-#    as we don't need to have access to the PR's code and it's safer to make the
-#    secrets we need available to the workflow compared to `pull_request`.
-#
-#    The reason we need to run the workflow for this event is because we need to
-#    make sure that our check is part of the check suite for the current commit.
-#
-# 2. When a review is added or dismissed.
-#    Whenever reviews are submitted or dismissed, the number of Core Developer
-#    approvals may obviously change.
-#
-# ---
-#
-# Unfortunately, having two different event triggers means that can't let
-# this workflow fail on its own, as GitHub actions registers a separate check
-# run result per event trigger. As both triggers need to share the success/fail
-# state, we get around that by registering a custom "status".
-on:
-  pull_request_review:
-    types:
-      - submitted
-      - dismissed
-  pull_request_target:
-
-
-jobs:
-  review-check:
-    name: Check Core Dev Reviews
-    runs-on: ubuntu-latest
-
-    steps:
-      # Fetch the latest Opinionated reviews from users with write
-      # access. We can't narrow it down using a specific team here
-      # yet, so we'll do that later.
-      - uses: octokit/[email protected]
-        id: reviews
-        with:
-          query: |
-            query ($repository: String!, $pr: Int!) {
-              repository(owner: "python-discord", name: $repository) {
-                pullRequest(number: $pr) {
-                  latestOpinionatedReviews(last: 100, writersOnly: true) {
-                    nodes{
-                      author{
-                        login
-                      }
-                      state
-                    }
-                  }
-                }
-              }
-            }
-          repository: ${{ github.event.repository.name }}
-          pr: ${{ github.event.pull_request.number }}
-        env:
-          GITHUB_TOKEN: ${{ secrets.REPO_TOKEN }}
-
-      # Fetch the members of the Core Developers team so we can
-      # check if any of them actually approved this PR.
-      - uses: octokit/[email protected]
-        id: core_developers
-        with:
-          query: |
-            query {
-              organization(login: "python-discord") {
-                team(slug: "core-developers") {
-                  members(first: 100) {
-                    nodes {
-                      login
-                    }
-                  }
-                }
-              }
-            }
-        env:
-          GITHUB_TOKEN: ${{ secrets.TEAM_TOKEN }}
-
-      # I've opted for a Python script, as that's what most of us
-      # are familiar with. We do need to setup Python for that.
-      - name: Setup python
-        id: python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.9'
-
-      # This is a small, inline Python script that looks for the
-      # intersection between approving reviewers and the core dev
-      # team. If that intersection exists, we have at least one
-      # approving Core Developer.
-      #
-      # I've opted to keep this inline as it's relatively small
-      # and this workflow will be added to multiple repositories.
-      - name: Check for Accepting Core Developers
-        id: core_dev_reviews
-        run: |
-          python -c 'import json
-          reviews = json.loads("""${{ steps.reviews.outputs.data }}""")
-          reviewers = {
-              review["author"]["login"]
-              for review in reviews["repository"]["pullRequest"]["latestOpinionatedReviews"]["nodes"]
-              if review["state"] == "APPROVED"
-          }
-          core_devs = json.loads("""${{ steps.core_developers.outputs.data }}""")
-          core_devs = {
-              member["login"] for member in core_devs["organization"]["team"]["members"]["nodes"]
-          }
-          approving_core_devs = reviewers & core_devs
-          approval_check = "success" if approving_core_devs else "failure"
-          print(f"::set-output name=approval_check::{approval_check}")
-          '
-
-      # This step registers a a new status for the head commit of the pull
-      # request. If a status with the same context and description already
-      # exists, it will be overwritten. The reason we have to do this is
-      # because workflows run for the separate `pull_request_target` and
-      # `pull_request_review` events need to share a single result state.
-      - name: Add Core Dev Approval status check
-        uses: octokit/[email protected]
-        with:
-          route: POST /repos/:repository/statuses/:sha
-          repository: ${{ github.repository }}
-          sha: ${{ github.event.pull_request.head.sha }}
-          state: ${{ steps.core_dev_reviews.outputs.approval_check }}
-          description: At least one core developer needs to approve this PR
-          context: Core Dev Approval
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      # If we have at least one Core Developer approval, this step
-      # removes the 'waiting for core dev approval' label if it's
-      # still present for the PR.
-      - name: Remove "waiting for core dev approval" if a core dev approved this PR
-        if: >-
-          steps.core_dev_reviews.outputs.approval_check == 'success' &&
-          contains(github.event.pull_request.labels.*.name, 'waiting for core dev approval')
-        uses: octokit/[email protected]
-        with:
-          route: DELETE /repos/:repository/issues/:number/labels/:label
-          repository: ${{ github.repository }}
-          number: ${{ github.event.pull_request.number }}
-          label: needs core dev approval
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      # If we have do not have one Core Developer approval, this step
-      # adds the 'waiting for core dev approval' label if it's not
-      # already present for the PR.
-      - name: Add "waiting for core dev approval" if no core dev has approved yet
-        if: >-
-          steps.core_dev_reviews.outputs.approval_check == 'failure' &&
-          !contains(github.event.pull_request.labels.*.name, 'waiting for core dev approval')
-        uses: octokit/[email protected]
-        with:
-          route: POST /repos/:repository/issues/:number/labels
-          repository: ${{ github.repository }}
-          number: ${{ github.event.pull_request.number }}
-          labels: |
-            - needs core dev approval
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
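Editor's note: the inline script in the deleted workflow boils down to a set intersection between approving reviewers and team members. A standalone sketch of that logic follows; the function name and the sample data are illustrative only.

import typing


def has_core_dev_approval(reviews: typing.List[dict], core_devs: typing.Set[str]) -> bool:
    """Return True if at least one approving review came from a core developer."""
    approving_reviewers = {
        review["author"]["login"]
        for review in reviews
        if review["state"] == "APPROVED"
    }
    return bool(approving_reviewers & core_devs)


# Example with made-up data:
reviews = [{"author": {"login": "lemon"}, "state": "APPROVED"}]
print(has_core_dev_approval(reviews, core_devs={"lemon", "sebastiaan"}))  # True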
diff --git a/.github/workflows/status_embed.yaml b/.github/workflows/status_embed.yaml
new file mode 100644
index 00000000..28caa8c2
--- /dev/null
+++ b/.github/workflows/status_embed.yaml
@@ -0,0 +1,73 @@
+name: Status Embed
+
+on:
+  workflow_run:
+    workflows:
+      - Lint
+      - Build
+    types:
+      - completed
+
+jobs:
+  status_embed:
+    # We send the embed in the following situations:
+    # - Always after the `Build` workflow, as it runs at the
+    #   end of our workflow sequence regardless of status.
+    # - Always for the `pull_request` event, as it only
+    #   runs one workflow.
+    # - Always run for non-success workflows, as they
+    #   terminate the workflow sequence.
+    if: >-
+      (github.event.workflow_run.name == 'Build' && github.event.workflow_run.conclusion != 'skipped') ||
+      github.event.workflow_run.event == 'pull_request' ||
+      github.event.workflow_run.conclusion == 'failure' ||
+      github.event.workflow_run.conclusion == 'cancelled'
+    name: Send Status Embed to Discord
+    runs-on: ubuntu-latest
+
+    steps:
+      # A workflow_run event does not contain all the information
+      # we need for a PR embed. That's why we upload an artifact
+      # with that information in the Lint workflow.
+      - name: Get Pull Request Information
+        id: pr_info
+        if: github.event.workflow_run.event == 'pull_request'
+        run: |
+          curl -s -H "Authorization: token $GITHUB_TOKEN" ${{ github.event.workflow_run.artifacts_url }} > artifacts.json
+          DOWNLOAD_URL=$(cat artifacts.json | jq -r '.artifacts[] | select(.name == "pull-request-payload") | .archive_download_url')
+          [ -z "$DOWNLOAD_URL" ] && exit 1
+          wget --quiet --header="Authorization: token $GITHUB_TOKEN" -O pull_request_payload.zip $DOWNLOAD_URL || exit 2
+          unzip -p pull_request_payload.zip > pull_request_payload.json
+          [ -s pull_request_payload.json ] || exit 3
+          echo "::set-output name=pr_author_login::$(jq -r '.user.login // empty' pull_request_payload.json)"
+          echo "::set-output name=pr_number::$(jq -r '.number // empty' pull_request_payload.json)"
+          echo "::set-output name=pr_title::$(jq -r '.title // empty' pull_request_payload.json)"
+          echo "::set-output name=pr_source::$(jq -r '.head.label // empty' pull_request_payload.json)"
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      # Send an informational status embed to Discord instead of the
+      # standard embeds that Discord sends. This embed will contain
+      # more information and we can fine tune when we actually want
+      # to send an embed.
+      - name: GitHub Actions Status Embed for Discord
+        uses: SebastiaanZ/[email protected]
+        with:
+          # Our GitHub Actions webhook
+          webhook_id: '784184528997842985'
+          webhook_token: ${{ secrets.GHA_WEBHOOK_TOKEN }}
+
+          # Workflow information
+          workflow_name: ${{ github.event.workflow_run.name }}
+          run_id: ${{ github.event.workflow_run.id }}
+          run_number: ${{ github.event.workflow_run.run_number }}
+          status: ${{ github.event.workflow_run.conclusion }}
+          actor: ${{ github.actor }}
+          repository: ${{ github.repository }}
+          ref: ${{ github.ref }}
+          sha: ${{ github.event.workflow_run.head_sha }}
+
+          pr_author_login: ${{ steps.pr_info.outputs.pr_author_login }}
+          pr_number: ${{ steps.pr_info.outputs.pr_number }}
+          pr_title: ${{ steps.pr_info.outputs.pr_title }}
+          pr_source: ${{ steps.pr_info.outputs.pr_source }}
diff --git a/bot/constants.py b/bot/constants.py
index c696b202..5e97fa2d 100644
--- a/bot/constants.py
+++ b/bot/constants.py
@@ -1,3 +1,4 @@
+import dataclasses
 import enum
 import logging
 from datetime import datetime
@@ -29,11 +30,27 @@ __all__ = (
 log = logging.getLogger(__name__)
 
 
-class AdventOfCodeLeaderboard(NamedTuple):
+class AdventOfCodeLeaderboard:
     id: str
-    session: str
+    _session: str
     join_code: str
 
+    # If we notice that the session for this board expired, we set
+    # this attribute to `True`. We will emit a Sentry error so we
+    # can handle it, but, in the meantime, we'll try using the
+    # fallback session to make sure the commands still work.
+    use_fallback_session: bool = False
+
+    @property
+    def session(self) -> str:
+        """Return either the actual `session` cookie or the fallback cookie."""
+        if self.use_fallback_session:
+            log.info(f"Returning fallback cookie for board `{self.id}`.")
+            return AdventOfCode.fallback_session
+
+        return self._session
+
 
 def _parse_aoc_leaderboard_env() -> Dict[str, AdventOfCodeLeaderboard]:
     """
@@ -61,6 +78,7 @@ class AdventOfCode:
     # Information for the several leaderboards we have
     leaderboards = _parse_aoc_leaderboard_env()
     staff_leaderboard_id = environ.get("AOC_STAFF_LEADERBOARD_ID", "")
+    fallback_session = environ.get("AOC_FALLBACK_SESSION", "")
 
     # Other Advent of Code constants
     ignored_days = environ.get("AOC_IGNORED_DAYS", "").split(",")
@@ -77,6 +95,7 @@ class Branding:
 class Channels(NamedTuple):
     admins = 365960823622991872
     advent_of_code = int(environ.get("AOC_CHANNEL_ID", 782715290437943306))
+    advent_of_code_commands = int(environ.get("AOC_COMMANDS_CHANNEL_ID", 607247579608121354))
     announcements = int(environ.get("CHANNEL_ANNOUNCEMENTS", 354619224620138496))
     big_brother_logs = 468507907357409333
     bot = 267659945086812160
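Editor's note: the new AdventOfCodeLeaderboard keeps the real cookie in `_session` and swaps in a shared fallback once `use_fallback_session` is set. Below is a self-contained sketch of that pattern, assuming a dataclass decorator (the diff adds `import dataclasses`, but the decorator line itself is not visible above); the FALLBACK_SESSION constant stands in for `AdventOfCode.fallback_session`, and all values are made up.

import dataclasses
import logging

log = logging.getLogger(__name__)
FALLBACK_SESSION = "fallback-cookie"  # stands in for AdventOfCode.fallback_session


@dataclasses.dataclass
class Leaderboard:
    id: str
    _session: str
    join_code: str
    use_fallback_session: bool = False

    @property
    def session(self) -> str:
        """Return the board's own cookie, or the fallback once it is marked expired."""
        if self.use_fallback_session:
            log.info(f"Returning fallback cookie for board `{self.id}`.")
            return FALLBACK_SESSION
        return self._session


board = Leaderboard(id="12345", _session="expired-cookie", join_code="12345-abcdef")
board.use_fallback_session = True
print(board.session)  # "fallback-cookie"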
diff --git a/bot/exts/christmas/advent_of_code/_cog.py b/bot/exts/christmas/advent_of_code/_cog.py
index 29dcc3cf..8c07cdb4 100644
--- a/bot/exts/christmas/advent_of_code/_cog.py
+++ b/bot/exts/christmas/advent_of_code/_cog.py
@@ -11,13 +11,17 @@ from bot.constants import (
     AdventOfCode as AocConfig, Channels, Colours, Emojis, Month, Roles, WHITELISTED_CHANNELS,
 )
 from bot.exts.christmas.advent_of_code import _helpers
-from bot.utils.decorators import in_month, override_in_channel, with_role
+from bot.utils.decorators import InChannelCheckFailure, in_month, override_in_channel, with_role
 
 log = logging.getLogger(__name__)
 
 AOC_REQUEST_HEADER = {"user-agent": "PythonDiscord AoC Event Bot"}
 
-AOC_WHITELIST = WHITELISTED_CHANNELS + (Channels.advent_of_code,)
+AOC_WHITELIST_RESTRICTED = WHITELISTED_CHANNELS + (Channels.advent_of_code_commands,)
+
+# Some commands can be run in the regular advent of code channel
+# They aren't spammy and foster discussion
+AOC_WHITELIST = AOC_WHITELIST_RESTRICTED + (Channels.advent_of_code,)
 
 
 class AdventOfCode(commands.Cog):
@@ -35,11 +39,15 @@ class AdventOfCode(commands.Cog):
         self.countdown_task = None
         self.status_task = None
 
-        announcement_coro = _helpers.new_puzzle_announcement(self.bot)
-        self.new_puzzle_announcement_task = self.bot.loop.create_task(announcement_coro)
+        notification_coro = _helpers.new_puzzle_announcement(self.bot)
+        self.notification_task = self.bot.loop.create_task(notification_coro)
+        self.notification_task.set_name("Daily AoC Notification")
+        self.notification_task.add_done_callback(_helpers.background_task_callback)
 
         status_coro = _helpers.countdown_status(self.bot)
         self.status_task = self.bot.loop.create_task(status_coro)
+        self.status_task.set_name("AoC Status Countdown")
+        self.status_task.add_done_callback(_helpers.background_task_callback)
 
     @commands.group(name="adventofcode", aliases=("aoc",))
     @override_in_channel(AOC_WHITELIST)
@@ -135,7 +143,11 @@ class AdventOfCode(commands.Cog):
         if AocConfig.staff_leaderboard_id and any(r.id == Roles.helpers for r in author.roles):
             join_code = AocConfig.leaderboards[AocConfig.staff_leaderboard_id].join_code
         else:
-            join_code = await _helpers.get_public_join_code(author)
+            try:
+                join_code = await _helpers.get_public_join_code(author)
+            except _helpers.FetchingLeaderboardFailed:
+                await ctx.send(":x: Failed to get join code! Notified maintainers.")
+                return
 
         if not join_code:
             log.error(f"Failed to get a join code for user {author} ({author.id})")
@@ -166,11 +178,16 @@ class AdventOfCode(commands.Cog):
         aliases=("board", "lb"),
         brief="Get a snapshot of the PyDis private AoC leaderboard",
     )
-    @override_in_channel(AOC_WHITELIST)
+    @override_in_channel(AOC_WHITELIST_RESTRICTED)
    async def aoc_leaderboard(self, ctx: commands.Context) -> None:
         """Get the current top scorers of the Python Discord Leaderboard."""
         async with ctx.typing():
-            leaderboard = await _helpers.fetch_leaderboard()
+            try:
+                leaderboard = await _helpers.fetch_leaderboard()
+            except _helpers.FetchingLeaderboardFailed:
+                await ctx.send(":x: Unable to fetch leaderboard!")
+                return
+
             number_of_participants = leaderboard["number_of_participants"]
             top_count = min(AocConfig.leaderboard_displayed_members, number_of_participants)
@@ -186,7 +203,7 @@ class AdventOfCode(commands.Cog):
         aliases=("globalboard", "gb"),
         brief="Get a link to the global leaderboard",
     )
-    @override_in_channel(AOC_WHITELIST)
+    @override_in_channel(AOC_WHITELIST_RESTRICTED)
     async def aoc_global_leaderboard(self, ctx: commands.Context) -> None:
         """Get a link to the global Advent of Code leaderboard."""
         url = self.global_leaderboard_url
@@ -202,10 +219,14 @@ class AdventOfCode(commands.Cog):
         aliases=("dailystats", "ds"),
         brief="Get daily statistics for the Python Discord leaderboard"
     )
-    @override_in_channel(AOC_WHITELIST)
+    @override_in_channel(AOC_WHITELIST_RESTRICTED)
     async def private_leaderboard_daily_stats(self, ctx: commands.Context) -> None:
         """Send an embed with daily completion statistics for the Python Discord leaderboard."""
-        leaderboard = await _helpers.fetch_leaderboard()
+        try:
+            leaderboard = await _helpers.fetch_leaderboard()
+        except _helpers.FetchingLeaderboardFailed:
+            await ctx.send(":x: Can't fetch leaderboard for stats right now!")
+            return
 
         # The daily stats are serialized as JSON as they have to be cached in Redis
         daily_stats = json.loads(leaderboard["daily_stats"])
@@ -237,8 +258,12 @@ class AdventOfCode(commands.Cog):
         many requests to the Advent of Code server.
         """
         async with ctx.typing():
-            await _helpers.fetch_leaderboard(invalidate_cache=True)
-            await ctx.send("\N{OK Hand Sign} Refreshed leaderboard cache!")
+            try:
+                await _helpers.fetch_leaderboard(invalidate_cache=True)
+            except _helpers.FetchingLeaderboardFailed:
+                await ctx.send(":x: Something went wrong while trying to refresh the cache!")
+            else:
+                await ctx.send("\N{OK Hand Sign} Refreshed leaderboard cache!")
 
     def cog_unload(self) -> None:
         """Cancel season-related tasks on cog unload."""
@@ -263,3 +288,9 @@ class AdventOfCode(commands.Cog):
         about_embed.set_footer(text="Last Updated")
 
         return about_embed
+
+    async def cog_command_error(self, ctx: commands.Context, error: Exception) -> None:
+        """Custom error handler if an advent of code command was posted in the wrong channel."""
+        if isinstance(error, InChannelCheckFailure):
+            await ctx.send(f":x: Please use <#{Channels.advent_of_code_commands}> for aoc commands instead.")
+            error.handled = True
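Editor's note: the cog-level handler above cooperates with the global error handler through an ad-hoc `handled` attribute on the exception. Here is a minimal sketch of that handshake outside of discord.py; the class and function names are illustrative, not part of the bot's API.

class CheckFailure(Exception):
    """Stand-in for a command check failure."""


def local_handler(error: Exception) -> None:
    """Cog-level handler: deal with the error and mark it as handled."""
    if isinstance(error, CheckFailure):
        print("Please use the commands channel instead.")
        error.handled = True  # attribute read later by the global handler


def global_handler(error: Exception) -> None:
    """Global handler: skip anything a local handler already dealt with."""
    if getattr(error, "handled", False):
        return
    print(f"Unhandled error: {error!r}")


error = CheckFailure("wrong channel")
local_handler(error)
global_handler(error)  # prints nothing extra; the error was already handled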
diff --git a/bot/exts/christmas/advent_of_code/_helpers.py b/bot/exts/christmas/advent_of_code/_helpers.py
index 7a6d873e..f8c0dc22 100644
--- a/bot/exts/christmas/advent_of_code/_helpers.py
+++ b/bot/exts/christmas/advent_of_code/_helpers.py
@@ -56,7 +56,19 @@ COUNTDOWN_STEP = 60 * 5
 # Create namedtuple that combines a participant's name and their completion
 # time for a specific star. We're going to use this later to order the results
 # for each star to compute the rank score.
-_StarResult = collections.namedtuple("StarResult", "name completion_time")
+StarResult = collections.namedtuple("StarResult", "member_id completion_time")
+
+
+class UnexpectedRedirect(aiohttp.ClientError):
+    """Raised when an unexpected redirect was detected."""
+
+
+class UnexpectedResponseStatus(aiohttp.ClientError):
+    """Raised when an unexpected redirect was detected."""
+
+
+class FetchingLeaderboardFailed(Exception):
+    """Raised when one or more leaderboards could not be fetched at all."""
 
 
 def leaderboard_sorting_function(entry: typing.Tuple[str, dict]) -> typing.Tuple[int, int]:
@@ -67,7 +79,7 @@ def leaderboard_sorting_function(entry: typing.Tuple[str, dict]) -> typing.Tuple
     secondary on the number of stars someone has completed.
     """
     result = entry[1]
-    return result["score"], result["star_2_count"] + result["star_1_count"]
+    return result["score"], result["star_2"] + result["star_1"]
 
 
 def _parse_raw_leaderboard_data(raw_leaderboard_data: dict) -> dict:
@@ -96,30 +108,34 @@ def _parse_raw_leaderboard_data(raw_leaderboard_data: dict) -> dict:
     # star view. We need that per star view to compute rank scores per star.
     for member in raw_leaderboard_data.values():
         name = member["name"] if member["name"] else f"Anonymous #{member['id']}"
-        leaderboard[name] = {"score": 0, "star_1_count": 0, "star_2_count": 0}
+        member_id = member['id']
+        leaderboard[member_id] = {"name": name, "score": 0, "star_1": 0, "star_2": 0}
 
         # Iterate over all days for this participant
         for day, stars in member["completion_day_level"].items():
             # Iterate over the complete stars for this day for this participant
             for star, data in stars.items():
                 # Record completion of this star for this individual
-                leaderboard[name][f"star_{star}_count"] += 1
+                leaderboard[member_id][f"star_{star}"] += 1
 
                 # Record completion datetime for this participant for this day/star
                 completion_time = datetime.datetime.fromtimestamp(int(data['get_star_ts']))
                 star_results[(day, star)].append(
-                    _StarResult(name=name, completion_time=completion_time)
+                    StarResult(member_id=member_id, completion_time=completion_time)
                 )
 
     # Now that we have a transposed dataset that holds the completion time of all
     # participants per star, we can compute the rank-based scores each participant
     # should get for that star.
     max_score = len(leaderboard)
-    for(day, _star), results in star_results.items():
+    for (day, _star), results in star_results.items():
+        # If this day should not count in the ranking, skip it.
         if day in AdventOfCode.ignored_days:
             continue
-        for rank, star_result in enumerate(sorted(results, key=operator.itemgetter(1))):
-            leaderboard[star_result.name]["score"] += max_score - rank
+
+        sorted_result = sorted(results, key=operator.attrgetter('completion_time'))
+        for rank, star_result in enumerate(sorted_result):
+            leaderboard[star_result.member_id]["score"] += max_score - rank
 
     # Since dictionaries now retain insertion order, let's use that
     sorted_leaderboard = dict(
@@ -139,22 +155,39 @@ def _parse_raw_leaderboard_data(raw_leaderboard_data: dict) -> dict:
     return {"daily_stats": daily_stats, "leaderboard": sorted_leaderboard}
 
 
-def _format_leaderboard(leaderboard: typing.Dict[str, int]) -> str:
+def _format_leaderboard(leaderboard: typing.Dict[str, dict]) -> str:
     """Format the leaderboard using the AOC_TABLE_TEMPLATE."""
     leaderboard_lines = [HEADER]
-    for rank, (name, results) in enumerate(leaderboard.items(), start=1):
+    for rank, data in enumerate(leaderboard.values(), start=1):
         leaderboard_lines.append(
             AOC_TABLE_TEMPLATE.format(
                 rank=rank,
-                name=name,
-                score=str(results["score"]),
-                stars=f"({results['star_1_count']}, {results['star_2_count']})"
+                name=data["name"],
+                score=str(data["score"]),
+                stars=f"({data['star_1']}, {data['star_2']})"
             )
         )
 
     return "\n".join(leaderboard_lines)
 
 
+async def _leaderboard_request(url: str, board: int, cookies: dict) -> typing.Optional[dict]:
+    """Make a leaderboard request using the specified session cookie."""
+    async with aiohttp.request("GET", url, headers=AOC_REQUEST_HEADER, cookies=cookies) as resp:
+        # The Advent of Code website redirects silently with a 200 response if a
+        # session cookie has expired, is invalid, or was not provided.
+        if str(resp.url) != url:
+            log.error(f"Fetching leaderboard `{board}` failed! Check the session cookie.")
+            raise UnexpectedRedirect(f"redirected unexpectedly to {resp.url} for board `{board}`")
+
+        # Every status other than `200` is unexpected, not only 400+
+        if not resp.status == 200:
+            log.error(f"Unexpected response `{resp.status}` while fetching leaderboard `{board}`")
+            raise UnexpectedResponseStatus(f"status `{resp.status}`")
+
+        return await resp.json()
+
+
 async def _fetch_leaderboard_data() -> typing.Dict[str, typing.Any]:
     """Fetch data for all leaderboards and return a pooled result."""
     year = AdventOfCode.year
@@ -167,22 +200,34 @@ async def _fetch_leaderboard_data() -> typing.Dict[str, typing.Any]:
     participants = {}
     for leaderboard in AdventOfCode.leaderboards.values():
         leaderboard_url = AOC_API_URL.format(year=year, leaderboard_id=leaderboard.id)
-        cookies = {"session": leaderboard.session}
 
-        # We don't need to create a session if we're going to throw it away after each request
-        async with aiohttp.request(
-            "GET", leaderboard_url, headers=AOC_REQUEST_HEADER, cookies=cookies
-        ) as resp:
-            if resp.status == 200:
-                raw_data = await resp.json()
-
-                # Get the participants and store their current count
+        # Two attempts, one with the original session cookie and one with the fallback session
+        for attempt in range(1, 3):
+            log.info(f"Attempting to fetch leaderboard `{leaderboard.id}` ({attempt}/2)")
+            cookies = {"session": leaderboard.session}
+            try:
+                raw_data = await _leaderboard_request(leaderboard_url, leaderboard.id, cookies)
+            except UnexpectedRedirect:
+                if cookies["session"] == AdventOfCode.fallback_session:
+                    log.error("It seems like the fallback cookie has expired!")
+                    raise FetchingLeaderboardFailed from None
+
+                # If we're here, it means that the original session did not
+                # work. Let's fall back to the fallback session.
+                leaderboard.use_fallback_session = True
+                continue
+            except aiohttp.ClientError:
+                # Don't retry, something unexpected is wrong and it may not be the session.
+                raise FetchingLeaderboardFailed from None
+            else:
+                # Get the participants and store their current count.
                 board_participants = raw_data["members"]
                 await _caches.leaderboard_counts.set(leaderboard.id, len(board_participants))
                 participants.update(board_participants)
-            else:
-                log.warning(f"Fetching data failed for leaderboard `{leaderboard.id}`")
-                resp.raise_for_status()
+                break
+        else:
+            log.error(f"reached 'unreachable' state while fetching board `{leaderboard.id}`.")
+            raise FetchingLeaderboardFailed
 
     log.info(f"Fetched leaderboard information for {len(participants)} participants")
     return participants
@@ -532,3 +577,13 @@ async def new_puzzle_announcement(bot: Bot) -> None:
         # over midnight. This means we're certain to calculate the time to the
         # next midnight at the top of the loop.
         await asyncio.sleep(120)
+
+
+def background_task_callback(task: asyncio.Task) -> None:
+    """Check if the finished background task failed to make sure we log errors."""
+    if task.cancelled():
+        log.info(f"Background task `{task.get_name()}` was cancelled.")
+    elif exception := task.exception():
+        log.error(f"Background task `{task.get_name()}` failed:", exc_info=exception)
+    else:
+        log.info(f"Background task `{task.get_name()}` exited normally.")
diff --git a/bot/exts/evergreen/error_handler.py b/bot/exts/evergreen/error_handler.py
index 6e518435..99af1519 100644
--- a/bot/exts/evergreen/error_handler.py
+++ b/bot/exts/evergreen/error_handler.py
@@ -42,8 +42,8 @@ class CommandErrorHandler(commands.Cog):
     @commands.Cog.listener()
     async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:
         """Activates when a command opens an error."""
-        if hasattr(ctx.command, 'on_error'):
-            logging.debug("A command error occured but the command had it's own error handler.")
+        if getattr(error, 'handled', False):
+            logging.debug(f"Command {ctx.command} had its error already handled locally; ignoring.")
             return
 
         error = getattr(error, 'original', error)
diff --git a/bot/exts/evergreen/snakes/_snakes_cog.py b/bot/exts/evergreen/snakes/_snakes_cog.py
index 70bb0e73..4fa4dcd1 100644
--- a/bot/exts/evergreen/snakes/_snakes_cog.py
+++ b/bot/exts/evergreen/snakes/_snakes_cog.py
@@ -15,7 +15,7 @@ import aiohttp
 import async_timeout
 from PIL import Image, ImageDraw, ImageFont
 from discord import Colour, Embed, File, Member, Message, Reaction
-from discord.ext.commands import BadArgument, Bot, Cog, CommandError, Context, bot_has_permissions, group
+from discord.ext.commands import Bot, Cog, CommandError, Context, bot_has_permissions, group
 
 from bot.constants import ERROR_REPLIES, Tokens
 from bot.exts.evergreen.snakes import _utils as utils
@@ -1126,26 +1126,15 @@ class Snakes(Cog):
     # endregion
 
     # region: Error handlers
-    @get_command.error
     @card_command.error
-    @video_command.error
     async def command_error(self, ctx: Context, error: CommandError) -> None:
         """Local error handler for the Snake Cog."""
-        embed = Embed()
-        embed.colour = Colour.red()
-
-        if isinstance(error, BadArgument):
-            embed.description = str(error)
-            embed.title = random.choice(ERROR_REPLIES)
-
-        elif isinstance(error, OSError):
-            log.error(f"snake_card encountered an OSError: {error} ({error.original})")
+        original_error = getattr(error, "original", None)
+        if isinstance(original_error, OSError):
+            error.handled = True
+            embed = Embed()
+            embed.colour = Colour.red()
+            log.error(f"snake_card encountered an OSError: {error} ({original_error})")
             embed.description = "Could not generate the snake card! Please try again."
             embed.title = random.choice(ERROR_REPLIES)
-
-        else:
-            log.error(f"Unhandled tag command error: {error} ({error.original})")
-            return
-
-        await ctx.send(embed=embed)
-    # endregion
+            await ctx.send(embed=embed)
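Editor's note: the merged branch's central addition is the done-callback that logs background task failures. The sketch below shows the same pattern in a self-contained script; the task name and the failing coroutine are made up for illustration.

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


def background_task_callback(task: asyncio.Task) -> None:
    """Log how a finished background task ended, so failures are never silent."""
    if task.cancelled():
        log.info(f"Background task `{task.get_name()}` was cancelled.")
    elif exception := task.exception():
        log.error(f"Background task `{task.get_name()}` failed:", exc_info=exception)
    else:
        log.info(f"Background task `{task.get_name()}` exited normally.")


async def flaky_job() -> None:
    await asyncio.sleep(0.1)
    raise RuntimeError("simulated failure")


async def main() -> None:
    task = asyncio.get_running_loop().create_task(flaky_job())
    task.set_name("Daily AoC Notification")
    task.add_done_callback(background_task_callback)
    await asyncio.sleep(0.2)  # give the task time to finish so the callback logs the error


asyncio.run(main())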