-rw-r--r--  .github/CODEOWNERS                             |    4
-rw-r--r--  .github/review-policy.yml                      |    3
-rw-r--r--  .github/workflows/build.yaml                   |    2
-rw-r--r--  .github/workflows/lint.yaml                    |   22
-rw-r--r--  .github/workflows/review-check.yaml            |  166
-rw-r--r--  .github/workflows/sentry_release.yaml          |   24
-rw-r--r--  .github/workflows/status_embed.yaml            |   73
-rw-r--r--  .pre-commit-config.yaml                        |    2
-rw-r--r--  Dockerfile                                     |    6
-rw-r--r--  Pipfile                                        |    2
-rw-r--r--  Pipfile.lock                                   |  136
-rw-r--r--  bot/__init__.py                                |    5
-rw-r--r--  bot/__main__.py                                |    9
-rw-r--r--  bot/constants.py                               |   68
-rw-r--r--  bot/exts/christmas/advent_of_code/__init__.py  |   10
-rw-r--r--  bot/exts/christmas/advent_of_code/_caches.py   |    5
-rw-r--r--  bot/exts/christmas/advent_of_code/_cog.py      |  296
-rw-r--r--  bot/exts/christmas/advent_of_code/_helpers.py  |  592
-rw-r--r--  bot/exts/christmas/adventofcode.py             |  743
-rw-r--r--  bot/exts/evergreen/error_handler.py            |    4
-rw-r--r--  bot/exts/evergreen/snakes/_snakes_cog.py       |  128
-rw-r--r--  bot/exts/pride/pride_avatar.py                 |  107
-rw-r--r--  bot/resources/advent_of_code/about.json        |    8
23 files changed, 1313 insertions(+), 1102 deletions(-)
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 6afbfb31..16e89359 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,3 +1,7 @@
+# Extensions groups
+bot/exts/christmas/** @ks129
+bot/exts/halloween/** @ks129
+
# CI & Docker
.github/workflows/** @Akarys42 @SebastiaanZ @Den4200
Dockerfile @Akarys42 @Den4200
diff --git a/.github/review-policy.yml b/.github/review-policy.yml
new file mode 100644
index 00000000..421b30f8
--- /dev/null
+++ b/.github/review-policy.yml
@@ -0,0 +1,3 @@
+remote: python-discord/.github
+path: review-policies/core-developers.yml
+ref: main
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index b0c03139..9d12cd10 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -61,6 +61,8 @@ jobs:
tags: |
ghcr.io/python-discord/sir-lancebot:latest
ghcr.io/python-discord/sir-lancebot:${{ steps.sha_tag.outputs.tag }}
+ build-args: |
+ git_sha=${{ github.sha }}
- name: Authenticate with Kubernetes
uses: azure/k8s-set-context@v1
diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index 063f406c..a5f45255 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -91,3 +91,25 @@ jobs:
- name: Run flake8
run: "flake8 \
--format='::error file=%(path)s,line=%(row)d,col=%(col)d::[flake8] %(code)s: %(text)s'"
+
+ # Prepare the Pull Request Payload artifact. If this fails,
+ # we fail silently using the `continue-on-error` option. It's
+ # nice if this succeeds, but if it fails for any reason, it
+ # does not mean that our lint checks failed.
+ - name: Prepare Pull Request Payload artifact
+ id: prepare-artifact
+ if: always() && github.event_name == 'pull_request'
+ continue-on-error: true
+ run: cat $GITHUB_EVENT_PATH | jq '.pull_request' > pull_request_payload.json
+
+ # This only makes sense if the previous step succeeded. To
+ # get the original outcome of the previous step before the
+ # `continue-on-error` conclusion is applied, we use the
+ # `.outcome` value. This step also fails silently.
+ - name: Upload a Build Artifact
+ if: always() && steps.prepare-artifact.outcome == 'success'
+ continue-on-error: true
+ uses: actions/upload-artifact@v2
+ with:
+ name: pull-request-payload
+ path: pull_request_payload.json
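
The new step above extracts the `pull_request` object from the webhook payload that GitHub Actions writes to `$GITHUB_EVENT_PATH`, so the Status Embed workflow can later download it as an artifact. A minimal Python sketch of the same extraction, for illustration only (the workflow itself uses jq):

import json
import os

# GITHUB_EVENT_PATH points at the JSON payload of the event that triggered
# the workflow run; for pull_request events it contains a top-level
# "pull_request" object.
with open(os.environ["GITHUB_EVENT_PATH"], encoding="utf-8") as event_file:
    event = json.load(event_file)

# Equivalent of `jq '.pull_request'`: keep only the pull request object.
with open("pull_request_payload.json", "w", encoding="utf-8") as payload_file:
    json.dump(event.get("pull_request"), payload_file)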
diff --git a/.github/workflows/review-check.yaml b/.github/workflows/review-check.yaml
deleted file mode 100644
index 3e45a4b5..00000000
--- a/.github/workflows/review-check.yaml
+++ /dev/null
@@ -1,166 +0,0 @@
-name: Review Check
-
-# This workflow needs to trigger in two situations:
-#
-# 1. When a pull request is opened, reopened, or synchronized (new commit)
-# This is accomplished using the `pull_request_target` event that triggers in
-# precisely those situations by default. I've opted for `pull_request_target`
-# as we don't need to have access to the PR's code and it's safer to make the
-# secrets we need available to the workflow compared to `pull_request`.
-#
-# The reason we need to run the workflow for this event is because we need to
-# make sure that our check is part of the check suite for the current commit.
-#
-# 2. When a review is added or dismissed.
-# Whenever reviews are submitted or dismissed, the number of Core Developer
-# approvals may obviously change.
-#
-# ---
-#
-# Unfortunately, having two different event triggers means that we can't let
-# this workflow fail on its own, as GitHub actions registers a separate check
-# run result per event trigger. As both triggers need to share the success/fail
-# state, we get around that by registering a custom "status".
-on:
- pull_request_review:
- types:
- - submitted
- - dismissed
- pull_request_target:
-
-
-jobs:
- review-check:
- name: Check Core Dev Reviews
- runs-on: ubuntu-latest
-
- steps:
- # Fetch the latest Opinionated reviews from users with write
- # access. We can't narrow it down using a specific team here
- # yet, so we'll do that later.
- - uses: octokit/[email protected]
- id: reviews
- with:
- query: |
- query ($repository: String!, $pr: Int!) {
- repository(owner: "python-discord", name: $repository) {
- pullRequest(number: $pr) {
- latestOpinionatedReviews(last: 100, writersOnly: true) {
- nodes{
- author{
- login
- }
- state
- }
- }
- }
- }
- }
- repository: ${{ github.event.repository.name }}
- pr: ${{ github.event.pull_request.number }}
- env:
- GITHUB_TOKEN: ${{ secrets.REPO_TOKEN }}
-
- # Fetch the members of the Core Developers team so we can
- # check if any of them actually approved this PR.
- - uses: octokit/[email protected]
- id: core_developers
- with:
- query: |
- query {
- organization(login: "python-discord") {
- team(slug: "core-developers") {
- members(first: 100) {
- nodes {
- login
- }
- }
- }
- }
- }
- env:
- GITHUB_TOKEN: ${{ secrets.TEAM_TOKEN }}
-
- # I've opted for a Python script, as that's what most of us
- # are familiar with. We do need to setup Python for that.
- - name: Setup python
- id: python
- uses: actions/setup-python@v2
- with:
- python-version: '3.9'
-
- # This is a small, inline Python script that looks for the
- # intersection between approving reviewers and the core dev
- # team. If that intersection exists, we have at least one
- # approving Core Developer.
- #
- # I've opted to keep this inline as it's relatively small
- # and this workflow will be added to multiple repositories.
- - name: Check for Accepting Core Developers
- id: core_dev_reviews
- run: |
- python -c 'import json
- reviews = json.loads("""${{ steps.reviews.outputs.data }}""")
- reviewers = {
- review["author"]["login"]
- for review in reviews["repository"]["pullRequest"]["latestOpinionatedReviews"]["nodes"]
- if review["state"] == "APPROVED"
- }
- core_devs = json.loads("""${{ steps.core_developers.outputs.data }}""")
- core_devs = {
- member["login"] for member in core_devs["organization"]["team"]["members"]["nodes"]
- }
- approving_core_devs = reviewers & core_devs
- approval_check = "success" if approving_core_devs else "failure"
- print(f"::set-output name=approval_check::{approval_check}")
- '
-
- # This step registers a new status for the head commit of the pull
- # request. If a status with the same context and description already
- # exists, it will be overwritten. The reason we have to do this is
- # because the workflow runs triggered by the separate `pull_request_target`
- # and `pull_request_review` events need to share a single result state.
- - name: Add Core Dev Approval status check
- uses: octokit/[email protected]
- with:
- route: POST /repos/:repository/statuses/:sha
- repository: ${{ github.repository }}
- sha: ${{ github.event.pull_request.head.sha }}
- state: ${{ steps.core_dev_reviews.outputs.approval_check }}
- description: At least one core developer needs to approve this PR
- context: Core Dev Approval
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- # If we have at least one Core Developer approval, this step
- # removes the 'waiting for core dev approval' label if it's
- # still present for the PR.
- - name: Remove "waiting for core dev approval" if a core dev approved this PR
- if: >-
- steps.core_dev_reviews.outputs.approval_check == 'success' &&
- contains(github.event.pull_request.labels.*.name, 'waiting for core dev approval')
- uses: octokit/[email protected]
- with:
- route: DELETE /repos/:repository/issues/:number/labels/:label
- repository: ${{ github.repository }}
- number: ${{ github.event.pull_request.number }}
- label: needs core dev approval
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- # If we do not have a Core Developer approval, this step
- # adds the 'waiting for core dev approval' label if it's not
- # already present for the PR.
- - name: Add "waiting for core dev approval" if no core dev has approved yet
- if: >-
- steps.core_dev_reviews.outputs.approval_check == 'failure' &&
- !contains(github.event.pull_request.labels.*.name, 'waiting for core dev approval')
- uses: octokit/[email protected]
- with:
- route: POST /repos/:repository/issues/:number/labels
- repository: ${{ github.repository }}
- number: ${{ github.event.pull_request.number }}
- labels: |
- - needs core dev approval
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/sentry_release.yaml b/.github/workflows/sentry_release.yaml
new file mode 100644
index 00000000..0e02dd0c
--- /dev/null
+++ b/.github/workflows/sentry_release.yaml
@@ -0,0 +1,24 @@
+name: Create Sentry release
+
+on:
+ push:
+ branches:
+ - master
+
+jobs:
+ create_sentry_release:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@master
+
+ - name: Create a Sentry.io release
+ uses: tclindner/[email protected]
+ env:
+ SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
+ SENTRY_ORG: python-discord
+ SENTRY_PROJECT: sir-lancebot
+ with:
+ tagName: ${{ github.sha }}
+ environment: production
+ releaseNamePrefix: sir-lancebot@
diff --git a/.github/workflows/status_embed.yaml b/.github/workflows/status_embed.yaml
new file mode 100644
index 00000000..28caa8c2
--- /dev/null
+++ b/.github/workflows/status_embed.yaml
@@ -0,0 +1,73 @@
+name: Status Embed
+
+on:
+ workflow_run:
+ workflows:
+ - Lint
+ - Build
+ types:
+ - completed
+
+jobs:
+ status_embed:
+ # We send the embed in the following situations:
+ # - Always after the `Build` workflow, as it runs at the
+ # end of our workflow sequence regardless of status.
+ # - Always for the `pull_request` event, as it only
+ # runs one workflow.
+ # - Always for non-success workflows, as they
+ # terminate the workflow sequence.
+ if: >-
+ (github.event.workflow_run.name == 'Build' && github.event.workflow_run.conclusion != 'skipped') ||
+ github.event.workflow_run.event == 'pull_request' ||
+ github.event.workflow_run.conclusion == 'failure' ||
+ github.event.workflow_run.conclusion == 'cancelled'
+ name: Send Status Embed to Discord
+ runs-on: ubuntu-latest
+
+ steps:
+ # A workflow_run event does not contain all the information
+ # we need for a PR embed. That's why we upload an artifact
+ # with that information in the Lint workflow.
+ - name: Get Pull Request Information
+ id: pr_info
+ if: github.event.workflow_run.event == 'pull_request'
+ run: |
+ curl -s -H "Authorization: token $GITHUB_TOKEN" ${{ github.event.workflow_run.artifacts_url }} > artifacts.json
+ DOWNLOAD_URL=$(cat artifacts.json | jq -r '.artifacts[] | select(.name == "pull-request-payload") | .archive_download_url')
+ [ -z "$DOWNLOAD_URL" ] && exit 1
+ wget --quiet --header="Authorization: token $GITHUB_TOKEN" -O pull_request_payload.zip $DOWNLOAD_URL || exit 2
+ unzip -p pull_request_payload.zip > pull_request_payload.json
+ [ -s pull_request_payload.json ] || exit 3
+ echo "::set-output name=pr_author_login::$(jq -r '.user.login // empty' pull_request_payload.json)"
+ echo "::set-output name=pr_number::$(jq -r '.number // empty' pull_request_payload.json)"
+ echo "::set-output name=pr_title::$(jq -r '.title // empty' pull_request_payload.json)"
+ echo "::set-output name=pr_source::$(jq -r '.head.label // empty' pull_request_payload.json)"
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ # Send an informational status embed to Discord instead of the
+ # standard embeds that Discord sends. This embed will contain
+ # more information and we can fine tune when we actually want
+ # to send an embed.
+ - name: GitHub Actions Status Embed for Discord
+ uses: SebastiaanZ/[email protected]
+ with:
+ # Our GitHub Actions webhook
+ webhook_id: '784184528997842985'
+ webhook_token: ${{ secrets.GHA_WEBHOOK_TOKEN }}
+
+ # Workflow information
+ workflow_name: ${{ github.event.workflow_run.name }}
+ run_id: ${{ github.event.workflow_run.id }}
+ run_number: ${{ github.event.workflow_run.run_number }}
+ status: ${{ github.event.workflow_run.conclusion }}
+ actor: ${{ github.actor }}
+ repository: ${{ github.repository }}
+ ref: ${{ github.ref }}
+ sha: ${{ github.event.workflow_run.head_sha }}
+
+ pr_author_login: ${{ steps.pr_info.outputs.pr_author_login }}
+ pr_number: ${{ steps.pr_info.outputs.pr_number }}
+ pr_title: ${{ steps.pr_info.outputs.pr_title }}
+ pr_source: ${{ steps.pr_info.outputs.pr_source }}
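
The "Get Pull Request Information" step above lists the triggering run's artifacts, downloads the `pull-request-payload` zip archive, and re-exports a few PR fields as step outputs. A rough Python sketch of that flow, assuming the third-party `requests` library and that the caller supplies the token and artifacts URL through the illustrative `GITHUB_TOKEN` and `ARTIFACTS_URL` environment variables; the artifact name and the JSON fields read here (`user.login`, `number`, `title`, `head.label`) are the ones produced by the Lint workflow's upload step:

import io
import json
import os
import zipfile

import requests

headers = {"Authorization": f"token {os.environ['GITHUB_TOKEN']}"}

# List the run's artifacts and find the one uploaded by the Lint workflow.
artifacts = requests.get(os.environ["ARTIFACTS_URL"], headers=headers).json()
download_url = next(
    artifact["archive_download_url"]
    for artifact in artifacts["artifacts"]
    if artifact["name"] == "pull-request-payload"
)

# Artifacts are served as zip archives; extract the JSON payload from it.
archive = requests.get(download_url, headers=headers)
with zipfile.ZipFile(io.BytesIO(archive.content)) as zip_file:
    payload = json.loads(zip_file.read("pull_request_payload.json"))

print(payload["user"]["login"], payload["number"], payload["title"], payload["head"]["label"])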
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index be57904e..a66bf97c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,6 +20,6 @@ repos:
name: Flake8
description: This hook runs flake8 within our project's pipenv environment.
entry: pipenv run flake8
- language: python
+ language: system
types: [python]
require_serial: true
diff --git a/Dockerfile b/Dockerfile
index 40a77414..328984ad 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,14 @@
FROM python:3.8-slim
+# Set SHA build argument
+ARG git_sha="development"
+
# Set pip to have cleaner logs and no saved cache
ENV PIP_NO_CACHE_DIR=false \
PIPENV_HIDE_EMOJIS=1 \
PIPENV_IGNORE_VIRTUALENVS=1 \
- PIPENV_NOSPIN=1
+ PIPENV_NOSPIN=1 \
+ GIT_SHA=$git_sha
# Install git to be able to download git dependencies in the Pipfile
RUN apt-get -y update \
diff --git a/Pipfile b/Pipfile
index 100d51a1..c382902f 100644
--- a/Pipfile
+++ b/Pipfile
@@ -10,7 +10,7 @@ beautifulsoup4 = "~=4.8"
fuzzywuzzy = "~=0.17"
pillow = "~=7.2"
pytz = "~=2019.2"
-sentry-sdk = "~=0.14.2"
+sentry-sdk = "~=0.19"
PyYAML = "~=5.3.1"
"discord.py" = {extras = ["voice"], version = "~=1.5.1"}
async-rediscache = {extras = ["fakeredis"], version = "~=0.1.4"}
diff --git a/Pipfile.lock b/Pipfile.lock
index 779d986c..be6f9574 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "c358b14c467cb5ac9f3827e7835ce338ec6750f708bc5a11735163cf4f095f2d"
+ "sha256": "9be419062bd9db364ac9dddfcd50aef9c932384b45850363e482591fe7d12403"
},
"pipfile-spec": 6,
"requires": {
@@ -96,51 +96,51 @@
},
"certifi": {
"hashes": [
- "sha256:1f422849db327d534e3d0c5f02a263458c3955ec0aae4ff09b95f195c59f4edd",
- "sha256:f05def092c44fbf25834a51509ef6e631dc19765ab8a57b4e7ab85531f0a9cf4"
+ "sha256:1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c",
+ "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830"
],
- "version": "==2020.11.8"
+ "version": "==2020.12.5"
},
"cffi": {
"hashes": [
- "sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d",
- "sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b",
- "sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4",
- "sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f",
- "sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3",
- "sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579",
- "sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537",
- "sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e",
- "sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05",
- "sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171",
- "sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca",
- "sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522",
- "sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c",
- "sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc",
- "sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d",
- "sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808",
- "sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828",
- "sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869",
- "sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d",
- "sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9",
- "sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0",
- "sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc",
- "sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15",
- "sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c",
- "sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a",
- "sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3",
- "sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1",
- "sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768",
- "sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d",
- "sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b",
- "sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e",
- "sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d",
- "sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730",
- "sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394",
- "sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1",
- "sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591"
- ],
- "version": "==1.14.3"
+ "sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e",
+ "sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d",
+ "sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a",
+ "sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec",
+ "sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362",
+ "sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668",
+ "sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c",
+ "sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b",
+ "sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06",
+ "sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698",
+ "sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2",
+ "sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c",
+ "sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7",
+ "sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009",
+ "sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03",
+ "sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b",
+ "sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909",
+ "sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53",
+ "sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35",
+ "sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26",
+ "sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b",
+ "sha256:a5ed8c05548b54b998b9498753fb9cadbfd92ee88e884641377d8a8b291bcc01",
+ "sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb",
+ "sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293",
+ "sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd",
+ "sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d",
+ "sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3",
+ "sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d",
+ "sha256:d5ff0621c88ce83a28a10d2ce719b2ee85635e85c515f12bac99a95306da4b2e",
+ "sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca",
+ "sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d",
+ "sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775",
+ "sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375",
+ "sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b",
+ "sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b",
+ "sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f"
+ ],
+ "version": "==1.14.4"
},
"chardet": {
"hashes": [
@@ -162,10 +162,10 @@
},
"fakeredis": {
"hashes": [
- "sha256:8070b7fce16f828beaef2c757a4354af91698685d5232404f1aeeb233529c7a5",
- "sha256:f8c8ea764d7b6fd801e7f5486e3edd32ca991d506186f1923a01fc072e33c271"
+ "sha256:01cb47d2286825a171fb49c0e445b1fa9307087e07cbb3d027ea10dbff108b6a",
+ "sha256:2c6041cf0225889bc403f3949838b2c53470a95a9e2d4272422937786f5f8f73"
],
- "version": "==1.4.4"
+ "version": "==1.4.5"
},
"fuzzywuzzy": {
"hashes": [
@@ -381,11 +381,13 @@
"sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97",
"sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76",
"sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2",
+ "sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e",
"sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648",
"sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf",
"sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f",
"sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2",
"sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee",
+ "sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a",
"sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d",
"sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c",
"sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"
@@ -403,11 +405,11 @@
},
"sentry-sdk": {
"hashes": [
- "sha256:0e5e947d0f7a969314aa23669a94a9712be5a688ff069ff7b9fc36c66adc160c",
- "sha256:799a8bf76b012e3030a881be00e97bc0b922ce35dde699c6537122b751d80e2c"
+ "sha256:0a711ec952441c2ec89b8f5d226c33bc697914f46e876b44a4edd3e7864cf4d0",
+ "sha256:737a094e49a529dd0fdcaafa9e97cf7c3d5eb964bd229821d640bc77f3502b3f"
],
"index": "pypi",
- "version": "==0.14.4"
+ "version": "==0.19.5"
},
"six": {
"hashes": [
@@ -426,11 +428,11 @@
},
"soupsieve": {
"hashes": [
- "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55",
- "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232"
+ "sha256:4bb21a6ee4707bf43b61230e80740e71bfe56e55d1f1f50924b087bb2975c851",
+ "sha256:6dc52924dc0bc710a5d16794e6b3480b2c7c08b07729505feab2b2c16661ff6e"
],
"markers": "python_version >= '3.0'",
- "version": "==2.0.1"
+ "version": "==2.1"
},
"urllib3": {
"hashes": [
@@ -520,11 +522,11 @@
},
"flake8-bugbear": {
"hashes": [
- "sha256:a3ddc03ec28ba2296fc6f89444d1c946a6b76460f859795b35b77d4920a51b63",
- "sha256:bd02e4b009fb153fe6072c31c52aeab5b133d508095befb2ffcf3b41c4823162"
+ "sha256:528020129fea2dea33a466b9d64ab650aa3e5f9ffc788b70ea4bc6cf18283538",
+ "sha256:f35b8135ece7a014bc0aee5b5d485334ac30a6da48494998cc1fabf7ec70d703"
],
"index": "pypi",
- "version": "==20.1.4"
+ "version": "==20.11.1"
},
"flake8-docstrings": {
"hashes": [
@@ -559,11 +561,11 @@
},
"flake8-tidy-imports": {
"hashes": [
- "sha256:62059ca07d8a4926b561d392cbab7f09ee042350214a25cf12823384a45d27dd",
- "sha256:c30b40337a2e6802ba3bb611c26611154a27e94c53fc45639e3e282169574fd3"
+ "sha256:52e5f2f987d3d5597538d5941153409ebcab571635835b78f522c7bf03ca23bc",
+ "sha256:76e36fbbfdc8e3c5017f9a216c2855a298be85bc0631e66777f4e6a07a859dc4"
],
"index": "pypi",
- "version": "==4.1.0"
+ "version": "==4.2.1"
},
"flake8-todo": {
"hashes": [
@@ -574,11 +576,11 @@
},
"identify": {
"hashes": [
- "sha256:5dd84ac64a9a115b8e0b27d1756b244b882ad264c3c423f42af8235a6e71ca12",
- "sha256:c9504ba6a043ee2db0a9d69e43246bc138034895f6338d5aed1b41e4a73b1513"
+ "sha256:943cd299ac7f5715fcb3f684e2fc1594c1e0f22a90d15398e5888143bd4144b5",
+ "sha256:cc86e6a9a390879dcc2976cef169dd9cc48843ed70b7380f321d1b118163c60e"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==1.5.9"
+ "version": "==1.5.10"
},
"mccabe": {
"hashes": [
@@ -604,11 +606,11 @@
},
"pre-commit": {
"hashes": [
- "sha256:22e6aa3bd571debb01eb7d34483f11c01b65237be4eebbf30c3d4fb65762d315",
- "sha256:905ebc9b534b991baec87e934431f2d0606ba27f2b90f7f652985f5a5b8b6ae6"
+ "sha256:6c86d977d00ddc8a60d68eec19f51ef212d9462937acf3ea37c7adec32284ac0",
+ "sha256:ee784c11953e6d8badb97d19bc46b997a3a9eded849881ec587accd8608d74a4"
],
"index": "pypi",
- "version": "==2.8.2"
+ "version": "==2.9.3"
},
"pycodestyle": {
"hashes": [
@@ -639,11 +641,13 @@
"sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97",
"sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76",
"sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2",
+ "sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e",
"sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648",
"sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf",
"sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f",
"sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2",
"sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee",
+ "sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a",
"sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d",
"sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c",
"sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"
@@ -676,11 +680,11 @@
},
"virtualenv": {
"hashes": [
- "sha256:b0011228208944ce71052987437d3843e05690b2f23d1c7da4263fde104c97a2",
- "sha256:b8d6110f493af256a40d65e29846c69340a947669eec8ce784fcf3dd3af28380"
+ "sha256:54b05fc737ea9c9ee9f8340f579e5da5b09fb64fd010ab5757eb90268616907c",
+ "sha256:b7a8ec323ee02fb2312f098b6b4c9de99559b462775bc8fe3627a73706603c1b"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==20.1.0"
+ "version": "==20.2.2"
}
}
}
diff --git a/bot/__init__.py b/bot/__init__.py
index a9a0865e..bdb18666 100644
--- a/bot/__init__.py
+++ b/bot/__init__.py
@@ -37,7 +37,8 @@ os.makedirs(log_dir, exist_ok=True)
# File handler rotates logs every 5 MB
file_handler = logging.handlers.RotatingFileHandler(
- log_file, maxBytes=5 * (2**20), backupCount=10)
+ log_file, maxBytes=5 * (2**20), backupCount=10, encoding="utf-8",
+)
file_handler.setLevel(logging.TRACE if Client.debug else logging.DEBUG)
# Console handler prints to terminal
@@ -61,7 +62,7 @@ logging.basicConfig(
format='%(asctime)s - %(name)s %(levelname)s: %(message)s',
datefmt="%D %H:%M:%S",
level=logging.TRACE if Client.debug else logging.DEBUG,
- handlers=[console_handler, file_handler]
+ handlers=[console_handler, file_handler],
)
logging.getLogger().info('Logging initialization complete')
diff --git a/bot/__main__.py b/bot/__main__.py
index cd2d43a9..e9b14a53 100644
--- a/bot/__main__.py
+++ b/bot/__main__.py
@@ -2,9 +2,10 @@ import logging
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
+from sentry_sdk.integrations.redis import RedisIntegration
from bot.bot import bot
-from bot.constants import Client, STAFF_ROLES, WHITELISTED_CHANNELS
+from bot.constants import Client, GIT_SHA, STAFF_ROLES, WHITELISTED_CHANNELS
from bot.utils.decorators import in_channel_check
from bot.utils.extensions import walk_extensions
@@ -16,7 +17,11 @@ sentry_logging = LoggingIntegration(
sentry_sdk.init(
dsn=Client.sentry_dsn,
- integrations=[sentry_logging]
+ integrations=[
+ sentry_logging,
+ RedisIntegration()
+ ],
+ release=f"sir-lancebot@{GIT_SHA}"
)
log = logging.getLogger(__name__)
diff --git a/bot/constants.py b/bot/constants.py
index 6999f321..f6da272e 100644
--- a/bot/constants.py
+++ b/bot/constants.py
@@ -1,8 +1,9 @@
+import dataclasses
import enum
import logging
from datetime import datetime
from os import environ
-from typing import NamedTuple
+from typing import Dict, NamedTuple
__all__ = (
"AdventOfCode",
@@ -29,11 +30,60 @@ __all__ = (
log = logging.getLogger(__name__)
+@dataclasses.dataclass
+class AdventOfCodeLeaderboard:
+ id: str
+ _session: str
+ join_code: str
+
+ # If we notice that the session for this board expired, we set
+ # this attribute to `True`. We will emit a Sentry error so we
+ # can handle it, but, in the meantime, we'll try using the
+ # fallback session to make sure the commands still work.
+ use_fallback_session: bool = False
+
+ @property
+ def session(self) -> str:
+ """Return either the actual `session` cookie or the fallback cookie."""
+ if self.use_fallback_session:
+ log.info(f"Returning fallback cookie for board `{self.id}`.")
+ return AdventOfCode.fallback_session
+
+ return self._session
+
+
+def _parse_aoc_leaderboard_env() -> Dict[str, AdventOfCodeLeaderboard]:
+ """
+ Parse the environment variable containing leaderboard information.
+
+ A leaderboard should be specified in the format `id,session,join_code`,
+ without the backticks. If more than one leaderboard needs to be added to
+ the constant, separate the individual leaderboards with `::`.
+
+ Example ENV: `id1,session1,join_code1::id2,session2,join_code2`
+ """
+ raw_leaderboards = environ.get("AOC_LEADERBOARDS", "")
+ if not raw_leaderboards:
+ return {}
+
+ leaderboards = {}
+ for leaderboard in raw_leaderboards.split("::"):
+ leaderboard_id, session, join_code = leaderboard.split(",")
+ leaderboards[leaderboard_id] = AdventOfCodeLeaderboard(leaderboard_id, session, join_code)
+
+ return leaderboards
+
+
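
Given the `id,session,join_code` entries separated by `::` that the docstring above describes, the parser yields one `AdventOfCodeLeaderboard` per entry, keyed by its id. A small usage sketch, assuming the parser and dataclass defined above are in scope; every id, session cookie, and join code below is made up for illustration:

import os

# Illustrative value only; real ids, session cookies, and join codes differ.
os.environ["AOC_LEADERBOARDS"] = "631135,sess-a,631135-join-a::999999,sess-b,999999-join-b"

boards = _parse_aoc_leaderboard_env()
assert set(boards) == {"631135", "999999"}
assert boards["999999"].join_code == "999999-join-b"
assert boards["631135"]._session == "sess-a"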
class AdventOfCode:
- leaderboard_cache_age_threshold_seconds = 3600
- leaderboard_id = 631135
- leaderboard_join_code = str(environ.get("AOC_JOIN_CODE", None))
- leaderboard_max_displayed_members = 10
+ # Information for the several leaderboards we have
+ leaderboards = _parse_aoc_leaderboard_env()
+ staff_leaderboard_id = environ.get("AOC_STAFF_LEADERBOARD_ID", "")
+ fallback_session = environ.get("AOC_FALLBACK_SESSION", "")
+
+ # Other Advent of Code constants
+ ignored_days = environ.get("AOC_IGNORED_DAYS", "").split(",")
+ leaderboard_displayed_members = 10
+ leaderboard_cache_expiry_seconds = 1800
year = int(environ.get("AOC_YEAR", datetime.utcnow().year))
role_id = int(environ.get("AOC_ROLE_ID", 518565788744024082))
@@ -44,7 +94,8 @@ class Branding:
class Channels(NamedTuple):
admins = 365960823622991872
- advent_of_code = int(environ.get("AOC_CHANNEL_ID", 517745814039166986))
+ advent_of_code = int(environ.get("AOC_CHANNEL_ID", 782715290437943306))
+ advent_of_code_commands = int(environ.get("AOC_COMMANDS_CHANNEL_ID", 607247579608121354))
announcements = int(environ.get("CHANNEL_ANNOUNCEMENTS", 354619224620138496))
big_brother_logs = 468507907357409333
bot = 267659945086812160
@@ -193,9 +244,10 @@ class Roles(NamedTuple):
muted = 277914926603829249
owner = 267627879762755584
verified = 352427296948486144
- helpers = 267630620367257601
+ helpers = int(environ.get("ROLE_HELPERS", 267630620367257601))
rockstars = 458226413825294336
core_developers = 587606783669829632
+ events_lead = 778361735739998228
class Tokens(NamedTuple):
@@ -262,6 +314,8 @@ WHITELISTED_CHANNELS = (
Channels.sprint_documentation,
)
+GIT_SHA = environ.get("GIT_SHA", "foobar")
+
# Bot replies
ERROR_REPLIES = [
"Please don't do that.",
diff --git a/bot/exts/christmas/advent_of_code/__init__.py b/bot/exts/christmas/advent_of_code/__init__.py
new file mode 100644
index 00000000..3c521168
--- /dev/null
+++ b/bot/exts/christmas/advent_of_code/__init__.py
@@ -0,0 +1,10 @@
+from bot.bot import Bot
+
+
+def setup(bot: Bot) -> None:
+ """Set up the Advent of Code extension."""
+ # Import the Cog at runtime to prevent side effects like defining
+ # RedisCache instances too early.
+ from ._cog import AdventOfCode
+
+ bot.add_cog(AdventOfCode(bot))
diff --git a/bot/exts/christmas/advent_of_code/_caches.py b/bot/exts/christmas/advent_of_code/_caches.py
new file mode 100644
index 00000000..32d5394f
--- /dev/null
+++ b/bot/exts/christmas/advent_of_code/_caches.py
@@ -0,0 +1,5 @@
+import async_rediscache
+
+leaderboard_counts = async_rediscache.RedisCache(namespace="AOC_leaderboard_counts")
+leaderboard_cache = async_rediscache.RedisCache(namespace="AOC_leaderboard_cache")
+assigned_leaderboard = async_rediscache.RedisCache(namespace="AOC_assigned_leaderboard")
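
These three namespaces hold the per-board participant counts, the cached combined leaderboard, and the board each user has been assigned to. The helpers below talk to them through async-rediscache's coroutine interface (`set`, `get`, `to_dict`, `update`); a minimal sketch, assuming the bot's Redis session has already been created as it is during normal start-up (the board id and count are invented):

from bot.exts.christmas.advent_of_code import _caches


async def record_board_count() -> None:
    # Store a participant count for a made-up board id.
    await _caches.leaderboard_counts.set("631135", 42)

    # Read one key back, or snapshot the whole namespace as a regular dict.
    count = await _caches.leaderboard_counts.get("631135")
    all_counts = await _caches.leaderboard_counts.to_dict()
    print(count, all_counts)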
diff --git a/bot/exts/christmas/advent_of_code/_cog.py b/bot/exts/christmas/advent_of_code/_cog.py
new file mode 100644
index 00000000..c3b87f96
--- /dev/null
+++ b/bot/exts/christmas/advent_of_code/_cog.py
@@ -0,0 +1,296 @@
+import json
+import logging
+from datetime import datetime, timedelta
+from pathlib import Path
+
+import discord
+from discord.ext import commands
+
+from bot.bot import Bot
+from bot.constants import (
+ AdventOfCode as AocConfig, Channels, Colours, Emojis, Month, Roles, WHITELISTED_CHANNELS,
+)
+from bot.exts.christmas.advent_of_code import _helpers
+from bot.utils.decorators import InChannelCheckFailure, in_month, override_in_channel, with_role
+
+log = logging.getLogger(__name__)
+
+AOC_REQUEST_HEADER = {"user-agent": "PythonDiscord AoC Event Bot"}
+
+AOC_WHITELIST_RESTRICTED = WHITELISTED_CHANNELS + (Channels.advent_of_code_commands,)
+
+# Some commands can be run in the regular advent of code channel
+# They aren't spammy and foster discussion
+AOC_WHITELIST = AOC_WHITELIST_RESTRICTED + (Channels.advent_of_code,)
+
+
+class AdventOfCode(commands.Cog):
+ """Advent of Code festivities! Ho Ho Ho!"""
+
+ def __init__(self, bot: Bot) -> None:
+ self.bot = bot
+
+ self._base_url = f"https://adventofcode.com/{AocConfig.year}"
+ self.global_leaderboard_url = f"https://adventofcode.com/{AocConfig.year}/leaderboard"
+
+ self.about_aoc_filepath = Path("./bot/resources/advent_of_code/about.json")
+ self.cached_about_aoc = self._build_about_embed()
+
+ self.countdown_task = None
+ self.status_task = None
+
+ notification_coro = _helpers.new_puzzle_notification(self.bot)
+ self.notification_task = self.bot.loop.create_task(notification_coro)
+ self.notification_task.set_name("Daily AoC Notification")
+ self.notification_task.add_done_callback(_helpers.background_task_callback)
+
+ status_coro = _helpers.countdown_status(self.bot)
+ self.status_task = self.bot.loop.create_task(status_coro)
+ self.status_task.set_name("AoC Status Countdown")
+ self.status_task.add_done_callback(_helpers.background_task_callback)
+
+ @commands.group(name="adventofcode", aliases=("aoc",))
+ @override_in_channel(AOC_WHITELIST)
+ async def adventofcode_group(self, ctx: commands.Context) -> None:
+ """All of the Advent of Code commands."""
+ if not ctx.invoked_subcommand:
+ await ctx.send_help(ctx.command)
+
+ @adventofcode_group.command(
+ name="subscribe",
+ aliases=("sub", "notifications", "notify", "notifs"),
+ brief="Notifications for new days"
+ )
+ @override_in_channel(AOC_WHITELIST)
+ async def aoc_subscribe(self, ctx: commands.Context) -> None:
+ """Assign the role for notifications about new days being ready."""
+ current_year = datetime.now().year
+ if current_year != AocConfig.year:
+ await ctx.send(f"You can't subscribe to {current_year}'s Advent of Code announcements yet!")
+ return
+
+ role = ctx.guild.get_role(AocConfig.role_id)
+ unsubscribe_command = f"{ctx.prefix}{ctx.command.root_parent} unsubscribe"
+
+ if role not in ctx.author.roles:
+ await ctx.author.add_roles(role)
+ await ctx.send("Okay! You have been __subscribed__ to notifications about new Advent of Code tasks. "
+ f"You can run `{unsubscribe_command}` to disable them again for you.")
+ else:
+ await ctx.send("Hey, you already are receiving notifications about new Advent of Code tasks. "
+ f"If you don't want them any more, run `{unsubscribe_command}` instead.")
+
+ @in_month(Month.DECEMBER)
+ @adventofcode_group.command(name="unsubscribe", aliases=("unsub",), brief="Notifications for new days")
+ @override_in_channel(AOC_WHITELIST)
+ async def aoc_unsubscribe(self, ctx: commands.Context) -> None:
+ """Remove the role for notifications about new days being ready."""
+ role = ctx.guild.get_role(AocConfig.role_id)
+
+ if role in ctx.author.roles:
+ await ctx.author.remove_roles(role)
+ await ctx.send("Okay! You have been __unsubscribed__ from notifications about new Advent of Code tasks.")
+ else:
+ await ctx.send("Hey, you don't even get any notifications about new Advent of Code tasks currently anyway.")
+
+ @adventofcode_group.command(name="countdown", aliases=("count", "c"), brief="Return time left until next day")
+ @override_in_channel(AOC_WHITELIST)
+ async def aoc_countdown(self, ctx: commands.Context) -> None:
+ """Return time left until next day."""
+ if not _helpers.is_in_advent():
+ datetime_now = datetime.now(_helpers.EST)
+
+ # Calculate the delta to this & next year's December 1st to see which one is closest and not in the past
+ this_year = datetime(datetime_now.year, 12, 1, tzinfo=_helpers.EST)
+ next_year = datetime(datetime_now.year + 1, 12, 1, tzinfo=_helpers.EST)
+ deltas = (dec_first - datetime_now for dec_first in (this_year, next_year))
+ delta = min(delta for delta in deltas if delta >= timedelta()) # timedelta() gives 0 duration delta
+
+ # Add a finer timedelta if there's less than a day left
+ if delta.days == 0:
+ delta_str = f"approximately {delta.seconds // 3600} hours"
+ else:
+ delta_str = f"{delta.days} days"
+
+ await ctx.send(f"The Advent of Code event is not currently running. "
+ f"The next event will start in {delta_str}.")
+ return
+
+ tomorrow, time_left = _helpers.time_left_to_est_midnight()
+
+ hours, minutes = time_left.seconds // 3600, time_left.seconds // 60 % 60
+
+ await ctx.send(f"There are {hours} hours and {minutes} minutes left until day {tomorrow.day}.")
+
+ @adventofcode_group.command(name="about", aliases=("ab", "info"), brief="Learn about Advent of Code")
+ @override_in_channel(AOC_WHITELIST)
+ async def about_aoc(self, ctx: commands.Context) -> None:
+ """Respond with an explanation of all things Advent of Code."""
+ await ctx.send("", embed=self.cached_about_aoc)
+
+ @adventofcode_group.command(name="join", aliases=("j",), brief="Learn how to join the leaderboard (via DM)")
+ @override_in_channel(AOC_WHITELIST)
+ async def join_leaderboard(self, ctx: commands.Context) -> None:
+ """DM the user the information for joining the Python Discord leaderboard."""
+ current_year = datetime.now().year
+ if current_year != AocConfig.year:
+ await ctx.send(f"The Python Discord leaderboard for {current_year} is not yet available!")
+ return
+
+ author = ctx.message.author
+ log.info(f"{author.name} ({author.id}) has requested a PyDis AoC leaderboard code")
+
+ if AocConfig.staff_leaderboard_id and any(r.id == Roles.helpers for r in author.roles):
+ join_code = AocConfig.leaderboards[AocConfig.staff_leaderboard_id].join_code
+ else:
+ try:
+ join_code = await _helpers.get_public_join_code(author)
+ except _helpers.FetchingLeaderboardFailed:
+ await ctx.send(":x: Failed to get join code! Notified maintainers.")
+ return
+
+ if not join_code:
+ log.error(f"Failed to get a join code for user {author} ({author.id})")
+ error_embed = discord.Embed(
+ title="Unable to get join code",
+ description="Failed to get a join code to one of our boards. Please notify staff.",
+ colour=discord.Colour.red(),
+ )
+ await ctx.send(embed=error_embed)
+ return
+
+ info_str = [
+ "To join our leaderboard, follow these steps:",
+ "• Log in on https://adventofcode.com",
+ "• Head over to https://adventofcode.com/leaderboard/private",
+ f"• Use this code `{join_code}` to join the Python Discord leaderboard!",
+ ]
+ try:
+ await author.send("\n".join(info_str))
+ except discord.errors.Forbidden:
+ log.debug(f"{author.name} ({author.id}) has disabled DMs from server members")
+ await ctx.send(f":x: {author.mention}, please (temporarily) enable DMs to receive the join code")
+ else:
+ await ctx.message.add_reaction(Emojis.envelope)
+
+ @adventofcode_group.command(
+ name="leaderboard",
+ aliases=("board", "lb"),
+ brief="Get a snapshot of the PyDis private AoC leaderboard",
+ )
+ @override_in_channel(AOC_WHITELIST_RESTRICTED)
+ async def aoc_leaderboard(self, ctx: commands.Context) -> None:
+ """Get the current top scorers of the Python Discord Leaderboard."""
+ async with ctx.typing():
+ try:
+ leaderboard = await _helpers.fetch_leaderboard()
+ except _helpers.FetchingLeaderboardFailed:
+ await ctx.send(":x: Unable to fetch leaderboard!")
+ return
+
+ number_of_participants = leaderboard["number_of_participants"]
+
+ top_count = min(AocConfig.leaderboard_displayed_members, number_of_participants)
+ header = f"Here's our current top {top_count}! {Emojis.christmas_tree * 3}"
+
+ table = f"```\n{leaderboard['top_leaderboard']}\n```"
+ info_embed = _helpers.get_summary_embed(leaderboard)
+
+ await ctx.send(content=f"{header}\n\n{table}", embed=info_embed)
+
+ @adventofcode_group.command(
+ name="global",
+ aliases=("globalboard", "gb"),
+ brief="Get a link to the global leaderboard",
+ )
+ @override_in_channel(AOC_WHITELIST_RESTRICTED)
+ async def aoc_global_leaderboard(self, ctx: commands.Context) -> None:
+ """Get a link to the global Advent of Code leaderboard."""
+ url = self.global_leaderboard_url
+ global_leaderboard = discord.Embed(
+ title="Advent of Code — Global Leaderboard",
+ description=f"You can find the global leaderboard [here]({url})."
+ )
+ global_leaderboard.set_thumbnail(url=_helpers.AOC_EMBED_THUMBNAIL)
+ await ctx.send(embed=global_leaderboard)
+
+ @adventofcode_group.command(
+ name="stats",
+ aliases=("dailystats", "ds"),
+ brief="Get daily statistics for the Python Discord leaderboard"
+ )
+ @override_in_channel(AOC_WHITELIST_RESTRICTED)
+ async def private_leaderboard_daily_stats(self, ctx: commands.Context) -> None:
+ """Send an embed with daily completion statistics for the Python Discord leaderboard."""
+ try:
+ leaderboard = await _helpers.fetch_leaderboard()
+ except _helpers.FetchingLeaderboardFailed:
+ await ctx.send(":x: Can't fetch leaderboard for stats right now!")
+ return
+
+ # The daily stats are serialized as JSON as they have to be cached in Redis
+ daily_stats = json.loads(leaderboard["daily_stats"])
+ async with ctx.typing():
+ lines = ["Day ⭐ ⭐⭐ | %⭐ %⭐⭐\n================================"]
+ for day, stars in daily_stats.items():
+ star_one = stars["star_one"]
+ star_two = stars["star_two"]
+ p_star_one = star_one / leaderboard["number_of_participants"]
+ p_star_two = star_two / leaderboard["number_of_participants"]
+ lines.append(
+ f"{day:>2}) {star_one:>4} {star_two:>4} | {p_star_one:>7.2%} {p_star_two:>7.2%}"
+ )
+ table = "\n".join(lines)
+ info_embed = _helpers.get_summary_embed(leaderboard)
+ await ctx.send(f"```\n{table}\n```", embed=info_embed)
+
+ @with_role(Roles.admin, Roles.events_lead)
+ @adventofcode_group.command(
+ name="refresh",
+ aliases=("fetch",),
+ brief="Force a refresh of the leaderboard cache.",
+ )
+ async def refresh_leaderboard(self, ctx: commands.Context) -> None:
+ """
+ Force a refresh of the leaderboard cache.
+
+ Note: This should be used sparingly, as we want to prevent sending too
+ many requests to the Advent of Code server.
+ """
+ async with ctx.typing():
+ try:
+ await _helpers.fetch_leaderboard(invalidate_cache=True)
+ except _helpers.FetchingLeaderboardFailed:
+ await ctx.send(":x: Something went wrong while trying to refresh the cache!")
+ else:
+ await ctx.send("\N{OK Hand Sign} Refreshed leaderboard cache!")
+
+ def cog_unload(self) -> None:
+ """Cancel season-related tasks on cog unload."""
+ log.debug("Unloading the cog and canceling the background task.")
+ self.countdown_task.cancel()
+ self.status_task.cancel()
+
+ def _build_about_embed(self) -> discord.Embed:
+ """Build and return the informational "About AoC" embed from the resources file."""
+ with self.about_aoc_filepath.open("r", encoding="utf8") as f:
+ embed_fields = json.load(f)
+
+ about_embed = discord.Embed(
+ title=self._base_url,
+ colour=Colours.soft_green,
+ url=self._base_url,
+ timestamp=datetime.utcnow()
+ )
+ about_embed.set_author(name="Advent of Code", url=self._base_url)
+ for field in embed_fields:
+ about_embed.add_field(**field)
+
+ about_embed.set_footer(text="Last Updated")
+ return about_embed
+
+ async def cog_command_error(self, ctx: commands.Context, error: Exception) -> None:
+ """Custom error handler if an advent of code command was posted in the wrong channel."""
+ if isinstance(error, InChannelCheckFailure):
+ await ctx.send(f":x: Please use <#{Channels.advent_of_code_commands}> for aoc commands instead.")
+ error.handled = True
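
The countdown command above selects whichever upcoming December 1st (this year's or next year's) is closest without being in the past, then reports the remaining days or hours. The same delta selection in isolation, mirroring the logic of `aoc_countdown`:

from datetime import datetime, timedelta

import pytz

EST = pytz.timezone("EST")

now = datetime.now(EST)
candidates = (
    datetime(now.year, 12, 1, tzinfo=EST),      # this year's event start
    datetime(now.year + 1, 12, 1, tzinfo=EST),  # next year's event start
)
deltas = (dec_first - now for dec_first in candidates)
# timedelta() is a zero-length delta, so this keeps only non-negative deltas.
delta = min(d for d in deltas if d >= timedelta())

if delta.days == 0:
    print(f"approximately {delta.seconds // 3600} hours")
else:
    print(f"{delta.days} days")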
diff --git a/bot/exts/christmas/advent_of_code/_helpers.py b/bot/exts/christmas/advent_of_code/_helpers.py
new file mode 100644
index 00000000..b7adc895
--- /dev/null
+++ b/bot/exts/christmas/advent_of_code/_helpers.py
@@ -0,0 +1,592 @@
+import asyncio
+import collections
+import datetime
+import json
+import logging
+import math
+import operator
+import typing
+from typing import Tuple
+
+import aiohttp
+import discord
+import pytz
+
+from bot.bot import Bot
+from bot.constants import AdventOfCode, Channels, Colours
+from bot.exts.christmas.advent_of_code import _caches
+
+log = logging.getLogger(__name__)
+
+PASTE_URL = "https://paste.pythondiscord.com/documents"
+RAW_PASTE_URL_TEMPLATE = "https://paste.pythondiscord.com/raw/{key}"
+
+# Base API URL for Advent of Code Private Leaderboards
+AOC_API_URL = "https://adventofcode.com/{year}/leaderboard/private/view/{leaderboard_id}.json"
+AOC_REQUEST_HEADER = {"user-agent": "PythonDiscord AoC Event Bot"}
+
+# Leaderboard Line Template
+AOC_TABLE_TEMPLATE = "{rank: >4} | {name:25.25} | {score: >5} | {stars}"
+HEADER = AOC_TABLE_TEMPLATE.format(rank="", name="Name", score="Score", stars="⭐, ⭐⭐")
+HEADER = f"{HEADER}\n{'-' * (len(HEADER) + 2)}"
+HEADER_LINES = len(HEADER.splitlines())
+TOP_LEADERBOARD_LINES = HEADER_LINES + AdventOfCode.leaderboard_displayed_members
+
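
AOC_TABLE_TEMPLATE right-aligns the rank to 4 characters, pads or truncates the name to 25, and right-aligns the score to 5, so every row lines up under HEADER. A quick illustration with invented values (the source passes the score as a string):

AOC_TABLE_TEMPLATE = "{rank: >4} | {name:25.25} | {score: >5} | {stars}"

# "lemon", 26, and (13, 13) are made-up sample values.
row = AOC_TABLE_TEMPLATE.format(rank=1, name="lemon", score="26", stars="(13, 13)")
print(repr(row))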
+# Keys that need to be set for a cached leaderboard
+REQUIRED_CACHE_KEYS = (
+ "full_leaderboard",
+ "top_leaderboard",
+ "full_leaderboard_url",
+ "leaderboard_fetched_at",
+ "number_of_participants",
+ "daily_stats",
+)
+
+AOC_EMBED_THUMBNAIL = (
+ "https://raw.githubusercontent.com/python-discord"
+ "/branding/master/seasonal/christmas/server_icons/festive_256.gif"
+)
+
+# Create an easy constant for the EST timezone
+EST = pytz.timezone("EST")
+
+# Step size for the challenge countdown status
+COUNTDOWN_STEP = 60 * 5
+
+# Create namedtuple that combines a participant's name and their completion
+# time for a specific star. We're going to use this later to order the results
+# for each star to compute the rank score.
+StarResult = collections.namedtuple("StarResult", "member_id completion_time")
+
+
+class UnexpectedRedirect(aiohttp.ClientError):
+ """Raised when an unexpected redirect was detected."""
+
+
+class UnexpectedResponseStatus(aiohttp.ClientError):
+ """Raised when an unexpected response status was detected."""
+
+
+class FetchingLeaderboardFailed(Exception):
+ """Raised when one or more leaderboards could not be fetched at all."""
+
+
+def leaderboard_sorting_function(entry: typing.Tuple[str, dict]) -> typing.Tuple[int, int]:
+ """
+ Provide a sorting value for our leaderboard.
+
+ The leaderboard is sorted primarily on the score someone has received and
+ secondary on the number of stars someone has completed.
+ """
+ result = entry[1]
+ return result["score"], result["star_2"] + result["star_1"]
+
+
+def _parse_raw_leaderboard_data(raw_leaderboard_data: dict) -> dict:
+ """
+ Parse the leaderboard data received from the AoC website.
+
+ The data we receive from AoC is structured by member, not by day/star. This
+ means that we need to "transpose" the data to a per star structure in order
+ to calculate the rank scores each individual should get.
+
+ As we need our data both "per participant" as well as "per day", we return
+ the parsed and analyzed data in both formats.
+ """
+ # We need to get an aggregate of completion times for each star of each day,
+ # instead of per participant to compute the rank scores. This dictionary will
+ # provide such a transposed dataset.
+ star_results = collections.defaultdict(list)
+
+ # As we're already iterating over the participants, we can record the number of
+ # first stars and second stars they've achieved right here and now. This means
+ # we won't have to iterate over the participants again later.
+ leaderboard = {}
+
+ # The data we get from the AoC website is structured by member, not by day/star,
+ # which means we need to iterate over the members to transpose the data to a per
+ # star view. We need that per star view to compute rank scores per star.
+ for member in raw_leaderboard_data.values():
+ name = member["name"] if member["name"] else f"Anonymous #{member['id']}"
+ member_id = member['id']
+ leaderboard[member_id] = {"name": name, "score": 0, "star_1": 0, "star_2": 0}
+
+ # Iterate over all days for this participant
+ for day, stars in member["completion_day_level"].items():
+ # Iterate over the complete stars for this day for this participant
+ for star, data in stars.items():
+ # Record completion of this star for this individual
+ leaderboard[member_id][f"star_{star}"] += 1
+
+ # Record completion datetime for this participant for this day/star
+ completion_time = datetime.datetime.fromtimestamp(int(data['get_star_ts']))
+ star_results[(day, star)].append(
+ StarResult(member_id=member_id, completion_time=completion_time)
+ )
+
+ # Now that we have a transposed dataset that holds the completion time of all
+ # participants per star, we can compute the rank-based scores each participant
+ # should get for that star.
+ max_score = len(leaderboard)
+ for (day, _star), results in star_results.items():
+ # If this day should not count in the ranking, skip it.
+ if day in AdventOfCode.ignored_days:
+ continue
+
+ sorted_result = sorted(results, key=operator.attrgetter('completion_time'))
+ for rank, star_result in enumerate(sorted_result):
+ leaderboard[star_result.member_id]["score"] += max_score - rank
+
+ # Since dictionaries now retain insertion order, let's use that
+ sorted_leaderboard = dict(
+ sorted(leaderboard.items(), key=leaderboard_sorting_function, reverse=True)
+ )
+
+ # Create summary stats for the stars completed for each day of the event.
+ daily_stats = {}
+ for day in range(1, 26):
+ day = str(day)
+ star_one = len(star_results.get((day, "1"), []))
+ star_two = len(star_results.get((day, "2"), []))
+ # By using a dictionary instead of namedtuple here, we can serialize
+ # this data to JSON in order to cache it in Redis.
+ daily_stats[day] = {"star_one": star_one, "star_two": star_two}
+
+ return {"daily_stats": daily_stats, "leaderboard": sorted_leaderboard}
+
+
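
The scoring loop above ranks every (day, star) pair by completion time: the fastest participant receives `max_score` points (the number of participants on the combined board), the next one point fewer, and so on, while days listed in `AOC_IGNORED_DAYS` are skipped entirely. A tiny worked example of that rank-to-score step with three participants and invented completion times:

import collections
import datetime
import operator

StarResult = collections.namedtuple("StarResult", "member_id completion_time")

# Three participants, one star: A finished first, then B, then C.
results = [
    StarResult("B", datetime.datetime(2020, 12, 1, 6, 10)),
    StarResult("A", datetime.datetime(2020, 12, 1, 5, 5)),
    StarResult("C", datetime.datetime(2020, 12, 1, 9, 0)),
]

scores = {"A": 0, "B": 0, "C": 0}
max_score = len(scores)

for rank, star_result in enumerate(sorted(results, key=operator.attrgetter("completion_time"))):
    scores[star_result.member_id] += max_score - rank

print(scores)  # {'A': 3, 'B': 2, 'C': 1}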
+def _format_leaderboard(leaderboard: typing.Dict[str, dict]) -> str:
+ """Format the leaderboard using the AOC_TABLE_TEMPLATE."""
+ leaderboard_lines = [HEADER]
+ for rank, data in enumerate(leaderboard.values(), start=1):
+ leaderboard_lines.append(
+ AOC_TABLE_TEMPLATE.format(
+ rank=rank,
+ name=data["name"],
+ score=str(data["score"]),
+ stars=f"({data['star_1']}, {data['star_2']})"
+ )
+ )
+
+ return "\n".join(leaderboard_lines)
+
+
+async def _leaderboard_request(url: str, board: int, cookies: dict) -> typing.Optional[dict]:
+ """Make a leaderboard request using the specified session cookie."""
+ async with aiohttp.request("GET", url, headers=AOC_REQUEST_HEADER, cookies=cookies) as resp:
+ # The Advent of Code website redirects silently with a 200 response if a
+ # session cookie has expired, is invalid, or was not provided.
+ if str(resp.url) != url:
+ log.error(f"Fetching leaderboard `{board}` failed! Check the session cookie.")
+ raise UnexpectedRedirect(f"redirected unexpectedly to {resp.url} for board `{board}`")
+
+ # Every status other than `200` is unexpected, not only 400+
+ if not resp.status == 200:
+ log.error(f"Unexpected response `{resp.status}` while fetching leaderboard `{board}`")
+ raise UnexpectedResponseStatus(f"status `{resp.status}`")
+
+ return await resp.json()
+
+
+async def _fetch_leaderboard_data() -> typing.Dict[str, typing.Any]:
+ """Fetch data for all leaderboards and return a pooled result."""
+ year = AdventOfCode.year
+
+ # We'll make our requests one at a time to not flood the AoC website with
+ # up to six simultaneous requests. This may take a little longer, but it
+ # does avoid putting unnecessary stress on the Advent of Code website.
+
+ # Container to store the raw data of each leaderboard
+ participants = {}
+ for leaderboard in AdventOfCode.leaderboards.values():
+ leaderboard_url = AOC_API_URL.format(year=year, leaderboard_id=leaderboard.id)
+
+ # Two attempts, one with the original session cookie and one with the fallback session
+ for attempt in range(1, 3):
+ log.info(f"Attempting to fetch leaderboard `{leaderboard.id}` ({attempt}/2)")
+ cookies = {"session": leaderboard.session}
+ try:
+ raw_data = await _leaderboard_request(leaderboard_url, leaderboard.id, cookies)
+ except UnexpectedRedirect:
+ if cookies["session"] == AdventOfCode.fallback_session:
+ log.error("It seems like the fallback cookie has expired!")
+ raise FetchingLeaderboardFailed from None
+
+ # If we're here, it means that the original session did not
+ # work. Let's fall back to the fallback session.
+ leaderboard.use_fallback_session = True
+ continue
+ except aiohttp.ClientError:
+ # Don't retry, something unexpected is wrong and it may not be the session.
+ raise FetchingLeaderboardFailed from None
+ else:
+ # Get the participants and store their current count.
+ board_participants = raw_data["members"]
+ await _caches.leaderboard_counts.set(leaderboard.id, len(board_participants))
+ participants.update(board_participants)
+ break
+ else:
+ log.error(f"reached 'unreachable' state while fetching board `{leaderboard.id}`.")
+ raise FetchingLeaderboardFailed
+
+ log.info(f"Fetched leaderboard information for {len(participants)} participants")
+ return participants
+
+
+async def _upload_leaderboard(leaderboard: str) -> str:
+ """Upload the full leaderboard to our paste service and return the URL."""
+ async with aiohttp.request("POST", PASTE_URL, data=leaderboard) as resp:
+ try:
+ resp_json = await resp.json()
+ except Exception:
+ log.exception("Failed to upload full leaderboard to paste service")
+ return ""
+
+ if "key" in resp_json:
+ return RAW_PASTE_URL_TEMPLATE.format(key=resp_json["key"])
+
+ log.error(f"Unexpected response from paste service while uploading leaderboard {resp_json}")
+ return ""
+
+
+def _get_top_leaderboard(full_leaderboard: str) -> str:
+ """Get the leaderboard up to the maximum specified entries."""
+ return "\n".join(full_leaderboard.splitlines()[:TOP_LEADERBOARD_LINES])
+
+
+@_caches.leaderboard_cache.atomic_transaction
+async def fetch_leaderboard(invalidate_cache: bool = False) -> dict:
+ """
+ Get the current Python Discord combined leaderboard.
+
+ The leaderboard is cached and only fetched from the API if the current data
+ is older than the lifetime set in the constants. To prevent multiple calls
+ to this function fetching new leaderboard information in case of a cache
+ miss, this function is locked to one call at a time using a decorator.
+ """
+ cached_leaderboard = await _caches.leaderboard_cache.to_dict()
+
+ # Check if the cached leaderboard contains everything we expect it to. If it
+ # does not, this probably means the cache has not been created yet or has
+ # expired in Redis. This check also accounts for a malformed cache.
+ if invalidate_cache or any(key not in cached_leaderboard for key in REQUIRED_CACHE_KEYS):
+ log.info("No leaderboard cache available, fetching leaderboards...")
+ # Fetch the raw data
+ raw_leaderboard_data = await _fetch_leaderboard_data()
+
+ # Parse it to extract "per star, per day" data and participant scores
+ parsed_leaderboard_data = _parse_raw_leaderboard_data(raw_leaderboard_data)
+
+ leaderboard = parsed_leaderboard_data["leaderboard"]
+ number_of_participants = len(leaderboard)
+ formatted_leaderboard = _format_leaderboard(leaderboard)
+ full_leaderboard_url = await _upload_leaderboard(formatted_leaderboard)
+ leaderboard_fetched_at = datetime.datetime.utcnow().isoformat()
+
+ cached_leaderboard = {
+ "full_leaderboard": formatted_leaderboard,
+ "top_leaderboard": _get_top_leaderboard(formatted_leaderboard),
+ "full_leaderboard_url": full_leaderboard_url,
+ "leaderboard_fetched_at": leaderboard_fetched_at,
+ "number_of_participants": number_of_participants,
+ "daily_stats": json.dumps(parsed_leaderboard_data["daily_stats"]),
+ }
+
+ # Store the new values in Redis
+ await _caches.leaderboard_cache.update(cached_leaderboard)
+
+ # Set an expiry on the leaderboard RedisCache
+ with await _caches.leaderboard_cache._get_pool_connection() as connection:
+ await connection.expire(
+ _caches.leaderboard_cache.namespace,
+ AdventOfCode.leaderboard_cache_expiry_seconds
+ )
+
+ return cached_leaderboard
+
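`atomic_transaction` is the bot's own RedisCache helper; as a rough, hypothetical illustration of the same "only one caller refreshes on a cache miss" idea, a plain `asyncio.Lock` wrapper could look like this (not the bot's actual implementation):

import asyncio
import functools
import typing


def single_flight(func: typing.Callable) -> typing.Callable:
    """Serialize calls so concurrent cache misses trigger only one refresh at a time."""
    lock = asyncio.Lock()

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        async with lock:
            return await func(*args, **kwargs)

    return wrapper


@single_flight
async def refresh_leaderboard() -> dict:
    """Placeholder for the expensive fetch-and-cache step."""
    await asyncio.sleep(1)
    return {"number_of_participants": 0}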
+
+def get_summary_embed(leaderboard: dict) -> discord.Embed:
+ """Get an embed with the current summary stats of the leaderboard."""
+ leaderboard_url = leaderboard['full_leaderboard_url']
+ refresh_minutes = AdventOfCode.leaderboard_cache_expiry_seconds // 60
+
+ aoc_embed = discord.Embed(
+ colour=Colours.soft_green,
+ timestamp=datetime.datetime.fromisoformat(leaderboard["leaderboard_fetched_at"]),
+ description=f"*The leaderboard is refreshed every {refresh_minutes} minutes.*"
+ )
+ aoc_embed.add_field(
+ name="Number of Participants",
+ value=leaderboard["number_of_participants"],
+ inline=True,
+ )
+ if leaderboard_url:
+ aoc_embed.add_field(
+ name="Full Leaderboard",
+ value=f"[Python Discord Leaderboard]({leaderboard_url})",
+ inline=True,
+ )
+ aoc_embed.set_author(name="Advent of Code", url=leaderboard_url)
+ aoc_embed.set_footer(text="Last Updated")
+ aoc_embed.set_thumbnail(url=AOC_EMBED_THUMBNAIL)
+
+ return aoc_embed
+
+
+async def get_public_join_code(author: discord.Member) -> typing.Optional[str]:
+ """
+ Get the join code for one of the non-staff leaderboards.
+
+ If a user has previously requested a join code and their assigned board
+ hasn't filled up yet, we'll return the same join code to prevent them from
+ getting join codes for multiple boards.
+ """
+ # Make sure to fetch new leaderboard information if the cache is older than
+ # 30 minutes. While this still means that there could be a discrepancy
+ # between the current leaderboard state and the numbers we have here, this
+ # should work fairly well given the buffer of slots that we have.
+ await fetch_leaderboard()
+ previously_assigned_board = await _caches.assigned_leaderboard.get(author.id)
+ current_board_counts = await _caches.leaderboard_counts.to_dict()
+
+ # Remove the staff board from the current board counts as it should be ignored.
+ current_board_counts.pop(AdventOfCode.staff_leaderboard_id, None)
+
+ # If this user has already received a join code, we'll give them the
+ # exact same one to prevent them from joining multiple boards and taking
+ # up multiple slots.
+ if previously_assigned_board:
+ # Check if their previously assigned board still has room for them
+ if current_board_counts.get(previously_assigned_board, 0) < 200:
+ log.info(f"{author} ({author.id}) was already assigned to a board with open slots.")
+ return AdventOfCode.leaderboards[previously_assigned_board].join_code
+
+ log.info(
+ f"User {author} ({author.id}) previously received the join code for "
+ f"board `{previously_assigned_board}`, but that board's now full. "
+ "Assigning another board to this user."
+ )
+
+ # If we don't have the current board counts cached, let's force fetching a new cache
+ if not current_board_counts:
+ log.warning("Leaderboard counts were missing from the cache unexpectedly!")
+ await fetch_leaderboard(invalidate_cache=True)
+ current_board_counts = await _caches.leaderboard_counts.to_dict()
+
+ # Find the board with the lowest current participant count.
+ best_board, _count = min(current_board_counts.items(), key=operator.itemgetter(1))
+
+ if current_board_counts.get(best_board, 0) >= 200:
+ log.warning(f"User {author} `{author.id}` requested a join code, but all boards are full!")
+ return
+
+ log.info(f"Assigning user {author} ({author.id}) to board `{best_board}`")
+ await _caches.assigned_leaderboard.set(author.id, best_board)
+
+ # Return the join code for this board
+ return AdventOfCode.leaderboards[best_board].join_code
+
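As a small worked example of the selection above (hypothetical board IDs and counts; 200 is the per-board member limit used in the checks):

import operator

current_board_counts = {"12345": 198, "67890": 57, "24680": 200}

best_board, count = min(current_board_counts.items(), key=operator.itemgetter(1))
assert (best_board, count) == ("67890", 57)  # the board with the most open slots

# If even the emptiest board were full, no join code would be handed out.
assert not count >= 200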
+
+def is_in_advent() -> bool:
+ """
+ Check if we're currently on an Advent of Code day, excluding 25 December.
+
+ This helper function is used to check whether or not a feature that prepares
+ something for the next Advent of Code challenge should run. As the puzzle
+ published on the 25th is the last puzzle, this check excludes that date.
+ """
+ return datetime.datetime.now(EST).day in range(1, 25) and datetime.datetime.now(EST).month == 12
+
+
+def time_left_to_est_midnight() -> Tuple[datetime.datetime, datetime.timedelta]:
+ """Calculate the amount of time left until midnight EST/UTC-5."""
+ # Change all time properties back to 00:00
+ todays_midnight = datetime.datetime.now(EST).replace(
+ microsecond=0,
+ second=0,
+ minute=0,
+ hour=0
+ )
+
+ # We want tomorrow so add a day on
+ tomorrow = todays_midnight + datetime.timedelta(days=1)
+
+ # Calculate the timedelta between the current time and midnight
+ return tomorrow, tomorrow - datetime.datetime.now(EST)
+
+
+async def wait_for_advent_of_code(*, hours_before: int = 1) -> None:
+ """
+ Wait for the Advent of Code event to start.
+
+ This function returns `hours_before` hours (default: 1) before the Advent of Code
+ actually starts. This allows functions to schedule and execute code that
+ needs to run before the event starts.
+
+ If the event has already started, this function returns immediately.
+
+ Note: The "next Advent of Code" is determined based on the current value
+ of the `AOC_YEAR` environment variable. This allows callers to exit early
+ if we're already past the Advent of Code edition the bot is currently
+ configured for.
+ """
+ start = datetime.datetime(AdventOfCode.year, 12, 1, 0, 0, 0, tzinfo=EST)
+ target = start - datetime.timedelta(hours=hours_before)
+ now = datetime.datetime.now(EST)
+
+ # If we've already reached or passed the target, we
+ # simply return immediately.
+ if now >= target:
+ return
+
+ delta = target - now
+ await asyncio.sleep(delta.total_seconds())
+
+
+async def countdown_status(bot: Bot) -> None:
+ """
+ Add the time until the next challenge is published to the bot's status.
+
+ This function sleeps until 2 hours before the event and exits one hour
+ after the last challenge has been published. It will not start up again
+ automatically for next year's event, as it will wait for the environment
+ variable AOC_YEAR to be updated.
+
+ This ensures that the task will only start sleeping again once the next
+ event approaches and we're making preparations for that event.
+ """
+ log.debug("Initializing status countdown task.")
+ # We wait until 2 hours before the event starts. Then we
+ # set our first countdown status.
+ await wait_for_advent_of_code(hours_before=2)
+
+ # Log that we're going to start with the countdown status.
+ log.info("The Advent of Code has started or will start soon, starting countdown status.")
+
+ # Trying to change status too early in the bot's startup sequence will fail
+ # the task because the websocket instance has not yet been created. Waiting
+ # for this event means that both the websocket instance has been initialized
+ # and that the connection to Discord is mature enough to change the presence
+ # of the bot.
+ await bot.wait_until_guild_available()
+
+ # Calculate when the task needs to stop running. To prevent the task from
+ # sleeping for the entire year, it will only wait in the currently
+ # configured year. This means that the task will only start hibernating once
+ # we start preparing the next event by changing environment variables.
+ last_challenge = datetime.datetime(AdventOfCode.year, 12, 25, 0, 0, 0, tzinfo=EST)
+ end = last_challenge + datetime.timedelta(hours=1)
+
+ while datetime.datetime.now(EST) < end:
+ _, time_left = time_left_to_est_midnight()
+
+ aligned_seconds = int(math.ceil(time_left.seconds / COUNTDOWN_STEP)) * COUNTDOWN_STEP
+ hours, minutes = aligned_seconds // 3600, aligned_seconds // 60 % 60
+
+ if aligned_seconds == 0:
+ playing = "right now!"
+ elif aligned_seconds == COUNTDOWN_STEP:
+ playing = f"in less than {minutes} minutes"
+ elif hours == 0:
+ playing = f"in {minutes} minutes"
+ elif hours == 23:
+ playing = f"since {60 - minutes} minutes ago"
+ else:
+ playing = f"in {hours} hours and {minutes} minutes"
+
+ log.trace(f"Changing presence to {playing!r}")
+ # Status will look like "Playing in 5 hours and 30 minutes"
+ await bot.change_presence(activity=discord.Game(playing))
+
+ # Sleep until next aligned time or a full step if already aligned
+ delay = time_left.seconds % COUNTDOWN_STEP or COUNTDOWN_STEP
+ log.trace(f"The countdown status task will sleep for {delay} seconds.")
+ await asyncio.sleep(delay)
+
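To make the rounding above concrete, here is the arithmetic with the 5-minute COUNTDOWN_STEP used by the old cog further down in this diff, assuming 7h 12m 40s remain until midnight EST:

import math

COUNTDOWN_STEP = 60 * 5  # five-minute presence buckets

seconds_left = 7 * 3600 + 12 * 60 + 40  # 25,960 seconds until midnight EST

# Round up to the next 5-minute boundary before formatting the status text.
aligned_seconds = int(math.ceil(seconds_left / COUNTDOWN_STEP)) * COUNTDOWN_STEP
hours, minutes = aligned_seconds // 3600, aligned_seconds // 60 % 60
assert (hours, minutes) == (7, 15)  # shown as "Playing in 7 hours and 15 minutes"

# Sleep just long enough to land exactly on that boundary.
delay = seconds_left % COUNTDOWN_STEP or COUNTDOWN_STEP
assert delay == 160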
+
+async def new_puzzle_notification(bot: Bot) -> None:
+ """
+ Announce the release of a new Advent of Code puzzle.
+
+ This background task hibernates until just before the Advent of Code starts
+ and will then start announcing puzzles as they are published. After the
+ event has finished, this task will terminate.
+ """
+ # We wake up one hour before the event starts to prepare the announcement
+ # of the release of the first puzzle.
+ await wait_for_advent_of_code(hours_before=1)
+
+ log.info("The Advent of Code has started or will start soon, waking up notification task.")
+
+ # Ensure that the guild cache is loaded so we can get the Advent of Code
+ # channel and role.
+ await bot.wait_until_guild_available()
+ aoc_channel = bot.get_channel(Channels.advent_of_code)
+ aoc_role = aoc_channel.guild.get_role(AdventOfCode.role_id)
+
+ if not aoc_channel:
+ log.error("Could not find the AoC channel to send notification in")
+ return
+
+ if not aoc_role:
+ log.error("Could not find the AoC role to announce the daily puzzle")
+ return
+
+ # The last event day is 25 December, so we only have to schedule
+ # a reminder if the current day is before 25 December.
+ end = datetime.datetime(AdventOfCode.year, 12, 25, tzinfo=EST)
+ while datetime.datetime.now(EST) < end:
+ log.trace("Started puzzle notification loop.")
+ tomorrow, time_left = time_left_to_est_midnight()
+
+ # Use `total_seconds` to get the time left in fractional seconds. This
+ # should wake us up very close to the target. As a safeguard, the sleep
+ # duration is padded with 0.1 second to make sure we wake up after
+ # midnight.
+ sleep_seconds = time_left.total_seconds() + 0.1
+ log.trace(f"The puzzle notification task will sleep for {sleep_seconds} seconds")
+ await asyncio.sleep(sleep_seconds)
+
+ puzzle_url = f"https://adventofcode.com/{AdventOfCode.year}/day/{tomorrow.day}"
+
+ # Check if the puzzle is already available to prevent our members from spamming
+ # the puzzle page before it's available by making a small HEAD request.
+ for retry in range(1, 5):
+ log.debug(f"Checking if the puzzle is already available (attempt {retry}/4)")
+ async with bot.http_session.head(puzzle_url, raise_for_status=False) as resp:
+ if resp.status == 200:
+ log.debug("Puzzle is available; let's send an announcement message.")
+ break
+ log.debug(f"The puzzle is not yet available (status={resp.status})")
+ await asyncio.sleep(10)
+ else:
+ log.error(
+ "The puzzle does does not appear to be available "
+ "at this time, canceling announcement"
+ )
+ break
+
+ await aoc_channel.send(
+ f"{aoc_role.mention} Good morning! Day {tomorrow.day} is ready to be attempted. "
+ f"View it online now at {puzzle_url}. Good luck!",
+ allowed_mentions=discord.AllowedMentions(
+ everyone=False,
+ users=False,
+ roles=[aoc_role],
+ )
+ )
+
+ # Ensure that we don't send duplicate announcements by sleeping to well
+ # over midnight. This means we're certain to calculate the time to the
+ # next midnight at the top of the loop.
+ await asyncio.sleep(120)
+
+
+def background_task_callback(task: asyncio.Task) -> None:
+ """Check if the finished background task failed to make sure we log errors."""
+ if task.cancelled():
+ log.info(f"Background task `{task.get_name()}` was cancelled.")
+ elif exception := task.exception():
+ log.error(f"Background task `{task.get_name()}` failed:", exc_info=exception)
+ else:
+ log.info(f"Background task `{task.get_name()}` exited normally.")
diff --git a/bot/exts/christmas/adventofcode.py b/bot/exts/christmas/adventofcode.py
deleted file mode 100644
index b3fe0623..00000000
--- a/bot/exts/christmas/adventofcode.py
+++ /dev/null
@@ -1,743 +0,0 @@
-import asyncio
-import json
-import logging
-import math
-import re
-from datetime import datetime, timedelta
-from pathlib import Path
-from typing import List, Tuple
-
-import aiohttp
-import discord
-from bs4 import BeautifulSoup
-from discord.ext import commands
-from pytz import timezone
-
-from bot.constants import AdventOfCode as AocConfig, Channels, Colours, Emojis, Month, Tokens, WHITELISTED_CHANNELS
-from bot.utils import unlocked_role
-from bot.utils.decorators import in_month, override_in_channel
-
-log = logging.getLogger(__name__)
-
-AOC_REQUEST_HEADER = {"user-agent": "PythonDiscord AoC Event Bot"}
-AOC_SESSION_COOKIE = {"session": Tokens.aoc_session_cookie}
-
-EST = timezone("EST")
-COUNTDOWN_STEP = 60 * 5
-
-AOC_WHITELIST = WHITELISTED_CHANNELS + (Channels.advent_of_code,)
-
-
-def is_in_advent() -> bool:
- """Utility function to check if we are between December 1st and December 25th."""
- # Run the code from the 1st to the 24th
- return datetime.now(EST).day in range(1, 25) and datetime.now(EST).month == 12
-
-
-def time_left_to_aoc_midnight() -> Tuple[datetime, timedelta]:
- """Calculates the amount of time left until midnight in UTC-5 (Advent of Code maintainer timezone)."""
- # Change all time properties back to 00:00
- todays_midnight = datetime.now(EST).replace(microsecond=0,
- second=0,
- minute=0,
- hour=0)
-
- # We want tomorrow so add a day on
- tomorrow = todays_midnight + timedelta(days=1)
-
- # Calculate the timedelta between the current time and midnight
- return tomorrow, tomorrow - datetime.now(EST)
-
-
-async def countdown_status(bot: commands.Bot) -> None:
- """Set the playing status of the bot to the minutes & hours left until the next day's challenge."""
- while is_in_advent():
- _, time_left = time_left_to_aoc_midnight()
-
- aligned_seconds = int(math.ceil(time_left.seconds / COUNTDOWN_STEP)) * COUNTDOWN_STEP
- hours, minutes = aligned_seconds // 3600, aligned_seconds // 60 % 60
-
- if aligned_seconds == 0:
- playing = "right now!"
- elif aligned_seconds == COUNTDOWN_STEP:
- playing = f"in less than {minutes} minutes"
- elif hours == 0:
- playing = f"in {minutes} minutes"
- elif hours == 23:
- playing = f"since {60 - minutes} minutes ago"
- else:
- playing = f"in {hours} hours and {minutes} minutes"
-
- # Status will look like "Playing in 5 hours and 30 minutes"
- await bot.change_presence(activity=discord.Game(playing))
-
- # Sleep until next aligned time or a full step if already aligned
- delay = time_left.seconds % COUNTDOWN_STEP or COUNTDOWN_STEP
- await asyncio.sleep(delay)
-
-
-async def day_countdown(bot: commands.Bot) -> None:
- """
- Calculate the number of seconds left until the next day of Advent.
-
- Once we have calculated this we should then sleep that number and when the time is reached, ping
- the Advent of Code role notifying them that the new challenge is ready.
- """
- while is_in_advent():
- tomorrow, time_left = time_left_to_aoc_midnight()
-
- # Correct `time_left.seconds` for the sleep we have after unlocking the role (-5) and adding
- # a second (+1) as the bot is consistently ~0.5 seconds early in announcing the puzzles.
- await asyncio.sleep(time_left.seconds - 4)
-
- channel = bot.get_channel(Channels.advent_of_code)
-
- if not channel:
- log.error("Could not find the AoC channel to send notification in")
- break
-
- aoc_role = channel.guild.get_role(AocConfig.role_id)
- if not aoc_role:
- log.error("Could not find the AoC role to announce the daily puzzle")
- break
-
- async with unlocked_role(aoc_role, delay=5):
- puzzle_url = f"https://adventofcode.com/{AocConfig.year}/day/{tomorrow.day}"
-
- # Check if the puzzle is already available to prevent our members from spamming
- # the puzzle page before it's available by making a small HEAD request.
- for retry in range(1, 5):
- log.debug(f"Checking if the puzzle is already available (attempt {retry}/4)")
- async with bot.http_session.head(puzzle_url, raise_for_status=False) as resp:
- if resp.status == 200:
- log.debug("Puzzle is available; let's send an announcement message.")
- break
- log.debug(f"The puzzle is not yet available (status={resp.status})")
- await asyncio.sleep(10)
- else:
- log.error("The puzzle does does not appear to be available at this time, canceling announcement")
- break
-
- await channel.send(
- f"{aoc_role.mention} Good morning! Day {tomorrow.day} is ready to be attempted. "
- f"View it online now at {puzzle_url}. Good luck!"
- )
-
- # Wait a couple minutes so that if our sleep didn't sleep enough
- # time we don't end up announcing twice.
- await asyncio.sleep(120)
-
-
-class AdventOfCode(commands.Cog):
- """Advent of Code festivities! Ho Ho Ho!"""
-
- def __init__(self, bot: commands.Bot):
- self.bot = bot
-
- self._base_url = f"https://adventofcode.com/{AocConfig.year}"
- self.global_leaderboard_url = f"https://adventofcode.com/{AocConfig.year}/leaderboard"
- self.private_leaderboard_url = f"{self._base_url}/leaderboard/private/view/{AocConfig.leaderboard_id}"
-
- self.about_aoc_filepath = Path("./bot/resources/advent_of_code/about.json")
- self.cached_about_aoc = self._build_about_embed()
-
- self.cached_global_leaderboard = None
- self.cached_private_leaderboard = None
-
- self.countdown_task = None
- self.status_task = None
-
- countdown_coro = day_countdown(self.bot)
- self.countdown_task = self.bot.loop.create_task(countdown_coro)
-
- status_coro = countdown_status(self.bot)
- self.status_task = self.bot.loop.create_task(status_coro)
-
- @in_month(Month.DECEMBER)
- @commands.group(name="adventofcode", aliases=("aoc",))
- @override_in_channel(AOC_WHITELIST)
- async def adventofcode_group(self, ctx: commands.Context) -> None:
- """All of the Advent of Code commands."""
- if not ctx.invoked_subcommand:
- await ctx.send_help(ctx.command)
-
- @adventofcode_group.command(
- name="subscribe",
- aliases=("sub", "notifications", "notify", "notifs"),
- brief="Notifications for new days"
- )
- @override_in_channel(AOC_WHITELIST)
- async def aoc_subscribe(self, ctx: commands.Context) -> None:
- """Assign the role for notifications about new days being ready."""
- role = ctx.guild.get_role(AocConfig.role_id)
- unsubscribe_command = f"{ctx.prefix}{ctx.command.root_parent} unsubscribe"
-
- if role not in ctx.author.roles:
- await ctx.author.add_roles(role)
- await ctx.send("Okay! You have been __subscribed__ to notifications about new Advent of Code tasks. "
- f"You can run `{unsubscribe_command}` to disable them again for you.")
- else:
- await ctx.send("Hey, you already are receiving notifications about new Advent of Code tasks. "
- f"If you don't want them any more, run `{unsubscribe_command}` instead.")
-
- @adventofcode_group.command(name="unsubscribe", aliases=("unsub",), brief="Notifications for new days")
- @override_in_channel(AOC_WHITELIST)
- async def aoc_unsubscribe(self, ctx: commands.Context) -> None:
- """Remove the role for notifications about new days being ready."""
- role = ctx.guild.get_role(AocConfig.role_id)
-
- if role in ctx.author.roles:
- await ctx.author.remove_roles(role)
- await ctx.send("Okay! You have been __unsubscribed__ from notifications about new Advent of Code tasks.")
- else:
- await ctx.send("Hey, you don't even get any notifications about new Advent of Code tasks currently anyway.")
-
- @adventofcode_group.command(name="countdown", aliases=("count", "c"), brief="Return time left until next day")
- @override_in_channel(AOC_WHITELIST)
- async def aoc_countdown(self, ctx: commands.Context) -> None:
- """Return time left until next day."""
- if not is_in_advent():
- datetime_now = datetime.now(EST)
-
- # Calculate the delta to this & next year's December 1st to see which one is closest and not in the past
- this_year = datetime(datetime_now.year, 12, 1, tzinfo=EST)
- next_year = datetime(datetime_now.year + 1, 12, 1, tzinfo=EST)
- deltas = (dec_first - datetime_now for dec_first in (this_year, next_year))
- delta = min(delta for delta in deltas if delta >= timedelta()) # timedelta() gives 0 duration delta
-
- # Add a finer timedelta if there's less than a day left
- if delta.days == 0:
- delta_str = f"approximately {delta.seconds // 3600} hours"
- else:
- delta_str = f"{delta.days} days"
-
- await ctx.send(f"The Advent of Code event is not currently running. "
- f"The next event will start in {delta_str}.")
- return
-
- tomorrow, time_left = time_left_to_aoc_midnight()
-
- hours, minutes = time_left.seconds // 3600, time_left.seconds // 60 % 60
-
- await ctx.send(f"There are {hours} hours and {minutes} minutes left until day {tomorrow.day}.")
-
- @adventofcode_group.command(name="about", aliases=("ab", "info"), brief="Learn about Advent of Code")
- @override_in_channel(AOC_WHITELIST)
- async def about_aoc(self, ctx: commands.Context) -> None:
- """Respond with an explanation of all things Advent of Code."""
- await ctx.send("", embed=self.cached_about_aoc)
-
- @adventofcode_group.command(name="join", aliases=("j",), brief="Learn how to join the leaderboard (via DM)")
- @override_in_channel(AOC_WHITELIST)
- async def join_leaderboard(self, ctx: commands.Context) -> None:
- """DM the user the information for joining the PyDis AoC private leaderboard."""
- author = ctx.message.author
- log.info(f"{author.name} ({author.id}) has requested the PyDis AoC leaderboard code")
-
- info_str = (
- "Head over to https://adventofcode.com/leaderboard/private "
- f"with code `{AocConfig.leaderboard_join_code}` to join the PyDis private leaderboard!"
- )
- try:
- await author.send(info_str)
- except discord.errors.Forbidden:
- log.debug(f"{author.name} ({author.id}) has disabled DMs from server members")
- await ctx.send(f":x: {author.mention}, please (temporarily) enable DMs to receive the join code")
- else:
- await ctx.message.add_reaction(Emojis.envelope)
-
- @adventofcode_group.command(
- name="leaderboard",
- aliases=("board", "lb"),
- brief="Get a snapshot of the PyDis private AoC leaderboard",
- )
- @override_in_channel(AOC_WHITELIST)
- async def aoc_leaderboard(self, ctx: commands.Context, number_of_people_to_display: int = 10) -> None:
- """
- Pull the top number_of_people_to_display members from the PyDis leaderboard and post an embed.
-
- For readability, number_of_people_to_display defaults to 10. A maximum value is configured in the
- Advent of Code section of the bot constants. number_of_people_to_display values greater than this
- limit will default to this maximum and provide feedback to the user.
- """
- async with ctx.typing():
- await self._check_leaderboard_cache(ctx)
-
- if not self.cached_private_leaderboard:
- # Feedback on issues with leaderboard caching are sent by _check_leaderboard_cache()
- # Short circuit here if there's an issue
- return
-
- number_of_people_to_display = await self._check_n_entries(ctx, number_of_people_to_display)
-
- # Generate leaderboard table for embed
- members_to_print = self.cached_private_leaderboard.top_n(number_of_people_to_display)
- table = AocPrivateLeaderboard.build_leaderboard_embed(members_to_print)
-
- # Build embed
- aoc_embed = discord.Embed(
- description=f"Total members: {len(self.cached_private_leaderboard.members)}",
- colour=Colours.soft_green,
- timestamp=self.cached_private_leaderboard.last_updated
- )
- aoc_embed.set_author(name="Advent of Code", url=self.private_leaderboard_url)
- aoc_embed.set_footer(text="Last Updated")
-
- await ctx.send(
- content=f"Here's the current Top {number_of_people_to_display}! {Emojis.christmas_tree*3}\n\n{table}",
- embed=aoc_embed,
- )
-
- @adventofcode_group.command(
- name="stats",
- aliases=("dailystats", "ds"),
- brief="Get daily statistics for the PyDis private leaderboard"
- )
- @override_in_channel(AOC_WHITELIST)
- async def private_leaderboard_daily_stats(self, ctx: commands.Context) -> None:
- """
- Respond with a table of the daily completion statistics for the PyDis private leaderboard.
-
- Embed will display the total members and the number of users who have completed each day's puzzle
- """
- async with ctx.typing():
- await self._check_leaderboard_cache(ctx)
-
- if not self.cached_private_leaderboard:
- # Feedback on issues with leaderboard caching are sent by _check_leaderboard_cache()
- # Short circuit here if there's an issue
- return
-
- # Build ASCII table
- total_members = len(self.cached_private_leaderboard.members)
- _star = Emojis.star
- header = f"{'Day':4}{_star:^8}{_star*2:^4}{'% ' + _star:^8}{'% ' + _star*2:^4}\n{'='*35}"
- table = ""
- for day, completions in enumerate(self.cached_private_leaderboard.daily_completion_summary):
- per_one_star = f"{(completions[0]/total_members)*100:.2f}"
- per_two_star = f"{(completions[1]/total_members)*100:.2f}"
-
- table += f"{day+1:3}){completions[0]:^8}{completions[1]:^6}{per_one_star:^10}{per_two_star:^6}\n"
-
- table = f"```\n{header}\n{table}```"
-
- # Build embed
- daily_stats_embed = discord.Embed(
- colour=Colours.soft_green, timestamp=self.cached_private_leaderboard.last_updated
- )
- daily_stats_embed.set_author(name="Advent of Code", url=self._base_url)
- daily_stats_embed.set_footer(text="Last Updated")
-
- await ctx.send(
- content=f"Here's the current daily statistics!\n\n{table}", embed=daily_stats_embed
- )
-
- @adventofcode_group.command(
- name="global",
- aliases=("globalboard", "gb"),
- brief="Get a snapshot of the global AoC leaderboard",
- )
- @override_in_channel(AOC_WHITELIST)
- async def global_leaderboard(self, ctx: commands.Context, number_of_people_to_display: int = 10) -> None:
- """
- Pull the top number_of_people_to_display members from the global AoC leaderboard and post an embed.
-
- For readability, number_of_people_to_display defaults to 10. A maximum value is configured in the
- Advent of Code section of the bot constants. number_of_people_to_display values greater than this
- limit will default to this maximum and provide feedback to the user.
- """
- async with ctx.typing():
- await self._check_leaderboard_cache(ctx, global_board=True)
-
- if not self.cached_global_leaderboard:
- # Feedback on issues with leaderboard caching are sent by _check_leaderboard_cache()
- # Short circuit here if there's an issue
- return
-
- number_of_people_to_display = await self._check_n_entries(ctx, number_of_people_to_display)
-
- # Generate leaderboard table for embed
- members_to_print = self.cached_global_leaderboard.top_n(number_of_people_to_display)
- table = AocGlobalLeaderboard.build_leaderboard_embed(members_to_print)
-
- # Build embed
- aoc_embed = discord.Embed(colour=Colours.soft_green, timestamp=self.cached_global_leaderboard.last_updated)
- aoc_embed.set_author(name="Advent of Code", url=self._base_url)
- aoc_embed.set_footer(text="Last Updated")
-
- await ctx.send(
- f"Here's the current global Top {number_of_people_to_display}! {Emojis.christmas_tree*3}\n\n{table}",
- embed=aoc_embed,
- )
-
- async def _check_leaderboard_cache(self, ctx: commands.Context, global_board: bool = False) -> None:
- """
- Check age of current leaderboard & pull a new one if the board is too old.
-
- global_board is a boolean to toggle between the global board and the Pydis private board
- """
- # Toggle between global & private leaderboards
- if global_board:
- log.debug("Checking global leaderboard cache")
- leaderboard_str = "cached_global_leaderboard"
- _shortstr = "global"
- else:
- log.debug("Checking private leaderboard cache")
- leaderboard_str = "cached_private_leaderboard"
- _shortstr = "private"
-
- leaderboard = getattr(self, leaderboard_str)
- if not leaderboard:
- log.debug(f"No cached {_shortstr} leaderboard found")
- await self._boardgetter(global_board)
- else:
- leaderboard_age = datetime.utcnow() - leaderboard.last_updated
- age_seconds = leaderboard_age.total_seconds()
- if age_seconds < AocConfig.leaderboard_cache_age_threshold_seconds:
- log.debug(f"Cached {_shortstr} leaderboard age less than threshold ({age_seconds} seconds old)")
- else:
- log.debug(f"Cached {_shortstr} leaderboard age greater than threshold ({age_seconds} seconds old)")
- await self._boardgetter(global_board)
-
- leaderboard = getattr(self, leaderboard_str)
- if not leaderboard:
- await ctx.send(
- "",
- embed=_error_embed_helper(
- title=f"Something's gone wrong and there's no cached {_shortstr} leaderboard!",
- description="Please check in with a staff member.",
- ),
- )
-
- async def _check_n_entries(self, ctx: commands.Context, number_of_people_to_display: int) -> int:
- """Check for n > max_entries and n <= 0."""
- max_entries = AocConfig.leaderboard_max_displayed_members
- author = ctx.message.author
- if not 0 <= number_of_people_to_display <= max_entries:
- log.debug(
- f"{author.name} ({author.id}) attempted to fetch an invalid number "
- f" of entries from the AoC leaderboard ({number_of_people_to_display})"
- )
- await ctx.send(
- f":x: {author.mention}, number of entries to display must be a positive "
- f"integer less than or equal to {max_entries}\n\n"
- f"Head to {self.private_leaderboard_url} to view the entire leaderboard"
- )
- number_of_people_to_display = max_entries
-
- return number_of_people_to_display
-
- def _build_about_embed(self) -> discord.Embed:
- """Build and return the informational "About AoC" embed from the resources file."""
- with self.about_aoc_filepath.open("r", encoding="utf8") as f:
- embed_fields = json.load(f)
-
- about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)
- about_embed.set_author(name="Advent of Code", url=self._base_url)
- for field in embed_fields:
- about_embed.add_field(**field)
-
- about_embed.set_footer(text=f"Last Updated (UTC): {datetime.utcnow()}")
-
- return about_embed
-
- async def _boardgetter(self, global_board: bool) -> None:
- """Invoke the proper leaderboard getter based on the global_board boolean."""
- if global_board:
- self.cached_global_leaderboard = await AocGlobalLeaderboard.from_url()
- else:
- self.cached_private_leaderboard = await AocPrivateLeaderboard.from_url()
-
- def cog_unload(self) -> None:
- """Cancel season-related tasks on cog unload."""
- log.debug("Unloading the cog and canceling the background task.")
- self.countdown_task.cancel()
- self.status_task.cancel()
-
-
-class AocMember:
- """Object representing the Advent of Code user."""
-
- def __init__(self, name: str, aoc_id: int, stars: int, starboard: list, local_score: int, global_score: int):
- self.name = name
- self.aoc_id = aoc_id
- self.stars = stars
- self.starboard = starboard
- self.local_score = local_score
- self.global_score = global_score
- self.completions = self._completions_from_starboard(self.starboard)
-
- def __repr__(self):
- """Generate a user-friendly representation of the AocMember & their score."""
- return f"<{self.name} ({self.aoc_id}): {self.local_score}>"
-
- @classmethod
- def member_from_json(cls, injson: dict) -> "AocMember":
- """
- Generate an AocMember from AoC's private leaderboard API JSON.
-
- injson is expected to be the dict contained in:
-
- AoC_APIjson['members'][<member id>:str]
-
- Returns an AocMember object
- """
- return cls(
- name=injson["name"] if injson["name"] else "Anonymous User",
- aoc_id=int(injson["id"]),
- stars=injson["stars"],
- starboard=cls._starboard_from_json(injson["completion_day_level"]),
- local_score=injson["local_score"],
- global_score=injson["global_score"],
- )
-
- @staticmethod
- def _starboard_from_json(injson: dict) -> list:
- """
- Generate starboard from AoC's private leaderboard API JSON.
-
- injson is expected to be the dict contained in:
-
- AoC_APIjson['members'][<member id>:str]['completion_day_level']
-
- Returns a list of 25 lists, where each nested list contains a pair of booleans representing
- the code challenge completion status for that day
- """
- # Basic input validation
- if not isinstance(injson, dict):
- raise ValueError
-
- # Initialize starboard
- starboard = []
- for _i in range(25):
- starboard.append([False, False])
-
- # Iterate over days, which are the keys of injson (as str)
- for day in injson:
- idx = int(day) - 1
- # If there is a second star, the first star must be completed
- if "2" in injson[day].keys():
- starboard[idx] = [True, True]
- # If the day exists in injson, then at least the first star is completed
- else:
- starboard[idx] = [True, False]
-
- return starboard
-
- @staticmethod
- def _completions_from_starboard(starboard: list) -> tuple:
- """Return days completed, as a (1 star, 2 star) tuple, from starboard."""
- completions = [0, 0]
- for day in starboard:
- if day[0]:
- completions[0] += 1
- if day[1]:
- completions[1] += 1
-
- return tuple(completions)
-
-
-class AocPrivateLeaderboard:
- """Object representing the Advent of Code private leaderboard."""
-
- def __init__(self, members: list, owner_id: int, event_year: int):
- self.members = members
- self._owner_id = owner_id
- self._event_year = event_year
- self.last_updated = datetime.utcnow()
-
- self.daily_completion_summary = self.calculate_daily_completion()
-
- def top_n(self, n: int = 10) -> dict:
- """
- Return the top n participants on the leaderboard.
-
- If n is not specified, default to the top 10
- """
- return self.members[:n]
-
- def calculate_daily_completion(self) -> List[tuple]:
- """
- Calculate member completion rates by day.
-
- Return a list of tuples for each day containing the number of users who completed each part
- of the challenge
- """
- daily_member_completions = []
- for day in range(25):
- one_star_count = 0
- two_star_count = 0
- for member in self.members:
- if member.starboard[day][1]:
- one_star_count += 1
- two_star_count += 1
- elif member.starboard[day][0]:
- one_star_count += 1
- else:
- daily_member_completions.append((one_star_count, two_star_count))
-
- return(daily_member_completions)
-
- @staticmethod
- async def json_from_url(
- leaderboard_id: int = AocConfig.leaderboard_id, year: int = AocConfig.year
- ) -> "AocPrivateLeaderboard":
- """
- Request the API JSON from Advent of Code for leaderboard_id for the specified year's event.
-
- If no year is input, year defaults to the current year
- """
- api_url = f"https://adventofcode.com/{year}/leaderboard/private/view/{leaderboard_id}.json"
-
- log.debug("Querying Advent of Code Private Leaderboard API")
- async with aiohttp.ClientSession(cookies=AOC_SESSION_COOKIE, headers=AOC_REQUEST_HEADER) as session:
- async with session.get(api_url) as resp:
- if resp.status == 200:
- raw_dict = await resp.json()
- else:
- log.warning(f"Bad response received from AoC ({resp.status}), check session cookie")
- resp.raise_for_status()
-
- return raw_dict
-
- @classmethod
- def from_json(cls, injson: dict) -> "AocPrivateLeaderboard":
- """Generate an AocPrivateLeaderboard object from AoC's private leaderboard API JSON."""
- return cls(
- members=cls._sorted_members(injson["members"]), owner_id=injson["owner_id"], event_year=injson["event"]
- )
-
- @classmethod
- async def from_url(cls) -> "AocPrivateLeaderboard":
- """Helper wrapping of AocPrivateLeaderboard.json_from_url and AocPrivateLeaderboard.from_json."""
- api_json = await cls.json_from_url()
- return cls.from_json(api_json)
-
- @staticmethod
- def _sorted_members(injson: dict) -> list:
- """
- Generate a sorted list of AocMember objects from AoC's private leaderboard API JSON.
-
- Output list is sorted based on the AocMember.local_score
- """
- members = [AocMember.member_from_json(injson[member]) for member in injson]
- members.sort(key=lambda x: x.local_score, reverse=True)
-
- return members
-
- @staticmethod
- def build_leaderboard_embed(members_to_print: List[AocMember]) -> str:
- """
- Build a text table from members_to_print, a list of AocMember objects.
-
- Returns a string to be used as the content of the bot's leaderboard response
- """
- stargroup = f"{Emojis.star}, {Emojis.star*2}"
- header = f"{' '*3}{'Score'} {'Name':^25} {stargroup:^7}\n{'-'*44}"
- table = ""
- for i, member in enumerate(members_to_print):
- if member.name == "Anonymous User":
- name = f"{member.name} #{member.aoc_id}"
- else:
- name = member.name
-
- table += (
- f"{i+1:2}) {member.local_score:4} {name:25.25} "
- f"({member.completions[0]:2}, {member.completions[1]:2})\n"
- )
- else:
- table = f"```{header}\n{table}```"
-
- return table
-
-
-class AocGlobalLeaderboard:
- """Object representing the Advent of Code global leaderboard."""
-
- def __init__(self, members: List[tuple]):
- self.members = members
- self.last_updated = datetime.utcnow()
-
- def top_n(self, n: int = 10) -> dict:
- """
- Return the top n participants on the leaderboard.
-
- If n is not specified, default to the top 10
- """
- return self.members[:n]
-
- @classmethod
- async def from_url(cls) -> "AocGlobalLeaderboard":
- """
- Generate an list of tuples for the entries on AoC's global leaderboard.
-
- Because there is no API for this, web scraping needs to be used
- """
- aoc_url = f"https://adventofcode.com/{AocConfig.year}/leaderboard"
-
- async with aiohttp.ClientSession(headers=AOC_REQUEST_HEADER) as session:
- async with session.get(aoc_url) as resp:
- if resp.status == 200:
- raw_html = await resp.text()
- else:
- log.warning(f"Bad response received from AoC ({resp.status}), check session cookie")
- resp.raise_for_status()
-
- soup = BeautifulSoup(raw_html, "html.parser")
- ele = soup.find_all("div", class_="leaderboard-entry")
-
- exp = r"(?:[ ]{,2}(\d+)\))?[ ]+(\d+)\s+([\w\(\)\#\@\-\d ]+)"
-
- lb_list = []
- for entry in ele:
- # Strip off the AoC++ decorator
- raw_str = entry.text.replace("(AoC++)", "").rstrip()
-
- # Use a regex to extract the info from the string to unify formatting
- # Group 1: Rank
- # Group 2: Global Score
- # Group 3: Member string
- r = re.match(exp, raw_str)
-
- rank = int(r.group(1)) if r.group(1) else None
- global_score = int(r.group(2))
-
- member = r.group(3)
- if member.lower().startswith("(anonymous"):
- # Normalize anonymous user string by stripping () and title casing
- member = re.sub(r"[\(\)]", "", member).title()
-
- lb_list.append((rank, global_score, member))
-
- return cls(lb_list)
-
- @staticmethod
- def build_leaderboard_embed(members_to_print: List[tuple]) -> str:
- """
- Build a text table from members_to_print, a list of tuples.
-
- Returns a string to be used as the content of the bot's leaderboard response
- """
- header = f"{' '*4}{'Score'} {'Name':^25}\n{'-'*36}"
- table = ""
- for member in members_to_print:
- # In the event of a tie, rank is None
- if member[0]:
- rank = f"{member[0]:3})"
- else:
- rank = f"{' ':4}"
- table += f"{rank} {member[1]:4} {member[2]:25.25}\n"
- else:
- table = f"```{header}\n{table}```"
-
- return table
-
-
-def _error_embed_helper(title: str, description: str) -> discord.Embed:
- """Return a red-colored Embed with the given title and description."""
- return discord.Embed(title=title, description=description, colour=discord.Colour.red())
-
-
-def setup(bot: commands.Bot) -> None:
- """Advent of Code Cog load."""
- bot.add_cog(AdventOfCode(bot))
diff --git a/bot/exts/evergreen/error_handler.py b/bot/exts/evergreen/error_handler.py
index 6e518435..99af1519 100644
--- a/bot/exts/evergreen/error_handler.py
+++ b/bot/exts/evergreen/error_handler.py
@@ -42,8 +42,8 @@ class CommandErrorHandler(commands.Cog):
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:
"""Activates when a command opens an error."""
- if hasattr(ctx.command, 'on_error'):
- logging.debug("A command error occured but the command had it's own error handler.")
+ if getattr(error, 'handled', False):
+ logging.debug(f"Command {ctx.command} had its error already handled locally; ignoring.")
return
error = getattr(error, 'original', error)
diff --git a/bot/exts/evergreen/snakes/_snakes_cog.py b/bot/exts/evergreen/snakes/_snakes_cog.py
index 70bb0e73..d5e4f206 100644
--- a/bot/exts/evergreen/snakes/_snakes_cog.py
+++ b/bot/exts/evergreen/snakes/_snakes_cog.py
@@ -15,7 +15,8 @@ import aiohttp
import async_timeout
from PIL import Image, ImageDraw, ImageFont
from discord import Colour, Embed, File, Member, Message, Reaction
-from discord.ext.commands import BadArgument, Bot, Cog, CommandError, Context, bot_has_permissions, group
+from discord.errors import HTTPException
+from discord.ext.commands import Bot, Cog, CommandError, Context, bot_has_permissions, group
from bot.constants import ERROR_REPLIES, Tokens
from bot.exts.evergreen.snakes import _utils as utils
@@ -151,6 +152,7 @@ class Snakes(Cog):
self.snake_idioms = utils.get_resource("snake_idioms")
self.snake_quizzes = utils.get_resource("snake_quiz")
self.snake_facts = utils.get_resource("snake_facts")
+ self.num_movie_pages = None
# region: Helper methods
@staticmethod
@@ -739,71 +741,68 @@ class Snakes(Cog):
@snakes_group.command(name='movie')
async def movie_command(self, ctx: Context) -> None:
"""
- Gets a random snake-related movie from OMDB.
+ Gets a random snake-related movie from TMDB.
Written by Samuel.
Modified by gdude.
+ Modified by Will Da Silva.
"""
- url = "http://www.omdbapi.com/"
- page = random.randint(1, 27)
+ # Assume 8 result pages until the first request; the real page count is stored after the first response.
+ page = random.randint(1, self.num_movie_pages or 8)
- response = await self.bot.http_session.get(
- url,
- params={
- "s": "snake",
- "page": page,
- "type": "movie",
- "apikey": Tokens.omdb
- }
- )
- data = await response.json()
- movie = random.choice(data["Search"])["imdbID"]
-
- response = await self.bot.http_session.get(
- url,
- params={
- "i": movie,
- "apikey": Tokens.omdb
- }
- )
- data = await response.json()
-
- embed = Embed(
- title=data["Title"],
- color=SNAKE_COLOR
- )
-
- del data["Response"], data["imdbID"], data["Title"]
-
- for key, value in data.items():
- if not value or value == "N/A" or key in ("Response", "imdbID", "Title", "Type"):
- continue
+ async with ctx.typing():
+ response = await self.bot.http_session.get(
+ "https://api.themoviedb.org/3/search/movie",
+ params={
+ "query": "snake",
+ "page": page,
+ "language": "en-US",
+ "api_key": Tokens.tmdb,
+ }
+ )
+ data = await response.json()
+ if self.num_movie_pages is None:
+ self.num_movie_pages = data["total_pages"]
+ movie = random.choice(data["results"])["id"]
+
+ response = await self.bot.http_session.get(
+ f"https://api.themoviedb.org/3/movie/{movie}",
+ params={
+ "language": "en-US",
+ "api_key": Tokens.tmdb,
+ }
+ )
+ data = await response.json()
- if key == "Ratings": # [{'Source': 'Internet Movie Database', 'Value': '7.6/10'}]
- rating = random.choice(value)
+ embed = Embed(title=data["title"], color=SNAKE_COLOR)
- if rating["Source"] != "Internet Movie Database":
- embed.add_field(name=f"Rating: {rating['Source']}", value=rating["Value"])
+ if data["poster_path"] is not None:
+ embed.set_image(url=f"https://images.tmdb.org/t/p/original{data['poster_path']}")
- continue
+ if data["overview"]:
+ embed.add_field(name="Overview", value=data["overview"])
- if key == "Poster":
- embed.set_image(url=value)
- continue
+ if data["release_date"]:
+ embed.add_field(name="Release Date", value=data["release_date"])
- elif key == "imdbRating":
- key = "IMDB Rating"
+ if data["genres"]:
+ embed.add_field(name="Genres", value=", ".join([x["name"] for x in data["genres"]]))
- elif key == "imdbVotes":
- key = "IMDB Votes"
+ if data["vote_count"]:
+ embed.add_field(name="Rating", value=f"{data['vote_average']}/10 ({data['vote_count']} votes)", inline=True)
- embed.add_field(name=key, value=value, inline=True)
+ if data["budget"] and data["revenue"]:
+ embed.add_field(name="Budget", value=data["budget"], inline=True)
+ embed.add_field(name="Revenue", value=data["revenue"], inline=True)
- embed.set_footer(text="Data provided by the OMDB API")
+ embed.set_footer(text="This product uses the TMDb API but is not endorsed or certified by TMDb.")
+ embed.set_thumbnail(url="https://i.imgur.com/LtFtC8H.png")
- await ctx.channel.send(
- embed=embed
- )
+ try:
+ await ctx.channel.send(embed=embed)
+ except HTTPException as err:
+ await ctx.channel.send("An error occurred while fetching a snake-related movie!")
+ raise err from None
@snakes_group.command(name='quiz')
@locked()
@@ -1126,26 +1125,15 @@ class Snakes(Cog):
# endregion
# region: Error handlers
- @get_command.error
@card_command.error
- @video_command.error
async def command_error(self, ctx: Context, error: CommandError) -> None:
"""Local error handler for the Snake Cog."""
- embed = Embed()
- embed.colour = Colour.red()
-
- if isinstance(error, BadArgument):
- embed.description = str(error)
- embed.title = random.choice(ERROR_REPLIES)
-
- elif isinstance(error, OSError):
- log.error(f"snake_card encountered an OSError: {error} ({error.original})")
+ original_error = getattr(error, "original", None)
+ if isinstance(original_error, OSError):
+ error.handled = True
+ embed = Embed()
+ embed.colour = Colour.red()
+ log.error(f"snake_card encountered an OSError: {error} ({original_error})")
embed.description = "Could not generate the snake card! Please try again."
embed.title = random.choice(ERROR_REPLIES)
-
- else:
- log.error(f"Unhandled tag command error: {error} ({error.original})")
- return
-
- await ctx.send(embed=embed)
- # endregion
+ await ctx.send(embed=embed)
diff --git a/bot/exts/pride/pride_avatar.py b/bot/exts/pride/pride_avatar.py
index 3f9878e3..2eade796 100644
--- a/bot/exts/pride/pride_avatar.py
+++ b/bot/exts/pride/pride_avatar.py
@@ -1,10 +1,12 @@
import logging
from io import BytesIO
from pathlib import Path
+from typing import Tuple
+import aiohttp
import discord
-from PIL import Image, ImageDraw
-from discord.ext import commands
+from PIL import Image, ImageDraw, UnidentifiedImageError
+from discord.ext.commands import Bot, Cog, Context, group
from bot.constants import Colours
@@ -53,10 +55,10 @@ OPTIONS = {
}
-class PrideAvatar(commands.Cog):
+class PrideAvatar(Cog):
"""Put an LGBT spin on your avatar!"""
- def __init__(self, bot: commands.Bot):
+ def __init__(self, bot: Bot):
self.bot = bot
@staticmethod
@@ -78,8 +80,41 @@ class PrideAvatar(commands.Cog):
ring.putalpha(mask)
return ring
- @commands.group(aliases=["avatarpride", "pridepfp", "prideprofile"], invoke_without_command=True)
- async def prideavatar(self, ctx: commands.Context, option: str = "lgbt", pixels: int = 64) -> None:
+ @staticmethod
+ def process_options(option: str, pixels: int) -> Tuple[str, int, str]:
+ """Does some shared preprocessing for the prideavatar commands."""
+ return option.lower(), max(0, min(512, pixels)), OPTIONS.get(option.lower())
+
+ async def process_image(self, ctx: Context, image_bytes: bytes, pixels: int, flag: str, option: str) -> None:
+ """Constructs the final image, embeds it, and sends it."""
+ try:
+ avatar = Image.open(BytesIO(image_bytes))
+ except UnidentifiedImageError:
+ return await ctx.send("Cannot identify image from provided URL")
+ avatar = avatar.convert("RGBA").resize((1024, 1024))
+
+ avatar = self.crop_avatar(avatar)
+
+ ring = Image.open(Path(f"bot/resources/pride/flags/{flag}.png")).resize((1024, 1024))
+ ring = ring.convert("RGBA")
+ ring = self.crop_ring(ring, pixels)
+
+ avatar.alpha_composite(ring, (0, 0))
+ bufferedio = BytesIO()
+ avatar.save(bufferedio, format="PNG")
+ bufferedio.seek(0)
+
+ file = discord.File(bufferedio, filename="pride_avatar.png") # Creates file to be used in embed
+ embed = discord.Embed(
+ name="Your Lovely Pride Avatar",
+ description=f"Here is your lovely avatar, surrounded by\n a beautiful {option} flag. Enjoy :D"
+ )
+ embed.set_image(url="attachment://pride_avatar.png")
+ embed.set_footer(text=f"Made by {ctx.author.display_name}", icon_url=ctx.author.avatar_url)
+ await ctx.send(file=file, embed=embed)
+
+ @group(aliases=["avatarpride", "pridepfp", "prideprofile"], invoke_without_command=True)
+ async def prideavatar(self, ctx: Context, option: str = "lgbt", pixels: int = 64) -> None:
"""
This surrounds an avatar with a border of a specified LGBT flag.
@@ -88,45 +123,43 @@ class PrideAvatar(commands.Cog):
This has a maximum of 512px and defaults to a 64px border.
The full image is 1024x1024.
"""
- pixels = 0 if pixels < 0 else 512 if pixels > 512 else pixels
-
- option = option.lower()
-
- if option not in OPTIONS.keys():
+ option, pixels, flag = self.process_options(option, pixels)
+ if flag is None:
return await ctx.send("I don't have that flag!")
- flag = OPTIONS[option]
-
async with ctx.typing():
-
- # Get avatar bytes
image_bytes = await ctx.author.avatar_url.read()
- avatar = Image.open(BytesIO(image_bytes))
- avatar = avatar.convert("RGBA").resize((1024, 1024))
-
- avatar = self.crop_avatar(avatar)
-
- ring = Image.open(Path(f"bot/resources/pride/flags/{flag}.png")).resize((1024, 1024))
- ring = ring.convert("RGBA")
- ring = self.crop_ring(ring, pixels)
+ await self.process_image(ctx, image_bytes, pixels, flag, option)
- avatar.alpha_composite(ring, (0, 0))
- bufferedio = BytesIO()
- avatar.save(bufferedio, format="PNG")
- bufferedio.seek(0)
+ @prideavatar.command()
+ async def image(self, ctx: Context, url: str, option: str = "lgbt", pixels: int = 64) -> None:
+ """
+ This surrounds the image specified by the URL with a border of a specified LGBT flag.
- file = discord.File(bufferedio, filename="pride_avatar.png") # Creates file to be used in embed
- embed = discord.Embed(
- name="Your Lovely Pride Avatar",
- description=f"Here is your lovely avatar, surrounded by\n a beautiful {option} flag. Enjoy :D"
- )
- embed.set_image(url="attachment://pride_avatar.png")
- embed.set_footer(text=f"Made by {ctx.author.display_name}", icon_url=ctx.author.avatar_url)
+ This defaults to the LGBT rainbow flag if none is given.
+ The amount of pixels can be given which determines the thickness of the flag border.
+ This has a maximum of 512px and defaults to a 64px border.
+ The full image is 1024x1024.
+ """
+ option, pixels, flag = self.process_options(option, pixels)
+ if flag is None:
+ return await ctx.send("I don't have that flag!")
- await ctx.send(file=file, embed=embed)
+ async with ctx.typing():
+ async with aiohttp.ClientSession() as session:
+ try:
+ response = await session.get(url)
+ except aiohttp.client_exceptions.ClientConnectorError:
+ return await ctx.send("Cannot connect to provided URL!")
+ except aiohttp.client_exceptions.InvalidURL:
+ return await ctx.send("Invalid URL!")
+ if response.status != 200:
+ return await ctx.send("Bad response from provided URL!")
+ image_bytes = await response.read()
+ await self.process_image(ctx, image_bytes, pixels, flag, option)
@prideavatar.command()
- async def flags(self, ctx: commands.Context) -> None:
+ async def flags(self, ctx: Context) -> None:
"""This lists the flags that can be used with the prideavatar command."""
choices = sorted(set(OPTIONS.values()))
options = "• " + "\n• ".join(choices)
@@ -139,6 +172,6 @@ class PrideAvatar(commands.Cog):
await ctx.send(embed=embed)
-def setup(bot: commands.Bot) -> None:
+def setup(bot: Bot) -> None:
"""Cog load."""
bot.add_cog(PrideAvatar(bot))
diff --git a/bot/resources/advent_of_code/about.json b/bot/resources/advent_of_code/about.json
index 91ae6813..dd0fe59a 100644
--- a/bot/resources/advent_of_code/about.json
+++ b/bot/resources/advent_of_code/about.json
@@ -6,22 +6,22 @@
},
{
"name": "How do I sign up?",
- "value": "AoC utilizes the following services' OAuth:",
+ "value": "Sign up with one of these services:",
"inline": true
},
{
- "name": "Service",
+ "name": "Auth Services",
"value": "GitHub\nGoogle\nTwitter\nReddit",
"inline": true
},
{
"name": "How does scoring work?",
- "value": "Getting a star first is worth 100 points, second is 99, and so on down to 1 point at 100th place.\n\nCheck out AoC's [global leaderboard](https://adventofcode.com/leaderboard) to see who's leading this year's event!",
+ "value": "For the [global leaderboard](https://adventofcode.com/leaderboard), the first person to get a star first gets 100 points, the second person gets 99 points, and so on down to 1 point at 100th place.\n\nFor private leaderboards, the first person to get a star gets N points, where N is the number of people on the leaderboard. The second person to get the star gets N-1 points and so on and so forth.",
"inline": false
},
{
"name": "Join our private leaderboard!",
- "value": "In addition to the global leaderboard, AoC also offers private leaderboards, where you can compete against a smaller group of friends!\n\nGet the join code using `.aoc join` and head over to AoC's [private leaderboard page](https://adventofcode.com/leaderboard/private) to join the PyDis private leaderboard!",
+ "value": "Come join the Python Discord private leaderboard and compete against other people in the community! Get the join code using `.aoc join` and visit the [private leaderboard page](https://adventofcode.com/leaderboard/private) to join our leaderboard.",
"inline": false
}
]