author kwzrd <[email protected]> 2021-03-28 14:53:16 +0200
committer kwzrd <[email protected]> 2021-03-28 14:53:16 +0200
commit 4b1e90feab0cb6fc1eef708b709d1d2ed25ddbdc (patch)
tree e041f7bb680414356bfba11be4d9ffc318c1d253
parent Branding: add contextual message to #changelog notifications (diff)
parent Merge pull request #1466 from vcokltfre/discord-tags (diff)
Merge: changes from 'upstream/main' & conflict resolution
Conflict in the lockfile resolved by re-locking the merged Pipfile. Conflict in the Branding constants resolved by keeping my local version. The change of the cog's target branch from 'master' to 'main' is currently irrelevant, as we target a development branch anyway.
-rw-r--r--  .gitattributes | 1
-rw-r--r--  .github/CODEOWNERS | 1
-rw-r--r--  .github/FUNDING.yml | 2
-rw-r--r--  .github/workflows/build.yml | 2
-rw-r--r--  .github/workflows/deploy.yml | 3
-rw-r--r--  .github/workflows/lint-test.yml | 2
-rw-r--r--  .github/workflows/sentry_release.yml | 4
-rw-r--r--  .pre-commit-config.yaml | 2
-rw-r--r--  CONTRIBUTING.md | 6
-rw-r--r--  Pipfile | 1
-rw-r--r--  Pipfile.lock | 146
-rw-r--r--  README.md | 14
-rw-r--r--  bot/constants.py | 1
-rw-r--r--  bot/exts/backend/logging.py | 2
-rw-r--r--  bot/exts/filters/webhook_remover.py | 2
-rw-r--r--  bot/exts/help_channels/_cog.py | 2
-rw-r--r--  bot/exts/help_channels/_name.py | 12
-rw-r--r--  bot/exts/info/codeblock/_parsing.py | 3
-rw-r--r--  bot/exts/info/information.py | 3
-rw-r--r--  bot/exts/info/pypi.py | 68
-rw-r--r--  bot/exts/info/source.py | 2
-rw-r--r--  bot/exts/moderation/defcon.py | 4
-rw-r--r--  bot/exts/moderation/infraction/_scheduler.py | 6
-rw-r--r--  bot/exts/moderation/infraction/_utils.py | 46
-rw-r--r--  bot/exts/moderation/infraction/infractions.py | 2
-rw-r--r--  bot/exts/moderation/watchchannels/_watchchannel.py | 86
-rw-r--r--  bot/exts/recruitment/__init__.py | 0
-rw-r--r--  bot/exts/recruitment/talentpool/__init__.py | 8
-rw-r--r--  bot/exts/recruitment/talentpool/_cog.py (renamed from bot/exts/moderation/watchchannels/talentpool.py) | 189
-rw-r--r--  bot/exts/recruitment/talentpool/_review.py | 324
-rw-r--r--  bot/exts/utils/utils.py | 27
-rw-r--r--  bot/log.py | 49
-rw-r--r--  bot/resources/elements.json | 119
-rw-r--r--  bot/resources/foods.json | 52
-rw-r--r--  bot/resources/stars.json | 2
-rw-r--r--  bot/resources/tags/customhelp.md | 3
-rw-r--r--  bot/resources/tags/intents.md | 19
-rw-r--r--  bot/utils/time.py | 8
-rw-r--r--  config-default.yml | 11
-rw-r--r--  docker-compose.yml | 5
-rw-r--r--  tests/bot/exts/moderation/infraction/test_infractions.py | 2
-rw-r--r--  tests/bot/exts/moderation/infraction/test_utils.py | 12
42 files changed, 854 insertions, 399 deletions
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..176a458f9
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7217cb443..634bb4bca 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -12,6 +12,7 @@ bot/exts/info/information.py @mbaruh
bot/exts/filters/** @mbaruh
bot/exts/fun/** @ks129
bot/exts/utils/** @ks129
+bot/exts/recruitment/** @wookie184
# Rules
bot/rules/** @mbaruh
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index 6d9919ef2..000000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-patreon: python_discord
-custom: https://www.redbubble.com/people/pythondiscord
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6c97e8784..e6826e09b 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -4,7 +4,7 @@ on:
workflow_run:
workflows: ["Lint & Test"]
branches:
- - master
+ - main
types:
- completed
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
index 5a4aede30..8b809b777 100644
--- a/.github/workflows/deploy.yml
+++ b/.github/workflows/deploy.yml
@@ -4,12 +4,13 @@ on:
workflow_run:
workflows: ["Build"]
branches:
- - master
+ - main
types:
- completed
jobs:
build:
+ environment: production
if: github.event.workflow_run.conclusion == 'success'
name: Build & Push
runs-on: ubuntu-latest
diff --git a/.github/workflows/lint-test.yml b/.github/workflows/lint-test.yml
index 6fa8e8333..95bed2e14 100644
--- a/.github/workflows/lint-test.yml
+++ b/.github/workflows/lint-test.yml
@@ -3,7 +3,7 @@ name: Lint & Test
on:
push:
branches:
- - master
+ - main
pull_request:
diff --git a/.github/workflows/sentry_release.yml b/.github/workflows/sentry_release.yml
index b8d92e90a..f6a1e1f0e 100644
--- a/.github/workflows/sentry_release.yml
+++ b/.github/workflows/sentry_release.yml
@@ -3,14 +3,14 @@ name: Create Sentry release
on:
push:
branches:
- - master
+ - main
jobs:
create_sentry_release:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@master
+ uses: actions/checkout@main
- name: Create a Sentry.io release
uses: tclindner/[email protected]
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1597592ca..52500a282 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,8 +7,6 @@ repos:
- id: check-yaml
args: [--unsafe] # Required due to custom constructors (e.g. !ENV)
- id: end-of-file-fixer
- - id: mixed-line-ending
- args: [--fix=lf]
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/pre-commit/pygrep-hooks
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index be591d17e..addab32ff 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
# Contributing to one of Our Projects
-Our projects are open-source and are automatically deployed whenever commits are pushed to the `master` branch on each repository, so we've created a set of guidelines in order to keep everything clean and in working order.
+Our projects are open-source and are automatically deployed whenever commits are pushed to the `main` branch on each repository, so we've created a set of guidelines in order to keep everything clean and in working order.
Note that contributions may be rejected on the basis of a contributor failing to follow these guidelines.
@@ -8,7 +8,7 @@ Note that contributions may be rejected on the basis of a contributor failing to
1. **No force-pushes** or modifying the Git history in any way.
2. If you have direct access to the repository, **create a branch for your changes** and create a pull request for that branch. If not, create a branch on a fork of the repository and create a pull request from there.
- * It's common practice for a repository to reject direct pushes to `master`, so make branching a habit!
+ * It's common practice for a repository to reject direct pushes to `main`, so make branching a habit!
* If PRing from your own fork, **ensure that "Allow edits from maintainers" is checked**. This gives permission for maintainers to commit changes directly to your fork, speeding up the review process.
3. **Adhere to the prevailing code style**, which we enforce using [`flake8`](http://flake8.pycqa.org/en/latest/index.html) and [`pre-commit`](https://pre-commit.com/).
* Run `flake8` and `pre-commit` against your code [**before** you push it](https://soundcloud.com/lemonsaurusrex/lint-before-you-push). Your commit will be rejected by the build server if it fails to lint.
@@ -18,7 +18,7 @@ Note that contributions may be rejected on the basis of a contributor failing to
* Avoid making minor commits for fixing typos or linting errors. Since you've already set up a `pre-commit` hook to run the linting pipeline before a commit, you shouldn't be committing linting issues anyway.
* A more in-depth guide to writing great commit messages can be found in Chris Beam's [*How to Write a Git Commit Message*](https://chris.beams.io/posts/git-commit/)
5. **Avoid frequent pushes to the main repository**. This goes for PRs opened against your fork as well. Our test build pipelines are triggered every time a push to the repository (or PR) is made. Try to batch your commits until you've finished working for that session, or you've reached a point where collaborators need your commits to continue their own work. This also provides you the opportunity to amend commits for minor changes rather than having to commit them on their own because you've already pushed.
- * This includes merging master into your branch. Try to leave merging from master for after your PR passes review; a maintainer will bring your PR up to date before merging. Exceptions to this include: resolving merge conflicts, needing something that was pushed to master for your branch, or something was pushed to master that could potentionally affect the functionality of what you're writing.
+ * This includes merging main into your branch. Try to leave merging from main for after your PR passes review; a maintainer will bring your PR up to date before merging. Exceptions to this include: resolving merge conflicts, needing something that was pushed to main for your branch, or something was pushed to main that could potentionally affect the functionality of what you're writing.
6. **Don't fight the framework**. Every framework has its flaws, but the frameworks we've picked out have been carefully chosen for their particular merits. If you can avoid it, please resist reimplementing swathes of framework logic - the work has already been done for you!
7. If someone is working on an issue or pull request, **do not open your own pull request for the same task**. Instead, collaborate with the author(s) of the existing pull request. Duplicate PRs opened without communicating with the other author(s) and/or PyDis staff will be closed. Communication is key, and there's no point in two separate implementations of the same thing.
* One option is to fork the other contributor's repository and submit your changes to their branch with your own pull request. We suggest following these guidelines when interacting with their repository as well.
diff --git a/Pipfile b/Pipfile
index 56ece7611..86add29cb 100644
--- a/Pipfile
+++ b/Pipfile
@@ -29,7 +29,6 @@ sphinx = "~=2.2"
statsd = "~=3.3"
arrow = "~=0.17"
emoji = "~=0.6"
-python-json-logger = "~=2.0"
[dev-packages]
coverage = "~=5.0"
diff --git a/Pipfile.lock b/Pipfile.lock
index 589af71b7..240e2542e 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "bf99bc953819e8c890d5482834bf243b3c3bc4f5f637fcff4a94b0a046e7593e"
+ "sha256": "0f60e21b90fbc90c75f5978e15ed584f7cab7cb358d24c0f1d6b132fbc8b1907"
},
"pipfile-spec": 6,
"requires": {
@@ -265,10 +265,10 @@
},
"fakeredis": {
"hashes": [
- "sha256:01cb47d2286825a171fb49c0e445b1fa9307087e07cbb3d027ea10dbff108b6a",
- "sha256:2c6041cf0225889bc403f3949838b2c53470a95a9e2d4272422937786f5f8f73"
+ "sha256:1ac0cef767c37f51718874a33afb5413e69d132988cb6a80c6e6dbeddf8c7623",
+ "sha256:e0416e4941cecd3089b0d901e60c8dc3c944f6384f5e29e2261c0d3c5fa99669"
],
- "version": "==1.4.5"
+ "version": "==1.5.0"
},
"feedparser": {
"hashes": [
@@ -373,46 +373,45 @@
},
"lxml": {
"hashes": [
- "sha256:0448576c148c129594d890265b1a83b9cd76fd1f0a6a04620753d9a6bcfd0a4d",
- "sha256:127f76864468d6630e1b453d3ffbbd04b024c674f55cf0a30dc2595137892d37",
- "sha256:1471cee35eba321827d7d53d104e7b8c593ea3ad376aa2df89533ce8e1b24a01",
- "sha256:2363c35637d2d9d6f26f60a208819e7eafc4305ce39dc1d5005eccc4593331c2",
- "sha256:2e5cc908fe43fe1aa299e58046ad66981131a66aea3129aac7770c37f590a644",
- "sha256:2e6fd1b8acd005bd71e6c94f30c055594bbd0aa02ef51a22bbfa961ab63b2d75",
- "sha256:366cb750140f221523fa062d641393092813b81e15d0e25d9f7c6025f910ee80",
- "sha256:42ebca24ba2a21065fb546f3e6bd0c58c3fe9ac298f3a320147029a4850f51a2",
- "sha256:4e751e77006da34643ab782e4a5cc21ea7b755551db202bc4d3a423b307db780",
- "sha256:4fb85c447e288df535b17ebdebf0ec1cf3a3f1a8eba7e79169f4f37af43c6b98",
- "sha256:50c348995b47b5a4e330362cf39fc503b4a43b14a91c34c83b955e1805c8e308",
- "sha256:535332fe9d00c3cd455bd3dd7d4bacab86e2d564bdf7606079160fa6251caacf",
- "sha256:535f067002b0fd1a4e5296a8f1bf88193080ff992a195e66964ef2a6cfec5388",
- "sha256:5be4a2e212bb6aa045e37f7d48e3e1e4b6fd259882ed5a00786f82e8c37ce77d",
- "sha256:60a20bfc3bd234d54d49c388950195d23a5583d4108e1a1d47c9eef8d8c042b3",
- "sha256:648914abafe67f11be7d93c1a546068f8eff3c5fa938e1f94509e4a5d682b2d8",
- "sha256:681d75e1a38a69f1e64ab82fe4b1ed3fd758717bed735fb9aeaa124143f051af",
- "sha256:68a5d77e440df94011214b7db907ec8f19e439507a70c958f750c18d88f995d2",
- "sha256:69a63f83e88138ab7642d8f61418cf3180a4d8cd13995df87725cb8b893e950e",
- "sha256:6e4183800f16f3679076dfa8abf2db3083919d7e30764a069fb66b2b9eff9939",
- "sha256:6fd8d5903c2e53f49e99359b063df27fdf7acb89a52b6a12494208bf61345a03",
- "sha256:791394449e98243839fa822a637177dd42a95f4883ad3dec2a0ce6ac99fb0a9d",
- "sha256:7a7669ff50f41225ca5d6ee0a1ec8413f3a0d8aa2b109f86d540887b7ec0d72a",
- "sha256:7e9eac1e526386df7c70ef253b792a0a12dd86d833b1d329e038c7a235dfceb5",
- "sha256:7ee8af0b9f7de635c61cdd5b8534b76c52cd03536f29f51151b377f76e214a1a",
- "sha256:8246f30ca34dc712ab07e51dc34fea883c00b7ccb0e614651e49da2c49a30711",
- "sha256:8c88b599e226994ad4db29d93bc149aa1aff3dc3a4355dd5757569ba78632bdf",
- "sha256:923963e989ffbceaa210ac37afc9b906acebe945d2723e9679b643513837b089",
- "sha256:94d55bd03d8671686e3f012577d9caa5421a07286dd351dfef64791cf7c6c505",
- "sha256:97db258793d193c7b62d4e2586c6ed98d51086e93f9a3af2b2034af01450a74b",
- "sha256:a9d6bc8642e2c67db33f1247a77c53476f3a166e09067c0474facb045756087f",
- "sha256:cd11c7e8d21af997ee8079037fff88f16fda188a9776eb4b81c7e4c9c0a7d7fc",
- "sha256:d8d3d4713f0c28bdc6c806a278d998546e8efc3498949e3ace6e117462ac0a5e",
- "sha256:e0bfe9bb028974a481410432dbe1b182e8191d5d40382e5b8ff39cdd2e5c5931",
- "sha256:f4822c0660c3754f1a41a655e37cb4dbbc9be3d35b125a37fab6f82d47674ebc",
- "sha256:f83d281bb2a6217cd806f4cf0ddded436790e66f393e124dfe9731f6b3fb9afe",
- "sha256:fc37870d6716b137e80d19241d0e2cff7a7643b925dfa49b4c8ebd1295eb506e"
+ "sha256:079f3ae844f38982d156efce585bc540c16a926d4436712cf4baee0cce487a3d",
+ "sha256:0fbcf5565ac01dff87cbfc0ff323515c823081c5777a9fc7703ff58388c258c3",
+ "sha256:122fba10466c7bd4178b07dba427aa516286b846b2cbd6f6169141917283aae2",
+ "sha256:1b7584d421d254ab86d4f0b13ec662a9014397678a7c4265a02a6d7c2b18a75f",
+ "sha256:26e761ab5b07adf5f555ee82fb4bfc35bf93750499c6c7614bd64d12aaa67927",
+ "sha256:289e9ca1a9287f08daaf796d96e06cb2bc2958891d7911ac7cae1c5f9e1e0ee3",
+ "sha256:2a9d50e69aac3ebee695424f7dbd7b8c6d6eb7de2a2eb6b0f6c7db6aa41e02b7",
+ "sha256:33bb934a044cf32157c12bfcfbb6649807da20aa92c062ef51903415c704704f",
+ "sha256:3439c71103ef0e904ea0a1901611863e51f50b5cd5e8654a151740fde5e1cade",
+ "sha256:39b78571b3b30645ac77b95f7c69d1bffc4cf8c3b157c435a34da72e78c82468",
+ "sha256:4289728b5e2000a4ad4ab8da6e1db2e093c63c08bdc0414799ee776a3f78da4b",
+ "sha256:4bff24dfeea62f2e56f5bab929b4428ae6caba2d1eea0c2d6eb618e30a71e6d4",
+ "sha256:542d454665a3e277f76954418124d67516c5f88e51a900365ed54a9806122b83",
+ "sha256:5a0a14e264069c03e46f926be0d8919f4105c1623d620e7ec0e612a2e9bf1c04",
+ "sha256:66e575c62792c3f9ca47cb8b6fab9e35bab91360c783d1606f758761810c9791",
+ "sha256:74f7d8d439b18fa4c385f3f5dfd11144bb87c1da034a466c5b5577d23a1d9b51",
+ "sha256:7610b8c31688f0b1be0ef882889817939490a36d0ee880ea562a4e1399c447a1",
+ "sha256:76fa7b1362d19f8fbd3e75fe2fb7c79359b0af8747e6f7141c338f0bee2f871a",
+ "sha256:7728e05c35412ba36d3e9795ae8995e3c86958179c9770e65558ec3fdfd3724f",
+ "sha256:8157dadbb09a34a6bd95a50690595e1fa0af1a99445e2744110e3dca7831c4ee",
+ "sha256:820628b7b3135403540202e60551e741f9b6d3304371712521be939470b454ec",
+ "sha256:884ab9b29feaca361f7f88d811b1eea9bfca36cf3da27768d28ad45c3ee6f969",
+ "sha256:89b8b22a5ff72d89d48d0e62abb14340d9e99fd637d046c27b8b257a01ffbe28",
+ "sha256:92e821e43ad382332eade6812e298dc9701c75fe289f2a2d39c7960b43d1e92a",
+ "sha256:b007cbb845b28db4fb8b6a5cdcbf65bacb16a8bd328b53cbc0698688a68e1caa",
+ "sha256:bc4313cbeb0e7a416a488d72f9680fffffc645f8a838bd2193809881c67dd106",
+ "sha256:bccbfc27563652de7dc9bdc595cb25e90b59c5f8e23e806ed0fd623755b6565d",
+ "sha256:c4f05c5a7c49d2fb70223d0d5bcfbe474cf928310ac9fa6a7c6dddc831d0b1d4",
+ "sha256:ce256aaa50f6cc9a649c51be3cd4ff142d67295bfc4f490c9134d0f9f6d58ef0",
+ "sha256:d2e35d7bf1c1ac8c538f88d26b396e73dd81440d59c1ef8522e1ea77b345ede4",
+ "sha256:df7c53783a46febb0e70f6b05df2ba104610f2fb0d27023409734a3ecbb78fb2",
+ "sha256:efac139c3f0bf4f0939f9375af4b02c5ad83a622de52d6dfa8e438e8e01d0eb0",
+ "sha256:efd7a09678fd8b53117f6bae4fa3825e0a22b03ef0a932e070c0bdbb3a35e654",
+ "sha256:f2380a6376dfa090227b663f9678150ef27543483055cc327555fb592c5967e2",
+ "sha256:f8380c03e45cf09f8557bdaa41e1fa7c81f3ae22828e1db470ab2a6c96d8bc23",
+ "sha256:f90ba11136bfdd25cae3951af8da2e95121c9b9b93727b1b896e3fa105b2f586"
],
"index": "pypi",
- "version": "==4.6.2"
+ "version": "==4.6.3"
},
"markdownify": {
"hashes": [
@@ -627,13 +626,6 @@
"index": "pypi",
"version": "==1.0.0"
},
- "python-json-logger": {
- "hashes": [
- "sha256:f26eea7898db40609563bed0a7ca11af12e2a79858632706d835a0f961b7d398"
- ],
- "index": "pypi",
- "version": "==2.0.1"
- },
"pytz": {
"hashes": [
"sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da",
@@ -724,11 +716,11 @@
},
"soupsieve": {
"hashes": [
- "sha256:407fa1e8eb3458d1b5614df51d9651a1180ea5fedf07feb46e45d7e25e6d6cdd",
- "sha256:d3a5ea5b350423f47d07639f74475afedad48cf41c0ad7a82ca13a3928af34f6"
+ "sha256:052774848f448cf19c7e959adf5566904d525f33a3f8b6ba6f6f8f26ec7de0cc",
+ "sha256:c2c1c2d44f158cdbddab7824a9af8c4f83c76b1e23e049479aa432feb6c4c23b"
],
"markers": "python_version >= '3.0'",
- "version": "==2.2"
+ "version": "==2.2.1"
},
"sphinx": {
"hashes": [
@@ -981,19 +973,19 @@
},
"flake8": {
"hashes": [
- "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839",
- "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b"
+ "sha256:12d05ab02614b6aee8df7c36b97d1a3b2372761222b19b58621355e82acddcff",
+ "sha256:78873e372b12b093da7b5e5ed302e8ad9e988b38b063b61ad937f26ca58fc5f0"
],
"index": "pypi",
- "version": "==3.8.4"
+ "version": "==3.9.0"
},
"flake8-annotations": {
"hashes": [
- "sha256:8968ff12f296433028ad561c680ccc03a7cd62576d100c3f1475e058b3c11b43",
- "sha256:bd0505616c0d85ebb45c6052d339c69f320d3f87fa079ab4e91a4f234a863d05"
+ "sha256:40a4d504cdf64126ea0bdca39edab1608bc6d515e96569b7e7c3c59c84f66c36",
+ "sha256:eabbfb2dd59ae0e9835f509f930e79cd99fa4ff1026fe6ca073503a57407037c"
],
"index": "pypi",
- "version": "==2.6.0"
+ "version": "==2.6.1"
},
"flake8-bugbear": {
"hashes": [
@@ -1005,11 +997,11 @@
},
"flake8-docstrings": {
"hashes": [
- "sha256:3d5a31c7ec6b7367ea6506a87ec293b94a0a46c0bce2bb4975b7f1d09b6f3717",
- "sha256:a256ba91bc52307bef1de59e2a009c3cf61c3d0952dbe035d6ff7208940c2edc"
+ "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde",
+ "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b"
],
"index": "pypi",
- "version": "==1.5.0"
+ "version": "==1.6.0"
},
"flake8-import-order": {
"hashes": [
@@ -1051,11 +1043,11 @@
},
"identify": {
"hashes": [
- "sha256:46d1816c6a4fc2d1e8758f293a5dcc1ae6404ab344179d7c1e73637bf283beb1",
- "sha256:ed4a05fb80e3cbd12e83c959f9ff7f729ba6b66ab8d6178850fd5cb4c1cf6c5d"
+ "sha256:1cfb05b578de996677836d5a2dde14b3dffde313cf7d2b3e793a0787a36e26dd",
+ "sha256:9cc5f58996cd359b7b72f0a5917d8639de5323917e6952a3bfbf36301b576f40"
],
"markers": "python_full_version >= '3.6.1'",
- "version": "==2.1.3"
+ "version": "==2.2.1"
},
"idna": {
"hashes": [
@@ -1097,27 +1089,27 @@
},
"pycodestyle": {
"hashes": [
- "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367",
- "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"
+ "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068",
+ "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==2.6.0"
+ "version": "==2.7.0"
},
"pydocstyle": {
"hashes": [
- "sha256:19b86fa8617ed916776a11cd8bc0197e5b9856d5433b777f51a3defe13075325",
- "sha256:aca749e190a01726a4fb472dd4ef23b5c9da7b9205c0a7857c06533de13fd678"
+ "sha256:164befb520d851dbcf0e029681b91f4f599c62c5cd8933fd54b1bfbd50e89e1f",
+ "sha256:d4449cf16d7e6709f63192146706933c7a334af7c0f083904799ccb851c50f6d"
],
- "markers": "python_version >= '3.5'",
- "version": "==5.1.1"
+ "markers": "python_version >= '3.6'",
+ "version": "==6.0.0"
},
"pyflakes": {
"hashes": [
- "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92",
- "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"
+ "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3",
+ "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==2.2.0"
+ "version": "==2.3.1"
},
"pyyaml": {
"hashes": [
@@ -1195,11 +1187,11 @@
},
"virtualenv": {
"hashes": [
- "sha256:147b43894e51dd6bba882cf9c282447f780e2251cd35172403745fc381a0a80d",
- "sha256:2be72df684b74df0ea47679a7df93fd0e04e72520022c57b479d8f881485dbe3"
+ "sha256:49ec4eb4c224c6f7dd81bb6d0a28a09ecae5894f4e593c89b0db0885f565a107",
+ "sha256:83f95875d382c7abafe06bd2a4cdd1b363e1bb77e02f155ebe8ac082a916b37c"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
- "version": "==20.4.2"
+ "version": "==20.4.3"
}
}
}
diff --git a/README.md b/README.md
index ac45e6340..9df905dc8 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,11 @@ and other tools to help keep the server running like a well-oiled machine.
Read the [Contributing Guide](https://pythondiscord.com/pages/contributing/bot/) on our website if you're interested in helping out.
-[1]: https://github.com/python-discord/bot/workflows/Lint%20&%20Test/badge.svg?branch=master
-[2]: https://github.com/python-discord/bot/actions?query=workflow%3A%22Lint+%26+Test%22+branch%3Amaster
-[3]: https://github.com/python-discord/bot/workflows/Build/badge.svg?branch=master
-[4]: https://github.com/python-discord/bot/actions?query=workflow%3ABuild+branch%3Amaster
-[5]: https://github.com/python-discord/bot/workflows/Deploy/badge.svg?branch=master
-[6]: https://github.com/python-discord/bot/actions?query=workflow%3ADeploy+branch%3Amaster
-[7]: https://raw.githubusercontent.com/python-discord/branding/master/logos/badge/badge_github.svg
+[1]: https://github.com/python-discord/bot/workflows/Lint%20&%20Test/badge.svg?branch=main
+[2]: https://github.com/python-discord/bot/actions?query=workflow%3A%22Lint+%26+Test%22+branch%3Amain
+[3]: https://github.com/python-discord/bot/workflows/Build/badge.svg?branch=main
+[4]: https://github.com/python-discord/bot/actions?query=workflow%3ABuild+branch%3Amain
+[5]: https://github.com/python-discord/bot/workflows/Deploy/badge.svg?branch=main
+[6]: https://github.com/python-discord/bot/actions?query=workflow%3ADeploy+branch%3Amain
+[7]: https://raw.githubusercontent.com/python-discord/branding/main/logos/badge/badge_github.svg
[8]: https://discord.gg/python
diff --git a/bot/constants.py b/bot/constants.py
index 394d59a73..467a4a2c4 100644
--- a/bot/constants.py
+++ b/bot/constants.py
@@ -438,6 +438,7 @@ class Channels(metaclass=YAMLGetter):
mods: int
mod_alerts: int
mod_spam: int
+ nomination_voting: int
organisation: int
admin_announcements: int
diff --git a/bot/exts/backend/logging.py b/bot/exts/backend/logging.py
index 94fa2b139..823f14ea4 100644
--- a/bot/exts/backend/logging.py
+++ b/bot/exts/backend/logging.py
@@ -29,7 +29,7 @@ class Logging(Cog):
url="https://github.com/python-discord/bot",
icon_url=(
"https://raw.githubusercontent.com/"
- "python-discord/branding/master/logos/logo_circle/logo_circle_large.png"
+ "python-discord/branding/main/logos/logo_circle/logo_circle_large.png"
)
)
diff --git a/bot/exts/filters/webhook_remover.py b/bot/exts/filters/webhook_remover.py
index 08fe94055..f11fc8912 100644
--- a/bot/exts/filters/webhook_remover.py
+++ b/bot/exts/filters/webhook_remover.py
@@ -14,7 +14,7 @@ WEBHOOK_URL_RE = re.compile(r"((?:https?://)?discord(?:app)?\.com/api/webhooks/\
ALERT_MESSAGE_TEMPLATE = (
"{user}, looks like you posted a Discord webhook URL. Therefore, your "
"message has been removed. Your webhook may have been **compromised** so "
- "please re-create the webhook **immediately**. If you believe this was "
+ "please re-create the webhook **immediately**. If you believe this was a "
"mistake, please let us know."
)
diff --git a/bot/exts/help_channels/_cog.py b/bot/exts/help_channels/_cog.py
index 6abf99810..1c730dce9 100644
--- a/bot/exts/help_channels/_cog.py
+++ b/bot/exts/help_channels/_cog.py
@@ -54,7 +54,7 @@ class HelpChannels(commands.Cog):
* Contains channels which aren't in use
* Channels are used to refill the Available category
- Help channels are named after the chemical elements in `bot/resources/elements.json`.
+ Help channels are named after the foods in `bot/resources/foods.json`.
"""
def __init__(self, bot: Bot):
diff --git a/bot/exts/help_channels/_name.py b/bot/exts/help_channels/_name.py
index 728234b1e..061f855ae 100644
--- a/bot/exts/help_channels/_name.py
+++ b/bot/exts/help_channels/_name.py
@@ -14,11 +14,11 @@ log = logging.getLogger(__name__)
def create_name_queue(*categories: discord.CategoryChannel) -> deque:
"""
- Return a queue of element names to use for creating new channels.
+ Return a queue of food names to use for creating new channels.
Skip names that are already in use by channels in `categories`.
"""
- log.trace("Creating the chemical element name queue.")
+ log.trace("Creating the food name queue.")
used_names = _get_used_names(*categories)
@@ -31,7 +31,7 @@ def create_name_queue(*categories: discord.CategoryChannel) -> deque:
def _get_names() -> t.List[str]:
"""
- Return a truncated list of prefixed element names.
+ Return a truncated list of prefixed food names.
The amount of names is configured with `HelpChannels.max_total_channels`.
The prefix is configured with `HelpChannels.name_prefix`.
@@ -39,10 +39,10 @@ def _get_names() -> t.List[str]:
count = constants.HelpChannels.max_total_channels
prefix = constants.HelpChannels.name_prefix
- log.trace(f"Getting the first {count} element names from JSON.")
+ log.trace(f"Getting the first {count} food names from JSON.")
- with Path("bot/resources/elements.json").open(encoding="utf-8") as elements_file:
- all_names = json.load(elements_file)
+ with Path("bot/resources/foods.json").open(encoding="utf-8") as foods_file:
+ all_names = json.load(foods_file)
if prefix:
return [prefix + name for name in all_names[:count]]
diff --git a/bot/exts/info/codeblock/_parsing.py b/bot/exts/info/codeblock/_parsing.py
index e35fbca22..73fd11b94 100644
--- a/bot/exts/info/codeblock/_parsing.py
+++ b/bot/exts/info/codeblock/_parsing.py
@@ -103,6 +103,9 @@ def _is_python_code(content: str) -> bool:
"""Return True if `content` is valid Python consisting of more than just expressions."""
log.trace("Checking if content is Python code.")
try:
+ # Remove null bytes because they cause ast.parse to raise a ValueError.
+ content = content.replace("\x00", "")
+
# Attempt to parse the message into an AST node.
# Invalid Python code will raise a SyntaxError.
tree = ast.parse(content)
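As a quick illustration of why the null-byte strip above is needed, here is a standalone sketch (not part of the bot's code; the sample string is made up):

    import ast

    snippet = "print('hello')\x00"  # made-up message content containing a stray NUL byte

    try:
        ast.parse(snippet)
    except (ValueError, SyntaxError):
        # ast.parse() rejects source containing null bytes (a ValueError on the Python
        # versions the bot targeted; newer interpreters report it as a SyntaxError).
        # Stripping the NUL first lets otherwise-valid code parse normally.
        tree = ast.parse(snippet.replace("\x00", ""))
        print(type(tree).__name__)  # Module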
diff --git a/bot/exts/info/information.py b/bot/exts/info/information.py
index 92ddf0fbd..c54ca96bf 100644
--- a/bot/exts/info/information.py
+++ b/bot/exts/info/information.py
@@ -64,7 +64,8 @@ class Information(Cog):
defcon_info = ""
if cog := self.bot.get_cog("Defcon"):
- defcon_info = f"Defcon threshold: {humanize_delta(cog.threshold)}\n"
+ threshold = humanize_delta(cog.threshold) if cog.threshold else "-"
+ defcon_info = f"Defcon threshold: {threshold}\n"
verification = f"Verification level: {ctx.guild.verification_level.name}\n"
diff --git a/bot/exts/info/pypi.py b/bot/exts/info/pypi.py
index 3e326e8bb..2e42e7d6b 100644
--- a/bot/exts/info/pypi.py
+++ b/bot/exts/info/pypi.py
@@ -1,19 +1,23 @@
import itertools
import logging
import random
+import re
from discord import Embed
from discord.ext.commands import Cog, Context, command
from discord.utils import escape_markdown
from bot.bot import Bot
-from bot.constants import Colours, NEGATIVE_REPLIES
+from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput
URL = "https://pypi.org/pypi/{package}/json"
-FIELDS = ("author", "requires_python", "summary", "license")
PYPI_ICON = "https://cdn.discordapp.com/emojis/766274397257334814.png"
+
PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))
+ILLEGAL_CHARACTERS = re.compile(r"[^-_.a-zA-Z0-9]+")
+INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay
+
log = logging.getLogger(__name__)
@@ -26,43 +30,47 @@ class PyPi(Cog):
@command(name="pypi", aliases=("package", "pack"))
async def get_package_info(self, ctx: Context, package: str) -> None:
"""Provide information about a specific package from PyPI."""
- embed = Embed(
- title=random.choice(NEGATIVE_REPLIES),
- colour=Colours.soft_red
- )
+ embed = Embed(title=random.choice(NEGATIVE_REPLIES), colour=Colours.soft_red)
embed.set_thumbnail(url=PYPI_ICON)
- async with self.bot.http_session.get(URL.format(package=package)) as response:
- if response.status == 404:
- embed.description = "Package could not be found."
+ error = True
+
+ if characters := re.search(ILLEGAL_CHARACTERS, package):
+ embed.description = f"Illegal character(s) passed into command: '{escape_markdown(characters.group(0))}'"
+
+ else:
+ async with self.bot.http_session.get(URL.format(package=package)) as response:
+ if response.status == 404:
+ embed.description = "Package could not be found."
+
+ elif response.status == 200 and response.content_type == "application/json":
+ response_json = await response.json()
+ info = response_json["info"]
- elif response.status == 200 and response.content_type == "application/json":
- response_json = await response.json()
- info = response_json["info"]
+ embed.title = f"{info['name']} v{info['version']}"
- embed.title = f"{info['name']} v{info['version']}"
- embed.url = info['package_url']
- embed.colour = next(PYPI_COLOURS)
+ embed.url = info["package_url"]
+ embed.colour = next(PYPI_COLOURS)
- for field in FIELDS:
- field_data = info[field]
+ summary = escape_markdown(info["summary"])
- # Field could be completely empty, in some cases can be a string with whitespaces, or None.
- if field_data and not field_data.isspace():
- if '\n' in field_data and field == "license":
- field_data = field_data.split('\n')[0]
+ # Summary could be completely empty, or just whitespace.
+ if summary and not summary.isspace():
+ embed.description = summary
+ else:
+ embed.description = "No summary provided."
- embed.add_field(
- name=field.replace("_", " ").title(),
- value=escape_markdown(field_data),
- inline=False,
- )
+ error = False
- else:
- embed.description = "There was an error when fetching your PyPi package."
- log.trace(f"Error when fetching PyPi package: {response.status}.")
+ else:
+ embed.description = "There was an error when fetching your PyPi package."
+ log.trace(f"Error when fetching PyPi package: {response.status}.")
- await ctx.send(embed=embed)
+ if error:
+ await ctx.send(embed=embed, delete_after=INVALID_INPUT_DELETE_DELAY)
+ await ctx.message.delete(delay=INVALID_INPUT_DELETE_DELAY)
+ else:
+ await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
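Condensed, the validation added to the pypi command above behaves like this standalone sketch (the helper name and return convention are illustrative, not the cog's actual interface):

    import re
    from typing import Optional

    # Same pattern as in the cog: anything outside [-_.a-zA-Z0-9] is disallowed.
    ILLEGAL_CHARACTERS = re.compile(r"[^-_.a-zA-Z0-9]+")

    def find_illegal_characters(package: str) -> Optional[str]:
        """Return the first run of disallowed characters in `package`, or None if it is clean."""
        match = ILLEGAL_CHARACTERS.search(package)
        return match.group(0) if match else None

    print(find_illegal_characters("discord.py"))     # None -> safe to query the PyPI JSON API
    print(find_illegal_characters("foo; rm -rf /"))  # '; ' -> error embed, deleted after a delay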
diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py
index 7b41352d4..49e74f204 100644
--- a/bot/exts/info/source.py
+++ b/bot/exts/info/source.py
@@ -97,7 +97,7 @@ class BotSource(commands.Cog):
else:
file_location = Path(filename).relative_to(Path.cwd()).as_posix()
- url = f"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}"
+ url = f"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}"
return url, file_location, first_line_no or None
diff --git a/bot/exts/moderation/defcon.py b/bot/exts/moderation/defcon.py
index bd16289b9..bab95405c 100644
--- a/bot/exts/moderation/defcon.py
+++ b/bot/exts/moderation/defcon.py
@@ -157,9 +157,9 @@ class Defcon(Cog):
await ctx.send(embed=embed)
- @defcon_group.command(aliases=('t', 'd'))
+ @defcon_group.command(name="threshold", aliases=('t', 'd'))
@has_any_role(*MODERATION_ROLES)
- async def threshold(
+ async def threshold_command(
self, ctx: Context, threshold: Union[DurationDelta, int], expiry: Optional[Expiry] = None
) -> None:
"""
diff --git a/bot/exts/moderation/infraction/_scheduler.py b/bot/exts/moderation/infraction/_scheduler.py
index a73f2e8da..988fb7220 100644
--- a/bot/exts/moderation/infraction/_scheduler.py
+++ b/bot/exts/moderation/infraction/_scheduler.py
@@ -173,6 +173,8 @@ class InfractionScheduler:
total = len(infractions)
end_msg = f" (#{id_} ; {total} infraction{ngettext('', 's', total)} total)"
+ purge = infraction.get("purge", "")
+
# Execute the necessary actions to apply the infraction on Discord.
if action_coro:
log.trace(f"Awaiting the infraction #{id_} application action coroutine.")
@@ -210,7 +212,7 @@ class InfractionScheduler:
log.error(f"Deletion of {infr_type} infraction #{id_} failed with error code {e.status}.")
infr_message = ""
else:
- infr_message = f" **{' '.join(infr_type.split('_'))}** to {user.mention}{expiry_msg}{end_msg}"
+ infr_message = f" **{purge}{' '.join(infr_type.split('_'))}** to {user.mention}{expiry_msg}{end_msg}"
# Send a confirmation message to the invoking context.
log.trace(f"Sending infraction #{id_} confirmation message.")
@@ -234,7 +236,7 @@ class InfractionScheduler:
footer=f"ID {infraction['id']}"
)
- log.info(f"Applied {infr_type} infraction #{id_} to {user}.")
+ log.info(f"Applied {purge}{infr_type} infraction #{id_} to {user}.")
return not failed
async def pardon_infraction(
diff --git a/bot/exts/moderation/infraction/_utils.py b/bot/exts/moderation/infraction/_utils.py
index e766c1e5c..a98b4828b 100644
--- a/bot/exts/moderation/infraction/_utils.py
+++ b/bot/exts/moderation/infraction/_utils.py
@@ -22,7 +22,6 @@ INFRACTION_ICONS = {
"voice_ban": (Icons.voice_state_red, Icons.voice_state_green),
}
RULES_URL = "https://pythondiscord.com/pages/rules"
-APPEALABLE_INFRACTIONS = ("ban", "mute", "voice_ban")
# Type aliases
UserObject = t.Union[discord.Member, discord.User]
@@ -31,8 +30,12 @@ Infraction = t.Dict[str, t.Union[str, int, bool]]
APPEAL_EMAIL = "[email protected]"
-INFRACTION_TITLE = f"Please review our rules over at {RULES_URL}"
-INFRACTION_APPEAL_FOOTER = f"To appeal this infraction, send an e-mail to {APPEAL_EMAIL}"
+INFRACTION_TITLE = "Please review our rules"
+INFRACTION_APPEAL_EMAIL_FOOTER = f"To appeal this infraction, send an e-mail to {APPEAL_EMAIL}"
+INFRACTION_APPEAL_MODMAIL_FOOTER = (
+ 'If you would like to discuss or appeal this infraction, '
+ 'send a message to the ModMail bot'
+)
INFRACTION_AUTHOR_NAME = "Infraction information"
INFRACTION_DESCRIPTION_TEMPLATE = (
@@ -71,13 +74,13 @@ async def post_user(ctx: Context, user: UserSnowflake) -> t.Optional[dict]:
async def post_infraction(
- ctx: Context,
- user: UserSnowflake,
- infr_type: str,
- reason: str,
- expires_at: datetime = None,
- hidden: bool = False,
- active: bool = True
+ ctx: Context,
+ user: UserSnowflake,
+ infr_type: str,
+ reason: str,
+ expires_at: datetime = None,
+ hidden: bool = False,
+ active: bool = True
) -> t.Optional[dict]:
"""Posts an infraction to the API."""
if isinstance(user, (discord.Member, discord.User)) and user.bot:
@@ -150,11 +153,11 @@ async def get_active_infraction(
async def notify_infraction(
- user: UserObject,
- infr_type: str,
- expires_at: t.Optional[str] = None,
- reason: t.Optional[str] = None,
- icon_url: str = Icons.token_removed
+ user: UserObject,
+ infr_type: str,
+ expires_at: t.Optional[str] = None,
+ reason: t.Optional[str] = None,
+ icon_url: str = Icons.token_removed
) -> bool:
"""DM a user about their new infraction and return True if the DM is successful."""
log.trace(f"Sending {user} a DM about their {infr_type} infraction.")
@@ -178,17 +181,18 @@ async def notify_infraction(
embed.title = INFRACTION_TITLE
embed.url = RULES_URL
- if infr_type in APPEALABLE_INFRACTIONS:
- embed.set_footer(text=INFRACTION_APPEAL_FOOTER)
+ embed.set_footer(
+ text=INFRACTION_APPEAL_EMAIL_FOOTER if infr_type == 'Ban' else INFRACTION_APPEAL_MODMAIL_FOOTER
+ )
return await send_private_embed(user, embed)
async def notify_pardon(
- user: UserObject,
- title: str,
- content: str,
- icon_url: str = Icons.user_verified
+ user: UserObject,
+ title: str,
+ content: str,
+ icon_url: str = Icons.user_verified
) -> bool:
"""DM a user about their pardoned infraction and return True if the DM is successful."""
log.trace(f"Sending {user} a DM about their pardoned infraction.")
diff --git a/bot/exts/moderation/infraction/infractions.py b/bot/exts/moderation/infraction/infractions.py
index 3b5b1df45..d89e80acc 100644
--- a/bot/exts/moderation/infraction/infractions.py
+++ b/bot/exts/moderation/infraction/infractions.py
@@ -318,6 +318,8 @@ class Infractions(InfractionScheduler, commands.Cog):
if infraction is None:
return
+ infraction["purge"] = "purge " if purge_days else ""
+
self.mod_log.ignore(Event.member_remove, user.id)
if reason:
diff --git a/bot/exts/moderation/watchchannels/_watchchannel.py b/bot/exts/moderation/watchchannels/_watchchannel.py
index f9fc12dc3..9f26c34f2 100644
--- a/bot/exts/moderation/watchchannels/_watchchannel.py
+++ b/bot/exts/moderation/watchchannels/_watchchannel.py
@@ -5,9 +5,8 @@ import textwrap
from abc import abstractmethod
from collections import defaultdict, deque
from dataclasses import dataclass
-from typing import Optional
+from typing import Any, Dict, Optional
-import dateutil.parser
import discord
from discord import Color, DMChannel, Embed, HTTPException, Message, errors
from discord.ext.commands import Cog, Context
@@ -20,7 +19,7 @@ from bot.exts.filters.webhook_remover import WEBHOOK_URL_RE
from bot.exts.moderation.modlog import ModLog
from bot.pagination import LinePaginator
from bot.utils import CogABCMeta, messages
-from bot.utils.time import time_since
+from bot.utils.time import get_time_delta
log = logging.getLogger(__name__)
@@ -47,7 +46,9 @@ class WatchChannel(metaclass=CogABCMeta):
webhook_id: int,
api_endpoint: str,
api_default_params: dict,
- logger: logging.Logger
+ logger: logging.Logger,
+ *,
+ disable_header: bool = False
) -> None:
self.bot = bot
@@ -66,6 +67,7 @@ class WatchChannel(metaclass=CogABCMeta):
self.channel = None
self.webhook = None
self.message_history = MessageHistory()
+ self.disable_header = disable_header
self._start = self.bot.loop.create_task(self.start_watchchannel())
@@ -133,7 +135,10 @@ class WatchChannel(metaclass=CogABCMeta):
if not await self.fetch_user_cache():
await self.modlog.send_log_message(
title=f"Warning: Failed to retrieve user cache for the {self.__class__.__name__} watch channel",
- text="Could not retrieve the list of watched users from the API and messages will not be relayed.",
+ text=(
+ "Could not retrieve the list of watched users from the API. "
+ "Messages will not be relayed, and reviews not rescheduled."
+ ),
ping_everyone=True,
icon_url=Icons.token_removed,
colour=Color.red()
@@ -267,6 +272,9 @@ class WatchChannel(metaclass=CogABCMeta):
async def send_header(self, msg: Message) -> None:
"""Sends a header embed with information about the relayed messages to the watch channel."""
+ if self.disable_header:
+ return
+
user_id = msg.author.id
guild = self.bot.get_guild(GuildConfig.id)
@@ -274,7 +282,7 @@ class WatchChannel(metaclass=CogABCMeta):
actor = actor.display_name if actor else self.watched_users[user_id]['actor']
inserted_at = self.watched_users[user_id]['inserted_at']
- time_delta = self._get_time_delta(inserted_at)
+ time_delta = get_time_delta(inserted_at)
reason = self.watched_users[user_id]['reason']
@@ -302,35 +310,61 @@ class WatchChannel(metaclass=CogABCMeta):
The optional kwarg `update_cache` specifies whether the cache should
be refreshed by polling the API.
"""
- if update_cache:
- if not await self.fetch_user_cache():
- await ctx.send(f":x: Failed to update {self.__class__.__name__} user cache, serving from cache")
- update_cache = False
+ watched_data = await self.prepare_watched_users_data(ctx, oldest_first, update_cache)
- lines = []
- for user_id, user_data in self.watched_users.items():
- inserted_at = user_data['inserted_at']
- time_delta = self._get_time_delta(inserted_at)
- lines.append(f"β€’ <@{user_id}> (added {time_delta})")
+ if update_cache and not watched_data["updated"]:
+ await ctx.send(f":x: Failed to update {self.__class__.__name__} user cache, serving from cache")
- if oldest_first:
- lines.reverse()
-
- lines = lines or ("There's nothing here yet.",)
+ lines = watched_data["info"].values() or ("There's nothing here yet.",)
embed = Embed(
- title=f"{self.__class__.__name__} watched users ({'updated' if update_cache else 'cached'})",
+ title=watched_data["title"],
color=Color.blue()
)
await LinePaginator.paginate(lines, ctx, embed, empty=False)
- @staticmethod
- def _get_time_delta(time_string: str) -> str:
- """Returns the time in human-readable time delta format."""
- date_time = dateutil.parser.isoparse(time_string).replace(tzinfo=None)
- time_delta = time_since(date_time, precision="minutes", max_units=1)
+ async def prepare_watched_users_data(
+ self, ctx: Context, oldest_first: bool = False, update_cache: bool = True
+ ) -> Dict[str, Any]:
+ """
+ Prepare overview information of watched users to list.
+
+ The optional kwarg `oldest_first` orders the list by oldest entry.
+
+ The optional kwarg `update_cache` specifies whether the cache should
+ be refreshed by polling the API.
+
+ Returns a dictionary with a "title" key for the list's title, and a "info" key with
+ information about each user.
+
+ The dictionary additionally has an "updated" field which is true if a cache update was
+ requested and it succeeded.
+ """
+ list_data = {}
+ if update_cache:
+ if not await self.fetch_user_cache():
+ update_cache = False
+ list_data["updated"] = update_cache
+
+ watched_iter = self.watched_users.items()
+ if oldest_first:
+ watched_iter = reversed(watched_iter)
+
+ list_data["info"] = {}
+ for user_id, user_data in watched_iter:
+ member = ctx.guild.get_member(user_id)
+ line = f"β€’ `{user_id}`"
+ if member:
+ line += f" ({member.name}#{member.discriminator})"
+ inserted_at = user_data['inserted_at']
+ line += f", added {get_time_delta(inserted_at)}"
+ if not member: # Cross off users who left the server.
+ line = f"~~{line}~~"
+ list_data["info"][user_id] = line
+
+ list_data["title"] = f"{self.__class__.__name__} watched users ({'updated' if update_cache else 'cached'})"
- return time_delta
+ return list_data
def _remove_user(self, user_id: int) -> None:
"""Removes a user from a watch channel."""
diff --git a/bot/exts/recruitment/__init__.py b/bot/exts/recruitment/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/bot/exts/recruitment/__init__.py
diff --git a/bot/exts/recruitment/talentpool/__init__.py b/bot/exts/recruitment/talentpool/__init__.py
new file mode 100644
index 000000000..52d27eb99
--- /dev/null
+++ b/bot/exts/recruitment/talentpool/__init__.py
@@ -0,0 +1,8 @@
+from bot.bot import Bot
+
+
+def setup(bot: Bot) -> None:
+ """Load the TalentPool cog."""
+ from bot.exts.recruitment.talentpool._cog import TalentPool
+
+ bot.add_cog(TalentPool(bot))
diff --git a/bot/exts/moderation/watchchannels/talentpool.py b/bot/exts/recruitment/talentpool/_cog.py
index dd3349c3a..b809cea17 100644
--- a/bot/exts/moderation/watchchannels/talentpool.py
+++ b/bot/exts/recruitment/talentpool/_cog.py
@@ -11,9 +11,12 @@ from bot.bot import Bot
from bot.constants import Channels, Guild, MODERATION_ROLES, STAFF_ROLES, Webhooks
from bot.converters import FetchedMember
from bot.exts.moderation.watchchannels._watchchannel import WatchChannel
+from bot.exts.recruitment.talentpool._review import Reviewer
from bot.pagination import LinePaginator
from bot.utils import time
+REASON_MAX_CHARS = 1000
+
log = logging.getLogger(__name__)
@@ -28,8 +31,12 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
api_endpoint='bot/nominations',
api_default_params={'active': 'true', 'ordering': '-inserted_at'},
logger=log,
+ disable_header=True,
)
+ self.reviewer = Reviewer(self.__class__.__name__, bot, self)
+ self.bot.loop.create_task(self.reviewer.reschedule_reviews())
+
@group(name='talentpool', aliases=('tp', 'talent', 'nomination', 'n'), invoke_without_command=True)
@has_any_role(*MODERATION_ROLES)
async def nomination_group(self, ctx: Context) -> None:
@@ -39,7 +46,10 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
@nomination_group.command(name='watched', aliases=('all', 'list'), root_aliases=("nominees",))
@has_any_role(*MODERATION_ROLES)
async def watched_command(
- self, ctx: Context, oldest_first: bool = False, update_cache: bool = True
+ self,
+ ctx: Context,
+ oldest_first: bool = False,
+ update_cache: bool = True
) -> None:
"""
Shows the users that are currently being monitored in the talent pool.
@@ -51,6 +61,47 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
"""
await self.list_watched_users(ctx, oldest_first=oldest_first, update_cache=update_cache)
+ async def list_watched_users(
+ self,
+ ctx: Context,
+ oldest_first: bool = False,
+ update_cache: bool = True
+ ) -> None:
+ """
+ Gives an overview of the nominated users list.
+
+ It specifies the users' mention, name, how long ago they were nominated, and whether their
+ review was scheduled or already posted.
+
+ The optional kwarg `oldest_first` orders the list by oldest entry.
+
+ The optional kwarg `update_cache` specifies whether the cache should
+ be refreshed by polling the API.
+ """
+ # TODO Once the watch channel is removed, this can be done in a smarter way, without splitting and overriding
+ # the list_watched_users function.
+ watched_data = await self.prepare_watched_users_data(ctx, oldest_first, update_cache)
+
+ if update_cache and not watched_data["updated"]:
+ await ctx.send(f":x: Failed to update {self.__class__.__name__} user cache, serving from cache")
+
+ lines = []
+ for user_id, line in watched_data["info"].items():
+ if self.watched_users[user_id]['reviewed']:
+ line += " *(reviewed)*"
+ elif user_id in self.reviewer:
+ line += " *(scheduled)*"
+ lines.append(line)
+
+ if not lines:
+ lines = ("There's nothing here yet.",)
+
+ embed = Embed(
+ title=watched_data["title"],
+ color=Color.blue()
+ )
+ await LinePaginator.paginate(lines, ctx, embed, empty=False)
+
@nomination_group.command(name='oldest')
@has_any_role(*MODERATION_ROLES)
async def oldest_command(self, ctx: Context, update_cache: bool = True) -> None:
@@ -83,8 +134,8 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
await ctx.send(f":x: Failed to update the user cache; can't add {user}")
return
- if user.id in self.watched_users:
- await ctx.send(f":x: {user} is already being watched in the talent pool")
+ if len(reason) > REASON_MAX_CHARS:
+ await ctx.send(f":x: Maximum allowed characters for the reason is {REASON_MAX_CHARS}.")
return
# Manual request with `raise_for_status` as False because we want the actual response
@@ -101,14 +152,20 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
async with session.post(url, **kwargs) as resp:
response_data = await resp.json()
- if resp.status == 400 and response_data.get('user', False):
- await ctx.send(":x: The specified user can't be found in the database tables")
+ if resp.status == 400:
+ if response_data.get('user', False):
+ await ctx.send(":x: The specified user can't be found in the database tables")
+ elif response_data.get('actor', False):
+ await ctx.send(":x: You have already nominated this user")
+
return
else:
resp.raise_for_status()
self.watched_users[user.id] = response_data
- msg = f":white_check_mark: Messages sent by {user} will now be relayed to the talent pool channel"
+
+ if user.id not in self.reviewer:
+ self.reviewer.schedule_review(user.id)
history = await self.bot.api_client.get(
self.api_endpoint,
@@ -119,10 +176,9 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
}
)
+ msg = f"βœ… The nomination for {user} has been added to the talent pool"
if history:
- total = f"({len(history)} previous nominations in total)"
- start_reason = f"Watched: {textwrap.shorten(history[0]['reason'], width=500, placeholder='...')}"
- msg += f"\n\nUser's previous watch reasons {total}:```{start_reason}```"
+ msg += f"\n\n({len(history)} previous nominations in total)"
await ctx.send(msg)
@@ -163,6 +219,10 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
Providing a `reason` is required.
"""
+ if len(reason) > REASON_MAX_CHARS:
+ await ctx.send(f":x: Maximum allowed characters for the end reason is {REASON_MAX_CHARS}.")
+ return
+
if await self.unwatch(user.id, reason):
await ctx.send(f":white_check_mark: Messages sent by {user} will no longer be relayed")
else:
@@ -176,33 +236,87 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
@nomination_edit_group.command(name='reason')
@has_any_role(*MODERATION_ROLES)
- async def edit_reason_command(self, ctx: Context, nomination_id: int, *, reason: str) -> None:
- """
- Edits the reason/unnominate reason for the nomination with the given `id` depending on the status.
+ async def edit_reason_command(self, ctx: Context, nomination_id: int, actor: FetchedMember, *, reason: str) -> None:
+ """Edits the reason of a specific nominator in a specific active nomination."""
+ if len(reason) > REASON_MAX_CHARS:
+ await ctx.send(f":x: Maximum allowed characters for the reason is {REASON_MAX_CHARS}.")
+ return
+
+ try:
+ nomination = await self.bot.api_client.get(f"{self.api_endpoint}/{nomination_id}")
+ except ResponseCodeError as e:
+ if e.response.status == 404:
+ self.log.trace(f"Nomination API 404: Can't find a nomination with id {nomination_id}")
+ await ctx.send(f":x: Can't find a nomination with id `{nomination_id}`")
+ return
+ else:
+ raise
+
+ if not nomination["active"]:
+ await ctx.send(":x: Can't edit the reason of an inactive nomination.")
+ return
+
+ if not any(entry["actor"] == actor.id for entry in nomination["entries"]):
+ await ctx.send(f":x: {actor} doesn't have an entry in this nomination.")
+ return
+
+ self.log.trace(f"Changing reason for nomination with id {nomination_id} of actor {actor} to {repr(reason)}")
+
+ await self.bot.api_client.patch(
+ f"{self.api_endpoint}/{nomination_id}",
+ json={"actor": actor.id, "reason": reason}
+ )
+ await self.fetch_user_cache() # Update cache
+ await ctx.send(":white_check_mark: Successfully updated nomination reason.")
+
+ @nomination_edit_group.command(name='end_reason')
+ @has_any_role(*MODERATION_ROLES)
+ async def edit_end_reason_command(self, ctx: Context, nomination_id: int, *, reason: str) -> None:
+ """Edits the unnominate reason for the nomination with the given `id`."""
+ if len(reason) > REASON_MAX_CHARS:
+ await ctx.send(f":x: Maximum allowed characters for the end reason is {REASON_MAX_CHARS}.")
+ return
- If the nomination is active, the reason for nominating the user will be edited;
- If the nomination is no longer active, the reason for ending the nomination will be edited instead.
- """
try:
nomination = await self.bot.api_client.get(f"{self.api_endpoint}/{nomination_id}")
except ResponseCodeError as e:
if e.response.status == 404:
- self.log.trace(f"Nomination API 404: Can't nomination with id {nomination_id}")
+ self.log.trace(f"Nomination API 404: Can't find a nomination with id {nomination_id}")
await ctx.send(f":x: Can't find a nomination with id `{nomination_id}`")
return
else:
raise
- field = "reason" if nomination["active"] else "end_reason"
+ if nomination["active"]:
+ await ctx.send(":x: Can't edit the end reason of an active nomination.")
+ return
- self.log.trace(f"Changing {field} for nomination with id {nomination_id} to {reason}")
+ self.log.trace(f"Changing end reason for nomination with id {nomination_id} to {repr(reason)}")
await self.bot.api_client.patch(
f"{self.api_endpoint}/{nomination_id}",
- json={field: reason}
+ json={"end_reason": reason}
)
await self.fetch_user_cache() # Update cache.
- await ctx.send(f":white_check_mark: Updated the {field} of the nomination!")
+ await ctx.send(":white_check_mark: Updated the end reason of the nomination!")
+
+ @nomination_group.command(aliases=('mr',))
+ @has_any_role(*MODERATION_ROLES)
+ async def mark_reviewed(self, ctx: Context, user_id: int) -> None:
+ """Mark a user's nomination as reviewed and cancel the review task."""
+ if not await self.reviewer.mark_reviewed(ctx, user_id):
+ return
+ await ctx.send(f"βœ… The user with ID `{user_id}` was marked as reviewed.")
+
+ @nomination_group.command(aliases=('review',))
+ @has_any_role(*MODERATION_ROLES)
+ async def post_review(self, ctx: Context, user_id: int) -> None:
+ """Post the automatic review for the user ahead of time."""
+ if not await self.reviewer.mark_reviewed(ctx, user_id):
+ return
+
+ await self.reviewer.post_review(user_id, update_database=False)
+ await ctx.message.add_reaction("βœ…")
@Cog.listener()
async def on_member_ban(self, guild: Guild, user: Union[User, Member]) -> None:
@@ -232,19 +346,28 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
)
self._remove_user(user_id)
+ self.reviewer.cancel(user_id)
+
return True
def _nomination_to_string(self, nomination_object: dict) -> str:
"""Creates a string representation of a nomination."""
guild = self.bot.get_guild(Guild.id)
+ entries = []
+ for site_entry in nomination_object["entries"]:
+ actor_id = site_entry["actor"]
+ actor = guild.get_member(actor_id)
+
+ reason = site_entry["reason"] or "*None*"
+ created = time.format_infraction(site_entry["inserted_at"])
+ entries.append(
+ f"Actor: {actor.mention if actor else actor_id}\nCreated: {created}\nReason: {reason}"
+ )
- actor_id = nomination_object["actor"]
- actor = guild.get_member(actor_id)
+ entries_string = "\n\n".join(entries)
active = nomination_object["active"]
- reason = nomination_object["reason"] or "*None*"
-
start_date = time.format_infraction(nomination_object["inserted_at"])
if active:
lines = textwrap.dedent(
@@ -252,9 +375,9 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
===============
Status: **Active**
Date: {start_date}
- Actor: {actor.mention if actor else actor_id}
- Reason: {reason}
Nomination ID: `{nomination_object["id"]}`
+
+ {entries_string}
===============
"""
)
@@ -265,19 +388,19 @@ class TalentPool(WatchChannel, Cog, name="Talentpool"):
===============
Status: Inactive
Date: {start_date}
- Actor: {actor.mention if actor else actor_id}
- Reason: {reason}
+ Nomination ID: `{nomination_object["id"]}`
+
+ {entries_string}
End date: {end_date}
Unwatch reason: {nomination_object["end_reason"]}
- Nomination ID: `{nomination_object["id"]}`
===============
"""
)
return lines.strip()
-
-def setup(bot: Bot) -> None:
- """Load the TalentPool cog."""
- bot.add_cog(TalentPool(bot))
+ def cog_unload(self) -> None:
+ """Cancels all review tasks on cog unload."""
+ super().cog_unload()
+ self.reviewer.cancel_all()
diff --git a/bot/exts/recruitment/talentpool/_review.py b/bot/exts/recruitment/talentpool/_review.py
new file mode 100644
index 000000000..fb3461238
--- /dev/null
+++ b/bot/exts/recruitment/talentpool/_review.py
@@ -0,0 +1,324 @@
+import asyncio
+import logging
+import random
+import textwrap
+import typing
+from collections import Counter
+from datetime import datetime, timedelta
+from typing import List, Optional, Union
+
+from dateutil.parser import isoparse
+from dateutil.relativedelta import relativedelta
+from discord import Emoji, Member, Message, TextChannel
+from discord.ext.commands import Context
+
+from bot.api import ResponseCodeError
+from bot.bot import Bot
+from bot.constants import Channels, Guild, Roles
+from bot.utils.scheduling import Scheduler
+from bot.utils.time import get_time_delta, humanize_delta, time_since
+
+if typing.TYPE_CHECKING:
+ from bot.exts.recruitment.talentpool._cog import TalentPool
+
+log = logging.getLogger(__name__)
+
+# Maximum amount of days before an automatic review is posted.
+MAX_DAYS_IN_POOL = 30
+
+# Maximum amount of characters allowed in a message
+MAX_MESSAGE_SIZE = 2000
+
+
+class Reviewer:
+ """Schedules, formats, and publishes reviews of helper nominees."""
+
+ def __init__(self, name: str, bot: Bot, pool: 'TalentPool'):
+ self.bot = bot
+ self._pool = pool
+ self._review_scheduler = Scheduler(name)
+
+ def __contains__(self, user_id: int) -> bool:
+ """Return True if the user with ID user_id is scheduled for review, False otherwise."""
+ return user_id in self._review_scheduler
+
+ async def reschedule_reviews(self) -> None:
+ """Reschedule all active nominations to be reviewed at the appropriate time."""
+ log.trace("Rescheduling reviews")
+ await self.bot.wait_until_guild_available()
+ # TODO Once the watch channel is removed, this can be done in a smarter way, e.g create a sync function.
+ await self._pool.fetch_user_cache()
+
+ for user_id, user_data in self._pool.watched_users.items():
+ if not user_data["reviewed"]:
+ self.schedule_review(user_id)
+
+ def schedule_review(self, user_id: int) -> None:
+ """Schedules a single user for review."""
+ log.trace(f"Scheduling review of user with ID {user_id}")
+
+ user_data = self._pool.watched_users[user_id]
+ inserted_at = isoparse(user_data['inserted_at']).replace(tzinfo=None)
+ review_at = inserted_at + timedelta(days=MAX_DAYS_IN_POOL)
+
+ # If it's over a day overdue, it's probably an old nomination and shouldn't be automatically reviewed.
+ if datetime.utcnow() - review_at < timedelta(days=1):
+ self._review_scheduler.schedule_at(review_at, user_id, self.post_review(user_id, update_database=True))
+
+ async def post_review(self, user_id: int, update_database: bool) -> None:
+ """Format a generic review of a user and post it to the nomination voting channel."""
+ log.trace(f"Posting the review of {user_id}")
+
+ nomination = self._pool.watched_users[user_id]
+ if not nomination:
+ log.trace(f"There doesn't appear to be an active nomination for {user_id}")
+ return
+
+ guild = self.bot.get_guild(Guild.id)
+ channel = guild.get_channel(Channels.nomination_voting)
+ member = guild.get_member(user_id)
+
+ if update_database:
+ await self.bot.api_client.patch(f"{self._pool.api_endpoint}/{nomination['id']}", json={"reviewed": True})
+
+ if not member:
+ await channel.send(
+ f"I tried to review the user with ID `{user_id}`, but they don't appear to be on the server πŸ˜”"
+ )
+ return
+
+ opening = f"<@&{Roles.moderators}> <@&{Roles.admins}>\n{member.mention} ({member}) for Helper!"
+
+ current_nominations = "\n\n".join(
+ f"**<@{entry['actor']}>:** {entry['reason'] or '*no reason given*'}" for entry in nomination['entries']
+ )
+ current_nominations = f"**Nominated by:**\n{current_nominations}"
+
+ review_body = await self._construct_review_body(member)
+
+ seen_emoji = self._random_ducky(guild)
+ vote_request = (
+ "*Refer to their nomination and infraction histories for further details*.\n"
+ f"*Please react {seen_emoji} if you've seen this post."
+ " Then react πŸ‘ for approval, or πŸ‘Ž for disapproval*."
+ )
+
+ review = "\n\n".join(part for part in (opening, current_nominations, review_body, vote_request))
+
+ message = (await self._bulk_send(channel, review))[-1]
+ for reaction in (seen_emoji, "πŸ‘", "πŸ‘Ž"):
+ await message.add_reaction(reaction)
+
+ async def _construct_review_body(self, member: Member) -> str:
+ """Formats the body of the nomination, with details of activity, infractions, and previous nominations."""
+ activity = await self._activity_review(member)
+ infractions = await self._infractions_review(member)
+ prev_nominations = await self._previous_nominations_review(member)
+
+ body = f"{activity}\n\n{infractions}"
+ if prev_nominations:
+ body += f"\n\n{prev_nominations}"
+ return body
+
+ async def _activity_review(self, member: Member) -> str:
+ """
+ Format the activity of the nominee.
+
+ Adds details on how long they've been on the server, their total message count,
+ and the channels they're the most active in.
+ """
+ log.trace(f"Fetching the metricity data for {member.id}'s review")
+ try:
+ user_activity = await self.bot.api_client.get(f"bot/users/{member.id}/metricity_review_data")
+ except ResponseCodeError as e:
+ if e.status == 404:
+ log.trace(f"The user {member.id} seems to have no activity logged in Metricity.")
+ messages = "no"
+ channels = ""
+ else:
+                log.trace(f"An unexpected error occurred while fetching information of user {member.id}.")
+ raise
+ else:
+ log.trace(f"Activity found for {member.id}, formatting review.")
+ messages = user_activity["total_messages"]
+            # Make this part flexible to the number of expected and returned channels.
+ first_channel = user_activity["top_channel_activity"][0]
+ channels = f", with {first_channel[1]} messages in {first_channel[0]}"
+
+ if len(user_activity["top_channel_activity"]) > 1:
+ channels += ", " + ", ".join(
+ f"{count} in {channel}" for channel, count in user_activity["top_channel_activity"][1: -1]
+ )
+ last_channel = user_activity["top_channel_activity"][-1]
+ channels += f", and {last_channel[1]} in {last_channel[0]}"
+
+ time_on_server = humanize_delta(relativedelta(datetime.utcnow(), member.joined_at), max_units=2)
+ review = (
+ f"{member.name} has been on the server for **{time_on_server}**"
+ f" and has **{messages} messages**{channels}."
+ )
+
+ return review
+
+ async def _infractions_review(self, member: Member) -> str:
+ """
+ Formats the review of the nominee's infractions, if any.
+
+        The infractions are listed by type and count, along with how long ago the most recent one was issued.
+ """
+ log.trace(f"Fetching the infraction data for {member.id}'s review")
+ infraction_list = await self.bot.api_client.get(
+ 'bot/infractions/expanded',
+ params={'user__id': str(member.id), 'ordering': '-inserted_at'}
+ )
+
+ log.trace(f"{len(infraction_list)} infractions found for {member.id}, formatting review.")
+ if not infraction_list:
+ return "They have no infractions."
+
+        # Count the number of each type of infraction.
+ infr_stats = list(Counter(infr["type"] for infr in infraction_list).items())
+
+ # Format into a sentence.
+ if len(infr_stats) == 1:
+ infr_type, count = infr_stats[0]
+ infractions = f"{count} {self._format_infr_name(infr_type, count)}"
+ else: # We already made sure they have infractions.
+ infractions = ", ".join(
+ f"{count} {self._format_infr_name(infr_type, count)}"
+ for infr_type, count in infr_stats[:-1]
+ )
+ last_infr, last_count = infr_stats[-1]
+ infractions += f", and {last_count} {self._format_infr_name(last_infr, last_count)}"
+
+ infractions = f"**{infractions}**"
+
+ # Show when the last one was issued.
+ if len(infraction_list) == 1:
+ infractions += ", issued "
+ else:
+ infractions += ", with the last infraction issued "
+
+        # Infractions are ordered by insertion date, newest first, so [0] is the most recent.
+ infractions += get_time_delta(infraction_list[0]['inserted_at'])
+
+ return f"They have {infractions}."
+
+ @staticmethod
+ def _format_infr_name(infr_type: str, count: int) -> str:
+ """
+ Format the infraction type in a way readable in a sentence.
+
+        Underscores are replaced with spaces, and an *attempt* is made to use the appropriate plural form if necessary.
+ This function by no means covers all rules of grammar.
+ """
+ formatted = infr_type.replace("_", " ")
+ if count > 1:
+ if infr_type.endswith(('ch', 'sh')):
+ formatted += "e"
+ formatted += "s"
+
+ return formatted
+
+ async def _previous_nominations_review(self, member: Member) -> Optional[str]:
+ """
+ Formats the review of the nominee's previous nominations.
+
+        The numbers of previous nominations and unnominations are shown, as well as the reason the last one ended.
+ """
+ log.trace(f"Fetching the nomination history data for {member.id}'s review")
+ history = await self.bot.api_client.get(
+ self._pool.api_endpoint,
+ params={
+ "user__id": str(member.id),
+ "active": "false",
+ "ordering": "-inserted_at"
+ }
+ )
+
+ log.trace(f"{len(history)} previous nominations found for {member.id}, formatting review.")
+ if not history:
+ return
+
+ num_entries = sum(len(nomination["entries"]) for nomination in history)
+
+ nomination_times = f"{num_entries} times" if num_entries > 1 else "once"
+ rejection_times = f"{len(history)} times" if len(history) > 1 else "once"
+ end_time = time_since(isoparse(history[0]['ended_at']).replace(tzinfo=None), max_units=2)
+
+ review = (
+ f"They were nominated **{nomination_times}** before"
+ f", but their nomination was called off **{rejection_times}**."
+ f"\nThe last one ended {end_time} with the reason: {history[0]['end_reason']}"
+ )
+
+ return review
+
+ @staticmethod
+ def _random_ducky(guild: Guild) -> Union[Emoji, str]:
+        """Picks a random ducky emoji to be used to mark the vote as seen. If no duckies are found, returns πŸ‘€."""
+ duckies = [emoji for emoji in guild.emojis if emoji.name.startswith("ducky")]
+ if not duckies:
+ return "πŸ‘€"
+ return random.choice(duckies)
+
+ @staticmethod
+ async def _bulk_send(channel: TextChannel, text: str) -> List[Message]:
+ """
+        Split the text into multiple messages if necessary, and post them to the channel.
+
+ Returns the resulting message objects.
+ """
+ messages = textwrap.wrap(text, width=MAX_MESSAGE_SIZE, replace_whitespace=False)
+ log.trace(f"The provided string will be sent to the channel {channel.id} as {len(messages)} messages.")
+
+ results = []
+ for message in messages:
+ await asyncio.sleep(1)
+ results.append(await channel.send(message))
+
+ return results
+
+ async def mark_reviewed(self, ctx: Context, user_id: int) -> bool:
+ """
+ Mark an active nomination as reviewed, updating the database and canceling the review task.
+
+ Returns True if the user was successfully marked as reviewed, False otherwise.
+ """
+ log.trace(f"Updating user {user_id} as reviewed")
+ await self._pool.fetch_user_cache()
+ if user_id not in self._pool.watched_users:
+ log.trace(f"Can't find a nominated user with id {user_id}")
+ await ctx.send(f"❌ Can't find a currently nominated user with id `{user_id}`")
+ return False
+
+ nomination = self._pool.watched_users[user_id]
+ if nomination["reviewed"]:
+ await ctx.send("❌ This nomination was already reviewed, but here's a cookie πŸͺ")
+ return False
+
+ await self.bot.api_client.patch(f"{self._pool.api_endpoint}/{nomination['id']}", json={"reviewed": True})
+ if user_id in self._review_scheduler:
+ self._review_scheduler.cancel(user_id)
+
+ return True
+
+ def cancel(self, user_id: int) -> None:
+ """
+ Cancels the review of the nominee with ID `user_id`.
+
+ It's important to note that this applies only until reschedule_reviews is called again.
+ To permanently cancel someone's review, either remove them from the pool, or use mark_reviewed.
+ """
+ log.trace(f"Canceling the review of user {user_id}.")
+ self._review_scheduler.cancel(user_id)
+
+ def cancel_all(self) -> None:
+ """
+ Cancels all reviews.
+
+ It's important to note that this applies only until reschedule_reviews is called again.
+ To permanently cancel someone's review, either remove them from the pool, or use mark_reviewed.
+ """
+ log.trace("Canceling all reviews.")
+ self._review_scheduler.cancel_all()
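For context (not part of the patch), here is a hedged sketch of how a cog might construct and drive the `Reviewer` above; the real wiring lives in `_cog.py`, which is only partially shown here, so the helper name and call site are assumptions:

```py
import typing

from bot.bot import Bot
from bot.exts.recruitment.talentpool._review import Reviewer

if typing.TYPE_CHECKING:
    from bot.exts.recruitment.talentpool._cog import TalentPool


async def start_reviews(bot: Bot, pool: "TalentPool") -> Reviewer:
    """Hypothetical helper: build a Reviewer and schedule reviews of all unreviewed nominations."""
    reviewer = Reviewer("talentpool_reviewer", bot, pool)
    # Schedules a review MAX_DAYS_IN_POOL days after each unreviewed nomination was inserted.
    await reviewer.reschedule_reviews()
    return reviewer
```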
diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py
index eb92dfca7..a5d6f69b9 100644
--- a/bot/exts/utils/utils.py
+++ b/bot/exts/utils/utils.py
@@ -14,6 +14,7 @@ from bot.converters import Snowflake
from bot.decorators import in_whitelist
from bot.pagination import LinePaginator
from bot.utils import messages
+from bot.utils.checks import has_no_roles_check
from bot.utils.time import time_since
log = logging.getLogger(__name__)
@@ -156,18 +157,22 @@ class Utils(Cog):
@command(aliases=("snf", "snfl", "sf"))
@in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)
- async def snowflake(self, ctx: Context, snowflake: Snowflake) -> None:
+ async def snowflake(self, ctx: Context, *snowflakes: Snowflake) -> None:
"""Get Discord snowflake creation time."""
- created_at = snowflake_time(snowflake)
- embed = Embed(
- description=f"**Created at {created_at}** ({time_since(created_at, max_units=3)}).",
- colour=Colour.blue()
- )
- embed.set_author(
- name=f"Snowflake: {snowflake}",
- icon_url="https://github.com/twitter/twemoji/blob/master/assets/72x72/2744.png?raw=true"
- )
- await ctx.send(embed=embed)
+ if len(snowflakes) > 1 and await has_no_roles_check(ctx, *STAFF_ROLES):
+ raise BadArgument("Cannot process more than one snowflake in one invocation.")
+
+ for snowflake in snowflakes:
+ created_at = snowflake_time(snowflake)
+ embed = Embed(
+ description=f"**Created at {created_at}** ({time_since(created_at, max_units=3)}).",
+ colour=Colour.blue()
+ )
+ embed.set_author(
+ name=f"Snowflake: {snowflake}",
+ icon_url="https://github.com/twitter/twemoji/blob/master/assets/72x72/2744.png?raw=true"
+ )
+ await ctx.send(embed=embed)
@command(aliases=("poll",))
@has_any_role(*MODERATION_ROLES)
diff --git a/bot/log.py b/bot/log.py
index bc3bba0af..e92233a33 100644
--- a/bot/log.py
+++ b/bot/log.py
@@ -1,12 +1,11 @@
import logging
import os
import sys
-from logging import Logger, StreamHandler, handlers
+from logging import Logger, handlers
from pathlib import Path
import coloredlogs
import sentry_sdk
-from pythonjsonlogger import jsonlogger
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.redis import RedisIntegration
@@ -14,15 +13,6 @@ from bot import constants
TRACE_LEVEL = 5
-PROD_FIELDS = [
- "asctime",
- "name",
- "levelname",
- "message",
- "funcName",
- "filename"
-]
-
def setup() -> None:
"""Set up loggers."""
@@ -43,28 +33,21 @@ def setup() -> None:
root_log.setLevel(log_level)
root_log.addHandler(file_handler)
- if constants.DEBUG_MODE:
- if "COLOREDLOGS_LEVEL_STYLES" not in os.environ:
- coloredlogs.DEFAULT_LEVEL_STYLES = {
- **coloredlogs.DEFAULT_LEVEL_STYLES,
- "trace": {"color": 246},
- "critical": {"background": "red"},
- "debug": coloredlogs.DEFAULT_LEVEL_STYLES["info"]
- }
-
- if "COLOREDLOGS_LOG_FORMAT" not in os.environ:
- coloredlogs.DEFAULT_LOG_FORMAT = format_string
-
- if "COLOREDLOGS_LOG_LEVEL" not in os.environ:
- coloredlogs.DEFAULT_LOG_LEVEL = log_level
-
- coloredlogs.install(logger=root_log, stream=sys.stdout)
- else:
- json_format = " ".join([f"%({field})s" for field in PROD_FIELDS])
- stream_handler = StreamHandler()
- formatter = jsonlogger.JsonFormatter(json_format)
- stream_handler.setFormatter(formatter)
- root_log.addHandler(stream_handler)
+ if "COLOREDLOGS_LEVEL_STYLES" not in os.environ:
+ coloredlogs.DEFAULT_LEVEL_STYLES = {
+ **coloredlogs.DEFAULT_LEVEL_STYLES,
+ "trace": {"color": 246},
+ "critical": {"background": "red"},
+ "debug": coloredlogs.DEFAULT_LEVEL_STYLES["info"]
+ }
+
+ if "COLOREDLOGS_LOG_FORMAT" not in os.environ:
+ coloredlogs.DEFAULT_LOG_FORMAT = format_string
+
+ if "COLOREDLOGS_LOG_LEVEL" not in os.environ:
+ coloredlogs.DEFAULT_LOG_LEVEL = log_level
+
+ coloredlogs.install(logger=root_log, stream=sys.stdout)
logging.getLogger("discord").setLevel(logging.WARNING)
logging.getLogger("websockets").setLevel(logging.WARNING)
diff --git a/bot/resources/elements.json b/bot/resources/elements.json
deleted file mode 100644
index a3ac5b99f..000000000
--- a/bot/resources/elements.json
+++ /dev/null
@@ -1,119 +0,0 @@
-[
- "hydrogen",
- "helium",
- "lithium",
- "beryllium",
- "boron",
- "carbon",
- "nitrogen",
- "oxygen",
- "fluorine",
- "neon",
- "sodium",
- "magnesium",
- "aluminium",
- "silicon",
- "phosphorus",
- "sulfur",
- "chlorine",
- "argon",
- "potassium",
- "calcium",
- "scandium",
- "titanium",
- "vanadium",
- "chromium",
- "manganese",
- "iron",
- "cobalt",
- "nickel",
- "copper",
- "zinc",
- "gallium",
- "germanium",
- "arsenic",
- "bromine",
- "krypton",
- "rubidium",
- "strontium",
- "yttrium",
- "zirconium",
- "niobium",
- "molybdenum",
- "technetium",
- "ruthenium",
- "rhodium",
- "palladium",
- "silver",
- "cadmium",
- "indium",
- "tin",
- "antimony",
- "tellurium",
- "iodine",
- "xenon",
- "caesium",
- "barium",
- "lanthanum",
- "cerium",
- "praseodymium",
- "neodymium",
- "promethium",
- "samarium",
- "europium",
- "gadolinium",
- "terbium",
- "dysprosium",
- "holmium",
- "erbium",
- "thulium",
- "ytterbium",
- "lutetium",
- "hafnium",
- "tantalum",
- "tungsten",
- "rhenium",
- "osmium",
- "iridium",
- "platinum",
- "gold",
- "mercury",
- "thallium",
- "lead",
- "bismuth",
- "polonium",
- "astatine",
- "radon",
- "francium",
- "radium",
- "actinium",
- "thorium",
- "protactinium",
- "uranium",
- "neptunium",
- "plutonium",
- "americium",
- "curium",
- "berkelium",
- "californium",
- "einsteinium",
- "fermium",
- "mendelevium",
- "nobelium",
- "lawrencium",
- "rutherfordium",
- "dubnium",
- "seaborgium",
- "bohrium",
- "hassium",
- "meitnerium",
- "darmstadtium",
- "roentgenium",
- "copernicium",
- "nihonium",
- "flerovium",
- "moscovium",
- "livermorium",
- "tennessine",
- "oganesson"
-]
diff --git a/bot/resources/foods.json b/bot/resources/foods.json
new file mode 100644
index 000000000..61d9ea98f
--- /dev/null
+++ b/bot/resources/foods.json
@@ -0,0 +1,52 @@
+[
+ "apple",
+ "avocado",
+ "bagel",
+ "banana",
+ "bread",
+ "broccoli",
+ "burrito",
+ "cake",
+ "candy",
+ "carrot",
+ "cheese",
+ "cherries",
+ "chestnut",
+ "chili",
+ "chocolate",
+ "coconut",
+ "coffee",
+ "cookie",
+ "corn",
+ "croissant",
+ "cupcake",
+ "donut",
+ "dumpling",
+ "falafel",
+ "grapes",
+ "honey",
+ "kiwi",
+ "lemon",
+ "lollipop",
+ "mango",
+ "mushroom",
+ "orange",
+ "pancakes",
+ "peanut",
+ "pear",
+ "pie",
+ "pineapple",
+ "popcorn",
+ "potato",
+ "pretzel",
+ "ramen",
+ "rice",
+ "salad",
+ "spaghetti",
+ "stew",
+ "strawberry",
+ "sushi",
+ "taco",
+ "tomato",
+ "watermelon"
+]
diff --git a/bot/resources/stars.json b/bot/resources/stars.json
index c0b253120..5ecad0213 100644
--- a/bot/resources/stars.json
+++ b/bot/resources/stars.json
@@ -17,7 +17,7 @@
"Bruce Springsteen",
"Bruno Mars",
"Bryan Adams",
- "Celine Dion",
+ "CΓ©line Dion",
"Cher",
"Christina Aguilera",
"David Bowie",
diff --git a/bot/resources/tags/customhelp.md b/bot/resources/tags/customhelp.md
new file mode 100644
index 000000000..6f0b17642
--- /dev/null
+++ b/bot/resources/tags/customhelp.md
@@ -0,0 +1,3 @@
+**Custom help commands in discord.py**
+
+To learn more about how to create custom help commands in discord.py by subclassing the help command, please see [this tutorial](https://gist.github.com/InterStella0/b78488fb28cadf279dfd3164b9f0cf96#embed-minimalhelpcommand) by Stella#2000
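As a rough illustration of the approach this tag links to (not part of the tag or of this patch), a custom help command can be built by subclassing discord.py's `MinimalHelpCommand`; the `EmbedHelpCommand` name and the embed layout below are assumptions made for the sketch:

```py
from discord import Embed
from discord.ext import commands


class EmbedHelpCommand(commands.MinimalHelpCommand):
    """Send the help output as embeds instead of plain text."""

    async def send_pages(self) -> None:
        # MinimalHelpCommand collects its formatted pages in self.paginator; wrap each page in an embed.
        destination = self.get_destination()
        for page in self.paginator.pages:
            await destination.send(embed=Embed(description=page))


bot = commands.Bot(command_prefix="!", help_command=EmbedHelpCommand())
```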
diff --git a/bot/resources/tags/intents.md b/bot/resources/tags/intents.md
new file mode 100644
index 000000000..464caf0ba
--- /dev/null
+++ b/bot/resources/tags/intents.md
@@ -0,0 +1,19 @@
+**Using intents in discord.py**
+
+Intents are a feature of Discord that tells the gateway exactly which events to send your bot. By default, discord.py has all intents enabled, except for the `Members` and `Presences` intents, which are needed for events such as `on_member_join` and `on_member_remove`, and to get members' statuses.
+
+To enable one of these intents, you need to first go to the [Discord developer portal](https://discord.com/developers/applications), then to the bot page of your bot's application. Scroll down to the `Privileged Gateway Intents` section, then enable the intents that you need.
+
+Next, in your bot you need to set the intents you want to connect with in the bot's constructor using the `intents` keyword argument, like this:
+
+```py
+from discord import Intents
+from discord.ext import commands
+
+intents = Intents.default()
+intents.members = True
+
+bot = commands.Bot(command_prefix="!", intents=intents)
+```
+
+For more info about using intents, see the [discord.py docs on intents](https://discordpy.readthedocs.io/en/latest/intents.html), and for general information about them, see the [Discord developer documentation on intents](https://discord.com/developers/docs/topics/gateway#gateway-intents).
diff --git a/bot/utils/time.py b/bot/utils/time.py
index f862e40f7..466f0adc2 100644
--- a/bot/utils/time.py
+++ b/bot/utils/time.py
@@ -85,6 +85,14 @@ def humanize_delta(delta: relativedelta, precision: str = "seconds", max_units:
return humanized
+def get_time_delta(time_string: str) -> str:
+    """Return the time elapsed since `time_string` as a human-readable delta."""
+ date_time = dateutil.parser.isoparse(time_string).replace(tzinfo=None)
+ time_delta = time_since(date_time, precision="minutes", max_units=1)
+
+ return time_delta
+
+
def parse_duration_string(duration: str) -> Optional[relativedelta]:
"""
Converts a `duration` string to a relativedelta object.
diff --git a/config-default.yml b/config-default.yml
index 18d9cd370..502f0f861 100644
--- a/config-default.yml
+++ b/config-default.yml
@@ -89,8 +89,8 @@ style:
filtering: "https://cdn.discordapp.com/emojis/472472638594482195.png"
- green_checkmark: "https://raw.githubusercontent.com/python-discord/branding/master/icons/checkmark/green-checkmark-dist.png"
- green_questionmark: "https://raw.githubusercontent.com/python-discord/branding/master/icons/checkmark/green-question-mark-dist.png"
+ green_checkmark: "https://raw.githubusercontent.com/python-discord/branding/main/icons/checkmark/green-checkmark-dist.png"
+ green_questionmark: "https://raw.githubusercontent.com/python-discord/branding/main/icons/checkmark/green-question-mark-dist.png"
guild_update: "https://cdn.discordapp.com/emojis/469954765141442561.png"
hash_blurple: "https://cdn.discordapp.com/emojis/469950142942806017.png"
@@ -195,9 +195,11 @@ guild:
incidents_archive: 720668923636351037
mods: &MODS 305126844661760000
mod_alerts: 473092532147060736
+ mod_appeals: &MOD_APPEALS 808790025688711198
mod_meta: &MOD_META 775412552795947058
mod_spam: &MOD_SPAM 620607373828030464
mod_tools: &MOD_TOOLS 775413915391098921
+ nomination_voting: 822853512709931008
organisation: &ORGANISATION 551789653284356126
staff_lounge: &STAFF_LOUNGE 464905259261755392
@@ -230,6 +232,7 @@ guild:
moderation_channels:
- *ADMINS
- *ADMIN_SPAM
+ - *MOD_APPEALS
- *MOD_META
- *MOD_TOOLS
- *MODS
@@ -358,8 +361,8 @@ urls:
discord_api: &DISCORD_API "https://discordapp.com/api/v7/"
discord_invite_api: !JOIN [*DISCORD_API, "invites"]
- # Misc URLs
- bot_avatar: "https://raw.githubusercontent.com/discord-python/branding/master/logos/logo_circle/logo_circle.png"
+    # Misc URLs
+ bot_avatar: "https://raw.githubusercontent.com/python-discord/branding/main/logos/logo_circle/logo_circle.png"
github_bot_repo: "https://github.com/python-discord/bot"
diff --git a/docker-compose.yml b/docker-compose.yml
index 0002d1d56..8afdd6ef1 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -57,8 +57,7 @@ services:
- web
- redis
- snekbox
+ env_file:
+ - .env
environment:
- BOT_TOKEN: ${BOT_TOKEN}
BOT_API_KEY: badbot13m0n8f570f942013fc818f234916ca531
- REDDIT_CLIENT_ID: ${REDDIT_CLIENT_ID}
- REDDIT_SECRET: ${REDDIT_SECRET}
diff --git a/tests/bot/exts/moderation/infraction/test_infractions.py b/tests/bot/exts/moderation/infraction/test_infractions.py
index 86c2617ea..08f39cd50 100644
--- a/tests/bot/exts/moderation/infraction/test_infractions.py
+++ b/tests/bot/exts/moderation/infraction/test_infractions.py
@@ -39,7 +39,7 @@ class TruncationTests(unittest.IsolatedAsyncioTestCase):
delete_message_days=0
)
self.cog.apply_infraction.assert_awaited_once_with(
- self.ctx, {"foo": "bar"}, self.target, self.ctx.guild.ban.return_value
+ self.ctx, {"foo": "bar", "purge": ""}, self.target, self.ctx.guild.ban.return_value
)
@patch("bot.exts.moderation.infraction._utils.post_infraction")
diff --git a/tests/bot/exts/moderation/infraction/test_utils.py b/tests/bot/exts/moderation/infraction/test_utils.py
index 5b62463e0..ee9ff650c 100644
--- a/tests/bot/exts/moderation/infraction/test_utils.py
+++ b/tests/bot/exts/moderation/infraction/test_utils.py
@@ -146,7 +146,7 @@ class ModerationUtilsTests(unittest.IsolatedAsyncioTestCase):
name=utils.INFRACTION_AUTHOR_NAME,
url=utils.RULES_URL,
icon_url=Icons.token_removed
- ).set_footer(text=utils.INFRACTION_APPEAL_FOOTER),
+ ).set_footer(text=utils.INFRACTION_APPEAL_MODMAIL_FOOTER),
"send_result": True
},
{
@@ -164,9 +164,11 @@ class ModerationUtilsTests(unittest.IsolatedAsyncioTestCase):
name=utils.INFRACTION_AUTHOR_NAME,
url=utils.RULES_URL,
icon_url=Icons.token_removed
- ),
+ ).set_footer(text=utils.INFRACTION_APPEAL_MODMAIL_FOOTER),
"send_result": False
},
+ # Note that this test case asserts that the DM that *would* get sent to the user is formatted
+ # correctly, even though that message is deliberately never sent.
{
"args": (self.user, "note", None, None, Icons.defcon_denied),
"expected_output": Embed(
@@ -182,7 +184,7 @@ class ModerationUtilsTests(unittest.IsolatedAsyncioTestCase):
name=utils.INFRACTION_AUTHOR_NAME,
url=utils.RULES_URL,
icon_url=Icons.defcon_denied
- ),
+ ).set_footer(text=utils.INFRACTION_APPEAL_MODMAIL_FOOTER),
"send_result": False
},
{
@@ -200,7 +202,7 @@ class ModerationUtilsTests(unittest.IsolatedAsyncioTestCase):
name=utils.INFRACTION_AUTHOR_NAME,
url=utils.RULES_URL,
icon_url=Icons.defcon_denied
- ).set_footer(text=utils.INFRACTION_APPEAL_FOOTER),
+ ).set_footer(text=utils.INFRACTION_APPEAL_MODMAIL_FOOTER),
"send_result": False
},
{
@@ -218,7 +220,7 @@ class ModerationUtilsTests(unittest.IsolatedAsyncioTestCase):
name=utils.INFRACTION_AUTHOR_NAME,
url=utils.RULES_URL,
icon_url=Icons.defcon_denied
- ).set_footer(text=utils.INFRACTION_APPEAL_FOOTER),
+ ).set_footer(text=utils.INFRACTION_APPEAL_MODMAIL_FOOTER),
"send_result": True
}
]