 bot/exts/evergreen/error_handler.py      |   2 +-
 bot/exts/evergreen/githubinfo.py         |   3 +--
 bot/exts/evergreen/movie.py              |  10 +++++-----
 bot/exts/evergreen/reddit.py             |   3 +--
 bot/exts/evergreen/snakes/__init__.py    |   2 +-
 bot/exts/evergreen/snakes/_snakes_cog.py | 175 +++++++++++++++----------------
 6 files changed, 96 insertions(+), 99 deletions(-)
diff --git a/bot/exts/evergreen/error_handler.py b/bot/exts/evergreen/error_handler.py
index 62529f52..faaf1386 100644
--- a/bot/exts/evergreen/error_handler.py
+++ b/bot/exts/evergreen/error_handler.py
@@ -39,7 +39,7 @@ class CommandErrorHandler(commands.Cog):
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:
- """Activates when a command opens an error."""
+ """Activates when a command raises an error."""
if getattr(error, "handled", False):
logging.debug(f"Command {ctx.command} had its error already handled locally; ignoring.")
return
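Note: discord.py never sets a "handled" attribute itself; it is a project convention in which a cog-local error handler marks the error so this global handler skips it. A minimal sketch of that convention, with a hypothetical command name:

    from discord.ext import commands

    class Example(commands.Cog):
        @commands.command()
        async def fragile(self, ctx: commands.Context) -> None:
            raise commands.CommandError("something broke")

        @fragile.error
        async def fragile_error(self, ctx: commands.Context, error: commands.CommandError) -> None:
            # Handle the error locally, then flag it so the global
            # getattr(error, "handled", False) check above returns True.
            await ctx.send("Handled locally.")
            error.handled = True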
diff --git a/bot/exts/evergreen/githubinfo.py b/bot/exts/evergreen/githubinfo.py
index fe126aa2..27e607e5 100644
--- a/bot/exts/evergreen/githubinfo.py
+++ b/bot/exts/evergreen/githubinfo.py
@@ -5,7 +5,6 @@ from urllib.parse import quote
import discord
from discord.ext import commands
-from discord.ext.commands.cooldowns import BucketType
from bot.bot import Bot
from bot.constants import Colours, NEGATIVE_REPLIES
@@ -28,7 +27,7 @@ class GithubInfo(commands.Cog):
return await r.json()
@commands.group(name="github", aliases=("gh", "git"))
- @commands.cooldown(1, 10, BucketType.user)
+ @commands.cooldown(1, 10, commands.BucketType.user)
async def github_group(self, ctx: commands.Context) -> None:
"""Commands for finding information related to GitHub."""
if ctx.invoked_subcommand is None:
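BucketType is re-exported at the top level of discord.ext.commands, so the deep import from discord.ext.commands.cooldowns that this hunk removes was never needed. A minimal sketch of the same per-user cooldown outside a cog (command name hypothetical):

    from discord.ext import commands

    @commands.command(name="ping")
    @commands.cooldown(1, 10, commands.BucketType.user)  # one use per user per 10 seconds
    async def ping(ctx: commands.Context) -> None:
        await ctx.send("pong")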
diff --git a/bot/exts/evergreen/movie.py b/bot/exts/evergreen/movie.py
index e67f8d04..fa284417 100644
--- a/bot/exts/evergreen/movie.py
+++ b/bot/exts/evergreen/movie.py
@@ -72,13 +72,13 @@ class Movie(Cog):
# Capitalize the genre to look it up in the Enum, get a random page, and send help when the genre doesn't exist.
genre = genre.capitalize()
try:
- result = await self.get_movies_list(self.http_session, MovieGenres[genre].value, 1)
+ result = await self.get_movies_data(self.http_session, MovieGenres[genre].value, 1)
except KeyError:
await invoke_help_command(ctx)
return
# Check that "results" is in the response. If not, send an error.
- if "results" not in result.keys():
+ if "results" not in result:
err_msg = f"There is problem while making TMDB API request. Response Code: {result['status_code']}, " \
f"{result['status_message']}."
await ctx.send(err_msg)
@@ -88,8 +88,8 @@ class Movie(Cog):
page = random.randint(1, result["total_pages"])
# Get the movies list from TMDB and check that "results" is in the response. If not, send an error.
- movies = await self.get_movies_list(self.http_session, MovieGenres[genre].value, page)
- if "results" not in movies.keys():
+ movies = await self.get_movies_data(self.http_session, MovieGenres[genre].value, page)
+ if "results" not in movies:
err_msg = f"There is problem while making TMDB API request. Response Code: {result['status_code']}, " \
f"{result['status_message']}."
await ctx.send(err_msg)
@@ -106,7 +106,7 @@ class Movie(Cog):
"""Show all currently available genres for .movies command."""
await ctx.send(f"Current available genres: {', '.join('`' + genre.name + '`' for genre in MovieGenres)}")
- async def get_movies_list(self, client: ClientSession, genre_id: str, page: int) -> Dict[str, Any]:
+ async def get_movies_data(self, client: ClientSession, genre_id: str, page: int) -> Dict[str, Any]:
"""Return JSON of TMDB discover request."""
# Define params of request
params = {
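The membership-test change is the idiomatic form: the "in" operator on a dict already tests keys, so .keys() is redundant. A small illustration, using a hypothetical TMDB-style error payload:

    # Hypothetical error payload; the field names mirror the ones the cog reads.
    response = {"status_code": 34, "status_message": "The resource you requested could not be found."}

    if "results" not in response:  # equivalent to: "results" not in response.keys()
        print(f"TMDB request failed. Response Code: {response['status_code']}, {response['status_message']}.")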
diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py
index f2b95fe2..bda155c3 100644
--- a/bot/exts/evergreen/reddit.py
+++ b/bot/exts/evergreen/reddit.py
@@ -3,7 +3,6 @@ import random
import discord
from discord.ext import commands
-from discord.ext.commands.cooldowns import BucketType
from bot.bot import Bot
from bot.utils.pagination import ImagePaginator
@@ -31,7 +30,7 @@ class Reddit(commands.Cog):
return await response.json()
@commands.command(name="reddit")
- @commands.cooldown(1, 10, BucketType.user)
+ @commands.cooldown(1, 10, commands.BucketType.user)
async def get_reddit(self, ctx: commands.Context, subreddit: str = "python", sort: str = "hot") -> None:
"""
Fetch Reddit posts using this command.
diff --git a/bot/exts/evergreen/snakes/__init__.py b/bot/exts/evergreen/snakes/__init__.py
index 049bc964..7740429b 100644
--- a/bot/exts/evergreen/snakes/__init__.py
+++ b/bot/exts/evergreen/snakes/__init__.py
@@ -7,5 +7,5 @@ log = logging.getLogger(__name__)
def setup(bot: Bot) -> None:
- """Snakes Cog load."""
+ """Load the Snakes Cog."""
bot.add_cog(Snakes(bot))
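For reference, discord.py calls this module-level setup() when the extension is loaded, so loading the cog elsewhere in the project presumably looks like:

    bot.load_extension("bot.exts.evergreen.snakes")  # imports the package and calls setup(bot)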
diff --git a/bot/exts/evergreen/snakes/_snakes_cog.py b/bot/exts/evergreen/snakes/_snakes_cog.py
index 353bcd66..62795aef 100644
--- a/bot/exts/evergreen/snakes/_snakes_cog.py
+++ b/bot/exts/evergreen/snakes/_snakes_cog.py
@@ -9,15 +9,15 @@ import textwrap
import urllib
from functools import partial
from io import BytesIO
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
-import aiohttp
import async_timeout
from PIL import Image, ImageDraw, ImageFont
from discord import Colour, Embed, File, Member, Message, Reaction
from discord.errors import HTTPException
-from discord.ext.commands import Bot, Cog, CommandError, Context, bot_has_permissions, group
+from discord.ext.commands import Cog, CommandError, Context, bot_has_permissions, group
+from bot.bot import Bot
from bot.constants import ERROR_REPLIES, Tokens
from bot.exts.evergreen.snakes import _utils as utils
from bot.exts.evergreen.snakes._converter import Snake
@@ -275,13 +275,13 @@ class Snakes(Cog):
return message
- async def _fetch(self, session: aiohttp.ClientSession, url: str, params: dict = None) -> dict:
+ async def _fetch(self, url: str, params: Optional[dict] = None) -> dict:
"""Asynchronous web request helper method."""
if params is None:
params = {}
async with async_timeout.timeout(10):
- async with session.get(url, params=params) as response:
+ async with self.bot.http_session.get(url, params=params) as response:
return await response.json()
def _get_random_long_message(self, messages: List[str], retries: int = 10) -> str:
@@ -309,96 +309,95 @@ class Snakes(Cog):
"""
snake_info = {}
- async with aiohttp.ClientSession() as session:
- params = {
- "format": "json",
- "action": "query",
- "list": "search",
- "srsearch": name,
- "utf8": "",
- "srlimit": "1",
- }
-
- json = await self._fetch(session, URL, params=params)
-
- # Wikipedia does have a error page
- try:
- pageid = json["query"]["search"][0]["pageid"]
- except KeyError:
- # Wikipedia error page ID(?)
- pageid = 41118
- except IndexError:
- return None
-
- params = {
- "format": "json",
- "action": "query",
- "prop": "extracts|images|info",
- "exlimit": "max",
- "explaintext": "",
- "inprop": "url",
- "pageids": pageid
- }
+ params = {
+ "format": "json",
+ "action": "query",
+ "list": "search",
+ "srsearch": name,
+ "utf8": "",
+ "srlimit": "1",
+ }
- json = await self._fetch(session, URL, params=params)
+ json = await self._fetch(URL, params=params)
- # Constructing dict - handle exceptions later
- try:
- snake_info["title"] = json["query"]["pages"][f"{pageid}"]["title"]
- snake_info["extract"] = json["query"]["pages"][f"{pageid}"]["extract"]
- snake_info["images"] = json["query"]["pages"][f"{pageid}"]["images"]
- snake_info["fullurl"] = json["query"]["pages"][f"{pageid}"]["fullurl"]
- snake_info["pageid"] = json["query"]["pages"][f"{pageid}"]["pageid"]
- except KeyError:
- snake_info["error"] = True
-
- if snake_info["images"]:
- i_url = "https://commons.wikimedia.org/wiki/Special:FilePath/"
- image_list = []
- map_list = []
- thumb_list = []
-
- # Wikipedia has arbitrary images that are not snakes
- banned = [
- "Commons-logo.svg",
- "Red%20Pencil%20Icon.png",
- "distribution",
- "The%20Death%20of%20Cleopatra%20arthur.jpg",
- "Head%20of%20holotype",
- "locator",
- "Woma.png",
- "-map.",
- ".svg",
- "ange.",
- "Adder%20(PSF).png"
- ]
-
- for image in snake_info["images"]:
- # Images come in the format of `File:filename.extension`
- file, sep, filename = image["title"].partition(":")
- filename = filename.replace(" ", "%20") # Wikipedia returns good data!
-
- if not filename.startswith("Map"):
- if any(ban in filename for ban in banned):
- pass
- else:
- image_list.append(f"{i_url}{filename}")
- thumb_list.append(f"{i_url}{filename}?width=100")
+ # Wikipedia does have an error page
+ try:
+ pageid = json["query"]["search"][0]["pageid"]
+ except KeyError:
+ # Wikipedia error page ID(?)
+ pageid = 41118
+ except IndexError:
+ return None
+
+ params = {
+ "format": "json",
+ "action": "query",
+ "prop": "extracts|images|info",
+ "exlimit": "max",
+ "explaintext": "",
+ "inprop": "url",
+ "pageids": pageid
+ }
+
+ json = await self._fetch(URL, params=params)
+
+ # Constructing dict - handle exceptions later
+ try:
+ snake_info["title"] = json["query"]["pages"][f"{pageid}"]["title"]
+ snake_info["extract"] = json["query"]["pages"][f"{pageid}"]["extract"]
+ snake_info["images"] = json["query"]["pages"][f"{pageid}"]["images"]
+ snake_info["fullurl"] = json["query"]["pages"][f"{pageid}"]["fullurl"]
+ snake_info["pageid"] = json["query"]["pages"][f"{pageid}"]["pageid"]
+ except KeyError:
+ snake_info["error"] = True
+
+ if snake_info["images"]:
+ i_url = "https://commons.wikimedia.org/wiki/Special:FilePath/"
+ image_list = []
+ map_list = []
+ thumb_list = []
+
+ # Wikipedia has arbitrary images that are not snakes
+ banned = [
+ "Commons-logo.svg",
+ "Red%20Pencil%20Icon.png",
+ "distribution",
+ "The%20Death%20of%20Cleopatra%20arthur.jpg",
+ "Head%20of%20holotype",
+ "locator",
+ "Woma.png",
+ "-map.",
+ ".svg",
+ "ange.",
+ "Adder%20(PSF).png"
+ ]
+
+ for image in snake_info["images"]:
+ # Images come in the format of `File:filename.extension`
+ file, sep, filename = image["title"].partition(":")
+ filename = filename.replace(" ", "%20") # Wikipedia returns good data!
+
+ if not filename.startswith("Map"):
+ if any(ban in filename for ban in banned):
+ pass
else:
- map_list.append(f"{i_url}{filename}")
+ image_list.append(f"{i_url}{filename}")
+ thumb_list.append(f"{i_url}{filename}?width=100")
+ else:
+ map_list.append(f"{i_url}{filename}")
- snake_info["image_list"] = image_list
- snake_info["map_list"] = map_list
- snake_info["thumb_list"] = thumb_list
- snake_info["name"] = name
+ snake_info["image_list"] = image_list
+ snake_info["map_list"] = map_list
+ snake_info["thumb_list"] = thumb_list
+ snake_info["name"] = name
- match = self.wiki_brief.match(snake_info["extract"])
- info = match.group(1) if match else None
+ match = self.wiki_brief.match(snake_info["extract"])
+ info = match.group(1) if match else None
- if info:
- info = info.replace("\n", "\n\n") # Give us some proper paragraphs.
+ if info:
+ info = info.replace("\n", "\n\n") # Give us some proper paragraphs.
- snake_info["info"] = info
+ snake_info["info"] = info
return snake_info
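The net effect of the _snakes_cog.py hunks is two-fold: _fetch reuses the aiohttp.ClientSession the bot already owns instead of opening a throwaway session per request (the bulk of the diff is the resulting de-indentation), and its params default becomes Optional[dict] = None rather than a shared mutable dict. A minimal sketch of the resulting pattern, assuming bot.http_session exists as bot.bot.Bot provides in this repo (the cog name is illustrative):

    from typing import Optional

    import async_timeout
    from discord.ext import commands

    class ExampleCog(commands.Cog):
        """Assumes self.bot.http_session is an aiohttp.ClientSession created at startup."""

        def __init__(self, bot: commands.Bot):
            self.bot = bot

        async def _fetch(self, url: str, params: Optional[dict] = None) -> dict:
            # A None default avoids the mutable-default-argument pitfall;
            # normalise it on entry instead.
            if params is None:
                params = {}
            async with async_timeout.timeout(10):  # same 10-second guard as the diff
                async with self.bot.http_session.get(url, params=params) as response:
                    return await response.json()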