From d764720481136f786593a67f152ac876ef7b151d Mon Sep 17 00:00:00 2001 From: Rohan Date: Wed, 2 Dec 2020 21:52:10 +0530 Subject: Add Reddit class and emojis to constants file. --- bot/exts/evergreen/reddit.py | 128 ------------------------------------------- 1 file changed, 128 deletions(-) delete mode 100644 bot/exts/evergreen/reddit.py (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py deleted file mode 100644 index 49127bea..00000000 --- a/bot/exts/evergreen/reddit.py +++ /dev/null @@ -1,128 +0,0 @@ -import logging -import random - -import discord -from discord.ext import commands -from discord.ext.commands.cooldowns import BucketType - -from bot.utils.pagination import ImagePaginator - -log = logging.getLogger(__name__) - - -class Reddit(commands.Cog): - """Fetches reddit posts.""" - - def __init__(self, bot: commands.Bot): - self.bot = bot - - async def fetch(self, url: str) -> dict: - """Send a get request to the reddit API and get json response.""" - session = self.bot.http_session - params = { - 'limit': 50 - } - headers = { - 'User-Agent': 'Iceman' - } - - async with session.get(url=url, params=params, headers=headers) as response: - return await response.json() - - @commands.command(name='reddit') - @commands.cooldown(1, 10, BucketType.user) - async def get_reddit(self, ctx: commands.Context, subreddit: str = 'python', sort: str = "hot") -> None: - """ - Fetch reddit posts by using this command. - - Gets a post from r/python by default. - Usage: - --> .reddit [subreddit_name] [hot/top/new] - """ - pages = [] - sort_list = ["hot", "new", "top", "rising"] - if sort.lower() not in sort_list: - await ctx.send(f"Invalid sorting: {sort}\nUsing default sorting: `Hot`") - sort = "hot" - - data = await self.fetch(f'https://www.reddit.com/r/{subreddit}/{sort}/.json') - - try: - posts = data["data"]["children"] - except KeyError: - return await ctx.send('Subreddit not found!') - if not posts: - return await ctx.send('No posts available!') - - if posts[1]["data"]["over_18"] is True: - return await ctx.send( - "You cannot access this Subreddit as it is ment for those who " - "are 18 years or older." - ) - - embed_titles = "" - - # Chooses k unique random elements from a population sequence or set. - random_posts = random.sample(posts, k=5) - - # ----------------------------------------------------------- - # This code below is bound of change when the emojis are added. - - upvote_emoji = self.bot.get_emoji(755845219890757644) - comment_emoji = self.bot.get_emoji(755845255001014384) - user_emoji = self.bot.get_emoji(755845303822974997) - text_emoji = self.bot.get_emoji(676030265910493204) - video_emoji = self.bot.get_emoji(676030265839190047) - image_emoji = self.bot.get_emoji(676030265734201344) - reddit_emoji = self.bot.get_emoji(676030265734332427) - - # ------------------------------------------------------------ - - for i, post in enumerate(random_posts, start=1): - post_title = post["data"]["title"][0:50] - post_url = post['data']['url'] - if post_title == "": - post_title = "No Title." - elif post_title == post_url: - post_title = "Title is itself a link." - - # ------------------------------------------------------------------ - # Embed building. - - embed_titles += f"**{i}.[{post_title}]({post_url})**\n" - image_url = " " - post_stats = f"{text_emoji}" # Set default content type to text. - - if post["data"]["is_video"] is True or "youtube" in post_url.split("."): - # This means the content type in the post is a video. 
- post_stats = f"{video_emoji} " - - elif post_url.endswith("jpg") or post_url.endswith("png") or post_url.endswith("gif"): - # This means the content type in the post is an image. - post_stats = f"{image_emoji} " - image_url = post_url - - votes = f'{upvote_emoji}{post["data"]["ups"]}' - comments = f'{comment_emoji}\u2002{ post["data"]["num_comments"]}' - post_stats += ( - f"\u2002{votes}\u2003" - f"{comments}" - f'\u2003{user_emoji}\u2002{post["data"]["author"]}\n' - ) - embed_titles += f"{post_stats}\n" - page_text = f"**[{post_title}]({post_url})**\n{post_stats}\n{post['data']['selftext'][0:200]}" - - embed = discord.Embed() - page_tuple = (page_text, image_url) - pages.append(page_tuple) - - # ------------------------------------------------------------------ - - pages.insert(0, (embed_titles, " ")) - embed.set_author(name=f"r/{posts[0]['data']['subreddit']} - {sort}", icon_url=reddit_emoji.url) - await ImagePaginator.paginate(pages, ctx, embed) - - -def setup(bot: commands.Bot) -> None: - """Load the Cog.""" - bot.add_cog(Reddit(bot)) -- cgit v1.2.3 From 6e32bda97aa91af1d100ea46f7efdf7031f87bff Mon Sep 17 00:00:00 2001 From: Rohan Date: Wed, 2 Dec 2020 21:54:20 +0530 Subject: Migrate reddit command from Bot repo and add pagination. --- bot/exts/evergreen/reddit.py | 360 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 360 insertions(+) create mode 100644 bot/exts/evergreen/reddit.py (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py new file mode 100644 index 00000000..fb447cda --- /dev/null +++ b/bot/exts/evergreen/reddit.py @@ -0,0 +1,360 @@ +import asyncio +import logging +import random +import textwrap +from collections import namedtuple +from datetime import datetime, timedelta +from typing import List, Union + +from aiohttp import BasicAuth, ClientError +from discord import Colour, Embed, TextChannel +from discord.ext.commands import Cog, Context, group, has_any_role +from discord.ext.tasks import loop +from discord.utils import escape_markdown, sleep_until + +from bot.bot import Bot +from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES +from bot.utils.converters import Subreddit +from bot.utils.messages import sub_clyde +from bot.utils.pagination import ImagePaginator, LinePaginator + +log = logging.getLogger(__name__) + +AccessToken = namedtuple("AccessToken", ["token", "expires_at"]) + + +class Reddit(Cog): + """Track subreddit posts and show detailed statistics about them.""" + + HEADERS = {"User-Agent": "python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)"} + URL = "https://www.reddit.com" + OAUTH_URL = "https://oauth.reddit.com" + MAX_RETRIES = 3 + + def __init__(self, bot: Bot): + self.bot = bot + + self.webhook = None + self.access_token = None + self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret) + + bot.loop.create_task(self.init_reddit_ready()) + self.auto_poster_loop.start() + + def cog_unload(self) -> None: + """Stop the loop task and revoke the access token when the cog is unloaded.""" + self.auto_poster_loop.cancel() + if self.access_token and self.access_token.expires_at > datetime.utcnow(): + asyncio.create_task(self.revoke_access_token()) + + async def init_reddit_ready(self) -> None: + """Sets the reddit webhook when the cog is loaded.""" + await self.bot.wait_until_guild_available() + if not self.webhook: + self.webhook = await self.bot.fetch_webhook(RedditConfig.webhook) + + @property + def channel(self) -> 
TextChannel: + """Get the #reddit channel object from the bot's cache.""" + return self.bot.get_channel(Channels.reddit) + + def build_pagination_pages(self, posts: List[dict]) -> List[tuple]: + """Build embed pages required for Paginator.""" + pages = [] + first_page = "" + for i, post in enumerate(posts, start=1): + post_page = "" + image_url = "" + + data = post["data"] + + title = textwrap.shorten(data["title"], width=64, placeholder="...") + + # Normal brackets interfere with Markdown. + title = escape_markdown(title).replace("[", "⦋").replace("]", "⦌") + link = self.URL + data["permalink"] + + first_page += f"**{i}. [{title.replace('*', '')}]({link})**\n" + post_page += f"**{i}. [{title}]({link})**\n\n" + + text = data["selftext"] + if text: + first_page += textwrap.shorten(text, width=128, placeholder="...").replace("*", "") + "\n" + post_page += textwrap.shorten(text, width=252, placeholder="...") + "\n\n" + + ups = data["ups"] + comments = data["num_comments"] + author = data["author"] + + content_type = Emojis.reddit_post_text + if data["is_video"] is True or "youtube" in data["url"].split("."): + # This means the content type in the post is a video. + content_type = f"{Emojis.reddit_post_video}" + + elif any(data["url"].endswith(pic_format) for pic_format in ("jpg", "png", "gif")): + # This means the content type in the post is an image. + content_type = f"{Emojis.reddit_post_photo}" + image_url = data["url"] + + first_page += ( + f"{content_type}\u2003{Emojis.reddit_upvote}{ups}\u2003{Emojis.reddit_comments}" + f"\u2002{comments}\u2003{Emojis.reddit_users}{author}\n\n" + ) + post_page += ( + f"{content_type}\u2003{Emojis.reddit_upvote}{ups}\u2003{Emojis.reddit_comments}\u2002" + f"{comments}\u2003{Emojis.reddit_users}{author}" + ) + + pages.append((post_page, image_url)) + + pages.insert(0, (first_page, "")) + return pages + + async def get_access_token(self) -> None: + """ + Get a Reddit API OAuth2 access token and assign it to self.access_token. + + A token is valid for 1 hour. There will be MAX_RETRIES to get a token, after which the cog + will be unloaded and a ClientError raised if retrieval was still unsuccessful. + """ + for i in range(1, self.MAX_RETRIES + 1): + response = await self.bot.http_session.post( + url=f"{self.URL}/api/v1/access_token", + headers=self.HEADERS, + auth=self.client_auth, + data={ + "grant_type": "client_credentials", + "duration": "temporary" + } + ) + + if response.status == 200 and response.content_type == "application/json": + content = await response.json() + expiration = int(content["expires_in"]) - 60 # Subtract 1 minute for leeway. + self.access_token = AccessToken( + token=content["access_token"], + expires_at=datetime.utcnow() + timedelta(seconds=expiration) + ) + + log.debug(f"New token acquired; expires on UTC {self.access_token.expires_at}") + return + else: + log.debug( + f"Failed to get an access token: " + f"status {response.status} & content type {response.content_type}; " + f"retrying ({i}/{self.MAX_RETRIES})" + ) + + await asyncio.sleep(3) + + self.bot.remove_cog(self.qualified_name) + raise ClientError("Authentication with the Reddit API failed. Unloading the cog.") + + async def revoke_access_token(self) -> None: + """ + Revoke the OAuth2 access token for the Reddit API. + + For security reasons, it's good practice to revoke the token when it's no longer being used. 
+ """ + response = await self.bot.http_session.post( + url=f"{self.URL}/api/v1/revoke_token", + headers=self.HEADERS, + auth=self.client_auth, + data={ + "token": self.access_token.token, + "token_type_hint": "access_token" + } + ) + + if response.status == 204 and response.content_type == "application/json": + self.access_token = None + else: + log.warning(f"Unable to revoke access token: status {response.status}.") + + async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]: + """A helper method to fetch a certain amount of Reddit posts at a given route.""" + # Reddit's JSON responses only provide 25 posts at most. + if not 25 >= amount > 0: + raise ValueError("Invalid amount of subreddit posts requested.") + + # Renew the token if necessary. + if not self.access_token or self.access_token.expires_at < datetime.utcnow(): + await self.get_access_token() + + url = f"{self.OAUTH_URL}/{route}" + for _ in range(self.MAX_RETRIES): + response = await self.bot.http_session.get( + url=url, + headers={**self.HEADERS, "Authorization": f"bearer {self.access_token.token}"}, + params=params + ) + if response.status == 200 and response.content_type == 'application/json': + # Got appropriate response - process and return. + content = await response.json() + posts = content["data"]["children"] + + filtered_posts = [post for post in posts if not post["data"]["over_18"]] + + return filtered_posts[:amount] + + await asyncio.sleep(3) + + log.debug(f"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}") + return list() # Failed to get appropriate response within allowed number of retries. + + async def get_top_posts( + self, subreddit: Subreddit, time: str = "all", amount: int = 5, paginate: bool = False + ) -> Union[Embed, List[tuple]]: + """ + Get the top amount of posts for a given subreddit within a specified timeframe. + + A time of "all" will get posts from all time, "day" will get top daily posts and "week" will get the top + weekly posts. + + The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most. + """ + embed = Embed(description="") + + posts = await self.fetch_posts( + route=f"{subreddit}/top", + amount=amount, + params={"t": time} + ) + if not posts: + embed.title = random.choice(ERROR_REPLIES) + embed.colour = Colour.red() + embed.description = ( + "Sorry! We couldn't find any SFW posts from that subreddit. " + "If this problem persists, please let us know." 
+ ) + + return embed + + pages = self.build_pagination_pages(posts) + + if paginate: + return pages + + embed.description += pages[0] + embed.colour = Colour.blurple() + return embed + + @loop() + async def auto_poster_loop(self) -> None: + """Post the top 5 posts daily, and the top 5 posts weekly.""" + # once d.py get support for `time` parameter in loop decorator, + # this can be removed and the loop can use the `time=datetime.time.min` parameter + now = datetime.utcnow() + tomorrow = now + timedelta(days=1) + midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0) + + await sleep_until(midnight_tomorrow) + + await self.bot.wait_until_guild_available() + if not self.webhook: + await self.bot.fetch_webhook(RedditConfig.webhook) + + if datetime.utcnow().weekday() == 0: + await self.top_weekly_posts() + # if it's a monday send the top weekly posts + + for subreddit in RedditConfig.subreddits: + top_posts = await self.get_top_posts(subreddit=subreddit, time="day") + username = sub_clyde(f"{subreddit} Top Daily Posts") + message = await self.webhook.send(username=username, embed=top_posts, wait=True) + + if message.channel.is_news(): + await message.publish() + + async def top_weekly_posts(self) -> None: + """Post a summary of the top posts.""" + for subreddit in RedditConfig.subreddits: + # Send and pin the new weekly posts. + top_posts = await self.get_top_posts(subreddit=subreddit, time="week") + username = sub_clyde(f"{subreddit} Top Weekly Posts") + message = await self.webhook.send(wait=True, username=username, embed=top_posts) + + if subreddit.lower() == "r/python": + if not self.channel: + log.warning("Failed to get #reddit channel to remove pins in the weekly loop.") + return + + # Remove the oldest pins so that only 12 remain at most. 
+ pins = await self.channel.pins() + + while len(pins) >= 12: + await pins[-1].unpin() + del pins[-1] + + await message.pin() + + if message.channel.is_news(): + await message.publish() + + @group(name="reddit", invoke_without_command=True) + async def reddit_group(self, ctx: Context) -> None: + """View the top posts from various subreddits.""" + await ctx.send_help(ctx.command) + + @reddit_group.command(name="top") + async def top_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: + """Send the top posts of all time from a given subreddit.""" + async with ctx.typing(): + pages = await self.get_top_posts(subreddit=subreddit, time="all", paginate=True) + + embed = Embed( + title=f"{Emojis.reddit} {subreddit} - Top\n\n", + color=Colour.blurple() + ) + + await ImagePaginator.paginate(pages, ctx, embed) + + @reddit_group.command(name="daily") + async def daily_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: + """Send the top posts of today from a given subreddit.""" + async with ctx.typing(): + pages = await self.get_top_posts(subreddit=subreddit, time="day", paginate=True) + + embed = Embed( + title=f"{Emojis.reddit} {subreddit} - Daily\n\n", + color=Colour.blurple() + ) + + await ImagePaginator.paginate(pages, ctx, embed) + + @reddit_group.command(name="weekly") + async def weekly_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: + """Send the top posts of this week from a given subreddit.""" + async with ctx.typing(): + pages = await self.get_top_posts(subreddit=subreddit, time="week", paginate=True) + + embed = Embed( + title=f"{Emojis.reddit} {subreddit} - Weekly\n\n", + color=Colour.blurple() + ) + + await ImagePaginator.paginate(pages, ctx, embed) + + @has_any_role(*STAFF_ROLES) + @reddit_group.command(name="subreddits", aliases=("subs",)) + async def subreddits_command(self, ctx: Context) -> None: + """Send a paginated embed of all the subreddits we're relaying.""" + embed = Embed() + embed.title = "Relayed subreddits." + embed.colour = Colour.blurple() + + await LinePaginator.paginate( + RedditConfig.subreddits, + ctx, embed, + footer_text="Use the reddit commands along with these to view their posts.", + empty=False, + max_lines=15 + ) + + +def setup(bot: Bot) -> None: + """Load the Reddit cog.""" + if not RedditConfig.secret or not RedditConfig.client_id: + log.error("Credentials not provided, cog not loaded.") + return + bot.add_cog(Reddit(bot)) -- cgit v1.2.3 From 94822085a9f3e1677a8c566796fd1655b0b40ebf Mon Sep 17 00:00:00 2001 From: Rohan Date: Wed, 9 Dec 2020 20:55:27 +0530 Subject: Changes to command output. --- bot/exts/evergreen/reddit.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index fb447cda..ddc0cc27 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -62,24 +62,24 @@ class Reddit(Cog): """Build embed pages required for Paginator.""" pages = [] first_page = "" - for i, post in enumerate(posts, start=1): + for post in posts: post_page = "" image_url = "" data = post["data"] - title = textwrap.shorten(data["title"], width=64, placeholder="...") + title = textwrap.shorten(data["title"], width=50, placeholder="...") # Normal brackets interfere with Markdown. title = escape_markdown(title).replace("[", "⦋").replace("]", "⦌") link = self.URL + data["permalink"] - first_page += f"**{i}. 
[{title.replace('*', '')}]({link})**\n" - post_page += f"**{i}. [{title}]({link})**\n\n" + first_page += f"**[{title.replace('*', '')}]({link})**\n" + post_page += f"**[{title}]({link})**\n\n" text = data["selftext"] if text: - first_page += textwrap.shorten(text, width=128, placeholder="...").replace("*", "") + "\n" + first_page += textwrap.shorten(text, width=100, placeholder="...").replace("*", "") + "\n" post_page += textwrap.shorten(text, width=252, placeholder="...") + "\n\n" ups = data["ups"] @@ -107,7 +107,7 @@ class Reddit(Cog): pages.append((post_page, image_url)) - pages.insert(0, (first_page, "")) + pages.insert(0, (first_page, "")) # Using image paginator, hence settings image url to empty string return pages async def get_access_token(self) -> None: @@ -235,6 +235,7 @@ class Reddit(Cog): if paginate: return pages + # Use only starting summary page for #reddit channel posts. embed.description += pages[0] embed.colour = Colour.blurple() return embed @@ -302,8 +303,8 @@ class Reddit(Cog): async with ctx.typing(): pages = await self.get_top_posts(subreddit=subreddit, time="all", paginate=True) + await ctx.send("Here are the top r/Python posts of all time!") embed = Embed( - title=f"{Emojis.reddit} {subreddit} - Top\n\n", color=Colour.blurple() ) @@ -315,8 +316,8 @@ class Reddit(Cog): async with ctx.typing(): pages = await self.get_top_posts(subreddit=subreddit, time="day", paginate=True) + await ctx.send("Here are today's top r/Python posts!") embed = Embed( - title=f"{Emojis.reddit} {subreddit} - Daily\n\n", color=Colour.blurple() ) @@ -328,8 +329,8 @@ class Reddit(Cog): async with ctx.typing(): pages = await self.get_top_posts(subreddit=subreddit, time="week", paginate=True) + await ctx.send("Here are this week's top r/Python posts!") embed = Embed( - title=f"{Emojis.reddit} {subreddit} - Weekly\n\n", color=Colour.blurple() ) -- cgit v1.2.3 From ac68262b8c3ec96f4476db7d4a00ebeb6b4149f8 Mon Sep 17 00:00:00 2001 From: Rohan Date: Tue, 29 Dec 2020 09:40:42 +0530 Subject: Fix bug in auto_poster_loop() regarding embed description. --- bot/exts/evergreen/reddit.py | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index ddc0cc27..f5134105 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -58,7 +58,7 @@ class Reddit(Cog): """Get the #reddit channel object from the bot's cache.""" return self.bot.get_channel(Channels.reddit) - def build_pagination_pages(self, posts: List[dict]) -> List[tuple]: + def build_pagination_pages(self, posts: List[dict], paginate) -> Union[List[tuple], str]: """Build embed pages required for Paginator.""" pages = [] first_page = "" @@ -75,19 +75,17 @@ class Reddit(Cog): link = self.URL + data["permalink"] first_page += f"**[{title.replace('*', '')}]({link})**\n" - post_page += f"**[{title}]({link})**\n\n" text = data["selftext"] if text: first_page += textwrap.shorten(text, width=100, placeholder="...").replace("*", "") + "\n" - post_page += textwrap.shorten(text, width=252, placeholder="...") + "\n\n" ups = data["ups"] comments = data["num_comments"] author = data["author"] content_type = Emojis.reddit_post_text - if data["is_video"] is True or "youtube" in data["url"].split("."): + if data["is_video"] is True or {"youtube", "youtu.be"}.issubset(set(data["url"].split("."))): # This means the content type in the post is a video. 
content_type = f"{Emojis.reddit_post_video}" @@ -100,12 +98,21 @@ class Reddit(Cog): f"{content_type}\u2003{Emojis.reddit_upvote}{ups}\u2003{Emojis.reddit_comments}" f"\u2002{comments}\u2003{Emojis.reddit_users}{author}\n\n" ) - post_page += ( - f"{content_type}\u2003{Emojis.reddit_upvote}{ups}\u2003{Emojis.reddit_comments}\u2002" - f"{comments}\u2003{Emojis.reddit_users}{author}" - ) - pages.append((post_page, image_url)) + if paginate: + post_page += f"**[{title}]({link})**\n\n" + if text: + post_page += textwrap.shorten(text, width=252, placeholder="...") + "\n\n" + post_page += ( + f"{content_type}\u2003{Emojis.reddit_upvote}{ups}\u2003{Emojis.reddit_comments}\u2002" + f"{comments}\u2003{Emojis.reddit_users}{author}" + ) + + pages.append((post_page, image_url)) + + if not paginate: + # Return the first summery page if pagination is not required + return first_page pages.insert(0, (first_page, "")) # Using image paginator, hence settings image url to empty string return pages @@ -213,7 +220,7 @@ class Reddit(Cog): The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most. """ - embed = Embed(description="") + embed = Embed() posts = await self.fetch_posts( route=f"{subreddit}/top", @@ -230,13 +237,11 @@ class Reddit(Cog): return embed - pages = self.build_pagination_pages(posts) - if paginate: - return pages + return self.build_pagination_pages(posts, paginate=True) # Use only starting summary page for #reddit channel posts. - embed.description += pages[0] + embed.description = self.build_pagination_pages(posts, paginate=False) embed.colour = Colour.blurple() return embed @@ -303,7 +308,7 @@ class Reddit(Cog): async with ctx.typing(): pages = await self.get_top_posts(subreddit=subreddit, time="all", paginate=True) - await ctx.send("Here are the top r/Python posts of all time!") + await ctx.send(f"Here are the top {subreddit} posts of all time!") embed = Embed( color=Colour.blurple() ) @@ -316,7 +321,7 @@ class Reddit(Cog): async with ctx.typing(): pages = await self.get_top_posts(subreddit=subreddit, time="day", paginate=True) - await ctx.send("Here are today's top r/Python posts!") + await ctx.send(f"Here are today's top {subreddit} posts!") embed = Embed( color=Colour.blurple() ) @@ -329,7 +334,7 @@ class Reddit(Cog): async with ctx.typing(): pages = await self.get_top_posts(subreddit=subreddit, time="week", paginate=True) - await ctx.send("Here are this week's top r/Python posts!") + await ctx.send(f"Here are this week's top {subreddit} posts!") embed = Embed( color=Colour.blurple() ) -- cgit v1.2.3 From c9f0d26601f7d3bf01257fbff9384df76aa381f6 Mon Sep 17 00:00:00 2001 From: Rohan Date: Wed, 27 Jan 2021 13:02:06 +0530 Subject: Fix lint error: Missing type annotation for function arugment `paginate`. 
--- bot/exts/evergreen/reddit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index f5134105..1a4f9add 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -58,7 +58,7 @@ class Reddit(Cog): """Get the #reddit channel object from the bot's cache.""" return self.bot.get_channel(Channels.reddit) - def build_pagination_pages(self, posts: List[dict], paginate) -> Union[List[tuple], str]: + def build_pagination_pages(self, posts: List[dict], paginate: bool) -> Union[List[tuple], str]: """Build embed pages required for Paginator.""" pages = [] first_page = "" -- cgit v1.2.3 From b2ec5813ddc4e7abc38c4143d79a0165fa591cd7 Mon Sep 17 00:00:00 2001 From: Rohan Date: Thu, 15 Apr 2021 19:16:42 +0530 Subject: Use custom help command util for sending command help. --- bot/exts/evergreen/reddit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index 1a4f9add..7f4ce6a0 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -15,6 +15,7 @@ from discord.utils import escape_markdown, sleep_until from bot.bot import Bot from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES from bot.utils.converters import Subreddit +from bot.utils.extensions import invoke_help_command from bot.utils.messages import sub_clyde from bot.utils.pagination import ImagePaginator, LinePaginator @@ -300,7 +301,7 @@ class Reddit(Cog): @group(name="reddit", invoke_without_command=True) async def reddit_group(self, ctx: Context) -> None: """View the top posts from various subreddits.""" - await ctx.send_help(ctx.command) + await invoke_help_command(ctx) @reddit_group.command(name="top") async def top_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None: -- cgit v1.2.3 From 541efecc44fffec87f7e9346619dcae0710e2a08 Mon Sep 17 00:00:00 2001 From: Rohan Date: Thu, 15 Apr 2021 20:42:14 +0530 Subject: Apply code review suggestions. --- bot/exts/evergreen/reddit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index 7f4ce6a0..916563ac 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -86,11 +86,11 @@ class Reddit(Cog): author = data["author"] content_type = Emojis.reddit_post_text - if data["is_video"] is True or {"youtube", "youtu.be"}.issubset(set(data["url"].split("."))): + if data["is_video"] or {"youtube", "youtu.be"}.issubset(set(data["url"].split("."))): # This means the content type in the post is a video. content_type = f"{Emojis.reddit_post_video}" - elif any(data["url"].endswith(pic_format) for pic_format in ("jpg", "png", "gif")): + elif data["url"].endswith(("jpg", "png", "gif")): # This means the content type in the post is an image. 
content_type = f"{Emojis.reddit_post_photo}" image_url = data["url"] -- cgit v1.2.3 From fb556c6407bab3ef078bc23620074ba206fb7ad8 Mon Sep 17 00:00:00 2001 From: Hassan Abouelela Date: Mon, 10 May 2021 00:27:25 +0300 Subject: Fixes Expected Token Revoke Status Code --- bot/exts/evergreen/reddit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index 916563ac..e57fa2c0 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -174,7 +174,7 @@ class Reddit(Cog): } ) - if response.status == 204 and response.content_type == "application/json": + if response.status in [200, 204] and response.content_type == "application/json": self.access_token = None else: log.warning(f"Unable to revoke access token: status {response.status}.") -- cgit v1.2.3 From cc5364421c69e8ad533483591cb37b063b7ea5a6 Mon Sep 17 00:00:00 2001 From: wookie184 Date: Fri, 27 Aug 2021 13:56:26 +0100 Subject: Escape markdown properly in post body to fix rendering on android --- bot/exts/evergreen/reddit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'bot/exts/evergreen/reddit.py') diff --git a/bot/exts/evergreen/reddit.py b/bot/exts/evergreen/reddit.py index e57fa2c0..4df170c6 100644 --- a/bot/exts/evergreen/reddit.py +++ b/bot/exts/evergreen/reddit.py @@ -79,7 +79,8 @@ class Reddit(Cog): text = data["selftext"] if text: - first_page += textwrap.shorten(text, width=100, placeholder="...").replace("*", "") + "\n" + text = escape_markdown(text).replace("[", "⦋").replace("]", "⦌") + first_page += textwrap.shorten(text, width=100, placeholder="...") + "\n" ups = data["ups"] comments = data["num_comments"] -- cgit v1.2.3