author    D0rs4n <[email protected]>    2021-08-08 15:16:17 +0200
committer D0rs4n <[email protected]>    2021-08-08 15:16:17 +0200
commit 95d7a4dcf95a1bf444c0ebe46199df56e26289f1 (patch)
tree c8899f5753bcddcac30ce5d03c9f8ef0076a8a02
parent Refactor wiki_request, add External API related error-handler (diff)
parent Merge #804 - circumvent URL injections in various cogs (diff)
Resolve merge conflict
-rw-r--r--  bot/constants.py                          4
-rw-r--r--  bot/exts/evergreen/githubinfo.py          9
-rw-r--r--  bot/exts/evergreen/movie.py              11
-rw-r--r--  bot/exts/evergreen/stackoverflow.py      88
-rw-r--r--  bot/exts/evergreen/wikipedia.py          37
-rw-r--r--  bot/exts/evergreen/wolfram.py            34
-rw-r--r--  bot/exts/halloween/hacktoberstats.py     17
-rw-r--r--  bot/exts/valentines/movie_generator.py    5
8 files changed, 165 insertions(+), 40 deletions(-)
diff --git a/bot/constants.py b/bot/constants.py
index ff901c8e..bb602361 100644
--- a/bot/constants.py
+++ b/bot/constants.py
@@ -226,6 +226,10 @@ class Emojis:
status_dnd = "<:status_dnd:470326272082313216>"
status_offline = "<:status_offline:470326266537705472>"
+
+ stackoverflow_tag = "<:stack_tag:870926975307501570>"
+ stackoverflow_views = "<:stack_eye:870926992692879371>"
+
# Reddit emojis
reddit = "<:reddit:676030265734332427>"
reddit_post_text = "<:reddit_post_text:676030265910493204>"
diff --git a/bot/exts/evergreen/githubinfo.py b/bot/exts/evergreen/githubinfo.py
index 27e607e5..d29f3aa9 100644
--- a/bot/exts/evergreen/githubinfo.py
+++ b/bot/exts/evergreen/githubinfo.py
@@ -1,7 +1,7 @@
import logging
import random
from datetime import datetime
-from urllib.parse import quote
+from urllib.parse import quote, quote_plus
import discord
from discord.ext import commands
@@ -37,7 +37,7 @@ class GithubInfo(commands.Cog):
async def github_user_info(self, ctx: commands.Context, username: str) -> None:
"""Fetches a user's GitHub information."""
async with ctx.typing():
- user_data = await self.fetch_data(f"{GITHUB_API_URL}/users/{username}")
+ user_data = await self.fetch_data(f"{GITHUB_API_URL}/users/{quote_plus(username)}")
# User_data will not have a message key if the user exists
if "message" in user_data:
@@ -91,7 +91,10 @@ class GithubInfo(commands.Cog):
)
if user_data["type"] == "User":
- embed.add_field(name="Gists", value=f"[{gists}](https://gist.github.com/{quote(username, safe='')})")
+ embed.add_field(
+ name="Gists",
+ value=f"[{gists}](https://gist.github.com/{quote_plus(username, safe='')})"
+ )
embed.add_field(
name=f"Organization{'s' if len(orgs)!=1 else ''}",
diff --git a/bot/exts/evergreen/movie.py b/bot/exts/evergreen/movie.py
index 10638aea..c6af4bcd 100644
--- a/bot/exts/evergreen/movie.py
+++ b/bot/exts/evergreen/movie.py
@@ -2,7 +2,6 @@ import logging
import random
from enum import Enum
from typing import Any, Dict, List, Tuple
-from urllib.parse import urlencode
from aiohttp import ClientSession
from discord import Embed
@@ -121,10 +120,10 @@ class Movie(Cog):
"with_genres": genre_id
}
- url = BASE_URL + "discover/movie?" + urlencode(params)
+ url = BASE_URL + "discover/movie"
# Make discover request to TMDB, return result
- async with client.get(url) as resp:
+ async with client.get(url, params=params) as resp:
return await resp.json()
async def get_pages(self, client: ClientSession, movies: Dict[str, Any], amount: int) -> List[Tuple[str, str]]:
@@ -142,9 +141,11 @@ class Movie(Cog):
async def get_movie(self, client: ClientSession, movie: int) -> Dict:
"""Get Movie by movie ID from TMDB. Return result dictionary."""
- url = BASE_URL + f"movie/{movie}?" + urlencode(MOVIE_PARAMS)
+ if not isinstance(movie, int):
+ raise ValueError("Error while fetching movie from TMDB, movie argument must be integer. ")
+ url = BASE_URL + f"movie/{movie}"
- async with client.get(url) as resp:
+ async with client.get(url, params=MOVIE_PARAMS) as resp:
return await resp.json()
async def create_page(self, movie: Dict[str, Any]) -> Tuple[str, str]:
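
The movie.py change drops manual urlencode in favour of aiohttp's params argument. A rough, self-contained sketch of that pattern (the base URL, API key and genre id below are placeholders, not the cog's real constants):

    import asyncio

    import aiohttp

    BASE_URL = "https://api.themoviedb.org/3/"

    async def discover(genre_id: int) -> dict:
        # aiohttp encodes each key/value pair itself, so odd or hostile values
        # cannot smuggle extra query parameters into the final URL.
        params = {"api_key": "YOUR_TMDB_KEY", "with_genres": str(genre_id)}
        async with aiohttp.ClientSession() as session:
            async with session.get(BASE_URL + "discover/movie", params=params) as resp:
                return await resp.json()

    # asyncio.run(discover(35))
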
diff --git a/bot/exts/evergreen/stackoverflow.py b/bot/exts/evergreen/stackoverflow.py
new file mode 100644
index 00000000..40f149c9
--- /dev/null
+++ b/bot/exts/evergreen/stackoverflow.py
@@ -0,0 +1,88 @@
+import logging
+from html import unescape
+from urllib.parse import quote_plus
+
+from discord import Embed, HTTPException
+from discord.ext import commands
+
+from bot import bot
+from bot.constants import Colours, Emojis
+
+logger = logging.getLogger(__name__)
+
+BASE_URL = "https://api.stackexchange.com/2.2/search/advanced"
+SO_PARAMS = {
+ "order": "desc",
+ "sort": "activity",
+ "site": "stackoverflow"
+}
+SEARCH_URL = "https://stackoverflow.com/search?q={query}"
+ERR_EMBED = Embed(
+ title="Error in fetching results from Stackoverflow",
+ description=(
+ "Sorry, there was en error while trying to fetch data from the Stackoverflow website. Please try again in some "
+ "time. If this issue persists, please contact the staff or send a message in #dev-contrib."
+ ),
+ color=Colours.soft_red
+)
+
+
+class Stackoverflow(commands.Cog):
+ """Contains command to interact with stackoverflow from discord."""
+
+ def __init__(self, bot: bot.Bot):
+ self.bot = bot
+
+ @commands.command(aliases=["so"])
+ @commands.cooldown(1, 15, commands.cooldowns.BucketType.user)
+ async def stackoverflow(self, ctx: commands.Context, *, search_query: str) -> None:
+ """Sends the top 5 results of a search query from stackoverflow."""
+ params = SO_PARAMS | {"q": search_query}
+ async with self.bot.http_session.get(url=BASE_URL, params=params) as response:
+ if response.status == 200:
+ data = await response.json()
+ else:
+ logger.error(f'Status code is not 200, it is {response.status}')
+ await ctx.send(embed=ERR_EMBED)
+ return
+ if not data['items']:
+ no_search_result = Embed(
+ title=f"No search results found for {search_query}",
+ color=Colours.soft_red
+ )
+ await ctx.send(embed=no_search_result)
+ return
+
+ top5 = data["items"][:5]
+ encoded_search_query = quote_plus(search_query)
+ embed = Embed(
+ title="Search results - Stackoverflow",
+ url=SEARCH_URL.format(query=encoded_search_query),
+ description=f"Here are the top {len(top5)} results:",
+ color=Colours.orange
+ )
+ for item in top5:
+ embed.add_field(
+ name=unescape(item['title']),
+ value=(
+ f"[{Emojis.reddit_upvote} {item['score']} "
+ f"{Emojis.stackoverflow_views} {item['view_count']} "
+ f"{Emojis.reddit_comments} {item['answer_count']} "
+ f"{Emojis.stackoverflow_tag} {', '.join(item['tags'][:3])}]"
+ f"({item['link']})"
+ ),
+ inline=False)
+ embed.set_footer(text="View the original link for more results.")
+ try:
+ await ctx.send(embed=embed)
+ except HTTPException:
+ search_query_too_long = Embed(
+ title="Your search query is too long, please try shortening your search query",
+ color=Colours.soft_red
+ )
+ await ctx.send(embed=search_query_too_long)
+
+
+def setup(bot: bot.Bot) -> None:
+ """Load the Stackoverflow Cog."""
+ bot.add_cog(Stackoverflow(bot))
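
For quick manual testing outside the bot, a standalone sketch of the same Stack Exchange call (the query string is an invented example):

    import asyncio

    import aiohttp

    async def search_stackoverflow(query: str) -> list:
        """Return up to five raw result items from the Stack Exchange search API."""
        params = {"order": "desc", "sort": "activity", "site": "stackoverflow", "q": query}
        url = "https://api.stackexchange.com/2.2/search/advanced"
        async with aiohttp.ClientSession() as session:
            async with session.get(url, params=params) as resp:
                resp.raise_for_status()
                data = await resp.json()
        # Each item carries the fields the cog reads: title, score, view_count,
        # answer_count, tags and link.
        return data["items"][:5]

    # asyncio.run(search_stackoverflow("python asyncio gather"))
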
diff --git a/bot/exts/evergreen/wikipedia.py b/bot/exts/evergreen/wikipedia.py
index d53424fd..879146a6 100644
--- a/bot/exts/evergreen/wikipedia.py
+++ b/bot/exts/evergreen/wikipedia.py
@@ -14,9 +14,18 @@ from bot.utils.exceptions import ExternalAPIError
log = logging.getLogger(__name__)
SEARCH_API = (
- "https://en.wikipedia.org/w/api.php?action=query&list=search&prop=info&inprop=url&utf8=&"
- "format=json&origin=*&srlimit={number_of_results}&srsearch={string}"
+ "https://en.wikipedia.org/w/api.php"
)
+WIKI_PARAMS = {
+ "action": "query",
+ "list": "search",
+ "prop": "info",
+ "inprop": "url",
+ "utf8": "",
+ "format": "json",
+ "origin": "*",
+}
WIKI_THUMBNAIL = (
"https://upload.wikimedia.org/wikipedia/en/thumb/8/80/Wikipedia-logo-v2.svg"
"/330px-Wikipedia-logo-v2.svg.png"
@@ -36,11 +45,31 @@ class WikipediaSearch(commands.Cog):
async def wiki_request(self, channel: TextChannel, search: str) -> Optional[List[str]]:
"""Search wikipedia search string and return formatted first 10 pages found."""
- url = SEARCH_API.format(number_of_results=10, string=search)
- async with self.bot.http_session.get(url=url) as resp:
+ params = WIKI_PARAMS | {"srlimit": 10, "srsearch": search}
+ async with self.bot.http_session.get(url=SEARCH_API, params=params) as resp:
if resp.status != 200:
log.info(f"Unexpected response `{resp.status}` while searching wikipedia for `{search}`")
raise ExternalAPIError("Wikipedia API")
+ raw_data = await resp.json()
+ number_of_results = raw_data["query"]["searchinfo"]["totalhits"]
+
+ if number_of_results:
+ results = raw_data["query"]["search"]
+ lines = []
+
+ for article in results:
+ line = WIKI_SEARCH_RESULT.format(
+ name=article["title"],
+ description=unescape(
+ re.sub(
+ WIKI_SNIPPET_REGEX, "", article["snippet"]
+ )
+ ),
+ url=f"https://en.wikipedia.org/?curid={article['pageid']}"
+ )
+ lines.append(line)
+
+ return lines
raw_data = await resp.json()
if raw_data.get("query", None) is None:
diff --git a/bot/exts/evergreen/wolfram.py b/bot/exts/evergreen/wolfram.py
index d23afd6f..26674d37 100644
--- a/bot/exts/evergreen/wolfram.py
+++ b/bot/exts/evergreen/wolfram.py
@@ -1,7 +1,7 @@
import logging
from io import BytesIO
from typing import Callable, List, Optional, Tuple
-from urllib import parse
+from urllib.parse import urlencode
import arrow
import discord
@@ -17,7 +17,7 @@ log = logging.getLogger(__name__)
APPID = Wolfram.key
DEFAULT_OUTPUT_FORMAT = "JSON"
-QUERY = "http://api.wolframalpha.com/v2/{request}?{data}"
+QUERY = "http://api.wolframalpha.com/v2/{request}"
WOLF_IMAGE = "https://www.symbols.com/gi.php?type=1&id=2886&i=1"
MAX_PODS = 20
@@ -108,7 +108,7 @@ def custom_cooldown(*ignore: List[int]) -> Callable:
async def get_pod_pages(ctx: Context, bot: Bot, query: str) -> Optional[List[Tuple]]:
"""Get the Wolfram API pod pages for the provided query."""
async with ctx.typing():
- url_str = parse.urlencode({
+ params = {
"input": query,
"appid": APPID,
"output": DEFAULT_OUTPUT_FORMAT,
@@ -116,27 +116,27 @@ async def get_pod_pages(ctx: Context, bot: Bot, query: str) -> Optional[List[Tup
"location": "the moon",
"latlong": "0.0,0.0",
"ip": "1.1.1.1"
- })
- request_url = QUERY.format(request="query", data=url_str)
+ }
+ request_url = QUERY.format(request="query")
- async with bot.http_session.get(request_url) as response:
+ async with bot.http_session.get(url=request_url, params=params) as response:
json = await response.json(content_type="text/plain")
result = json["queryresult"]
-
+ log_full_url = f"{request_url}?{urlencode(params)}"
if result["error"]:
# API key not set up correctly
if result["error"]["msg"] == "Invalid appid":
message = "Wolfram API key is invalid or missing."
log.warning(
"API key seems to be missing, or invalid when "
- f"processing a wolfram request: {url_str}, Response: {json}"
+ f"processing a wolfram request: {log_full_url}, Response: {json}"
)
await send_embed(ctx, message)
return
message = "Something went wrong internally with your request, please notify staff!"
- log.warning(f"Something went wrong getting a response from wolfram: {url_str}, Response: {json}")
+ log.warning(f"Something went wrong getting a response from wolfram: {log_full_url}, Response: {json}")
await send_embed(ctx, message)
return
@@ -172,18 +172,18 @@ class Wolfram(Cog):
@custom_cooldown(*STAFF_ROLES)
async def wolfram_command(self, ctx: Context, *, query: str) -> None:
"""Requests all answers on a single image, sends an image of all related pods."""
- url_str = parse.urlencode({
+ params = {
"i": query,
"appid": APPID,
"location": "the moon",
"latlong": "0.0,0.0",
"ip": "1.1.1.1"
- })
- query = QUERY.format(request="simple", data=url_str)
+ }
+ request_url = QUERY.format(request="simple")
# Give feedback that the bot is working.
async with ctx.typing():
- async with self.bot.http_session.get(query) as response:
+ async with self.bot.http_session.get(url=request_url, params=params) as response:
status = response.status
image_bytes = await response.read()
@@ -257,18 +257,18 @@ class Wolfram(Cog):
@custom_cooldown(*STAFF_ROLES)
async def wolfram_short_command(self, ctx: Context, *, query: str) -> None:
"""Requests an answer to a simple question."""
- url_str = parse.urlencode({
+ params = {
"i": query,
"appid": APPID,
"location": "the moon",
"latlong": "0.0,0.0",
"ip": "1.1.1.1"
- })
- query = QUERY.format(request="result", data=url_str)
+ }
+ request_url = QUERY.format(request="result")
# Give feedback that the bot is working.
async with ctx.typing():
- async with self.bot.http_session.get(query) as response:
+ async with self.bot.http_session.get(url=request_url, params=params) as response:
status = response.status
response_text = await response.text()
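
In wolfram.py the request itself now goes out through aiohttp's params argument, while urlencode is kept only to rebuild the full URL for log messages. A standalone sketch of that split (the appid and input below are placeholders):

    from urllib.parse import urlencode

    QUERY = "http://api.wolframalpha.com/v2/{request}"

    params = {
        "input": "integrate x^2",
        "appid": "DEMO-APPID",
        "output": "JSON",
    }
    request_url = QUERY.format(request="query")

    # aiohttp would receive request_url plus params=params; the encoded form is
    # rebuilt separately so the logged URL matches what was actually sent.
    log_full_url = f"{request_url}?{urlencode(params)}"
    print(log_full_url)
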
diff --git a/bot/exts/halloween/hacktoberstats.py b/bot/exts/halloween/hacktoberstats.py
index 50d3aaf6..24106a5e 100644
--- a/bot/exts/halloween/hacktoberstats.py
+++ b/bot/exts/halloween/hacktoberstats.py
@@ -4,6 +4,7 @@ import re
from collections import Counter
from datetime import datetime, timedelta
from typing import List, Optional, Tuple, Union
+from urllib.parse import quote_plus
import discord
from async_rediscache import RedisCache
@@ -208,24 +209,24 @@ class HacktoberStats(commands.Cog):
None will be returned when the GitHub user was not found.
"""
log.info(f"Fetching Hacktoberfest Stats for GitHub user: '{github_username}'")
- base_url = "https://api.github.com/search/issues?q="
+ base_url = "https://api.github.com/search/issues"
action_type = "pr"
is_query = "public"
not_query = "draft"
date_range = f"{CURRENT_YEAR}-09-30T10:00Z..{CURRENT_YEAR}-11-01T12:00Z"
per_page = "300"
- query_url = (
- f"{base_url}"
+ query_params = (
f"+type:{action_type}"
f"+is:{is_query}"
- f"+author:{github_username}"
+ f"+author:{quote_plus(github_username)}"
f"+-is:{not_query}"
f"+created:{date_range}"
f"&per_page={per_page}"
)
- log.debug(f"GitHub query URL generated: {query_url}")
- jsonresp = await self._fetch_url(query_url, REQUEST_HEADERS)
+ log.debug(f"GitHub query parameters generated: {query_params}")
+
+ jsonresp = await self._fetch_url(base_url, REQUEST_HEADERS, {"q": query_params})
if "message" in jsonresp:
# One of the parameters is invalid, short circuit for now
api_message = jsonresp["errors"][0]["message"]
@@ -295,9 +296,9 @@ class HacktoberStats(commands.Cog):
outlist.append(itemdict)
return outlist
- async def _fetch_url(self, url: str, headers: dict) -> dict:
+ async def _fetch_url(self, url: str, headers: dict, params: dict) -> dict:
"""Retrieve API response from URL."""
- async with self.bot.http_session.get(url, headers=headers) as resp:
+ async with self.bot.http_session.get(url, headers=headers, params=params) as resp:
return await resp.json()
@staticmethod
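
In hacktoberstats.py the whole search expression now travels as the single "q" value, which aiohttp percent-encodes; note that the embedded "&per_page=..." fragment therefore ends up inside the encoded q string rather than acting as its own query parameter. A hedged sketch of passing it separately, using GitHub's documented per_page parameter (capped at 100) and an invented username:

    import asyncio

    import aiohttp

    async def fetch_hacktober_prs(username: str) -> dict:
        # GitHub's search syntax separates qualifiers with spaces; aiohttp takes
        # care of encoding them, so no hand-built "+" joiners are needed here.
        params = {
            "q": (
                f"type:pr is:public author:{username} -is:draft "
                "created:2021-09-30T10:00Z..2021-11-01T12:00Z"
            ),
            "per_page": "100",
        }
        url = "https://api.github.com/search/issues"
        async with aiohttp.ClientSession() as session:
            async with session.get(url, params=params) as resp:
                return await resp.json()

    # asyncio.run(fetch_hacktober_prs("octocat"))
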
diff --git a/bot/exts/valentines/movie_generator.py b/bot/exts/valentines/movie_generator.py
index 0fc5edb4..d2dc8213 100644
--- a/bot/exts/valentines/movie_generator.py
+++ b/bot/exts/valentines/movie_generator.py
@@ -1,7 +1,6 @@
import logging
import random
from os import environ
-from urllib import parse
import discord
from discord.ext import commands
@@ -35,8 +34,8 @@ class RomanceMovieFinder(commands.Cog):
"with_genres": "10749"
}
# The api request url
- request_url = "https://api.themoviedb.org/3/discover/movie?" + parse.urlencode(params)
- async with self.bot.http_session.get(request_url) as resp:
+ request_url = "https://api.themoviedb.org/3/discover/movie"
+ async with self.bot.http_session.get(request_url, params=params) as resp:
# Trying to load the json file returned from the api
try:
data = await resp.json()