diff options
| -rw-r--r-- | bot/exts/info/code_snippets.py | 78 | ||||
| -rw-r--r-- | bot/exts/info/doc/_markdown.py | 31 | ||||
| -rw-r--r-- | bot/exts/info/doc/_parsing.py | 2 | ||||
| -rw-r--r-- | bot/exts/info/pep.py | 182 | ||||
| -rw-r--r-- | poetry.lock | 56 | ||||
| -rw-r--r-- | pyproject.toml | 4 |
6 files changed, 170 insertions, 183 deletions
diff --git a/bot/exts/info/code_snippets.py b/bot/exts/info/code_snippets.py index a44b0c475..eba15e825 100644 --- a/bot/exts/info/code_snippets.py +++ b/bot/exts/info/code_snippets.py @@ -38,6 +38,13 @@ BITBUCKET_RE = re.compile( r"/(?P<file_path>[^#>]+)(\?[^#>]+)?(#lines-(?P<start_line>\d+)(:(?P<end_line>\d+))?)" ) +PYDIS_PASTEBIN_RE = re.compile( + r"https://paste\.(?:pythondiscord\.com|pydis\.wtf)/(?P<paste_id>[a-zA-Z0-9]+)" + r"#(?P<selections>(?:\d+L\d+-L\d+)(?:,\d+L\d+-L\d+)*)" +) + +PASTEBIN_LINE_SELECTION_RE = re.compile(r"(\d+)L(\d+)-L(\d+)") + class CodeSnippets(Cog): """ @@ -54,7 +61,8 @@ class CodeSnippets(Cog): (GITHUB_RE, self._fetch_github_snippet), (GITHUB_GIST_RE, self._fetch_github_gist_snippet), (GITLAB_RE, self._fetch_gitlab_snippet), - (BITBUCKET_RE, self._fetch_bitbucket_snippet) + (BITBUCKET_RE, self._fetch_bitbucket_snippet), + (PYDIS_PASTEBIN_RE, self._fetch_pastebin_snippets), ] async def _fetch_response(self, url: str, response_format: str, **kwargs) -> Any: @@ -170,7 +178,40 @@ class CodeSnippets(Cog): ) return self._snippet_to_codeblock(file_contents, file_path, start_line, end_line) - def _snippet_to_codeblock(self, file_contents: str, file_path: str, start_line: str, end_line: str) -> str: + async def _fetch_pastebin_snippets(self, paste_id: str, selections: str) -> list[str]: + """Fetches snippets from paste.pythondiscord.com.""" + paste_data = await self._fetch_response( + f"https://paste.pythondiscord.com/api/v1/paste/{paste_id}", + "json" + ) + + snippets = [] + for match in PASTEBIN_LINE_SELECTION_RE.finditer(selections): + file_num, start, end = match.groups() + file_num = int(file_num) - 1 + + file = paste_data["files"][file_num] + file_name = file.get("name") or f"file {file_num + 1}" + snippet = self._snippet_to_codeblock( + file["content"], + file_name, + start, + end, + language=file["lexer"], + ) + + snippets.append(snippet) + + return snippets + + def _snippet_to_codeblock( + self, + file_contents: str, + file_path: str, 
+ start_line: str, + end_line: str|None, + language: str|None = None + ) -> str: """ Given the entire file contents and target lines, creates a code block. @@ -203,15 +244,16 @@ class CodeSnippets(Cog): required = "\n".join(split_file_contents[start_line - 1:end_line]) required = textwrap.dedent(required).rstrip().replace("`", "`\u200b") - # Extracts the code language and checks whether it's a "valid" language - language = file_path.split("/")[-1].split(".")[-1] - trimmed_language = language.replace("-", "").replace("+", "").replace("_", "") - is_valid_language = trimmed_language.isalnum() - if not is_valid_language: - language = "" + if language is None: + # Extracts the code language and checks whether it's a "valid" language + language = file_path.split("/")[-1].split(".")[-1] + trimmed_language = language.replace("-", "").replace("+", "").replace("_", "") + is_valid_language = trimmed_language.isalnum() + if not is_valid_language: + language = "" - if language == "pyi": - language = "py" + if language == "pyi": + language = "py" # Adds a label showing the file path to the snippet if start_line == end_line: @@ -231,8 +273,7 @@ class CodeSnippets(Cog): for pattern, handler in self.pattern_handlers: for match in pattern.finditer(content): try: - snippet = await handler(**match.groupdict()) - all_snippets.append((match.start(), snippet)) + result = await handler(**match.groupdict()) except ClientResponseError as error: error_message = error.message log.log( @@ -241,8 +282,17 @@ class CodeSnippets(Cog): f"{error_message} for GET {error.request_info.real_url.human_repr()}" ) - # Sorts the list of snippets by their match index and joins them into a single message - return "\n".join(x[1] for x in sorted(all_snippets)) + if isinstance(result, list): + # The handler returned multiple snippets (currently only possible with our pastebin) + all_snippets.extend((match.start(), snippet) for snippet in result) + else: + all_snippets.append((match.start(), result)) + + # Sort 
the list of snippets by their match index + all_snippets.sort(key=lambda item: item[0]) + + # Join them into a single message + return "\n".join(x[1] for x in all_snippets) @Cog.listener() async def on_message(self, message: discord.Message) -> None: diff --git a/bot/exts/info/doc/_markdown.py b/bot/exts/info/doc/_markdown.py index a030903ed..52e00c2f3 100644 --- a/bot/exts/info/doc/_markdown.py +++ b/bot/exts/info/doc/_markdown.py @@ -1,21 +1,20 @@ -import re from urllib.parse import urljoin import markdownify from bs4.element import PageElement -# See https://github.com/matthewwithanm/python-markdownify/issues/31 -markdownify.whitespace_re = re.compile(r"[\r\n\s\t ]+") - class DocMarkdownConverter(markdownify.MarkdownConverter): """Subclass markdownify's MarkdownCoverter to provide custom conversion methods.""" def __init__(self, *, page_url: str, **options): - super().__init__(**options) + # Reflow text to avoid unwanted line breaks. + default_options = {"wrap": True, "wrap_width": None} + + super().__init__(**default_options | options) self.page_url = page_url - def convert_li(self, el: PageElement, text: str, convert_as_inline: bool) -> str: + def convert_li(self, el: PageElement, text: str, parent_tags: set[str]) -> str: """Fix markdownify's erroneous indexing in ol tags.""" parent = el.parent if parent is not None and parent.name == "ol": @@ -31,38 +30,38 @@ class DocMarkdownConverter(markdownify.MarkdownConverter): bullet = bullets[depth % len(bullets)] return f"{bullet} {text}\n" - def _convert_hn(self, _n: int, el: PageElement, text: str, convert_as_inline: bool) -> str: + def _convert_hn(self, _n: int, el: PageElement, text: str, parent_tags: set[str]) -> str: """Convert h tags to bold text with ** instead of adding #.""" - if convert_as_inline: + if "_inline" in parent_tags: return text return f"**{text}**\n\n" - def convert_code(self, el: PageElement, text: str, convert_as_inline: bool) -> str: + def convert_code(self, el: PageElement, text: str, 
parent_tags: set[str]) -> str: """Undo `markdownify`s underscore escaping.""" return f"`{text}`".replace("\\", "") - def convert_pre(self, el: PageElement, text: str, convert_as_inline: bool) -> str: + def convert_pre(self, el: PageElement, text: str, parent_tags: set[str]) -> str: """Wrap any codeblocks in `py` for syntax highlighting.""" code = "".join(el.strings) return f"```py\n{code}```" - def convert_a(self, el: PageElement, text: str, convert_as_inline: bool) -> str: + def convert_a(self, el: PageElement, text: str, parent_tags: set[str]) -> str: """Resolve relative URLs to `self.page_url`.""" el["href"] = urljoin(self.page_url, el["href"]) # Discord doesn't handle titles properly, showing links with them as raw text. el["title"] = None - return super().convert_a(el, text, convert_as_inline) + return super().convert_a(el, text, parent_tags) - def convert_p(self, el: PageElement, text: str, convert_as_inline: bool) -> str: + def convert_p(self, el: PageElement, text: str, parent_tags: set[str]) -> str: """Include only one newline instead of two when the parent is a li tag.""" - if convert_as_inline: + if "_inline" in parent_tags: return text parent = el.parent if parent is not None and parent.name == "li": return f"{text}\n" - return super().convert_p(el, text, convert_as_inline) + return super().convert_p(el, text, parent_tags) - def convert_hr(self, el: PageElement, text: str, convert_as_inline: bool) -> str: + def convert_hr(self, el: PageElement, text: str, parent_tags: set[str]) -> str: """Ignore `hr` tag.""" return "" diff --git a/bot/exts/info/doc/_parsing.py b/bot/exts/info/doc/_parsing.py index bc5a5bd31..0f5734a15 100644 --- a/bot/exts/info/doc/_parsing.py +++ b/bot/exts/info/doc/_parsing.py @@ -159,7 +159,7 @@ def _get_truncated_description( if rendered_length + element_length < max_length: if is_tag: - element_markdown = markdown_converter.process_tag(element, convert_as_inline=False) + element_markdown = markdown_converter.process_tag(element) 
else: element_markdown = markdown_converter.process_text(element) diff --git a/bot/exts/info/pep.py b/bot/exts/info/pep.py index 2b552dc4f..4655b21ff 100644 --- a/bot/exts/info/pep.py +++ b/bot/exts/info/pep.py @@ -1,26 +1,31 @@ from datetime import UTC, datetime, timedelta -from email.parser import HeaderParser -from io import StringIO +from typing import TypedDict from discord import Colour, Embed from discord.ext.commands import Cog, Context, command -from pydis_core.utils.caching import AsyncCache from bot.bot import Bot -from bot.constants import Keys from bot.log import get_logger log = get_logger(__name__) ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png" -BASE_PEP_URL = "https://peps.python.org/pep-" -PEPS_LISTING_API_URL = "https://api.github.com/repos/python/peps/contents/peps?ref=main" +PEP_API_URL = "https://peps.python.org/api/peps.json" -pep_cache = AsyncCache() +class PEPInfo(TypedDict): + """ + Useful subset of the PEP API response. -GITHUB_API_HEADERS = {} -if Keys.github: - GITHUB_API_HEADERS["Authorization"] = f"token {Keys.github}" + Full structure documented at https://peps.python.org/api/ + """ + + number: int + title: str + url: str + status: str + python_version: str | None + created: str + type: str class PythonEnhancementProposals(Cog): @@ -28,136 +33,67 @@ class PythonEnhancementProposals(Cog): def __init__(self, bot: Bot): self.bot = bot - self.peps: dict[int, str] = {} - # Ensure peps are refreshed the first time this is checked - self.last_refreshed_peps: datetime = datetime.min.replace(tzinfo=UTC) - - async def refresh_peps_urls(self) -> None: - """Refresh PEP URLs listing in every 3 hours.""" - # Wait until HTTP client is available - await self.bot.wait_until_ready() - log.trace("Started refreshing PEP URLs.") + self.peps: dict[int, PEPInfo] = {} + self.last_refreshed_peps: datetime | None = None + + async def refresh_pep_data(self) -> None: + """Refresh PEP data.""" + # Putting this first should prevent any race 
conditions self.last_refreshed_peps = datetime.now(tz=UTC) - async with self.bot.http_session.get( - PEPS_LISTING_API_URL, - headers=GITHUB_API_HEADERS - ) as resp: + log.trace("Started refreshing PEP data.") + async with self.bot.http_session.get(PEP_API_URL) as resp: if resp.status != 200: - log.warning(f"Fetching PEP URLs from GitHub API failed with code {resp.status}") + log.warning( + f"Fetching PEP data from PEP API failed with code {resp.status}" + ) return - listing = await resp.json() - log.trace("Got PEP URLs listing from GitHub API") - - for file in listing: - name = file["name"] - if name.startswith("pep-") and name.endswith((".rst", ".txt")): - pep_number = name.replace("pep-", "").split(".")[0] - self.peps[int(pep_number)] = file["download_url"] - - log.info("Successfully refreshed PEP URLs listing.") - - @staticmethod - def get_pep_zero_embed() -> Embed: - """Get information embed about PEP 0.""" - pep_embed = Embed( - title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**", - url="https://peps.python.org/" - ) - pep_embed.set_thumbnail(url=ICON_URL) - pep_embed.add_field(name="Status", value="Active") - pep_embed.add_field(name="Created", value="13-Jul-2000") - pep_embed.add_field(name="Type", value="Informational") + for pep_num, pep_info in listing.items(): + self.peps[int(pep_num)] = pep_info - return pep_embed - - async def validate_pep_number(self, pep_nr: int) -> Embed | None: - """Validate is PEP number valid. 
When it isn't, return error embed, otherwise None.""" - if ( - pep_nr not in self.peps - and (self.last_refreshed_peps + timedelta(minutes=30)) <= datetime.now(tz=UTC) - and len(str(pep_nr)) < 5 - ): - await self.refresh_peps_urls() - - if pep_nr not in self.peps: - log.trace(f"PEP {pep_nr} was not found") - return Embed( - title="PEP not found", - description=f"PEP {pep_nr} does not exist.", - colour=Colour.red() - ) + log.info("Successfully refreshed PEP data.") - return None - - def generate_pep_embed(self, pep_header: dict, pep_nr: int) -> Embed: - """Generate PEP embed based on PEP headers data.""" - # the parsed header can be wrapped to multiple lines, so we need to make sure that is removed - # for an example of a pep with this issue, see pep 500 - title = " ".join(pep_header["Title"].split()) - # Assemble the embed - pep_embed = Embed( - title=f"**PEP {pep_nr} - {title}**", - url=f"{BASE_PEP_URL}{pep_nr:04}", + def generate_pep_embed(self, pep: PEPInfo) -> Embed: + """Generate PEP embed.""" + embed = Embed( + title=f"**PEP {pep['number']} - {pep['title']}**", + url=pep["url"], ) + embed.set_thumbnail(url=ICON_URL) - pep_embed.set_thumbnail(url=ICON_URL) + fields_to_check = ("status", "python_version", "created", "type") + for field_name in fields_to_check: + if field_value := pep.get(field_name): + field_name = field_name.replace("_", " ").title() + embed.add_field(name=field_name, value=field_value) - # Add the interesting information - fields_to_check = ("Status", "Python-Version", "Created", "Type") - for field in fields_to_check: - # Check for a PEP metadata field that is present but has an empty value - # embed field values can't contain an empty string - if pep_header.get(field, ""): - pep_embed.add_field(name=field, value=pep_header[field]) - - return pep_embed - - @pep_cache(arg_offset=1) - async def get_pep_embed(self, pep_nr: int) -> tuple[Embed, bool]: - """Fetch, generate and return PEP embed. 
Second item of return tuple show does getting success.""" - response = await self.bot.http_session.get(self.peps[pep_nr]) - - if response.status == 200: - log.trace(f"PEP {pep_nr} found") - pep_content = await response.text() - - # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179 - pep_header = HeaderParser().parse(StringIO(pep_content)) - return self.generate_pep_embed(pep_header, pep_nr), True - - log.trace( - f"The user requested PEP {pep_nr}, but the response had an unexpected status code: {response.status}." - ) - return Embed( - title="Unexpected error", - description="Unexpected HTTP error during PEP search. Please let us know.", - colour=Colour.red() - ), False + return embed @command(name="pep", aliases=("get_pep", "p")) async def pep_command(self, ctx: Context, pep_number: int) -> None: """Fetches information about a PEP and sends it to the channel.""" - # Trigger typing in chat to show users that bot is responding - await ctx.typing() + # Refresh the PEP data up to every hour, as e.g. the PEP status might have changed. + if ( + self.last_refreshed_peps is None or ( + (self.last_refreshed_peps + timedelta(hours=1)) <= datetime.now(tz=UTC) + and len(str(pep_number)) < 5 + ) + ): + await self.refresh_pep_data() - # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs. - if pep_number == 0: - pep_embed = self.get_pep_zero_embed() - success = True - else: - success = False - if not (pep_embed := await self.validate_pep_number(pep_number)): - pep_embed, success = await self.get_pep_embed(pep_number) - - await ctx.send(embed=pep_embed) - if success: - log.trace(f"PEP {pep_number} getting and sending finished successfully. Increasing stat.") - self.bot.stats.incr(f"pep_fetches.{pep_number}") + if pep := self.peps.get(pep_number): + embed = self.generate_pep_embed(pep) else: - log.trace(f"Getting PEP {pep_number} failed. 
Error embed sent.") + log.trace(f"PEP {pep_number} was not found") + embed = Embed( + title="PEP not found", + description=f"PEP {pep_number} does not exist.", + colour=Colour.red(), + ) + + await ctx.send(embed=embed) async def setup(bot: Bot) -> None: diff --git a/poetry.lock b/poetry.lock index 88bbc1904..b853a89a9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -574,22 +574,27 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "deepdiff" -version = "7.0.1" +version = "8.5.0" description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "deepdiff-7.0.1-py3-none-any.whl", hash = "sha256:447760081918216aa4fd4ca78a4b6a848b81307b2ea94c810255334b759e1dc3"}, - {file = "deepdiff-7.0.1.tar.gz", hash = "sha256:260c16f052d4badbf60351b4f77e8390bee03a0b516246f6839bc813fb429ddf"}, + {file = "deepdiff-8.5.0-py3-none-any.whl", hash = "sha256:d4599db637f36a1c285f5fdfc2cd8d38bde8d8be8636b65ab5e425b67c54df26"}, + {file = "deepdiff-8.5.0.tar.gz", hash = "sha256:a4dd3529fa8d4cd5b9cbb6e3ea9c95997eaa919ba37dac3966c1b8f872dc1cd1"}, ] [package.dependencies] -ordered-set = ">=4.1.0,<4.2.0" +orderly-set = ">=5.4.1,<6" [package.extras] -cli = ["click (==8.1.7)", "pyyaml (==6.0.1)"] +cli = ["click (>=8.1.0,<8.2.0)", "pyyaml (>=6.0.0,<6.1.0)"] +coverage = ["coverage (>=7.6.0,<7.7.0)"] +dev = ["bump2version (>=1.0.0,<1.1.0)", "ipdb (>=0.13.0,<0.14.0)", "jsonpickle (>=4.0.0,<4.1.0)", "nox (==2025.5.1)", "numpy (>=2.0,<3.0) ; python_version < \"3.10\"", "numpy (>=2.2.0,<2.3.0) ; python_version >= \"3.10\"", "orjson (>=3.10.0,<3.11.0)", "pandas (>=2.2.0,<2.3.0)", "polars (>=1.21.0,<1.22.0)", "python-dateutil (>=2.9.0,<2.10.0)", "tomli (>=2.2.0,<2.3.0)", "tomli-w (>=1.2.0,<1.3.0)"] +docs = ["Sphinx (>=6.2.0,<6.3.0)", "sphinx-sitemap (>=2.6.0,<2.7.0)", "sphinxemoji (>=0.3.0,<0.4.0)"] optimize = 
["orjson"] +static = ["flake8 (>=7.1.0,<7.2.0)", "flake8-pyproject (>=1.2.3,<1.3.0)", "pydantic (>=2.10.0,<2.11.0)"] +test = ["pytest (>=8.3.0,<8.4.0)", "pytest-benchmark (>=5.1.0,<5.2.0)", "pytest-cov (>=6.0.0,<6.1.0)", "python-dotenv (>=1.0.0,<1.1.0)"] [[package]] name = "discord-py" @@ -815,31 +820,31 @@ files = [ [[package]] name = "h11" -version = "0.14.0" +version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, ] [[package]] name = "httpcore" -version = "1.0.7" +version = "1.0.9" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, - {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, ] [package.dependencies] certifi = "*" -h11 = ">=0.13,<0.15" +h11 = ">=0.16" [package.extras] asyncio = ["anyio (>=4.0,<5.0)"] @@ -1191,14 +1196,14 @@ source = ["Cython (>=3.0.11,<3.1.0)"] [[package]] name = "markdownify" -version = "0.14.1" +version = "1.1.0" description = "Convert HTML to markdown." optional = false python-versions = "*" groups = ["main"] files = [ - {file = "markdownify-0.14.1-py3-none-any.whl", hash = "sha256:4c46a6c0c12c6005ddcd49b45a5a890398b002ef51380cd319db62df5e09bc2a"}, - {file = "markdownify-0.14.1.tar.gz", hash = "sha256:a62a7a216947ed0b8dafb95b99b2ef4a0edd1e18d5653c656f68f03db2bfb2f1"}, + {file = "markdownify-1.1.0-py3-none-any.whl", hash = "sha256:32a5a08e9af02c8a6528942224c91b933b4bd2c7d078f9012943776fc313eeef"}, + {file = "markdownify-1.1.0.tar.gz", hash = "sha256:449c0bbbf1401c5112379619524f33b63490a8fa479456d41de9dc9e37560ebd"}, ] [package.dependencies] @@ -1333,20 +1338,17 @@ files = [ ] [[package]] -name = "ordered-set" -version = "4.1.0" -description = "An OrderedSet is a custom MutableSet that remembers its order, so that every" +name = "orderly-set" +version = "5.4.1" +description = "Orderly set" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "ordered-set-4.1.0.tar.gz", hash = "sha256:694a8e44c87657c59292ede72891eb91d34131f6531463aab3009191c77364a8"}, - {file = "ordered_set-4.1.0-py3-none-any.whl", hash = 
"sha256:046e1132c71fcf3330438a539928932caf51ddbc582496833e23de611de14562"}, + {file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"}, + {file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"}, ] -[package.extras] -dev = ["black", "mypy", "pytest"] - [[package]] name = "packaging" version = "24.2" @@ -2758,4 +2760,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.1" python-versions = "3.12.*" -content-hash = "ab706b41230a6e46d4aaa098f0fc65de8802889995d047a6f8df61cacdceb5df" +content-hash = "daca3f3e3d76a4df328b8b923301b699eeb1640f2b73349bbc2522b7cdd497c3" diff --git a/pyproject.toml b/pyproject.toml index c8858057d..4c32ae488 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,11 +16,11 @@ arrow = "1.3.0" beautifulsoup4 = "4.12.3" colorama = { version = "0.4.6", markers = "sys_platform == 'win32'" } coloredlogs = "15.0.1" -deepdiff = "7.0.1" +deepdiff = "8.5.0" emoji = "2.14.1" feedparser = "6.0.11" lxml = "5.3.1" -markdownify = "0.14.1" +markdownify = "1.1.0" pydantic = "2.10.6" pydantic-settings = "2.8.1" python-dateutil = "2.9.0.post0" |