path: root/docs/utils.py
"""Utilities used in generating docs."""

import ast
import importlib
import inspect
import typing
from pathlib import Path

import docutils.nodes
import docutils.parsers.rst.states
import releases

PROJECT_ROOT = Path(__file__).parent.parent


def linkcode_resolve(source_url: str, domain: str, info: dict[str, str]) -> typing.Optional[str]:
    """
    Function called by linkcode to get the URL for a given resource.

    For more details, see:
    https://www.sphinx-doc.org/en/master/usage/extensions/linkcode.html#confval-linkcode_resolve
    """
    if domain != "py":
        raise Exception("Unknown domain passed to linkcode function.")

    symbol_name = info["fullname"]

    module = importlib.import_module(info["module"])

    symbol = [module]
    for name in symbol_name.split("."):
        symbol.append(getattr(symbol[-1], name))
        symbol_name = name

    try:
        lines, start = inspect.getsourcelines(symbol[-1])
        end = start + len(lines) - 1  # Inclusive last line, matching `end_lineno` in the AST branch below
    except TypeError:
        # Variables have no source lines of their own; find their assignment by parsing the AST
        source = ast.parse(inspect.getsource(symbol[-2]))
        while isinstance(source.body[0], ast.ClassDef):
            source = source.body[0]

        for ast_obj in source.body:
            if isinstance(ast_obj, ast.Assign):
                names = []
                for target in ast_obj.targets:
                    if isinstance(target, ast.Tuple):
                        names.extend([name.id for name in target.elts])
                    else:
                        names.append(target.id)

                if symbol_name in names:
                    start, end = ast_obj.lineno, ast_obj.end_lineno
                    break
        else:
            raise Exception(f"Could not find symbol `{symbol_name}` in {module.__name__}.")

        _, offset = inspect.getsourcelines(symbol[-2])
        if offset != 0:
            offset -= 1
        start += offset
        end += offset

    file = Path(inspect.getfile(module)).relative_to(PROJECT_ROOT).as_posix()

    url = f"{source_url}/{file}#L{start}"
    if end != start:
        url += f"-L{end}"

    return url
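
# Usage sketch (assumptions, not the project's confirmed setup): Sphinx's ``linkcode`` extension
# looks for a ``linkcode_resolve(domain, info)`` callable in ``conf.py``, so the extra
# ``source_url`` parameter above is typically bound in advance. The import path and repository
# URL below are placeholders.
#
#     import functools
#     import utils
#
#     linkcode_resolve = functools.partial(
#         utils.linkcode_resolve,
#         "https://github.com/<owner>/<repo>/blob/main",
#     )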


def cleanup() -> None:
    """Remove unneeded autogenerated doc files, and clean up others."""
    included = __get_included()

    for file in (PROJECT_ROOT / "docs" / "output").iterdir():
        if file.name in ("botcore.rst", "botcore.exts.rst", "botcore.utils.rst") and file.name in included:
            content = file.read_text(encoding="utf-8").splitlines(keepends=True)

            # Rename the title to be less wordy
            # Example: botcore.exts -> Exts
            title = content[0].split()[0].strip().replace("botcore.", "").replace(".", " ").title()
            title = f"{title}\n{'=' * len(title)}\n\n"
            content = title, *content[3:]

            file.write_text("".join(content), encoding="utf-8")

        elif file.name in included:
            # Shorten the title to just the submodule name, without the parent module path
            # Example: `botcore.regex module` -> `regex`
            lines = file.read_text(encoding="utf-8").splitlines(keepends=True)
            lines[0] = lines[0].replace("module", "").strip().split(".")[-1] + "\n"
            file.write_text("".join(lines), encoding="utf-8")

        else:
            # These are files that have not been explicitly included in the docs via __all__
            print("Deleted file", file.name)
            file.unlink()
            continue

        # Take the opportunity to configure autodoc
        content = file.read_text(encoding="utf-8").replace("undoc-members", "special-members")
        file.write_text(content, encoding="utf-8")
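
# Worked example of the rewrites above (derived from the logic in ``cleanup``, not from the
# generated files themselves):
#
#     botcore.exts.rst:        first line "botcore.exts package"        -> title "Exts"
#     botcore.utils.regex.rst: first line "botcore.utils.regex module"  -> title "regex"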


def __get_included() -> set[str]:
    """Get the set of file names that should be included in the final build."""

    def get_all_from_module(module_name: str) -> set[str]:
        try:
            module = importlib.import_module(module_name)
        except ModuleNotFoundError:
            return set()
        _modules = {module.__name__ + ".rst"}

        if hasattr(module, "__all__"):
            for sub_module in module.__all__:
                _modules.update(get_all_from_module(sub_module))

        return _modules

    return get_all_from_module("botcore")
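
# Illustration (hypothetical package layout, assuming ``__all__`` lists fully qualified submodule
# names, as this helper expects): if ``botcore.__all__`` contains ``botcore.exts`` and
# ``botcore.utils``, and ``botcore.utils.__all__`` contains ``botcore.utils.regex``, the result is
#
#     {"botcore.rst", "botcore.exts.rst", "botcore.utils.rst", "botcore.utils.regex.rst"}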


def reorder_release_entries(release_list: list[releases.Release]) -> None:
    """
    Sort each release's entries in `release_list` based on the entry type.

    This is meant to be used as an override for `releases.reorder_release_entries` to support
    custom types.
    """
    order = {"breaking": 0, "feature": 1, "bug": 2, "support": 3}
    for release in release_list:
        release["entries"].sort(key=lambda entry: order[entry.type])
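
# Sketch of installing the override mentioned in the docstring (how the project actually wires it
# up may differ): monkeypatch the ``releases`` extension from ``conf.py``.
#
#     import releases
#     import utils
#
#     releases.reorder_release_entries = utils.reorder_release_entries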


def emphasized_url(
    name: str, rawtext: str, text: str, lineno: int, inliner: docutils.parsers.rst.states.Inliner, *__
) -> tuple[list, list]:
    """
    Sphinx role to add hyperlinked literals.

    ReST: :literal-url:`Google <https://google.com>`
    Markdown equivalent: [`Google`](https://google.com)

    Refer to https://docutils.sourceforge.io/docs/howto/rst-roles.html for details on the input and output.
    """
    arguments = text.rsplit(maxsplit=1)
    if len(arguments) != 2:
        message = inliner.reporter.error(
            f"`{name}` expects a message and a URL, formatted as: :{name}:`message <url>`",
            line=lineno
        )
        problem = inliner.problematic(text, rawtext, message)
        return [problem], [message]

    message, url = arguments
    url: str = url[1:-1]  # Strip the angle brackets from the start and end of the URL

    literal = docutils.nodes.literal(rawtext, message)
    return [docutils.nodes.reference(rawtext, "", literal, refuri=url)], []
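
# Registration sketch: the docstring implies the role name ``literal-url``. One way to register it
# (an assumption; the project may do this differently) is a ``setup`` hook in ``conf.py``:
#
#     def setup(app):
#         app.add_role("literal-url", emphasized_url)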


def get_recursive_file_uris(folder: Path, match_pattern: str) -> list[str]:
    """Get the URI of any file relative to `folder` which matches the `match_pattern` glob."""
    return [file.relative_to(folder).as_posix() for file in folder.rglob(match_pattern)]
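
# Example call (hypothetical folder and pattern): collect every stylesheet under ``docs/_static``
# as paths relative to that folder.
#
#     css_files = get_recursive_file_uris(PROJECT_ROOT / "docs" / "_static", "*.css")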