import datetime
import functools
import json
import tarfile
import tempfile
import typing
from io import BytesIO
from pathlib import Path

import frontmatter
import httpx
import markdown
import yaml
from django.http import Http404
from django.utils import timezone
from markdown.extensions.toc import TocExtension

from pydis_site import settings
from .models import Commit, Tag

TAG_CACHE_TTL = datetime.timedelta(hours=1)


def github_client(**kwargs) -> httpx.Client:
    """Get a client to access the GitHub API with important settings pre-configured."""
    client = httpx.Client(
        base_url=settings.GITHUB_API,
        follow_redirects=True,
        timeout=settings.TIMEOUT_PERIOD,
        **kwargs
    )
    if settings.GITHUB_TOKEN:  # pragma: no cover
        if not client.headers.get("Authorization"):
            client.headers = {"Authorization": f"token {settings.GITHUB_TOKEN}"}

    return client
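
# Illustrative usage of the helper above (an assumption for documentation purposes,
# not part of this module's API): the returned httpx.Client behaves like any other
# client, so callers can use it as a context manager to make sure connections close.
#
#     with github_client() as client:
#         resp = client.get("/rate_limit")  # path is resolved against settings.GITHUB_API
#         resp.raise_for_status()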


def get_category(path: Path) -> dict[str, str]:
    """Load category information by name from _info.yml."""
    if not path.is_dir():
        raise Http404("Category not found.")

    return yaml.safe_load(path.joinpath("_info.yml").read_text(encoding="utf-8"))


def get_categories(path: Path) -> dict[str, dict]:
    """Get information for all categories."""
    categories = {}
    for item in path.iterdir():
        if item.is_dir():
            categories[item.name] = get_category(item)

    return categories


@functools.cache
def get_tags_static() -> list[Tag]:
    """
    Fetch tag information in static builds.

    This also includes some fake tags to preview the tag groups feature.
    This will return a cached value, so it should only be used for static builds.
    """
    tags = fetch_tags()

    for tag in tags[3:5]:  # pragma: no cover
        tag.group = "very-cool-group"
    return tags


def fetch_tags() -> list[Tag]:
    """
    Fetch tag data from the GitHub API.

    The entire repository is downloaded and extracted locally because
    getting file content would require one request per file, and can get rate-limited.
    """
    client = github_client()

    # Grab metadata
    metadata = client.get("/repos/python-discord/bot/contents/bot/resources")
    metadata.raise_for_status()

    hashes = {}
    for entry in metadata.json():
        if entry["type"] == "dir":
            # Tag group
            files = client.get(entry["url"])
            files.raise_for_status()
            files = files.json()
        else:
            files = [entry]

        for file in files:
            hashes[file["name"]] = file["sha"]

    # Download the files
    tar_file = client.get("/repos/python-discord/bot/tarball")
    tar_file.raise_for_status()
    client.close()

    tags = []
    with tempfile.TemporaryDirectory() as folder:
        with tarfile.open(fileobj=BytesIO(tar_file.content)) as repo:
            included = []
            for file in repo.getmembers():
                if "/bot/resources/tags" in file.path:
                    included.append(file)
            repo.extractall(folder, included)

        for tag_file in Path(folder).rglob("*.md"):
            name = tag_file.name
            group = None
            if tag_file.parent.name != "tags":
                # Tags in sub-folders are considered part of a group
                group = tag_file.parent.name

            tags.append(Tag(
                name=name.removesuffix(".md"),
                sha=hashes[name],
                group=group,
                body=tag_file.read_text(encoding="utf-8"),
                last_commit=None,
            ))

    return tags
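
# Sketch of the mapping performed above, using a hypothetical repository layout
# (the file and group names are made up for illustration):
#
#     bot/resources/tags/foo.md          -> Tag(name="foo", group=None)
#     bot/resources/tags/my-group/bar.md -> Tag(name="bar", group="my-group")
#
# The `sha` of each tag comes from the contents listing, while the `body` is read
# from the extracted tarball.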


def set_tag_commit(tag: Tag) -> None:
    """Fetch commit information from the API, and save it for the tag."""
    if settings.STATIC_BUILD:  # pragma: no cover
        # Static builds request every page during the build, which can hit the rate limit.
        # Instead, we return some fake data.
        tag.last_commit = Commit(
            sha="68da80efc00d9932a209d5cccd8d344cec0f09ea",
            message="Initial Commit\n\nTHIS IS FAKE DEMO DATA",
            date=datetime.datetime(2018, 2, 3, 12, 20, 26, tzinfo=datetime.timezone.utc),
            authors=json.dumps([{"name": "Joseph", "email": "[email protected]"}]),
        )
        return

    path = "/bot/resources/tags"
    if tag.group:
        path += f"/{tag.group}"
    path += f"/{tag.name}.md"

    # Fetch and set the commit
    with github_client() as client:
        data = client.get("/repos/python-discord/bot/commits", params={"path": path})
        data.raise_for_status()
        data = data.json()[0]

    commit = data["commit"]
    author, committer = commit["author"], commit["committer"]

    date = datetime.datetime.strptime(committer["date"], settings.GITHUB_TIMESTAMP_FORMAT)
    date = date.replace(tzinfo=datetime.timezone.utc)

    if author["email"] == committer["email"]:
        authors = [author]
    else:
        authors = [author, committer]

    commit_obj, _ = Commit.objects.get_or_create(
        sha=data["sha"],
        message=commit["message"],
        date=date,
        authors=json.dumps(authors),
    )
    tag.last_commit = commit_obj
    tag.save()
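
# Illustrative, trimmed shape of the commits-endpoint response consumed above; only
# the fields the code actually reads are shown, everything else is ignored:
#
#     [{"sha": "...",
#       "commit": {"message": "...",
#                  "author": {"name": "...", "email": "...", "date": "..."},
#                  "committer": {"name": "...", "email": "...", "date": "..."}}}]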


def record_tags(tags: list[Tag]) -> None:
    """Sync the database with an updated set of tags."""
    # Remove entries which no longer exist
    Tag.objects.exclude(name__in=[tag.name for tag in tags]).delete()

    # Insert/update the tags
    for tag in tags:
        try:
            old_tag = Tag.objects.get(name=tag.name)
        except Tag.DoesNotExist:
            # The tag is not in the database yet,
            # pretend its previous state is the current state
            old_tag = tag

        if old_tag.sha == tag.sha and old_tag.last_commit is not None:
            # We still have an up-to-date commit entry
            tag.last_commit = old_tag.last_commit

        tag.save()

    # Drop old, unused commits
    Commit.objects.filter(tag__isnull=True).delete()
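
# Example of the reuse logic above, with hypothetical values: if the stored tag "foo"
# has sha "abc" and a saved last_commit, and the freshly fetched "foo" also has sha
# "abc", the existing Commit row is kept and no extra API call is needed later; if
# the sha differs, last_commit stays None until set_tag_commit refreshes it.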


def get_tags() -> list[Tag]:
    """Return a list of all tags visible to the application, from the cache or API."""
    if settings.STATIC_BUILD:  # pragma: no cover
        last_update = None
    else:
        last_update = (
            Tag.objects.values_list("last_updated", flat=True)
            .order_by("last_updated").first()
        )

    if last_update is None or timezone.now() >= (last_update + TAG_CACHE_TTL):
        # Stale or empty cache
        if settings.STATIC_BUILD:  # pragma: no cover
            tags = get_tags_static()
        else:
            tags = fetch_tags()
            record_tags(tags)

        return tags
    else:
        # Get tags from database
        return Tag.objects.all()
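
# Cache behaviour sketch: with TAG_CACHE_TTL set to one hour, the first call (or any
# call made more than an hour after the oldest `last_updated` value in the Tag table)
# refreshes the cache from the GitHub API; every other call is served from the database.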


def get_tag(path: str, *, skip_sync: bool = False) -> typing.Union[Tag, list[Tag]]:
    """
    Return a tag based on the search location.

    If certain tag data is out of sync (for instance a commit date is missing),
    an extra request will be made to sync the information.

    The tag name and group must match. If the path contains only a single segment,
    it is assumed to be either a group name or the name of a tag without a group.
    If it is a group name, a list of the tags belonging to that group is returned.
    """
    path = path.split("/")
    if len(path) == 2:
        group, name = path
    else:
        name = path[0]
        group = None

    matches = []
    for tag in get_tags():
        if tag.name == name and tag.group == group:
            if tag.last_commit is None and not skip_sync:
                set_tag_commit(tag)
            return tag
        elif tag.group == name and group is None:
            matches.append(tag)

    if matches:
        return matches

    raise Tag.DoesNotExist()
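
# Resolution examples for the lookup above, using hypothetical tag and group names:
#
#     get_tag("traceback")          -> the ungrouped tag named "traceback"
#     get_tag("my-group/traceback") -> the tag "traceback" inside group "my-group"
#     get_tag("my-group")           -> a list of every tag whose group is "my-group"
#
# A path matching neither a tag nor a group raises Tag.DoesNotExist.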


def get_tag_category(
    tags: typing.Optional[list[Tag]] = None, *, collapse_groups: bool
) -> dict[str, dict]:
    """
    Generate context data for `tags`, or all tags if None.

    If `tags` is None, `get_tags` is used to populate the data.
    If `collapse_groups` is True, tags with parent groups are not included in the list,
    and instead the parent itself is included as a single entry with its sub-tags
    in the description.
    """
    if not tags:
        tags = get_tags()

    data = []
    groups = {}

    # Create all the metadata for the tags
    for tag in tags:
        if tag.group is None or not collapse_groups:
            content = frontmatter.parse(tag.body)[1]
            data.append({
                "title": tag.name,
                "description": markdown.markdown(content, extensions=["pymdownx.superfences"]),
                "icon": "fas fa-tag",
            })
        else:
            if tag.group not in groups:
                groups[tag.group] = {
                    "title": tag.group,
                    "description": [tag.name],
                    "icon": "fas fa-tags",
                }
            else:
                groups[tag.group]["description"].append(tag.name)

    # Flatten group description into a single string
    for group in groups.values():
        # If the following string is updated, make sure to update it in the frontend JS as well
        group["description"] = "Contains the following tags: " + ", ".join(group["description"])
        data.append(group)

    # Sort the tags, and return them in the proper format
    return {tag["title"]: tag for tag in sorted(data, key=lambda tag: tag["title"].lower())}


def get_category_pages(path: Path) -> dict[str, dict]:
    """Get all page names and their metadata at a category path."""
    # Special handling for tags
    if path == Path(__file__).parent / "resources/tags":
        return get_tag_category(collapse_groups=True)

    pages = {}
    for item in path.glob("*.md"):
        # Only list page if there is no category with the same name
        if item.is_file() and not item.with_suffix("").is_dir():
            pages[item.stem] = frontmatter.load(item).metadata

    return pages


def get_page(path: Path) -> tuple[str, dict]:
    """Get one specific page."""
    if not path.is_file():
        raise Http404("Page not found.")

    metadata, content = frontmatter.parse(path.read_text(encoding="utf-8"))
    toc_depth = metadata.get("toc", 1)

    md = markdown.Markdown(
        extensions=[
            "extra",
            # Empty string for marker to disable text searching for [TOC]
            # By using a metadata key instead, we save time on long markdown documents
            TocExtension(permalink=True, marker="", toc_depth=toc_depth)
        ]
    )
    html = md.convert(content)

    # Don't set the TOC if the metadata does not specify one
    if "toc" in metadata:
        metadata["toc"] = md.toc

    return str(html), metadata
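
# Illustrative front matter for a page handled above (field names other than "toc"
# are assumptions about how pages might be authored, not requirements of this function):
#
#     ---
#     title: Some page
#     toc: 3
#     ---
#
# Without a "toc" key the page is still rendered, but no table of contents is
# attached to the returned metadata.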