diff --git a/website/build.py b/website/build.py
index f8d79d4a..3e94a2b2 100644
--- a/website/build.py
+++ b/website/build.py
@@ -92,6 +92,10 @@ def category_public_url(category: ParsedSection) -> str:
return f"{SITE_URL}categories/{category['slug']}/"
+def group_path(group_slug: str) -> str:
+ return f"/categories/{group_slug}/"
+
+
def group_public_url(group_slug: str) -> str:
return f"{SITE_URL}categories/{group_slug}/"
@@ -315,11 +319,9 @@ def build(repo_root: Path) -> None:
entries = sort_entries(entries)
category_urls = {cat["name"]: category_path(cat) for cat in categories}
- filter_urls: dict[str, str] = {}
- for cat in categories:
- filter_urls[cat["name"]] = category_path(cat)
+ filter_urls: dict[str, str] = dict(category_urls)
for group in parsed_groups:
- filter_urls[group["name"]] = f"/categories/{group['slug']}/"
+ filter_urls[group["name"]] = group_path(group["slug"])
for entry in entries:
for sub in entry.get("subcategories", []):
filter_urls[sub["value"]] = sub["url"]
@@ -348,7 +350,7 @@ def build(repo_root: Path) -> None:
build_date=build_date.strftime("%B %d, %Y"),
sponsors=sponsors,
category_urls=category_urls,
- filter_urls_json=json.dumps(filter_urls, sort_keys=True),
+    filter_urls_json=json.dumps(filter_urls, sort_keys=True, ensure_ascii=False).replace("</", "<\\/"),
),
encoding="utf-8",
)
diff --git a/website/tests/test_build.py b/website/tests/test_build.py
index 9ea2b617..85c7ea21 100644
--- a/website/tests/test_build.py
+++ b/website/tests/test_build.py
@@ -655,6 +655,35 @@ class TestBuild:
assert data["AI & ML"] == "/categories/ai-ml/"
assert data["Machine Learning > Classical"] == "/categories/machine-learning/classical/"
+ def test_filter_urls_json_escapes_closing_script_tag(self, tmp_path):
+ readme = textwrap.dedent("""\
+ # T
+
+ ---
+
+        ## Sneaky </script>
+
+ - [a](https://example.com) - A.
+
+ # Contributing
+
+ Done.
+ """)
+ self._copy_real_templates(tmp_path)
+ (tmp_path / "README.md").write_text(readme, encoding="utf-8")
+ build(tmp_path)
+
+ site = tmp_path / "website" / "output"
+ index_html = (site / "index.html").read_text(encoding="utf-8")
+
+        marker = '<script id="filter-urls" type="application/json">'
+        start = index_html.index(marker) + len(marker)
+        end = index_html.index("</script>", start)
+ block = index_html[start:end]
+        assert "</script>" not in block
+ data = json.loads(block)
+ assert any("Sneaky" in key for key in data)
+
def test_build_creates_group_pages(self, tmp_path):
readme = textwrap.dedent("""\
# T