Maxime Réaux 2026-04-16 10:04:58 +02:00
parent 61d7f6b646
commit 186492de85
2 changed files with 137 additions and 72 deletions


@@ -6,14 +6,24 @@ from urllib.parse import urlparse, parse_qs, unquote
import unicodedata
# --------------------------------------------------
# PATHS
# CONFIG
# --------------------------------------------------
PAGES_DIR = Path("../output/cleaned_pages")
REGISTRY_PATH = Path("../output/equivalence_registry.json")
OUTPUT_DIR = Path("../output/link_scan")
PAGES_DIR = Path("../output_ok/cleaned_pages")
REGISTRY_PATH = Path("../output_ok/equivalence_registry.json")
OUTPUT_DIR = Path("../output_ok/link_scan")
OUTPUT_DIR.mkdir(exist_ok=True)
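# Namespace prefixes to skip. These are matched against normalize_title()
# output, which casefolds the title but keeps the ":" namespace separator.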
IGNORED_PREFIXES = (
    "file:",
    "image:",
    "category:",
    "template:",
    "special:",
    "help:",
    "user:",
    "talk:",
)
# --------------------------------------------------
# LOAD REGISTRY
@@ -34,88 +44,68 @@ def normalize_title(title: str) -> str:
    if not title:
        return ""
    title = title.strip()
    title = unquote(title)
    title = Path(title).stem
    title = unicodedata.normalize("NFKC", title)
    title = title.replace("_", " ")
    title = title.replace("‘", "'").replace("’", "'").replace("“", '"').replace("”", '"')
    title = re.sub(r"\s+", " ", title)
    return title.casefold()
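# Illustrative behaviour (the titles below are hypothetical examples, not
# taken from the wiki being scanned):
#   normalize_title("Main_Page")         -> "main page"
#   normalize_title("L%27%C3%89tranger") -> "l'étranger"  (percent-decoded, casefolded)
#   normalize_title("Foo__Bar.html")     -> "foo bar"     (Path(...).stem drops ".html")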
# -------------------------
# Extract MediaWiki target
# -------------------------
def extract_mediawiki_target(href: str):
    if not href:
        return None
    # ignore anchors
    if href.startswith("#"):
        return None
    parsed = urlparse(href)
    # external link
    if parsed.scheme in ("http", "https"):
        return None
    path = parsed.path or ""
    # /wiki/Page_Name
    if "/wiki/" in path:
        return path.split("/wiki/", 1)[1]
    # index.php?title=Page
    if "index.php" in path:
        qs = parse_qs(parsed.query)
        if "title" in qs:
            return qs["title"][0]
    # fallback: filename-like path
    return Path(path).stem
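# Illustrative behaviour (hypothetical hrefs):
#   extract_mediawiki_target("/wiki/Main_Page")            -> "Main_Page"
#   extract_mediawiki_target("/index.php?title=Main_Page") -> "Main_Page"
#   extract_mediawiki_target("https://example.org/page")   -> None  (external)
#   extract_mediawiki_target("#References")                -> None  (anchor only)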
# -------------------------
# Ignore unwanted namespaces
# -------------------------
IGNORED_PREFIXES = (
    "file:",
    "image:",
    "template:",
    "special:",
    "help:",
    "user:",
    "talk:",
)
def is_ignored_namespace(title_norm: str):
    return title_norm.startswith(IGNORED_PREFIXES)
# -------------------------
# Extract article links
# -------------------------
def extract_article_links(soup):
    content = soup.find("div", id="mw-content-text")
    if not content:
        return []
    links = []
    for a in content.select("a[href]"):
        # ignore navboxes / metadata
        if a.find_parent(class_="navbox"):
            continue
        href = a.get("href")
        links.append(href)
        links.append({
            "href": a.get("href"),
            "title": a.get("title"),
            "text": a.get_text(strip=True),
        })
    return links
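# Illustrative return value in the new version (hypothetical link):
#   [{"href": "/wiki/Main_Page", "title": "Main Page", "text": "main page"}]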
def resolve_link(raw_target, title_attr):
    candidates = []
    if title_attr:
        candidates.append(title_attr)
    if raw_target:
        candidates.append(raw_target)
    for candidate in candidates:
        norm = normalize_title(candidate)
        if not norm:
            continue
        if is_ignored_namespace(norm):
            return None, "ignored"
        if norm in equivalences:
            return equivalences[norm], "equivalence"
        filename = norm.replace(" ", "_") + ".html"
        if filename in valid_targets:
            return filename, "direct"
    return None, "unresolved"
# --------------------------------------------------
# MAIN SCAN
@@ -123,43 +113,29 @@ def extract_article_links(soup):
resolved_links = []
unresolved_links = []
files = list(PAGES_DIR.glob("*.html"))
print(f"{len(files)} pages to analyze")
for i, file_path in enumerate(files, 1):
    html = file_path.read_text(encoding="utf-8", errors="ignore")
    soup = BeautifulSoup(html, "html.parser")
    links = extract_article_links(soup)
    for href in links:
        raw_target = extract_mediawiki_target(href)
        norm = normalize_title(raw_target)
        if not norm:
            continue
        if is_ignored_namespace(norm):
            continue
    for link in links:
        raw_target = extract_mediawiki_target(link["href"])
        resolved, method = resolve_link(raw_target, link["title"])
        entry = {
            "source": file_path.name,
            "href": href,
            "normalized": norm,
            "href": link["href"],
            "title": link["title"],
            "method": method,
        }
        resolved = equivalences.get(norm)
        if resolved:
            entry["resolved_title"] = resolved
            entry["resolved"] = resolved
            resolved_links.append(entry)
        else:
            entry["raw_target"] = raw_target
            unresolved_links.append(entry)
    if i % 100 == 0:
    if i % 200 == 0:
        print(f"{i}/{len(files)} analyzed")
# --------------------------------------------------