import time
from typing import Dict, List, Optional
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

BASE = "https://www.moef.go.kr"
LIST_URL = f"{BASE}/mi/orgnzt/org.do"  # ?bbsId=...&menuNo=...

def _req(url: str, ua: str, params: Optional[dict] = None) -> requests.Response:
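    """GET a page with a custom User-Agent; raises on non-2xx responses."""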
    headers = {
        "User-Agent": ua or "govbot/1.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    }
    r = requests.get(url, headers=headers, params=params, timeout=20)
    r.raise_for_status()
    return r

def _clean(s: str) -> str:
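    """Collapse runs of whitespace into single spaces; tolerates None."""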
    return " ".join((s or "").split())

def get_org_links(ua: str) -> List[Dict[str, str]]:
    """
    목록 페이지에서 a[href*="orgId="] 수집
      - href: 상세 경로 (/mi/orgnzt/orgDetail.do?orgId=...)
      - title: a의 title 속성(없으면 텍스트)
    """
    res = _req(LIST_URL, ua, params={
        "bbsId": "MOSFBBS_000000000097",
        "menuNo": "9040100",
    })
    soup = BeautifulSoup(res.text, "html.parser")
    out: List[Dict[str, str]] = []
    seen = set()

    for a in soup.select('a[href*="orgId="]'):
        href = a.get("href") or ""
        title = a.get("title") or _clean(a.get_text(" ", strip=True))
        if not href:
            continue
        full = urljoin(BASE, href)
        if full in seen:
            continue
        seen.add(full)
        out.append({"link": full, "title": _clean(title)})

    return out

def parse_org_page(ua: str, url: str) -> List[Dict[str, str]]:
    """
    상세 페이지에서 표 파싱
      td1: 부서, td2: 이름, td3: 직위, td4: 전화, td5: 담당업무
    """
    res = _req(url, ua)
    soup = BeautifulSoup(res.text, "html.parser")
    rows = soup.select("table tr")
    out: List[Dict[str, str]] = []

    for tr in rows:
        tds = tr.find_all("td")
        if len(tds) < 5:  # skips header rows (th-only) and malformed rows
            continue
        department = _clean(tds[0].get_text(" ", strip=True))
        name = _clean(tds[1].get_text(" ", strip=True))
        position = _clean(tds[2].get_text(" ", strip=True))
        phone = _clean(tds[3].get_text(" ", strip=True))
        resp = _clean(tds[4].get_text(" ", strip=True))

        out.append({
            "department": department,
            "name": name,
            "position": position,
            "phone": phone,
            "task": resp,  # DB 컬럼명에 맞춰 'task'로 저장
        })

    # be polite to the server
    time.sleep(0.3)
    return out
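

# A minimal usage sketch: run the module as a script to crawl every org page.
# The contact URL in the UA string is a placeholder (assumption), not part of
# the original module.
if __name__ == "__main__":
    ua = "govbot/1.0 (+https://example.org/contact)"  # hypothetical contact URL
    links = get_org_links(ua)
    print(f"{len(links)} org pages found")
    rows: List[Dict[str, str]] = []
    for item in links:
        rows.extend(parse_org_page(ua, item["link"]))  # sleeps 0.3 s per page
    print(f"{len(rows)} staff rows scraped")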
