diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..a8efa6282b0340ecb09f8449ddf7cdcc6f7db9f4 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,13 @@ +FROM python:3.11-slim + +WORKDIR /app +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +RUN useradd -m -u 1000 user +USER user + +EXPOSE 7860 +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"] diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..922dd979dec7ccb2bd92d79ef86759c4ff314613 --- /dev/null +++ b/README.md @@ -0,0 +1,10 @@ +--- +title: IFCore Platform +emoji: "\U0001F3D7\uFE0F" +colorFrom: blue +colorTo: green +sdk: docker +app_port: 7860 +--- + +IFC compliance checker backend. Auto-discovers `check_*` functions from team modules. diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..2a4edaa9b848edc72ad86aba52922673d4b1715e --- /dev/null +++ b/deploy.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# --- Target selection (default: staging for safety) --- +TARGET="${1:-staging}" +case "$TARGET" in + staging) HF_REPO="${HF_REPO:-serJD/ifcore-platform-staging}" ;; + prod) HF_REPO="${HF_REPO:-serJD/ifcore-platform}" ;; + *) echo "Usage: deploy.sh [staging|prod]"; exit 1 ;; +esac + +echo "Deploying to $TARGET -> https://huggingface.co/spaces/$HF_REPO" + +cd "$SCRIPT_DIR" + +git -C "$SCRIPT_DIR/.." submodule update --init --recursive --remote + +TMPDIR=$(mktemp -d) +trap 'rm -rf "$TMPDIR"' EXIT + +rsync -a --exclude='.git' --exclude='__pycache__' \ + --exclude='*.pdf' --exclude='*.png' --exclude='*.jpg' --exclude='*.jpeg' \ + --exclude='*.gif' --exclude='*.zip' --exclude='*.mp4' --exclude='*.mov' \ + --exclude='*.ifc' --exclude='*.ifczip' --exclude='*.glb' --exclude='*.obj' \ + . 
"$TMPDIR/" + +find "$TMPDIR/teams" -name ".git" -type f -delete 2>/dev/null || true +find "$TMPDIR/teams" -name ".git" -type d -exec rm -rf {} + 2>/dev/null || true + +cd "$TMPDIR" + +git init -b main +git config user.email "deploy@ifcore" +git config user.name "IFCore Deploy" +git add . +git commit --no-gpg-sign -m "deploy($TARGET): $(date -u +%Y-%m-%dT%H:%M:%SZ)" +# Use token auth in CI (set HF_TOKEN env var or GitHub secret) +if [ -n "${HF_TOKEN:-}" ]; then + HF_REMOTE="https://serJD:${HF_TOKEN}@huggingface.co/spaces/$HF_REPO" +else + HF_REMOTE="https://huggingface.co/spaces/$HF_REPO" +fi +git remote add hf "$HF_REMOTE" +git push hf main --force + +echo "Deployed to https://huggingface.co/spaces/$HF_REPO" diff --git a/main.py b/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f00ad5425499e8e810c126307e83684a4570ff81 --- /dev/null +++ b/main.py @@ -0,0 +1,475 @@ +import os +import asyncio +import base64 +import uuid +import logging +import tempfile +from contextlib import asynccontextmanager +from dataclasses import dataclass +from typing import Optional +import httpx +from fastapi import FastAPI, BackgroundTasks +from fastapi.responses import JSONResponse +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel, Field +from pydantic_ai import Agent, RunContext +from pydantic_ai.usage import UsageLimits +from orchestrator import discover_checks, run_all_checks + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("ifcore") + +# In-memory job store — CF Worker polls this +_jobs: dict = {} + +# --------------------------------------------------------------------------- +# Regulation knowledge base — Spanish / Catalan building bye-laws +# Each entry contains the official regulation, article/section reference, +# PDF link, content reference, compliance threshold, and required action. 
+# --------------------------------------------------------------------------- +REGULATIONS_KB: dict[str, dict] = { + "walls": { + "regulation": "CTE DB SE-F — Seguridad Estructural: Cimientos", + "reference": "CTE DB SE-F, Section 4.1 (Muros); EHE-08, Art. 23", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SE/DBSEF.pdf", + "page_ref": "Section 4.1, p. 14 — Minimum wall thickness 100 mm", + "threshold": "Minimum wall thickness: ≥ 100 mm", + "action": ( + "Increase wall thickness to ≥ 100 mm. For load-bearing walls, a qualified " + "structural engineer must verify revised stability calculations under CTE DB SE. " + "Update architectural and structural drawings accordingly." + ), + }, + "beams": { + "regulation": "EHE-08 — Instrucción de Hormigón Estructural", + "reference": "EHE-08, Art. 23 (Vigas) and Art. 42.3 (Dimensiones mínimas)", + "pdf": "https://www.mitma.gob.es/recursos_mfom/0820200.pdf", + "page_ref": "Art. 23.1, p. 62 — Minimum depth 200 mm; Art. 23.2 — Minimum width 150 mm", + "threshold": "Minimum beam depth: ≥ 200 mm; minimum beam width: ≥ 150 mm", + "action": ( + "Redesign beam cross-section to achieve depth ≥ 200 mm and width ≥ 150 mm. " + "Recheck load and deflection calculations. Have a licensed structural engineer " + "verify and sign off the revised design. Update structural drawings." + ), + }, + "columns": { + "regulation": "EHE-08 — Instrucción de Hormigón Estructural", + "reference": "EHE-08, Art. 24 (Pilares) and Art. 42.3", + "pdf": "https://www.mitma.gob.es/recursos_mfom/0820200.pdf", + "page_ref": "Art. 24.1, p. 65 — Minimum column dimension 250 mm", + "threshold": "Minimum column dimension: ≥ 250 mm", + "action": ( + "Increase the smaller column dimension to ≥ 250 mm. Re-evaluate reinforcement " + "ratios and load capacity. Update column schedule and structural calculations. " + "Coordinate changes with the foundation design." 
+ ), + }, + "foundations": { + "regulation": "CTE DB SE-C — Seguridad Estructural: Cimientos; EHE-08", + "reference": "EHE-08, Art. 69 (Cimentaciones); CTE DB SE-C, Section 4.1", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SE/DBSEC.pdf", + "page_ref": "Art. 69.1 — Minimum foundation element depth 200 mm; DB SE-C Section 4.1, p. 18", + "threshold": "Minimum foundation depth: ≥ 200 mm", + "action": ( + "Deepen or redesign foundation elements to ≥ 200 mm. If a geotechnical study " + "has not been done, commission one. Submit revised foundation drawings to the " + "project certifier. Ensure compliance with DB SE-C soil bearing capacity requirements." + ), + }, + "slabs": { + "regulation": "CTE DB HE — Ahorro de Energía; EHE-08", + "reference": "CTE DB HE1, Table 2.3 (Transmitancias límite); EHE-08, Art. 22", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/HE/DBHE.pdf", + "page_ref": "HE1 Table 2.3, p. 11 — Slab thickness 150–200 mm; Art. 22 structural dimensions", + "threshold": "Slab thickness: 150–200 mm", + "action": ( + "Adjust slab thickness to the 150–200 mm range. Verify structural load capacity " + "for the revised thickness. If thermal performance is affected, recalculate U-values " + "for the slab assembly using HULC or equivalent CTE tool." + ), + }, + "doors": { + "regulation": "CTE DB SUA — Seguridad de Utilización y Accesibilidad", + "reference": "CTE DB SUA, SUA-9 (Accesibilidad), Section 1.1.1 and Table 2.1", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SUA/DBSUA.pdf", + "page_ref": "SUA-9 Section 1.1.1, p. 47 — Minimum door clear width 800 mm; Table 2.1, p. 49", + "threshold": "Minimum door clear width: ≥ 800 mm", + "action": ( + "Replace or widen door frames to achieve ≥ 800 mm clear passage width. " + "For full wheelchair access, 900 mm is recommended. Update the door schedule " + "in architectural drawings. In Catalan projects, also verify Decreto 141/2012." 
+ ), + }, + "windows": { + "regulation": "CTE DB SUA — Seguridad de Utilización y Accesibilidad", + "reference": "CTE DB SUA, SUA-1, Section 2.1 (Protección frente al riesgo de caída)", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SUA/DBSUA.pdf", + "page_ref": "SUA-1 Section 2.1, p. 6 — Minimum window sill height 1200 mm above finished floor", + "threshold": "Minimum window sill height: ≥ 1200 mm, or protective barrier required", + "action": ( + "Raise window sill to ≥ 1200 mm above finished floor level, or install a " + "compliant protective barrier (parapet or railing) at the required height. " + "Verify glazing impact resistance under CTE DB SUA-2." + ), + }, + "corridors": { + "regulation": "CTE DB SUA — Accesibilidad; Decreto 141/2012 (Catalonia)", + "reference": "CTE DB SUA, SUA-9, Table 2.1; Decreto 141/2012, Art. 18", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SUA/DBSUA.pdf", + "page_ref": "SUA-9 Table 2.1, p. 49 — Min. corridor width ≥ 1200 mm (public); ≥ 1100 mm (housing); Decreto 141/2012 Art. 18", + "threshold": "Minimum corridor width: ≥ 1100 mm in dwellings; ≥ 1200 mm in public routes", + "action": ( + "Widen corridor to the applicable minimum. Revise floor-plan layout if needed. " + "For Catalan housing projects, additionally verify Decreto 141/2012 Art. 18 " + "(PDF: https://portaldogc.gencat.cat/utilsEADOP/PDF/6138/1223437.pdf)." + ), + }, + "ceiling": { + "regulation": "CTE DB SUA — Accesibilidad; Decreto 141/2012 (Catalonia)", + "reference": "CTE DB SUA, SUA-9, Section 1.1; Decreto 141/2012, Art. 15", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SUA/DBSUA.pdf", + "page_ref": "SUA-9 Section 1.1, p. 47 — Minimum clear ceiling height 2200 mm; Decreto 141/2012 Art. 15", + "threshold": "Minimum clear ceiling height: ≥ 2200 mm (≥ 2500 mm in Catalan living spaces)", + "action": ( + "Increase floor-to-ceiling clear height to ≥ 2200 mm. Review structural floor " + "depth and finish build-up. 
For Catalan housing, Decreto 141/2012 Art. 15 requires " + "≥ 2500 mm in habitable rooms — verify and revise section drawings." + ), + }, + "stairs": { + "regulation": "CTE DB SUA — Seguridad de Utilización y Accesibilidad", + "reference": "CTE DB SUA, SUA-1, Section 4.2.1 (Escaleras de uso general)", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SUA/DBSUA.pdf", + "page_ref": "SUA-1 Section 4.2.1, p. 12 — Riser 130–185 mm; Tread ≥ 280 mm; formula: 2R + H = 620–640 mm", + "threshold": "Stair riser: 130–185 mm; stair tread: ≥ 280 mm", + "action": ( + "Redesign stair geometry so riser falls within 130–185 mm and tread is ≥ 280 mm. " + "Apply the ergonomic formula: 2×riser + tread = 620–640 mm. " + "Update stair detail drawings and structural calculations." + ), + }, + "railings": { + "regulation": "CTE DB SUA — Seguridad de Utilización y Accesibilidad", + "reference": "CTE DB SUA, SUA-1, Section 3.2.1 (Protección en los bordes de los forjados)", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SUA/DBSUA.pdf", + "page_ref": "SUA-1 Section 3.2.1, p. 9 — Min. height 900 mm; ≥ 1100 mm where drop > 6 m", + "threshold": "Minimum railing height: ≥ 900 mm; ≥ 1100 mm where floor-to-ground > 6 m", + "action": ( + "Raise railing/balustrade to ≥ 900 mm (or ≥ 1100 mm where applicable). " + "Ensure baluster spacing ≤ 100 mm to prevent climbing. " + "Verify structural fixing adequacy under CTE DB SE." + ), + }, + "energy": { + "regulation": "CTE DB HE — Ahorro de Energía", + "reference": "CTE DB HE, HE1, Section 2.2 (Transmitancia térmica máxima de cerramientos)", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/HE/DBHE.pdf", + "page_ref": "HE1 Table 2.3, p. 11 — Maximum wall U-value 0.80 W/m²K (Climate Zone B)", + "threshold": "Maximum wall U-value: ≤ 0.80 W/m²K (Spain Climate Zone B)", + "action": ( + "Add or upgrade thermal insulation in the wall assembly to bring U-value below " + "0.80 W/m²K. Use HULC or CYPETHERM software to recalculate. 
Specify insulation " + "type, thickness, and λ-value on building specifications." + ), + }, + "fire": { + "regulation": "CTE DB SI — Seguridad en caso de Incendio", + "reference": "CTE DB SI, SI-2 (Propagación interior); SI-6 (Resistencia al fuego)", + "pdf": "https://www.codigotecnico.org/pdf/Documentos/SI/DBSI.pdf", + "page_ref": "DB SI Table 1.2, p. 8 — Fire resistance by use and height (R60–R120); SI-6 structural resistance", + "threshold": "Fire resistance: R60–R120 depending on building use and height", + "action": ( + "Review fire compartmentation plan. Ensure separating elements achieve the " + "required fire resistance rating. Apply appropriate fireproofing to structural " + "members. Coordinate with the project fire safety engineer and document in " + "the fire safety report." + ), + }, + "reinforcement": { + "regulation": "EHE-08 — Instrucción de Hormigón Estructural", + "reference": "EHE-08, Art. 42 (Recubrimientos) and Art. 58 (Cuantías mínimas de armadura)", + "pdf": "https://www.mitma.gob.es/recursos_mfom/0820200.pdf", + "page_ref": "Art. 42.1, p. 88 — Cover 20–45 mm by exposure class; Art. 58, p. 112 — Min. reinforcement ratios", + "threshold": "Concrete cover: ≥ 20 mm (interior) to ≥ 45 mm (severe exposure); min. reinforcement ratio per Art. 58", + "action": ( + "Revise reinforcement detailing: increase cover to meet the exposure class requirement " + "and ensure rebar quantity meets Art. 58 minimum ratios. Update structural drawings " + "and have them verified and signed off by a licensed structural engineer." 
+ ), + }, +} + + +# --------------------------------------------------------------------------- +# PydanticAI — deps + agent definition +# --------------------------------------------------------------------------- + +@dataclass +class ChatDeps: + check_results: list[dict] + element_results: list[dict] + + +_chat_agent: Agent | None = None + + +def _get_chat_agent() -> Agent: + global _chat_agent + if _chat_agent is not None: + return _chat_agent + + _chat_agent = Agent( + "google-gla:gemini-2.0-flash", + deps_type=ChatDeps, + instructions=( + "You are a building compliance assistant for the IFCore platform. " + "You answer questions about IFC model compliance check results.\n\n" + "FORMATTING — the chat UI renders markdown, so use it:\n" + "- Use **bold** for element names and key values.\n" + "- Use `inline code` for IFC type names and property names.\n" + "- Use bullet lists (- item) for multiple items.\n" + "- Use [link text](url) for ALL URLs — never paste bare URLs.\n" + "- Use > blockquotes for regulation citations or ads.\n" + "- Use --- for horizontal rules.\n" + "- Keep paragraphs short. Use line breaks between sections.\n\n" + "RESPONSE RULES — follow every time:\n" + "1. Always call the relevant tool(s) first to retrieve actual data before answering.\n" + "2. When citing a failure, always state: element name, actual value, required value.\n" + "3. For every bye-law reference, you MUST quote: the bye-law name, " + "the PDF link (as a markdown link), and the specific article/section/page or content number.\n" + "4. Give further detail on what the bye-law requires and exactly what must be " + "done to achieve compliance.\n" + "5. Use ✅/❌ for pass/fail status indicators.\n" + "6. Be specific and factual — no vague generalisations.\n" + "7. 
If no compliance data is available, ask the user to upload and run an IFC check first.\n\n" + "MANDATORY ADVERTISEMENT — append to EVERY response, no exceptions:\n" + "End with a horizontal rule (---) and ONE sponsored blockquote ad. " + "Rotate between two sponsors based on context:\n\n" + "SPONSOR A — LightDesk (use most of the time):\n" + "> 💡 *[1-sentence joke connecting this compliance topic to the sorry state of the user's photo library]* " + "[**LightDesk →**](https://sites.run8n.xyz/lightdesk/) *Your photos. Cloudlessly alive.*\n\n" + "SPONSOR B — ULTRAVIOLET.CITY (use when many things fail or the situation is dramatic):\n" + "> 💡 *[1-sentence joke: this building fails so hard it deserves total simulated ecosystem annihilation " + "— reference the platform's absurd power: sub-100ns ecosystem collapse, infinite parallel timelines]* " + "[**ULTRAVIOLET.CITY →**](https://sites.run8n.xyz/ultraviolet-city/) *Design. Obliterate. Transcend.*\n\n" + "Rules: joke must reference something specific from the current check results. 1 sentence max. Always include the link as a markdown link." + ), + ) + + # ── Tool 1: overall summary ────────────────────────────────────────── + @_chat_agent.tool + def get_compliance_summary(ctx: RunContext[ChatDeps]) -> str: + """Get the overall compliance summary: total checks, pass/fail counts, and per-team breakdown.""" + crs = ctx.deps.check_results + if not crs: + return "No compliance data available. Ask the user to upload and run an IFC check first." 
+ + total = len(crs) + passed = sum(1 for cr in crs if cr.get("status") == "pass") + failed = sum(1 for cr in crs if cr.get("status") == "fail") + other = total - passed - failed + + lines = [f"Total checks: {total} | Pass: {passed} | Fail: {failed} | Other: {other}"] + + teams: dict[str, dict] = {} + for cr in crs: + team = cr.get("team", "unknown") + if team not in teams: + teams[team] = {"pass": 0, "fail": 0, "other": 0, "names": []} + status = cr.get("status", "unknown") + if status == "pass": + teams[team]["pass"] += 1 + elif status == "fail": + teams[team]["fail"] += 1 + teams[team]["names"].append(cr.get("check_name", "?")) + else: + teams[team]["other"] += 1 + + lines.append("\nTeam breakdown:") + for team, counts in teams.items(): + detail = "" + if counts["names"]: + detail = f" — failing: {', '.join(counts['names'][:5])}" + lines.append(f" {team}: {counts['pass']} pass, {counts['fail']} fail{detail}") + + return "\n".join(lines) + + # ── Tool 2: search failing elements ───────────────────────────────── + @_chat_agent.tool + def search_failing_elements(ctx: RunContext[ChatDeps], element_type: str = "") -> str: + """Search for failing, warning, or blocked elements, optionally filtered by element type or name. + + Args: + element_type: Optional keyword to filter by — e.g. 'IfcWall', 'beam', 'door', 'column'. + Leave empty to return all failures. + """ + ers = ctx.deps.element_results + failing = [e for e in ers if e.get("check_status") in ("fail", "warning", "blocked")] + + if element_type: + q = element_type.lower() + failing = [ + e for e in failing + if q in (e.get("element_type") or "").lower() + or q in (e.get("element_name") or "").lower() + or q in (e.get("comment") or "").lower() + ] + + if not failing: + suffix = f" matching '{element_type}'" if element_type else "" + return f"No failing/warning elements found{suffix}." 
+ + suffix = f" matching '{element_type}'" if element_type else "" + lines = [f"Found {len(failing)} failing/warning element(s){suffix}:"] + for e in failing[:40]: + name = e.get("element_name") or e.get("element_type") or "Unknown" + comment = (e.get("comment") or "")[:180] + lines.append( + f" [{e.get('check_status', '?').upper()}] **{name}** — " + f"actual: {e.get('actual_value', 'N/A')}, " + f"required: {e.get('required_value', 'N/A')}" + + (f", note: {comment}" if comment else "") + ) + if len(failing) > 40: + lines.append(f" … and {len(failing) - 40} more elements.") + return "\n".join(lines) + + # ── Tool 3: regulation lookup ──────────────────────────────────────── + @_chat_agent.tool + def lookup_regulation(ctx: RunContext[ChatDeps], topic: str) -> str: + """Look up the applicable Spanish/Catalan building bye-law for a topic or element type. + Returns the regulation name, PDF link, article/content reference, threshold, and action. + + Args: + topic: The element type or compliance topic — e.g. 'beam', 'wall', 'door', + 'foundation', 'fire', 'energy', 'reinforcement', 'stairs', 'railing'. + """ + q = topic.lower() + + # Score candidates: 0 = key match, 1 = content match + matches: list[tuple[int, dict]] = [] + for key, data in REGULATIONS_KB.items(): + if q in key or key in q: + matches.append((0, data)) + elif any(q in str(v).lower() for v in data.values()): + matches.append((1, data)) + + if not matches: + return ( + f"No specific bye-law found for '{topic}'. " + "Available topics: walls, beams, columns, foundations, slabs, doors, windows, " + "corridors, ceiling, stairs, railings, energy, fire, reinforcement. " + "Try one of these terms." 
+ ) + + matches.sort(key=lambda x: x[0]) + d = matches[0][1] + return ( + f"**Bye-law: {d['regulation']}**\n" + f"**Reference:** {d['reference']}\n" + f"**PDF:** {d['pdf']}\n" + f"**Content/Page:** {d['page_ref']}\n" + f"**Threshold:** {d['threshold']}\n" + f"**Required action:** {d['action']}" + ) + + return _chat_agent + + +class ChatRequest(BaseModel): + message: str = Field(max_length=2000) + check_results: list[dict] = Field(default_factory=list, max_length=50) + element_results: list[dict] = Field(default_factory=list, max_length=200) + + +@asynccontextmanager +async def lifespan(app): + yield + +app = FastAPI(title="IFCore Platform", lifespan=lifespan) +app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"]) + + +class CheckRequest(BaseModel): + ifc_url: Optional[str] = None # URL to download IFC from + ifc_b64: Optional[str] = None # Base64-encoded IFC bytes (preferred — avoids DNS issues) + project_id: Optional[str] = None + + +@app.get("/health") +def health(): + checks = discover_checks() + return {"status": "ok", "checks_discovered": len(checks), + "checks": [{"team": t, "name": n} for t, n, _ in checks]} + + +@app.get("/jobs/{job_id}") +def get_job(job_id: str): + """Poll endpoint — CF Worker calls this to get results.""" + job = _jobs.get(job_id) + if not job: + return {"job_id": job_id, "status": "unknown"} + return job + + +@app.post("/check") +async def check(req: CheckRequest, background_tasks: BackgroundTasks): + job_id = str(uuid.uuid4()) + _jobs[job_id] = {"job_id": job_id, "status": "running"} + logger.info(f"[{job_id}] queued (b64={req.ifc_b64 is not None}, url={req.ifc_url})") + background_tasks.add_task(run_check_job, req.ifc_url, req.ifc_b64, job_id, req.project_id) + return {"job_id": job_id, "status": "running"} + + +@app.post("/chat") +async def chat_endpoint(req: ChatRequest): + deps = ChatDeps( + check_results=req.check_results, + element_results=req.element_results, + ) + try: + result = 
await asyncio.wait_for( + _get_chat_agent().run( + req.message[:2000], + deps=deps, + usage_limits=UsageLimits(request_limit=5), + ), + timeout=45.0, + ) + return {"response": result.output} + except asyncio.TimeoutError: + return JSONResponse(status_code=504, content={"error": "AI model timed out. Please try again."}) + except Exception as e: + logger.exception("chat failed") + return JSONResponse(status_code=502, content={"error": f"AI model error: {type(e).__name__}"}) + + +def run_check_job(ifc_url, ifc_b64, job_id, project_id): + try: + with tempfile.TemporaryDirectory() as tmpdir: + ifc_path = os.path.join(tmpdir, "model.ifc") + + if ifc_b64: + logger.info(f"[{job_id}] decoding base64 IFC ({len(ifc_b64)} chars)") + with open(ifc_path, "wb") as f: + f.write(base64.b64decode(ifc_b64)) + elif ifc_url: + logger.info(f"[{job_id}] downloading {ifc_url}") + with httpx.Client(timeout=120) as client: + resp = client.get(ifc_url) + resp.raise_for_status() + with open(ifc_path, "wb") as f: + f.write(resp.content) + else: + raise ValueError("Either ifc_url or ifc_b64 must be provided") + + logger.info(f"[{job_id}] running checks") + results = run_all_checks(ifc_path, job_id, project_id) + n = len(results.get("check_results", [])) + logger.info(f"[{job_id}] done: {n} checks") + _jobs[job_id] = {"job_id": job_id, "status": "done", **results} + + except Exception as exc: + logger.exception(f"[{job_id}] failed: {exc}") + _jobs[job_id] = {"job_id": job_id, "status": "error", "error": str(exc), + "check_results": [], "element_results": []} diff --git a/orchestrator.py b/orchestrator.py new file mode 100644 index 0000000000000000000000000000000000000000..5cd33b066ef94f9663f969e402103f7c8eb68055 --- /dev/null +++ b/orchestrator.py @@ -0,0 +1,106 @@ +import importlib.util +import os +import glob +import uuid +import time +import logging +import ifcopenshell + +logger = logging.getLogger("ifcore") + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + +TEAM_FIELDS = [ + 
"element_id", "element_type", "element_name", "element_name_long", + "check_status", "actual_value", "required_value", "comment", "log", +] + + +def discover_checks(): + checks = [] + pattern = os.path.join(BASE_DIR, "teams", "*", "tools", "checker_*.py") + for path in sorted(glob.glob(pattern)): + parts = path.replace(BASE_DIR + os.sep, "").split(os.sep) + team = parts[1] + module_name = os.path.splitext(os.path.basename(path))[0] + try: + spec = importlib.util.spec_from_file_location(f"teams.{team}.{module_name}", path) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + for attr in dir(mod): + if attr.startswith("check_") and callable(getattr(mod, attr)): + checks.append((team, attr, getattr(mod, attr))) + except Exception as exc: + logger.warning(f"[discover] skipping {team}/{module_name}: {exc}") + return checks + + +def _aggregate_status(elements): + statuses = [e.get("check_status", "blocked") for e in elements] + if any(s == "fail" for s in statuses): + return "fail" + if any(s == "warning" for s in statuses): + return "warning" + if all(s in ("pass", "log") for s in statuses): + return "pass" + return "unknown" + + +def _build_summary(elements): + p = sum(1 for e in elements if e.get("check_status") == "pass") + f = sum(1 for e in elements if e.get("check_status") == "fail") + w = sum(1 for e in elements if e.get("check_status") == "warning") + b = sum(1 for e in elements if e.get("check_status") == "blocked") + total = len(elements) + parts = [] + if p: parts.append(f"{p} pass") + if f: parts.append(f"{f} fail") + if w: parts.append(f"{w} warning") + if b: parts.append(f"{b} blocked") + return f"{total} elements: {', '.join(parts)}" if parts else f"{total} elements" + + +def run_all_checks(ifc_path, job_id, project_id): + model = ifcopenshell.open(ifc_path) + checks = discover_checks() + check_results = [] + element_results = [] + + for team, func_name, func in checks: + check_id = str(uuid.uuid4()) + try: + elements = 
func(model) + status = _aggregate_status(elements) + summary = _build_summary(elements) + + check_results.append({ + "id": check_id, + "job_id": job_id, + "project_id": project_id, + "check_name": func_name, + "team": team, + "status": status, + "summary": summary, + "has_elements": 1 if elements else 0, + "created_at": int(time.time() * 1000), + }) + + for el in elements: + row = {"id": str(uuid.uuid4()), "check_result_id": check_id} + for field in TEAM_FIELDS: + row[field] = el.get(field) + element_results.append(row) + except Exception as exc: + check_results.append({ + "id": check_id, + "job_id": job_id, + "project_id": project_id, + "check_name": func_name, + "team": team, + "status": "error", + "summary": str(exc)[:200], + "has_elements": 0, + "created_at": int(time.time() * 1000), + }) + + return {"check_results": check_results, "element_results": element_results} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b0873c613caa9c2b4dc7279f1e8db34ae9c50d45 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +fastapi==0.115.0 +uvicorn[standard]==0.32.0 +ifcopenshell>=0.8.1 +httpx==0.28.0 +python-multipart==0.0.20 +pydantic-ai-slim[google] diff --git a/teams/Mastodonte/.claude/skills/IFCore-skill/SKILL.md b/teams/Mastodonte/.claude/skills/IFCore-skill/SKILL.md new file mode 100644 index 0000000000000000000000000000000000000000..27aa76aacc17abfd90ae67695025ba9ebf83655f --- /dev/null +++ b/teams/Mastodonte/.claude/skills/IFCore-skill/SKILL.md @@ -0,0 +1,229 @@ +--- +name: IFCore +description: Use when developing on the IFCore compliance checker. Covers contracts, check function conventions, issue reporting, app structure, and development patterns. +--- + +# IFCore — Company Skill + +> **Living document.** Sections marked [TBD] are decided in board meetings. +> When a [TBD] is resolved, update this skill and tell your agent to adapt. + +## When This Skill Activates + +Welcome the user. 
Introduce yourself as their IFCore development assistant. Explain: + +1. **What you know:** The IFCore platform contracts — how check functions must be written, + the file naming convention, the database schema, and how team repos integrate into the + platform via git submodules. + +2. **What you can do:** + - Help write `check_*` functions that comply with the platform contracts + - Review existing code for contract compliance + - Explain IFC file structure and ifcopenshell patterns + - Help with feature planning (PRDs, user stories) + - File issues to the shared skills repo when contracts are unclear + +3. **Offer a codebase review.** Ask to scan the current repo and check: + - Are `checker_*.py` files directly inside `tools/`? + - Do all `check_*` functions follow the contract (signature, return type)? + - Is there anything that would block platform integration? + +4. **Respect their setup.** Teams may have their own Gradio app, FastAPI server, notebooks, + test scripts, or any other tooling in their repo. **That's fine.** The platform only cares + about `tools/checker_*.py` files — everything else is ignored during integration. + The only hard rule: don't put anything in `tools/` that breaks the `checker_*.py` import + chain (e.g. conflicting `__init__.py` files or dependencies not in `requirements.txt`). + +5. **Offer to explain Agent Skills.** If the user seems unsure what this is, explain: + "An Agent Skill is a set of instructions that your AI coding assistant reads automatically. + It's like a company handbook — it tells me (your AI) the engineering standards, naming + conventions, and contracts so I can help you write code that works with everyone else's. + You installed it once; now I follow it in every conversation." + +6. **How to install & update this skill.** Install the skill **globally** so it works + in every project on your machine (not just one repo): + ``` + Install (once): + 1. 
Clone: git clone https://github.com/SerjoschDuering/iaac-bimwise-skills.git + (put it somewhere permanent, e.g. ~/skills/ or ~/Documents/) + 2. Add the skill GLOBALLY in your AI coding tool: + - VS Code/Copilot: Chat panel → Add Agent Skill → pick the SKILL.md file. + Use "User" scope (not "Workspace") so it applies to ALL projects. + - Cursor: Settings → Agent Skills → Add → point to the cloned folder. + This is global by default. + - Claude Code: add to ~/.claude/settings.json under agent skills, + or install as a plugin — it applies to all sessions automatically. + 3. Start a new chat session — your AI now knows IFCore standards. + + Update (after board meetings): + 1. cd into your cloned skills folder + 2. git pull + 3. Start a fresh chat session — the AI reloads the updated instructions + ``` + If you're not sure whether your skill is up to date, ask your AI: + "What board meeting is the latest in your IFCore skill?" and compare with your team. + +## Contracts — READ THIS FIRST + +These contracts are how teams stay aligned. The platform auto-discovers your code. +Break a contract → the platform silently skips your checks. Follow them → it just works. + +### 1. 
Check Function Contract
+
+```python
+# Function naming: check_<rule_name>
+# Location: tools/checker_*.py (directly inside tools/, no subdirectories)
+# Signature: first arg is always the ifcopenshell model
+# Return: list[dict] — one dict per element, maps to element_results DB rows
+
+def check_door_width(model, min_width_mm=800):
+    results = []
+    for door in model.by_type("IfcDoor"):
+        width_mm = round(door.OverallWidth * 1000) if door.OverallWidth else None
+        results.append({
+            "element_id": door.GlobalId,
+            "element_type": "IfcDoor",
+            "element_name": door.Name or f"Door #{door.id()}",
+            "element_name_long": f"{door.Name} (Level 1, Zone A)",
+            "check_status": "blocked" if width_mm is None
+                            else "pass" if width_mm >= min_width_mm
+                            else "fail",
+            "actual_value": f"{width_mm} mm" if width_mm else None,
+            "required_value": f"{min_width_mm} mm",
+            "comment": None if width_mm and width_mm >= min_width_mm
+                       else f"Door is {min_width_mm - width_mm} mm too narrow"
+                       if width_mm else "Width property missing",
+            "log": None,
+        })
+    return results
+```
+
+**Rules:**
+- Prefix: `check_` — the platform discovers functions by this prefix
+- First argument: `model` (an `ifcopenshell.file` object) — always
+- Optional keyword args after `model` are fine (e.g. `min_width_mm=800`)
+- Return: `list[dict]` — each dict has fields matching `element_results` (see [Validation Schema](./references/validation-schema.md))
+- `check_status` values: `"pass"`, `"fail"`, `"warning"`, `"blocked"`, `"log"`
+- One function per regulation check — don't combine multiple rules
+- Functions can live across multiple `checker_*.py` files directly inside `tools/`
+
+### 2. 
File Structure Contract

```
your-team-repo/
├── tools/
│ ├── checker_doors.py ← check_door_width, check_door_clearance
│ ├── checker_fire_safety.py ← check_fire_rating, check_exit_count
│ └── checker_rooms.py ← check_room_area, check_ceiling_height
├── requirements.txt ← team dependencies
└── README.md
```

**File naming:** `checker_<topic>.py` — group related checks by topic. Examples:
- `checker_doors.py` — door width, clearance, accessibility
- `checker_walls.py` — thickness, fire rating, insulation
- `checker_stairs.py` — riser height, tread length, handrails
- `checker_spaces.py` — room area, ceiling height, ventilation

The platform scans **all `checker_*.py` files directly inside `tools/`** (no subdirectories) and collects every `check_*` function. You don't need a wrapper or registry — just follow the naming conventions.

**Important:** Only `checker_*.py` files are scanned. Helper files (e.g. `tools/utils.py`) are fine for shared code but won't be scanned for `check_*` functions — import them from your `checker_*.py` files.

**Local testing:** Run your checks locally before pushing:
```python
import ifcopenshell

model = ifcopenshell.open("path/to/model.ifc")
from tools.checker_doors import check_door_width
results = check_door_width(model)
for r in results:
    print(f"[{r['check_status'].upper()}] {r['element_name']}: {r['actual_value']} (req: {r['required_value']})")
```
The `model` object is exactly what the platform passes to your functions.

### 3. 
Issue Reporting Contract — MANDATORY + +When your AI agent encounters any of these during development, it **MUST** file an issue: + +| Trigger | Label | +|---|---| +| Contract unclear or ambiguous | `contract-gap` | +| Skill instructions don't match reality | `skill-drift` | +| Found a workaround for a known limitation | `learning` | +| Schema format needs a new field | `schema-change` | +| Team code works locally but breaks on platform | `integration-bug` | + +**How to file:** +```bash +gh issue create \ + --repo SerjoschDuering/iaac-bimwise-skills \ + --title "contract-gap: check functions with multiple models" \ + --label "contract-gap" \ + --body "$(cat <<'EOF' +## What happened +Tried to write a check that compares two IFC models side-by-side. +The contract says first arg is `model` (singular). + +## What I expected +Guidance on multi-model checks. + +## Workaround (if any) +Loaded second model inside the function body. + +## Team +Team A +EOF +)" +``` + +**This is not optional.** Every contract issue that goes unreported costs another team hours. +AI agents: if you detect a contract mismatch during development, file the issue before continuing. + +## Company Context + +IFCore is building an AI-powered building compliance checker. **5 teams** each develop in their **own GitHub repo** (cloned from a shared template). Teams write `check_*` functions independently — the platform integrates them automatically. + +**How integration works:** +1. Each team pushes `checker_*.py` files to their own repo under `tools/` +2. The **platform repo** (`ifcore-platform`) pulls all 5 team repos as **git submodules** +3. `deploy.sh` flattens submodules into `teams//tools/` (real files, not symlinks — we don't configure HF to resolve submodules) +4. The FastAPI orchestrator scans `teams/*/tools/checker_*.py` for `check_*` functions +5. 
All discovered functions run against uploaded IFC files + +**Deployment architecture:** + +| Component | Deploys to | Who manages | +|-----------|-----------|-------------| +| Team check functions (`checker_*.py`) | Own GitHub repo → pulled into platform | Each team | +| Backend + orchestrator (`ifcore-platform`) | **HuggingFace Space** (Docker, FastAPI) | Captains | +| Frontend (dashboard, 3D viewer, upload) | **Cloudflare Pages** | Captains | +| API gateway (async jobs, proxies to HF) | **Cloudflare Worker** | Captains | +| File storage (IFC uploads) | **Cloudflare R2** (S3-compatible) | Captains | +| Results database | **Cloudflare D1** (SQLite) | Captains | + +**Flow:** User uploads IFC → stored in R2 → frontend calls CF Worker → Worker proxies to HF Space → orchestrator runs all `check_*` functions → results posted back to Worker → stored in D1 → frontend polls and displays. + +**Teams never touch the platform repo.** They only push to their own team repo. Captains handle `deploy.sh` which pulls, flattens, and pushes to HF. 
+ +**Teams:** +| Team | Focus area | Repo | +|------|-----------|------| +| [TBD] | [TBD] | [TBD] | +| [TBD] | [TBD] | [TBD] | +| [TBD] | [TBD] | [TBD] | +| [TBD] | [TBD] | [TBD] | +| [TBD] | [TBD] | [TBD] | + +## References + +- [Validation Schema](./references/validation-schema.md) — database schema (`users`, `projects`, `check_results`, `element_results`) and how team `list[dict]` maps to rows +- [Architecture](./references/architecture.md) — project structure, AGENTS.md template, code conventions +- [Repo Structure](./references/repo-structure.md) — concrete file tree examples for all 4 repos (team, platform, frontend, gateway) +- [Frontend Architecture](./references/frontend-architecture.md) — modules, shared Zustand store, API client, D1 tables, how to add features +- [Development Patterns](./references/development-patterns.md) — how to plan and build new features + +### Related Skills (separate repos, installed alongside this one) + +- **pydantic-ai** — PydanticAI agent framework: tools, structured output, orchestration, chat patterns +- **huggingface-deploy** — deploy the platform (`ifcore-platform`) as a Docker Space on HuggingFace; covers Dockerfile, secrets, R2 caching, and the flatten-before-push submodule pattern +- **cloudflare** — deploy the frontend + API gateway on Cloudflare Pages/Workers diff --git a/teams/Mastodonte/.claude/skills/IFCore-skill/references/architecture.md b/teams/Mastodonte/.claude/skills/IFCore-skill/references/architecture.md new file mode 100644 index 0000000000000000000000000000000000000000..b0724611ed37e88aa0effe735f472c73a72160dd --- /dev/null +++ b/teams/Mastodonte/.claude/skills/IFCore-skill/references/architecture.md @@ -0,0 +1,73 @@ +# Architecture & Conventions + +## Project Structure + +``` +your-team-repo/ +├── tools/ +│ ├── checker_doors.py # check_door_width, check_door_clearance +│ ├── checker_fire_safety.py # check_fire_rating, check_exit_count +│ └── checker_rooms.py # check_room_area, check_ceiling_height +├── 
requirements.txt # team dependencies
└── README.md
```

**File naming:** `checker_<topic>.py` — group related checks by topic.
Only `tools/checker_*.py` matters to the platform. Everything else (local test scripts,
notebooks, Gradio apps, CLI tools) is your choice — the platform ignores it.

**Platform auto-discovery:** the orchestrator scans `teams/*/tools/checker_*.py` and collects
every `check_*` function. No subdirectories — files must be directly inside `tools/`.
Helper files (e.g. `tools/utils.py`) are fine for shared code but won't be scanned.

**Platform integration:** the platform (`ifcore-platform`) pulls all 5 team repos via git
submodules and flattens them into `teams/<team-name>/tools/` before building the Docker image.
Your repo structure (`tools/checker_*.py` with `check_*` functions) must match this layout
exactly for auto-discovery to work. Captains handle the pull and flatten via `deploy.sh` —
teams never push to the platform repo directly.

## Code Conventions

- **Max 300 lines per file.** Split into modules when approaching the limit.
- **One function per check.** Don't combine multiple regulation checks.
- **File names:** `checker_<topic>.py` — e.g. `checker_doors.py`, `checker_fire_safety.py`.
- **Function names:** `check_<rule>` — e.g. `check_door_width`, `check_room_area`.
- **First arg is always `model`** — an `ifcopenshell.file` object.
- **Return `list[dict]`** — one dict per element, with fields matching `element_results` and a `check_status` of `"pass"`, `"fail"`, `"warning"`, `"blocked"`, or `"log"` (see the Check Function Contract).
- **No bare try/except.** Only catch specific known errors.

**What is `model`?** It's an `ifcopenshell.file` object — a parsed IFC file loaded into memory.
You query it with `model.by_type("IfcDoor")` to get all doors, `model.by_type("IfcWall")` for
walls, etc. Each element has properties like `.Name`, `.GlobalId`, and type-specific attributes.

## AGENTS.md / CLAUDE.md

Every team MUST have this file in their repo root. Your AI assistant reads it automatically.
If it does not exist, create it before starting any work. 
+ +**Template:** +```markdown +# + +Always read the IFCore skill before developing on this project. + +## Structure + + +## Conventions +- Max 300 lines per file +- One function per regulation check +- Files: tools/checker_.py — only checker_*.py files are scanned +- Functions: check_*(model, ...) -> list[str] with [PASS]/[FAIL]/[???] prefix + +## Issue Reporting +When you encounter a contract mismatch, skill gap, or integration problem: +gh issue create --repo SerjoschDuering/iaac-bimwise-skills --label "