Silly98 committed on
Commit
868f8ea
·
verified ·
1 Parent(s): 9a3ba2b

Update text

Browse files
Files changed (1) hide show
  1. text +520 -209
text CHANGED
@@ -1,260 +1,571 @@
1
- import sys
2
- sys.path.append("../..")
3
- import json
4
- from typing import Dict, List, Optional
5
- import sys
6
- from sqlalchemy import text
7
- from sqlalchemy.engine import Result
8
- from app.db import engine
9
- from app.config import settings
10
- from app.cleaning.prompts import CLEAN_PROMPT
11
- from app.cleaning.llm import LLM
12
 
 
 
13
 
14
- """
15
- IN-PLACE CLEANING PIPELINE (Original Logic, No Mirroring)
16
-
17
- - Reads from SAME TABLE
18
- - Writes back into SAME TABLE
19
- - Adds <col>_clean columns if missing
20
- - Cleans only up to clean_cap rows (default 20)
21
- - Preserves original data completely
22
- - No target schema, no cloned tables, no mirroring
23
- """
24
 
25
- MAX_ROWS_PER_TABLE = 20
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- # ----------------------------------------------------
29
- # ADD <col>_clean columns into SAME TABLE
30
- # ----------------------------------------------------
31
def ensure_clean_columns(schema: str, table: str, culprit_columns: List[str]):
    """Add a TEXT "<col>_clean" companion column for every culprit column.

    Uses ADD COLUMN IF NOT EXISTS, so re-running against an already
    prepared table is harmless. Does nothing when culprit_columns is empty.
    """
    if not culprit_columns:
        return

    with engine.begin() as cx:
        for source_col in culprit_columns:
            ddl = (
                f'ALTER TABLE "{schema}"."{table}" '
                f'ADD COLUMN IF NOT EXISTS "{source_col}_clean" TEXT'
            )
            cx.execute(text(ddl))
43
-
44
-
45
- # ----------------------------------------------------
46
- # Count rows
47
- # ----------------------------------------------------
48
def _count_source_rows(schema: str, table: str) -> int:
    """Return COUNT(*) for schema.table, or -1 when the query fails."""
    try:
        with engine.begin() as cx:
            first_row = cx.execute(
                text(f'SELECT COUNT(*) FROM "{schema}"."{table}"')
            ).first()
            count = int(first_row[0])
    except Exception:
        # Table missing / permissions / connectivity: callers treat -1
        # as "unknown row count" rather than failing the pipeline.
        return -1
    return count
57
 
58
 
59
- # ----------------------------------------------------
60
- # Stream rows from SAME TABLE
61
- # ----------------------------------------------------
62
def stream_rows(schema: str, table: str, batch_size=1000, max_rows=None):
    """Yield rows from schema.table as lists of dicts, batch_size at a time.

    Uses SQLAlchemy server-side streaming (stream_results=True) so the whole
    table is never materialized in memory. When max_rows is given it is both
    pushed into the SQL as a LIMIT and enforced client-side.

    NOTE(review): `if max_rows:` treats 0 as "no limit" — confirm that is
    the intended meaning of max_rows=0.
    """
    limit = ""
    if max_rows:
        limit = f" LIMIT {int(max_rows)}"

    sql = f'SELECT * FROM "{schema}"."{table}"{limit}'

    with engine.begin() as cx:
        # stream_results keeps the cursor server-side; rows arrive lazily.
        result: Result = (
            cx.execution_options(stream_results=True)
            .execute(text(sql))
        )

        batch = []
        yielded = 0

        for row in result.mappings():
            # Copy each RowMapping into a plain mutable dict for callers.
            batch.append(dict(row))
            yielded += 1

            if len(batch) >= batch_size:
                yield batch
                batch = []

            # Client-side stop as a belt-and-braces check on top of LIMIT.
            if max_rows and yielded >= max_rows:
                break

        # Flush the final partial batch, if any.
        if batch:
            yield batch
91
 
92
 
93
- # ----------------------------------------------------
94
- # Write back INTO SAME TABLE
95
- # ----------------------------------------------------
96
def write_batch(schema: str, table: str, rows: List[Dict], pk_col: str):
    """UPDATE each row of `rows` back into schema.table, keyed by pk_col.

    Every key in a row dict except pk_col becomes a SET target, bound as a
    named parameter. dict/list values are JSON-encoded first so the driver
    can accept them.

    NOTE: the JSON encoding mutates the caller's row dicts in place — the
    existing pipeline relies on discarding them afterwards.
    """
    if not rows:
        return

    for r in rows:
        # json encode dict/list so SQL can accept it
        for k, v in list(r.items()):
            if isinstance(v, (dict, list)):
                r[k] = json.dumps(v)

    with engine.begin() as cx:
        for r in rows:
            # BUGFIX(cleanup): removed unused local `pk_val` — the PK is
            # bound via the :pk_col parameter from the row dict itself.
            set_list = ", ".join(
                f'"{c}" = :{c}' for c in r.keys() if c != pk_col
            )

            sql = (
                f'UPDATE "{schema}"."{table}" '
                f'SET {set_list} '
                f'WHERE "{pk_col}" = :{pk_col}'
            )

            cx.execute(text(sql), r)
121
 
122
 
123
- # ----------------------------------------------------
124
- # LLM clean
125
- # ----------------------------------------------------
126
def clean_value(llm: LLM, value: str) -> str:
    """Run `value` through the LLM cleaner, falling back to the original.

    Returns the stripped LLM output; if the LLM is disabled or returns
    only whitespace, the input value is returned unchanged.
    """
    if llm.enabled():
        cleaned = llm.clean_text(
            value,
            system=CLEAN_PROMPT,
            instruction="Clean the following product text."
        )
        stripped = cleaned.strip()
        if stripped:
            return stripped
    return value
136
-
137
-
138
- # ----------------------------------------------------
139
- # MAIN FUNCTION — identical logic to old pipeline
140
- # ----------------------------------------------------
141
def run_clean_table(
    schema: str,
    table: str,
    culprit_columns: List[str],
    batch_size: int = 1000,
    clean_cap: Optional[int] = None,
    primary_key: Optional[str] = None,
    clean_all: bool = False,
):
    """In-place clean: write LLM-cleaned text into <col>_clean columns.

    Streams the table in batches, skips rows whose clean columns are already
    populated, cleans at most `clean_cap` rows (default MAX_ROWS_PER_TABLE)
    unless `clean_all` is True, and UPDATEs the same table.

    Raises:
        ValueError: when primary_key is not supplied.
    """
    if not primary_key:
        raise ValueError("primary_key required")

    llm = LLM()

    # Ensure <col>_clean exists before we try to write into it
    ensure_clean_columns(schema, table, culprit_columns)

    # Determine cleaning cap (None means "clean everything")
    total_rows = _count_source_rows(schema, table)
    cap = None if clean_all else (clean_cap or MAX_ROWS_PER_TABLE)

    print(f"\n→ In-place cleaning {schema}.{table} (rows={total_rows}, cap={cap})")
    sys.stdout.flush()

    # Skip rows already cleaned (all clean columns non-NULL)
    skip_pks = set()
    if culprit_columns:
        cond = " AND ".join([f'"{c}_clean" IS NOT NULL' for c in culprit_columns])
        sql = (
            f'SELECT "{primary_key}" FROM "{schema}"."{table}" '
            f'WHERE {cond}'
        )
        try:
            with engine.begin() as cx:
                rows = cx.execute(text(sql)).fetchall()
                skip_pks = {r[0] for r in rows}
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Best-effort behavior preserved.
        except Exception:
            skip_pks = set()

    rows_cleaned = 0
    rows_processed = 0
    skipped_existing = 0

    # STREAM + CLEAN + UPDATE SAME TABLE
    for rows in stream_rows(schema, table, batch_size=batch_size):
        out_rows = []

        for r in rows:
            pk = r.get(primary_key)

            # Skip already cleaned rows
            if pk in skip_pks:
                skipped_existing += 1
                continue

            will_clean = (cap is None) or (rows_cleaned < cap)

            # Clean selected columns
            for col in culprit_columns:
                original = r.get(col)
                original_s = None if original is None else str(original)

                if will_clean:
                    r[f"{col}_clean"] = clean_value(llm, original_s) if original_s else None
                else:
                    # out-of-cap rows → NULL clean column (old behavior)
                    r[f"{col}_clean"] = None

            # BUGFIX: count each ROW once. The old code incremented inside
            # the column loop, so a table with N culprit columns reached the
            # row cap N× too early.
            if will_clean and culprit_columns:
                rows_cleaned += 1

            out_rows.append(r)

        # Write back to same table
        write_batch(schema, table, out_rows, pk_col=primary_key)
        rows_processed += len(out_rows)

        # progress — BUGFIX: guard target == 0 (empty table with clean_all)
        # which previously raised ZeroDivisionError.
        target = cap or total_rows
        pct = int(min(rows_cleaned, target) * 100 / target) if target > 0 else 100

        print(
            f"   {table}: cleaned {rows_cleaned}/{target} ({pct}%) "
            f"| updated rows: {rows_processed} | skipped: {skipped_existing}"
        )
        sys.stdout.flush()

    print(
        f"✓ DONE: {schema}.{table} in-place cleaned "
        f"(cleaned={rows_cleaned}, skipped={skipped_existing})\n"
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
 
 
 
 
 
 
 
 
232
 
233
- # ----------------------------------------------------
234
- # YAML Loader
235
- # ----------------------------------------------------
236
def run_cleaning_from_yaml(
    yaml_path: str,
    batch_size: int = 1000,
    clean_cap: Optional[int] = None,
    clean_all: bool = False,
):
    """Drive run_clean_table for every table listed in a YAML config.

    Expects the file to contain a top-level "tables" list whose entries
    provide schema, name, culprit_columns and primary_key.
    """
    import yaml

    with open(yaml_path, "r") as handle:
        config = yaml.safe_load(handle)

    for entry in config.get("tables", []):
        run_clean_table(
            schema=entry["schema"],
            table=entry["name"],
            culprit_columns=entry["culprit_columns"],
            batch_size=batch_size,
            primary_key=entry["primary_key"],
            clean_cap=clean_cap,
            clean_all=clean_all,
        )
257
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
 
259
 
260
 
 
1
+ # app/cleaning/prompts.py
2
+ from __future__ import annotations
 
 
 
 
 
 
 
 
 
3
 
4
+ from textwrap import dedent
5
+ from typing import Dict, Iterable, List, Optional, Set, Tuple
6
 
7
+ from app.config import settings
 
 
 
 
 
 
 
 
 
8
 
 
9
 
10
+ DEFAULT_ALLOWED_TABLES: Tuple[str, ...] = (
11
+ # "test_products", ## sample table for navin testing
12
+ "tblModelDetails",
13
+ "tblDimensionInfo",
14
+ "tblInstances",
15
+ "tblModelBoundingBox",
16
+ "tblModelCheckerResults",
17
+ "tblModelDetailsHub",
18
+ "tblModelFloorPlanMapping",
19
+ "tblModels",
20
+ "tblNWCViewExport",
21
+ "tblTypes",
22
+ "tblProjects",
23
+ "tblProjectSubTypes",
24
+ "tblRFISubType",
25
+ "tblViews",
26
+ "tblUsers",
27
+ "tblWorkflowStatuses",
28
+ "tblCategory",
29
+ "tblAssembly",
30
+ "tblElements",
31
+ "tblMaterials",
32
+ "tblModelRoomDetails",
33
+ "tblGeometryDataNew",
34
+ "tblGeometryDataBatchMapping",
35
+ )
36
+
37
+ RELATIONSHIP_SUMMARY = dedent("""\
38
+ # Relational Structure Summary
39
+
40
+ tblModelDetails ← (central entity)
41
+ ↑ tblDimensionInfo.ModelID
42
+ ↑ tblInstances.ModelID
43
+ ↑ tblModelBoundingBox.ModelID
44
+ ↑ tblModelCheckerResults.ModelID
45
+ ↑ tblModelDetailsHub.ModelID
46
+ ↑ tblModelFloorPlanMapping.ModelID
47
+ ↑ tblModels.ModelID
48
+ ↑ tblNWCViewExport.ModelID
49
+ ↑ tblTypes.ModelID
50
+
51
+ tblProjects ← referenced by:
52
+ tblModelDetails.ProjectID
53
+ tblTypes.ProjectID
54
+
55
+ tblTypes ← referenced by:
56
+ tblInstances.TypeID / RFITypeID / ProjectTypeID
57
+ tblProjectSubTypes.TypeID / RFITypeID / ProjectTypeID
58
+ tblRFISubType.TypeID / RFITypeID / ProjectTypeID
59
+
60
+ tblViews ← referenced by:
61
+ tblModelBoundingBox.ViewID
62
+
63
+ tblUsers ← referenced by:
64
+ tblModelDetails.ExportedBy
65
+
66
+ tblWorkflowStatuses ← referenced by:
67
+ tblModelDetails.StatusID
68
+
69
+ tblCategory ← referenced by:
70
+ tblTypes.CategoryID
71
+ """)
72
+
73
+ # -------------------------------
74
+ # Cleaning prompt (for culprit columns)
75
+ # -------------------------------
76
+ CLEAN_PROMPT = dedent("""\
77
+ You are cleaning noisy product/part names into a canonical short form.
78
+
79
+ Rules:
80
+ - Normalize inch units: 4", 4 inch, 4 in -> "4 in".
81
+ - Fix obvious misspellings: "elbw" -> "elbow".
82
+ - Remove opaque trailing codes like "-rr20" unless essential to identity.
83
+ - Keep it compact and human-readable.
84
+ - Return ONLY the cleaned text. No quotes, no explanations.
85
+ """)
86
+
87
+
88
+ # -------------------------------
89
+ # SQL generation (system prompt)
90
+ # -------------------------------
91
+ SQL_PROMPT_SYSTEM = dedent("""\
92
+ You are an expert Postgres SQL generator.
93
+
94
+ CONTEXT
95
+ - All tables are in the `public` schema of the `TransactionalData` database.
96
+ - The `public` schema already contains cleaned columns (e.g., *_clean).
97
+ - Use only the tables and columns that exist in `public`.
98
+ - You MUST restrict yourself to only the tables/relations that appear in the relational summary
99
+ provided in the user message. Do not invent tables or columns.
100
+
101
+ OUTPUT
102
+ - Return a single SELECT statement only (no CTEs across multiple statements, no DDL/DML).
103
+ - Qualify identifiers when needed using the `public.` schema.
104
+ - Avoid vendor-specific functions beyond standard Postgres.
105
+ - If units appear, assume inches unless the column clearly uses *_mm.
106
+
107
+ JOINING & FILTERING
108
+ - Use only joins that are consistent with the relational summary (respect directionality).
109
+ - When matching human-entered part names/sizes:
110
+ * Prefer cleaned columns (e.g., name_clean, title_clean) when they exist.
111
+ * Otherwise use ILIKE with robust patterns and simple normalization
112
+ (e.g., replace double quotes with " in).
113
+ - Limit columns to those that answer the question clearly.
114
+
115
+ IF UNSURE
116
+ - If the schema lacks a required join path, prefer a simpler query over a wrong join.
117
+ - You may add safe assumptions in comments inside the SQL (using --) but DO NOT return prose.
118
+ """)
119
+
120
+
121
+
122
+ # -------------------------------
123
+ # Catalog-aware prompt (default for sql_gen)
124
+ # -------------------------------
125
def sql_user_prompt(
    question: str,
    catalog_snapshot: Iterable[Dict[str, str]],
    allowed_tables: Iterable[str] = DEFAULT_ALLOWED_TABLES,
) -> str:
    """Build the user-message half of the NL→SQL prompt.

    Renders the question, a curated table/column listing derived from the
    catalog snapshot (filtered to allowed_tables), the static relationship
    summary, and guidance lines, as one dedented text block.
    """
    # dict.fromkeys() de-duplicates while preserving the curated ordering.
    allowed = list(dict.fromkeys(allowed_tables))

    # The source and target schema are the same now.
    # NOTE(review): POSTGRES_SOURCE_SCHEMAS is used here as a single schema
    # name despite the plural — confirm it is a string, not a list.
    target_schema = settings.POSTGRES_SOURCE_SCHEMAS

    # schema -> table -> set of column names, built only from rows that
    # carry all three keys and belong to a curated table.
    column_map: Dict[str, Dict[str, Set[str]]] = {}
    for row in catalog_snapshot:
        row_data = row if isinstance(row, dict) else dict(row)
        schema = row_data.get("table_schema")
        table = row_data.get("table_name")
        column = row_data.get("column_name")
        if not schema or not table or not column:
            continue
        if table not in allowed:
            continue
        column_map.setdefault(schema, {}).setdefault(table, set()).add(column)

    def render_columns(schema: str) -> List[str]:
        # Emit "schema:" then each curated table (in curated order) with its
        # discovered columns sorted alphabetically.
        lines: List[str] = []
        tables = column_map.get(schema)
        if not tables:
            return lines
        lines.append(f"{schema}:")
        for table in allowed:
            cols = tables.get(table)
            if not cols:
                continue
            lines.append(f" {table}:")
            for col in sorted(cols):
                lines.append(f" - {col}")
        lines.append("")
        return lines

    column_lines: List[str] = []
    seen_schemas: Set[str] = set()

    # Only one schema: public
    for schema in [target_schema]:
        if schema in seen_schemas:
            continue
        seen_schemas.add(schema)
        column_lines.extend(render_columns(schema))

    if not column_lines:
        columns_block = "(no columns discovered for the curated tables)"
    else:
        columns_block = "\n".join(column_lines).rstrip()

    relationship_block = RELATIONSHIP_SUMMARY.strip()

    return dedent(f"""\
Question: {question}

Focus tables and columns (curated set):
{columns_block}

{relationship_block}

Guidance:
- Use tables only from the `public` schema.
- Prefer cleaned columns (e.g., *_clean) when they exist in `public`.
- Restrict joins to the relationships shown above; do not invent tables or links.
- Return ONE PostgreSQL SELECT statement (no modifications).
""")
194
+
195
+
196
+ # -------------------------------
197
+ # Convenience: pack system + user together
198
+ # -------------------------------
199
def build_full_sql_prompt(
    question: str,
    allowed_tables: Iterable[str] = DEFAULT_ALLOWED_TABLES,
    catalog_snapshot: Optional[Iterable[Dict[str, str]]] = None,
) -> Dict[str, str]:
    """
    Returns {"system": SQL_PROMPT_SYSTEM, "user": <combined user text>}
    """
    snapshot = catalog_snapshot
    if snapshot is None:
        # Imported lazily here to avoid a circular import with app.db.
        from app.db import get_catalog_snapshot
        snapshot = get_catalog_snapshot()

    user_text = sql_user_prompt(
        question=question,
        catalog_snapshot=snapshot or [],
        allowed_tables=allowed_tables,
    )
    return {"system": SQL_PROMPT_SYSTEM, "user": user_text}
217
 
 
 
 
 
 
 
218
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
 
 
 
 
 
 
 
 
221
 
 
222
 
 
 
 
 
 
223
 
 
 
224
 
 
 
 
225
 
 
 
 
226
 
 
 
227
 
 
 
228
 
229
 
 
 
 
 
 
 
230
 
231
+ import re
232
+ import sqlparse
233
+ from sqlalchemy import text
234
+ from app.db import engine, get_catalog_snapshot
235
+ from app.cleaning.llm import LLM
236
+ from app.cleaning.prompts import SQL_PROMPT_SYSTEM, sql_user_prompt
237
+ from app.config import settings
238
 
239
def is_safe_select(sql: str) -> bool:
    """Return True when *sql* is a single, read-only SELECT statement.

    Defense-in-depth for LLM-generated SQL:
    - must parse via sqlparse,
    - must be exactly ONE statement (BUGFIX: the old check only typed the
      first statement, so "SELECT 1; COPY ..." could slip through),
    - that statement must be typed SELECT,
    - and no write/DDL keyword may appear anywhere in the text (crude, but
      catches keywords hidden in subqueries; COPY/CALL/DO/MERGE added).
    """
    parsed = sqlparse.parse(sql)
    if not parsed:
        return False
    # Reject multi-statement input outright (ignore empty trailing stmts).
    statements = [s for s in parsed if str(s).strip()]
    if len(statements) != 1:
        return False
    if statements[0].get_type() != "SELECT":
        return False
    # crude safeguards
    forbidden = re.compile(
        r'\b(INSERT|UPDATE|DELETE|DROP|TRUNCATE|ALTER|CREATE|GRANT|REVOKE'
        r'|COPY|CALL|DO|MERGE)\b',
        re.I,
    )
    return not forbidden.search(sql)
249
+
250
def _quote_schema_identifiers(sql: str) -> str:
    """Quote occurrences of <schema>.<identifier> where not already quoted.

    Postgres folds unquoted identifiers to lower case, so MixedCase table
    names emitted by the LLM (e.g. tblTypes) must be double-quoted to
    resolve. Already-quoted names are skipped via the (?<!") guards.
    """
    # BUGFIX(cleanup): dropped the redundant `import re as _re` — this
    # module already imports `re` at top level.
    schema = settings.POSTGRES_SOURCE_SCHEMAS
    # Quote occurrences like normalizeddata.tblTypes -> "normalizeddata"."tblTypes"
    pat = re.compile(rf'(?<!\")\b{re.escape(schema)}\.(?!\")(\w+)', re.IGNORECASE)
    sql = pat.sub(lambda m: f'"{schema}"."{m.group(1)}"', sql)
    # Also quote public.<identifier> forms
    pat_pub = re.compile(r'(?<!\")\bpublic\.(?!\")(\w+)', re.IGNORECASE)
    sql = pat_pub.sub(lambda m: f'"public"."{m.group(1)}"', sql)
    return sql
263
+
264
def _quote_alias_columns(sql: str) -> str:
    """Quote alias.column references (e.g. e.ElementID -> e."ElementID").

    Keeps the alias as-is and double-quotes the column to preserve case.
    Skips already-quoted columns (via (?!")), alias.* (the pattern requires
    a word character after the dot), and schema-qualified names whose prefix
    is in the reserved set (those are handled by _quote_schema_identifiers).

    NOTE(review): the regex does not understand SQL string literals, so a
    dotted token inside a quoted string would also be rewritten — appears
    acceptable for LLM output, but worth confirming.
    """
    # BUGFIX(cleanup): dropped the redundant `import re as _re` — this
    # module already imports `re` at top level.
    pat = re.compile(r'\b([A-Za-z_][\w]*)\.(?!\")(\w+)')
    reserved = {"public", settings.POSTGRES_SOURCE_SCHEMAS.lower(), "pg_catalog", "information_schema"}

    def _repl(m: re.Match) -> str:
        alias = m.group(1)
        col = m.group(2)
        if alias.lower() in reserved:
            return m.group(0)
        return f'{alias}."{col}"'

    return pat.sub(_repl, sql)
279
+
280
def nl_to_sql(question: str) -> str:
    """Translate a natural-language question into a vetted Postgres SELECT.

    Pipeline: build catalog-aware prompt → call the LLM → strip code fences
    → quote MixedCase identifiers → reject anything that is not a single
    safe SELECT.

    Raises:
        RuntimeError: when the LLM backend is disabled.
        ValueError: when the generated SQL fails the safety check.
    """
    catalog = get_catalog_snapshot()
    prompt = sql_user_prompt(question, catalog)
    llm = LLM()
    if not llm.enabled():
        raise RuntimeError("LLM disabled: set LLM_PROVIDER in .env to enable NL→SQL.")
    sql = llm.nl_to_sql(SQL_PROMPT_SYSTEM, prompt)
    # strip code fences if any: outer backticks, a leading "sql" language
    # tag left over from ```sql, then any fences still embedded inside.
    sql = sql.strip().strip("`")
    if sql.startswith("sql"):
        sql = sql[3:].strip()
    if "```" in sql:
        sql = re.sub(r"```sql|```", "", sql, flags=re.I).strip()
    # Quote schema identifiers to preserve MixedCase table names
    sql = _quote_schema_identifiers(sql)
    # Quote alias columns (e.g., e.ElementID -> e."ElementID")
    # — must run AFTER schema quoting so schema prefixes are already fenced.
    sql = _quote_alias_columns(sql)
    if not is_safe_select(sql):
        raise ValueError("Generated SQL failed safety checks (SELECT-only required).")
    return sql
300
+
301
def run_sql(sql: str, limit: int = 200):
    """Execute a validated SELECT and return its rows as a list of dicts.

    The query is wrapped in a subselect with a hard LIMIT so an unbounded
    LLM-generated query cannot drag back a huge result set.
    """
    # BUGFIX: a trailing semicolon in the generated SQL made the wrapped
    # subselect a syntax error — strip it. int() ensures the interpolated
    # limit can never be anything but a number.
    inner = sql.strip().rstrip(";")
    sql_wrapped = f"SELECT * FROM ({inner}) AS sub LIMIT {int(limit)}"
    with engine.begin() as cx:
        rows = cx.execute(text(sql_wrapped)).mappings().all()
    return [dict(r) for r in rows]
307
 
 
 
 
308
 
 
 
 
 
 
309
 
 
310
 
311
 
 
 
 
 
 
 
312
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
313
 
 
314
 
 
 
315
 
 
 
 
316
 
 
 
317
 
318
+ import re
319
+ from typing import List, Tuple, Dict, Set
320
+
321
+ from sqlalchemy import text
322
+ from app.config import settings
323
+
324
+ from app.db import engine, get_catalog_snapshot
325
+ from app.cleaning.llm import LLM
326
+ from app.cleaning.prompts import SQL_PROMPT_SYSTEM, sql_user_prompt
327
+
328
+
329
+ def _strip_code_fences(s: str) -> str:
330
+ s = s.strip()
331
+ s = re.sub(r"^```sql\s*|\s*```$", "", s, flags=re.I | re.M)
332
+ return s.strip("`").strip()
333
+
334
+
335
+ EXTRA_GUIDANCE = """
336
+ Requirements:
337
+ - Search across cleaned columns only (use *_clean):
338
+ Types(TypeName_clean, FamilyName_clean), Elements(ElementName_clean),
339
+ Assembly(AssemblyName_clean), Materials(Name_clean), Views(ViewName_clean),
340
+ ModelDetails(ModelName_clean), ModelRoomDetails(RoomName_clean),
341
+ ModelFloorPlanMapping(FloorPlanName_clean), GeometryDataNew(GeometryFileName_clean).
342
+ - Prefer normalized units (e.g., 4 in). Treat 4", 4 inch, 4 inches as 4 in.
343
+ - Match singular/plural (e.g., wall/walls, elbow/elbows) using robust ILIKE patterns.
344
+ - Follow relationships to Types to return the correct TypeID when searching non-Type tables.
345
+ - Final output should be a single SELECT that includes a column aliased exactly as TypeID
346
+ (e.g., SELECT DISTINCT t."TypeID" AS TypeID, ...). Avoid returning other columns.
347
+ """
348
 
 
 
 
349
 
350
+ def _is_select(sql: str) -> bool:
351
+ return bool(sql and sql.strip().lower().startswith("select"))
 
352
 
 
 
353
 
354
def _quote_schema_identifiers(sql: str) -> str:
    """Wrap unquoted <schema>.<identifier> references in double quotes.

    Postgres lower-cases unquoted identifiers, so MixedCase table names
    emitted by the LLM need quoting to resolve. Names that are already
    quoted are left alone.
    """
    source_schema = settings.POSTGRES_SOURCE_SCHEMAS

    def quote_for(schema_name: str, body: str) -> str:
        pattern = re.compile(
            rf'(?<!")\b{re.escape(schema_name)}\.(?!")(\w+)', re.IGNORECASE
        )
        return pattern.sub(lambda m: f'"{schema_name}"."{m.group(1)}"', body)

    # e.g. normalizeddata.tblTypes -> "normalizeddata"."tblTypes"
    sql = quote_for(source_schema, sql)
    # public.<identifier> forms get the same treatment
    return quote_for("public", sql)
366
 
 
367
 
368
def nl_to_typeids(question: str) -> Tuple[List[int], str]:
    """Generate SQL via LLM that returns DISTINCT TypeID for the user question
    and execute it, returning a list of TypeID values.

    Returns (sorted unique TypeIDs, the SQL that was run). Any failure while
    executing the LLM's SQL falls back to a deterministic UNION search.
    """
    # Build a focused prompt with columns and relations
    catalog = get_catalog_snapshot()
    user = sql_user_prompt(question, catalog_snapshot=catalog)
    user += "\n\n" + EXTRA_GUIDANCE

    llm = LLM()
    if not llm.enabled():
        raise RuntimeError("LLM disabled: set LLM_PROVIDER in .env to enable NL→SQL.")

    # Sanitize the LLM output before touching the database.
    sql = llm.nl_to_sql(SQL_PROMPT_SYSTEM, user)
    sql = _strip_code_fences(sql)
    sql = _quote_schema_identifiers(sql)
    if not _is_select(sql):
        raise ValueError("LLM did not return a SELECT statement.")

    # Execute LLM SQL directly and extract any column that looks like TypeID
    try:
        with engine.begin() as cx:
            # Favor normalizeddata then public on the search_path for convenience
            # (best-effort: failure to set it is not fatal).
            try:
                cx.execute(text(
                    f"SET LOCAL search_path TO \"{settings.POSTGRES_SOURCE_SCHEMAS}\", " + ",".join(settings.source_schemas)
                ))
            except Exception:
                pass
            rows = cx.execute(text(sql)).mappings().all()

        if not rows:
            return ([], sql)

        # Find a key resembling TypeID (case-insensitive, underscores allowed)
        keys = list(rows[0].keys())

        def is_typeid(k: str) -> bool:
            return bool(re.fullmatch(r"(?i)type[_ ]?id", k))

        cand = None
        for k in keys:
            if is_typeid(k):
                cand = k
                break
        if cand is None:
            # Second pass: exact lower-cased spellings as a fallback.
            for k in keys:
                if k.lower() in ("typeid", "type_id"):
                    cand = k
                    break
        if cand is None:
            return ([], sql)

        # Coerce values to int, tolerating strings with whitespace; anything
        # unconvertible is silently dropped.
        vals: set[int] = set()
        for r in rows:
            v = r.get(cand)
            if v is None:
                continue
            try:
                vals.add(int(v))
            except Exception:
                try:
                    vals.add(int(str(v).strip()))
                except Exception:
                    continue
        return (sorted(vals), sql)
    except Exception:
        # Fallback: build a deterministic UNION-based search using only verified joins/columns
        return _fallback_union_search(question)
437
+
438
+
439
def _fallback_union_search(question: str) -> Tuple[List[int], str]:
    """Fallback SQL builder that unions TypeIDs discovered via safe, known paths.

    Tokenizes the question (normalizing inch units and singular/plural),
    then, for each join path whose tables/columns are verified to exist in
    the catalog, emits a DISTINCT-TypeID SELECT with ILIKE filters, UNIONs
    them, and executes with bound parameters.
    """
    catalog = get_catalog_snapshot()
    target = settings.POSTGRES_SOURCE_SCHEMAS
    # schema -> table -> set of columns, from the live catalog snapshot.
    cmap: Dict[str, Dict[str, Set[str]]] = {}
    for r in catalog:
        schema = r["table_schema"]
        table = r["table_name"]
        col = r["column_name"]
        cmap.setdefault(schema, {}).setdefault(table, set()).add(col)

    def has(table: str, *cols: str, schema: str = target) -> bool:
        # True only when the table AND every listed column exist.
        return table in cmap.get(schema, {}) and all(c in cmap[schema][table] for c in cols)

    def normalize_inches(txt: str) -> str:
        # 4", 4 inch, 4 inches, 4 in -> "4 in"
        t = txt
        t = re.sub(r"(\d+)\s*(?:inches|inch|in|\")\b", r"\1 in", t, flags=re.I)
        return t

    qn = normalize_inches(question.lower())
    tokens = []
    # Keep "<digits> in" measurements as single tokens plus bare words.
    for tok in re.findall(r"\d+\s+in|[a-zA-Z]+", qn):
        tokens.append(tok.strip())
    # Add singular/plural variants for alphabetic tokens longer than 2 chars.
    alts: Set[str] = set(tokens)
    for t in list(tokens):
        if len(t) > 2 and t.isalpha():
            if t.endswith("s"):
                alts.add(t[:-1])
            else:
                alts.add(t + "s")
    pats = [f"%{t}%" for t in sorted(alts)]

    def like_all(col: str) -> str:
        # AND of ILIKE :p0 .. :pN; TRUE when there are no patterns at all.
        return " AND ".join([f"{col} ILIKE :p{i}" for i in range(len(pats))]) if pats else "TRUE"

    parts: List[str] = []

    # Direct matches on the Types table itself.
    if has("tblTypes", "TypeID", "TypeName_clean"):
        conds = like_all('t."TypeName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID FROM "{target}"."tblTypes" t WHERE {conds}'
        )
    if has("tblTypes", "TypeID", "FamilyName_clean"):
        conds = like_all('t."FamilyName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID FROM "{target}"."tblTypes" t WHERE {conds}'
        )

    # Elements carry their own TypeID directly.
    if has("tblElements", "TypeID", "ElementName_clean"):
        conds = like_all('e."ElementName_clean"')
        parts.append(
            f'SELECT DISTINCT e."TypeID" AS TypeID FROM "{target}"."tblElements" e WHERE {conds}'
        )

    # One-hop joins back to tblTypes via MaterialID / ModelID.
    if has("tblTypes", "TypeID", "MaterialID") and has("tblMaterials", "MaterialID", "Name_clean"):
        conds = like_all('m."Name_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblMaterials" m ON t."MaterialID" = m."MaterialID" '
            f'WHERE {conds}'
        )

    if has("tblTypes", "TypeID", "ModelID") and has("tblModelDetails", "ModelID", "ModelName_clean"):
        conds = like_all('md."ModelName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblModelDetails" md ON t."ModelID" = md."ModelID" '
            f'WHERE {conds}'
        )

    if has("tblTypes", "TypeID", "ModelID") and has("tblViews", "ModelID", "ViewName_clean"):
        conds = like_all('v."ViewName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblViews" v ON t."ModelID" = v."ModelID" '
            f'WHERE {conds}'
        )

    if has("tblTypes", "TypeID", "ModelID") and has("tblAssembly", "ModelID", "AssemblyName_clean"):
        conds = like_all('a."AssemblyName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblAssembly" a ON t."ModelID" = a."ModelID" '
            f'WHERE {conds}'
        )

    if has("tblTypes", "TypeID", "ModelID") and has("tblModelRoomDetails", "ModelID", "RoomName_clean"):
        conds = like_all('mrd."RoomName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblModelRoomDetails" mrd ON t."ModelID" = mrd."ModelID" '
            f'WHERE {conds}'
        )

    if has("tblTypes", "TypeID", "ModelID") and has("tblModelFloorPlanMapping", "ModelID", "FloorPlanName_clean"):
        conds = like_all('mfpm."FloorPlanName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblModelFloorPlanMapping" mfpm ON t."ModelID" = mfpm."ModelID" '
            f'WHERE {conds}'
        )

    if has("tblTypes", "TypeID", "ModelID") and has("tblGeometryDataNew", "ModelID", "GeometryFileName_clean"):
        conds = like_all('g."GeometryFileName_clean"')
        parts.append(
            f'SELECT DISTINCT t."TypeID" AS TypeID '
            f'FROM "{target}"."tblTypes" t '
            f'JOIN "{target}"."tblGeometryDataNew" g ON t."ModelID" = g."ModelID" '
            f'WHERE {conds}'
        )

    if not parts:
        return ([], "-- no valid search paths found")

    sql = "\nUNION\n".join(parts)

    params = {f"p{i}": pats[i] for i in range(len(pats))}
    with engine.begin() as cx:
        rows = cx.execute(text(sql), params).scalars().all()
    # rows already ints
    out = sorted({int(x) for x in rows if x is not None})
    return (out, sql)
567
+
568
+
569
 
570
 
571