Silly98 committed on
Commit 7b05dcb · verified · 1 Parent(s): 868f8ea

Update text

Files changed (1)
  1. text +203 -526
text CHANGED
@@ -1,571 +1,248 @@
- # app/cleaning/prompts.py
- from __future__ import annotations
-
- from textwrap import dedent
- from typing import Dict, Iterable, List, Optional, Set, Tuple
-
  from app.config import settings
-
-
- DEFAULT_ALLOWED_TABLES: Tuple[str, ...] = (
-     # "test_products",  ## sample table for navin testing
-     "tblModelDetails",
-     "tblDimensionInfo",
-     "tblInstances",
-     "tblModelBoundingBox",
-     "tblModelCheckerResults",
-     "tblModelDetailsHub",
-     "tblModelFloorPlanMapping",
-     "tblModels",
-     "tblNWCViewExport",
-     "tblTypes",
-     "tblProjects",
-     "tblProjectSubTypes",
-     "tblRFISubType",
-     "tblViews",
-     "tblUsers",
-     "tblWorkflowStatuses",
-     "tblCategory",
-     "tblAssembly",
-     "tblElements",
-     "tblMaterials",
-     "tblModelRoomDetails",
-     "tblGeometryDataNew",
-     "tblGeometryDataBatchMapping",
- )
-
- RELATIONSHIP_SUMMARY = dedent("""\
- # Relational Structure Summary
-
- tblModelDetails ← (central entity)
-     ↑ tblDimensionInfo.ModelID
-     ↑ tblInstances.ModelID
-     ↑ tblModelBoundingBox.ModelID
-     ↑ tblModelCheckerResults.ModelID
-     ↑ tblModelDetailsHub.ModelID
-     ↑ tblModelFloorPlanMapping.ModelID
-     ↑ tblModels.ModelID
-     ↑ tblNWCViewExport.ModelID
-     ↑ tblTypes.ModelID
-
- tblProjects ← referenced by:
-     tblModelDetails.ProjectID
-     tblTypes.ProjectID
-
- tblTypes ← referenced by:
-     tblInstances.TypeID / RFITypeID / ProjectTypeID
-     tblProjectSubTypes.TypeID / RFITypeID / ProjectTypeID
-     tblRFISubType.TypeID / RFITypeID / ProjectTypeID
-
- tblViews ← referenced by:
-     tblModelBoundingBox.ViewID
-
- tblUsers ← referenced by:
-     tblModelDetails.ExportedBy
-
- tblWorkflowStatuses ← referenced by:
-     tblModelDetails.StatusID
-
- tblCategory ← referenced by:
-     tblTypes.CategoryID
- """)
-
- # -------------------------------
- # Cleaning prompt (for culprit columns)
- # -------------------------------
- CLEAN_PROMPT = dedent("""\
- You are cleaning noisy product/part names into a canonical short form.
-
- Rules:
- - Normalize inch units: 4", 4 inch, 4 in -> "4 in".
- - Fix obvious misspellings: "elbw" -> "elbow".
- - Remove opaque trailing codes like "-rr20" unless essential to identity.
- - Keep it compact and human-readable.
- - Return ONLY the cleaned text. No quotes, no explanations.
- """)
-
-
- # -------------------------------
- # SQL generation (system prompt)
- # -------------------------------
- SQL_PROMPT_SYSTEM = dedent("""\
- You are an expert Postgres SQL generator.
-
- CONTEXT
- - All tables are in the `public` schema of the `TransactionalData` database.
- - The `public` schema already contains cleaned columns (e.g., *_clean).
- - Use only the tables and columns that exist in `public`.
- - You MUST restrict yourself to only the tables/relations that appear in the relational summary
-   provided in the user message. Do not invent tables or columns.
-
- OUTPUT
- - Return a single SELECT statement only (no CTEs across multiple statements, no DDL/DML).
- - Qualify identifiers when needed using the `public.` schema.
- - Avoid vendor-specific functions beyond standard Postgres.
- - If units appear, assume inches unless the column clearly uses *_mm.
-
- JOINING & FILTERING
- - Use only joins that are consistent with the relational summary (respect directionality).
- - When matching human-entered part names/sizes:
-   * Prefer cleaned columns (e.g., name_clean, title_clean) when they exist.
-   * Otherwise use ILIKE with robust patterns and simple normalization
-     (e.g., replace a trailing double-quote inch mark with " in").
- - Limit columns to those that answer the question clearly.
-
- IF UNSURE
- - If the schema lacks a required join path, prefer a simpler query over a wrong join.
- - You may add safe assumptions in comments inside the SQL (using --) but DO NOT return prose.
- """)
-
-
- # -------------------------------
- # Catalog-aware prompt (default for sql_gen)
- # -------------------------------
- def sql_user_prompt(
-     question: str,
-     catalog_snapshot: Iterable[Dict[str, str]],
-     allowed_tables: Iterable[str] = DEFAULT_ALLOWED_TABLES,
- ) -> str:
-     allowed = list(dict.fromkeys(allowed_tables))
-
-     # The source and target schema are the same now.
-     target_schema = settings.POSTGRES_SOURCE_SCHEMAS
-
-     column_map: Dict[str, Dict[str, Set[str]]] = {}
-     for row in catalog_snapshot:
-         row_data = row if isinstance(row, dict) else dict(row)
-         schema = row_data.get("table_schema")
-         table = row_data.get("table_name")
-         column = row_data.get("column_name")
-         if not schema or not table or not column:
-             continue
-         if table not in allowed:
-             continue
-         column_map.setdefault(schema, {}).setdefault(table, set()).add(column)
-
-     def render_columns(schema: str) -> List[str]:
-         lines: List[str] = []
-         tables = column_map.get(schema)
-         if not tables:
-             return lines
-         lines.append(f"{schema}:")
-         for table in allowed:
-             cols = tables.get(table)
-             if not cols:
-                 continue
-             lines.append(f"  {table}:")
-             for col in sorted(cols):
-                 lines.append(f"    - {col}")
-         lines.append("")
-         return lines
-
-     column_lines: List[str] = []
-     seen_schemas: Set[str] = set()
-
-     # Only one schema: public
-     for schema in [target_schema]:
-         if schema in seen_schemas:
-             continue
-         seen_schemas.add(schema)
-         column_lines.extend(render_columns(schema))
-
-     if not column_lines:
-         columns_block = "(no columns discovered for the curated tables)"
-     else:
-         columns_block = "\n".join(column_lines).rstrip()
-
-     relationship_block = RELATIONSHIP_SUMMARY.strip()
-
-     return dedent(f"""\
- Question: {question}
-
- Focus tables and columns (curated set):
- {columns_block}
-
- {relationship_block}
-
- Guidance:
- - Use tables only from the `public` schema.
- - Prefer cleaned columns (e.g., *_clean) when they exist in `public`.
- - Restrict joins to the relationships shown above; do not invent tables or links.
- - Return ONE PostgreSQL SELECT statement (no modifications).
- """)
-
-
- # -------------------------------
- # Convenience: pack system + user together
- # -------------------------------
- def build_full_sql_prompt(
-     question: str,
-     allowed_tables: Iterable[str] = DEFAULT_ALLOWED_TABLES,
-     catalog_snapshot: Optional[Iterable[Dict[str, str]]] = None,
- ) -> Dict[str, str]:
-     """
-     Returns {"system": SQL_PROMPT_SYSTEM, "user": <combined user text>}
-     """
-     if catalog_snapshot is None:
-         from app.db import get_catalog_snapshot  # local import to avoid cycles
-         catalog_snapshot = get_catalog_snapshot()
-
-     user = sql_user_prompt(
-         question=question,
-         catalog_snapshot=catalog_snapshot or [],
-         allowed_tables=allowed_tables,
-     )
-     return {"system": SQL_PROMPT_SYSTEM, "user": user}
-
- import re
- import sqlparse
- from sqlalchemy import text
- from app.db import engine, get_catalog_snapshot
- from app.cleaning.llm import LLM
- from app.cleaning.prompts import SQL_PROMPT_SYSTEM, sql_user_prompt
- from app.config import settings
-
-
- def is_safe_select(sql: str) -> bool:
-     parsed = sqlparse.parse(sql)
-     if not parsed:
-         return False
-     stmt = parsed[0]
-     if stmt.get_type() != "SELECT":
-         return False
-     # crude safeguards
-     forbidden = re.compile(r'\b(INSERT|UPDATE|DELETE|DROP|TRUNCATE|ALTER|CREATE|GRANT|REVOKE)\b', re.I)
-     return not forbidden.search(sql)
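A quick sanity sketch of what the guard accepts and rejects:

    assert is_safe_select("SELECT 1")
    assert not is_safe_select('DROP TABLE "public"."tblTypes"')
    # The keyword scan is deliberately crude: a SELECT that merely mentions
    # DELETE inside a string literal would also be rejected.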
-
-
- def _quote_schema_identifiers(sql: str) -> str:
-     """Quote occurrences of <schema>.<identifier> where not already quoted.
-     This fixes case-sensitivity issues when the LLM emits MixedCase table names.
-     """
-     import re as _re
-     schema = settings.POSTGRES_SOURCE_SCHEMAS
-     # Quote occurrences like normalizeddata.tblTypes -> "normalizeddata"."tblTypes"
-     pat = _re.compile(rf'(?<!\")\b{_re.escape(schema)}\.(?!\")(\w+)', _re.IGNORECASE)
-     sql = pat.sub(lambda m: f'"{schema}"."{m.group(1)}"', sql)
-     # Also quote public.<identifier> forms
-     pat_pub = _re.compile(r'(?<!\")\bpublic\.(?!\")(\w+)', _re.IGNORECASE)
-     sql = pat_pub.sub(lambda m: f'"public"."{m.group(1)}"', sql)
-     return sql
-
-
- def _quote_alias_columns(sql: str) -> str:
-     """Quote occurrences of alias.column where column isn't already quoted or a wildcard.
-     Keeps alias as-is and wraps the column with quotes to preserve case.
-     """
-     import re as _re
-     # Skip cases like alias."Column" and alias.*
-     pat = _re.compile(r'\b([A-Za-z_][\w]*)\.(?!\")(\w+)')
-     reserved = {"public", settings.POSTGRES_SOURCE_SCHEMAS.lower(), "pg_catalog", "information_schema"}
-
-     def _repl(m: _re.Match[str]) -> str:
-         alias = m.group(1)
-         col = m.group(2)
-         if alias.lower() in reserved:
-             return m.group(0)
-         return f'{alias}."{col}"'
-
-     return pat.sub(_repl, sql)
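Taken together, the two helpers rewrite unquoted identifiers so Postgres preserves their case; a small sketch of the intended transformation, using the identifiers named in the comments above:

    sql = "SELECT e.ElementID FROM public.tblElements e"
    sql = _quote_schema_identifiers(sql)  # FROM "public"."tblElements" e
    sql = _quote_alias_columns(sql)       # SELECT e."ElementID" ...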
-
-
- def nl_to_sql(question: str) -> str:
-     catalog = get_catalog_snapshot()
-     prompt = sql_user_prompt(question, catalog)
-     llm = LLM()
-     if not llm.enabled():
-         raise RuntimeError("LLM disabled: set LLM_PROVIDER in .env to enable NL→SQL.")
-     sql = llm.nl_to_sql(SQL_PROMPT_SYSTEM, prompt)
-     # strip code fences if any
-     sql = sql.strip().strip("`")
-     if sql.startswith("sql"):
-         sql = sql[3:].strip()
-     if "```" in sql:
-         sql = re.sub(r"```sql|```", "", sql, flags=re.I).strip()
-     # Quote schema identifiers to preserve MixedCase table names
-     sql = _quote_schema_identifiers(sql)
-     # Quote alias columns (e.g., e.ElementID -> e."ElementID")
-     sql = _quote_alias_columns(sql)
-     if not is_safe_select(sql):
-         raise ValueError("Generated SQL failed safety checks (SELECT-only required).")
-     return sql
-
-
- def run_sql(sql: str, limit: int = 200):
-     # enforce a hard cap to avoid huge result sets
-     sql_wrapped = f"SELECT * FROM ({sql}) AS sub LIMIT {limit}"
      with engine.begin() as cx:
-         rows = cx.execute(text(sql_wrapped)).mappings().all()
-     return [dict(r) for r in rows]
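A sketch of the wrapping run_sql applies (the query text is illustrative):

    rows = run_sql('SELECT t."TypeID" FROM "public"."tblTypes" t', limit=50)
    # Executes: SELECT * FROM (SELECT t."TypeID" FROM "public"."tblTypes" t) AS sub LIMIT 50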
- import re
- from typing import List, Tuple, Dict, Set
-
- from sqlalchemy import text
- from app.config import settings
-
- from app.db import engine, get_catalog_snapshot
- from app.cleaning.llm import LLM
- from app.cleaning.prompts import SQL_PROMPT_SYSTEM, sql_user_prompt
-
-
- def _strip_code_fences(s: str) -> str:
-     s = s.strip()
-     s = re.sub(r"^```sql\s*|\s*```$", "", s, flags=re.I | re.M)
-     return s.strip("`").strip()
-
-
- EXTRA_GUIDANCE = """
- Requirements:
- - Search across cleaned columns only (use *_clean):
-   Types(TypeName_clean, FamilyName_clean), Elements(ElementName_clean),
-   Assembly(AssemblyName_clean), Materials(Name_clean), Views(ViewName_clean),
-   ModelDetails(ModelName_clean), ModelRoomDetails(RoomName_clean),
-   ModelFloorPlanMapping(FloorPlanName_clean), GeometryDataNew(GeometryFileName_clean).
- - Prefer normalized units (e.g., 4 in). Treat 4", 4 inch, 4 inches as 4 in.
- - Match singular/plural (e.g., wall/walls, elbow/elbows) using robust ILIKE patterns.
- - Follow relationships to Types to return the correct TypeID when searching non-Type tables.
- - Final output should be a single SELECT that includes a column aliased exactly as TypeID
-   (e.g., SELECT DISTINCT t."TypeID" AS TypeID, ...). Avoid returning other columns.
- """
-
-
- def _is_select(sql: str) -> bool:
-     return bool(sql and sql.strip().lower().startswith("select"))
 
-
- def _quote_schema_identifiers(sql: str) -> str:
-     """Quote occurrences of <schema>.<identifier> where not already quoted.
-     This helps when the LLM emits MixedCase table names without quotes.
-     """
-     schema = settings.POSTGRES_SOURCE_SCHEMAS
-     # Quote occurrences like normalizeddata.tblTypes -> "normalizeddata"."tblTypes"
-     pat = re.compile(rf'(?<!")\b{re.escape(schema)}\.(?!")(\w+)', re.IGNORECASE)
-     sql = pat.sub(lambda m: f'"{schema}"."{m.group(1)}"', sql)
-     # Also quote public.<identifier> forms
-     pat_pub = re.compile(r'(?<!")\bpublic\.(?!")(\w+)', re.IGNORECASE)
-     sql = pat_pub.sub(lambda m: f'"public"."{m.group(1)}"', sql)
-     return sql
-
-
- def nl_to_typeids(question: str) -> Tuple[List[int], str]:
-     """Generate SQL via LLM that returns DISTINCT TypeID for the user question
-     and execute it, returning a list of TypeID values.
-     """
-     # Build a focused prompt with columns and relations
-     catalog = get_catalog_snapshot()
-     user = sql_user_prompt(question, catalog_snapshot=catalog)
-     user += "\n\n" + EXTRA_GUIDANCE
-
-     llm = LLM()
-     if not llm.enabled():
-         raise RuntimeError("LLM disabled: set LLM_PROVIDER in .env to enable NL→SQL.")
-
-     sql = llm.nl_to_sql(SQL_PROMPT_SYSTEM, user)
-     sql = _strip_code_fences(sql)
-     sql = _quote_schema_identifiers(sql)
-     if not _is_select(sql):
-         raise ValueError("LLM did not return a SELECT statement.")
-
-     # Execute LLM SQL directly and extract any column that looks like TypeID
-     try:
-         with engine.begin() as cx:
-             # Favor normalizeddata then public on the search_path for convenience
-             try:
-                 cx.execute(text(
-                     f"SET LOCAL search_path TO \"{settings.POSTGRES_SOURCE_SCHEMAS}\", " + ",".join(settings.source_schemas)
-                 ))
-             except Exception:
-                 pass
-             rows = cx.execute(text(sql)).mappings().all()
-
-         if not rows:
-             return ([], sql)
-
-         # Find a key resembling TypeID (case-insensitive, underscores allowed)
-         keys = list(rows[0].keys())
-
-         def is_typeid(k: str) -> bool:
-             return bool(re.fullmatch(r"(?i)type[_ ]?id", k))
-
-         cand = None
-         for k in keys:
-             if is_typeid(k):
-                 cand = k
-                 break
-         if cand is None:
-             for k in keys:
-                 if k.lower() in ("typeid", "type_id"):
-                     cand = k
-                     break
-         if cand is None:
-             return ([], sql)
-
-         vals: set[int] = set()
-         for r in rows:
-             v = r.get(cand)
-             if v is None:
-                 continue
-             try:
-                 vals.add(int(v))
-             except Exception:
-                 try:
-                     vals.add(int(str(v).strip()))
-                 except Exception:
-                     continue
-         return (sorted(vals), sql)
-     except Exception:
-         # Fallback: build a deterministic UNION-based search using only verified joins/columns
-         return _fallback_union_search(question)
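Callers get back both the TypeIDs and the SQL that produced them; a usage sketch (the question is illustrative):

    type_ids, generated_sql = nl_to_typeids("4 inch elbows")
    # type_ids is a sorted list of distinct ints; generated_sql is the SELECT
    # that was executed (or the fallback UNION query built below).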
-
-
- def _fallback_union_search(question: str) -> Tuple[List[int], str]:
-     """Fallback SQL builder that unions TypeIDs discovered via safe, known paths."""
-     catalog = get_catalog_snapshot()
-     target = settings.POSTGRES_SOURCE_SCHEMAS
-     cmap: Dict[str, Dict[str, Set[str]]] = {}
-     for r in catalog:
-         schema = r["table_schema"]
-         table = r["table_name"]
-         col = r["column_name"]
-         cmap.setdefault(schema, {}).setdefault(table, set()).add(col)
-
-     def has(table: str, *cols: str, schema: str = target) -> bool:
-         return table in cmap.get(schema, {}) and all(c in cmap[schema][table] for c in cols)
-
-     def normalize_inches(txt: str) -> str:
-         t = txt
-         t = re.sub(r"(\d+)\s*(?:inches|inch|in|\")\b", r"\1 in", t, flags=re.I)
-         return t
-
-     qn = normalize_inches(question.lower())
-     tokens = []
-     for tok in re.findall(r"\d+\s+in|[a-zA-Z]+", qn):
-         tokens.append(tok.strip())
-     alts: Set[str] = set(tokens)
-     for t in list(tokens):
-         if len(t) > 2 and t.isalpha():
-             if t.endswith("s"):
-                 alts.add(t[:-1])
-             else:
-                 alts.add(t + "s")
-     pats = [f"%{t}%" for t in sorted(alts)]
-
-     def like_all(col: str) -> str:
-         return " AND ".join([f"{col} ILIKE :p{i}" for i in range(len(pats))]) if pats else "TRUE"
-
-     parts: List[str] = []
-
-     if has("tblTypes", "TypeID", "TypeName_clean"):
-         conds = like_all('t."TypeName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID FROM "{target}"."tblTypes" t WHERE {conds}'
-         )
-     if has("tblTypes", "TypeID", "FamilyName_clean"):
-         conds = like_all('t."FamilyName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID FROM "{target}"."tblTypes" t WHERE {conds}'
-         )
-
-     if has("tblElements", "TypeID", "ElementName_clean"):
-         conds = like_all('e."ElementName_clean"')
-         parts.append(
-             f'SELECT DISTINCT e."TypeID" AS TypeID FROM "{target}"."tblElements" e WHERE {conds}'
-         )
-
-     if has("tblTypes", "TypeID", "MaterialID") and has("tblMaterials", "MaterialID", "Name_clean"):
-         conds = like_all('m."Name_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblMaterials" m ON t."MaterialID" = m."MaterialID" '
-             f'WHERE {conds}'
-         )
-
-     if has("tblTypes", "TypeID", "ModelID") and has("tblModelDetails", "ModelID", "ModelName_clean"):
-         conds = like_all('md."ModelName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblModelDetails" md ON t."ModelID" = md."ModelID" '
-             f'WHERE {conds}'
-         )
-
-     if has("tblTypes", "TypeID", "ModelID") and has("tblViews", "ModelID", "ViewName_clean"):
-         conds = like_all('v."ViewName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblViews" v ON t."ModelID" = v."ModelID" '
-             f'WHERE {conds}'
-         )
-
-     if has("tblTypes", "TypeID", "ModelID") and has("tblAssembly", "ModelID", "AssemblyName_clean"):
-         conds = like_all('a."AssemblyName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblAssembly" a ON t."ModelID" = a."ModelID" '
-             f'WHERE {conds}'
          )
-
-     if has("tblTypes", "TypeID", "ModelID") and has("tblModelRoomDetails", "ModelID", "RoomName_clean"):
-         conds = like_all('mrd."RoomName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblModelRoomDetails" mrd ON t."ModelID" = mrd."ModelID" '
-             f'WHERE {conds}'
-         )
-
-     if has("tblTypes", "TypeID", "ModelID") and has("tblModelFloorPlanMapping", "ModelID", "FloorPlanName_clean"):
-         conds = like_all('mfpm."FloorPlanName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblModelFloorPlanMapping" mfpm ON t."ModelID" = mfpm."ModelID" '
-             f'WHERE {conds}'
-         )
-
-     if has("tblTypes", "TypeID", "ModelID") and has("tblGeometryDataNew", "ModelID", "GeometryFileName_clean"):
-         conds = like_all('g."GeometryFileName_clean"')
-         parts.append(
-             f'SELECT DISTINCT t."TypeID" AS TypeID '
-             f'FROM "{target}"."tblTypes" t '
-             f'JOIN "{target}"."tblGeometryDataNew" g ON t."ModelID" = g."ModelID" '
-             f'WHERE {conds}'
          )
-
-     if not parts:
-         return ([], "-- no valid search paths found")
-
-     sql = "\nUNION\n".join(parts)
-
-     params = {f"p{i}": pats[i] for i in range(len(pats))}
-     with engine.begin() as cx:
-         rows = cx.execute(text(sql), params).scalars().all()
-     # rows already ints
-     out = sorted({int(x) for x in rows if x is not None})
-     return (out, sql)
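A worked trace of the pattern expansion above (input illustrative):

    # normalize_inches('4" elbows')      -> '4 in elbows'
    # tokens                             -> ['4 in', 'elbows']
    # alts (plural/singular alternates)  -> {'4 in', 'elbow', 'elbows'}
    # pats                               -> ['%4 in%', '%elbow%', '%elbows%']
    # like_all('t."TypeName_clean"') requires every pattern to ILIKE-match.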
+ import json
+ from typing import Dict, List, Optional
+ import sys
+ from sqlalchemy import text
+ from sqlalchemy.engine import Result
+ from app.db import engine
  from app.config import settings
+ from app.cleaning.prompts import CLEAN_PROMPT
+ from app.cleaning.llm import LLM
+
+
+ """
+ IN-PLACE CLEANING PIPELINE (Original Logic, No Mirroring)
+
+ - Reads from SAME TABLE
+ - Writes back into SAME TABLE
+ - Adds <col>_clean columns if missing
+ - Cleans only up to clean_cap rows (default 20)
+ - Preserves original data completely
+ - No target schema, no cloned tables, no mirroring
+ """
+
+
+ MAX_ROWS_PER_TABLE = 20
+
+
+ # ----------------------------------------------------
+ # ADD <col>_clean columns into SAME TABLE
+ # ----------------------------------------------------
+ def ensure_clean_columns(schema: str, table: str, culprit_columns: List[str]):
+     if not culprit_columns:
+         return
+
+     with engine.begin() as cx:
+         for col in culprit_columns:
+             col_clean = f"{col}_clean"
+             sql = (
+                 f'ALTER TABLE "{schema}"."{table}" '
+                 f'ADD COLUMN IF NOT EXISTS "{col_clean}" TEXT'
+             )
+             cx.execute(text(sql))
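For the sample table named in the YAML comments at the bottom of this file, the emitted DDL would look like this (names illustrative):

    ensure_clean_columns("public", "test_products", ["title", "description"])
    # ALTER TABLE "public"."test_products" ADD COLUMN IF NOT EXISTS "title_clean" TEXT
    # ALTER TABLE "public"."test_products" ADD COLUMN IF NOT EXISTS "description_clean" TEXT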
+
+
+ # ----------------------------------------------------
+ # Count rows
+ # ----------------------------------------------------
+ def _count_source_rows(schema: str, table: str) -> int:
+     try:
+         with engine.begin() as cx:
+             row = cx.execute(
+                 text(f'SELECT COUNT(*) FROM "{schema}"."{table}"')
+             ).first()
+         return int(row[0])
+     except Exception:
+         return -1
+
+
+ # ----------------------------------------------------
+ # Stream rows from SAME TABLE
+ # ----------------------------------------------------
+ def stream_rows(schema: str, table: str, batch_size=1000):
+     sql = f'SELECT * FROM "{schema}"."{table}"'
+
+     with engine.begin() as cx:
+         result: Result = (
+             cx.execution_options(stream_results=True)
+             .execute(text(sql))
+         )
+
+         batch = []
+
+         for row in result.mappings():
+             batch.append(dict(row))
+
+             if len(batch) >= batch_size:
+                 yield batch
+                 batch = []
+
+         if batch:
+             yield batch
+
+
+ # ----------------------------------------------------
+ # Write back INTO SAME TABLE
+ # ----------------------------------------------------
+ def write_batch(schema: str, table: str, rows: List[Dict], pk_col: str):
+     if not rows:
+         return
+     print(f"rows : {rows}")
+     for r in rows:
+         # JSON-encode dict/list values so SQL can accept them
+         for k, v in list(r.items()):
+             if isinstance(v, (dict, list)):
+                 r[k] = json.dumps(v)
+
      with engine.begin() as cx:
+         for r in rows:
+             set_list = ", ".join(
+                 f'"{c}" = :{c}' for c in r.keys() if c != pk_col
+             )
+
+             sql = (
+                 f'UPDATE "{schema}"."{table}" '
+                 f'SET {set_list} '
+                 f'WHERE "{pk_col}" = :{pk_col}'
+             )
+
+             cx.execute(text(sql), r)
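A sketch of the UPDATE this emits for one row (names illustrative). Note that every non-PK column in the row is rewritten, not just the *_clean ones; the values are unchanged because the row came straight from the streamed SELECT:

    write_batch(
        "public", "test_products",
        [{"id": 1, "title": '4" elbw', "title_clean": "4 in elbow"}],
        pk_col="id",
    )
    # UPDATE "public"."test_products"
    # SET "title" = :title, "title_clean" = :title_clean
    # WHERE "id" = :id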
+
+
+ # ----------------------------------------------------
+ # LLM clean
+ # ----------------------------------------------------
+ def clean_value(llm: LLM, value: str) -> str:
+     if not llm.enabled():
+         return value
+
+     out = llm.clean_text(
+         value,
+         system=CLEAN_PROMPT,
+         instruction="Clean the following product text."
+     )
+     return out.strip() or value
+
+
+ # ----------------------------------------------------
+ # MAIN FUNCTION - identical logic to old pipeline
+ # ----------------------------------------------------
+ def run_clean_table(
+     schema: str,
+     table: str,
+     culprit_columns: List[str],
+     batch_size: int = 1000,
+     clean_cap: Optional[int] = None,
+     primary_key: Optional[str] = None,
+     clean_all: bool = False,
+ ):
+     if not primary_key:
+         raise ValueError("primary_key required")
+
+     llm = LLM()
+
+     # Ensure <col>_clean exists
+     ensure_clean_columns(schema, table, culprit_columns)  ## Adding cleaned columns if missing
+     total_rows = _count_source_rows(schema, table)  ## Count total rows in the table
+
+     cap = None if clean_all else (clean_cap or MAX_ROWS_PER_TABLE)
+
+     print(f"\n→ In-place cleaning {schema}.{table} (rows={total_rows}, cap={cap})")
+     sys.stdout.flush()
+
+     # Skip rows already cleaned
+     skip_pks = set()
+     if culprit_columns:
+         cond = " AND ".join([f'"{c}_clean" IS NOT NULL' for c in culprit_columns])
+         sql = (
+             f'SELECT "{primary_key}" FROM "{schema}"."{table}" '
+             f'WHERE {cond}'
+         )
+         try:
+             with engine.begin() as cx:
+                 rows = cx.execute(text(sql)).fetchall()
+             skip_pks = {r[0] for r in rows}
+         except Exception:
+             skip_pks = set()
+
+     rows_cleaned = 0
+     rows_processed = 0
+     skipped_existing = 0
+
+     # STREAM + CLEAN + UPDATE SAME TABLE
+     for rows in stream_rows(schema, table, batch_size=batch_size):
+         out_rows = []
+
+         for r in rows:
+             pk = r.get(primary_key)
+
+             if pk in skip_pks:
+                 skipped_existing += 1
+                 continue
+
+             will_clean = (cap is None) or (rows_cleaned < cap)
+
+             for col in culprit_columns:
+                 original = r.get(col)
+                 original_s = None if original is None else str(original)
+
+                 if will_clean:
+                     cleaned = clean_value(llm, original_s) if original_s else None
+                     r[f"{col}_clean"] = cleaned
+                 else:
+                     r[f"{col}_clean"] = None
+
+             if will_clean:
+                 rows_cleaned += 1
+
+             out_rows.append(r)
+
+         # Write back to same table
+         write_batch(schema, table, out_rows, pk_col=primary_key)
+         rows_processed += len(out_rows)
+
+         # progress
+         target = cap or total_rows
+         pct = int(min(rows_cleaned, target) * 100 / target)
+
+         print(
+             f"  {table}: cleaned {rows_cleaned}/{target} ({pct}%) "
+             f"| updated rows: {rows_processed} | skipped: {skipped_existing}"
          )
+         sys.stdout.flush()
+
+     print(
+         f"✓ DONE: {schema}.{table} in-place cleaned "
+         f"(cleaned={rows_cleaned}, skipped={skipped_existing})\n"
+     )
+
+
+ # ----------------------------------------------------
+ # YAML Loader
+ # ----------------------------------------------------
+ def run_cleaning_from_yaml(
+     yaml_path: str,
+     batch_size: int = 1000,
+     clean_cap: Optional[int] = None,
+     clean_all: bool = False,
+ ):
+     import yaml
+
+     with open(yaml_path, "r") as f:
+         cfg = yaml.safe_load(f)
+
+     for t in cfg.get("tables", []):
+         run_clean_table(
+             schema=t["schema"],                    ## public
+             table=t["name"],                       ## test_products
+             culprit_columns=t["culprit_columns"],  ## ["title", "description"]
+             batch_size=batch_size,                 ## 30
+             primary_key=t["primary_key"],          ## id
+             clean_cap=clean_cap,                   ## 10
+             clean_all=clean_all,                   ## True/False
          )
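For reference, a config shaped the way this loader expects, built from the sample values in the inline comments above (the path and values are illustrative):

    # cleaning.yaml
    #
    # tables:
    #   - schema: public
    #     name: test_products
    #     primary_key: id
    #     culprit_columns: [title, description]

    if __name__ == "__main__":
        # cap of 10 cleaned rows per table, batches of 30
        run_cleaning_from_yaml("cleaning.yaml", batch_size=30, clean_cap=10)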