|
|
""" |
|
|
Rule Engine — Deterministic Design System Analysis
|
|
=================================================== |
|
|
|
|
|
This module handles ALL calculations that don't need LLM reasoning: |
|
|
- Type scale detection |
|
|
- AA/AAA contrast checking |
|
|
- Algorithmic color fixes |
|
|
- Spacing grid detection |
|
|
- Color statistics and deduplication |
|
|
|
|
|
LLMs should ONLY be used for: |
|
|
- Brand color identification (requires context understanding) |
|
|
- Palette cohesion (subjective assessment) |
|
|
- Design maturity scoring (holistic evaluation) |
|
|
- Prioritized recommendations (business reasoning) |
|
|
""" |
|
|
|
|
|
import colorsys |
|
|
import re |
|
|
from dataclasses import dataclass, field |
|
|
from functools import reduce |
|
|
from math import gcd |
|
|
from typing import Optional |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class TypeScaleAnalysis:
    """Outcome of detecting a typographic scale from observed font sizes."""
    detected_ratio: float              # average ratio between adjacent sizes
    closest_standard_ratio: float      # nearest named modular-scale ratio
    scale_name: str                    # human name of that nearest scale
    is_consistent: bool                # whether step ratios cluster tightly
    variance: float                    # spread between largest/smallest step ratio
    sizes_px: list[float]              # distinct sizes found, ascending, in px
    ratios_between_sizes: list[float]  # the raw adjacent-size ratios
    recommendation: float              # ratio the engine recommends adopting
    recommendation_name: str           # human name of the recommended ratio
    base_size: float = 16.0            # chosen base font size in px

    def to_dict(self) -> dict:
        """Serialize for reporting; ratio/variance rounded to 3 decimals.

        Note: ratios_between_sizes is intentionally omitted from the payload.
        """
        payload = {
            "detected_ratio": round(self.detected_ratio, 3),
            "closest_standard_ratio": self.closest_standard_ratio,
            "scale_name": self.scale_name,
            "is_consistent": self.is_consistent,
            "variance": round(self.variance, 3),
            "sizes_px": self.sizes_px,
            "base_size": self.base_size,
        }
        payload["recommendation"] = self.recommendation
        payload["recommendation_name"] = self.recommendation_name
        return payload
|
|
|
|
|
|
|
|
@dataclass
class ColorAccessibility:
    """WCAG accessibility analysis for a single color.

    Contrast is measured against pure white and pure black; the AA/AAA
    flags reflect WCAG 2.1 thresholds (4.5:1 normal text, 3:1 large text,
    7:1 AAA normal text).
    """
    hex_color: str            # color under test, e.g. "#1a2b3c"
    name: str                 # token name or descriptive label
    contrast_on_white: float  # contrast ratio vs #ffffff
    contrast_on_black: float  # contrast ratio vs #000000
    passes_aa_normal: bool
    passes_aa_large: bool
    passes_aaa_normal: bool
    best_text_color: str      # "#ffffff" or "#000000", whichever contrasts more
    suggested_fix: Optional[str] = None             # AA-compliant replacement, if one was computed
    suggested_fix_contrast: Optional[float] = None  # contrast ratio achieved by the fix

    def to_dict(self) -> dict:
        """Serialize for reporting; contrast ratios rounded to 2 decimals."""
        return {
            "color": self.hex_color,
            "name": self.name,
            "contrast_white": round(self.contrast_on_white, 2),
            "contrast_black": round(self.contrast_on_black, 2),
            "aa_normal": self.passes_aa_normal,
            "aa_large": self.passes_aa_large,
            "aaa_normal": self.passes_aaa_normal,
            "best_text": self.best_text_color,
            "suggested_fix": self.suggested_fix,
            # Use an explicit None check, not truthiness, so a legitimate
            # 0.0 value would not be silently collapsed to None.
            "suggested_fix_contrast": round(self.suggested_fix_contrast, 2) if self.suggested_fix_contrast is not None else None,
        }
|
|
|
|
|
|
|
|
@dataclass
class SpacingGridAnalysis:
    """Outcome of inferring the spacing grid behind a set of spacing values."""
    detected_base: int             # GCD of all observed values, in px
    is_aligned: bool               # whether values sit on a sane grid
    alignment_percentage: float    # share of values on the detected grid
    misaligned_values: list[int]   # values off the detected grid
    recommendation: int            # grid base (px) the engine recommends
    recommendation_reason: str     # human explanation of the recommendation
    current_values: list[int]      # distinct observed values, ascending
    suggested_scale: list[int]     # concrete spacing scale built on the recommendation

    def to_dict(self) -> dict:
        """Serialize for reporting; alignment percentage rounded to 1 decimal."""
        return dict(
            detected_base=self.detected_base,
            is_aligned=self.is_aligned,
            alignment_percentage=round(self.alignment_percentage, 1),
            misaligned_values=self.misaligned_values,
            recommendation=self.recommendation,
            recommendation_reason=self.recommendation_reason,
            current_values=self.current_values,
            suggested_scale=self.suggested_scale,
        )
|
|
|
|
|
|
|
|
@dataclass
class ColorStatistics:
    """Summary statistics over a color palette."""
    total_count: int        # all hex colors seen (including repeats)
    unique_count: int       # distinct colors after lowercasing
    duplicate_count: int    # total minus unique
    gray_count: int         # low-saturation colors
    saturated_count: int    # colors with saturation above 0.3
    near_duplicates: list[tuple[str, str, float]]  # (color_a, color_b, distance)
    hue_distribution: dict[str, int]               # hue bucket name -> count

    def to_dict(self) -> dict:
        """Serialize for reporting; near-duplicates are reported as a count."""
        return dict(
            total=self.total_count,
            unique=self.unique_count,
            duplicates=self.duplicate_count,
            grays=self.gray_count,
            saturated=self.saturated_count,
            near_duplicates_count=len(self.near_duplicates),
            hue_distribution=self.hue_distribution,
        )
|
|
|
|
|
|
|
|
@dataclass
class RuleEngineResults:
    """Aggregated output of every rule-based check."""
    typography: TypeScaleAnalysis
    accessibility: list[ColorAccessibility]
    spacing: SpacingGridAnalysis
    color_stats: ColorStatistics
    aa_failures: int       # count of AA-normal failures in `accessibility`
    consistency_score: int  # 0-100 composite score

    def to_dict(self) -> dict:
        """Serialize the full result set.

        "accessibility" carries only the AA-normal failures;
        "accessibility_all" carries every analyzed entry.
        """
        failing = [entry.to_dict() for entry in self.accessibility if not entry.passes_aa_normal]
        everything = [entry.to_dict() for entry in self.accessibility]
        return {
            "typography": self.typography.to_dict(),
            "accessibility": failing,
            "accessibility_all": everything,
            "spacing": self.spacing.to_dict(),
            "color_stats": self.color_stats.to_dict(),
            "summary": {
                "aa_failures": self.aa_failures,
                "consistency_score": self.consistency_score,
            },
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def hex_to_rgb(hex_color: str) -> tuple[int, int, int]:
    """Parse a hex color string ("#abc" or "#aabbcc") into an (r, g, b) tuple."""
    digits = hex_color.lstrip('#')
    if len(digits) == 3:
        # Expand shorthand notation: each nibble doubles, e.g. "f80" -> "ff8800".
        digits = digits[0] * 2 + digits[1] * 2 + digits[2] * 2
    red = int(digits[0:2], 16)
    green = int(digits[2:4], 16)
    blue = int(digits[4:6], 16)
    return (red, green, blue)
|
|
|
|
|
|
|
|
def rgb_to_hex(r: int, g: int, b: int) -> str:
    """Format RGB components (clamped into 0-255) as a lowercase "#rrggbb" string."""
    clamped = (min(255, max(0, channel)) for channel in (r, g, b))
    return "#" + "".join(f"{channel:02x}" for channel in clamped)
|
|
|
|
|
|
|
|
def get_relative_luminance(hex_color: str) -> float:
    """Relative luminance (0.0-1.0) of a color, per the WCAG 2.1 definition."""
    weights = (0.2126, 0.7152, 0.0722)
    total = 0.0
    for weight, channel in zip(weights, hex_to_rgb(hex_color)):
        srgb = channel / 255
        # Piecewise sRGB -> linear-light transfer function from the WCAG spec.
        if srgb <= 0.03928:
            linear = srgb / 12.92
        else:
            linear = ((srgb + 0.055) / 1.055) ** 2.4
        total += weight * linear
    return total
|
|
|
|
|
|
|
|
def get_contrast_ratio(color1: str, color2: str) -> float:
    """WCAG contrast ratio between two colors (ranges 1.0 through 21.0)."""
    lum_a = get_relative_luminance(color1)
    lum_b = get_relative_luminance(color2)
    if lum_a < lum_b:
        lum_a, lum_b = lum_b, lum_a
    # (L_lighter + 0.05) / (L_darker + 0.05), per WCAG 2.1.
    return (lum_a + 0.05) / (lum_b + 0.05)
|
|
|
|
|
|
|
|
def is_gray(hex_color: str, threshold: float = 0.1) -> bool:
    """True when the color's HSV saturation falls below *threshold*."""
    red, green, blue = hex_to_rgb(hex_color)
    _, saturation, _ = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
    return saturation < threshold
|
|
|
|
|
|
|
|
def get_saturation(hex_color: str) -> float:
    """HSV saturation (0.0-1.0) of the given hex color."""
    red, green, blue = hex_to_rgb(hex_color)
    return colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)[1]
|
|
|
|
|
|
|
|
def get_hue_name(hex_color: str) -> str:
    """Map a color to a coarse human-readable hue bucket ("red", "gray", ...)."""
    red, green, blue = hex_to_rgb(hex_color)
    hue, saturation, _ = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)

    # Desaturated colors read as gray regardless of their hue angle.
    if saturation < 0.1:
        return "gray"

    degrees = hue * 360

    # Red wraps around 0 degrees, so handle it before the ordered buckets.
    if degrees < 15 or degrees >= 345:
        return "red"
    for upper_bound, label in (
        (45, "orange"),
        (75, "yellow"),
        (150, "green"),
        (210, "cyan"),
        (270, "blue"),
        (315, "purple"),
        (345, "pink"),
    ):
        if degrees < upper_bound:
            return label
    return "pink"  # unreachable (degrees < 345 here); keeps the function total
|
|
|
|
|
|
|
|
def color_distance(hex1: str, hex2: str) -> float:
    """Normalized Euclidean RGB distance (0 = identical, 1 = maximally far apart)."""
    first = hex_to_rgb(hex1)
    second = hex_to_rgb(hex2)
    squared_sum = 0.0
    for a, b in zip(first, second):
        delta = (a - b) / 255
        squared_sum += delta ** 2
    # Divide by sqrt(3) so black-vs-white comes out at exactly 1.0.
    return squared_sum ** 0.5 / (3 ** 0.5)
|
|
|
|
|
|
|
|
def darken_color(hex_color: str, factor: float) -> str:
    """Scale each RGB channel toward black by *factor* (0 = unchanged, 1 = black)."""
    scaled = [int(channel * (1 - factor)) for channel in hex_to_rgb(hex_color)]
    return rgb_to_hex(*scaled)
|
|
|
|
|
|
|
|
def lighten_color(hex_color: str, factor: float) -> str:
    """Move each RGB channel toward white by *factor* (0 = unchanged, 1 = white)."""
    lifted = [int(channel + (255 - channel) * factor) for channel in hex_to_rgb(hex_color)]
    return rgb_to_hex(*lifted)
|
|
|
|
|
|
|
|
def find_aa_compliant_color(hex_color: str, background: str = "#ffffff", target_contrast: float = 4.5) -> str:
    """
    Algorithmically adjust a color until it meets AA contrast requirements.

    The color is nudged toward black or white — whichever moves it away
    from the background's luminance — in 1% steps. If a full sweep in that
    direction never reaches *target_contrast* (possible on mid-tone
    backgrounds), the opposite direction is tried as well.

    Args:
        hex_color: Color to adjust, e.g. "#777777".
        background: Background to contrast against (default white).
        target_contrast: Minimum acceptable WCAG ratio (4.5 = AA normal text).

    Returns:
        The original color if it already passes; otherwise the first
        adjusted color that passes, or the best-contrast candidate seen
        if no adjustment reaches the target.
    """
    current_contrast = get_contrast_ratio(hex_color, background)

    if current_contrast >= target_contrast:
        return hex_color

    best_color = hex_color
    best_contrast = current_contrast

    def sweep(darken: bool) -> Optional[str]:
        """Try 1%..100% adjustments in one direction.

        Returns the first candidate that reaches the target, else None.
        Tracks the best candidate seen via the enclosing best_* variables.
        """
        nonlocal best_color, best_contrast
        for step in range(1, 101):
            factor = step / 100
            candidate = darken_color(hex_color, factor) if darken else lighten_color(hex_color, factor)
            candidate_contrast = get_contrast_ratio(candidate, background)
            if candidate_contrast >= target_contrast:
                return candidate
            if candidate_contrast > best_contrast:
                best_contrast = candidate_contrast
                best_color = candidate
        return None

    # Darken when the color is at least as luminous as the background,
    # otherwise lighten — move away from the background's luminance.
    should_darken = get_relative_luminance(hex_color) >= get_relative_luminance(background)

    found = sweep(should_darken)
    if found is not None:
        return found

    # Preferred direction exhausted without success: try the other way.
    found = sweep(not should_darken)
    if found is not None:
        return found

    # Nothing reached the target; return the closest we got.
    return best_color
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Canonical modular type-scale ratios (ratio -> musical-interval name).
# analyze_type_scale() snaps a detected ratio to the nearest key here.
STANDARD_SCALES = {
    1.067: "Minor Second",
    1.125: "Major Second",
    1.200: "Minor Third",
    1.250: "Major Third",
    1.333: "Perfect Fourth",
    1.414: "Augmented Fourth",
    1.500: "Perfect Fifth",
    1.618: "Golden Ratio",
    2.000: "Octave",
}
|
|
|
|
|
|
|
|
def parse_size_to_px(size: str) -> Optional[float]:
    """Convert a CSS size (px/rem/em or bare number) to pixels.

    Numeric inputs are returned as-is (treated as already being pixels).
    rem/em values are multiplied by a 16px base. Unknown units fall back
    to treating the numeric part as pixels.

    Args:
        size: A size value such as "16px", "1.5rem", "2em", 12, or "20".

    Returns:
        The pixel value as a float, or None when no number is present.
    """
    if isinstance(size, (int, float)):
        return float(size)

    size = str(size).strip().lower()

    # Require at least one digit so inputs like "." or "auto" return None
    # instead of raising ValueError from float("."), which the previous
    # pattern r'([\d.]+)' allowed.
    match = re.search(r'(\d+(?:\.\d+)?|\.\d+)', size)
    if not match:
        return None

    value = float(match.group(1))

    # Check 'rem' before 'em': "rem" contains "em" as a substring.
    if 'rem' in size:
        return value * 16  # assumes a 16px root font size
    if 'em' in size:
        return value * 16  # assumes a 16px parent font size — TODO confirm
    # 'px', bare numbers, and unknown units: the number already is pixels.
    return value
|
|
|
|
|
|
|
|
def analyze_type_scale(typography_tokens: dict) -> TypeScaleAnalysis:
    """
    Analyze typography tokens to detect type scale ratio.

    Args:
        typography_tokens: Dict of typography tokens with font_size
            (dicts may use 'font_size', 'fontSize', or 'size'; objects
            are read via their 'font_size' attribute)

    Returns:
        TypeScaleAnalysis with detected ratio and recommendations
    """
    # Gather every parseable, positive font size from the tokens.
    sizes = []
    for name, token in typography_tokens.items():
        if isinstance(token, dict):
            size = token.get("font_size") or token.get("fontSize") or token.get("size")
        else:
            size = getattr(token, "font_size", None)

        if size:
            px = parse_size_to_px(size)
            if px and px > 0:
                sizes.append(px)

    # Deduplicate and sort ascending so adjacent pairs yield step ratios.
    sizes_px = sorted(set(sizes))

    if len(sizes_px) < 2:
        # Fewer than two distinct sizes: no ratio can be derived. Report
        # "Unknown" with the Major Third (1.25) default recommendation.
        base_size = sizes_px[0] if sizes_px else 16.0
        return TypeScaleAnalysis(
            detected_ratio=1.0,
            closest_standard_ratio=1.25,
            scale_name="Unknown",
            is_consistent=False,
            variance=0,
            sizes_px=sizes_px,
            ratios_between_sizes=[],
            recommendation=1.25,
            recommendation_name="Major Third",
            base_size=base_size,
        )

    # Ratios between consecutive sizes; values outside (1.0, 3.0) are
    # discarded as outliers.
    ratios = []
    for i in range(len(sizes_px) - 1):
        if sizes_px[i] > 0:
            ratio = sizes_px[i + 1] / sizes_px[i]
            if 1.0 < ratio < 3.0:
                ratios.append(ratio)

    if not ratios:
        # Every ratio was an outlier: same "Unknown" fallback, but pick a
        # base size nearest 16px, preferring candidates in the 14-18px band.
        base_candidates = [s for s in sizes_px if 14 <= s <= 18]
        base_size = min(base_candidates, key=lambda x: abs(x - 16)) if base_candidates else (min(sizes_px, key=lambda x: abs(x - 16)) if sizes_px else 16.0)
        return TypeScaleAnalysis(
            detected_ratio=1.0,
            closest_standard_ratio=1.25,
            scale_name="Unknown",
            is_consistent=False,
            variance=0,
            sizes_px=sizes_px,
            ratios_between_sizes=[],
            recommendation=1.25,
            recommendation_name="Major Third",
            base_size=base_size,
        )

    avg_ratio = sum(ratios) / len(ratios)

    # "Variance" here is the spread (max - min) of step ratios; a spread
    # under 0.15 counts as a consistent scale.
    variance = max(ratios) - min(ratios) if ratios else 0
    is_consistent = variance < 0.15

    # Snap the average ratio to the nearest named modular scale.
    closest_scale = min(STANDARD_SCALES.keys(), key=lambda x: abs(x - avg_ratio))
    scale_name = STANDARD_SCALES[closest_scale]

    # Base size selection: prefer an exact 16px, then the 14-18px value
    # nearest 16, then whatever observed size is nearest 16 overall.
    base_candidates = [s for s in sizes_px if 14 <= s <= 18]
    if base_candidates:
        if 16 in base_candidates:
            base_size = 16.0
        else:
            base_size = min(base_candidates, key=lambda x: abs(x - 16))
    elif sizes_px:
        base_size = min(sizes_px, key=lambda x: abs(x - 16))
    else:
        base_size = 16.0

    # Recommend the detected scale only when it is internally consistent
    # AND close (< 0.05) to a named ratio; otherwise suggest Major Third.
    if is_consistent and abs(avg_ratio - closest_scale) < 0.05:
        recommendation = closest_scale
        recommendation_name = scale_name
    else:
        recommendation = 1.25
        recommendation_name = "Major Third"

    return TypeScaleAnalysis(
        detected_ratio=avg_ratio,
        closest_standard_ratio=closest_scale,
        scale_name=scale_name,
        is_consistent=is_consistent,
        variance=variance,
        sizes_px=sizes_px,
        ratios_between_sizes=ratios,
        recommendation=recommendation,
        recommendation_name=recommendation_name,
        base_size=base_size,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_accessibility(color_tokens: dict, fg_bg_pairs: Optional[list[dict]] = None) -> list[ColorAccessibility]:
    """
    Analyze all colors for WCAG accessibility compliance.

    Args:
        color_tokens: Dict of color tokens with value/hex
        fg_bg_pairs: Optional list of actual foreground/background pairs
            extracted from the DOM (each dict has 'foreground',
            'background', 'element' keys).

    Returns:
        List of ColorAccessibility results
    """
    results = []

    # Pass 1: each palette color checked against pure white and pure black.
    for name, token in color_tokens.items():
        if isinstance(token, dict):
            hex_color = token.get("value") or token.get("hex") or token.get("color")
        else:
            hex_color = getattr(token, "value", None)

        # Skip missing or non-hex values (named colors, gradients, etc.).
        if not hex_color or not hex_color.startswith("#"):
            continue

        try:
            contrast_white = get_contrast_ratio(hex_color, "#ffffff")
            contrast_black = get_contrast_ratio(hex_color, "#000000")

            # A color "passes" if it works on at least one of the two extremes.
            passes_aa_normal = contrast_white >= 4.5 or contrast_black >= 4.5
            passes_aa_large = contrast_white >= 3.0 or contrast_black >= 3.0
            passes_aaa_normal = contrast_white >= 7.0 or contrast_black >= 7.0

            best_text = "#ffffff" if contrast_white > contrast_black else "#000000"

            # A replacement is only computed for AA-normal failures, and it
            # is always computed against a white background here.
            suggested_fix = None
            suggested_fix_contrast = None

            if not passes_aa_normal:
                suggested_fix = find_aa_compliant_color(hex_color, "#ffffff", 4.5)
                suggested_fix_contrast = get_contrast_ratio(suggested_fix, "#ffffff")

            results.append(ColorAccessibility(
                hex_color=hex_color,
                name=name,
                contrast_on_white=contrast_white,
                contrast_on_black=contrast_black,
                passes_aa_normal=passes_aa_normal,
                passes_aa_large=passes_aa_large,
                passes_aaa_normal=passes_aaa_normal,
                best_text_color=best_text,
                suggested_fix=suggested_fix,
                suggested_fix_contrast=suggested_fix_contrast,
            ))
        except Exception:
            # Malformed hex values (e.g. wrong length) are skipped rather
            # than aborting the whole scan.
            continue

    # Pass 2: real on-page foreground/background combinations, when provided.
    # Failing pairs are appended with a "fg:" name prefix, which downstream
    # code (run_rule_engine) uses to distinguish them from palette entries.
    if fg_bg_pairs:
        for pair in fg_bg_pairs:
            fg = pair.get("foreground", "").lower()
            bg = pair.get("background", "").lower()
            element = pair.get("element", "")
            if not (fg.startswith("#") and bg.startswith("#")):
                continue

            # Identical fg/bg pairs are skipped — presumably decorative,
            # not readable text; verify against the extractor.
            if fg == bg:
                continue
            try:
                ratio = get_contrast_ratio(fg, bg)

                # Near-identical pairs (< 1.1:1) are also skipped —
                # NOTE(review): looks like a filter for overlays/placeholders;
                # confirm the intent of this threshold.
                if ratio < 1.1:
                    continue
                if ratio < 4.5:
                    # Only failing pairs are recorded; the fix is computed
                    # against the pair's actual background.
                    fix = find_aa_compliant_color(fg, bg, 4.5)
                    fix_contrast = get_contrast_ratio(fix, bg)
                    results.append(ColorAccessibility(
                        hex_color=fg,
                        name=f"fg:{fg} on bg:{bg} ({element}) [{ratio:.1f}:1]",
                        contrast_on_white=get_contrast_ratio(fg, "#ffffff"),
                        contrast_on_black=get_contrast_ratio(fg, "#000000"),
                        passes_aa_normal=False,
                        passes_aa_large=ratio >= 3.0,
                        passes_aaa_normal=False,
                        best_text_color="#ffffff" if get_contrast_ratio(fg, "#ffffff") > get_contrast_ratio(fg, "#000000") else "#000000",
                        suggested_fix=fix,
                        suggested_fix_contrast=fix_contrast,
                    ))
            except Exception:
                continue

    return results
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_spacing_grid(spacing_tokens: dict) -> SpacingGridAnalysis:
    """
    Detect the base spacing grid behind a set of spacing tokens.

    Args:
        spacing_tokens: Dict of spacing tokens carrying value_px or value.

    Returns:
        SpacingGridAnalysis describing the detected base and a recommendation.
    """
    # Pull every positive integer pixel value out of the tokens.
    pixel_values = []
    for _, token in spacing_tokens.items():
        if isinstance(token, dict):
            raw = token.get("value_px") or token.get("value")
        else:
            raw = getattr(token, "value_px", None) or getattr(token, "value", None)

        if not raw:
            continue
        try:
            parsed = int(float(str(raw).replace('px', '')))
        except (ValueError, TypeError):
            continue
        if parsed > 0:
            pixel_values.append(parsed)

    if not pixel_values:
        # Nothing usable: fall back to the conventional 8px grid.
        return SpacingGridAnalysis(
            detected_base=8,
            is_aligned=False,
            alignment_percentage=0,
            misaligned_values=[],
            recommendation=8,
            recommendation_reason="No spacing values detected, defaulting to 8px grid",
            current_values=[],
            suggested_scale=[0, 4, 8, 12, 16, 20, 24, 32, 40, 48, 64],
        )

    values = sorted(set(pixel_values))

    # The GCD of all values is the largest grid unit they all share.
    detected_base = reduce(gcd, values)

    on_4px_grid = all(v % 4 == 0 for v in values)
    on_8px_grid = all(v % 8 == 0 for v in values)

    # NOTE(review): every value is divisible by the collective GCD by
    # definition, so this list is empty whenever detected_base > 1 and is
    # the whole list when it is 1 — confirm that is the intended semantics.
    misaligned = values if detected_base <= 1 else [v for v in values if v % detected_base != 0]

    alignment_percentage = (len(values) - len(misaligned)) / len(values) * 100 if values else 0

    if on_8px_grid:
        recommendation = 8
        recommendation_reason = "All values already align to 8px grid"
        is_aligned = True
    elif on_4px_grid:
        recommendation = 4
        recommendation_reason = "Values align to 4px grid (consider 8px for simpler system)"
        is_aligned = True
    elif detected_base in (4, 8):
        recommendation = detected_base
        recommendation_reason = f"Detected {detected_base}px base with {alignment_percentage:.0f}% alignment"
        is_aligned = alignment_percentage >= 80
    else:
        recommendation = 8
        recommendation_reason = f"Inconsistent spacing detected (GCD={detected_base}), recommend 8px grid"
        is_aligned = False

    # Build a concrete scale from the recommended base, keeping only
    # whole-pixel multiples (half-steps drop out for odd bases).
    multipliers = [0.5, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10, 12, 16]
    candidates = [recommendation * m for m in multipliers]
    suggested_scale = sorted({0} | {int(c) for c in candidates if c == int(c)})

    return SpacingGridAnalysis(
        detected_base=detected_base,
        is_aligned=is_aligned,
        alignment_percentage=alignment_percentage,
        misaligned_values=misaligned,
        recommendation=recommendation,
        recommendation_reason=recommendation_reason,
        current_values=values,
        suggested_scale=suggested_scale,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_color_statistics(color_tokens: dict, similarity_threshold: float = 0.05) -> ColorStatistics:
    """
    Compute summary statistics over a color palette.

    Args:
        color_tokens: Dict of color tokens.
        similarity_threshold: Perceptual distance (0-1) below which two
            distinct colors count as near-duplicates.

    Returns:
        ColorStatistics describing the palette.
    """
    # Collect every hex color, lowercased so case differences collapse.
    colors = []
    for _, token in color_tokens.items():
        if isinstance(token, dict):
            value = token.get("value") or token.get("hex")
        else:
            value = getattr(token, "value", None)

        if value and value.startswith("#"):
            colors.append(value.lower())

    unique_colors = list(set(colors))

    gray_total = sum(1 for c in unique_colors if is_gray(c))
    saturated_total = sum(1 for c in unique_colors if get_saturation(c) > 0.3)

    # Pairwise scan for distinct-but-visually-close colors.
    near_duplicates = []
    for index, first in enumerate(unique_colors):
        for second in unique_colors[index + 1:]:
            distance = color_distance(first, second)
            if 0 < distance < similarity_threshold:
                near_duplicates.append((first, second, round(distance, 4)))

    # Count colors per coarse hue bucket.
    hue_distribution = {}
    for color in unique_colors:
        bucket = get_hue_name(color)
        hue_distribution[bucket] = hue_distribution.get(bucket, 0) + 1

    return ColorStatistics(
        total_count=len(colors),
        unique_count=len(unique_colors),
        duplicate_count=len(colors) - len(unique_colors),
        gray_count=gray_total,
        saturated_count=saturated_total,
        near_duplicates=near_duplicates,
        hue_distribution=hue_distribution,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_rule_engine(
    typography_tokens: dict,
    color_tokens: dict,
    spacing_tokens: dict,
    radius_tokens: Optional[dict] = None,
    shadow_tokens: Optional[dict] = None,
    log_callback: Optional[callable] = None,
    fg_bg_pairs: Optional[list[dict]] = None,
) -> RuleEngineResults:
    """
    Run complete rule-based analysis on design tokens.

    This is FREE (no LLM costs) and handles all deterministic calculations.

    Args:
        typography_tokens: Dict of typography tokens
        color_tokens: Dict of color tokens
        spacing_tokens: Dict of spacing tokens
        radius_tokens: Dict of border radius tokens (optional; currently unused)
        shadow_tokens: Dict of shadow tokens (optional; currently unused)
        log_callback: Function to log messages
        fg_bg_pairs: Optional real foreground/background pairs from the DOM,
            forwarded to analyze_accessibility()

    Returns:
        RuleEngineResults with all analysis data
    """
    # Logging is a no-op when no callback was supplied.
    def log(msg: str):
        if log_callback:
            log_callback(msg)

    log("")
    log("β" * 60)
    log("βοΈ LAYER 1: RULE ENGINE (FREE - $0.00)")
    log("β" * 60)
    log("")

    # --- Typography: detect the type-scale ratio --------------------------
    log("  π TYPE SCALE ANALYSIS")
    log("  " + "β" * 40)
    typography = analyze_type_scale(typography_tokens)

    consistency_icon = "β" if typography.is_consistent else "β οΈ"
    log(f"  ββ Detected Ratio: {typography.detected_ratio:.3f}")
    log(f"  ββ Closest Standard: {typography.scale_name} ({typography.closest_standard_ratio})")
    log(f"  ββ Consistent: {consistency_icon} {'Yes' if typography.is_consistent else f'No (variance: {typography.variance:.2f})'}")
    log(f"  ββ Sizes Found: {typography.sizes_px}")
    log(f"  ββ π‘ Recommendation: {typography.recommendation} ({typography.recommendation_name})")
    log("")

    # --- Accessibility: WCAG AA/AAA contrast checks -----------------------
    log("  βΏ ACCESSIBILITY CHECK (WCAG AA/AAA)")
    log("  " + "β" * 40)
    accessibility = analyze_accessibility(color_tokens, fg_bg_pairs=fg_bg_pairs)

    # FG/BG pair entries are tagged with a "fg:" name prefix by
    # analyze_accessibility(); split the failures on that marker.
    pair_failures = [a for a in accessibility if not a.passes_aa_normal and a.name.startswith("fg:")]
    color_only_failures = [a for a in accessibility if not a.passes_aa_normal and not a.name.startswith("fg:")]
    failures = [a for a in accessibility if not a.passes_aa_normal]
    passes = len(accessibility) - len(failures)

    pair_count = len(fg_bg_pairs) if fg_bg_pairs else 0
    log(f"  ββ Colors Analyzed: {len(accessibility)}")
    log(f"  ββ FG/BG Pairs Checked: {pair_count}")
    log(f"  ββ AA Pass: {passes} β")
    log(f"  ββ AA Fail (color vs white/black): {len(color_only_failures)} {'β' if color_only_failures else 'β'}")
    log(f"  ββ AA Fail (real FG/BG pairs): {len(pair_failures)} {'β' if pair_failures else 'β'}")

    if color_only_failures:
        log("  β")
        log("  β  β οΈ FAILING COLORS (vs white/black):")
        # Cap the listing at five entries to keep the log readable.
        for i, f in enumerate(color_only_failures[:5]):
            fix_info = f" β π‘ Fix: {f.suggested_fix} ({f.suggested_fix_contrast:.1f}:1)" if f.suggested_fix else ""
            log(f"  β  ββ {f.name}: {f.hex_color} ({f.contrast_on_white:.1f}:1 on white){fix_info}")
        if len(color_only_failures) > 5:
            log(f"  β  ββ ... and {len(color_only_failures) - 5} more")

    if pair_failures:
        log("  β")
        log("  β  β FAILING FG/BG PAIRS (actual on-page combinations):")
        for i, f in enumerate(pair_failures[:5]):
            fix_info = f" β π‘ Fix: {f.suggested_fix} ({f.suggested_fix_contrast:.1f}:1)" if f.suggested_fix else ""
            log(f"  β  ββ {f.name}{fix_info}")
        if len(pair_failures) > 5:
            log(f"  β  ββ ... and {len(pair_failures) - 5} more")

    log("")

    # --- Spacing: grid alignment ------------------------------------------
    log("  π SPACING GRID ANALYSIS")
    log("  " + "β" * 40)
    spacing = analyze_spacing_grid(spacing_tokens)

    alignment_icon = "β" if spacing.is_aligned else "β οΈ"
    log(f"  ββ Detected Base: {spacing.detected_base}px")
    log(f"  ββ Grid Aligned: {alignment_icon} {spacing.alignment_percentage:.0f}%")

    if spacing.misaligned_values:
        log(f"  ββ Misaligned Values: {spacing.misaligned_values[:8]}{'...' if len(spacing.misaligned_values) > 8 else ''}")

    log(f"  ββ Suggested Scale: {spacing.suggested_scale[:10]}...")
    log(f"  ββ π‘ Recommendation: {spacing.recommendation}px ({spacing.recommendation_reason})")
    log("")

    # --- Colors: palette statistics ---------------------------------------
    log("  π¨ COLOR PALETTE STATISTICS")
    log("  " + "β" * 40)
    color_stats = analyze_color_statistics(color_tokens)

    # Heuristic warning thresholds: more than 10 exact duplicates or more
    # than 30 unique colors flags an undisciplined palette.
    dup_icon = "β οΈ" if color_stats.duplicate_count > 10 else "β"
    unique_icon = "β οΈ" if color_stats.unique_count > 30 else "β"

    log(f"  ββ Total Colors: {color_stats.total_count}")
    log(f"  ββ Unique Colors: {color_stats.unique_count} {unique_icon}")
    log(f"  ββ Exact Duplicates: {color_stats.duplicate_count} {dup_icon}")
    log(f"  ββ Near-Duplicates: {len(color_stats.near_duplicates)}")
    log(f"  ββ Grays: {color_stats.gray_count} | Saturated: {color_stats.saturated_count}")
    log(f"  ββ Hue Distribution: {dict(list(color_stats.hue_distribution.items())[:5])}...")
    log("")

    # --- Consistency score: four pillars worth up to 25 points each -------
    # typography consistency, AA pass rate, spacing grid alignment, and
    # (inverse) palette duplicate ratio. max(..., 1) guards divide-by-zero.
    type_score = 25 if typography.is_consistent else 10
    aa_score = 25 * (passes / max(len(accessibility), 1))
    spacing_score = 25 * (spacing.alignment_percentage / 100)
    color_score = 25 * (1 - min(color_stats.duplicate_count / max(color_stats.total_count, 1), 1))

    consistency_score = int(type_score + aa_score + spacing_score + color_score)

    log("  " + "β" * 40)
    log(f"  π RULE ENGINE SUMMARY")
    log(f"  ββ Consistency Score: {consistency_score}/100")
    log(f"  ββ AA Failures: {len(failures)}")
    log(f"  ββ Cost: $0.00 (free)")
    log("")

    return RuleEngineResults(
        typography=typography,
        accessibility=accessibility,
        spacing=spacing,
        color_stats=color_stats,
        aa_failures=len(failures),
        consistency_score=consistency_score,
    )