# =============================================================================
# app/providers.py
# 09.03.2026 | updated 23.03.2026
# LLM + Search Provider Registry + Fallback Chain
# Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
# Copyright 2026 - Volkan Kücükbudak
# Apache License V. 2 + ESOL 1.1
# Repo: https://github.com/VolkanSah/Universal-MCP-Hub-sandboxed
# =============================================================================
# ARCHITECTURE NOTE:
#   This file lives exclusively in app/ and is ONLY started by app/app.py.
#   NO direct access to fundaments/*, .env, or Guardian (main.py).
#   All config comes from app/.pyfun via app/config.py.
#
# PROVIDER PRINCIPLE:
#   No key = no provider = no tool = no crash.
#   Server always starts, just with fewer providers.
#   Adding a new provider = update .pyfun + add class here. Never touch mcp.py!
#
# FALLBACK CHAIN:
#   Defined in .pyfun per provider via fallback_to field.
#   anthropic → fails → gemini → fails → openrouter → fails → RuntimeError
#   Visited set prevents infinite loops.
#
# SECURITY NOTE:
#   API keys are NEVER logged or included in exception messages.
#   All errors are sanitized before propagation - only HTTP status codes
#   and safe_url (query params stripped) are ever exposed in logs.
#
# CACHING NOTE:
#   Anthropic → prompt_caching (cache_control: ephemeral)
#     Requires anthropic-beta: prompt-caching-2024-07-31 header.
#     Caches system prompt + long user prompts (>1024 tokens estimated).
#     Saves up to 90% input token costs on repeated context.
#     Enable per provider in .pyfun: supports_cache = "true"
#
#   Gemini → Implicit caching (automatic, no extra API call needed)
#     Google automatically caches repeated prompt prefixes server-side.
#     No code change needed - Gemini handles it transparently.
#     Explicit Context Caching API exists but requires separate cache management
#     and is only worth it for very large static contexts (32k+ tokens).
#     Enable per provider in .pyfun: supports_cache = "true"
#     (currently used as log hint only for Gemini - implicit cache is always on)
#
# HOW TO ADD A NEW LLM PROVIDER - 3 steps, nothing else to touch:
#   1. Add class below (copy a dummy, implement complete())
#   2. Register name → class in _PROVIDER_CLASSES dict
#   3. Add [LLM_PROVIDER.yourprovider] block in app/.pyfun
#      → env_key, base_url, default_model, fallback_to
#
# DEPENDENCY CHAIN (app/* only, no fundaments!):
#   config.py    → parses app/.pyfun - single source of truth
#   providers.py → LLM + Search registry + fallback chain
#   tools.py     → calls providers.llm_complete() / providers.search()
#   mcp.py       → calls providers.list_active_llm() / list_active_search()
# =============================================================================
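
# -----------------------------------------------------------------------------
# QUICK CALL-PATH SKETCH (illustrative only - the real wiring lives in mcp.py
# and tools.py, which are not part of this file):
#
#   providers.initialize()                  # once, during mcp.py startup
#   reply = await providers.llm_complete(
#       "Summarize the last tool output.",  # prompt
#       provider_name="anthropic",          # optional - .pyfun default otherwise
#   )
#   # On failure, the fallback_to chain from .pyfun is walked:
#   #   anthropic → gemini → openrouter → RuntimeError if every provider fails
# -----------------------------------------------------------------------------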

import os
import logging
import httpx

from . import config
logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger("providers")


# =============================================================================
# SECTION 1 - Base Provider
# Shared HTTP logic - implemented ONCE, reused by all providers.
# =============================================================================

class BaseProvider:
    """
    Base class for all LLM providers.
    Subclasses only implement complete() - HTTP logic lives here.
    """
    def __init__(self, name: str, cfg: dict):
        self.name          = name
        self.key           = os.getenv(cfg.get("env_key", ""))
        self.base_url      = cfg.get("base_url", "")
        self.fallback      = cfg.get("fallback_to", "")
        self.timeout       = int(config.get_limits().get("REQUEST_TIMEOUT_SEC", "60"))
        self.model         = cfg.get("default_model", "")
        self.supports_cache = cfg.get("supports_cache", "false").lower() == "true"
        # Safe key hint for debug logs - never log the full key
        self._key_hint = (
            f"{self.key[:4]}...{self.key[-4:]}"
            if self.key and len(self.key) > 8
            else "***"
        )

    async def complete(self, prompt: str, model: str, max_tokens: int) -> str:
        """Override in each provider subclass."""
        raise NotImplementedError

    async def _post(self, url: str, headers: dict, payload: dict) -> dict:
        """
        Shared HTTP POST - used by all providers.
        Raises RuntimeError with sanitized message on non-2xx responses.
        API keys are never included in raised exceptions or log output.
        """
        safe_url = url.split("?")[0]  # strip query params (may contain API keys)
        logger.debug(f"POST → {safe_url}")
        async with httpx.AsyncClient() as client:
            r = await client.post(
                url,
                headers=headers,
                json=payload,
                timeout=self.timeout,
            )
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                # Sanitize: only status code + safe_url, never headers or body
                raise RuntimeError(
                    f"HTTP {e.response.status_code} from {safe_url}"
                ) from None
            return r.json()
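
    # Example of what callers see on failure (the API key, query string, headers
    # and response body never appear):
    #   RuntimeError: HTTP 429 from https://api.anthropic.com/v1/messages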


# =============================================================================
# SECTION 2 β€” LLM Provider Implementations
# Only the API-specific parsing logic differs per provider.
# =============================================================================

# --- SmolLM2 (Custom Assistant Space) ----------------------------------------
class SmolLMProvider(BaseProvider):
    """
    SmolLM2 Custom Assistant Space - OpenAI-compatible, ADI routing included.
    Free tier on HF Spaces (CPU). Falls back to next provider on 503.
    Response includes extra 'adi' field with score + decision (ignored by hub).
    Deploy: https://github.com/VolkanSah/Multi-LLM-API-Gateway (smollm-space/)

    .pyfun block:
        [LLM_PROVIDER.smollm]
        active        = "true"
        base_url      = "https://codey-lab-SmolLM2-customs.hf.space/v1"
        env_key       = "SMOLLM_API_KEY"
        default_model = "smollm2-360m"
        models        = "smollm2-360m, codey-lab/model.universal-mcp-hub"
        fallback_to   = "anthropic"
        [LLM_PROVIDER.smollm_END]
    """

    async def complete(self, prompt: str, model: str = None, max_tokens: int = 150) -> str:
        data = await self._post(
            f"{self.base_url}/chat/completions",
            headers={
                "Authorization": f"Bearer {self.key}",
                "X-IP-Token":     self.key,
                "content-type":  "application/json",
            },
            payload={
                "model":      model or self.model,
                "max_tokens": max_tokens,
                "messages":   [{"role": "user", "content": prompt}],
            },
        )
        return data["choices"][0]["message"]["content"]


# --- Anthropic ----------------------------------------------------------------
class AnthropicProvider(BaseProvider):
    """
    Anthropic Claude API - Messages endpoint.

    Prompt Caching (supports_cache = "true" in .pyfun):
        Uses cache_control: ephemeral on system prompt and long user prompts.
        Requires anthropic-beta: prompt-caching-2024-07-31 header.
        Cache TTL: 5 minutes, extended on each cache hit.
        Min tokens to cache: ~1024 (Anthropic requirement).
        Cost: cache write ~25% more, cache read ~90% less than normal input.

    .pyfun block:
        [LLM_PROVIDER.anthropic]
        active           = "true"
        base_url         = "https://api.anthropic.com/v1"
        env_key          = "ANTHROPIC_API_KEY"
        api_version_header = "2023-06-01"
        default_model    = "claude-haiku-4-5"
        supports_cache   = "true"
        fallback_to      = "gemini"
        [LLM_PROVIDER.anthropic_END]
    """

    # Rough chars-per-token estimate - avoids importing tiktoken in sandbox
    _CHARS_PER_TOKEN = 4
    _CACHE_MIN_TOKENS = 1024
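    # Worked example: 1024 tokens * 4 chars/token = 4096 characters, so only
    # prompts of roughly 4 KB or more get cache_control attached below.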

    def _is_cacheable(self, text: str) -> bool:
        """Estimate if text is long enough to benefit from caching."""
        return len(text) >= self._CACHE_MIN_TOKENS * self._CHARS_PER_TOKEN

    async def complete(
        self,
        prompt: str,
        model: str = None,
        max_tokens: int = 1024,
        system: str = None,
    ) -> str:
        cfg = config.get_active_llm_providers().get("anthropic", {})

        headers = {
            "x-api-key":         self.key,
            "anthropic-version": cfg.get("api_version_header", "2023-06-01"),
            "content-type":      "application/json",
        }

        # --- Build user content ---
        # Add cache_control if caching enabled + prompt long enough
        if self.supports_cache and self._is_cacheable(prompt):
            user_content = [
                {
                    "type":          "text",
                    "text":          prompt,
                    "cache_control": {"type": "ephemeral"},
                }
            ]
            headers["anthropic-beta"] = "prompt-caching-2024-07-31"
            logger.debug("Anthropic: prompt cache_control applied to user message.")
        else:
            user_content = prompt  # short prompt - plain string, no overhead

        payload = {
            "model":      model or self.model,
            "max_tokens": max_tokens,
            "messages":   [{"role": "user", "content": user_content}],
        }

        # --- Optional system prompt with cache_control ---
        if system:
            if self.supports_cache and self._is_cacheable(system):
                payload["system"] = [
                    {
                        "type":          "text",
                        "text":          system,
                        "cache_control": {"type": "ephemeral"},
                    }
                ]
                headers["anthropic-beta"] = "prompt-caching-2024-07-31"
                logger.debug("Anthropic: prompt cache_control applied to system prompt.")
            else:
                payload["system"] = system

        data = await self._post(f"{self.base_url}/messages", headers, payload)
        return data["content"][0]["text"]


# --- Gemini ------------------------------------------------------------------
class GeminiProvider(BaseProvider):
    """
    Google Gemini API - generateContent endpoint.

    Implicit Caching (always active on Gemini side, no code needed):
        Google automatically caches repeated prompt prefixes server-side.
        No extra API call, no cache key, no TTL management needed.
        Just send the same prompt structure and Gemini handles the rest.
        supports_cache = "true" in .pyfun logs cache hint only.

    Explicit Context Caching (NOT implemented here - when to use it):
        Only worth the extra API complexity for very large static contexts
        (32k+ tokens, e.g. large documents sent on every request).
        Requires separate POST to /cachedContents, returns a cache_name,
        which is then referenced in generateContent as cachedContent.name.
        Implement as a separate tool (cache_create / cache_use) when needed.

    .pyfun block:
        [LLM_PROVIDER.gemini]
        active         = "true"
        base_url       = "https://generativelanguage.googleapis.com/v1beta"
        env_key        = "GEMINI_API_KEY"
        default_model  = "gemini-2.0-flash"
        supports_cache = "true"
        fallback_to    = "openrouter"
        [LLM_PROVIDER.gemini_END]
    """

    async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
        m        = model or self.model
        safe_url = f"{self.base_url}/models/{m}:generateContent"

        if self.supports_cache:
            logger.debug(f"Gemini: implicit caching active for model {m} (server-side, automatic).")

        async with httpx.AsyncClient() as client:
            r = await client.post(
                safe_url,
                params={"key": self.key},  # key in query param, never in logs
                json={
                    "contents":         [{"parts": [{"text": prompt}]}],
                    "generationConfig": {"maxOutputTokens": max_tokens},
                },
                timeout=self.timeout,
            )
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                raise RuntimeError(
                    f"HTTP {e.response.status_code} from {safe_url}"
                ) from None
            return r.json()["candidates"][0]["content"]["parts"][0]["text"]
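
    # ----------------------------------------------------------------------
    # Explicit Context Caching - sketch only, NOT active.
    # Mirrors the docstring above: one POST to /cachedContents returns a cache
    # name that later generateContent calls can reference. Field names follow
    # Google's public docs but are assumptions here - verify before enabling.
    #
    # async def cache_create(self, contents: str, ttl_sec: int = 300) -> str:
    #     """Create a server-side cache entry and return its name."""
    #     async with httpx.AsyncClient() as client:
    #         r = await client.post(
    #             f"{self.base_url}/cachedContents",
    #             params={"key": self.key},
    #             json={
    #                 "model":    f"models/{self.model}",
    #                 "contents": [{"parts": [{"text": contents}]}],
    #                 "ttl":      f"{ttl_sec}s",
    #             },
    #             timeout=self.timeout,
    #         )
    #         r.raise_for_status()
    #         return r.json()["name"]   # e.g. "cachedContents/..."
    # ----------------------------------------------------------------------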


# --- OpenRouter ---------------------------------------------------------------
class OpenRouterProvider(BaseProvider):
    """OpenRouter API β€” OpenAI-compatible chat completions endpoint.

    Required headers: HTTP-Referer + X-Title (required by OpenRouter for
    free models and rate limit attribution).
    """

    async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
        data = await self._post(
            f"{self.base_url}/chat/completions",
            headers={
                "Authorization": f"Bearer {self.key}",
                "HTTP-Referer":  os.getenv("APP_URL", "https://huggingface.co"),
                "X-Title":       os.getenv("HUB_NAME", "Universal AI Hub"),  # required!
                "content-type":  "application/json",
            },
            payload={
                "model":      model or self.model,
                "max_tokens": max_tokens,
                "messages":   [{"role": "user", "content": prompt}],
            },
        )
        return data["choices"][0]["message"]["content"]


# --- HuggingFace --------------------------------------------------------------
class HuggingFaceProvider(BaseProvider):
    """HuggingFace Inference API β€” OpenAI-compatible serverless endpoint.

    base_url in .pyfun: https://api-inference.huggingface.co/v1
    Model goes in payload, not in URL.
    Free tier: max ~8B models. PRO required for 70B+.
    """

    async def complete(self, prompt: str, model: str = None, max_tokens: int = 512) -> str:
        m    = model or self.model
        data = await self._post(
            f"{self.base_url}/chat/completions",
            headers={
                "Authorization": f"Bearer {self.key}",
                "content-type":  "application/json",
            },
            payload={
                "model":      m,
                "max_tokens": max_tokens,
                "messages":   [{"role": "user", "content": prompt}],
            },
        )
        return data["choices"][0]["message"]["content"]


# =============================================================================
# DUMMY PROVIDERS - copy, uncomment, adapt
# Steps: (1) uncomment class  (2) add to _PROVIDER_CLASSES  (3) add to .pyfun
# =============================================================================

# --- OpenAI -------------------------------------------------------------------
# .pyfun block to add:
#
#   [LLM_PROVIDER.openai]
#   active        = "true"
#   base_url      = "https://api.openai.com/v1"
#   env_key       = "OPENAI_API_KEY"
#   default_model = "gpt-4o-mini"
#   models        = "gpt-4o, gpt-4o-mini, gpt-3.5-turbo"
#   fallback_to   = ""
#   [LLM_PROVIDER.openai_END]
#
# class OpenAIProvider(BaseProvider):
#     """OpenAI API - OpenAI-compatible chat completions endpoint."""
#
#     async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
#         data = await self._post(
#             f"{self.base_url}/chat/completions",
#             headers={
#                 "Authorization": f"Bearer {self.key}",
#                 "content-type":  "application/json",
#             },
#             payload={
#                 "model":      model or self.model,
#                 "max_tokens": max_tokens,
#                 "messages":   [{"role": "user", "content": prompt}],
#             },
#         )
#         return data["choices"][0]["message"]["content"]


# --- Mistral ------------------------------------------------------------------
# .pyfun block to add:
#
#   [LLM_PROVIDER.mistral]
#   active        = "true"
#   base_url      = "https://api.mistral.ai/v1"
#   env_key       = "MISTRAL_API_KEY"
#   default_model = "mistral-large-latest"
#   models        = "mistral-large-latest, mistral-small-latest, codestral-latest"
#   fallback_to   = ""
#   [LLM_PROVIDER.mistral_END]
#
# class MistralProvider(BaseProvider):
#     """Mistral AI API - OpenAI-compatible chat completions endpoint."""
#
#     async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
#         data = await self._post(
#             f"{self.base_url}/chat/completions",
#             headers={
#                 "Authorization": f"Bearer {self.key}",
#                 "content-type":  "application/json",
#             },
#             payload={
#                 "model":      model or self.model,
#                 "max_tokens": max_tokens,
#                 "messages":   [{"role": "user", "content": prompt}],
#             },
#         )
#         return data["choices"][0]["message"]["content"]


# --- xAI (Grok) ---------------------------------------------------------------
# .pyfun block to add:
#
#   [LLM_PROVIDER.xai]
#   active        = "true"
#   base_url      = "https://api.x.ai/v1"
#   env_key       = "XAI_API_KEY"
#   default_model = "grok-3-mini"
#   models        = "grok-3, grok-3-mini, grok-3-fast"
#   fallback_to   = ""
#   [LLM_PROVIDER.xai_END]
#
# class XAIProvider(BaseProvider):
#     """xAI Grok API - OpenAI-compatible chat completions endpoint."""
#
#     async def complete(self, prompt: str, model: str = None, max_tokens: int = 1024) -> str:
#         data = await self._post(
#             f"{self.base_url}/chat/completions",
#             headers={
#                 "Authorization": f"Bearer {self.key}",
#                 "content-type":  "application/json",
#             },
#             payload={
#                 "model":      model or self.model,
#                 "max_tokens": max_tokens,
#                 "messages":   [{"role": "user", "content": prompt}],
#             },
#         )
#         return data["choices"][0]["message"]["content"]


# =============================================================================
# SECTION 3 - Provider Registry
# Built from .pyfun [LLM_PROVIDERS] at initialize().
# Maps provider names → classes.
# To activate a dummy: uncomment class above + add entry here.
# =============================================================================

_PROVIDER_CLASSES = {
    "smollm":      SmolLMProvider,
    "anthropic":   AnthropicProvider,
    "gemini":      GeminiProvider,
    "openrouter":  OpenRouterProvider,
    "huggingface": HuggingFaceProvider,
    # "openai":   OpenAIProvider,    # ← uncomment to activate
    # "mistral":  MistralProvider,   # ← uncomment to activate
    # "xai":      XAIProvider,       # ← uncomment to activate
}

_registry: dict = {}


def initialize() -> None:
    """
    Build provider registry from .pyfun [LLM_PROVIDERS].
    Called once by mcp.py during startup sequence.
    Skips providers with missing ENV keys - no crash, just fewer tools.
    """
    global _registry
    active = config.get_active_llm_providers()

    for name, cfg in active.items():
        env_key = cfg.get("env_key", "")
        if not env_key or not os.getenv(env_key):
            logger.info(f"Provider '{name}' skipped - ENV key not set.")
            continue
        cls = _PROVIDER_CLASSES.get(name)
        if not cls:
            logger.info(f"Provider '{name}' has no handler yet - skipped.")
            continue
        _registry[name] = cls(name, cfg)
        cache_hint = " [cache: ON]" if cfg.get("supports_cache", "false").lower() == "true" else ""
        logger.info(f"Provider registered: {name}{cache_hint}")


# =============================================================================
# SECTION 4 - LLM Execution + Fallback Chain
# =============================================================================

async def llm_complete(
    prompt: str,
    provider_name: str = None,
    model: str = None,
    max_tokens: int = 1024,
    system: str = None,
) -> str:
    """
    Send prompt to LLM provider with automatic fallback chain.
    Fallback order is defined in .pyfun via fallback_to field.
    Raises RuntimeError if all providers in the chain fail.

    Args:
        prompt:        Input text to send to the model.
        provider_name: Provider name override. Defaults to default_provider
                       from .pyfun [TOOL.llm_complete].
        model:         Model name override. Defaults to provider's default_model.
        max_tokens:    Max tokens in response. Default: 1024.
        system:        Optional system prompt. Passed to providers that support it.
                       AnthropicProvider caches it automatically if supports_cache = true
                       and the system prompt is long enough (>= ~1024 tokens).

    Returns:
        Model response as plain text string.
    """
    if not provider_name:
        tools_cfg     = config.get_active_tools()
        provider_name = tools_cfg.get("llm_complete", {}).get("default_provider", "anthropic")

    visited = set()
    current = provider_name

    while current and current not in visited:
        visited.add(current)
        provider = _registry.get(current)

        if not provider:
            logger.warning(f"Provider '{current}' not in registry - trying fallback.")
        else:
            try:
                # Pass system prompt if provider supports it (Anthropic)
                # Only providers whose complete() signature declares a 'system'
                # parameter receive it (currently Anthropic); others are called
                # without the kwarg to avoid TypeError.
                if system is not None and hasattr(provider, 'complete'):
                    import inspect
                    sig = inspect.signature(provider.complete)
                    if 'system' in sig.parameters:
                        result = await provider.complete(prompt, model, max_tokens, system=system)
                    else:
                        result = await provider.complete(prompt, model, max_tokens)
                else:
                    result = await provider.complete(prompt, model, max_tokens)

                logger.info(f"Response from provider: '{current}'")
                return f"[{current}] {result}"
            except Exception as e:
                # Log only the exception type plus the sanitized message raised
                # by _post() (RuntimeError). Other exception types may carry the
                # full request URL, headers, or response bodies, so only their
                # class name is logged.
                detail = str(e) if isinstance(e, RuntimeError) else type(e).__name__
                logger.warning(
                    f"Provider '{current}' failed: {detail} - trying fallback."
                )

        cfg     = config.get_active_llm_providers().get(current, {})
        current = cfg.get("fallback_to", "")

    raise RuntimeError("All providers failed - no fallback available.")


# Alias - used internally by tools.py
complete = llm_complete
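
# Usage sketch (the real call sites live in tools.py; values here are
# illustrative only). Note the provider tag prefixed to every response:
#
#   text = await providers.complete("Review this diff.", max_tokens=256)
#   # -> "[anthropic] Looks good, but ..."
#
#   # System prompt is forwarded only to providers that declare it (Anthropic):
#   text = await providers.llm_complete(
#       "Summarize the changelog.",
#       provider_name="anthropic",
#       system="You are the Universal MCP Hub assistant.",
#   )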


# =============================================================================
# SECTION 5 - Search Execution
# Search providers not yet implemented - returns placeholder.
# Add BraveProvider, TavilyProvider here when ready.
# =============================================================================

async def search(
    query: str,
    provider_name: str = None,
    max_results: int = 5,
) -> str:
    """
    Search the web via configured search provider.
    Search providers not yet implemented - placeholder until BraveProvider ready.

    Args:
        query:         Search query string.
        provider_name: Provider name override (e.g. 'brave', 'tavily').
        max_results:   Maximum number of results. Default: 5.

    Returns:
        Formatted search results as plain text string.
    """
    # TODO: implement BraveProvider, TavilyProvider
    # Same pattern as LLM providers - add class + register in _SEARCH_REGISTRY
    # (see the commented BraveSearchProvider sketch below this function).
    logger.info(f"web_search called - query: '{query}' - search providers not yet active.")
    return f"Search not yet implemented. Query was: {query}"


# =============================================================================
# SECTION 6 - Registry Helpers
# Used by mcp.py for tool registration decisions.
# =============================================================================

def list_active_llm() -> list:
    """Returns list of active LLM provider names."""
    return list(_registry.keys())


def list_active_search() -> list:
    """
    Returns list of active search provider names.
    Empty until search providers are implemented.
    """
    # TODO: return list(_search_registry.keys()) when search providers are ready
    return []


def get(name: str) -> BaseProvider:
    """Get a specific provider instance by name."""
    return _registry.get(name)
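
# Usage sketch - how mcp.py is expected to consult these helpers at startup
# (register_tool is a hypothetical name; the real registration lives in mcp.py):
#
#   if providers.list_active_llm():
#       register_tool("llm_complete")
#   if providers.list_active_search():
#       register_tool("web_search")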


# =============================================================================
# Direct execution guard
# =============================================================================

if __name__ == "__main__":
    print("WARNING: Run via main.py β†’ app.py, not directly.")