repo_name
stringlengths
1
62
dataset
stringclasses
1 value
lang
stringclasses
11 values
pr_id
int64
1
20.1k
owner
stringlengths
2
34
reviewer
stringlengths
2
39
diff_hunk
stringlengths
15
262k
code_review_comment
stringlengths
1
99.6k
langfuse-python
github_2023
python
981
langfuse
greptile-apps[bot]
@@ -140,6 +135,22 @@ def _parse_token_usage( if additional_kwargs := getattr(response, "additional_kwargs", None): return _parse_usage_from_mapping(additional_kwargs) + def _get_generation_client(self, id: str) -> StatefulGenerationClient: + trace_id = self._context.trace_id + i...
logic: Creating a new StatefulGenerationClient for each update could lead to race conditions or inconsistent state. Consider caching the client instances by ID.
langfuse-python
github_2023
python
981
langfuse
greptile-apps[bot]
@@ -140,6 +135,22 @@ def _parse_token_usage( if additional_kwargs := getattr(response, "additional_kwargs", None): return _parse_usage_from_mapping(additional_kwargs) + def _get_generation_client(self, id: str) -> StatefulGenerationClient: + trace_id = self._context.trace_id + i...
logic: Creating a new trace_id when context.trace_id is None could lead to orphaned or disconnected traces. This should be handled at a higher level.
langfuse-python
github_2023
python
981
langfuse
greptile-apps[bot]
@@ -99,14 +99,8 @@ def __init__( sdk_integration="llama-index_instrumentation", ) self._observation_updates = {}
logic: observation_updates dict is declared but never used after initialization
langfuse-python
github_2023
python
981
langfuse
greptile-apps[bot]
@@ -217,7 +201,10 @@ def _is_generation(self, id_: str, instance: Optional[Any] = None) -> bool: def _get_generation_client(self, id: str) -> StatefulGenerationClient: trace_id = self._context.trace_id if trace_id is None: - raise ValueError("Trace ID is not set") + logger.w...
logic: Auto-generating trace IDs could lead to orphaned spans if the trace ID is missing due to a bug rather than normal operation
langfuse-python
github_2023
python
967
langfuse
greptile-apps[bot]
@@ -322,6 +325,13 @@ def add_task(self, event: dict): if not self._sampler.sample_event(event): return # event was sampled out + # mask input/output + if self._mask: + body = event["body"] + for key in ("input", "output"): + ...
logic: Move the JSON serialization after the masking to ensure the masked data is what gets serialized.
langfuse-python
github_2023
python
967
langfuse
greptile-apps[bot]
@@ -322,6 +325,13 @@ def add_task(self, event: dict): if not self._sampler.sample_event(event): return # event was sampled out + # mask input/output + if self._mask: + body = event["body"] + for key in ("input", "output"): + ...
logic: Move the JSON serialization after the masking to ensure the masked data is what gets serialized
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -322,6 +328,11 @@ def __init__( self.prompt_cache = PromptCache() + def _apply_mask(self, data: Any) -> Any: + if self.mask: + return self.mask(data) + return data
logic: The _apply_mask method should handle exceptions from the user-provided mask function to prevent crashes if the masking fails
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -322,6 +325,13 @@ def add_task(self, event: dict): if not self._sampler.sample_event(event): return # event was sampled out + # mask input/output + if self._mask: + body = event["body"] + for key in ("input", "output"): + ...
logic: Missing null check on `event['body']` before accessing it. Could raise KeyError if event doesn't have a body field.
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -544,3 +545,48 @@ def test_truncate_item_in_place(httpserver): <= MAX_MSG_SIZE ) assert complex_item["body"]["input"] is None + + +def test_mask_body_input_output(httpserver: HTTPServer): + langfuse_client = setup_langfuse_client( + get_host(httpserver.url_for("/api/public/ingestion")) +...
logic: This mask function modifies the data structure by adding a new field rather than actually masking sensitive data. Consider testing with a more realistic mask function that replaces sensitive values.
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -544,3 +545,48 @@ def test_truncate_item_in_place(httpserver): <= MAX_MSG_SIZE ) assert complex_item["body"]["input"] is None + + +def test_mask_body_input_output(httpserver: HTTPServer): + langfuse_client = setup_langfuse_client( + get_host(httpserver.url_for("/api/public/ingestion")) +...
logic: Using .copy() only creates a shallow copy. Use deepcopy if mock_body contains nested structures to prevent unintended mutations.
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -334,8 +340,22 @@ def add_task(self, event: dict): return False + def _apply_mask_in_place(self, event: dict): + """Apply the mask function to the event. This is done in place.""" + if not self._mask: + return + + body = event["body"]
logic: Missing null check on `event['body']` before accessing it. Could raise KeyError if event doesn't have a body field.
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -322,6 +326,8 @@ def add_task(self, event: dict): if not self._sampler.sample_event(event): return # event was sampled out + self._apply_mask_in_place(event) + json.dumps(event, cls=EventSerializer)
logic: logic: Serialization check should happen before masking to fail fast if event is not serializable
langfuse-python
github_2023
python
977
langfuse
greptile-apps[bot]
@@ -334,8 +340,22 @@ def add_task(self, event: dict): return False + def _apply_mask_in_place(self, event: dict): + """Apply the mask function to the event. This is done in place.""" + if not self._mask: + return + + body = event["body"] + for key in ("input", ...
logic: logic: No validation that mask function returns serializable data. Could cause runtime errors.
langfuse-python
github_2023
python
402
langfuse
maxdeichmann
@@ -10,3 +10,4 @@ def clean_logger(): backoff_logger = logging.getLogger("backoff") backoff_logger.setLevel(logging.WARNING) # Set the desired log level backoff_logger.addHandler(console_handler) + backoff_logger.propagate = False # Stop propagation to the root logger
https://docs.python.org/3/library/logging.html#logging.Logger.propagate I just found this here. If our users use backoff and hence import it as well, i think we will turn off logs for them too with this implementation.
langfuse-python
github_2023
python
961
langfuse
ellipsis-dev[bot]
@@ -2095,3 +2095,11 @@ def test_get_langchain_chat_prompt_with_precompiled_prompt(): assert system_message.content == "This is a dog." assert user_message.content == "This is a langchain chain." + + +def test_bedrock(): + from langchain_aws import ChatBedrockConverse + + llm = ChatBedrockConverse(mode...
Replace the print statement with assertions to verify the expected behavior.
langfuse-python
github_2023
python
961
langfuse
ellipsis-dev[bot]
@@ -1110,7 +1110,9 @@ def auth_check(self) -> bool: try: return self.client_instance.auth_check() except Exception as e: - self._log.error("No Langfuse object found in the current context", e) + self._log.error( + "No Langfuse object found in the curre...
The `exc_info` parameter should be set to `True` to include the stack trace in the log, not `e`. ```suggestion "No Langfuse object found in the current context", exc_info=True ```
langfuse-python
github_2023
python
946
langfuse
ellipsis-dev[bot]
@@ -0,0 +1,166 @@ +from typing import Optional, Any, Union, Dict, Mapping + +from langfuse.client import ( + Langfuse, +) +from langfuse.model import ModelUsage + + +try: + from llama_index.core.base.llms.types import ( + ChatResponse, + CompletionResponse, + ) + from llama_index.core.instrume...
The `extra="allow"` argument is not valid for a standard Python class. It should be removed.
langfuse-python
github_2023
python
946
langfuse
greptile-apps[bot]
@@ -0,0 +1,166 @@ +from typing import Optional, Any, Union, Dict, Mapping + +from langfuse.client import ( + Langfuse, +) +from langfuse.model import ModelUsage + + +try: + from llama_index.core.base.llms.types import ( + ChatResponse, + CompletionResponse, + ) + from llama_index.core.instrume...
logic: The check 'isinstance(usage, object)' will always be True. Consider removing this condition or using a more specific type check
langfuse-python
github_2023
python
946
langfuse
greptile-apps[bot]
@@ -0,0 +1,183 @@ +import httpx +import uuid +from contextlib import contextmanager +from typing import Optional, Dict, Any, List +from logging import getLogger +from langfuse import Langfuse + +from langfuse.client import StatefulTraceClient, StateType +from langfuse.utils.langfuse_singleton import LangfuseSingleton +...
logic: Consider raising an exception instead of ignoring parent_observation_id when trace_id is None
langfuse-python
github_2023
python
946
langfuse
greptile-apps[bot]
@@ -0,0 +1,183 @@ +import httpx +import uuid +from contextlib import contextmanager +from typing import Optional, Dict, Any, List +from logging import getLogger +from langfuse import Langfuse + +from langfuse.client import StatefulTraceClient, StateType +from langfuse.utils.langfuse_singleton import LangfuseSingleton +...
logic: Consider adding error handling for cases where trace_id is None
langfuse-python
github_2023
python
946
langfuse
greptile-apps[bot]
@@ -32,7 +32,7 @@ def validate_llm_generation(generation, model_name="openai_llm"): [ generation.name == model_name, generation.usage.input > 0, - generation.usage.output > 0, + # generation.usage.output > 0, todo: enable when streaming output tokens are working
logic: The check for output tokens is commented out. Ensure this doesn't affect the overall functionality of the tests.
langfuse-python
github_2023
others
947
langfuse
greptile-apps[bot]
@@ -14,11 +14,16 @@ backoff = ">=1.10.0" openai = { version = ">=0.27.8", optional = true } wrapt = "^1.14" langchain = { version = ">=0.0.309", optional = true } +<<<<<<< Updated upstream llama-index = {version = ">=0.10.12, <2.0.0", optional = true} packaging = ">=23.2,<25.0" +======= +packaging = "^24.1" +>>>>>...
logic: Resolve merge conflict for packaging and llama-index dependencies
langfuse-python
github_2023
others
947
langfuse
greptile-apps[bot]
@@ -14,11 +14,16 @@ backoff = ">=1.10.0" openai = { version = ">=0.27.8", optional = true } wrapt = "^1.14" langchain = { version = ">=0.0.309", optional = true } +<<<<<<< Updated upstream llama-index = {version = ">=0.10.12, <2.0.0", optional = true}
logic: llama-index is defined twice with different versions
langfuse-python
github_2023
others
947
langfuse
greptile-apps[bot]
@@ -14,11 +14,16 @@ backoff = ">=1.10.0" openai = { version = ">=0.27.8", optional = true } wrapt = "^1.14" langchain = { version = ">=0.0.309", optional = true } +<<<<<<< Updated upstream llama-index = {version = ">=0.10.12, <2.0.0", optional = true} packaging = ">=23.2,<25.0" +======= +packaging = "^24.1" +>>>>>...
logic: This line conflicts with the earlier llama-index specification
langfuse-python
github_2023
python
544
langfuse
marcklingen
@@ -124,6 +124,7 @@ def __init__( user_id=None, tags=None, parent_observation_id=None, + langfuse_prompt=None,
thanks, super helpful, added as comment for future reference
langfuse-python
github_2023
python
544
langfuse
marcklingen
@@ -197,6 +197,31 @@ def test_openai_chat_completion_with_trace(): assert generation.data[0].trace_id == trace_id +def test_openai_chat_completion_with_langfuse_prompt(): + api = get_api() + generation_name = create_uuid() + langfuse = Langfuse() + prompt_name = create_uuid() + prompt_client = l...
merged
langfuse-python
github_2023
python
543
langfuse
maxdeichmann
@@ -73,17 +72,56 @@ def get_langchain_prompt(self): pass @staticmethod - def get_langchain_prompt_string(content: str): + def _get_langchain_prompt_string(content: str): return re.sub(r"\{\{(.*?)\}\}", r"{\g<1>}", content) + @staticmethod + def _compile_template_string(content: st...
Did you try to implement that with regex? I would prefer that over looping ourselves through the string.
langfuse-python
github_2023
python
543
langfuse
maxdeichmann
@@ -0,0 +1,37 @@ +def compile_template_string(content: str, **kwargs) -> str: + opening = "{{" + closing = "}}" + + result_list = [] + curr_idx = 0 + + while curr_idx < len(content): + # Find the next opening tag + var_start = content.find(opening, curr_idx) + + if var_start == -1: +...
This here would happen in case of a male formatted sting, right? `{{var1}} something{{random {{var2}}` would not match var2, right?
langfuse-python
github_2023
python
532
langfuse
maxdeichmann
@@ -325,6 +330,8 @@ async def _get_langfuse_data_from_async_streaming_response( resource, responses ) + langfuse.trace(id=generation.trace_id, output=completion)
In these cases, we do not know anymore whether the trace was generated by the user becore and whether he provided the traceId himself. Can we somehow get that information here and only update in this case? Similar to how we do that with the input.
langfuse-python
github_2023
python
526
langfuse
maxdeichmann
@@ -3,6 +3,9 @@ from .resources import ( AccessDeniedError, BaseEvent, + BasePrompt,
Can you generate fern again for your changes? As discussed, i fixed something there. Important: before generating, please merge main again in your langfuse/langfuse feature branch to make that work.
langfuse-python
github_2023
python
518
langfuse
maxdeichmann
@@ -743,6 +743,9 @@ def _update_trace(self, run_id: str, parent_run_id: Optional[str], output: any): ): self.trace = self.trace.update(output=output) + # Remove the run details after updating the trace.
Can we quickly rename the function? E.g. _update_trace_and_remove_state(...) to make this more readable?
langfuse-python
github_2023
python
907
langfuse
greptile-apps[bot]
@@ -673,7 +673,7 @@ def _parse_output_from_event(self, event: CallbackEvent): payload["num_chunks"] = len(chunks) if EventPayload.COMPLETION in payload: - return payload.get(EventPayload.COMPLETION) + return str(payload.get(EventPayload.COMPLETION))
logic: Converting CompletionResponse to string may lose structured data
langfuse-python
github_2023
python
913
langfuse
greptile-apps[bot]
@@ -0,0 +1,53 @@ +import logging +from typing import Union + +from openai import APIError + +from langfuse.request import APIErrors + +SUPPORT_URL = "https://langfuse.com/support" +API_DOCS_URL = "https://api.reference.langfuse.com" +RBAC_DOCS_URL = "https://langfuse.com/docs/rbac" +RATE_LIMITS_URL = "https://langfuse....
logic: NPM_PACKAGE_URL points to PyPI, not npm. Rename to PYPI_PACKAGE_URL for clarity
langfuse-python
github_2023
python
913
langfuse
greptile-apps[bot]
@@ -0,0 +1,53 @@ +import logging +from typing import Union + +from openai import APIError + +from langfuse.request import APIErrors + +SUPPORT_URL = "https://langfuse.com/support" +API_DOCS_URL = "https://api.reference.langfuse.com" +RBAC_DOCS_URL = "https://langfuse.com/docs/rbac" +RATE_LIMITS_URL = "https://langfuse....
logic: APIError.status_code might be an int, but errorResponseByCode uses string keys. Convert to str
langfuse-python
github_2023
others
911
langfuse
greptile-apps[bot]
@@ -0,0 +1,161 @@ +# Finto Python Library + +[![fern shield](https://img.shields.io/badge/%F0%9F%8C%BF-SDK%20generated%20by%20Fern-brightgreen)](https://github.com/fern-api/fern) +[![pypi](https://img.shields.io/pypi/v/finto)](https://pypi.python.org/pypi/finto) + +The Finto Python library provides convenient access to...
logic: Inconsistent naming: README refers to 'Finto' but the repository is 'langfuse'
langfuse-python
github_2023
others
911
langfuse
greptile-apps[bot]
@@ -0,0 +1,161 @@ +# Finto Python Library + +[![fern shield](https://img.shields.io/badge/%F0%9F%8C%BF-SDK%20generated%20by%20Fern-brightgreen)](https://github.com/fern-api/fern) +[![pypi](https://img.shields.io/pypi/v/finto)](https://pypi.python.org/pypi/finto) + +The Finto Python library provides convenient access to...
logic: Installation command uses 'finto' instead of 'langfuse'
langfuse-python
github_2023
others
911
langfuse
greptile-apps[bot]
@@ -0,0 +1,161 @@ +# Finto Python Library + +[![fern shield](https://img.shields.io/badge/%F0%9F%8C%BF-SDK%20generated%20by%20Fern-brightgreen)](https://github.com/fern-api/fern) +[![pypi](https://img.shields.io/pypi/v/finto)](https://pypi.python.org/pypi/finto) + +The Finto Python library provides convenient access to...
logic: Imports use 'finto' instead of 'langfuse'
langfuse-python
github_2023
python
911
langfuse
greptile-apps[bot]
@@ -13,36 +13,28 @@ class BaseScore(pydantic_v1.BaseModel): trace_id: str = pydantic_v1.Field(alias="traceId") name: str source: ScoreSource - observation_id: typing.Optional[str] = pydantic_v1.Field( - alias="observationId", default=None - ) + observation_id: typing.Optional[str] = pydan...
logic: This change merges two dict representations with different exclusion rules. Ensure this doesn't unintentionally expose sensitive or default data.
langfuse-python
github_2023
others
909
langfuse
greptile-apps[bot]
@@ -15,7 +15,7 @@ openai = { version = ">=0.27.8", optional = true } wrapt = "^1.14" langchain = { version = ">=0.0.309", optional = true } llama-index = {version = ">=0.10.12, <2.0.0", optional = true} -packaging = "^24.1" +packaging = "23.2"
logic: Downgrading packaging from '^24.1' to '23.2' may cause compatibility issues. Ensure this change is necessary and doesn't break any functionality.
langfuse-python
github_2023
python
902
langfuse
greptile-apps[bot]
@@ -22,11 +28,122 @@ def get_epoch_seconds() -> int: return int(datetime.now().timestamp()) +class PromptCacheRefreshConsumer(Thread): + _log = logging.getLogger("langfuse") + _queue: Queue + _identifier: int + running: bool = True + + def __init__(self, queue: Queue, identifier: int): + ...
logic: _refreshing_keys is defined but never used in the provided code
langfuse-python
github_2023
python
902
langfuse
maxdeichmann
@@ -1058,21 +1058,30 @@ def get_prompt( raise e if cached_prompt.is_expired(): + self.log.debug(f"Stale prompt '{cache_key}' found in cache.") try: - return self._fetch_prompt_and_update_cache( - name, - version=ver...
Please adjust all the error / warning logs here so that user can do something with it.
langfuse-python
github_2023
python
902
langfuse
maxdeichmann
@@ -22,11 +28,119 @@ def get_epoch_seconds() -> int: return int(datetime.now().timestamp()) +class PromptCacheRefreshConsumer(Thread): + _log = logging.getLogger("langfuse") + _queue: Queue + _identifier: int + running: bool = True + + def __init__(self, queue: Queue, identifier: int): + ...
This should be done only once. As discussed.
langfuse-python
github_2023
python
874
langfuse
maxdeichmann
@@ -26,6 +26,8 @@ def sample_event(self, event: dict): trace_id = event["body"]["trace_id"] elif "traceId" in event["body"]: trace_id = event["body"]["traceId"] + elif "id" in event["body"]: + trace_id = event["body"]["id"]
Why did you make this change? Above, we get the event.body.id, in case the event is a trace-create type.
langfuse-python
github_2023
python
874
langfuse
maxdeichmann
@@ -67,8 +67,10 @@ def _convert_usage_input(usage: typing.Union[pydantic.BaseModel, ModelUsage]): for k in ( "promptTokens", "prompt_tokens", + "input_token_count", "completionTokens", "completion_tokens", + "generated_token_count",
Can we revert this change and handle this in the Langchain callback handler? - Functionality in here should be abstracted for Langfuse. Hence, everything going in here should already have been converted to prompt_tokens, completion_tokens etc. How does this work in the langchain case for IBM? We parse the usage f...
langfuse-python
github_2023
python
871
langfuse
greptile-apps[bot]
@@ -88,8 +85,7 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]: return super().dict(**kwargs_with_defaults) -def get_llama_index_index(callback, force_rebuild: bool = False): - Settings.callback_manager = CallbackManager([callback]) +def get_llama_index_index(handler, force_rebui...
logic: The `handler` parameter is not used in the function body. Consider how it should be integrated with the index creation or loading process.
langfuse-python
github_2023
python
851
langfuse
hassiebp
@@ -558,13 +558,13 @@ async def _wrap_async( openai_response = await wrapped(**arg_extractor.get_openai_args()) if _is_streaming_response(openai_response): - return LangfuseResponseGeneratorAsync( + return aiter(LangfuseResponseGeneratorAsync(
The `LangfuseResponseGeneratorAsync` should already support async iteration as implemented by the `__aiter__` method [here](https://github.com/sinwoobang/langfuse-python/blob/e8cc42110512939fa3a22c2650b073a4759c2b3b/langfuse/openai.py#L755). @sinwoobang Could you please explain what issue you ran into that required thi...
langfuse-python
github_2023
python
863
langfuse
greptile-apps[bot]
@@ -31,6 +31,9 @@ def default(self, obj: Any): # Timezone-awareness check return serialize_datetime(obj) + if isinstance(obj, Exception): + return obj.message
logic: This change may not handle all Exception types correctly. Some exceptions might not have a 'message' attribute, leading to potential AttributeError. Consider using str(obj) or obj.args[0] as a fallback.
langfuse-python
github_2023
python
863
langfuse
vegetablest
@@ -31,6 +31,9 @@ def default(self, obj: Any): # Timezone-awareness check return serialize_datetime(obj) + if isinstance(obj, Exception):
I suggest changing it to this, WDYT? ``` if isinstance(obj, (Exception, KeyboardInterrupt)): return f"{type(obj).__name__}: {str(obj)}" ```
langfuse-python
github_2023
python
831
langfuse
hassiebp
@@ -0,0 +1,51 @@ +import logging +import random + +log = logging.getLogger("langfuse") + + +class Sampler: + sample_rate: float + + def __init__(self, sample_rate: float): + self.sample_rate = sample_rate + random.seed(42) # Fixed seed for reproducibility + + def sample_event(self, event: dict):...
*question*: where does the `hash` function come from?
langfuse-python
github_2023
python
831
langfuse
hassiebp
@@ -174,6 +174,7 @@ def __init__( sdk_integration: Optional[str] = "default", httpx_client: Optional[httpx.Client] = None, enabled: Optional[bool] = True, + sample_rate: Optional[float] = None,
We should add this to the docstring
langfuse-python
github_2023
python
831
langfuse
hassiebp
@@ -212,6 +213,20 @@ def __init__( self.enabled = enabled public_key = public_key or os.environ.get("LANGFUSE_PUBLIC_KEY") secret_key = secret_key or os.environ.get("LANGFUSE_SECRET_KEY") + sample_rate = ( + sample_rate + if sample_rate + is not None #...
We might run into issues when setting via env var as it might get parsed as string. We had this issue in the past where we had to force the env variable to an integer via `int()`
langfuse-python
github_2023
python
821
langfuse
hassiebp
@@ -312,7 +312,7 @@ def add_task(self, event: dict): try: json.dumps(event, cls=EventSerializer) - event["timestamp"] = datetime.utcnow().replace(tzinfo=timezone.utc) + event["timestamp"] = datetime.now(timezone.utc)
Thanks @abhishek-compro! We have a util for that `from langfuse.utils import _get_timestamp` could you please use that one?
langfuse-python
github_2023
python
789
langfuse
hassiebp
@@ -78,7 +78,7 @@ def test_shutdown(): assert langfuse.task_manager._queue.empty() -def test_create_score(): +def test_create_numeric_score():
For the default (i.e. no data_type passed), are we allowing both string and floats as values or only floats?
langfuse-python
github_2023
python
768
langfuse
hassiebp
@@ -309,25 +328,51 @@ def auth_check(self) -> bool: self.log.exception(e) raise e + def get_dataset_runs( + self, + dataset_name: str, + *, + page: typing.Optional[int] = None, + limit: typing.Optional[int] = None,
*question*: Docstring below mentions that limit has default 50, but here initialized to None. Will the default be enforced elsewhere?
langfuse-python
github_2023
python
768
langfuse
hassiebp
@@ -2817,7 +2862,7 @@ class DatasetClient: created_at: dt.datetime updated_at: dt.datetime items: typing.List[DatasetItemClient] - runs: typing.List[str] + runs: typing.List[str] = [] # deprecated
I'd recommend either to maintain the previous behavior (option 1) or remove the attribute entirely (option 3) to force an AttributeError. Option 2 (empty array) might lead to unexpected / puzzling results (no runs in that array) for users with previously working code
langfuse-python
github_2023
python
714
langfuse
maxdeichmann
@@ -648,3 +602,97 @@ def _filter_image_data(messages: List[dict]): del content[index]["image_url"] return output_messages + + +class LangfuseResponseGeneratorSync: + def __init__( + self, + *, + resource, + response, + generation, + langfuse, ...
what about typing this as the openai sdk does it? We could subclass from `AsyncResponseContextManager`
langfuse-python
github_2023
python
669
langfuse
marcklingen
@@ -163,15 +171,15 @@ def __init__( ) if not public_key: - self.log.warning("public_key is not set.") - raise ValueError( - "public_key is required, set as a parameter or environment variable 'LANGFUSE_PUBLIC_KEY'" + self.enabled = False + s...
dx improvement: log only a single warning when disabled and state reason in this warning. currently user gets up to 3 warnings
langfuse-python
github_2023
python
669
langfuse
marcklingen
@@ -251,6 +254,9 @@ def init_resources(self): self._consumers.append(consumer) def add_task(self, event: dict): + if not self._enabled:
this is so clean 🤩
langfuse-python
github_2023
python
669
langfuse
maxdeichmann
@@ -140,6 +142,27 @@ def __init__( langfuse = Langfuse() ``` """ + self.enabled = enabled + public_key = public_key or os.environ.get("LANGFUSE_PUBLIC_KEY") + secret_key = secret_key or os.environ.get("LANGFUSE_SECRET_KEY") + + if not self.enabled: + ...
can we add a link to the docs here on how to set up?
langfuse-python
github_2023
python
646
langfuse
marcklingen
@@ -120,8 +120,23 @@ def __init__( ] = {} def set_root( - self, root: Optional[Union[StatefulTraceClient, StatefulSpanClient]] + self, + root: Optional[Union[StatefulTraceClient, StatefulSpanClient]], + *, + update_root: bool = False, ) -> None: + """Set th...
```suggestion root (Optional[Union[StatefulTraceClient, StatefulSpanClient]]): The root trace or observation to ```
langfuse-python
github_2023
python
644
langfuse
marcklingen
@@ -2251,12 +2253,15 @@ def update( task_manager=self.task_manager, ) - def get_langchain_handler(self): + def get_langchain_handler(self, update_parent: bool = False): """Get langchain callback handler associated with the current trace. This method creates and ...
```suggestion update_parent (bool): If set to True, the parent trace or observation will be updated with the outcome of the Langchain run. ```
langfuse-python
github_2023
python
633
langfuse
maxdeichmann
@@ -151,11 +158,13 @@ def get_langchain_prompt(self): class ChatPromptClient(BasePromptClient): def __init__(self, prompt: Prompt_Chat): super().__init__(prompt) - self.prompt = prompt.prompt + self.prompt = [ + ChatMessageDict(role=p.role, content=p.content) for p in prompt.prom...
Is this a breaking change if someone imported `ChatMessage` originally? I think yes. The import path would have involved a path to the api module, correct? If yes, this is hard to change, if no, i would suggest to keep the naming of the exported package.
langfuse-python
github_2023
python
633
langfuse
maxdeichmann
@@ -38,5 +38,19 @@ def set(self, key: str, value: PromptClient, ttl_seconds: Optional[int]): self._cache[key] = PromptCacheItem(value, ttl_seconds) @staticmethod - def generate_cache_key(name: str, version: Optional[int]) -> str: - return f"{name}-{version or 'latest'}" + def generate_cache...
this assumption makes sense.
langfuse-python
github_2023
python
633
langfuse
maxdeichmann
@@ -167,6 +209,38 @@ def test_create_prompt_with_null_config(): assert prompt.config == {} +def test_get_prompt_by_version_or_label(): + langfuse = Langfuse() + prompt_name = create_uuid() + + for i in range(3): + langfuse.create_prompt( + name=prompt_name, + prompt="test ...
![image](https://github.com/langfuse/langfuse-python/assets/17686849/57d6246c-44c3-4315-99c2-6adcd1784901) Do you have the same signature when hovering?
langfuse-python
github_2023
python
633
langfuse
maxdeichmann
@@ -538,6 +542,7 @@ def get_prompt( name: str, version: Optional[int] = None, *, + label: Optional[str] = None,
would it make sense to add another overload like: ``` @overload def get_prompt( self, name: str, *, version: Optional[int] = None, label: Optional[str] = None, type: Literal["text"] = "text", cache_ttl_seconds: Optional[int] = None, ) ->...
langfuse-python
github_2023
python
633
langfuse
maxdeichmann
@@ -566,26 +572,34 @@ def get_prompt( Exception: Propagates any exceptions raised during the fetching of a new prompt, unless there is an expired prompt in the cache, in which case it logs a warning and returns the expired prompt. """ - self.log.debug(f"Getting prompt {name}, v...
Idea: wdyt about defaulting to one of the values here so that we do not break the application? I dont think there is a a clear assumption to default to one of the two, so i think we should keep it as is.
langfuse-python
github_2023
python
633
langfuse
maxdeichmann
@@ -648,7 +666,8 @@ def create_prompt( *, name: str, prompt: Union[str, List[ChatMessage]], - is_active: bool, + is_active: Optional[bool] = None, # deprecated + labels: List[str] = [],
as these are all named arguments, order does not matter - correct?
langfuse-python
github_2023
python
460
langfuse
maxdeichmann
@@ -29,6 +30,8 @@ def default(self, obj: Any): if is_dataclass(obj): return asdict(obj) + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes)):
In general, i like this approach. Should this condition come below line 48 `isinstance(obj, (dict, list, str, int, float, type(None)))`? Also, is it possible that obj is a Sequence and a str at the same time? If no, i think the second condition is not necessary, right?
langfuse-python
github_2023
python
508
langfuse
maxdeichmann
@@ -47,3 +49,254 @@ def test_entire_llm_call_using_langchain_openai(expected_model, model): generation = list(filter(lambda o: o.type == "GENERATION", trace.observations))[0] assert generation.model == expected_model + + +@pytest.mark.asyncio +@pytest.mark.parametrize( # noqa: F821 + "expected_model,mod...
Do you have an openai key at hand? You can set the OPENAI_API_KEY env variable and we can test the entire LLM call and dont need the try..except block. This is also how we do it in our CI pipeline.
langfuse-python
github_2023
python
508
langfuse
maxdeichmann
@@ -1,7 +1,9 @@ from langchain_openai import AzureChatOpenAI, AzureOpenAI, ChatOpenAI, OpenAI import pytest - +import types from langfuse.callback import CallbackHandler +from langchain.prompts import ChatPromptTemplate +from langchain.schema import StrOutputParser
FAILED tests/test_extract_model_langchain_openai.py::test_chain_streaming_llm_call_with_langchain_openai[gpt-3.5-turbo-model0] - assert 2 == 4 FAILED tests/test_extract_model_langchain_openai.py::test_chain_streaming_llm_call_with_langchain_openai[gpt-3.5-turbo-instruct-model1] - assert 2 == 4 FAILED tests/test_extract...
langfuse-python
github_2023
python
508
langfuse
maxdeichmann
@@ -47,3 +49,254 @@ def test_entire_llm_call_using_langchain_openai(expected_model, model): generation = list(filter(lambda o: o.type == "GENERATION", trace.observations))[0] assert generation.model == expected_model + + +@pytest.mark.asyncio +@pytest.mark.parametrize( # noqa: F821 + "expected_model,mod...
We would probably want to assert that we have input, output set to some value here to ensure we are tracing this.
langfuse-python
github_2023
python
508
langfuse
maxdeichmann
@@ -47,3 +49,254 @@ def test_entire_llm_call_using_langchain_openai(expected_model, model): generation = list(filter(lambda o: o.type == "GENERATION", trace.observations))[0] assert generation.model == expected_model + + +@pytest.mark.asyncio +@pytest.mark.parametrize( # noqa: F821 + "expected_model,mod...
Minor: you can import this function also from https://github.com/noble-varghese/langfuse-python/blob/318f3238c0dcb7c0bf361c0bad58433386dc46e4/langfuse/openai.py#L451
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -840,7 +841,7 @@ def generation( start_time: typing.Optional[dt.datetime] = None, end_time: typing.Optional[dt.datetime] = None, metadata: typing.Optional[typing.Any] = None, - level: typing.Optional[Literal["DEBUG", "DEFAULT", "WARNING", "ERROR"]] = None, + level: typing.Opt...
In general i like this change for DRY. However, it has the downside, that users wont see all the literals in intellisense. SpanLevel could be anything then. Hence i would probably leave that out here.
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -0,0 +1,23 @@ +from datetime import datetime +from typing import Any, List, Optional, TypedDict, Literal + +SpanLevel = Literal["DEBUG", "DEFAULT", "WARNING", "ERROR"] + + +class TraceMetadata(TypedDict): + name: Optional[str] + user_id: Optional[str] + session_id: Optional[str] + version: Optional[str] ...
Observations do not have session_id, tags, user_id
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -0,0 +1,388 @@ +from collections import defaultdict +from contextvars import ContextVar +from datetime import datetime +from functools import wraps +import logging +import os +from typing import Any, Callable, DefaultDict, List, Optional, Union + +from langfuse.client import Langfuse, StatefulSpanClient, StatefulTra...
This here might be called concurrently / parallel. I guess we shoudl make sure this behaves like a singleton. We have a similar implementation in the openai integration. Feel free to check that out.
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -0,0 +1,388 @@ +from collections import defaultdict +from contextvars import ContextVar +from datetime import datetime +from functools import wraps +import logging +import os +from typing import Any, Callable, DefaultDict, List, Optional, Union + +from langfuse.client import Langfuse, StatefulSpanClient, StatefulTra...
Quick question: say i have a function and annotate it with a trace. Within that function i start two threads, which execute two annotated functions. Will both get the same parent?
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -0,0 +1,388 @@ +from collections import defaultdict +from contextvars import ContextVar +from datetime import datetime +from functools import wraps +import logging +import os +from typing import Any, Callable, DefaultDict, List, Optional, Union + +from langfuse.client import Langfuse, StatefulSpanClient, StatefulTra...
This is great DX!!!!
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -0,0 +1,536 @@ +import asyncio +from collections import defaultdict +from contextvars import ContextVar +from datetime import datetime +from functools import wraps +import logging +import os +from typing import ( + Any, + Callable, + DefaultDict, + List, + Optional, + Union, + Literal, + Dict...
Good implementation above, i think this is the way to go!
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -0,0 +1,497 @@ +import asyncio +from contextvars import ContextVar +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor +import pytest + +from langchain_community.chat_models import ChatOpenAI +from langchain.prompts import ChatPromptTemplate +from langfuse.decorators import langfu...
Nice!
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -150,7 +160,13 @@ async def async_wrapper(*args, **kwargs): except Exception as e: self._handle_exception(observation, e) finally: + if inspect.isasyncgen(result):
Nice!
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -477,7 +485,7 @@ def get_current_langchain_handler(self): return observation.get_langchain_handler() - def get_current_trace_id(self): + def get_current_trace_id(self, log_warnings: bool = True):
Wdyt about always logging a warning here? I think this would be unexpected for the user to not get an id here.
langfuse-python
github_2023
python
425
langfuse
maxdeichmann
@@ -17,14 +17,28 @@ def __new__(cls): cls._instance = super(LangfuseSingleton, cls).__new__(cls) return cls._instance - def get(self) -> Langfuse: + def get( + self, + public_key: Optional[str] = None, + secret_key: Optional[str] = None, + host: Optional...
can you remove this?
langfuse-python
github_2023
python
430
langfuse
maxdeichmann
@@ -72,3 +73,12 @@ def __eq__(self, other): ) return False + + def get_langchain_prompt(self) -> str: + """Converts string of Langfuse prompt template prompt into string compatible + with Lanchain PromptTemplate.
nit: typo lanchain -> langchain
langfuse-python
github_2023
python
405
langfuse
maxdeichmann
@@ -480,24 +476,21 @@ def score( id: typing.Optional[str] = None, comment: typing.Optional[str] = None, observation_id: typing.Optional[str] = None, - kwargs=None,
Yes, this is a bug. Could you quickly add it?
langfuse-python
github_2023
python
405
langfuse
maxdeichmann
@@ -1,4 +1,12 @@ import logging + +log = logging.getLogger("langfuse") + +try: # Test that langchain is installed before proceeding + import langchain +except ImportError as e: + log.exception(f"Could not import langchain. Some functionality may be missing. {e}")
Thanks!
langfuse-python
github_2023
others
412
langfuse
maxdeichmann
@@ -7,15 +7,15 @@ license = "MIT" readme = "README.md" [tool.poetry.dependencies] -python = ">=3.8.1,<3.13"
The 13 was intended so that 12 is included. Alternatively, <= 3.12
langfuse-python
github_2023
others
412
langfuse
maxdeichmann
@@ -7,15 +7,15 @@ license = "MIT" readme = "README.md" [tool.poetry.dependencies] -python = ">=3.8.1,<3.13" +python = ">=3.8.1,<3.12" httpx = ">=0.15.4,<0.26.0" pydantic = ">=1.10.7, <3.0" backoff = "^2.2.1" openai = ">=0.27.8" wrapt = "1.14" langchain = { version = ">=0.0.309", optional = true } chevron = "...
Is this intended?
langfuse-python
github_2023
python
412
langfuse
maxdeichmann
@@ -33,3 +33,12 @@ class ParsedLLMEndPayload(TypedDict): output: Optional[dict] usage: Optional[ModelUsage] model: Optional[str] + + +class TraceMetadata(TypedDict): + name: Optional[str] + user_id: Optional[str] + session_id: Optional[str] + version: Optional[str] + metadata: Optional[Any...
Can you also add `release` here?
langfuse-python
github_2023
python
412
langfuse
maxdeichmann
@@ -311,3 +311,54 @@ def test_callback_with_root_span(): second_embedding_generation, second_llm_generation = generations[-2:] assert validate_embedding_generation(second_embedding_generation) assert validate_llm_generation(second_llm_generation) + + +def test_callback_with_custom_trace_metadata(): + ...
maybe call it `callback.trace()`?
langfuse-python
github_2023
python
412
langfuse
maxdeichmann
@@ -92,13 +106,49 @@ def __init__( self.event_map: Dict[str, List[CallbackEvent]] = defaultdict(list) self._llama_index_trace_name: Optional[str] = None self._token_counter = TokenCounter(tokenizer) + self.tags = tags # For stream-chat, the last LLM end_event arrives after t...
Please ensure that the context is cleared after llamaindex invocation so that no succeeding llamaindex usage gets the same contexts.
langfuse-python
github_2023
python
412
langfuse
maxdeichmann
@@ -182,16 +263,33 @@ def _create_observations_from_trace_map( ) def _get_root_observation(self) -> Union[StatefulTraceClient, StatefulSpanClient]:
To reiterate on our discussion earlier. We decided to: - do not create another span for roots - add the same llamaindex spans / generations for root as for non root cases - in case that trace was provided, do not override. I think this is implemented correctly.
langfuse-python
github_2023
python
401
langfuse
maxdeichmann
@@ -257,8 +276,59 @@ def _handle_LLM_events( }, ) + # Register orphaned LLM event (only start event, no end event) to be later upserted with the correct trace_id + if len(events) == 1: + self._orphaned_LLM_generations[event_id] = (generation, self.trace) + retur...
pls. refactor
langfuse-python
github_2023
python
400
langfuse
maxdeichmann
@@ -187,17 +189,24 @@ def _get_langfuse_data_from_kwargs( if user_id is not None and not isinstance(user_id, str): raise TypeError("user_id must be a string") + tags = kwargs.get("tags", None) + if tags is not None and ( + not isinstance(tags, list) or not all(isinstance(tag, str) for tag i...
I think we have to use `List` here. See import on top.
langfuse-python
github_2023
python
388
langfuse
maxdeichmann
@@ -29,19 +29,9 @@ SystemMessage, ) except ImportError: - logging.getLogger("langfuse").warning( - "Could not import langchain. Some functionality may be missing." + raise ModuleNotFoundError( + "Please install langchain to use the Langfuse langchain integration: 'pip install langchai...
Thanks a lot!
langfuse-python
github_2023
python
384
langfuse
maxdeichmann
@@ -0,0 +1,366 @@ +from collections import defaultdict +from typing import Any, Dict, List, Optional, Union, Tuple, Callable +from uuid import uuid4 +import logging + +from langfuse.client import ( + StatefulSpanClient, + StatefulTraceClient, + StatefulGenerationClient, +) +from langfuse.decorators.error_loggi...
Placeholder: our usage object
langfuse-python
github_2023
python
384
langfuse
maxdeichmann
@@ -0,0 +1,366 @@ +from collections import defaultdict +from typing import Any, Dict, List, Optional, Union, Tuple, Callable +from uuid import uuid4 +import logging + +from langfuse.client import ( + StatefulSpanClient, + StatefulTraceClient, + StatefulGenerationClient, +) +from langfuse.decorators.error_loggi...
We only start creating generations when this function is called?
langfuse-python
github_2023
python
384
langfuse
maxdeichmann
@@ -0,0 +1,366 @@ +from collections import defaultdict +from typing import Any, Dict, List, Optional, Union, Tuple, Callable +from uuid import uuid4 +import logging + +from langfuse.client import ( + StatefulSpanClient, + StatefulTraceClient, + StatefulGenerationClient, +) +from langfuse.decorators.error_loggi...
Is this something our users need to provide?
langfuse-python
github_2023
others
384
langfuse
maxdeichmann
@@ -7,14 +7,15 @@ license = "MIT" readme = "README.md" [tool.poetry.dependencies] -python = ">=3.8.1,<4.0" +python = ">=3.8.1,<3.12"
:D
langfuse-python
github_2023
python
384
langfuse
maxdeichmann
@@ -0,0 +1,43 @@ +import functools +import logging + +logger = logging.getLogger("langfuse") + + +def catch_and_log_errors(func): + """Catch all exceptions and log them. Do NOT re-raise the exception.""" + + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **...
add condition that init is excluded from this.
langfuse-python
github_2023
python
384
langfuse
maxdeichmann
@@ -0,0 +1,366 @@ +from collections import defaultdict +from typing import Any, Dict, List, Optional, Union, Tuple, Callable +from uuid import uuid4 +import logging + +from langfuse.client import ( + StatefulSpanClient, + StatefulTraceClient, + StatefulGenerationClient, +) +from langfuse.decorators.error_loggi...
chat format