
refactor: Implement conditional imports for all integration modules #480


Merged
merged 8 commits on Jul 10, 2025
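This PR applies one conditional-import pattern to every integration module: the third-party SDK import is wrapped in try/except and recorded in a HAVE_* flag, a TYPE_CHECKING block re-imports the SDK so the now-quoted type annotations still resolve for static type checkers, and each public entry point fails fast with an actionable ImportError when the SDK is missing. A minimal sketch of the pattern as read from the diffs below, using a placeholder SDK (somesdk and trace_somesdk are illustrative, not part of this PR):

from typing import TYPE_CHECKING

try:
    import somesdk  # optional dependency; absence must not break module import
    HAVE_SOMESDK = True
except ImportError:
    HAVE_SOMESDK = False

if TYPE_CHECKING:
    # Seen only by type checkers, never executed at runtime, so the quoted
    # annotations below resolve without the SDK installed.
    import somesdk


def trace_somesdk(client: "somesdk.Client") -> "somesdk.Client":
    """Entry point that defers the failure from import time to call time."""
    if not HAVE_SOMESDK:
        raise ImportError(
            "somesdk is not installed. Please install it with: pip install somesdk"
        )
    return client

The quoted annotations are the key companion change: without them, evaluating the signatures at definition time would raise NameError whenever the SDK is absent.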
24 changes: 18 additions & 6 deletions src/openlayer/lib/integrations/anthropic_tracer.py
@@ -4,18 +4,25 @@
 import logging
 import time
 from functools import wraps
-from typing import Any, Dict, Iterator, Optional, Union
+from typing import Any, Dict, Iterator, Optional, Union, TYPE_CHECKING
 
-import anthropic
+try:
+    import anthropic
+    HAVE_ANTHROPIC = True
+except ImportError:
+    HAVE_ANTHROPIC = False
+
+if TYPE_CHECKING:
+    import anthropic
 
 from ..tracing import tracer
 
 logger = logging.getLogger(__name__)
 
 
 def trace_anthropic(
-    client: anthropic.Anthropic,
-) -> anthropic.Anthropic:
+    client: "anthropic.Anthropic",
+) -> "anthropic.Anthropic":
     """Patch the Anthropic client to trace chat completions.
 
     The following information is collected for each chat completion:
@@ -42,6 +49,11 @@ def trace_anthropic(
     anthropic.Anthropic
         The patched Anthropic client.
     """
+    if not HAVE_ANTHROPIC:
+        raise ImportError(
+            "Anthropic library is not installed. Please install it with: pip install anthropic"
+        )
+
     create_func = client.messages.create
 
     @wraps(create_func)
@@ -180,7 +192,7 @@ def handle_non_streaming_create(
     *args,
     inference_id: Optional[str] = None,
     **kwargs,
-) -> anthropic.types.Message:
+) -> "anthropic.types.Message":
     """Handles the create method when streaming is disabled.
 
     Parameters
@@ -227,7 +239,7 @@ def handle_non_streaming_create(
 
 
 def parse_non_streaming_output_data(
-    response: anthropic.types.Message,
+    response: "anthropic.types.Message",
 ) -> Union[str, Dict[str, Any], None]:
     """Parses the output data from a non-streaming completion.

20 changes: 16 additions & 4 deletions src/openlayer/lib/integrations/async_openai_tracer.py
@@ -4,9 +4,16 @@
 import logging
 import time
 from functools import wraps
-from typing import Any, AsyncIterator, Optional, Union
+from typing import Any, AsyncIterator, Optional, Union, TYPE_CHECKING
 
-import openai
+try:
+    import openai
+    HAVE_OPENAI = True
+except ImportError:
+    HAVE_OPENAI = False
+
+if TYPE_CHECKING:
+    import openai
 
 from .openai_tracer import (
     get_model_parameters,
@@ -19,8 +26,8 @@
 
 
 def trace_async_openai(
-    client: Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI],
-) -> Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI]:
+    client: Union["openai.AsyncOpenAI", "openai.AsyncAzureOpenAI"],
+) -> Union["openai.AsyncOpenAI", "openai.AsyncAzureOpenAI"]:
     """Patch the AsyncOpenAI or AsyncAzureOpenAI client to trace chat completions.
 
     The following information is collected for each chat completion:
@@ -47,6 +54,11 @@ def trace_async_openai(
     Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI]
         The patched AsyncOpenAI client.
     """
+    if not HAVE_OPENAI:
+        raise ImportError(
+            "OpenAI library is not installed. Please install it with: pip install openai"
+        )
+
     is_azure_openai = isinstance(client, openai.AsyncAzureOpenAI)
     create_func = client.chat.completions.create

20 changes: 16 additions & 4 deletions src/openlayer/lib/integrations/groq_tracer.py
@@ -4,18 +4,25 @@
 import logging
 import time
 from functools import wraps
-from typing import Any, Dict, Iterator, Optional, Union
+from typing import Any, Dict, Iterator, Optional, Union, TYPE_CHECKING
 
-import groq
+try:
+    import groq
+    HAVE_GROQ = True
+except ImportError:
+    HAVE_GROQ = False
+
+if TYPE_CHECKING:
+    import groq
 
 from ..tracing import tracer
 
 logger = logging.getLogger(__name__)
 
 
 def trace_groq(
-    client: groq.Groq,
-) -> groq.Groq:
+    client: "groq.Groq",
+) -> "groq.Groq":
     """Patch the Groq client to trace chat completions.
 
     The following information is collected for each chat completion:
@@ -42,6 +49,11 @@ def trace_groq(
     groq.Groq
         The patched Groq client.
     """
+    if not HAVE_GROQ:
+        raise ImportError(
+            "Groq library is not installed. Please install it with: pip install groq"
+        )
+
     create_func = client.chat.completions.create
 
     @wraps(create_func)
44 changes: 31 additions & 13 deletions src/openlayer/lib/integrations/langchain_callback.py
@@ -2,11 +2,19 @@
 
 # pylint: disable=unused-argument
 import time
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
 from uuid import UUID
 
-from langchain import schema as langchain_schema
-from langchain.callbacks.base import BaseCallbackHandler
+try:
+    from langchain import schema as langchain_schema
+    from langchain.callbacks.base import BaseCallbackHandler
+    HAVE_LANGCHAIN = True
+except ImportError:
+    HAVE_LANGCHAIN = False
+
+if TYPE_CHECKING:
+    from langchain import schema as langchain_schema
+    from langchain.callbacks.base import BaseCallbackHandler
 
 from ..tracing import tracer, steps, traces, enums
 from .. import utils
@@ -18,10 +26,20 @@
 }
 
 
-class OpenlayerHandler(BaseCallbackHandler):
+if HAVE_LANGCHAIN:
+    BaseCallbackHandlerClass = BaseCallbackHandler
+else:
+    BaseCallbackHandlerClass = object
+
+
+class OpenlayerHandler(BaseCallbackHandlerClass):  # type: ignore[misc]
     """LangChain callback handler that logs to Openlayer."""
 
     def __init__(self, **kwargs: Any) -> None:
+        if not HAVE_LANGCHAIN:
+            raise ImportError(
+                "LangChain library is not installed. Please install it with: pip install langchain"
+            )
         super().__init__()
         self.metadata: Dict[str, Any] = kwargs or {}
         self.steps: Dict[UUID, steps.Step] = {}
@@ -197,7 +215,7 @@ def _convert_step_objects_recursively(self, step: steps.Step) -> None:
     def _convert_langchain_objects(self, obj: Any) -> Any:
         """Recursively convert LangChain objects to JSON-serializable format."""
         # Explicit check for LangChain BaseMessage and its subclasses
-        if isinstance(obj, langchain_schema.BaseMessage):
+        if HAVE_LANGCHAIN and isinstance(obj, langchain_schema.BaseMessage):
             return self._message_to_dict(obj)
 
         # Handle ChatPromptValue objects which contain messages
@@ -249,7 +267,7 @@ def _convert_langchain_objects(self, obj: Any) -> Any:
         # For everything else, convert to string
         return str(obj)
 
-    def _message_to_dict(self, message: langchain_schema.BaseMessage) -> Dict[str, str]:
+    def _message_to_dict(self, message: "langchain_schema.BaseMessage") -> Dict[str, str]:
         """Convert a LangChain message to a JSON-serializable dictionary."""
         message_type = getattr(message, "type", "user")
 
@@ -262,7 +280,7 @@ def _message_to_dict(self, message: langchain_schema.BaseMessage) -> Dict[str, str]:
         return {"role": role, "content": str(message.content)}
 
     def _messages_to_prompt_format(
-        self, messages: List[List[langchain_schema.BaseMessage]]
+        self, messages: List[List["langchain_schema.BaseMessage"]]
     ) -> List[Dict[str, str]]:
         """Convert LangChain messages to Openlayer prompt format using
         unified conversion."""
@@ -302,7 +320,7 @@ def _extract_model_info(
         }
 
     def _extract_token_info(
-        self, response: langchain_schema.LLMResult
+        self, response: "langchain_schema.LLMResult"
     ) -> Dict[str, Any]:
         """Extract token information generically from LLM response."""
         llm_output = response.llm_output or {}
@@ -340,7 +358,7 @@ def _extract_token_info(
             "tokens": token_usage.get("total_tokens", 0),
         }
 
-    def _extract_output(self, response: langchain_schema.LLMResult) -> str:
+    def _extract_output(self, response: "langchain_schema.LLMResult") -> str:
         """Extract output text from LLM response."""
         output = ""
         for generations in response.generations:
@@ -384,7 +402,7 @@ def on_llm_start(
     def on_chat_model_start(
         self,
         serialized: Dict[str, Any],
-        messages: List[List[langchain_schema.BaseMessage]],
+        messages: List[List["langchain_schema.BaseMessage"]],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -414,7 +432,7 @@ def on_chat_model_start(
 
     def on_llm_end(
         self,
-        response: langchain_schema.LLMResult,
+        response: "langchain_schema.LLMResult",
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -590,7 +608,7 @@ def on_text(self, text: str, **kwargs: Any) -> Any:
 
     def on_agent_action(
         self,
-        action: langchain_schema.AgentAction,
+        action: "langchain_schema.AgentAction",
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -612,7 +630,7 @@ def on_agent_action(
 
     def on_agent_finish(
         self,
-        finish: langchain_schema.AgentFinish,
+        finish: "langchain_schema.AgentFinish",
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
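langchain_callback.py needs a variation of the pattern because the handler class itself inherits from the optional dependency: the base class is aliased to object when LangChain is absent, so the module still imports, and the ImportError is deferred to __init__. A hedged usage sketch of that failure mode (assumes the src/ layout in this PR maps to the openlayer.lib.integrations package, and that LangChain is not installed):

# Import succeeds even without langchain installed...
from openlayer.lib.integrations.langchain_callback import OpenlayerHandler

try:
    handler = OpenlayerHandler()  # ...but construction fails fast
except ImportError as err:
    print(err)  # LangChain library is not installed. Please install it with: pip install langchain

With LangChain installed, OpenlayerHandler subclasses BaseCallbackHandler exactly as before, and the # type: ignore[misc] silences the checker's complaint about the dynamic base class.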
24 changes: 18 additions & 6 deletions src/openlayer/lib/integrations/mistral_tracer.py
@@ -4,18 +4,25 @@
 import logging
 import time
 from functools import wraps
-from typing import Any, Dict, Iterator, Optional, Union
+from typing import Any, Dict, Iterator, Optional, Union, TYPE_CHECKING
 
-import mistralai
+try:
+    import mistralai
+    HAVE_MISTRAL = True
+except ImportError:
+    HAVE_MISTRAL = False
+
+if TYPE_CHECKING:
+    import mistralai
 
 from ..tracing import tracer
 
 logger = logging.getLogger(__name__)
 
 
 def trace_mistral(
-    client: mistralai.Mistral,
-) -> mistralai.Mistral:
+    client: "mistralai.Mistral",
+) -> "mistralai.Mistral":
     """Patch the Mistral client to trace chat completions.
 
     The following information is collected for each chat completion:
@@ -42,6 +49,11 @@ def trace_mistral(
     mistralai.Mistral
         The patched Mistral client.
     """
+    if not HAVE_MISTRAL:
+        raise ImportError(
+            "Mistral library is not installed. Please install it with: pip install mistralai"
+        )
+
     stream_func = client.chat.stream
     create_func = client.chat.complete

@@ -184,7 +196,7 @@ def handle_non_streaming_create(
     *args,
     inference_id: Optional[str] = None,
     **kwargs,
-) -> mistralai.models.ChatCompletionResponse:
+) -> "mistralai.models.ChatCompletionResponse":
     """Handles the create method when streaming is disabled.
 
     Parameters
@@ -231,7 +243,7 @@ def handle_non_streaming_create(
 
 
 def parse_non_streaming_output_data(
-    response: mistralai.models.ChatCompletionResponse,
+    response: "mistralai.models.ChatCompletionResponse",
 ) -> Union[str, Dict[str, Any], None]:
     """Parses the output data from a non-streaming completion.

29 changes: 23 additions & 6 deletions src/openlayer/lib/integrations/openai_tracer.py
@@ -4,18 +4,25 @@
 import logging
 import time
 from functools import wraps
-from typing import Any, Dict, Iterator, List, Optional, Union
+from typing import Any, Dict, Iterator, List, Optional, Union, TYPE_CHECKING
 
-import openai
+try:
+    import openai
+    HAVE_OPENAI = True
+except ImportError:
+    HAVE_OPENAI = False
+
+if TYPE_CHECKING:
+    import openai
 
 from ..tracing import tracer
 
 logger = logging.getLogger(__name__)
 
 
 def trace_openai(
-    client: Union[openai.OpenAI, openai.AzureOpenAI],
-) -> Union[openai.OpenAI, openai.AzureOpenAI]:
+    client: Union["openai.OpenAI", "openai.AzureOpenAI"],
+) -> Union["openai.OpenAI", "openai.AzureOpenAI"]:
     """Patch the OpenAI or AzureOpenAI client to trace chat completions.
 
     The following information is collected for each chat completion:
@@ -42,6 +49,11 @@ def trace_openai(
     Union[openai.OpenAI, openai.AzureOpenAI]
         The patched OpenAI client.
     """
+    if not HAVE_OPENAI:
+        raise ImportError(
+            "OpenAI library is not installed. Please install it with: pip install openai"
+        )
+
     is_azure_openai = isinstance(client, openai.AzureOpenAI)
     create_func = client.chat.completions.create

@@ -358,12 +370,17 @@ def parse_non_streaming_output_data(
 
 # --------------------------- OpenAI Assistants API -------------------------- #
 def trace_openai_assistant_thread_run(
-    client: openai.OpenAI, run: "openai.types.beta.threads.run.Run"
+    client: "openai.OpenAI", run: "openai.types.beta.threads.run.Run"
 ) -> None:
     """Trace a run from an OpenAI assistant.
 
     Once the run is completed, the thread data is published to Openlayer,
     along with the latency, and number of tokens used."""
+    if not HAVE_OPENAI:
+        raise ImportError(
+            "OpenAI library is not installed. Please install it with: pip install openai"
+        )
+
     _type_check_run(run)
 
     # Do nothing if the run is not completed
@@ -398,7 +415,7 @@ def trace_openai_assistant_thread_run(
 
 def _type_check_run(run: "openai.types.beta.threads.run.Run") -> None:
     """Validate the run object."""
-    if not isinstance(run, openai.types.beta.threads.run.Run):
+    if HAVE_OPENAI and not isinstance(run, openai.types.beta.threads.run.Run):
         raise ValueError(f"Expected a Run object, but got {type(run)}.")


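Taken together, the diffs make every integration module importable without its SDK while leaving runtime behavior unchanged when the SDK is present. A hedged sketch of both paths for the OpenAI tracer (assumes the openlayer.lib.integrations package path implied by the file locations; HAVE_OPENAI and trace_openai come from the diff above):

from openlayer.lib.integrations import openai_tracer  # no longer requires openai

if openai_tracer.HAVE_OPENAI:
    import openai

    client = openai_tracer.trace_openai(openai.OpenAI())  # patched as before
else:
    try:
        openai_tracer.trace_openai(None)  # guard trips before the client is touched
    except ImportError as err:
        print(err)  # OpenAI library is not installed. Please install it with: pip install openai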