Server API Reference

Complete API documentation for the DeepFix Server.

API Endpoints

AnalyseArtifactsAPI

Bases: LitAPI

API endpoint for artifact analysis.

Provides a LitServe API for analyzing ML artifacts (datasets, training, deepchecks, model checkpoints) and returning diagnostic results.
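A minimal sketch of serving this endpoint with LitServe (the port, accelerator, and api_path values are illustrative, and a LitServe version that supports async LitAPI methods is assumed):

import litserve as ls

from deepfix_server.api import AnalyseArtifactsAPI

api = AnalyseArtifactsAPI()
# api_path and port are illustrative; match them to your deployment
server = ls.LitServer(api, accelerator="auto", api_path="/v1/analyse")
server.run(port=8000)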

Source code in deepfix-server/src/deepfix_server/api.py
class AnalyseArtifactsAPI(ls.LitAPI):
    """API endpoint for artifact analysis.

    Provides a LitServe API for analyzing ML artifacts (datasets, training,
    deepchecks, model checkpoints) and returning diagnostic results.
    """

    def _ensure_initialized(self) -> None:
        """Initialize dependencies if setup has not run."""
        if getattr(self, "coordinator", None) is not None:
            return

        self.llm_config = LLMConfig.load_from_env()
        self.coordinator = ArtifactAnalysisCoordinator(config=self.llm_config)

    def setup(self, device: str) -> None:
        """Setup the API endpoint.

        Initializes logging and creates the artifact analysis coordinator.

        Args:
            device: Device specification (unused, kept for LitAPI compatibility).
        """
        if os.getenv("MLFLOW_EXP_NAME") and os.getenv("MLFLOW_TRACKING_URI"):
            setup_dspy_logging(
                experiment_name=os.getenv("MLFLOW_EXP_NAME"),
                tracking_uri=os.getenv("MLFLOW_TRACKING_URI"),
            )
        else:
            print(
                "Skipping DSPy logging setup: MLFLOW_EXP_NAME and "
                "MLFLOW_TRACKING_URI are not set."
            )

        self._ensure_initialized()

    async def decode_request(self, request: APIRequest) -> AgentContext:
        """Decode API request into AgentContext.

        Args:
            request: APIRequest containing artifacts and configuration.

        Returns:
            AgentContext with artifacts and settings.

        Raises:
            HTTPException: If request decoding fails (status 400).
        """
        try:
            dataset_artifacts = request.dataset_artifacts
            if isinstance(request.dataset_artifacts, dict):
                dataset_artifacts = DatasetArtifacts.from_dict(
                    request.dataset_artifacts
                )
            elif not isinstance(request.dataset_artifacts, DatasetArtifacts):
                raise ValueError("Dataset artifacts must be a DatasetArtifacts object")
            return AgentContext(
                dataset_artifacts=dataset_artifacts,
                training_artifacts=request.training_artifacts,
                deepchecks_artifacts=request.deepchecks_artifacts,
                model_checkpoint_artifacts=request.model_checkpoint_artifacts,
                dataset_name=request.dataset_name,
                language=request.language,
            )
        except Exception as exc:
            raise HTTPException(
                status_code=400,
                detail=f"Error decoding request: {exc}",
            ) from exc

    async def predict(self, request_ctx: AgentContext) -> APIResponse:
        """Run artifact analysis and return results.

        Args:
            request_ctx: AgentContext containing artifacts to analyze.

        Returns:
            APIResponse with analysis results from all agents.

        Raises:
            HTTPException: If analysis fails (status 500).
        """
        try:
            results = await self.coordinator.arun(request_ctx)
            response = APIResponse(
                agent_results=results.get_agent_results(),
                summary=results.summary,
                additional_outputs=results.additional_outputs,
                error_messages=results.get_error_messages(),
                dataset_name=request_ctx.dataset_name,
            )
            return response
        except Exception as exc:
            raise HTTPException(status_code=500, detail=traceback.format_exc()) from exc

decode_request(request) async

Decode API request into AgentContext.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| request | APIRequest | APIRequest containing artifacts and configuration. | required |

Returns:

| Type | Description |
| --- | --- |
| AgentContext | AgentContext with artifacts and settings. |

Raises:

| Type | Description |
| --- | --- |
| HTTPException | If request decoding fails (status 400). |
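A sketch of the two accepted dataset_artifacts forms (payload fields are placeholders; the calls must run inside a coroutine since decode_request is async):

payload = {"...": "..."}  # placeholder for serialized dataset artifact fields

# 1) plain dict: converted internally via DatasetArtifacts.from_dict
ctx = await api.decode_request(
    APIRequest(dataset_name="my-dataset", dataset_artifacts=payload)
)

# 2) DatasetArtifacts instance: passed through unchanged
ctx = await api.decode_request(
    APIRequest(
        dataset_name="my-dataset",
        dataset_artifacts=DatasetArtifacts.from_dict(payload),
    )
)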

predict(request_ctx) async

Run artifact analysis and return results.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| request_ctx | AgentContext | AgentContext containing artifacts to analyze. | required |

Returns:

| Type | Description |
| --- | --- |
| APIResponse | APIResponse with analysis results from all agents. |

Raises:

| Type | Description |
| --- | --- |
| HTTPException | If analysis fails (status 500). |
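Called outside LitServe, the method behaves like this minimal sketch (assumes setup() has already run and artifacts is a DatasetArtifacts instance); any failure is re-raised as an HTTPException whose detail carries the full traceback:

ctx = AgentContext(
    dataset_artifacts=artifacts,
    dataset_name="my-dataset",
    language="english",
)
response = await api.predict(ctx)  # APIResponse on success, HTTP 500 on failure
print(response.summary)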

setup(device)

Set up the API endpoint.

Initializes logging and creates the artifact analysis coordinator.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| device | str | Device specification (unused, kept for LitAPI compatibility). | required |
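DSPy logging is configured only when both MLflow variables are present; a sketch (the experiment name and tracking URI are illustrative):

import os

os.environ["MLFLOW_EXP_NAME"] = "deepfix-dev"
os.environ["MLFLOW_TRACKING_URI"] = "http://localhost:5000"

api = AnalyseArtifactsAPI()
api.setup(device="cpu")  # device is ignored; kept for LitAPI compatibility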

Coordinators

ArtifactAnalysisCoordinator

Bases: Agent

Main orchestrator that coordinates specialized analyzer agents.

Source code in deepfix-server/src/deepfix_server/coordinators.py
class ArtifactAnalysisCoordinator(Agent):
    """Main orchestrator that coordinates specialized analyzer agents."""

    def __init__(
        self,
        config: Optional[LLMConfig] = None,
    ):
        super().__init__(config=config)

        # initialize agents and loaders
        self.analyzer_agents = self._initialize_analyzer_agents()
        self.cross_artifact_reasoning_agent = CrossArtifactReasoningAgent(
            llm_config=self._llm_config
        )

    async def _analyze_one_artifact(self, artifact: Artifacts) -> AgentResult:
        agent_name = None
        try:
            # _get_analyzer_agent raises ValueError if no agent supports
            # the artifact, so a separate None check is unnecessary
            analyzer_agent = self._get_analyzer_agent(artifact)
            agent_name = analyzer_agent.agent_name
            focused_context = self._create_focused_context(artifact)
            return await analyzer_agent.arun(focused_context)
        except Exception:
            LOGGER.error(f"Error with agent {agent_name}:\n {traceback.format_exc()}")
            raise

    async def aforward(self, context: AgentContext) -> ArtifactAnalysisResult:
        """Analyze artifacts asynchronously and return results.

        Args:
            context: Agent context containing artifacts and configuration.

        Returns:
            ArtifactAnalysisResult containing analysis results from all agents.
        """
        # 1. Analyze artifacts
        LOGGER.info(
            f"Analyzing {len(context.artifacts)} artifacts linked to dataset {context.dataset_name}..."
        )
        results = await asyncio.gather(
            *[self._analyze_one_artifact(artifact) for artifact in context.artifacts]
        )
        for result in results:
            context.agent_results[result.agent_name] = result

        # 2. Cross-artifact reasoning
        LOGGER.info("Cross-artifact reasoning...")
        out = await self.cross_artifact_reasoning_agent.arun(
            previous_analyses=context.agent_results, output_language=context.language
        )
        context.agent_results[out.agent_name] = out

        # 3. Output results
        output = ArtifactAnalysisResult(
            context=context,
            summary=out.additional_outputs.get("summary", None),
        )
        return output

    async def arun(self, context: AgentContext) -> ArtifactAnalysisResult:
        """Run the coordinator asynchronously with error handling.

        Args:
            context: Agent context containing artifacts and configuration.

        Returns:
            ArtifactAnalysisResult with analysis results or error message if execution fails.
        """
        try:
            return await self.acall(context)
        except Exception as e:
            LOGGER.error(
                f"Error with coordinator {self.agent_name}:\n {traceback.format_exc()}"
            )
            error_result = AgentResult(agent_name=self.agent_name, error_message=str(e))
            context.agent_results[self.agent_name] = error_result
            return ArtifactAnalysisResult(
                context=context,
                summary=None,
            )

    def run(self, context: AgentContext) -> ArtifactAnalysisResult:
        """Run the coordinator (alias for arun for backward compatibility).

        Args:
            context: Agent context containing artifacts and configuration.

        Returns:
            ArtifactAnalysisResult containing analysis results from all agents.
        """
        try:
            return self(context)
        except Exception as e:
            LOGGER.error(
                f"Error with coordinator {self.agent_name}:\n {traceback.format_exc()}"
            )
            error_result = AgentResult(agent_name=self.agent_name, error_message=str(e))
            context.agent_results[self.agent_name] = error_result
            return ArtifactAnalysisResult(
                context=context,
                summary=None,
            )

    def _get_analyzer_agent(self, artifact: Artifacts) -> ArtifactAnalyzer:
        for analyzer_agent in self.analyzer_agents:
            if analyzer_agent.supports_artifact(artifact):
                return analyzer_agent
        raise ValueError(
            f"No analyzer agent found for artifact of type: {type(artifact)}"
        )

    def _create_focused_context(self, artifact: Artifacts) -> AgentContext:
        ctx = AgentContext()
        ctx.insert_artifact(artifact)
        return ctx

    def forward(self, context: AgentContext) -> ArtifactAnalysisResult:
        # 1. Analyze artifacts
        LOGGER.info(
            f"Analyzing {len(context.artifacts)} artifacts linked to dataset {context.dataset_name}..."
        )
        def _run_sync(artifact: Artifacts) -> AgentResult:
            # _analyze_one_artifact is a coroutine function; each worker
            # thread needs its own event loop to actually execute it
            return asyncio.run(self._analyze_one_artifact(artifact))

        with ThreadPoolExecutor(max_workers=max(1, len(context.artifacts))) as executor:
            results = list(executor.map(_run_sync, context.artifacts))
        for result in results:
            context.agent_results[result.agent_name] = result

        # 2. Cross-artifact reasoning
        LOGGER.info("Cross-artifact reasoning...")
        out = self.cross_artifact_reasoning_agent.run(
            previous_analyses=context.agent_results, output_language=context.language
        )
        context.agent_results[out.agent_name] = out

        # 3. Output results
        output = ArtifactAnalysisResult(
            context=context,
            summary=out.additional_outputs.get("summary", None),
        )
        return output

    def _initialize_analyzer_agents(self) -> List[ArtifactAnalyzer]:
        """Initialize specialized analyzer agents"""
        agents = [
            DeepchecksArtifactsAnalyzer(config=self._llm_config),
            DatasetArtifactsAnalyzer(config=self._llm_config),
            ModelCheckpointArtifactsAnalyzer(config=self._llm_config),
        ]
        return agents
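A minimal sketch of driving the coordinator directly, assuming ctx is an AgentContext already populated with artifacts:

coordinator = ArtifactAnalysisCoordinator(config=LLMConfig.load_from_env())

result = coordinator.run(ctx)           # synchronous path (forward)
# result = await coordinator.arun(ctx)  # async path (aforward), inside a coroutine
print(result.summary)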

aforward(context) async

Analyze artifacts asynchronously and return results.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| context | AgentContext | Agent context containing artifacts and configuration. | required |

Returns:

| Type | Description |
| --- | --- |
| ArtifactAnalysisResult | ArtifactAnalysisResult containing analysis results from all agents. |

arun(context) async

Run the coordinator asynchronously with error handling.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| context | AgentContext | Agent context containing artifacts and configuration. | required |

Returns:

| Type | Description |
| --- | --- |
| ArtifactAnalysisResult | ArtifactAnalysisResult with analysis results or error message if execution fails. |
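Because arun never raises, a caller can detect failure by the missing summary; get_error_messages is the same accessor the API layer uses above. A sketch:

result = await coordinator.arun(ctx)
if result.summary is None:
    # per-agent failures, keyed by agent name
    print(result.get_error_messages())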

run(context)

Run the coordinator synchronously (counterpart to arun, kept for backward compatibility).

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| context | AgentContext | Agent context containing artifacts and configuration. | required |

Returns:

| Type | Description |
| --- | --- |
| ArtifactAnalysisResult | ArtifactAnalysisResult containing analysis results from all agents. |


Configuration

LLMConfig

Bases: BaseModel

Configuration for LLM provider settings.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| api_key | Optional[str] | Optional API key for the LLM provider. |
| base_url | Optional[str] | Optional base URL for the LLM API endpoint. |
| model_name | str | Name of the LLM model to use. |
| temperature | float | Sampling temperature for text generation. Defaults to 0.7. |
| max_tokens | int | Maximum number of tokens to generate. Defaults to 8000. |
| cache | bool | Whether to cache LLM requests. Defaults to True. |
| track_usage | bool | Whether to track LLM usage. Defaults to True. |

Source code in deepfix-server/src/deepfix_server/config.py
class LLMConfig(BaseModel):
    """Configuration for LLM provider settings.

    Attributes:
        api_key: Optional API key for the LLM provider.
        base_url: Optional base URL for the LLM API endpoint.
        model_name: Name of the LLM model to use.
        temperature: Sampling temperature for text generation. Defaults to 0.7.
        max_tokens: Maximum number of tokens to generate. Defaults to 8000.
        cache: Whether to cache LLM requests. Defaults to True.
        track_usage: Whether to track LLM usage. Defaults to True.
    """

    api_key: Optional[str] = Field(
        default=None, description="API key for the LLM provider"
    )
    base_url: Optional[str] = Field(
        default=None, description="Base URL for the LLM API"
    )
    model_name: str = Field(default=None, description="Model name to use for the LLM")
    temperature: float = Field(
        default=0.7, description="Sampling temperature for text generation"
    )
    max_tokens: int = Field(
        default=8000, description="Maximum tokens to generate in the response"
    )
    cache: bool = Field(default=True, description="Cache request")
    track_usage: bool = Field(default=True, description="Track usage")

    @classmethod
    def load_from_env(cls, env_file: Optional[str] = None) -> "LLMConfig":
        """Load LLM configuration from environment variables.

        Reads the following environment variables:
        - DEEPFIX_LLM_API_KEY
        - DEEPFIX_LLM_BASE_URL
        - DEEPFIX_LLM_MODEL_NAME
        - DEEPFIX_LLM_TEMPERATURE
        - DEEPFIX_LLM_MAX_TOKENS
        - DEEPFIX_LLM_CACHE
        - DEEPFIX_LLM_TRACK_USAGE

        Args:
            env_file: Optional path to .env file to load.

        Returns:
            LLMConfig instance populated from environment variables.
        """
        if env_file is not None:
            load_dotenv(env_file)
        api_key = os.getenv("DEEPFIX_LLM_API_KEY")
        base_url = os.getenv("DEEPFIX_LLM_BASE_URL")
        model_name = os.getenv("DEEPFIX_LLM_MODEL_NAME")
        temperature = float(os.getenv("DEEPFIX_LLM_TEMPERATURE", "0.7"))
        max_tokens = int(os.getenv("DEEPFIX_LLM_MAX_TOKENS", "8000"))
        # parse booleans explicitly: bool("false") would be True
        cache = os.getenv("DEEPFIX_LLM_CACHE", "true").lower() in ("1", "true", "yes")
        track_usage = (
            os.getenv("DEEPFIX_LLM_TRACK_USAGE", "true").lower()
            in ("1", "true", "yes")
        )
        return cls(
            api_key=api_key,
            base_url=base_url,
            model_name=model_name,
            temperature=temperature,
            max_tokens=max_tokens,
            cache=cache,
            track_usage=track_usage,
        )
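A sketch of constructing the config directly (all values are illustrative):

config = LLMConfig(
    model_name="gpt-4o-mini",
    api_key="sk-placeholder",
    base_url="https://api.example.com/v1",
    temperature=0.2,
    max_tokens=4000,
)
coordinator = ArtifactAnalysisCoordinator(config=config)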

load_from_env(env_file=None) classmethod

Load LLM configuration from environment variables.

Reads the following environment variables:

- DEEPFIX_LLM_API_KEY
- DEEPFIX_LLM_BASE_URL
- DEEPFIX_LLM_MODEL_NAME
- DEEPFIX_LLM_TEMPERATURE
- DEEPFIX_LLM_MAX_TOKENS
- DEEPFIX_LLM_CACHE
- DEEPFIX_LLM_TRACK_USAGE

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| env_file | Optional[str] | Optional path to .env file to load. | None |

Returns:

| Type | Description |
| --- | --- |
| LLMConfig | LLMConfig instance populated from environment variables. |
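The same configuration via the environment (values are illustrative; unset numeric and boolean variables fall back to the documented defaults of 0.7, 8000, True, and True):

import os

os.environ["DEEPFIX_LLM_MODEL_NAME"] = "gpt-4o-mini"
os.environ["DEEPFIX_LLM_TEMPERATURE"] = "0.2"

config = LLMConfig.load_from_env()  # or LLMConfig.load_from_env(env_file=".env")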

Agents

Agent classes are internal to the server. See Architecture for details.

REST API

POST /v1/analyse

Analyze ML artifacts and return diagnostic results.

Request Body:

{
  "dataset_name": "my-dataset",
  "dataset_artifacts": {...},
  "deepchecks_artifacts": {...},
  "model_checkpoint_artifacts": {...},
  "training_artifacts": {...},
  "language": "english"
}

Response:

{
  "agent_results": {...},
  "summary": "Cross-artifact summary...",
  "additional_outputs": {...},
  "error_messages": {}
}
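A minimal client sketch using the requests library (host, port, and payload contents are illustrative):

import requests

payload = {
    "dataset_name": "my-dataset",
    "dataset_artifacts": {},  # serialized artifacts go here
    "language": "english",
}
resp = requests.post("http://localhost:8000/v1/analyse", json=payload, timeout=600)
resp.raise_for_status()
body = resp.json()
print(body["summary"])
for agent, error in body["error_messages"].items():
    print(f"{agent}: {error}")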

Examples

See the Quickstart Guide for usage examples.