# Main Interface
## Main Public Functions
The main interface provides three primary functions for experiment tracking: `start_logging`, `get_active_experiment`, and `stop_logging`.
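Together they cover the full lifecycle of a run. A minimal sketch (the metric name and values here are illustrative):

```python
import tracelet

# 1. Start a run (creates and activates an Experiment)
tracelet.start_logging(exp_name="quickstart", project="demo", backend="mlflow")

# 2. Fetch the active experiment from anywhere and log to it
experiment = tracelet.get_active_experiment()
if experiment:
    experiment.log_metric("loss", 0.42, iteration=1)

# 3. Stop the run and clean up
tracelet.stop_logging()
```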
### start_logging
Start logging metrics and metadata for your ML experiment.

Args:

- `exp_name`: Name of the experiment. If not provided, uses the `TRACELET_EXPERIMENT_NAME` env var.
- `project`: Project name. If not provided, uses the `TRACELET_PROJECT` env var.
- `backend`: Backend to use (`"mlflow"`, `"wandb"`, `"aim"`). If not provided, uses the `TRACELET_BACKEND` env var.
- `api_key`: API key for the backend. If not provided, uses the `TRACELET_API_KEY` env var.
- `backend_url`: Backend URL. If not provided, uses the `TRACELET_BACKEND_URL` env var.
- `config`: Additional configuration to override defaults and env vars.

Returns:

`Experiment`: The active experiment instance.

Example:

```python
import tracelet

# Start logging with minimal config
tracelet.start_logging("my_experiment")

# Or with more configuration
tracelet.start_logging(
    exp_name="my_experiment",
    project="my_project",
    backend="wandb",
    api_key="...",
)
```
Source code in `tracelet/interface.py`:

````python
def start_logging(
    exp_name: Optional[str] = None,
    project: Optional[str] = None,
    backend: Optional[str] = None,
    api_key: Optional[str] = None,
    backend_url: Optional[str] = None,
    config: Optional[dict[str, Any]] = None,
) -> Experiment:
    """Start logging metrics and metadata for your ML experiment.

    Args:
        exp_name: Name of the experiment. If not provided, uses TRACELET_EXPERIMENT_NAME env var
        project: Project name. If not provided, uses TRACELET_PROJECT env var
        backend: Backend to use ("mlflow", "wandb", "aim"). If not provided, uses TRACELET_BACKEND env var
        api_key: API key for the backend. If not provided, uses TRACELET_API_KEY env var
        backend_url: Backend URL. If not provided, uses TRACELET_BACKEND_URL env var
        config: Additional configuration to override defaults and env vars

    Returns:
        Experiment: The active experiment instance

    Example:
        ```python
        import tracelet

        # Start logging with minimal config
        tracelet.start_logging("my_experiment")

        # Or with more configuration
        tracelet.start_logging(
            exp_name="my_experiment",
            project="my_project",
            backend="wandb",
            api_key="...",
        )
        ```
    """
    global _active_experiment, _settings

    # Stop any existing experiment first
    if _active_experiment:
        _active_experiment.stop()
        _active_experiment = None

    # Initialize settings - let TraceletSettings handle env vars
    settings_dict = {}
    if exp_name is not None:
        settings_dict["experiment_name"] = exp_name
    if project is not None:
        settings_dict["project_name"] = project
    if backend is not None:
        settings_dict["backend"] = backend
    if api_key is not None:
        settings_dict["api_key"] = api_key
    if backend_url is not None:
        settings_dict["backend_url"] = backend_url
    if config:
        settings_dict.update(config)
    _settings = TraceletSettings(**settings_dict)

    # Create experiment config
    exp_config = ExperimentConfig(
        track_metrics=True,
        track_environment=_settings.track_env,
        track_args=True,
        track_stdout=True,
        track_checkpoints=True,
        track_system_metrics=_settings.track_system_metrics,
        track_git=_settings.track_git,
    )

    # Create experiment
    _active_experiment = Experiment(
        name=_settings.experiment_name or "default_experiment",
        config=exp_config,
        backend=_settings.backend,
        tags=[f"project:{_settings.project_name}"],
    )

    # Initialize frameworks based on settings
    if _settings.track_tensorboard:
        pytorch = PyTorchFramework(patch_tensorboard=True)
        _active_experiment._framework = pytorch
        pytorch.initialize(_active_experiment)
    if _settings.track_lightning:
        lightning = LightningFramework()
        lightning.initialize(_active_experiment)

    # Start tracking
    _active_experiment.start()
    return _active_experiment
````
Example Usage:

```python
import tracelet

# Basic usage with MLflow
experiment = tracelet.start_logging(
    exp_name="image_classification",
    project="computer_vision",
    backend="mlflow",
)

# With custom configuration
experiment = tracelet.start_logging(
    exp_name="hyperparameter_tuning",
    project="optimization",
    backend="wandb",
    config={
        "entity": "my_team",
        "tags": ["pytorch", "resnet"],
    },
)

# Multi-backend tracking
experiment = tracelet.start_logging(
    exp_name="model_comparison",
    project="research",
    backend=["mlflow", "wandb", "clearml"],
)
```
### get_active_experiment

Get the currently active experiment.
Source code in `tracelet/interface.py`:

```python
def get_active_experiment() -> Optional[Experiment]:
    """Get the currently active experiment"""
    return _active_experiment
```
Example Usage:

```python
import tracelet

# Start tracking
tracelet.start_logging(exp_name="my_exp", project="my_project", backend="mlflow")

# Get the active experiment from anywhere in your code
experiment = tracelet.get_active_experiment()
if experiment:
    experiment.log_metric("loss", 0.1, iteration=50)
    experiment.log_params({"learning_rate": 0.001})
else:
    print("No active experiment found")
```
### stop_logging

Stop the active experiment and clean up.
Source code in `tracelet/interface.py`:

```python
def stop_logging():
    """Stop the active experiment and cleanup"""
    global _active_experiment
    if _active_experiment:
        _active_experiment.stop()
        _active_experiment = None
```
Example Usage:

```python
import tracelet

# Stop the current experiment
tracelet.stop_logging()

# Verify no active experiment
assert tracelet.get_active_experiment() is None
```
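To guarantee cleanup even when training raises, pair `start_logging` with `stop_logging` in a `try`/`finally` block. A sketch, where `train()` stands in for your own training entry point:

```python
import tracelet

tracelet.start_logging(exp_name="robust_run", project="my_project", backend="mlflow")
try:
    train()  # hypothetical training entry point
finally:
    tracelet.stop_logging()  # always runs, even if train() raises
```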
## Integration Patterns

### Context Manager Pattern
```python
import tracelet

# Using with automatic cleanup
with tracelet.start_logging(exp_name="context_exp", project="test", backend="mlflow") as exp:
    exp.log_metric("accuracy", 0.95)
    exp.log_params({"epochs": 10})
# Automatically calls stop_logging() when exiting the context
```
### Error Handling

```python
import tracelet

try:
    tracelet.start_logging(
        exp_name="my_experiment",
        project="my_project",
        backend="invalid_backend",
    )
except ValueError as e:
    print(f"Backend error: {e}")
    # Fall back to the default backend
    tracelet.start_logging(
        exp_name="my_experiment",
        project="my_project",
        backend="mlflow",  # Default fallback
    )
```
### Configuration-Based Setup

Note that `start_logging` does not accept a `settings` parameter. Extra options go through the `config` dict, whose keys are merged into `TraceletSettings` (overriding defaults and environment variables), so they must match its field names:

```python
import tracelet

# Collect pipeline-wide configuration in one place; these keys are
# forwarded to TraceletSettings internally
pipeline_config = {
    "backend": ["mlflow", "wandb"],
    "track_system_metrics": True,
    "metrics_interval": 10.0,
}

experiment = tracelet.start_logging(
    exp_name="pipeline_run_001",
    project="ml_pipeline",
    config=pipeline_config,
)
```