
2.4. Backend

The package is organized around a backend system. The standard installation uses the NumPy implementation, while accelerated backends are optional and selected through specular.change_backend(...).

2.4.1. Backend selection

import specular

specular.backend_info()
specular.change_backend("cpu_jax")

Heavy machine-learning frameworks such as JAX, TensorFlow, and PyTorch are not imported when import specular is executed; they are imported, and their availability checked, only when the user explicitly selects the corresponding backend.
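
Because the availability check happens at selection time, requesting a backend whose framework is missing fails immediately with an install hint rather than at import time. A minimal sketch, assuming JAX is not installed in the current environment:

import specular

try:
    specular.change_backend("cpu_jax")
except ValueError as err:
    print(err)  # names the unavailable backend and, when known, an install command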

2.4.2. Backend support

Backend      Calculation   ODE                       Optimization
NumPy        supported     supported                 supported
Numba        supported     supported (recommended)   not supported
JAX          supported     supported                 supported (recommended)
TensorFlow   supported     supported                 not supported
PyTorch      supported     supported                 not supported
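
Reading the table: every backend covers calculation and ODE solving; Numba is the recommended backend for ODE work, and optimization is supported only by NumPy and JAX, with JAX recommended. A sketch of switching per task, assuming both accelerated backends are installed:

import specular

specular.change_backend("cpu_numba")  # recommended for ODE solving
# ... ODE work ...
specular.change_backend("cpu_jax")    # recommended for optimization
# ... optimization work ...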

2.4.3. JAX backend

See the official homepage of JAX.

The JAX backend is currently experimental and undergoing verification. Once it is selected, specular calculation and optimization are available through the normal public API.

Requirement: objective functions should use jax.numpy instead of standard numpy.

import jax.numpy as jnp
import specular

specular.change_backend("cpu_jax")

ReLU = lambda x: jnp.maximum(x, 0)
specular.derivative(ReLU, 0.0)

To enable 64-bit precision, update the JAX configuration before defining the functions that will be evaluated:

import jax
jax.config.update("jax_enable_x64", True)

import jax.numpy as jnp
import specular

specular.change_backend("cpu_jax")

ReLU = lambda x: jnp.maximum(x, 0)
specular.derivative(ReLU, 0.0)

For a detailed comparison of the algorithms, see:

2.4.4. API Reference

specular.backend

backend_info()

Print the supported, available, and current backends.

Example:

>>> import specular
>>> specular.backend_info()
supported backends: cpu_numpy, cpu_numba, cpu_tensorflow, gpu_tensorflow, cpu_pytorch, gpu_pytorch, cpu_jax, gpu_jax
available backends: cpu_numpy, cpu_numba
current backend   : cpu_numpy
Source code in specular/backend.py
def backend_info():
    """Print the supported, available, and current backends.

    Example::

        >>> import specular
        >>> specular.backend_info()
        supported backends: cpu_numpy, cpu_numba, cpu_tensorflow, gpu_tensorflow, cpu_pytorch, gpu_pytorch, cpu_jax, gpu_jax
        available backends: cpu_numpy, cpu_numba
        current backend   : cpu_numpy
    """
    print(f"supported backends: {', '.join(_BACKEND_ORDER)}")
    print(f"available backends: {', '.join(b for b in _BACKEND_ORDER if b in _AVAILABLE_BACKENDS)}")
    print(f"current backend   : {_CURRENT_BACKEND}")

change_backend(new_backend)

Change the active backend for the current session.

Parameters

new_backend : str
    Name of the backend to switch to. Must be one of the available backends on this machine (see backend_info()).

Raises

ValueError
    If new_backend is not in _AVAILABLE_BACKENDS.

Example:

>>> import specular
>>> specular.change_backend("cpu_pytorch")
Source code in specular/backend.py
def change_backend(new_backend):
    """Change the active backend for the current session.

    Parameters
    ----------
    new_backend : str
        Name of the backend to switch to. Must be one of the available
        backends on this machine (see ``backend_info()``).

    Raises
    ------
    ValueError
        If ``new_backend`` is not in ``_AVAILABLE_BACKENDS``.

    Example::

        >>> import specular
        >>> specular.change_backend("cpu_pytorch")
    """
    global _CURRENT_BACKEND

    if new_backend not in _SUPPORTED_BACKENDS:
        raise ValueError(
            f"{new_backend!r} is not a valid backend. "
            f"Choose from: {', '.join(_BACKEND_ORDER)}"
        )

    if _ensure_backend_available(new_backend):
        _CURRENT_BACKEND = new_backend
        return

    hint = _INSTALL_HINTS.get(new_backend, "")
    raise ValueError(
        f"{new_backend!r} is supported but not available on this machine. "
        + (f"Run: {hint}" if hint else "")
    )

specular.calculation

This module provides implementations of specular directional derivatives, specular partial derivatives, specular derivatives, specular gradients, and specular Jacobians.

A(alpha, beta, zero_tol=1e-08, quasi_Fermat=False, monotonicity=False)

Compute the specular function A(alpha, beta).

Examples:

>>> import specular
>>> specular.A(1.0, 2.0)
1.3874258867227933
Source code in specular/calculation.py
def A(
    alpha: "float | np.number | int | np.ndarray",
    beta: "float | np.number | int | np.ndarray",
    zero_tol: float = 1e-8,
    quasi_Fermat: bool = False,
    monotonicity: bool = False,
) -> "float | np.ndarray | list[float] | list[np.ndarray]":
    """Compute the specular function A(alpha, beta).

    Examples:
        >>> import specular
        >>> specular.A(1.0, 2.0)
        1.3874258867227933
    """
    return _get_backend_module().A(
        alpha, beta, zero_tol, quasi_Fermat, monotonicity
    )

derivative(f, x, h=1e-06, zero_tol=1e-08, quasi_Fermat=False, monotonicity=False)

Approximate the specular derivative of f at scalar x.

Examples:

>>> import specular
>>> f = lambda x: abs(x)
>>> specular.derivative(f, x=0.0)
0.0
Source code in specular/calculation.py
def derivative(
    f: "Callable[[int | float | np.number], int | float | np.number | list | np.ndarray]",
    x: ArrayLike,
    h: float = 1e-6,
    zero_tol: float = 1e-8,
    quasi_Fermat: bool = False,
    monotonicity: bool = False
) -> "float | np.ndarray | list[float] | list[np.ndarray]":
    """Approximate the specular derivative of f at scalar x.

    Examples:
        >>> import specular
        >>> f = lambda x: abs(x)
        >>> specular.derivative(f, x=0.0)
        0.0
    """
    if h <= 0:
        raise ValueError(f"Mesh size 'h' must be positive. Got {h}")

    return _get_backend_module().derivative(
        f, x, h, zero_tol, quasi_Fermat, monotonicity
    )
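
The mesh size h must be strictly positive; as the validation in the source above shows, anything else raises a ValueError before the backend is even consulted. A small sketch of the failure mode:

import specular

f = lambda x: abs(x)
try:
    specular.derivative(f, x=0.0, h=-1e-6)  # invalid mesh size
except ValueError as err:
    print(err)  # Mesh size 'h' must be positive. Got -1e-06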

directional_derivative(f, x, v, h=1e-06, zero_tol=1e-08)

Approximate the specular directional derivative of f at x in direction v.

Source code in specular/calculation.py
def directional_derivative(
    f: "Callable[[list | np.ndarray], int | float | np.number]",
    x: ArrayLike,
    v: ArrayLike,
    h: float = 1e-6,
    zero_tol: float = 1e-8
) -> float:
    """Approximate the specular directional derivative of f at x in direction v."""
    if h <= 0:
        raise ValueError(f"Mesh size 'h' must be positive. Got {h}")

    return _get_backend_module().directional_derivative(f, x, v, h, zero_tol)
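
This entry ships without an example, so here is a hedged usage sketch; the numeric output depends on the backend implementation and is deliberately omitted. The Euclidean norm is nonsmooth at the origin, which is exactly the case specular derivatives target:

import numpy as np
import specular

f = lambda x: np.linalg.norm(x)
x = np.array([0.0, 0.0])  # point of nonsmoothness
v = np.array([1.0, 0.0])  # direction vector
specular.directional_derivative(f, x, v)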

gradient(f, x, h=1e-06, zero_tol=1e-08, quasi_Fermat=False, monotonicity=False)

Approximate the specular gradient of f at x.

Examples:

>>> import specular
>>> import numpy as np
>>> f = lambda x: np.linalg.norm(x)
>>> specular.gradient(f, x=[1.4, -3.47, 4.57, 9.9])
array([ 0.12144298, -0.3010051 ,  0.39642458,  0.85877534])
Source code in specular/calculation.py
def gradient(
    f: "Callable[[list | np.ndarray], int | float | np.number]",
    x: ArrayLike,
    h: float = 1e-6,
    zero_tol: float = 1e-8,
    quasi_Fermat: bool = False,
    monotonicity: bool = False
) -> "np.ndarray | List[np.ndarray]":
    """Approximate the specular gradient of f at x.

    Examples:
        >>> import specular
        >>> import numpy as np
        >>> f = lambda x: np.linalg.norm(x)
        >>> specular.gradient(f, x=[1.4, -3.47, 4.57, 9.9])
        array([ 0.12144298, -0.3010051 ,  0.39642458,  0.85877534])
    """
    if h <= 0:
        raise ValueError(f"Mesh size 'h' must be positive. Got {h}")

    return _get_backend_module().gradient(
        f, x, h, zero_tol, quasi_Fermat, monotonicity
    )

jacobian(f, x, h=1e-06, zero_tol=1e-08, quasi_Fermat=False, monotonicity=False)

Approximate the specular Jacobian of f at x, shape (m, n).

Source code in specular/calculation.py
def jacobian(
    f: "Callable[[list | np.ndarray], int | float | np.number | list | np.ndarray]",
    x: ArrayLike,
    h: float = 1e-6,
    zero_tol: float = 1e-8,
    quasi_Fermat: bool = False,
    monotonicity: bool = False
) -> "np.ndarray | List[np.ndarray]":
    """Approximate the specular Jacobian of f at x, shape (m, n)."""
    if h <= 0:
        raise ValueError(f"Mesh size 'h' must be positive. Got {h}")

    return _get_backend_module().jacobian(
        f, x, h, zero_tol, quasi_Fermat, monotonicity
    )
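
A hedged sketch of the documented (m, n) shape, here with f mapping R^3 to R^2 and assuming the default options return a single NumPy array; the entries are approximations and not shown:

import numpy as np
import specular

f = lambda x: np.array([x[0] * x[1], abs(x[2])])  # R^3 -> R^2
J = specular.jacobian(f, x=[1.0, 2.0, 0.5])
J.shape  # (2, 3): m output components, n input coordinates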

partial_derivative(f, x, i, h=1e-06, zero_tol=1e-08)

Approximate the i-th specular partial derivative of f at x (1-indexed).

Source code in specular/calculation.py
def partial_derivative(
    f: "Callable[[list | np.ndarray], int | float | np.number]",
    x: ArrayLike,
    i: "int | np.integer",
    h: float = 1e-6,
    zero_tol: float = 1e-8
) -> float:
    """Approximate the i-th specular partial derivative of f at x (1-indexed)."""
    if h <= 0:
        raise ValueError(f"Mesh size 'h' must be positive. Got {h}")

    return _get_backend_module().partial_derivative(f, x, i, h, zero_tol)
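
Note that i is 1-indexed: i=1 differentiates with respect to the first coordinate. A hedged usage sketch with the output omitted:

import numpy as np
import specular

f = lambda x: np.linalg.norm(x)
specular.partial_derivative(f, x=[3.0, 4.0], i=1)  # first coordinate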

specular.optimization.solver

gradient_method(f, x_0, step_size, h=1e-06, form='specular gradient', tol=1e-06, zero_tol=1e-08, max_iter=1000, f_j=None, m=1, switch_iter=2, record_history=True, print_bar=True)

Minimize a nonsmooth convex function using the current backend.

Source code in specular/optimization/solver.py
def gradient_method(
    f: Callable[
        [int | float | np.number | list | np.ndarray],
        int | float | np.number,
    ],
    x_0: Any,
    step_size: StepSize,
    h: float = 1e-6,
    form: str = "specular gradient",
    tol: float = 1e-6,
    zero_tol: float = 1e-8,
    max_iter: int = 1000,
    f_j: Sequence[ComponentFunc] | Callable | None = None,
    m: int = 1,
    switch_iter: int | None = 2,
    record_history: bool = True,
    print_bar: bool = True,
) -> OptimizationResult:
    """
    Minimize a nonsmooth convex function using the current backend.
    """
    impl = cast(Any, _get_solver_module().gradient_method)

    return impl(
        f=f,
        x_0=x_0,
        step_size=step_size,
        h=h,
        form=form,
        tol=tol,
        zero_tol=zero_tol,
        max_iter=max_iter,
        f_j=f_j,
        m=m,
        switch_iter=switch_iter,
        record_history=record_history,
        print_bar=print_bar,
    )
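
This entry also ships without an example, so the following is a hedged sketch only; whether a plain float is accepted as a StepSize, and the exact fields of the returned OptimizationResult, are assumptions to verify against the solver documentation:

import numpy as np
import specular

f = lambda x: np.abs(x[0]) + np.abs(x[1])  # nonsmooth convex objective

result = specular.gradient_method(
    f,
    x_0=np.array([1.0, -2.0]),
    step_size=0.1,      # assumed: a constant step size
    max_iter=500,
    print_bar=False,    # suppress the progress bar
)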