Skip to content
Open
Show file tree
Hide file tree
Changes from 9 commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
08c26a7
tweak train_QAOA to also allow for non-shot based calculations
jf-kong Feb 24, 2026
6282279
added adapter to use qiboml in QAOA optimisation
jf-kong Feb 24, 2026
590ca86
modified QUBO to allow for qiboml optimisation
jf-kong Feb 24, 2026
810bb3c
added maxcut notebook example for optimisation with qiboml
jf-kong Feb 24, 2026
4f718d5
updated qiboml dependencies
jf-kong Feb 24, 2026
66d12c1
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 24, 2026
e71b9de
issue warnings for unsupported optimisers
jf-kong Mar 13, 2026
21bc184
tweak train_QAOA to also allow for non-shot based calculations
jf-kong Feb 24, 2026
02a8d43
added adapter to use qiboml in QAOA optimisation
jf-kong Feb 24, 2026
640d30b
modified QUBO to allow for qiboml optimisation
jf-kong Feb 24, 2026
a386efd
added maxcut notebook example for optimisation with qiboml
jf-kong Feb 24, 2026
aedc6e9
updated qiboml dependencies
jf-kong Feb 24, 2026
92f6a21
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Feb 24, 2026
8cfaba5
issue warnings for unsupported optimisers
jf-kong Mar 13, 2026
fa626bb
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 13, 2026
4be04bd
Update poetry.lock
shangtai Mar 14, 2026
78cfb0f
fixed merging issues
jf-kong Mar 15, 2026
63087fc
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 15, 2026
f0c3180
fixing some lint errors
jf-kong Mar 15, 2026
2db6391
Merge branch 'adopt-qiboml' of https://github.com/qiboteam/qiboopt in…
jf-kong Mar 15, 2026
2e25fbb
fixed import issues
jf-kong Mar 15, 2026
0ac3291
added tests for coverage
jf-kong Mar 15, 2026
168ebcd
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 15, 2026
3a63585
fixed test errors
jf-kong Mar 15, 2026
3126a3f
Merge branch 'adopt-qiboml' of https://github.com/qiboteam/qiboopt in…
jf-kong Mar 15, 2026
c3366f9
chore: rerun CI
jf-kong Mar 15, 2026
bd715ea
install qiboml in ubuntu tests
jf-kong Mar 15, 2026
3007ed5
fix pylint errors with detach
jf-kong Mar 15, 2026
03181dc
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Mar 15, 2026
42efee7
trying to fix the lint errors related to detach
jf-kong Mar 16, 2026
2fba83d
Merge branch 'adopt-qiboml' of https://github.com/qiboteam/qiboopt in…
jf-kong Mar 16, 2026
ebf0682
trying to fix linting errors for detach
jf-kong Mar 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions doc/source/getting-started/quickstart.rst
Original file line number Diff line number Diff line change
Expand Up @@ -59,3 +59,31 @@ The Conditional Variance at Risk (CVaR) can also be used as an alternative loss
gammas = [0.1, 0.2]
betas = [0.3, 0.4]
output = qp.train_QAOA(gammas=gammas, betas=betas, regular_loss=False, cvar_delta=0.1)

To use qiboml's PyTorch-based training loop instead of the legacy optimizer, set ``engine="qiboml"``:

.. code-block:: python

from qiboopt.opt_class.opt_class import QUBO
gammas = [0.1, 0.2]
betas = [0.3, 0.4]
output = qp.train_QAOA(
gammas=gammas,
betas=betas,
engine="qiboml",
optimizer="adam",
lr=0.05,
epochs=100,
)

You can also run in exact (no-shot) mode by setting ``nshots=None`` (or ``nshots=0``):

.. code-block:: python

from qiboopt.opt_class.opt_class import QUBO
gammas = [0.1, 0.2]
betas = [0.3, 0.4]
output = qp.train_QAOA(gammas=gammas, betas=betas, nshots=None)

In sampled mode (``nshots > 0``), the returned dictionary contains bitstring counts.
In exact mode (``nshots is None`` or ``nshots == 0``), it contains exact bitstring probabilities.
695 changes: 657 additions & 38 deletions poetry.lock

Large diffs are not rendered by default.

14 changes: 13 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,12 @@ packages = [{include="qiboopt", from="src"}]

[tool.poetry.dependencies]
python = ">=3.10,<3.14"
qibo = "^0.2"
qibo = ">=0.2,<0.3"
qiboml = {version = ">=0.1", optional = true}
torch = {version = "^2.7.0", optional = true}

[tool.poetry.extras]
qiboml = ["qiboml", "torch"]

[build-system]
requires = ["poetry-core"]
Expand Down Expand Up @@ -38,6 +43,13 @@ pytest-env = ">=0.8.1"
pylint = "^3.3.5"
matplotlib = "^3.9.2"

[tool.poetry.group.qiboml]
optional = true

[tool.poetry.group.qiboml.dependencies]
qiboml = ">=0.1"
torch = "^2.7.0"

[tool.pytest.ini_options]
testpaths = ['tests/']
filterwarnings = ['ignore::RuntimeWarning']
Expand Down
9 changes: 6 additions & 3 deletions src/qiboopt/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
import importlib.metadata as im
try:
import importlib.metadata as im

__version__ = im.version(__package__)
__version__ = im.version(__package__)
except Exception:
__version__ = "0.0.1"

from qiboopt import combinatorial, opt_class
from qiboopt import combinatorial, continuous_bandits, integrations, opt_class
138 changes: 138 additions & 0 deletions src/qiboopt/integrations/qiboml_adapter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
"""qiboml integration helpers for QAOA training."""

from __future__ import annotations

from typing import Any

import numpy as np


def _energy_shift(qubo) -> float:
"""Constant shift between Ising expectation and QUBO objective value."""
_h, _J, constant = qubo.qubo_to_ising()
return float(constant)


def _get_differentiation_class(name: str | None):
if name is None or name == "torch":
return None
from qiboml.operations.differentiation import PSR, Adjoint, Jax

mapping = {
"psr": PSR,
"jax": Jax,
"adjoint": Adjoint,
"torch": None,
}
diff = mapping.get(name.lower())
if diff is None:
raise ValueError(
"Unknown qiboml differentiation method. "
"Supported values are: None, 'psr', 'jax', 'adjoint', 'torch'."
)
return diff


def optimize_qaoa_with_qiboml(
    *,
    qubo,
    parameters,
    p: int,
    nshots: int | None,
    noise_model,
    custom_mixer,
    has_alphas: bool,
    optimizer: str,
    lr: float,
    epochs: int,
    differentiation: str | None,
    backend,
) -> tuple[float, np.ndarray, dict[str, Any]]:
    """Optimize QAOA parameters using qiboml's pytorch interface.

    Builds a qiboml ``QuantumModel`` around the QAOA circuit of ``qubo``,
    trains it for ``epochs`` steps with a torch optimizer, and tracks the
    best (lowest) energy seen. Each reported energy is the Ising
    expectation value plus the constant QUBO offset, so the loss history
    is directly comparable to the QUBO objective.

    Returns:
        ``(best_energy, best_parameters, info)`` where ``info`` records the
        engine, optimizer name, learning rate, epoch count and loss history.

    Raises:
        ImportError: If torch or qiboml is not installed.
        ValueError: If ``optimizer`` is an unrecognized string.
    """
    try:
        import torch
    except ImportError as exc:
        raise ImportError(
            "engine='qiboml' requires torch. Install optional dependencies, "
            "for example with `poetry install --with qiboml`."
        ) from exc

    try:
        from qiboml.interfaces.pytorch import QuantumModel
        from qiboml.models.decoding import Expectation
    except ImportError as exc:
        raise ImportError(
            "engine='qiboml' requires qiboml. Install optional dependencies, "
            "for example with `poetry install --with qiboml`."
        ) from exc

    # Build the cost Hamiltonian through qiboopt's own QAOA construction path.
    hamiltonian = qubo.qubo_to_qaoa_object().hamiltonian
    if not hasattr(hamiltonian, "expectation_from_circuit"):
        # TODO: remove once minimum required qiboml version is > 0.1.0.
        # qiboml 0.1.0 uses `expectation_from_circuit`; older builds expose only `expectation`.
        hamiltonian.expectation_from_circuit = hamiltonian.expectation

    builder = qubo.make_qaoa_circuit_callable(
        p=p,
        custom_mixer=custom_mixer,
        has_alphas=has_alphas,
        include_measurements=False,
    )
    expectation_decoder = Expectation(
        nqubits=qubo.n,
        observable=hamiltonian,
        nshots=nshots,
        noise_model=noise_model,
        backend=backend,
    )
    shift = _energy_shift(qubo)

    model = QuantumModel(
        circuit_structure=[builder],
        decoding=expectation_decoder,
        parameters_initialization=np.asarray(parameters, dtype=np.float64),
        differentiation=_get_differentiation_class(differentiation),
    ).to(dtype=torch.float64)

    # Resolve the optimizer: either a known string alias or a user-supplied
    # torch.optim.Optimizer subclass.
    if isinstance(optimizer, str):
        name_to_cls = {
            "adam": torch.optim.Adam,
            "sgd": torch.optim.SGD,
        }
        opt_cls = name_to_cls.get(optimizer.lower())
        if opt_cls is None:
            raise ValueError(
                f"Unknown optimizer string '{optimizer}'. "
                f"Pass a torch.optim.Optimizer subclass directly, or use one of: {list(name_to_cls)}."
            )
    else:
        opt_cls = optimizer  # user supplied a class directly
    torch_optimizer = opt_cls(model.parameters(), lr=lr)

    history: list[float] = []
    best_energy = float("inf")
    best_params = np.asarray(parameters, dtype=np.float64)
    for _ in range(epochs):
        torch_optimizer.zero_grad()
        out = model()
        # Some decoders return a 1-element tensor rather than a scalar.
        if out.ndim > 0:
            out = out.reshape(-1)[0]
        out.backward()
        torch_optimizer.step()
        energy = float(out.detach().cpu().item()) + shift
        history.append(energy)
        if energy < best_energy:
            best_energy = energy
            best_params = model.circuit_parameters.detach().cpu().numpy().copy()

    info: dict[str, Any] = {
        "engine": "qiboml",
        "optimizer": getattr(opt_cls, "__name__", str(opt_cls)),
        "learning_rate": lr,
        "epochs": epochs,
        "loss_history": history,
    }
    return best_energy, best_params, info
Loading
Loading