Merged

NFQ #897

Changes from 2 commits
Commits
28 commits
5e6f09d
NFQ before refactor
CasBex Jun 2, 2023
c423048
NFQ after refactor
CasBex Jun 2, 2023
c88559e
Move to dqns
CasBex Jun 7, 2023
1560ad1
Refactor
CasBex Jun 8, 2023
02ab01b
Add NFQ to RLZoo
CasBex Jun 7, 2023
093f1f4
Set up experiment
CasBex Jun 7, 2023
7402cb2
Merge remote-tracking branch 'origin/main' into NFQ
CasBex Jun 12, 2023
c1e49da
Update algorithm for refactor
CasBex Jun 13, 2023
0edd287
rng and loss type
HenriDeh Jun 15, 2023
8461c15
remove duplicate
HenriDeh Jun 15, 2023
b89f67e
dispatch on trajectory
HenriDeh Jun 15, 2023
19e0a97
optimise is dummy by default
HenriDeh Jun 15, 2023
98444e4
optimise! is dispatched on traj and loops it
HenriDeh Jun 15, 2023
6be2450
Fix precompilation warnings
CasBex Jun 16, 2023
2ed5ffb
Avoid running post episode optimise! multiple times
CasBex Jun 16, 2023
da384b2
Tune experiment
CasBex Jun 16, 2023
b30a08e
Merge branch 'main' into NFQ
CasBex Jun 16, 2023
5ab7a1c
Remove commented code
CasBex Jun 19, 2023
afc21b6
Drop gpu call
CasBex Jun 19, 2023
033dcdf
Use `sample` to get batch from trajectory
CasBex Jun 19, 2023
31b55b5
optimise! for AbstractLearner
CasBex Jun 19, 2023
8605dbf
Merge remote-tracking branch 'origin/main' into NFQ
CasBex Jun 19, 2023
b53c96b
NFQ optimise! calls at the correct time
CasBex Jun 19, 2023
e949807
Merge branch 'main' into NFQ
HenriDeh Jun 23, 2023
f77e198
Remove superfluous function due to main merge
CasBex Jun 23, 2023
66ea89b
Anonymous loop variable
CasBex Jun 23, 2023
37be2a6
Update NFQ docs
CasBex Jun 23, 2023
c43f37a
Update julia_words.txt
HenriDeh Jun 26, 2023
78 changes: 78 additions & 0 deletions src/ReinforcementLearningZoo/src/algorithms/offline_rl/NFQ.jl
@@ -0,0 +1,78 @@
"""
NFQ{A<:AbstractApproximator, F, R} <: AbstractLearner
NFQ(approximator::A, num_iterations::Integer epochs::Integer, loss_function::F, batch_size::Integer, rng::R, γ::Float32) where {A<:AbstractApproximator, F, R}
Neural Fitted Q-iteration as implemented in [1]

# Keyword arguments
- `approximator::AbstractApproximator` neural network
- `num_iterations::Integer` number of value iteration iterations in FQI loop (i.e. the outer loop)
- `epochs` number of epochs to train neural network per iteration
- `loss_function::F` loss function of the NN
- `sampler::BatchSampler{SARTS}` data sampler
- `rng::R` random number generator
- `γ::Float32` discount rate

# References
[1] Riedmiller, M. (2005). Neural Fitted Q Iteration – First Experiences with a Data Efficient Neural Reinforcement Learning Method. In: Gama, J., Camacho, R., Brazdil, P.B., Jorge, A.M., Torgo, L. (eds) Machine Learning: ECML 2005. ECML 2005. Lecture Notes in Computer Science(), vol 3720. Springer, Berlin, Heidelberg. https://doi.org/10.1007/11564096_32
"""
Base.@kwdef struct NFQ{A<:NeuralNetworkApproximator, F, R} <: AbstractLearner
    approximator::A
    num_iterations::Integer = 20
    epochs::Integer = 100
    loss_function::F = mse
    rng::R = Random.GLOBAL_RNG
    γ::Float32 = 0.9f0
end

# Keyword constructor. Static type parameters cannot be deduced from keyword
# arguments, so the fields are left unannotated and the positional constructor
# infers the parameters.
function NFQ(;
    approximator::NeuralNetworkApproximator,
    num_iterations::Integer = 20,
    epochs::Integer = 1000,
    loss_function = mse,
    rng = Random.GLOBAL_RNG,
    γ::Float32 = 0.9f0,
)
    NFQ(approximator, num_iterations, epochs, loss_function, rng, γ)
end
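
# Example construction (an illustrative sketch only; the environment, network sizes and
# optimiser below are assumptions, not part of this file). The approximator receives a
# [state; action] column as input, as built in `plan!` below, so a 4-dimensional state
# gives an input size of 5:
#
#     q_net = NeuralNetworkApproximator(
#         model = Chain(Dense(5, 64, relu), Dense(64, 1)),
#         optimizer = Adam(),
#     )
#     learner = NFQ(approximator = q_net, num_iterations = 20, epochs = 100, γ = 0.95f0)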

# Copied from BasicDQN, but not sure whether it's appropriate here
Flux.functor(x::NFQ) = (Q = x.approximator,), y -> begin
    x = @set x.approximator = y.Q
    x
end

function RLBase.plan!(learner::NFQ, env::AbstractEnv)
    as = action_space(env)
    # Build one [state; action] column per action, evaluate the approximator on its
    # device and return the vector of Q-values (one entry per action).
    return vcat(repeat(state(env), inner=(1, length(as))), transpose(as)) |> x -> send_to_device(device(learner.approximator), x) |> learner.approximator |> send_to_host |> vec
end

# Avoid optimisation in the middle of an episode
function RLBase.optimise!(::NFQ, ::NamedTuple) end

# Instead do optimisation at the end of an episode
function Base.push!(agent::Agent{<:QBasedPolicy{<:NFQ}}, ::PostEpisodeStage, env::AbstractEnv)
    for batch in agent.trajectory
        _optimise!(agent.policy.learner, batch, env)
    end
end

function _optimise!(learner::NFQ, batch::NamedTuple, env::AbstractEnv)
    Q = learner.approximator
    γ = learner.γ
    loss_func = learner.loss_function

    as = action_space(env)
    las = length(as)

    (s, a, r, ss) = batch[[:state, :action, :reward, :next_state]]
    a = Float32.(a)
    s, a, r, ss = map(x -> send_to_device(device(Q), x), (s, a, r, ss))
    for i = 1:learner.num_iterations
        # Build a (|s| + 1) × batch × |A| array whose columns are [next_state; action]
        # for every action, evaluate Q on it, then take the maximum over the action
        # dimension to get the fitted targets G = r + γ * max_a Q(s', a).
        G = r .+ γ .* (cat(repeat(ss, inner=(1, 1, las)), reshape(repeat(as, inner=size(ss, 2)), (1, size(ss, 2), las)), dims=1) |> Q |> x -> maximum(x, dims=3) |> vec)
        for e = 1:learner.epochs
            Flux.train!((x, y) -> loss_func(Q(x), y), params(Q.model), [(vcat(s, transpose(a)), transpose(G))], Q.optimizer)
        end
    end
end
end
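
The learner hooks into the standard agent loop through the `Base.push!` method at `PostEpisodeStage`, so it is driven like any other Q-based learner. A rough usage sketch follows; the explorer setting, stop condition and environment are illustrative assumptions, and the trajectory configuration is omitted here because it lives in the experiment set up in this PR:

policy = QBasedPolicy(
    learner = NFQ(approximator = q_net),   # q_net as sketched in the construction example above
    explorer = EpsilonGreedyExplorer(0.1),
)
# agent = Agent(policy = policy, trajectory = trajectory)  # trajectory as configured in the experiment
# run(agent, CartPoleEnv(), StopAfterEpisode(200), TotalRewardPerEpisode())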