Merged
2 changes: 1 addition & 1 deletion lib/axon/loop.ex
@@ -1775,7 +1775,7 @@ defmodule Axon.Loop do
defp build_loss_scale_fns(invalid) do
raise ArgumentError,
"Invalid loss scale #{inspect(invalid)}, a valid" <>
-" loss scale is an atom amtching the name of a loss" <>
+" loss scale is an atom matching the name of a loss" <>
" scale implementation in Axon.LossScale or a 3-tuple" <>
" of {init_scale, scale_fn, unscale_fn}. See Axon.LossScale" <>
" for more information"
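For reference, a minimal sketch of the two accepted forms the corrected message describes, assuming Axon ~> 0.3 (`model` and `optimizer` are hypothetical placeholders, and the tuple description is an assumption about the `Axon.LossScale` contract rather than its exact API):

```elixir
# Form 1: an atom naming a loss scale implementation in Axon.LossScale,
# e.g. :identity, which this PR passes explicitly further down:
Axon.Loop.trainer(model, :categorical_cross_entropy, optimizer, :identity, log: 1)

# Form 2 (assumed shape): a {init_scale, scale_fn, unscale_fn} 3-tuple
# supplying the initial scale plus functions to scale the loss and
# unscale the gradients.
```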
8 changes: 4 additions & 4 deletions notebooks/basics/xor.livemd
@@ -2,10 +2,10 @@

```elixir
Mix.install([
-{:axon, github: "elixir-nx/axon"},
-{:nx, "~> 0.3.0", override: true},
-{:exla, "~> 0.3.0"},
-{:kino_vega_lite, "~> 0.1.3"}
+{:axon, "~> 0.3.0"},
+{:nx, "~> 0.4.0", override: true},
+{:exla, "~> 0.4.0"},
+{:kino_vega_lite, "~> 0.1.6"}
])

Nx.Defn.default_options(compiler: EXLA)
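As an aside on the `Nx.Defn.default_options(compiler: EXLA)` line kept above: a minimal sketch of what it enables, assuming exla ~> 0.4 — every `defn` is JIT-compiled through XLA instead of running on the default interpreter (`Demo` and `xor_ish` are hypothetical names for illustration):

```elixir
Nx.Defn.default_options(compiler: EXLA)

defmodule Demo do
  import Nx.Defn

  # Compiled by EXLA on first call thanks to the default options above
  defn xor_ish(a, b), do: Nx.logical_xor(a, b)
end

Demo.xor_ish(Nx.tensor([0, 1, 1, 0]), Nx.tensor([0, 1, 0, 1]))
```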
8 changes: 4 additions & 4 deletions notebooks/generative/fashionmnist_autoencoder.livemd
@@ -2,10 +2,10 @@

```elixir
Mix.install([
-{:axon, github: "elixir-nx/axon"},
-{:nx, "~> 0.3.0", override: true},
-{:exla, "~> 0.3.0"},
-{:scidata, "~> 0.1.5"}
+{:axon, "~> 0.3.0"},
+{:nx, "~> 0.4.0", override: true},
+{:exla, "~> 0.4.0"},
+{:scidata, "~> 0.1.9"}
])

Nx.Defn.default_options(compiler: EXLA)
29 changes: 15 additions & 14 deletions notebooks/generative/fashionmnist_vae.livemd
@@ -2,15 +2,16 @@

```elixir
Mix.install([
-{:exla, "~> 0.3.0"},
-{:nx, "~> 0.3.0"},
-{:axon, "~> 0.2.0"},
+{:exla, "~> 0.4.0"},
+{:nx, "~> 0.4.0", override: true},
+{:axon, "~> 0.3.0"},
{:req, "~> 0.3.1"},
{:kino, "~> 0.7.0"},
{:scidata, "~> 0.1.9"},
{:stb_image, "~> 0.5.2"},
-{:kino_vega_lite, "~> 0.1.4"},
-{:vega_lite, "~> 0.1.6"}
+{:kino_vega_lite, "~> 0.1.6"},
+{:vega_lite, "~> 0.1.6"},
+{:table_rex, "~> 3.1.1"}
])

alias VegaLite, as: Vl
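A note on the new `table_rex` entry: presumably it is pulled in because this notebook renders model summaries with `Axon.Display.as_table/2` (shown unchanged further down), which needs a table formatter:

```elixir
# table_rex backs the tabular output here (an assumption based on the
# dependency being added alongside these unchanged calls):
Axon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()
```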
@@ -34,7 +35,7 @@ This section will proceed without much explanation as most of it is extracted fr
```elixir
defmodule Data do
@moduledoc """
-A module to hold useful data processing utilities, 
+A module to hold useful data processing utilities,
mostly extracted from the previous notebook
"""

@@ -132,7 +133,7 @@ model =
|> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
|> CustomLayer.scaling_layer()
# Turn it back into a 28x28 single channel image
-|> Axon.reshape({1, 28, 28})
+|> Axon.reshape({:auto, 1, 28, 28})

# We can use Axon.Display to show us what each of the layers would look like
# assuming we send in a batch of 4 images
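The `{1, 28, 28}` → `{:auto, 1, 28, 28}` reshape changes throughout this PR track Nx's `:auto` dimension, which infers one axis from the tensor's total size so the batch dimension no longer needs hard-coding. A standalone sketch of the semantics, assuming nx ~> 0.4:

```elixir
batch = Nx.iota({4, 784}, type: :f32)

# :auto is inferred as 4 here, since 4 * 784 == 4 * 1 * 28 * 28
Nx.reshape(batch, {:auto, 1, 28, 28})
```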
@@ -161,7 +162,7 @@ We also have `plot_losses/1` function to visualize our train and validation loss
```elixir
defmodule KinoAxon do
@doc """
-Adds handler function which adds a frame with a "stop" button 
+Adds handler function which adds a frame with a "stop" button
to the cell with the training loop.

Clicking "stop" will halt the training loop.
@@ -212,7 +213,7 @@ defmodule KinoAxon do
handler = fn state ->
%Axon.Loop.State{metrics: metrics, epoch: epoch} = state
loss = metrics["loss"] |> Nx.to_number()
val_loss = metrics["validation_0"]["loss"] |> Nx.to_number()
val_loss = metrics["validation_loss"] |> Nx.to_number()

points = [
%{epoch: epoch, loss: loss, dataset: "train"},
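The `validation_0` → `validation_loss` rename above matches how newer Axon flattens validation metrics into the loop state's metrics map under a `validation_` prefix (an assumption based on this diff; `loop`, `model`, and `validation_data` are placeholders). A sketch of reading it from a handler:

```elixir
loop
|> Axon.Loop.validate(model, validation_data)
|> Axon.Loop.handle(:epoch_completed, fn %Axon.Loop.State{metrics: metrics} = state ->
  IO.inspect(Nx.to_number(metrics["validation_loss"]), label: "val loss")
  {:continue, state}
end)
```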
@@ -290,7 +291,7 @@ decoder =
|> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
|> CustomLayer.scaling_layer()
# Turn it back into a 28x28 single channel image
-|> Axon.reshape({1, 28, 28})
+|> Axon.reshape({:auto, 1, 28, 28})

Axon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()
Axon.Display.as_table(decoder, Nx.template({4, 10}, :f32)) |> IO.puts()
@@ -336,7 +337,7 @@ num_steps = 100
# index 1 is where we'll end
latents = Axon.predict(encoder, params, test_images[[images: 0..1]])
# Latents is a {2, 10} tensor
-# The step we'll add to our latent to move it towards image[1] 
+# The step we'll add to our latent to move it towards image[1]
step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)
# We can make a batch of all our new latents
new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])
@@ -397,7 +398,7 @@ defmodule Vae do
# Bottleneck layer
|> Axon.dense(@latent_features * 2, name: "bottleneck_layer")
# Split up the mu and logvar
-|> Axon.reshape({2, @latent_features})
+|> Axon.reshape({:auto, 2, @latent_features})
|> sampling_layer()
end

@@ -415,7 +416,7 @@ defmodule Vae do
|> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
|> CustomLayer.scaling_layer()
# Turn it back into a 28x28 single channel image
-|> Axon.reshape({1, 28, 28})
+|> Axon.reshape({:auto, 1, 28, 28})
end

def autoencoder() do
@@ -492,7 +493,7 @@ num_steps = 100
# index 1 is where we'll end
latents = Axon.predict(Vae.encoder(), params, test_images[[images: 0..1]])
# Latents is a {2, 10} tensor
-# The step we'll add to our latent to move it towards image[1] 
+# The step we'll add to our latent to move it towards image[1]
step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)
# We can make a batch of all our new latents
new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])
11 changes: 6 additions & 5 deletions notebooks/generative/mnist_autoencoder_using_kino.livemd
@@ -2,13 +2,14 @@

```elixir
Mix.install([
-{:exla, "~> 0.3.0"},
-{:nx, "~> 0.3.0"},
-{:axon, "~> 0.2.0"},
+{:exla, "~> 0.4.0"},
+{:nx, "~> 0.4.0", override: true},
+{:axon, "~> 0.3.0"},
{:req, "~> 0.3.1"},
{:kino, "~> 0.7.0"},
{:scidata, "~> 0.1.9"},
-{:stb_image, "~> 0.5.2"}
+{:stb_image, "~> 0.5.2"},
+{:table_rex, "~> 3.1.1"}
])
```

@@ -103,7 +104,7 @@ model =
|> Axon.dense(256, activation: :relu)
|> Axon.dense(784, activation: :sigmoid)
# Turn it back into a 28x28 single channel image
-|> Axon.reshape({1, 28, 28})
+|> Axon.reshape({:auto, 1, 28, 28})

# We can use Axon.Display to show us what each of the layers would look like
# assuming we send in a batch of 4 images
6 changes: 3 additions & 3 deletions notebooks/structured/credit_card_fraud.livemd
@@ -2,9 +2,9 @@

```elixir
Mix.install([
-{:axon, github: "elixir-nx/axon"},
-{:nx, "~> 0.3.0", override: true},
-{:exla, "~> 0.3.0"},
+{:axon, "~> 0.3.0"},
+{:nx, "~> 0.4.0", override: true},
+{:exla, "~> 0.4.0"},
{:explorer, "~> 0.3.1"},
{:kino, "~> 0.7.0"}
])
8 changes: 4 additions & 4 deletions notebooks/text/lstm_generation.livemd
@@ -2,10 +2,10 @@

```elixir
Mix.install([
-{:axon, github: "elixir-nx/axon"},
-{:nx, "~> 0.3.0", override: true},
-{:exla, "~> 0.3.0"},
-{:req, "~> 0.3.0"}
+{:axon, "~> 0.3.0"},
+{:nx, "~> 0.4.0", override: true},
+{:exla, "~> 0.4.0"},
+{:req, "~> 0.3.1"}
])

Nx.Defn.default_options(compiler: EXLA)
12 changes: 6 additions & 6 deletions notebooks/vision/horses_or_humans.livemd
@@ -2,11 +2,11 @@

```elixir
Mix.install([
-{:axon, github: "elixir-nx/axon"},
-{:nx, github: "elixir-nx/nx", sparse: "nx", override: true},
-{:exla, github: "elixir-nx/nx", sparse: "exla", override: true},
+{:axon, "~> 0.3.0"},
+{:nx, "~> 0.4.0", sparse: "nx", override: true},
+{:exla, "~> 0.4.0", sparse: "exla", override: true},
{:stb_image, "~> 0.5.2"},
-{:req, "~> 0.3.0"},
+{:req, "~> 0.3.1"},
{:kino, "~> 0.7.0"}
])

@@ -203,7 +203,7 @@ optimizer = Axon.Optimizers.adam(1.0e-4)

params =
model
-|> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, log: 1)
+|> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, :identity, log: 1)
|> Axon.Loop.metric(:accuracy)
|> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)
```
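Why the extra `:identity` argument: with a trainer signature along the lines of `trainer(model, loss, optimizer, loss_scale \\ :identity, opts \\ [])` (an assumption, but one consistent with the `build_loss_scale_fns/1` fix at the top of this PR), the old call would have bound `log: 1` to the loss-scale parameter and raised the very `ArgumentError` whose message this PR corrects. A sketch of the failure and the fix (`model` and `optimizer` as defined above):

```elixir
# Old call: [log: 1] lands in the loss_scale slot and is rejected by
# build_loss_scale_fns/1 with "Invalid loss scale [log: 1], ..."
# Axon.Loop.trainer(model, :categorical_cross_entropy, optimizer, log: 1)

# Fixed call: the loss scale is named explicitly, so log: 1 stays in opts
Axon.Loop.trainer(model, :categorical_cross_entropy, optimizer, :identity, log: 1)
```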
@@ -218,7 +218,7 @@ We can improve the training by applying gradient centralization. It is a techniq
centralized_optimizer = Axon.Updates.compose(Axon.Updates.centralize(), optimizer)

model
-|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, log: 1)
+|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, :identity, log: 1)
|> Axon.Loop.metric(:accuracy)
|> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)
```
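For intuition on what `Axon.Updates.centralize/0` composes into the update pipeline: gradient centralization subtracts each gradient's mean before the optimizer step. A standalone sketch of the arithmetic in plain Nx (illustrative only, not Axon's internal implementation):

```elixir
grad = Nx.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

# Remove the per-row mean so each row of the gradient sums to zero
centralized = Nx.subtract(grad, Nx.mean(grad, axes: [1], keep_axes: true))
```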
8 changes: 4 additions & 4 deletions notebooks/vision/mnist.livemd
@@ -2,10 +2,10 @@

```elixir
Mix.install([
-{:axon, github: "elixir-nx/axon"},
-{:nx, "~> 0.3.0", override: true},
-{:exla, "~> 0.3.0"},
-{:req, "~> 0.3.0"}
+{:axon, "~> 0.3.0"},
+{:nx, "~> 0.4.0", override: true},
+{:exla, "~> 0.4.0"},
+{:req, "~> 0.3.1"}
])
```
