diff --git a/lib/axon/loop.ex b/lib/axon/loop.ex index bb6a4829a..e5e215fff 100644 --- a/lib/axon/loop.ex +++ b/lib/axon/loop.ex @@ -1775,7 +1775,7 @@ defmodule Axon.Loop do defp build_loss_scale_fns(invalid) do raise ArgumentError, "Invalid loss scale #{inspect(invalid)}, a valid" <> - " loss scale is an atom amtching the name of a loss" <> + " loss scale is an atom matching the name of a loss" <> " scale implementation in Axon.LossScale or a 3-tuple" <> " of {init_scale, scale_fn, unscale_fn}. See Axon.LossScale" <> " for more information" diff --git a/notebooks/basics/xor.livemd b/notebooks/basics/xor.livemd index 82a80a248..b64030519 100644 --- a/notebooks/basics/xor.livemd +++ b/notebooks/basics/xor.livemd @@ -2,10 +2,10 @@ ```elixir Mix.install([ - {:axon, github: "elixir-nx/axon"}, - {:nx, "~> 0.3.0", override: true}, - {:exla, "~> 0.3.0"}, - {:kino_vega_lite, "~> 0.1.3"} + {:axon, "~> 0.3.0"}, + {:nx, "~> 0.4.0", override: true}, + {:exla, "~> 0.4.0"}, + {:kino_vega_lite, "~> 0.1.6"} ]) Nx.Defn.default_options(compiler: EXLA) diff --git a/notebooks/generative/fashionmnist_autoencoder.livemd b/notebooks/generative/fashionmnist_autoencoder.livemd index 0b9bb70ed..efd4120ab 100644 --- a/notebooks/generative/fashionmnist_autoencoder.livemd +++ b/notebooks/generative/fashionmnist_autoencoder.livemd @@ -2,10 +2,10 @@ ```elixir Mix.install([ - {:axon, github: "elixir-nx/axon"}, - {:nx, "~> 0.3.0", override: true}, - {:exla, "~> 0.3.0"}, - {:scidata, "~> 0.1.5"} + {:axon, "~> 0.3.0"}, + {:nx, "~> 0.4.0", override: true}, + {:exla, "~> 0.4.0"}, + {:scidata, "~> 0.1.9"} ]) Nx.Defn.default_options(compiler: EXLA) diff --git a/notebooks/generative/fashionmnist_vae.livemd b/notebooks/generative/fashionmnist_vae.livemd index 2cef5c5cd..352357bed 100644 --- a/notebooks/generative/fashionmnist_vae.livemd +++ b/notebooks/generative/fashionmnist_vae.livemd @@ -2,15 +2,16 @@ ```elixir Mix.install([ - {:exla, "~> 0.3.0"}, - {:nx, "~> 0.3.0"}, - {:axon, "~> 0.2.0"}, + {:exla, 
"~> 0.4.0"}, + {:nx, "~> 0.4.0", override: true}, + {:axon, "~> 0.3.0"}, {:req, "~> 0.3.1"}, {:kino, "~> 0.7.0"}, {:scidata, "~> 0.1.9"}, {:stb_image, "~> 0.5.2"}, - {:kino_vega_lite, "~> 0.1.4"}, - {:vega_lite, "~> 0.1.6"} + {:kino_vega_lite, "~> 0.1.6"}, + {:vega_lite, "~> 0.1.6"}, + {:table_rex, "~> 3.1.1"} ]) alias VegaLite, as: Vl @@ -34,7 +35,7 @@ This section will proceed without much explanation as most of it is extracted fr ```elixir defmodule Data do @moduledoc """ - A module to hold useful data processing utilities, + A module to hold useful data processing utilities, mostly extracted from the previous notebook """ @@ -132,7 +133,7 @@ model = |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4") |> CustomLayer.scaling_layer() # Turn it back into a 28x28 single channel image - |> Axon.reshape({1, 28, 28}) + |> Axon.reshape({:auto, 1, 28, 28}) # We can use Axon.Display to show us what each of the layers would look like # assuming we send in a batch of 4 images @@ -161,7 +162,7 @@ We also have `plot_losses/1` function to visualize our train and validation loss ```elixir defmodule KinoAxon do @doc """ - Adds handler function which adds a frame with a "stop" button + Adds handler function which adds a frame with a "stop" button to the cell with the training loop. Clicking "stop" will halt the training loop. 
@@ -212,7 +213,7 @@ defmodule KinoAxon do handler = fn state -> %Axon.Loop.State{metrics: metrics, epoch: epoch} = state loss = metrics["loss"] |> Nx.to_number() - val_loss = metrics["validation_0"]["loss"] |> Nx.to_number() + val_loss = metrics["validation_loss"] |> Nx.to_number() points = [ %{epoch: epoch, loss: loss, dataset: "train"}, @@ -290,7 +291,7 @@ decoder = |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4") |> CustomLayer.scaling_layer() # Turn it back into a 28x28 single channel image - |> Axon.reshape({1, 28, 28}) + |> Axon.reshape({:auto, 1, 28, 28}) Axon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts() Axon.Display.as_table(decoder, Nx.template({4, 10}, :f32)) |> IO.puts() @@ -336,7 +337,7 @@ num_steps = 100 # index 1 is where we'll end latents = Axon.predict(encoder, params, test_images[[images: 0..1]]) # Latents is a {2, 10} tensor -# The step we'll add to our latent to move it towards image[1] +# The step we'll add to our latent to move it towards image[1] step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps) # We can make a batch of all our new latents new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0]) @@ -397,7 +398,7 @@ defmodule Vae do # Bottleneck layer |> Axon.dense(@latent_features * 2, name: "bottleneck_layer") # Split up the mu and logvar - |> Axon.reshape({2, @latent_features}) + |> Axon.reshape({:auto, 2, @latent_features}) |> sampling_layer() end @@ -415,7 +416,7 @@ defmodule Vae do |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4") |> CustomLayer.scaling_layer() # Turn it back into a 28x28 single channel image - |> Axon.reshape({1, 28, 28}) + |> Axon.reshape({:auto, 1, 28, 28}) end def autoencoder() do @@ -492,7 +493,7 @@ num_steps = 100 # index 1 is where we'll end latents = Axon.predict(Vae.encoder(), params, test_images[[images: 0..1]]) # Latents is a {2, 10} tensor -# The step we'll add to our latent to move it towards image[1] +# 
The step we'll add to our latent to move it towards image[1] step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps) # We can make a batch of all our new latents new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0]) diff --git a/notebooks/generative/mnist_autoencoder_using_kino.livemd b/notebooks/generative/mnist_autoencoder_using_kino.livemd index f0adc62b2..b56ed8a7f 100644 --- a/notebooks/generative/mnist_autoencoder_using_kino.livemd +++ b/notebooks/generative/mnist_autoencoder_using_kino.livemd @@ -2,13 +2,14 @@ ```elixir Mix.install([ - {:exla, "~> 0.3.0"}, - {:nx, "~> 0.3.0"}, - {:axon, "~> 0.2.0"}, + {:exla, "~> 0.4.0"}, + {:nx, "~> 0.4.0", override: true}, + {:axon, "~> 0.3.0"}, {:req, "~> 0.3.1"}, {:kino, "~> 0.7.0"}, {:scidata, "~> 0.1.9"}, - {:stb_image, "~> 0.5.2"} + {:stb_image, "~> 0.5.2"}, + {:table_rex, "~> 3.1.1"} ]) ``` @@ -103,7 +104,7 @@ model = |> Axon.dense(256, activation: :relu) |> Axon.dense(784, activation: :sigmoid) # Turn it back into a 28x28 single channel image - |> Axon.reshape({1, 28, 28}) + |> Axon.reshape({:auto, 1, 28, 28}) # We can use Axon.Display to show us what each of the layers would look like # assuming we send in a batch of 4 images diff --git a/notebooks/structured/credit_card_fraud.livemd b/notebooks/structured/credit_card_fraud.livemd index 5cbb6007f..e407723ff 100644 --- a/notebooks/structured/credit_card_fraud.livemd +++ b/notebooks/structured/credit_card_fraud.livemd @@ -2,9 +2,9 @@ ```elixir Mix.install([ - {:axon, github: "elixir-nx/axon"}, - {:nx, "~> 0.3.0", override: true}, - {:exla, "~> 0.3.0"}, + {:axon, "~> 0.3.0"}, + {:nx, "~> 0.4.0", override: true}, + {:exla, "~> 0.4.0"}, {:explorer, "~> 0.3.1"}, {:kino, "~> 0.7.0"} ]) diff --git a/notebooks/text/lstm_generation.livemd b/notebooks/text/lstm_generation.livemd index e41a40a5e..677188f77 100644 --- a/notebooks/text/lstm_generation.livemd +++ b/notebooks/text/lstm_generation.livemd @@ -2,10 +2,10 @@ ```elixir 
Mix.install([ - {:axon, github: "elixir-nx/axon"}, - {:nx, "~> 0.3.0", override: true}, - {:exla, "~> 0.3.0"}, - {:req, "~> 0.3.0"} + {:axon, "~> 0.3.0"}, + {:nx, "~> 0.4.0", override: true}, + {:exla, "~> 0.4.0"}, + {:req, "~> 0.3.1"} ]) Nx.Defn.default_options(compiler: EXLA) diff --git a/notebooks/vision/horses_or_humans.livemd b/notebooks/vision/horses_or_humans.livemd index 9196e3105..225e2db3f 100644 --- a/notebooks/vision/horses_or_humans.livemd +++ b/notebooks/vision/horses_or_humans.livemd @@ -2,11 +2,11 @@ ```elixir Mix.install([ - {:axon, github: "elixir-nx/axon"}, - {:nx, github: "elixir-nx/nx", sparse: "nx", override: true}, - {:exla, github: "elixir-nx/nx", sparse: "exla", override: true}, + {:axon, "~> 0.3.0"}, + {:nx, "~> 0.4.0", override: true}, + {:exla, "~> 0.4.0", override: true}, {:stb_image, "~> 0.5.2"}, - {:req, "~> 0.3.0"}, + {:req, "~> 0.3.1"}, {:kino, "~> 0.7.0"} ]) @@ -203,7 +203,7 @@ optimizer = Axon.Optimizers.adam(1.0e-4) params = model - |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, log: 1) + |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, :identity, log: 1) |> Axon.Loop.metric(:accuracy) |> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch) ``` @@ -218,7 +218,7 @@ We can improve the training by applying gradient centralization. 
It is a techniq centralized_optimizer = Axon.Updates.compose(Axon.Updates.centralize(), optimizer) model -|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, log: 1) +|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, :identity, log: 1) |> Axon.Loop.metric(:accuracy) |> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch) ``` diff --git a/notebooks/vision/mnist.livemd b/notebooks/vision/mnist.livemd index 5f84eaf2a..263cf5fef 100644 --- a/notebooks/vision/mnist.livemd +++ b/notebooks/vision/mnist.livemd @@ -2,10 +2,10 @@ ```elixir Mix.install([ - {:axon, github: "elixir-nx/axon"}, - {:nx, "~> 0.3.0", override: true}, - {:exla, "~> 0.3.0"}, - {:req, "~> 0.3.0"} + {:axon, "~> 0.3.0"}, + {:nx, "~> 0.4.0", override: true}, + {:exla, "~> 0.4.0"}, + {:req, "~> 0.3.1"} ]) ```