Skip to content
Closed
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions mllib-local/src/main/scala/org/apache/spark/ml/linalg/BLAS.scala
Original file line number Diff line number Diff line change
Expand Up @@ -243,6 +243,23 @@ private[spark] object BLAS extends Serializable {
spr(alpha, v, U.values)
}

/**
 * y := alpha*A*x + beta*y
 *
 * Computes a symmetric packed matrix-vector product in place (BLAS DSPMV),
 * using the upper triangular part of A.
 *
 * @param n The order of the matrix A (A is n x n).
 * @param alpha Scalar applied to the product A*x.
 * @param A The upper triangular part of A in a [[DenseVector]] (column major).
 * @param x The [[DenseVector]] transformed by A.
 * @param beta Scalar applied to y before the product is accumulated.
 * @param y The [[DenseVector]] to be modified in place.
 */
def dspmv(
    n: Int,
    alpha: Double,
    A: DenseVector,
    x: DenseVector,
    beta: Double,
    y: DenseVector): Unit = {
  f2jBLAS.dspmv("U", n, alpha, A.values, x.values, 1, beta, y.values, 1)
}

/**
* Adds alpha * x * x.t to a matrix in-place. This is the same as BLAS's ?SPR.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -422,4 +422,49 @@ class BLASSuite extends SparkMLFunSuite {
assert(dATT.multiply(sx) ~== expected absTol 1e-15)
assert(sATT.multiply(sx) ~== expected absTol 1e-15)
}

test("spmv") {
  /*
   A = [[3.0, -2.0, 2.0, -4.0],
        [-2.0, -8.0, 4.0, 7.0],
        [2.0, 4.0, -3.0, -3.0],
        [-4.0, 7.0, -3.0, 0.0]]
   x = [5.0, 2.0, -1.0, -9.0]
   Ax = [45., -93., 48., -3.]
   */
  val n = 4
  // Upper triangular part of the symmetric matrix A, packed column major.
  val A = new DenseVector(Array(3.0, -2.0, -8.0, 2.0, 4.0, -3.0, -4.0, 7.0, -3.0, 0.0))
  val x = new DenseVector(Array(5.0, 2.0, -1.0, -9.0))
  val yInit = new DenseVector(Array(-3.0, 6.0, -8.0, -3.0))

  // Each case: (alpha, beta, expected y after y := alpha*A*x + beta*y).
  val cases = Seq(
    (1.0, 1.0, Array(42.0, -87.0, 40.0, -6.0)),
    (0.5, 1.0, Array(19.5, -40.5, 16.0, -4.5)),
    (-0.5, 1.0, Array(-25.5, 52.5, -32.0, -1.5)),
    (0.0, 1.0, Array(-3.0, 6.0, -8.0, -3.0)),
    (1.0, 0.5, Array(43.5, -90.0, 44.0, -4.5)),
    (1.0, -0.5, Array(46.5, -96.0, 52.0, -1.5)),
    (1.0, 0.0, Array(45.0, -93.0, 48.0, -3.0)))

  cases.foreach { case (alpha, beta, expected) =>
    val y = yInit.copy
    dspmv(n, alpha, A, x, beta, y)
    assert(y ~== new DenseVector(expected) absTol 1e-8)
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,8 @@ private[ml] class IterativelyReweightedLeastSquares(
}

// Estimate new model
model = new WeightedLeastSquares(fitIntercept, regParam, standardizeFeatures = false,
standardizeLabel = false).fit(newInstances)
model = new WeightedLeastSquares(fitIntercept, regParam, elasticNetParam = 0.0,
standardizeFeatures = false, standardizeLabel = false).fit(newInstances)

// Check convergence
val oldCoefficients = oldModel.coefficients
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.optim

import breeze.linalg.{DenseVector => BDV}
import breeze.optimize.{CachedDiffFunction, DiffFunction, LBFGS => BreezeLBFGS, OWLQN => BreezeOWLQN}
import scala.collection.mutable

import org.apache.spark.ml.linalg.{BLAS, DenseVector, Vectors}
import org.apache.spark.mllib.linalg.CholeskyDecomposition

/**
 * A class that holds the solution to the normal equations.
 *
 * @param fitIntercept Whether the model was fit with an intercept. When true, the last
 *                     element of the raw solution vector is the intercept.
 * @param _coefficients The raw solution vector. If `fitIntercept` is true, the intercept
 *                      is stored as the last element; otherwise all elements are feature
 *                      coefficients.
 * @param aaInv An option containing the inverse of the Gram matrix, if it was computed
 *              by the solver.
 * @param objectiveHistory An option containing the objective value recorded at each
 *                         iteration, if the solver is iterative.
 */
private[ml] class NormalEquationSolution(
    val fitIntercept: Boolean,
    private val _coefficients: Array[Double],
    val aaInv: Option[Array[Double]],
    val objectiveHistory: Option[Array[Double]]) {

  /** The feature coefficients, excluding the intercept when one was fit. */
  def coefficients: Array[Double] = {
    if (fitIntercept) {
      _coefficients.slice(0, _coefficients.length - 1)
    } else {
      _coefficients
    }
  }

  /** The intercept: the last raw element when `fitIntercept` is true, 0.0 otherwise. */
  def intercept: Double = if (fitIntercept) _coefficients.last else 0.0
}

/**
 * Interface for classes that solve the normal equations locally.
 */
private[ml] sealed trait NormalEquationSolver {

  /**
   * Solve the normal equations from summary statistics.
   *
   * NOTE(review): parameter semantics inferred from usage in `NormalEquationCostFun` —
   * presumably weighted summary statistics of features `a` and label `b` (aBar/bBar means,
   * abBar/aaBar second moments); confirm against the caller in WeightedLeastSquares.
   * `aaBar` is the packed upper triangular part of the Gram matrix (it is fed to
   * `BLAS.dspmv` downstream).
   *
   * @param bBar Summary statistic for the label b.
   * @param bbBar Summary statistic for b * b.
   * @param abBar Summary statistic vector for a * b.
   * @param aaBar Summary statistic vector for a * a (upper triangular, packed).
   * @param aBar Summary statistic vector for a.
   * @return the [[NormalEquationSolution]] containing the solved coefficients.
   */
  def solve(
      bBar: Double,
      bbBar: Double,
      abBar: DenseVector,
      aaBar: DenseVector,
      aBar: DenseVector): NormalEquationSolution
}

/**
 * A class that solves the normal equations directly, using Cholesky decomposition.
 */
private[ml] class CholeskySolver(val fitIntercept: Boolean) extends NormalEquationSolver {

  def solve(
      bBar: Double,
      bbBar: Double,
      abBar: DenseVector,
      aaBar: DenseVector,
      aBar: DenseVector): NormalEquationSolution = {
    val numUnknowns = abBar.size
    // NOTE(review): solve before inverse — CholeskyDecomposition presumably factorizes
    // aaBar.values in place, so this call order must be preserved; confirm.
    val solution = CholeskyDecomposition.solve(aaBar.values, abBar.values)
    val gramInverse = CholeskyDecomposition.inverse(aaBar.values, numUnknowns)

    new NormalEquationSolution(fitIntercept, solution, Some(gramInverse), None)
  }
}

/**
 * A class for solving the normal equations using Quasi-Newton optimization methods.
 *
 * @param fitIntercept Whether to fit an intercept. When true, the coefficient vector has one
 *                     extra trailing element for the intercept, which is reset to its
 *                     analytically optimal value on every iteration (this accelerates
 *                     convergence, per the design discussion).
 * @param maxIter Maximum number of optimizer iterations.
 * @param tol Convergence tolerance for the optimizer.
 * @param l1RegFunc Optional function mapping a coefficient index to its L1 regularization
 *                  strength. When present, OWL-QN is used; otherwise plain L-BFGS.
 */
private[ml] class QuasiNewtonSolver(
    val fitIntercept: Boolean,
    maxIter: Int,
    tol: Double,
    l1RegFunc: Option[(Int) => Double]) extends NormalEquationSolver {

  def solve(
      bBar: Double,
      bbBar: Double,
      abBar: DenseVector,
      aaBar: DenseVector,
      aBar: DenseVector): NormalEquationSolution = {
    val numFeatures = aBar.size
    val numFeaturesPlusIntercept = if (fitIntercept) numFeatures + 1 else numFeatures
    val initialCoefficientsWithIntercept = new Array[Double](numFeaturesPlusIntercept)
    if (fitIntercept) {
      // Seed the intercept slot with bBar (presumably the weighted label mean — confirm).
      initialCoefficientsWithIntercept(numFeaturesPlusIntercept - 1) = bBar
    }

    val costFun =
      new NormalEquationCostFun(bBar, bbBar, abBar, aaBar, aBar, fitIntercept, numFeatures)
    // OWL-QN handles the L1 penalty via l1RegFunc; without L1 we use plain L-BFGS.
    val optimizer = l1RegFunc.map { func =>
      new BreezeOWLQN[Int, BDV[Double]](maxIter, 10, func, tol)
    }.getOrElse(new BreezeLBFGS[BDV[Double]](maxIter, 10, tol))

    val states = optimizer.iterations(new CachedDiffFunction(costFun),
      new BDV[Double](initialCoefficientsWithIntercept))

    // Drain the iterator, recording the objective value at each step; the final state
    // holds the converged solution.
    val arrayBuilder = mutable.ArrayBuilder.make[Double]
    var state: optimizer.State = null
    while (states.hasNext) {
      state = states.next()
      arrayBuilder += state.adjustedValue
    }
    val x = state.x.toArray.clone()
    new NormalEquationSolution(fitIntercept, x, None, Some(arrayBuilder.result()))
  }

  /**
   * NormalEquationCostFun implements Breeze's DiffFunction[T] for the normal equation.
   * It returns the loss and gradient with L2 regularization at a particular point (coefficients).
   * It's used in Breeze's convex optimization routines.
   */
  private class NormalEquationCostFun(
      bBar: Double,
      bbBar: Double,
      ab: DenseVector,
      aa: DenseVector,
      aBar: DenseVector,
      fitIntercept: Boolean,
      numFeatures: Int) extends DiffFunction[BDV[Double]] {

    private val numFeaturesPlusIntercept = if (fitIntercept) numFeatures + 1 else numFeatures

    override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = {
      val coef = Vectors.fromBreeze(coefficients).toDense
      if (fitIntercept) {
        // Pin the intercept to its analytically optimal value given the current
        // coefficients: intercept = bBar - aBar . coef.
        var j = 0
        var dotProd = 0.0
        val coefValues = coef.values
        val aBarValues = aBar.values
        while (j < numFeatures) {
          dotProd += coefValues(j) * aBarValues(j)
          j += 1
        }
        coefValues(numFeatures) = bBar - dotProd
      }
      // aax := (A^T W A) x, using the packed upper triangle in `aa`.
      val aax = new DenseVector(new Array[Double](numFeaturesPlusIntercept))
      BLAS.dspmv(numFeaturesPlusIntercept, 1.0, aa, coef, 1.0, aax)
      // loss = 1/2 (b^T W b - 2 x^T A^T W b + x^T A^T W A x)
      val loss = 0.5 * bbBar - BLAS.dot(ab, coef) + 0.5 * BLAS.dot(coef, aax)
      // gradient = A^T W A x - A^T W b
      BLAS.axpy(-1.0, ab, aax)
      (loss, aax.asBreeze.toDenseVector)
    }
  }
}

/**
 * Exception thrown when solving a linear system Ax = b for which the matrix A is non-invertible
 * (singular).
 *
 * Extends [[IllegalArgumentException]] so existing callers catching that type still work.
 */
class SingularMatrixException(message: String, cause: Throwable)
  extends IllegalArgumentException(message, cause) {

  /** Convenience constructor for the common case with no underlying cause. */
  def this(message: String) = this(message, null)
}
Loading