diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index bf56e60cf995..f52b163fe527 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -118,12 +118,12 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
   def refreshTable(tableName: String): Unit = {
     // TODO: Database support...
-    catalog.refreshTable("default", tableName)
+    catalog.refreshTable(Seq("default", tableName))
   }
 
   protected[hive] def invalidateTable(tableName: String): Unit = {
     // TODO: Database support...
-    catalog.invalidateTable("default", tableName)
+    catalog.invalidateTable(Seq("default", tableName))
   }
 
   /**
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index d40f9936fd3b..5cf6f0977f00 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -52,18 +52,13 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
   /** Connection to hive metastore. Usages should lock on `this`. */
   protected[hive] val client = Hive.get(hive.hiveconf)
 
-  // TODO: Use this everywhere instead of tuples or databaseName, tableName,.
-  /** A fully qualified identifier for a table (i.e., database.tableName) */
-  case class QualifiedTableName(database: String, name: String) {
-    def toLowerCase = QualifiedTableName(database.toLowerCase, name.toLowerCase)
-  }
-
   /** A cache of Spark SQL data source tables that have been accessed. */
-  protected[hive] val cachedDataSourceTables: LoadingCache[QualifiedTableName, LogicalPlan] = {
-    val cacheLoader = new CacheLoader[QualifiedTableName, LogicalPlan]() {
-      override def load(in: QualifiedTableName): LogicalPlan = {
+  protected[hive] val cachedDataSourceTables: LoadingCache[Seq[String], LogicalPlan] = {
+    val cacheLoader = new CacheLoader[Seq[String], LogicalPlan]() {
+      override def load(in: Seq[String]): LogicalPlan = {
         logDebug(s"Creating new cached data source for $in")
-        val table = client.getTable(in.database, in.name)
+
+        val table = client.getTable(in(0), in(1))
 
         val schemaString = table.getProperty("spark.sql.sources.schema")
         val userSpecifiedSchema = if (schemaString == null) {
@@ -89,12 +84,12 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
     CacheBuilder.newBuilder().maximumSize(1000).build(cacheLoader)
   }
 
-  def refreshTable(databaseName: String, tableName: String): Unit = {
-    cachedDataSourceTables.refresh(QualifiedTableName(databaseName, tableName).toLowerCase)
+  def refreshTable(tableIdentifier: Seq[String]): Unit = {
+    cachedDataSourceTables.refresh(tableIdentifier.map(_.toLowerCase()))
   }
 
-  def invalidateTable(databaseName: String, tableName: String): Unit = {
-    cachedDataSourceTables.invalidate(QualifiedTableName(databaseName, tableName).toLowerCase)
+  def invalidateTable(tableIdentifier: Seq[String]): Unit = {
+    cachedDataSourceTables.invalidate(tableIdentifier.map(_.toLowerCase()))
   }
 
   val caseSensitive: Boolean = false
@@ -144,7 +139,7 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
       val table = client.getTable(databaseName, tblName)
 
       if (table.getProperty("spark.sql.sources.provider") != null) {
-        cachedDataSourceTables(QualifiedTableName(databaseName, tblName).toLowerCase)
+        cachedDataSourceTables(Seq(databaseName, tblName))
       } else if (table.isView) {
         // if the unresolved relation is from hive view
         // parse the text into logic node.
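
For reference, a minimal, self-contained sketch (not the actual Spark code) of the caching pattern this diff moves to: a Guava `LoadingCache` keyed by a `Seq[String]` table identifier of the form `Seq(databaseName, tableName)` instead of the dedicated `QualifiedTableName` case class. The names `TableCacheSketch` and `Plan` are hypothetical stand-ins; the real loader resolves a `LogicalPlan` from the Hive metastore.

```scala
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}

object TableCacheSketch {

  // Hypothetical stand-in for Spark's LogicalPlan.
  case class Plan(database: String, table: String)

  // Cache keyed by Seq(databaseName, tableName), mirroring the
  // LoadingCache[Seq[String], LogicalPlan] introduced in the diff.
  val cachedTables: LoadingCache[Seq[String], Plan] = {
    val loader = new CacheLoader[Seq[String], Plan]() {
      override def load(in: Seq[String]): Plan = {
        // The real code hits the metastore here; we just build a value.
        println(s"Loading entry for $in")
        Plan(in(0), in(1))
      }
    }
    CacheBuilder.newBuilder().maximumSize(1000).build(loader)
  }

  // refresh/invalidate lower-case the key, so lookups must be
  // normalized the same way to address the same entry.
  def refreshTable(tableIdentifier: Seq[String]): Unit =
    cachedTables.refresh(tableIdentifier.map(_.toLowerCase()))

  def invalidateTable(tableIdentifier: Seq[String]): Unit =
    cachedTables.invalidate(tableIdentifier.map(_.toLowerCase()))

  def main(args: Array[String]): Unit = {
    // LoadingCache extends Guava's Function, so apply() loads on a miss.
    val plan = cachedTables(Seq("default", "mytable"))
    println(plan)
    // Refreshes the key Seq("default", "mytable"), not Seq("default", "MyTable").
    refreshTable(Seq("default", "MyTable"))
  }
}
```

Because `Seq[String]` has structural equality and hashing, it works directly as a cache key. The tradeoff is that case normalization, previously centralized in `QualifiedTableName.toLowerCase`, must now be applied consistently at each call site; note that the lookup path in the last hunk passes `Seq(databaseName, tblName)` without lower-casing, whereas `refreshTable` and `invalidateTable` lower-case their keys.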