diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
index f97b6a6eb183d..47e1c8c06dd45 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
@@ -806,8 +806,6 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
 
   protected def caseConvert(tableName: String): String = tableName
 
-  private def withOrWithout(isDistinct: Boolean): String = if (isDistinct) "with" else "without"
-
   Seq(true, false).foreach { isDistinct =>
     val distinct = if (isDistinct) "DISTINCT " else ""
     val withOrWithout = if (isDistinct) "with" else "without"
diff --git a/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala b/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala
index f034e5da0a698..8644608edee3d 100644
--- a/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala
@@ -1924,8 +1924,6 @@ private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
     PrivateMethod[mutable.HashMap[Int, Int]](Symbol("numLocalityAwareTasksPerResourceProfileId"))
   private val _rpIdToHostToLocalTaskCount =
     PrivateMethod[Map[Int, Map[String, Int]]](Symbol("rpIdToHostToLocalTaskCount"))
-  private val _onSpeculativeTaskSubmitted =
-    PrivateMethod[Unit](Symbol("onSpeculativeTaskSubmitted"))
   private val _totalRunningTasksPerResourceProfile =
     PrivateMethod[Int](Symbol("totalRunningTasksPerResourceProfile"))
 
@@ -1942,12 +1940,6 @@ private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
     nmap(rp.id)
   }
 
-  private def updateAndSyncNumExecutorsTarget(
-      manager: ExecutorAllocationManager,
-      now: Long): Unit = {
-    manager invokePrivate _updateAndSyncNumExecutorsTarget(now)
-  }
-
   private def numExecutorsTargetForDefaultProfileId(manager: ExecutorAllocationManager): Int = {
     numExecutorsTarget(manager, defaultProfile.id)
   }
@@ -2025,10 +2017,6 @@ private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
     manager invokePrivate _onSchedulerQueueEmpty()
   }
 
-  private def onSpeculativeTaskSubmitted(manager: ExecutorAllocationManager, id: String) : Unit = {
-    manager invokePrivate _onSpeculativeTaskSubmitted(id)
-  }
-
   private def localityAwareTasksForDefaultProfile(manager: ExecutorAllocationManager): Int = {
     val localMap = manager invokePrivate _localityAwareTasksPerResourceProfileId()
     localMap(defaultProfile.id)
@@ -2044,7 +2032,4 @@ private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
     rpIdToHostLocal(defaultProfile.id)
   }
 
-  private def getResourceProfileIdOfExecutor(manager: ExecutorAllocationManager): Int = {
-    defaultProfile.id
-  }
 }
diff --git a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala
index 3555faf5c2cb9..eb6b4b23c61cf 100644
--- a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala
@@ -233,14 +233,6 @@ class AppClientSuite
   // | Utility methods for testing |
   // ===============================
 
-  /** Return a SparkConf for applications that want to talk to our Master. */
-  private def appConf: SparkConf = {
-    new SparkConf()
-      .setMaster(masterRpcEnv.address.toSparkURL)
-      .setAppName("test")
-      .set("spark.executor.memory", "256m")
-  }
-
   /** Make a master to which our application will send executor requests. */
   private def makeMaster(): Master = {
     val master = new Master(masterRpcEnv, masterRpcEnv.address, 0, securityManager, conf)
diff --git a/core/src/test/scala/org/apache/spark/storage/StorageSuite.scala b/core/src/test/scala/org/apache/spark/storage/StorageSuite.scala
index 5f2abb47413f6..ec436d9cd31c7 100644
--- a/core/src/test/scala/org/apache/spark/storage/StorageSuite.scala
+++ b/core/src/test/scala/org/apache/spark/storage/StorageSuite.scala
@@ -105,29 +105,6 @@ class StorageSuite extends SparkFunSuite {
     assert(status.diskUsed === actualDiskUsed)
   }
 
-  // For testing StorageUtils.updateRddInfo and StorageUtils.getRddBlockLocations
-  private def stockStorageStatuses: Seq[StorageStatus] = {
-    val status1 = new StorageStatus(BlockManagerId("big", "dog", 1), 1000L, Some(1000L), Some(0L))
-    val status2 = new StorageStatus(BlockManagerId("fat", "duck", 2), 2000L, Some(2000L), Some(0L))
-    val status3 = new StorageStatus(BlockManagerId("fat", "cat", 3), 3000L, Some(3000L), Some(0L))
-    status1.addBlock(RDDBlockId(0, 0), BlockStatus(memAndDisk, 1L, 2L))
-    status1.addBlock(RDDBlockId(0, 1), BlockStatus(memAndDisk, 1L, 2L))
-    status2.addBlock(RDDBlockId(0, 2), BlockStatus(memAndDisk, 1L, 2L))
-    status2.addBlock(RDDBlockId(0, 3), BlockStatus(memAndDisk, 1L, 2L))
-    status2.addBlock(RDDBlockId(1, 0), BlockStatus(memAndDisk, 1L, 2L))
-    status2.addBlock(RDDBlockId(1, 1), BlockStatus(memAndDisk, 1L, 2L))
-    status3.addBlock(RDDBlockId(0, 4), BlockStatus(memAndDisk, 1L, 2L))
-    status3.addBlock(RDDBlockId(1, 2), BlockStatus(memAndDisk, 1L, 2L))
-    Seq(status1, status2, status3)
-  }
-
-  // For testing StorageUtils.updateRddInfo
-  private def stockRDDInfos: Seq[RDDInfo] = {
-    val info0 = new RDDInfo(0, "0", 10, memAndDisk, false, Seq(3))
-    val info1 = new RDDInfo(1, "1", 3, memAndDisk, false, Seq(4))
-    Seq(info0, info1)
-  }
-
   private val offheap = StorageLevel.OFF_HEAP
   // For testing add, update, remove, get, and contains etc. for both RDD and non-RDD onheap
   // and offheap blocks
diff --git a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
index 30c9693e6dee3..89e3d8371be4c 100644
--- a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
@@ -1370,18 +1370,6 @@ private[spark] object JsonProtocolSuite extends Assertions {
     }
   }
 
-  private def assertOptionEquals[T](
-      opt1: Option[T],
-      opt2: Option[T],
-      assertEquals: (T, T) => Unit): Unit = {
-    if (opt1.isDefined) {
-      assert(opt2.isDefined)
-      assertEquals(opt1.get, opt2.get)
-    } else {
-      assert(opt2.isEmpty)
-    }
-  }
-
   /**
    * Use different names for methods we pass in to assertSeqEquals or assertOptionEquals
    */
@@ -1407,10 +1395,6 @@ private[spark] object JsonProtocolSuite extends Assertions {
     assert(ste1.getFileName === ste2.getFileName)
   }
 
-  private def assertEquals(rp1: ResourceProfile, rp2: ResourceProfile): Unit = {
-    assert(rp1 === rp2)
-  }
-
   /** ----------------------------------- *
    * | Util methods for constructing events |
    * ------------------------------------ */
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala
index aa06b70307f53..0a4f81bd7aa9c 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala
@@ -72,16 +72,6 @@ class IsotonicRegressionSuite extends SparkFunSuite with MLlibTestSparkContext w
     runIsotonicRegression(labels, Array.fill(labels.size)(1d).toImmutableArraySeq, isotonic)
   }
 
-  private def runIsotonicRegression(
-      labels: Seq[Double],
-      features: Seq[Double],
-      weights: Seq[Double],
-      isotonic: Boolean): IsotonicRegressionModel = {
-    runIsotonicRegressionOnInput(
-      labels.indices.map(i => (labels(i), features(i), weights(i))),
-      isotonic)
-  }
-
   private def runIsotonicRegressionOnInput(
       input: Seq[(Double, Double, Double)],
       isotonic: Boolean,
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercionSuite.scala
index 139e89828f8e5..4f824a584d4a4 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercionSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnsiTypeCoercionSuite.scala
@@ -64,26 +64,6 @@ class AnsiTypeCoercionSuite extends TypeCoercionSuiteBase {
 
   override def dateTimeOperationsRule: TypeCoercionRule = AnsiTypeCoercion.DateTimeOperations
 
-  private def shouldCastStringLiteral(to: AbstractDataType, expected: DataType): Unit = {
-    val input = Literal("123")
-    val castResult = AnsiTypeCoercion.implicitCast(input, to)
-    assert(DataTypeUtils.equalsIgnoreCaseAndNullability(
-      castResult.map(_.dataType).orNull, expected),
-      s"Failed to cast String literal to $to")
-  }
-
-  private def shouldNotCastStringLiteral(to: AbstractDataType): Unit = {
-    val input = Literal("123")
-    val castResult = AnsiTypeCoercion.implicitCast(input, to)
-    assert(castResult.isEmpty, s"Should not be able to cast String literal to $to")
-  }
-
-  private def shouldNotCastStringInput(to: AbstractDataType): Unit = {
-    val input = AttributeReference("s", StringType)()
-    val castResult = AnsiTypeCoercion.implicitCast(input, to)
-    assert(castResult.isEmpty, s"Should not be able to cast non-foldable String input to $to")
-  }
-
   private def checkWidenType(
       widenFunc: (DataType, DataType) => Option[DataType],
       t1: DataType,
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/NamedParameterFunctionSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/NamedParameterFunctionSuite.scala
index 02543c9fba539..0715e27403bc6 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/NamedParameterFunctionSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/NamedParameterFunctionSuite.scala
@@ -20,7 +20,7 @@ import org.apache.spark.SparkThrowable
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{Expression, Literal, NamedArgumentExpression}
 import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
-import org.apache.spark.sql.catalyst.plans.logical.{FunctionBuilderBase, FunctionSignature, InputParameter, NamedParametersSupport}
+import org.apache.spark.sql.catalyst.plans.logical.{FunctionSignature, InputParameter, NamedParametersSupport}
 import org.apache.spark.sql.catalyst.util.TypeUtils.toSQLId
 import org.apache.spark.sql.types.DataType
 
@@ -89,14 +89,6 @@ class NamedParameterFunctionSuite extends AnalysisTest {
       NamedParametersSupport.defaultRearrange(functionSignature, expressions, functionName))
   }
 
-  private def parseExternalException[T <: FunctionBuilderBase[_]](
-      functionName: String,
-      builder: T,
-      expressions: Seq[Expression]) : SparkThrowable = {
-    intercept[SparkThrowable](
-      FunctionRegistry.rearrangeExpressions[T](functionName, builder, expressions))
-  }
-
   test("DUPLICATE_ROUTINE_PARAMETER_ASSIGNMENT") {
     val condition = "DUPLICATE_ROUTINE_PARAMETER_ASSIGNMENT.BOTH_POSITIONAL_AND_NAMED"
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
index b7309923ac206..c80466aec035e 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.catalyst.encoders
 
-import scala.reflect.ClassTag
-
 import org.apache.spark.{SPARK_DOC_ROOT, SparkFunSuite, SparkUnsupportedOperationException}
 import org.apache.spark.sql.Encoders
 
@@ -98,5 +96,4 @@ class EncoderErrorMessageSuite extends SparkFunSuite {
     )
   }
 
-  private def clsName[T : ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
 }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastWithAnsiOnSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastWithAnsiOnSuite.scala
index 5916e0501f8b6..674d306dbabb4 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastWithAnsiOnSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastWithAnsiOnSuite.scala
@@ -303,10 +303,6 @@ class CastWithAnsiOnSuite extends CastSuiteBase with QueryErrorsBase {
       s"cannot be cast to ${toSQLType(to)} because it is malformed."
   }
 
-  private def castErrMsg(l: Literal, to: DataType): String = {
-    castErrMsg(l, to, l.dataType)
-  }
-
   test("cast from invalid string to numeric should throw NumberFormatException") {
     def check(value: String, dataType: DataType): Unit = {
       checkExceptionInExpression[NumberFormatException](cast(value, dataType),
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala
index 2324c33806d48..d781d79caeae8 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/logical/DistinctKeyVisitorSuite.scala
@@ -18,11 +18,9 @@ package org.apache.spark.sql.catalyst.plans.logical
 
 import scala.collection.mutable
-import scala.reflect.runtime.universe.TypeTag
 
 import org.apache.spark.sql.catalyst.dsl.expressions._
 import org.apache.spark.sql.catalyst.dsl.plans._
-import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
 import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, ExpressionSet, UnspecifiedFrame}
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.types.IntegerType
@@ -47,9 +45,6 @@ class DistinctKeyVisitorSuite extends PlanTest {
     assert(plan.analyze.distinctKeys === distinctKeys)
   }
 
-  implicit private def productEncoder[T <: Product : TypeTag]: ExpressionEncoder[T] =
-    ExpressionEncoder[T]()
-
   test("Aggregate's distinct attributes") {
     checkDistinctAttributes(t1.groupBy($"a", $"b")($"a", $"b", 1), Set(ExpressionSet(Seq(a, b))))
     checkDistinctAttributes(t1.groupBy($"a")($"a"), Set(ExpressionSet(Seq(a))))
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
index 700dfe30a2389..cbaebfa12238a 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/IntervalUtilsSuite.scala
@@ -168,18 +168,6 @@ class IntervalUtilsSuite extends SparkFunSuite with SQLHelper {
     assert(safeStringToInterval(UTF8String.fromString(input)) === null)
   }
 
-  private def checkFromInvalidStringUnknownError(input: String, word: String): Unit = {
-    checkError(
-      exception = intercept[SparkIllegalArgumentException] {
-        stringToInterval(UTF8String.fromString(input))
-      },
-      condition = "INVALID_INTERVAL_FORMAT.UNKNOWN_PARSING_ERROR",
-      parameters = Map(
-        "input" -> Option(input).map(_.toString).getOrElse("null"),
-        "word" -> word))
-    assert(safeStringToInterval(UTF8String.fromString(input)) === null)
-  }
-
   private def failFuncWithInvalidInput(
       input: String, errorMsg: String, converter: String => CalendarInterval): Unit = {
     withClue("Expected to throw an exception for the invalid input") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index 8d255e9efda54..301c072abbbbb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -1388,16 +1388,6 @@ class DataSourceV2SQLSuiteV1Filter
     }
   }
 
-  private def testShowNamespaces(
-      sqlText: String,
-      expected: Seq[String]): Unit = {
-    val schema = new StructType().add("namespace", StringType, nullable = false)
-
-    val df = spark.sql(sqlText)
-    assert(df.schema === schema)
-    assert(df.collect().map(_.getAs[String](0)).sorted === expected.sorted)
-  }
-
   test("Use: basic tests with USE statements") {
     val catalogManager = spark.sessionState.catalogManager
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
index b26cdfaeb756a..4fd96eadfac75 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala
@@ -453,13 +453,6 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils {
     }
   }
 
-  private def assertRelationNotFound(query: String, relation: String): Unit = {
-    val e = intercept[AnalysisException] {
-      sql(query)
-    }
-    checkErrorTableNotFound(e, relation)
-  }
-
   private def assertRelationNotFound(query: String, relation: String,
       context: ExpectedContext): Unit = {
     val e = intercept[AnalysisException] {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/StateStoreBasicOperationsBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/StateStoreBasicOperationsBenchmark.scala
index 36035e35ee258..ff4bd41409af8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/StateStoreBasicOperationsBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/StateStoreBasicOperationsBenchmark.scala
@@ -353,10 +353,6 @@ object StateStoreBasicOperationsBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  private def getRows(store: StateStore, keys: Seq[UnsafeRow]): Seq[UnsafeRow] = {
-    keys.map(key => store.get(key))
-  }
-
   private def loadInitialData(
       provider: StateStoreProvider,
       data: Seq[(UnsafeRow, UnsafeRow)]): Long = {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
index 08f245135f589..50f3e728e5f22 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala
@@ -31,8 +31,6 @@ import org.apache.spark.util.UninterruptibleThread
 
 class HDFSMetadataLogSuite extends SharedSparkSession {
 
-  private implicit def toOption[A](a: A): Option[A] = Option(a)
-
   test("SPARK-46339: Directory with number name should not be treated as metadata log") {
     withTempDir { temp =>
       val dir = new File(temp, "dir")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
index 773be0cc08e3f..a753da116924d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala
@@ -235,14 +235,6 @@ class FileStreamSourceSuite extends FileStreamSourceTest {
 
   override val streamingTimeout = 80.seconds
 
-  /** Use `format` and `path` to create FileStreamSource via DataFrameReader */
-  private def createFileStreamSource(
-      format: String,
-      path: String,
-      schema: Option[StructType] = None): FileStreamSource = {
-    getSourceFromFileStream(createFileStream(format, path, schema))
-  }
-
   private def createFileStreamSourceAndGetSchema(
       format: Option[String],
       path: Option[String],
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
index 224dec72c79b1..200603cae5866 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala
@@ -482,8 +482,6 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter {
       meq(Map.empty))
   }
 
-  private def newTextInput = Utils.createTempDir(namePrefix = "text").getCanonicalPath
-
   test("check foreach() catches null writers") {
     val df = spark.readStream
       .format("org.apache.spark.sql.streaming.test")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
index 0c63cbb950a7c..cc82b36ed74e3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala
@@ -26,7 +26,7 @@ import org.apache.orc.OrcConf
 import org.apache.spark.sql.{AnalysisException, Row}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
-import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, LogicalRelationWithTable}
+import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
 import org.apache.spark.sql.execution.datasources.orc.OrcQueryTest
 import org.apache.spark.sql.hive.{HiveSessionCatalog, HiveUtils}
 import org.apache.spark.sql.hive.test.TestHiveSingleton
@@ -231,17 +231,6 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton {
       .getCachedDataSourceTable(table)
   }
 
-  private def checkCached(tableIdentifier: TableIdentifier): Unit = {
-    getCachedDataSourceTable(tableIdentifier) match {
-      case null => fail(s"Converted ${tableIdentifier.table} should be cached in the cache.")
-      case LogicalRelationWithTable(_: HadoopFsRelation, _) => // OK
-      case other =>
-        fail(
-          s"The cached ${tableIdentifier.table} should be a HadoopFsRelation. " +
-            s"However, $other is returned form the cache.")
-    }
-  }
-
   test("SPARK-28573 ORC conversation could be applied for partitioned table insertion") {
     withTempView("single") {
       val singleRowDF = Seq((0, "foo")).toDF("key", "value")