Adding synchronization at the reporting and autotuner levels #54

Merged 2 commits on Sep 3, 2024
AppSubscriber.scala (new file)
@@ -0,0 +1,72 @@
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.nvidia.spark.rapids.tool.qualification

/**
 * Tracks the attempt IDs seen for a single application so that concurrent
 * processors can agree on which attempt is the most recent one.
 */
class AppSubscriber(val appId: String) {
  val lock = new Object()
  private var attemptID: Option[Int] = None

  // Not thread-safe; callers must hold `lock`. Keeps the largest attempt ID
  // seen so far and returns true iff `newAttempt` is the current attempt.
  def unsafeSetAttemptId(newAttempt: Int): Boolean = {
    attemptID match {
      case Some(a) =>
        if (newAttempt > a) {
          attemptID = Some(newAttempt)
        }
      case None => attemptID = Some(newAttempt)
    }
    newAttempt == attemptID.get
  }

  // Thread-safe variant of unsafeSetAttemptId.
  def safeSetAttemptId(newAttempt: Int): Boolean = {
    lock.synchronized {
      unsafeSetAttemptId(newAttempt)
    }
  }
}

object AppSubscriber {
  private val APP_SUBSCRIBERS = new java.util.concurrent.ConcurrentHashMap[String, AppSubscriber]()

  def getOrCreate(appId: String): AppSubscriber = {
    APP_SUBSCRIBERS.computeIfAbsent(appId, _ => new AppSubscriber(appId))
  }

  // Registers an attempt and reports whether it is now the current one.
  def subscribeAppAttempt(appId: String, newAttemptId: Int): Boolean = {
    val subscriber = getOrCreate(appId)
    subscriber.safeSetAttemptId(newAttemptId)
  }

  // Runs `f` while holding the subscriber's lock, but only if `currAttempt`
  // is (still) the newest attempt for `appId`; returns None for stale attempts.
  def withSafeValidAttempt[T](appId: String, currAttempt: Int)(f: () => T): Option[T] = {
    val subscriber = getOrCreate(appId)
    subscriber.lock.synchronized {
      if (subscriber.unsafeSetAttemptId(currAttempt)) {
        Option(f())
      } else {
        None
      }
    }
  }

  // Lock-free variant for callers known to be single-threaded.
  def withUnsafeValidAttempt[T](appId: String, currAttempt: Int)(f: () => T): Option[T] = {
    val subscriber = getOrCreate(appId)
    if (subscriber.unsafeSetAttemptId(currAttempt)) {
      Option(f())
    } else {
      None
    }
  }
}
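
For context, a minimal usage sketch of the new API (the application and attempt IDs below are hypothetical, not from the diff). Because unsafeSetAttemptId keeps the largest attempt ID seen and validates by equality, the newest attempt wins any race and a stale attempt's closure is never invoked; re-validating the current attempt succeeds again, which is what lets one attempt run several guarded phases.

// Hypothetical IDs for illustration only.
val appId = "app-123"

// Attempt 2 arrives first: it becomes the current attempt and its work runs.
val r2 = AppSubscriber.withSafeValidAttempt(appId, 2) { () => "report for attempt 2" }
assert(r2.contains("report for attempt 2"))

// Attempt 1 is older than the current attempt: its closure is never invoked.
val r1 = AppSubscriber.withSafeValidAttempt(appId, 1) { () => "report for attempt 1" }
assert(r1.isEmpty)

// Re-registering the current attempt still reports it as valid.
assert(AppSubscriber.subscribeAppAttempt(appId, 2))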
Qualification.scala
@@ -26,7 +26,7 @@ import com.nvidia.spark.rapids.tool.tuning.TunerContext
 import com.nvidia.spark.rapids.tool.views.QualRawReportGenerator
 import org.apache.hadoop.conf.Configuration
 
-import org.apache.spark.sql.rapids.tool.{AppAttemptTracker, FailureApp}
+import org.apache.spark.sql.rapids.tool.FailureApp
 import org.apache.spark.sql.rapids.tool.qualification._
 import org.apache.spark.sql.rapids.tool.ui.ConsoleProgressBar
 import org.apache.spark.sql.rapids.tool.util._
@@ -162,33 +162,47 @@ class Qualification(outputPath: String, numRows: Int, hadoopConf: Configuration,
       // this is a bit ugly right now to overload writing out the report and returning the
       // DataSource information but this encapsulates the analyzer to keep the memory usage
       // smaller.
-      val dsInfo = QualRawReportGenerator.generateRawMetricQualViewAndGetDataSourceInfo(
-        outputDir, app, appIndex)
+      val dsInfo =
+        AppSubscriber.withSafeValidAttempt(app.appId, app.attemptId) { () =>
+          QualRawReportGenerator.generateRawMetricQualViewAndGetDataSourceInfo(
+            outputDir, app, appIndex)
+        }.getOrElse(Seq.empty)
       val qualSumInfo = app.aggregateStats()
-      tunerContext.foreach { tuner =>
-        // Run the autotuner if it is enabled.
-        // Note that we call the autotuner anyway without checking the aggregate results
-        // because the Autotuner can still make some recommendations based on the information
-        // enclosed by the QualificationInfo object
-        tuner.tuneApplication(app, qualSumInfo, appIndex, dsInfo)
+      AppSubscriber.withSafeValidAttempt(app.appId, app.attemptId) { () =>
+        tunerContext.foreach { tuner =>
+          // Run the autotuner if it is enabled.
+          // Note that we call the autotuner anyway without checking the aggregate results
+          // because the Autotuner can still make some recommendations based on the information
+          // enclosed by the QualificationInfo object
+          tuner.tuneApplication(app, qualSumInfo, appIndex, dsInfo)
+        }
       }
       if (qualSumInfo.isDefined) {
         // add the recommend cluster info into the summary
         val tempSummary = qualSumInfo.get
         val newClusterSummary = tempSummary.clusterSummary.copy(
           recommendedClusterInfo = pluginTypeChecker.platform.recommendedClusterInfo)
-        if (AppAttemptTracker.isOlderAttemptId(app.appId, app.attemptId)) {
-          // If the attemptId is an older attemptId, skip this attempt.
-          // This can happen when the user has provided event logs for multiple attempts
-          progressBar.foreach(_.reportSkippedProcess())
-          SkippedAppResult.fromAppAttempt(pathStr, app.appId, app.attemptId)
-        } else {
+        AppSubscriber.withSafeValidAttempt(app.appId, app.attemptId) { () =>
           val newQualSummary = tempSummary.copy(clusterSummary = newClusterSummary)
-          allApps.put(app.appId, newQualSummary)
+          // check if the app is already in the map
+          if (allApps.containsKey(app.appId)) {
+            // fix the progress bar counts
+            progressBar.foreach(_.adjustCounterForMultipleAttempts())
+            logInfo(s"Removing older app summary for app: ${app.appId} " +
+              s"before adding the new one with attempt: ${app.attemptId}")
+          }
           progressBar.foreach(_.reportSuccessfulProcess())
+          allApps.put(app.appId, newQualSummary)
           val endTime = System.currentTimeMillis()
           SuccessAppResult(pathStr, app.appId, app.attemptId,
             s"Took ${endTime - startTime}ms to process")
+        } match {
+          case Some(successfulResult) => successfulResult
+          case _ =>
+            // If the attemptId is an older attemptId, skip this attempt.
+            // This can happen when the user has provided event logs for multiple attempts
+            progressBar.foreach(_.reportSkippedProcess())
+            SkippedAppResult.fromAppAttempt(pathStr, app.appId, app.attemptId)
+        }
       } else {
         progressBar.foreach(_.reportUnkownStatusProcess())
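
The hunk above guards three phases (raw-metric view, autotuner, summary publication) with separate withSafeValidAttempt calls rather than one long critical section, so a newer attempt that registers between phases stops the stale attempt from doing further work. A condensed sketch of that control flow, with hypothetical phase bodies and return strings standing in for the real ones:

// Condensed sketch; phase bodies and result strings are placeholders.
def processAttempt(appId: String, attemptId: Int): String = {
  // Phase 1: collect data-source info, degrading to empty when stale.
  val dsInfo = AppSubscriber.withSafeValidAttempt(appId, attemptId) { () =>
    Seq("parquet", "orc")
  }.getOrElse(Seq.empty)

  // Phase 2: autotuning; silently skipped when the attempt went stale.
  AppSubscriber.withSafeValidAttempt(appId, attemptId) { () =>
    s"tuning with ${dsInfo.size} data sources"
  }

  // Phase 3: publish the summary, or report the attempt as skipped.
  AppSubscriber.withSafeValidAttempt(appId, attemptId) { () =>
    "SuccessAppResult"
  } match {
    case Some(result) => result
    case None => "SkippedAppResult" // a newer attempt took over in the meantime
  }
}

This is why dsInfo falls back to Seq.empty instead of aborting: a stale first phase should not prevent the status logic at the end from classifying the attempt as skipped.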

This file was deleted (AppAttemptTracker, superseded by AppSubscriber).

AppBase.scala
@@ -27,6 +27,7 @@ import com.nvidia.spark.rapids.tool.{DatabricksEventLog, DatabricksRollingEventL
 import com.nvidia.spark.rapids.tool.planparser.{HiveParseHelper, ReadParser}
 import com.nvidia.spark.rapids.tool.planparser.HiveParseHelper.isHiveTableScanNode
 import com.nvidia.spark.rapids.tool.profiling.{BlockManagerRemovedCase, DataSourceCase, DriverAccumCase, JobInfoClass, ResourceProfileInfoCase, SQLExecutionInfoClass, SQLPlanMetricsCase}
+import com.nvidia.spark.rapids.tool.qualification.AppSubscriber
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FileSystem, Path}
@@ -403,10 +404,10 @@ abstract class AppBase(
    * greater than the existing attemptId.
    */
   def registerAttemptId(): Unit = {
-    if(isAppMetaDefined) {
+    if (isAppMetaDefined) {
       val currentAttemptId = sparkProperties.getOrElse("spark.app.attempt.id", "1").toInt
       appMetaData.foreach(_.setAttemptId(currentAttemptId))
-      AppAttemptTracker.registerNewerAttemptId(appId, currentAttemptId)
+      AppSubscriber.subscribeAppAttempt(appId, currentAttemptId)
     }
   }
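
A small sketch of the subscription semantics this registration relies on (the app and attempt IDs are hypothetical): subscribeAppAttempt keeps the largest attempt ID per application and reports whether the given attempt is now the current one.

// Hypothetical attempt ordering for one application.
AppSubscriber.subscribeAppAttempt("app-42", 1) // true: first attempt registered
AppSubscriber.subscribeAppAttempt("app-42", 3) // true: 3 > 1, becomes current
AppSubscriber.subscribeAppAttempt("app-42", 2) // false: 2 < 3, attempt 2 is stale

Note the default in the diff: when spark.app.attempt.id is absent from the event log, the attempt registers as 1, so logs without an attempt ID compete as attempt 1.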
ConsoleProgressBar.scala
@@ -119,6 +119,11 @@ class ConsoleProgressBar(
     }
   }
 
+  def adjustCounterForMultipleAttempts(): Unit = {
+    successCounter.decrementAndGet()
+    skippedCounter.incrementAndGet()
+  }
+
   def reportSuccessfulProcess(): Unit = {
     successCounter.incrementAndGet()
     totalCounter.incrementAndGet()
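
Together with the reportSuccessfulProcess call that follows it in Qualification, the new method reclassifies a superseded attempt as skipped while the new attempt is counted as successful. A sketch of the bookkeeping, assuming the counters are AtomicLong-like (the diff only shows that they support decrementAndGet/incrementAndGet):

import java.util.concurrent.atomic.AtomicLong

// Hypothetical state after attempt 1 was already reported successful.
val successCounter = new AtomicLong(1)
val skippedCounter = new AtomicLong(0)
val totalCounter = new AtomicLong(1)

// Attempt 2 supersedes attempt 1: demote the earlier success to skipped...
successCounter.decrementAndGet()
skippedCounter.incrementAndGet()
// ...then report attempt 2 as successful (success and total both increment).
successCounter.incrementAndGet()
totalCounter.incrementAndGet()

// Net result: 2 processed, 1 successful, 1 skipped.
assert(successCounter.get == 1 && skippedCounter.get == 1 && totalCounter.get == 2)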
RuntimeReporter.scala
@@ -17,10 +17,10 @@
 package org.apache.spark.sql.rapids.tool.util
 
 import com.nvidia.spark.rapids.tool.profiling.AppStatusResult
+import com.nvidia.spark.rapids.tool.qualification.AppSubscriber
 import org.apache.hadoop.conf.Configuration
 
 import org.apache.spark.internal.Logging
-import org.apache.spark.sql.rapids.tool.AppAttemptTracker
 
 trait RuntimeReporter extends Logging {
   val outputDir: String
@@ -35,12 +35,10 @@ trait RuntimeReporter extends Logging {
   private def skipAppsWithOlderAttempts(appStatuses: Seq[AppResult]): Seq[AppResult] = {
     appStatuses map {
       case successApp: SuccessAppResult =>
-        if (AppAttemptTracker.isOlderAttemptId(successApp.appId, successApp.attemptId)) {
-          SkippedAppResult.fromAppAttempt(successApp.path, successApp.appId,
-            successApp.attemptId)
-        } else {
+        AppSubscriber.withUnsafeValidAttempt(successApp.appId, successApp.attemptId) { () =>
           successApp
-        }
+        }.getOrElse(SkippedAppResult.fromAppAttempt(successApp.path, successApp.appId,
+          successApp.attemptId))
       case otherApp: AppResult => otherApp
     }
   }
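
This pass uses the lock-free withUnsafeValidAttempt, presumably because final status reporting runs after the concurrent per-app processing has finished. A sketch of the demotion it performs (the IDs and result strings are hypothetical): results from superseded attempts are replaced by skipped results, everything else passes through.

// Attempt 2 is the current attempt for this hypothetical app.
AppSubscriber.subscribeAppAttempt("app-7", 2)

// The current attempt's result passes through unchanged.
val kept = AppSubscriber.withUnsafeValidAttempt("app-7", 2) { () => "keep" }
  .getOrElse("demote to skipped")
assert(kept == "keep")

// A superseded attempt's result is replaced by a skipped result.
val demoted = AppSubscriber.withUnsafeValidAttempt("app-7", 1) { () => "keep" }
  .getOrElse("demote to skipped")
assert(demoted == "demote to skipped")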

Six additional files contain large diffs that are not rendered by default, and two binary files are not shown.