diff --git a/DESCRIPTION b/DESCRIPTION
index 9847d38..2b2c97d 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
 Package: audio.whisper
 Type: Package
 Title: Transcribe Audio Files using the "Whisper" Automatic Speech Recognition Model
-Version: 0.4
+Version: 0.4.1
 Maintainer: Jan Wijffels <jwijffels@bnosac.be>
 Authors@R: c(
     person('Jan', 'Wijffels', role = c('aut', 'cre', 'cph'), email = 'jwijffels@bnosac.be', comment = "R wrapper"),
@@ -25,7 +25,10 @@ Imports:
 Suggests:
     tinytest,
     audio,
-    data.table (>= 1.12.4)
+    data.table (>= 1.12.4),
+    audio.vadwebrtc (>= 0.2.0)
 LinkingTo: Rcpp
 SystemRequirements: GNU make
 RoxygenNote: 7.1.2
+Remotes: bnosac/audio.vadwebrtc
+
diff --git a/NAMESPACE b/NAMESPACE
index 6dd20c4..ff8c475 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,6 +1,7 @@
 # Generated by roxygen2: do not edit by hand
 
 S3method(predict,whisper)
+S3method(predict,whisper_transcription)
 export(whisper)
 export(whisper_benchmark)
 export(whisper_download_model)
diff --git a/NEWS.md b/NEWS.md
index ccf0f97..ba44cb4 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,7 @@
+## CHANGES IN audio.whisper VERSION 0.4.1
+
+- Added function predict.whisper_transcription which assigns transcription segments to either the left or the right audio channel based on a per-channel Voice Activity Detection
+
 ## CHANGES IN audio.whisper VERSION 0.4
 
 - Allow to pass on multiple offset/durations
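Reviewer note: the new S3 method plugs into the existing predict() workflow. A minimal sketch of the intended end-to-end flow, assembled from the examples added in the new file below; it assumes the newly suggested audio.vadwebrtc package is installed for the VAD step:

    # End-to-end: transcribe a stereo file, detect voice per channel, assign segments
    library(audio.whisper)
    model <- whisper("tiny")
    audio <- system.file(package = "audio.whisper", "samples", "stereo.wav")
    trans <- predict(model, audio, language = "es")
    library(audio.vadwebrtc)   # >= 0.2.0, now listed in Suggests
    vad   <- VAD_channel(audio, channels = "all", mode = "veryaggressive", milliseconds = 30)
    out   <- predict(trans, vad, type = "channel")
    table(out$data$channel)    # number of segments assigned to left/right/both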
diff --git a/R/vad-channel.R b/R/vad-channel.R
new file mode 100644
index 0000000..ca41a34
--- /dev/null
+++ b/R/vad-channel.R
@@ -0,0 +1,86 @@
+#' @title Predict to which channel a transcription section belongs
+#' @description For audio files containing 2 channels which were transcribed with \code{\link{predict.whisper}},
+#' you can use the results of a Voice Activity Detection performed by channel (either with R package
+#' \code{audio.vadwebrtc} or \code{audio.vadsilero}) to assign the text segments to each of the channels.\cr
+#' This is done by computing, for each text segment, how many seconds it overlaps with the voiced sections
+#' identified by the Voice Activity Detection.
+#' @param object an object of class \code{whisper_transcription} as returned by \code{\link{predict.whisper}}
+#' @param vad an object of class \code{webrtc-gmm-bychannel} as returned by function \code{VAD_channel} from R package \code{audio.vadwebrtc} with information on the detected voice in at least channels 1 and 2,
+#' or a list with element \code{vad_segments} containing a data.frame with columns channel, start, end and has_voice indicating at which seconds
+#' there was voice in the audio
+#' @param type character string, currently the only possible value is 'channel', which does a 2-speaker channel assignment
+#' @param threshold numeric in the 0-1 range: if the difference between the probabilities that the segment came from the left channel (1) or the right channel (2) is smaller than this amount, the column \code{channel} is set to 'both'. Defaults to 0.
+#' @param ... not used
+#' @return an object of class \code{whisper_transcription} as documented in \code{\link{predict.whisper}}
+#' where element \code{data} contains the following extra columns indicating which channel the transcription is probably from
+#' \itemize{
+#' \item{channel: either 'left', 'right' or 'both', indicating the transcription segment was either from the left channel (1), the right channel (2) or probably from both, as identified by the Voice Activity Detection}
+#' \item{channel_probability: a number between 0 and 1 indicating, for that specific segment, the ratio of the amount of voiced seconds in the most probable channel to
+#' the sum of the amount of voiced seconds in the left and right channels}
+#' \item{duration: how long (in seconds) the from-to segment is}
+#' \item{duration_voiced_left: how many seconds there was a voiced signal on the left channel (channel 1) as identified by \code{vad}}
+#' \item{duration_voiced_right: how many seconds there was a voiced signal on the right channel (channel 2) as identified by \code{vad}}
+#' }
+#' @export
+#' @seealso \code{\link{predict.whisper}}
+#' @examples
+#' library(audio.whisper)
+#' model <- whisper("tiny")
+#' audio <- system.file(package = "audio.whisper", "samples", "stereo.wav")
+#' trans <- predict(model, audio, language = "es")
+#' \dontrun{
+#' library(audio.vadwebrtc)
+#' vad <- VAD_channel(audio, channels = "all", mode = "veryaggressive", milliseconds = 30)
+#' }
+#' vad <- list(vad_segments = rbind(
+#'   data.frame(channel = 1, start = c(0, 5, 15, 22), end = c(5, 9, 18, 23), has_voice = TRUE),
+#'   data.frame(channel = 2, start = c(2, 9.5, 19, 22), end = c(2.5, 13.5, 21, 23), has_voice = TRUE)))
+#' out <- predict(trans, vad, type = "channel", threshold = 0)
+#' out$data
+predict.whisper_transcription <- function(object, vad, type = "channel", threshold = 0, ...){
+  stopifnot(inherits(object, "whisper_transcription"))
+  ## vad is deliberately not checked on class: a plain list with an element
+  ## vad_segments is accepted as well, as documented above
+  fields    <- colnames(object$data)
+  sentences <- object$data
+  ## convert the from/to timestamps (HH:MM:SS.mmm) to seconds since the start of the audio
+  today     <- Sys.Date()
+  midnight  <- as.POSIXct(paste(today, "00:00:00.000", sep = " "), format = "%Y-%m-%d %H:%M:%OS")
+  sentences$start    <- as.numeric(difftime(as.POSIXct(paste(today, sentences$from, sep = " "), format = "%Y-%m-%d %H:%M:%OS"), midnight, units = "secs"))
+  sentences$end      <- as.numeric(difftime(as.POSIXct(paste(today, sentences$to, sep = " "), format = "%Y-%m-%d %H:%M:%OS"), midnight, units = "secs"))
+  sentences$duration <- sentences$end - sentences$start
+
+  voiced <- vad
+  left   <- voiced$vad_segments[voiced$vad_segments$channel %in% 1 & voiced$vad_segments$has_voice, ]
+  right  <- voiced$vad_segments[voiced$vad_segments$channel %in% 2 & voiced$vad_segments$has_voice, ]
+  ## seconds of overlap between a segment [start, end] and the voiced intervals:
+  ## clamp each voiced interval to the segment boundaries and sum the remaining lengths
+  overlap <- function(start, end, voiced){
+    voiced      <- voiced[voiced$end >= start & voiced$start <= end, ]
+    voiced$from <- ifelse(voiced$start > start, voiced$start, start)
+    voiced$to   <- ifelse(voiced$end > end, end, voiced$end)
+    voiced      <- voiced[voiced$from <= voiced$to, ]
+    sum(voiced$to - voiced$from, na.rm = TRUE)
+  }
+  sentences$duration_voiced_left  <- as.numeric(mapply(start = sentences$start, end = sentences$end, FUN = overlap, MoreArgs = list(voiced = left), SIMPLIFY = TRUE, USE.NAMES = FALSE))
+  sentences$duration_voiced_right <- as.numeric(mapply(start = sentences$start, end = sentences$end, FUN = overlap, MoreArgs = list(voiced = right), SIMPLIFY = TRUE, USE.NAMES = FALSE))
+  ## assign the channel with the most voiced seconds; segments without voiced
+  ## overlap in either channel end up with NaN probabilities
+  sentences$channel <- ifelse(sentences$duration_voiced_left > sentences$duration_voiced_right, "left", "right")
+  sentences$channel_left_probability  <- sentences$duration_voiced_left  / (sentences$duration_voiced_left + sentences$duration_voiced_right)
+  sentences$channel_right_probability <- sentences$duration_voiced_right / (sentences$duration_voiced_left + sentences$duration_voiced_right)
+  sentences$channel_probability <- ifelse(sentences$channel_left_probability > sentences$channel_right_probability, sentences$channel_left_probability, sentences$channel_right_probability)
+  ## if the rounded left/right shares are within `threshold` of each other, label the segment 'both'
+  sentences$left_pct  <- round(sentences$channel_left_probability, digits = 2)
+  sentences$right_pct <- round(sentences$channel_right_probability, digits = 2)
+  sentences$channel   <- ifelse(abs(sentences$left_pct - sentences$right_pct) < threshold, "both", sentences$channel)
+  sentences$segment_pct_nonsilent_left  <- sentences$duration_voiced_left  / sentences$duration
+  sentences$segment_pct_nonsilent_right <- sentences$duration_voiced_right / sentences$duration
+  object$data <- sentences[, unique(c(fields, "channel", "channel_probability", "duration", "duration_voiced_left", "duration_voiced_right"))]
+  object
+}
\ No newline at end of file
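Reviewer note: the heart of the assignment is the clamped-interval overlap in the overlap() helper above. A standalone illustration with made-up intervals; pmin/pmax perform the same clamping as the ifelse calls in the helper:

    # Voiced intervals [0, 5] and [5, 9] against a transcription segment [4, 8]:
    # clamped to [4, 5] and [5, 8], so 1 + 3 = 4 voiced seconds inside the segment
    voiced <- data.frame(start = c(0, 5), end = c(5, 9))
    kept   <- voiced[voiced$end >= 4 & voiced$start <= 8, ]   # keep intervals touching the segment
    sum(pmin(kept$end, 8) - pmax(kept$start, 4))              # 4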
diff --git a/man/predict.whisper_transcription.Rd b/man/predict.whisper_transcription.Rd
new file mode 100644
index 0000000..1b1ad42
--- /dev/null
+++ b/man/predict.whisper_transcription.Rd
@@ -0,0 +1,58 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/vad-channel.R
+\name{predict.whisper_transcription}
+\alias{predict.whisper_transcription}
+\title{Predict to which channel a transcription section belongs}
+\usage{
+\method{predict}{whisper_transcription}(object, vad, type = "channel", threshold = 0, ...)
+}
+\arguments{
+\item{object}{an object of class \code{whisper_transcription} as returned by \code{\link{predict.whisper}}}
+
+\item{vad}{an object of class \code{webrtc-gmm-bychannel} as returned by function \code{VAD_channel} from R package \code{audio.vadwebrtc} with information on the detected voice in at least channels 1 and 2,
+or a list with element \code{vad_segments} containing a data.frame with columns channel, start, end and has_voice indicating at which seconds
+there was voice in the audio}
+
+\item{type}{character string, currently the only possible value is 'channel', which does a 2-speaker channel assignment}
+
+\item{threshold}{numeric in the 0-1 range: if the difference between the probabilities that the segment came from the left channel (1) or the right channel (2) is smaller than this amount, the column \code{channel} is set to 'both'. Defaults to 0.}
+
+\item{...}{not used}
+}
+\value{
+an object of class \code{whisper_transcription} as documented in \code{\link{predict.whisper}}
+where element \code{data} contains the following extra columns indicating which channel the transcription is probably from
+\itemize{
+\item{channel: either 'left', 'right' or 'both', indicating the transcription segment was either from the left channel (1), the right channel (2) or probably from both, as identified by the Voice Activity Detection}
+\item{channel_probability: a number between 0 and 1 indicating, for that specific segment, the ratio of the amount of voiced seconds in the most probable channel to
+the sum of the amount of voiced seconds in the left and right channels}
+\item{duration: how long (in seconds) the from-to segment is}
+\item{duration_voiced_left: how many seconds there was a voiced signal on the left channel (channel 1) as identified by \code{vad}}
+\item{duration_voiced_right: how many seconds there was a voiced signal on the right channel (channel 2) as identified by \code{vad}}
+}
+}
+\description{
+For audio files containing 2 channels which were transcribed with \code{\link{predict.whisper}},
+you can use the results of a Voice Activity Detection performed by channel (either with R package
+\code{audio.vadwebrtc} or \code{audio.vadsilero}) to assign the text segments to each of the channels.\cr
+This is done by computing, for each text segment, how many seconds it overlaps with the voiced sections
+identified by the Voice Activity Detection.
+}
+\examples{
+library(audio.whisper)
+model <- whisper("tiny")
+audio <- system.file(package = "audio.whisper", "samples", "stereo.wav")
+trans <- predict(model, audio, language = "es")
+\dontrun{
+library(audio.vadwebrtc)
+vad <- VAD_channel(audio, channels = "all", mode = "veryaggressive", milliseconds = 30)
+}
+vad <- list(vad_segments = rbind(
+  data.frame(channel = 1, start = c(0, 5, 15, 22), end = c(5, 9, 18, 23), has_voice = TRUE),
+  data.frame(channel = 2, start = c(2, 9.5, 19, 22), end = c(2.5, 13.5, 21, 23), has_voice = TRUE)))
+out <- predict(trans, vad, type = "channel", threshold = 0)
+out$data
+}
+\seealso{
+\code{\link{predict.whisper}}
+}
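Reviewer note on threshold: a hypothetical segment with 1.5 voiced seconds on the left and 1.0 on the right gets probabilities 0.6 versus 0.4; the default threshold = 0 assigns it to 'left', while threshold = 0.25 would label it 'both' because the 0.2 difference falls below the threshold. A sketch reusing trans and vad from the examples above:

    # Compare strict and lenient channel assignment side by side
    out_strict  <- predict(trans, vad, type = "channel", threshold = 0)
    out_lenient <- predict(trans, vad, type = "channel", threshold = 0.25)
    cbind(strict = out_strict$data$channel, lenient = out_lenient$data$channel)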