Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Separate encode and decode #142

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -136,10 +136,26 @@ private synchronized void encodeAndDecodeMicDataToSpeaker(int bitrate) {
if (micDataShortsWritten == 0) {
return;
}
// Whatever micData holds, encode and decode with Lyra.
short[] decodedAudio = encodeAndDecodeSamples(micData, micDataShortsWritten, bitrate,
// Encode the recorded wave inside the micData with Lyra
short[] encodedAudio = encodeSamples(micData, micDataShortsWritten, SAMPLE_RATE, bitrate,
weightsDirectory);

// Notes for Chase:
// The shorts in the "encodedAudio" array contain all the data you wish to transmit via our LoRa network.
// The length of the short array "encodedAudio" is typically larger than the maximum payload
// size allowed by our LoRa protocol, so make sure you manually separate this array into several
// small packets (e.g. ~100 bytes long) and transmit them via LoRa network sequentially.
//
// Another thing to note is that the Lyra codec can tolerate packet losses up to some level. Therefore, it
// might be useful to try different sizes of the small packets to find which size(s) give you the best audio
// experience when packet loss is present.
//
// At the receiving side, when you receive a packet, you can call the "decodeSamples" function to decode a Lyra packet.
// The decoded audio should be buffered into an array first and then play the buffered audio (see the example below).
// In practice, do not assume that the receiving party will receive 100% of the transmitted packets. Therefore, just
// decode what you receive - the interrupted audio may still be useful to the end users.
short[] decodedAudio = decodeSamples(encodedAudio, micDataShortsWritten, SAMPLE_RATE,
bitrate, weightsDirectory);
if (decodedAudio == null) {
Log.e(TAG, "Failed to encode and decode microphone data.");
return;
Expand Down Expand Up @@ -289,11 +305,16 @@ private void copyWeightsAssetsToDirectory(String targetDirectory) {
}

/**
* A method that is implemented by the 'lyra_android_example' C++ library, which is packaged with
* Methods that are implemented by the 'lyra_android_example' C++ library, which is packaged with
* this application.
*/
public native String lyraBenchmark(int numCondVectors, String modelBasePath);

public native short[] encodeAndDecodeSamples(
short[] samples, int sampleLength, int bitrate, String modelBasePath);
// Encodes sampleLength shorts of samples, recorded at sample_rate_Hz, into
// Lyra packets at the given bitrate using the model weights found under
// modelBasePath. The native implementation returns the encoded packet bytes
// widened into a short[] (one byte per short), or null if encoding fails.
public native short[] encodeSamples(
short[] samples, int sampleLength, int sample_rate_Hz,
int bitrate, String modelBasePath);

// Decodes Lyra packet data (as produced by encodeSamples) back into audio
// samples, or returns null on failure.
// NOTE(review): the C++ JNI implementation of decodeSamples declares an
// extra jshortArray "output" parameter that this Java declaration does not
// have — the signatures must be reconciled or the JNI call will fail at
// runtime. TODO: confirm against jni_lyra_benchmark_lib.cc.
public native short[] decodeSamples(
short[] samples, int sampleLength, int sample_rate_Hz,
int bitrate, String modelBasePath);
}
98 changes: 87 additions & 11 deletions lyra/android_example/jni_lyra_benchmark_lib.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,28 +23,104 @@
#include "lyra/lyra_benchmark_lib.h"
#include "lyra/lyra_config.h"

extern "C" JNIEXPORT jshortArray JNICALL
Java_com_example_android_lyra_MainActivity_encodeAndDecodeSamples(
JNIEnv* env, jobject this_obj, jshortArray samples, jint sample_length,
jint bitrate, jstring model_base_path) {
/**
 * @brief Encode audio samples into Lyra packets.
 *
 * @param env - JNI environment (do not change)
 * @param this_obj - the Java object to interact with (do not change)
 * @param samples - the input audio samples (waveform)
 * @param sample_length - the number of valid shorts in |samples|
 * @param sample_rate_Hz - the sampling rate of the audio samples
 * @param bitrate - the bit rate to be encoded to
 * @param model_base_path - the path of the trained model weights
 * @return a Java short[] holding the encoded packet bytes (one byte widened
 *         per short), or nullptr if encoding failed.
 */
extern "C" JNIEXPORT jshortArray JNICALL
Java_com_example_android_lyra_MainActivity_encodeSamples(
    JNIEnv* env, jobject this_obj, jshortArray samples, jint sample_length,
    jint sample_rate_Hz, jint bitrate, jstring model_base_path) {
  std::vector<int16_t> samples_vector(sample_length);
  std::vector<uint8_t> features;
  jshortArray java_encoded_samples = nullptr;

  env->GetShortArrayRegion(samples, jsize{0}, sample_length,
                           &samples_vector[0]);

  const char* cpp_model_base_path = env->GetStringUTFChars(model_base_path, 0);

  if (chromemedia::codec::EncodeWav(
          samples_vector, chromemedia::codec::kNumChannels, sample_rate_Hz,
          bitrate, false, false, cpp_model_base_path, &features)) {
    // BUG FIX: the previous version copied the return data from an EMPTY
    // |encoded_samples| vector (undefined behavior); the encoded bytes are
    // in |features|. Widen each uint8_t packet byte to a jshort so the data
    // fits the Java short[] return type.
    std::vector<jshort> encoded_samples(features.begin(), features.end());
    java_encoded_samples = env->NewShortArray(features.size());
    env->SetShortArrayRegion(java_encoded_samples, 0, features.size(),
                             encoded_samples.data());
  }

  // BUG FIX: the previous version returned without releasing the UTF chars
  // obtained above, leaking them on every call (JNI requires a matching
  // ReleaseStringUTFChars for each GetStringUTFChars).
  env->ReleaseStringUTFChars(model_base_path, cpp_model_base_path);

  // nullptr here signals encoding failure to the Java caller.
  return java_encoded_samples;
}

extern "C" JNIEXPORT jshortArray
JNICALL
/**
* @brief Decode the encoded bytes back to audio samples with Lyra decoder
*
* @param env - JNI environment (do not change)
* @param this_obj - the Java object to interact with (do not change)
* @param samples - the input audio samples (waves)
* @param sample_length - the length of the audio samples (waves)
* @param sample_rate_Hz - the sampling rate of the audio samples (waves)
* @param bitrate - the bit rate to be encoded to
* @param model_base_path - the path of the trained model
*/
Java_com_example_android_lyra_MainActivity_decodeSamples(
JNIEnv* env, jobject this_obj, jshortArray features, jshortArray output,
jint feature_length, jint sample_rate_Hz, jint bitrate,
jstring model_base_path) {
std::vector<int16_t> feature_vector(feature_length);
std::vector<uint8_t> feature_vector_bytes;
//std::vector<uint8_t> features;
std::vector<int16_t> decoded_audio;
jshortArray java_decoded_audio = nullptr;
// convert the Java data type to its corresponding C++ data type
env->GetShortArrayRegion(features, jsize{0}, feature_length, &(feature_vector[0]));

// convert the int16_t vector to the uint8_t - stay for now but can be optimised later
// Note: the reason for adding the following loop is that "GetShortArrayRegion" function
// only takes int16_t as the type of its last argument but the "DecodeFeature"
// function accepts only uint8_t vector as the input.
feature_vector_bytes.reserve(feature_length);
for (int16_t num : feature_vector) {
feature_vector_bytes.push_back(static_cast<uint8_t>(num));
}


const char* cpp_model_base_path = env->GetStringUTFChars(model_base_path, 0);
std::unique_ptr<chromemedia::codec::LyraDecoder> decoder =
chromemedia::codec::LyraDecoder::Create(
16000, chromemedia::codec::kNumChannels, cpp_model_base_path);

absl::BitGen gen;
if (chromemedia::codec::EncodeWav(
samples_vector, chromemedia::codec::kNumChannels, 16000, bitrate,
false, false, cpp_model_base_path, &features) &&
chromemedia::codec::DecodeFeatures(
features, chromemedia::codec::BitrateToPacketSize(bitrate),
if (chromemedia::codec::DecodeFeatures(
feature_vector_bytes, chromemedia::codec::BitrateToPacketSize(bitrate),
/*randomize_num_samples_requested=*/false, gen, decoder.get(),
nullptr, &decoded_audio)) {
java_decoded_audio = env->NewShortArray(decoded_audio.size());
Expand Down
1 change: 1 addition & 0 deletions out/liblyra_android_example.so
Binary file added out/lyra_android_example.apk
Binary file not shown.
Binary file added out/lyra_android_example_deploy.jar
Binary file not shown.
Binary file added out/lyra_android_example_unsigned.apk
Binary file not shown.