Skip to content

Commit

Permalink
Stricter compliance with C++ best-practices, enforced by stricter rul…
Browse files Browse the repository at this point in the history
…eset with clang-tidy (safer code). Removed C-style character arrays, now use std::array<char, size>. Added [[nodiscard]] on Trace getters and other functions so that the compiler generates warnings if the output is not used. Helper structs for low-level functions that have similar parameters (prevent mixing up parameters -> prevent bugs). Use of std::move for io_error instead of const reference.
  • Loading branch information
arbCoding committed Dec 9, 2023
1 parent 2ec8b36 commit 779c4d4
Show file tree
Hide file tree
Showing 12 changed files with 332 additions and 260 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/cpp-linter.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ jobs:
style: file
version: 15
files-changed-only: false
tidy-checks: "bugprone-a*,bugprone-b*,bugprone-c*,bugprone-d*,bugprone-em*,bugprone-ex*,bugprone-f*,bugprone-i*,bugprone-l*,bugprone-m*,bugprone-n*,bugprone-o*,bugprone-p*,bugprone-r*,bugprone-s*,bugprone-t*,bugprone-u*,bugprone-v*,performance-*,readability-a*,readability-b*,readability-c*,readability-d*,readability-e*,readability-f*,readability-i*,readability-m*,readability-n*,readability-o*,readability-q*,readability-r*,readability-si*,readability-st*,readability-u*,portability-*,clang-analyzer-*,cppcoreguidelines-avoid-ca*,cpp-coreguidelines-avoid-co*,cpp-coreguidelines-avoid-d*,cpp-coreguidelines-avoid-g*,cpp-coreguidelines-avoid-n*,cpp-coreguidelines-avoid-r*,cpp-coreguidelines-i*,cpp-coreguidelines-m*,cpp-coreguidelines-n*,cpp-coreguidelines-o*,cpp-coreguidelines-pr*,cpp-coreguidelines-pro-bounds-p*,cpp-coreguidelines-pro-t*,cpp-coreguidelines-r*,cpp-coreguidelines-s*,cpp-coreguidelines-v*"
tidy-checks: "bugprone-*,performance-*,readability-*,portability-*,clang-analyzer-*,cppcoreguidelines-*,modernize-a*,modernize-c*,modernize-d*,modernize-l*,modernize-m*,modernize-p*,modernize-r*,modernize-s*,modernize-t*,modernize-un*,modernize-use-a*,modernize-use-b*,modernize-use-c*,modernize-use-d*,modernize-use-e*,modernize-use-n*,modernize-use-o*,modernize-use-s*,modernize-use-tran*,modernize-use-u*"
database: "build/debug/gh-coverage/compile_commands.json"
ignore: "build/debug/gh-coverage/_deps | build/debug/gh-coverage/CMakeFiles | src/utests.cpp | src/benchmark.cpp | src/util.hpp | .github"
extra-args: "-O0 -Wall -Werror -Wshadow -Wextra -pedantic-errors -std=c++20"
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@

# Any compressed files
*.tar.gz
*.tgz

# Static analysis stuff
.cache
Expand Down
1 change: 1 addition & 0 deletions README.org
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
@@html:<img alt="GitHub code size in bytes" src="https://img.shields.io/github/languages/code-size/arbCoding/sac-format">@@
@@html:<img alt="Codacy grade" src="https://img.shields.io/codacy/grade/870db5c2793a48df9ed98e942a08fc9e">@@
@@html:<img src="https://www.codefactor.io/repository/github/arbcoding/sac-format/badge">@@
@@html:<a href="https://scan.coverity.com/projects/arbcoding-sac-format"></a>@@
[[https://github.com/arbCoding/sac-format/actions/workflows/cpp-linter.yml][https://github.com/arbCoding/sac-format/actions/workflows/cpp-linter.yml/badge.svg]]

sac-format is a single-header, statically linked library for reading binary
Expand Down
24 changes: 24 additions & 0 deletions coverity_build.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#!/bin/dash
# Build the library with the gcc-debug preset under Coverity's build capture
# and package the result (sac-format.tgz in the repo root) for submission to
# Coverity Scan.
#
# Fail fast: without -e a failed cmake/cov-configure step would let the script
# continue and package a stale or empty cov-int directory; -u catches typos in
# variable names.
set -eu
base=$(pwd)
# Cleanup: rm -rf tolerates a missing directory, no existence test needed
rm -rf "$base/build/debug/gcc"
# Prepare to build
cmake --preset gcc-debug
# Prepare coverity scan
cd ./build/debug/gcc
# Make sure it only captures the sac_format.hpp/.cpp pair for the library
cov-configure --config cov-conf/cov.xml --compiler g++ --comptype g++ \
    --template --xml-option=skip_file:".*/_deps/.*" \
    --xml-option=skip_file:".*/utests.cpp" \
    --xml-option=skip_file:".*/benchmark.cpp" \
    --xml-option=skip_file:".*/list_sac.cpp"
# Capture the ninja build
cov-build --config cov-conf/cov.xml --dir cov-int ninja
# Package for submission
tar czf sac-format.tgz cov-int
# Move to base dir
mv ./sac-format.tgz "$base/sac-format.tgz"
# Return home
cd "$base"
139 changes: 70 additions & 69 deletions docs/index.html

Large diffs are not rendered by default.

Binary file modified docs/sac-format_manual.pdf
Binary file not shown.
2 changes: 1 addition & 1 deletion get_coverage.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ fi
if [ -e "$base/build/debug/gcc-coverage" ]; then
rm -rf "$base/build/debug/gcc-coverage"
fi
## Build the preset
# Build the preset
cmake --preset gcc-coverage
cmake --build build/debug/gcc-coverage
# Run my unit tests
Expand Down
9 changes: 8 additions & 1 deletion notes.org
Original file line number Diff line number Diff line change
@@ -1,8 +1,15 @@
* Inbox
** [2023-11-21 Tue 12:16]
Running clang-tidy locally prior to GitHub
~clang-tidy --checks="bugprone-a*,bugprone-b*,bugprone-c*,bugprone-d*,bugprone-em*,bugprone-ex*,bugprone-f*,bugprone-i*,bugprone-l*,bugprone-m*,bugprone-n*,bugprone-o*,bugprone-p*,bugprone-r*,bugprone-s*,bugprone-t*,bugprone-u*,bugprone-v*,performance-*,readability-a*,readability-b*,readability-c*,readability-d*,readability-e*,readability-f*,readability-i*,readability-m*,readability-n*,readability-o*,readability-q*,readability-r*,readability-si*,readability-st*,readability-u*,portability-*,clang-analyzer-*,cppcoreguidelines-avoid-ca*,cpp-coreguidelines-avoid-co*,cpp-coreguidelines-avoid-d*,cpp-coreguidelines-avoid-g*,cpp-coreguidelines-avoid-n*,cpp-coreguidelines-avoid-r*,cpp-coreguidelines-i*,cpp-coreguidelines-m*,cpp-coreguidelines-n*,cpp-coreguidelines-o*,cpp-coreguidelines-pr*,cpp-coreguidelines-pro-bounds-p*,cpp-coreguidelines-pro-t*,cpp-coreguidelines-r*,cpp-coreguidelines-s*,cpp-coreguidelines-v*" --extra-arg="-std=c++20" -p ./compile_commands.json src/sac_format.cpp~
** [2023-11-21 Tue 10:16]
Prior to =git add -A= be sure to run =clang-format= on all *.cpp and *.hpp files!

~clang-format -style=file -i src/sac_format.cpp~

** Alternative check

*NOTE* excluding: modernize-use-trailing-return-type because trailing return
types add noise without benefit here. Prefer type function() {} over
auto function() -> type {}.

~clang-tidy --checks="bugprone-*,performance-*,readability-*,portability-*,clang-analyzer-*,cppcoreguidelines-*,modernize-a*,modernize-c*,modernize-d*,modernize-l*,modernize-m*,modernize-p*,modernize-r*,modernize-s*,modernize-t*,modernize-un*,modernize-use-a*,modernize-use-b*,modernize-use-c*,modernize-use-d*,modernize-use-e*,modernize-use-n*,modernize-use-o*,modernize-use-s*,modernize-use-tran*,modernize-use-u*" --extra-arg="-std=c++20" -p ./compile_commands.json src/sac_format.cpp~
13 changes: 9 additions & 4 deletions src/docs/index.org
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@
<img src="https://www.codefactor.io/repository/github/arbcoding/sac-format/badge" alt="CodeFactor" />
</a>

<a href="https://scan.coverity.com/projects/arbcoding-sac-format">
<img alt="Coverity Scan Build Status" src="https://scan.coverity.com/projects/29505/badge.svg"/>
</a>

<a href="https://github.com/arbCoding/sac-format/actions/workflows/cpp-linter.yml">
<img alt="CPP-Linter Results" src="https://github.com/arbCoding/sac-format/actions/workflows/cpp-linter.yml/badge.svg" />
</a>
Expand Down Expand Up @@ -103,9 +107,10 @@ documentation and in-code comments.
sac-format is *transparent*---all analysis and coverage information is publicly
available online.

- [[https://www.codefactor.io/repository/github/arbcoding/sac-format][CodeFactor Analysis]]
- [[https://app.codacy.com/gh/arbCoding/sac-format/dashboard][Codacy Analysis]]
- [[https://app.codecov.io/gh/arbCoding/sac-format][CodeCov Coverage Analysis]]
- [[https://www.codefactor.io/repository/github/arbcoding/sac-format][CodeFactor]]
- [[https://app.codacy.com/gh/arbCoding/sac-format/dashboard][Codacy]]
- [[https://app.codecov.io/gh/arbCoding/sac-format][CodeCov Coverage]]
- [[https://scan.coverity.com/projects/arbcoding-sac-format][Coverity Scan]]

*** Trace Class

Expand Down Expand Up @@ -309,7 +314,7 @@ using circular symmetry.
Limited to $[-180, 180]$ degrees, input that is outside that range is reduced
using circular symmetry.

***** Require for safety
***** Required for safety

Rules here are required by the SAC format standard. sac-format automatically
imposes these rules to prevent the creation of corrupt sac-files.
Expand Down
119 changes: 66 additions & 53 deletions src/sac_format.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ namespace sacfmt {
//-----------------------------------------------------------------------------
// Conversions
//-----------------------------------------------------------------------------
int word_position(const int word_number) noexcept {
size_t word_position(const size_t word_number) noexcept {
return (word_number * word_length);
}

Expand Down Expand Up @@ -165,21 +165,22 @@ word_one bool_to_binary(const bool flag) noexcept {

bool binary_to_bool(const word_one &flag) noexcept { return flag[0]; }

word_two concat_words(const word_one &word1, const word_one &word2) noexcept {
word_two concat_words(const word_pair<word_one> &pair_words) noexcept {
word_two result{};
for (size_t i{0}; i < binary_word_size; ++i) [[likely]] {
result[i] = word1[i];
result[i + binary_word_size] = word2[i];
result[i] = pair_words.first[i];
result[i + binary_word_size] = pair_words.second[i];
}
return result;
}

word_four concat_words(const word_two &word12,
const word_two &word34) noexcept {
word_four concat_words(const word_pair<word_two> &pair_words) noexcept {
word_four result{};
for (int i{0}; i < 2 * binary_word_size; ++i) {
result[i] = word12[i];
result[i + (2 * binary_word_size)] = word34[i];
for (size_t i{0}; i < static_cast<size_t>(2) * binary_word_size; ++i)
[[likely]] {
result[i] = pair_words.first[i];
result[i + (static_cast<size_t>(2) * binary_word_size)] =
pair_words.second[i];
}
return result;
}
Expand All @@ -190,50 +191,60 @@ word_one read_word(std::ifstream *sac) noexcept {
word_one bits{};
constexpr size_t char_size{bits_per_byte};
// Where we will store the characters
// flawfinder: ignore
char word[word_length];
std::array<char, word_length> word{};
// Read to our character array
// This can always hold the source due to careful typing/sizing
// flawfinder: ignore
sac->read(word, word_length);
// Take each character
std::bitset<char_size> byte{};
for (size_t i{0}; i < word_length; ++i) [[likely]] {
size_t character{static_cast<size_t>(word[i])};
byte = std::bitset<char_size>(character);
// bit-by-bit
for (size_t j{0}; j < char_size; ++j) [[likely]] {
bits[(i * char_size) + j] = byte[j];
if (sac->read(word.data(), word_length)) {
// Take each character
std::bitset<char_size> byte{};
for (size_t i{0}; i < word_length; ++i) [[likely]] {
size_t character{static_cast<size_t>(word[i])};
byte = std::bitset<char_size>(character);
// bit-by-bit
for (size_t j{0}; j < char_size; ++j) [[likely]] {
bits[(i * char_size) + j] = byte[j];
}
}
}
return bits;
}

word_two read_two_words(std::ifstream *sac) noexcept {
word_one word1{read_word(sac)};
word_one word2{read_word(sac)};
const word_one first_word{read_word(sac)};
const word_one second_word{read_word(sac)};
word_pair<word_one> pair_words{};
if constexpr (std::endian::native == std::endian::little) {
return concat_words(word1, word2);
pair_words.first = first_word;
pair_words.second = second_word;
} else {
return concat_words(word2, word1);
pair_words.first = second_word;
pair_words.second = first_word;
}
return concat_words(pair_words);
}

word_four read_four_words(std::ifstream *sac) noexcept {
word_two word12{read_two_words(sac)};
word_two word34{read_two_words(sac)};
const word_two first_words{read_two_words(sac)};
const word_two second_words{read_two_words(sac)};
word_pair<word_two> pair_words{};
if constexpr (std::endian::native == std::endian::little) {
return concat_words(word12, word34);
pair_words.first = first_words;
pair_words.second = second_words;
} else {
return concat_words(word34, word12);
pair_words.first = second_words;
pair_words.second = first_words;
}
return concat_words(pair_words);
}

std::vector<double> read_data(std::ifstream *sac, const size_t n_words,
const int start) noexcept {
sac->seekg(word_position(start));
std::vector<double> read_data(std::ifstream *sac,
const read_spec &spec) noexcept {
// static_cast is safe because I'm never using negative values
sac->seekg(static_cast<std::streamoff>(word_position(spec.start_word)));
std::vector<double> result{};
result.resize(n_words);
for (size_t i{0}; i < n_words; ++i) [[likely]] {
result.resize(spec.num_words);
for (size_t i{0}; i < spec.num_words; ++i) [[likely]] {
result[i] = static_cast<double>(binary_to_float(read_word(sac)));
}
return result;
Expand All @@ -253,11 +264,10 @@ void write_words(std::ofstream *sac_file, const std::vector<char> &input) {
// Template on the typename to make possible to handle float or int
template <typename T>
std::vector<char> convert_to_word(const T input) noexcept {
// flawfinder: ignore
char tmp[word_length];
std::array<char, word_length> tmp{};
// Copy bytes from input into the tmp array
// flawfinder: ignore
std::memcpy(tmp, &input, word_length);
std::memcpy(tmp.data(), &input, word_length);
std::vector<char> word{};
word.resize(word_length);
for (int i{0}; i < word_length; ++i) [[likely]] {
Expand All @@ -271,11 +281,10 @@ template std::vector<char> convert_to_word(const float input) noexcept;
template std::vector<char> convert_to_word(const int x) noexcept;

std::vector<char> convert_to_word(const double input) noexcept {
// flawfinder: ignore
char tmp[2 * word_length];
std::array<char, static_cast<size_t>(2) * word_length> tmp{};
// Copy bytes from input into the tmp array
// flawfinder: ignore
std::memcpy(tmp, &input, static_cast<size_t>(2) * word_length);
std::memcpy(tmp.data(), &input, static_cast<size_t>(2) * word_length);
std::vector<char> word{};
word.resize(static_cast<size_t>(2) * word_length);
for (int i{0}; i < 2 * word_length; ++i) {
Expand Down Expand Up @@ -1138,39 +1147,41 @@ void Trace::resize_data(const size_t size) noexcept {
}
//------------------------------------------------------------------------------
// Read
bool nwords_after_current(std::ifstream *sac, const size_t current_pos,
const size_t n_words) noexcept {
bool nwords_after_current(std::ifstream *sac, const read_spec &spec) noexcept {
bool result{false};
if (sac->good()) {
sac->seekg(0, std::ios::end);
const std::size_t final_pos{static_cast<size_t>(sac->tellg())};
// Doesn't like size_t since it wants to allow
// the possibility of negative offsets (not how I use it)
sac->seekg(static_cast<std::streamoff>(current_pos));
const std::size_t diff{final_pos - current_pos};
result = (diff >= (n_words * word_length));
sac->seekg(static_cast<std::streamoff>(spec.start_word));
const std::size_t diff{final_pos - spec.start_word};
result = (diff >= (spec.num_words * word_length));
}
return result;
}

void safe_to_read_header(std::ifstream *sac) {
if (!nwords_after_current(sac, 0, data_word)) {
const read_spec spec{data_word, 0};
if (!nwords_after_current(sac, spec)) {
throw io_error("Insufficient filesize for header.");
}
}

void safe_to_read_footer(std::ifstream *sac) {
// doubles are two words long
if (!nwords_after_current(sac, sac->tellg(),
static_cast<size_t>(num_footer) * 2)) {
const read_spec spec{static_cast<size_t>(num_footer) * 2,
static_cast<size_t>(sac->tellg())};
if (!nwords_after_current(sac, spec)) {
throw io_error("Insufficient filesize for footer.");
}
}

void safe_to_read_data(std::ifstream *sac, const size_t n_words,
const bool data2) {
const std::string data{data2 ? "data2" : "data1"};
if (!nwords_after_current(sac, sac->tellg(), n_words)) {
const read_spec spec{n_words, static_cast<size_t>(sac->tellg())};
if (!nwords_after_current(sac, spec)) {
throw io_error("Insufficient filesize for " + data + '.');
}
}
Expand Down Expand Up @@ -1349,18 +1360,20 @@ Trace::Trace(const std::filesystem::path &path) {
// DATA
const bool is_data{npts() != unset_int};
// data1
const size_t npts_s{static_cast<size_t>(npts())};
const size_t n_words{static_cast<size_t>(npts())};
if (is_data) {
// false flags for data1
safe_to_read_data(&file, npts_s, false); // throws io_error if unsafe
safe_to_read_data(&file, n_words, false); // throws io_error if unsafe
const read_spec spec{n_words, data_word};
// Originally floats, read as doubles
data1(read_data(&file, npts_s, data_word));
data1(read_data(&file, spec));
}
// data2 (uneven or spectral data)
if (is_data && (!leven() || (iftype() > 1))) {
// true flags for data2
safe_to_read_data(&file, npts_s, true); // throws io_error if unsafe
data2(read_data(&file, npts_s, data_word + npts()));
safe_to_read_data(&file, n_words, true); // throws io_error if unsafe
const read_spec spec{n_words, data_word + npts()};
data2(read_data(&file, spec));
}
//--------------------------------------------------------------------------
// Footer
Expand Down
Loading

0 comments on commit 779c4d4

Please sign in to comment.