io.cpp
/* I/O related functions for heat equation solver */
#include <string>
#include <sstream>
#include <iomanip>
#include <fstream>
#include <mpi.h>
#include "matrix.hpp"
#include "heat.hpp"
#include "pngwriter.h"
// Write a picture of the temperature field
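// Rank 0 gathers the local subdomains from all other ranks with point-to-point
// messages and writes the full field into a single PNG image.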
void write_field(const Field& field, const int iter, const ParallelData parallel)
{
    auto height = field.nx * parallel.size;
    auto width = field.ny;

    // array for MPI sends and receives
    auto tmp_mat = Matrix<double> (field.nx, field.ny);

    if (0 == parallel.rank) {
        // Copy the inner data
        auto full_data = Matrix<double>(height, width);
        for (int i = 0; i < field.nx; i++)
            for (int j = 0; j < field.ny; j++)
                full_data(i, j) = field(i + 1, j + 1);

        // Receive data from other ranks
        for (int p = 1; p < parallel.size; p++) {
            MPI_Recv(tmp_mat.data(), field.nx * field.ny,
                     MPI_DOUBLE, p, 22, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            // Copy data to full array
            for (int i = 0; i < field.nx; i++)
                for (int j = 0; j < field.ny; j++)
                    full_data(i + p * field.nx, j) = tmp_mat(i, j);
        }

        // Write out the data to a png file
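        // Iteration numbers are zero padded to four digits, e.g. iteration 250
        // produces "heat_0250.png".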
        std::ostringstream filename_stream;
        filename_stream << "heat_" << std::setw(4) << std::setfill('0') << iter << ".png";
        std::string filename = filename_stream.str();
        save_png(full_data.data(), height, width, filename.c_str(), 'c');
    } else {
        // Send data
        for (int i = 0; i < field.nx; i++)
            for (int j = 0; j < field.ny; j++)
                tmp_mat(i, j) = field(i + 1, j + 1);
        MPI_Send(tmp_mat.data(), field.nx * field.ny,
                 MPI_DOUBLE, 0, 22, MPI_COMM_WORLD);
    }
}
// Read the initial temperature distribution from a file
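// The file is expected to start with a header line (a comment token followed
// by the full grid dimensions), followed by the nx * ny temperature values.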
void read_field(Field& field, std::string filename,
                const ParallelData parallel)
{
    std::ifstream file;
    file.open(filename);

    // Read the header
    std::string line, comment;
    std::getline(file, line);
    int nx_full, ny_full;
    std::stringstream(line) >> comment >> nx_full >> ny_full;

    field.setup(nx_full, ny_full, parallel);

    // Read the full array
    auto full = Matrix<double> (nx_full, ny_full);
    if (0 == parallel.rank) {
        for (int i = 0; i < nx_full; i++)
            for (int j = 0; j < ny_full; j++)
                file >> full(i, j);
    }
    file.close();

    // Inner region (no boundaries)
    auto inner = Matrix<double> (field.nx, field.ny);
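    // Scatter field.nx full rows to every rank; this relies on nx_full being
    // evenly divisible by the number of MPI processes.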
    MPI_Scatter(full.data(), field.nx * ny_full, MPI_DOUBLE, inner.data(),
                field.nx * ny_full, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    // Copy to the array containing also boundaries
    for (int i = 0; i < field.nx; i++)
        for (int j = 0; j < field.ny; j++)
            field(i + 1, j + 1) = inner(i, j);

    // Set the boundary values
    for (int i = 0; i < field.nx + 2; i++) {
        // left boundary
        field(i, 0) = field(i, 1);
        // right boundary
        field(i, field.ny + 1) = field(i, field.ny);
    }
    for (int j = 0; j < field.ny + 2; j++) {
        // top boundary
        field(0, j) = field(1, j);
        // bottom boundary
        field(field.nx + 1, j) = field(field.nx, j);
    }
}
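
/* A minimal usage sketch (hypothetical driver code; the actual main routine
 * lives elsewhere in this repository). After a ParallelData object has been
 * initialized and a Field declared:
 *
 *     read_field(field, "input.dat", parallel);   // "input.dat" is a placeholder name
 *     // ... time stepping ...
 *     write_field(field, iter, parallel);
 *
 * reads the initial temperature distribution and dumps a PNG snapshot of the
 * field at iteration iter.
 */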