lab09
10  .vscode/settings.json  vendored
@@ -60,6 +60,14 @@
     "stop_token": "cpp",
     "thread": "cpp",
     "array": "cpp",
-    "mutex": "cpp"
+    "mutex": "cpp",
+    "algorithm": "cpp",
+    "functional": "cpp",
+    "list": "cpp",
+    "random": "cpp",
+    "set": "cpp",
+    "unordered_map": "cpp",
+    "xhash": "cpp",
+    "xtree": "cpp"
     }
 }
BIN  lab09/HPC-Lab-09_solved.pdf  Normal file
Binary file not shown.
49  lab09/exc1/ring_com_async.cpp  Normal file
@@ -0,0 +1,49 @@
#include <iostream>
#include <mpi.h>
#include <thread>
#include <chrono>

void printSentValue(int rank, int receiverId)
{
    std::cout << "Proc " << rank << " sent to " << receiverId << std::endl;
}

void printReceiveValue(int rank, int senderId)
{
    std::cout << "Proc " << rank << " received from " << senderId << std::endl;
}

int ringSendRecv(int sendValue)
{
    int numProc;
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    int receiverId = (sendValue + 1) % numProc;
    // add numProc before taking the modulo so rank 0 wraps to numProc - 1
    // instead of the invalid rank -1
    int senderId = (sendValue - 1 + numProc) % numProc;
    int recvValue;
    MPI_Request sendReq, recvReq;
    MPI_Status statusRec, statusSend;
    MPI_Isend(&sendValue, 1, MPI_INT, receiverId, 0, MPI_COMM_WORLD, &sendReq);
    MPI_Irecv(&recvValue, 1, MPI_INT, senderId, 0, MPI_COMM_WORLD, &recvReq);
    MPI_Wait(&sendReq, &statusSend);
    printSentValue(sendValue, receiverId);
    // Sleep not necessary, only for pretty .out file
    std::this_thread::sleep_for(std::chrono::seconds(1));
    MPI_Wait(&recvReq, &statusRec);
    printReceiveValue(sendValue, senderId);
    return recvValue;
}

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    int rank, numProc;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    ringSendRecv(rank);
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    std::cout << "Process on " << processor_name << ", " << rank << "/" << numProc << " finished.\n";
    MPI_Finalize();
}
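The same ring exchange could also be written with one combined call instead of Isend/Irecv plus two Waits. A minimal sketch, not part of this commit, assuming rank, numProc, receiverId, and senderId as in the file above; MPI_Sendrecv posts the send and the receive together, so no request handling or deadlock-avoidance ordering is needed:

    // Sketch: ring exchange via MPI_Sendrecv (assumed alternative, not the committed code)
    int sendValue = rank;
    int recvValue;
    MPI_Sendrecv(&sendValue, 1, MPI_INT, receiverId, 0,
                 &recvValue, 1, MPI_INT, senderId, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);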
49  lab09/exc1/ring_com_blocking.cpp  Normal file
@@ -0,0 +1,49 @@
#include <iostream>
#include <mpi.h>
#include <thread>
#include <chrono>

void printSentValue(int rank, int receiverId) {
    std::cout << "Proc " << rank << " sent to " << receiverId << std::endl;
}

void printReceiveValue(int rank, int senderId) {
    std::cout << "Proc " << rank << " received from " << senderId << std::endl;
}

int ringSendRecv(int sendValue) {
    int numProc;
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    int receiverId = (sendValue + 1) % numProc;
    // add numProc before the modulo so rank 0 wraps to numProc - 1 instead of -1
    int senderId = (sendValue - 1 + numProc) % numProc;
    int recvValue;
    if (sendValue == 0) {
        // rank 0 sends first, breaking the cyclic wait that would otherwise
        // deadlock when every rank blocks in MPI_Recv
        MPI_Send(&sendValue, 1, MPI_INT, receiverId, 0, MPI_COMM_WORLD);
        printSentValue(sendValue, receiverId);
        // tag 0 everywhere: the last rank sends with tag 0, so receiving
        // with any other tag would block forever
        MPI_Recv(&recvValue, 1, MPI_INT, senderId, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        // Sleep not necessary, only for pretty .out file
        std::this_thread::sleep_for(std::chrono::seconds(1));
        printReceiveValue(sendValue, senderId);
    } else {
        MPI_Recv(&recvValue, 1, MPI_INT, senderId, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        std::this_thread::sleep_for(std::chrono::seconds(1));
        printReceiveValue(sendValue, senderId);
        MPI_Send(&sendValue, 1, MPI_INT, receiverId, 0, MPI_COMM_WORLD);
        printSentValue(sendValue, receiverId);
    }
    return recvValue;
}

int main(int argc, char* argv[]) {
    MPI_Init(&argc, &argv);

    int rank, numProc;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    ringSendRecv(rank);
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    std::cout << "Process on " << processor_name << ", " << rank << "/" << numProc << " finished.\n";
    MPI_Finalize();
}
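The rank-0-first ordering above serializes the ring: each rank waits for its predecessor before sending. An alternative that keeps neighbouring pairs progressing concurrently is to let even ranks send first and odd ranks receive first. A minimal sketch, not part of this commit, assuming sendValue, recvValue, receiverId, and senderId as in the file above and numProc > 1:

    // Sketch: even/odd ordering instead of the rank-0-first chain (assumed alternative).
    // Even ranks send before receiving, odd ranks receive before sending,
    // so every blocking MPI_Send is paired with an already-posted MPI_Recv.
    if (sendValue % 2 == 0) {
        MPI_Send(&sendValue, 1, MPI_INT, receiverId, 0, MPI_COMM_WORLD);
        MPI_Recv(&recvValue, 1, MPI_INT, senderId, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else {
        MPI_Recv(&recvValue, 1, MPI_INT, senderId, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Send(&sendValue, 1, MPI_INT, receiverId, 0, MPI_COMM_WORLD);
    }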
74  lab09/exc2/computePi.cpp  Normal file
@@ -0,0 +1,74 @@
#include <chrono>
#include <iostream>
#include <iomanip>
#include <mpi.h>
#include <fstream>

double computePartialSum(int64_t i0, int64_t i1, double step)
{
    // compute this rank's part of the integral via the midpoint rule
    double sum = 0.0;
    for (int64_t i = i0; i < i1; i++)
    {
        double x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    return sum;
}

int main(int argc, char *argv[])
{
    auto start = std::chrono::system_clock::now();

    MPI_Init(&argc, &argv);

    int rank, numTasks;
    MPI_Comm_size(MPI_COMM_WORLD, &numTasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // roughly 1e11, but divisible by 2, 4, 6, 8, 12, 16, 18, ...
    const int64_t numSteps = 1024LL * 1024 * 1024 * 9 * 9;

    // compute chunk boundaries; the last rank also takes any remainder
    int64_t chunkSize = numSteps / numTasks;
    int64_t i0 = rank * chunkSize;
    int64_t i1 = (rank == numTasks - 1) ? numSteps : i0 + chunkSize;

    double step = 1.0 / (double)numSteps;

    // compute partial sum
    double sum = computePartialSum(i0, i1, step);

    if (rank != 0)
    {
        // every rank except 0 sends its partial sum to rank 0
        MPI_Send(&sum, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
    }
    else
    {
        // rank 0 receives all partial sums and accumulates them
        double recvValue;
        for (int i = 1; i < numTasks; i++)
        {
            MPI_Recv(&recvValue, 1, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            sum += recvValue;
        }
    }

    if (rank == 0)
    {
        double pi = step * sum;
        auto end = std::chrono::system_clock::now();

        std::chrono::duration<double, std::milli> diff = end - start;

        std::string filename = "results" + std::to_string(numTasks) + ".txt";
        std::ofstream outFile(filename);
        outFile << std::fixed << std::setprecision(16);
        outFile << "π ≈ " << pi << ", Runtime: " << diff.count() << " ms, Tasks: " << numTasks << ", Step: " << step << ", Sum: " << sum << std::endl;
        outFile.close();
    }

    MPI_Finalize();
    return 0;
}
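The kernel evaluates the midpoint rule for $\pi = \int_0^1 \frac{4}{1+x^2}\,dx$, and the gather of partial sums on rank 0 could equally be a single collective. A minimal sketch of that alternative, not what this commit does, assuming sum holds the local partial sum as above:

    // Sketch: collective reduction instead of the manual send/receive loop
    // (assumed alternative, not the committed code).
    double totalSum = 0.0;
    MPI_Reduce(&sum, &totalSum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    // on rank 0: pi = step * totalSum

MPI_Reduce also lets the MPI implementation combine values in a tree rather than funneling every message through rank 0 sequentially.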
50  lab09/exc2/plot_scaling.py  Normal file
@@ -0,0 +1,50 @@
import glob
import re
import matplotlib.pyplot as plt

# Find all result files like results1.txt, results2.txt, etc.
files = sorted(glob.glob("results*.txt"), key=lambda f: int(re.search(r"\d+", f).group()))

cores = [1, 2, 4, 6, 8, 12, 16, 18, 24, 32, 36, 48]
core_increase = [core / cores[i - 1] for i, core in enumerate(cores[1:], start=1)]
core_increase.insert(0, 1)

runtimes = {}
for file in files:
    with open(file, "r") as f:
        line = f.readline()
        match = re.search(r"Runtime:\s+([\d.]+)\s+ms, Tasks:\s+(\d+)", line)
        if match:
            runtime = float(match.group(1))
            tasks = int(match.group(2))
            runtimes[tasks] = runtime
        else:
            print(f"Skipping unrecognized format in {file}")

# Sort by number of tasks
tasks_sorted = sorted(runtimes.keys())
T1 = runtimes[1]

speedups = [T1 / runtimes[t] for t in tasks_sorted]
# divide each speedup by its own task count (pairing with the hardcoded
# cores list would drift out of sync if a results file is missing)
efficiencies = [s / t for s, t in zip(speedups, tasks_sorted)]

print(core_increase)
print(speedups)

# Plot speedup
plt.figure()
plt.plot(tasks_sorted, speedups, marker='o')
plt.title("Speedup vs. Number of Tasks")
plt.xlabel("Tasks")
plt.ylabel("Speedup")
plt.grid(True)

# Plot efficiency
plt.figure()
plt.plot(tasks_sorted, efficiencies, marker='o')
plt.title("Efficiency vs. Number of Tasks")
plt.xlabel("Tasks")
plt.ylabel("Efficiency")
plt.grid(True)

plt.show()
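For reference, the two plotted quantities are the standard strong-scaling measures, with $T(p)$ the runtime on $p$ tasks:

\[ S(p) = \frac{T(1)}{T(p)}, \qquad E(p) = \frac{S(p)}{p} \]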
1  lab09/exc2/results1.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535461467, Runtime: 408382.0540599999949336 ms, Tasks: 1, Step: 0.0000000000114978, Sum: 273234013512.7748107910156250
1  lab09/exc2/results12.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535922161, Runtime: 35373.3204059999989113 ms, Tasks: 12, Step: 0.0000000000114978, Sum: 273234013516.7816162109375000
1  lab09/exc2/results16.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535889263, Runtime: 28642.3614500000003318 ms, Tasks: 16, Step: 0.0000000000114978, Sum: 273234013516.4954833984375000
1  lab09/exc2/results18.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535928413, Runtime: 25772.6537300000018149 ms, Tasks: 18, Step: 0.0000000000114978, Sum: 273234013516.8359985351562500
1  lab09/exc2/results2.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535641585, Runtime: 210387.5220599999884143 ms, Tasks: 2, Step: 0.0000000000114978, Sum: 273234013514.3413696289062500
1  lab09/exc2/results24.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535901955, Runtime: 19925.1983489999984158 ms, Tasks: 24, Step: 0.0000000000114978, Sum: 273234013516.6058654785156250
1  lab09/exc2/results32.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535895515, Runtime: 15541.2492930000007618 ms, Tasks: 32, Step: 0.0000000000114978, Sum: 273234013516.5498657226562500
1  lab09/exc2/results36.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535884315, Runtime: 14086.3550599999998667 ms, Tasks: 36, Step: 0.0000000000114978, Sum: 273234013516.4524841308593750
1  lab09/exc2/results4.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535827143, Runtime: 105416.3563049999938812 ms, Tasks: 4, Step: 0.0000000000114978, Sum: 273234013515.9552307128906250
1  lab09/exc2/results48.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535899494, Runtime: 11168.0419720000008965 ms, Tasks: 48, Step: 0.0000000000114978, Sum: 273234013516.5844726562500000
1  lab09/exc2/results6.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535877827, Runtime: 70373.2869280000013532 ms, Tasks: 6, Step: 0.0000000000114978, Sum: 273234013516.3960266113281250
1  lab09/exc2/results8.txt  Normal file
@@ -0,0 +1 @@
π ≈ 3.1415926535919958, Runtime: 52857.8155059999990044 ms, Tasks: 8, Step: 0.0000000000114978, Sum: 273234013516.7624511718750000