-
Notifications
You must be signed in to change notification settings - Fork 0
/
hpc-perf-evaluation.py
executable file
·162 lines (131 loc) · 5.27 KB
/
hpc-perf-evaluation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
#!/usr/bin/env python3
# Fabrizio Margotta 789072
#
# Performance-evaluation driver for the omp-earthquake OpenMP benchmark:
# runs the executable for several matrix sizes and thread counts, averages
# the timings over repeated runs, and writes timing / speedup / strong /
# weak scaling tables as CSV files under result_dir.
import os
import csv
import logging
logging.basicConfig(level=logging.INFO)
# executable variables
exe_folder = "../"
exe_name = exe_folder + "omp-earthquake"  # path of the benchmark binary
steps = 100000  # simulation steps passed to every run
matrix_sides = [256, 512, 1024]  # problem sizes exercised in the strong/speedup runs
# matrix_sides = [256]
# [1, 2, 4, 8, 16, 32, 64] threads used
threads = list(2**x for x in range(0, 6))  # NOTE(review): yields [1,2,4,8,16,32] — 64 in the comment above is not produced
# threads = [1, 8]
# run the simulation N times and take the average execution time
total_run = 5
result_dir = "../data"  # raw per-run outputs and final CSVs land here
os.makedirs(result_dir, exist_ok=True)
compute_weak = True  # NOTE(review): never read anywhere below — presumably meant to gate compute_weak_scaling()
weak_psize = 256  # base problem size for the weak-scaling experiment (1 thread)
# csv info
timings_filename = result_dir + "/omp-timings-py.csv"
speedup_filename = result_dir + "/omp-speedup-py.csv"
strong_filename = result_dir + "/omp-strong-py.csv"
weak_filename = result_dir + "/omp-weak-py.csv"
def compute_weak_scaling(max_threads=16):
    """Run the weak-scaling experiment and fill the global ``weak_scaling``
    dict with the measured efficiency for 1..max_threads threads.

    For p threads the problem side is scaled as ``weak_psize * p**(1/3)`` so
    the total work (proportional to side**3) grows linearly with p and the
    work per thread stays constant.  Each configuration is run ``total_run``
    times; the efficiency stored is t(1 thread) / t(p threads).

    Args:
        max_threads: largest thread count to measure (default 16, matching
            the original hard-coded range).
    """
    logging.info("###################### WEAK SCALING #######################")
    t_serial = None  # set on the first (1-thread) iteration, used afterwards
    for thread in range(1, max_threads + 1):
        avg_exe_time = 0.0
        # cube-root scaling keeps per-thread work constant: side**3 grows ~ p
        p_size_weak = round(weak_psize * (thread**(1. / 3.)), 4)
        for k in range(1, total_run + 1):
            out_file = "/dev/null"
            res_file = result_dir + "/res_weak{}_{}".format(thread, k)
            logging.info("run #{}: L{}, {} thread".format(
                k, p_size_weak, thread))
            # stdout is discarded; stderr (which carries the timing line
            # parsed below) is redirected into res_file
            cmd = "OMP_NUM_THREADS={} ./{} {} {} > {} 2>{}".format(
                thread, exe_name, steps, p_size_weak, out_file, res_file)
            logging.info(cmd)
            os.system(cmd)
            # Default to 0.0 so a run whose output lacks a "parallel" line no
            # longer raises NameError (the original left exe_time unbound).
            exe_time = 0.0
            with open(res_file) as f:
                for line in f:
                    if "parallel" in line:
                        exe_time = float(line.split(':')[1].split(' ')[1])
                        logging.info("exe_time #{} = {}".format(k, exe_time))
            if exe_time == 0.0:
                logging.warning("no 'parallel' timing found in %s", res_file)
            avg_exe_time += exe_time
            logging.info("avg_exe_time #{} = {}".format(k, avg_exe_time))
        avg_exe_time /= total_run
        logging.info("avg_exe_time = {}".format(avg_exe_time))
        if thread == 1:
            t_serial = avg_exe_time  # baseline for all later efficiencies
            logging.info("t_serial = {}".format(t_serial))
        weak_scaling[thread] = round(float(t_serial / avg_exe_time), 4)
        logging.info("weak_scaling = {}".format(weak_scaling))
# Run the weak-scaling experiment first; efficiencies land in weak_scaling.
weak_scaling = dict()
compute_weak_scaling()

# Result tables for the speedup / strong-scaling campaign below; each maps
# thread count -> {matrix side -> value}.
avg_exe_time = dict()
t_serial = dict()
speedup = dict()
strong_scaling = dict()
for n_threads in threads:  # one inner table per thread count
    avg_exe_time[n_threads] = dict()
    speedup[n_threads] = dict()
    strong_scaling[n_threads] = dict()
def get_execution_time(filename, regex):
    """Return the timing found on the last line of *filename* containing
    *regex* (a plain substring, despite the name — no regular-expression
    matching is performed), or 0 when no line matches.

    A matching line is expected to look like ``...: <seconds> ...``; the
    value after the colon is parsed as a float.
    """
    found = 0
    with open(filename) as handle:
        for row in handle:
            if regex not in row:
                continue
            found = float(row.split(':')[1].split(' ')[1])
            logging.info("exe_time = {}".format(found))
    return found
# run speedup / strong
logging.info("################## SPEEDUP & STRONG SCALING ###################")
for thread in threads:  # [1, 2, 4, 8, 16, 32]
    for side in matrix_sides:
        avg_exe_time[thread][side] = 0
        for n_run in range(1, total_run + 1):
            logging.info("run #{}: L{}, {} thread".format(n_run, side, thread))
            # run variables
            out_file = "/dev/null"
            res_file = result_dir + "/res{}_{}_{}".format(side, thread, n_run)
            # stdout discarded; stderr (carrying the timing line) -> res_file
            cmd = "OMP_NUM_THREADS={} {} {} {} > {} 2>{}".format(
                thread, exe_name, steps, side, out_file, res_file)
            logging.info(cmd)
            # execute run
            os.system(cmd)
            # get time
            exe_time = get_execution_time(res_file, "parallel")
            logging.info("exe_time #{} = {}".format(n_run, exe_time))
            avg_exe_time[thread][side] += exe_time
            logging.info("sum_avg_exe_time #{} = {}".format(
                n_run, avg_exe_time[thread][side]))
        # mean over the total_run repetitions
        avg_exe_time[thread][side] = round(
            avg_exe_time[thread][side] / total_run, 4)
        logging.info("avg_exe_time = {}".format(avg_exe_time))
        if thread == 1:
            # baseline time: threads starts at 1, so t_serial[side] is
            # always populated before the later iterations read it
            t_serial[side] = avg_exe_time[thread][side]
        # speedup
        speedup[thread][side] = round(
            float(t_serial[side] / avg_exe_time[thread][side]), 4)
        logging.info("speedup = {}".format(speedup))
        # strong scaling
        strong_scaling[thread][side] = round(
            float(speedup[thread][side] / thread), 4)
        logging.info("strong_scaling = {}".format(strong_scaling))
logging.info("####################### SAVING TO FILE ########################")
def save(column_headers, filename, data, row_keys=None):
    """Write *data* to *filename* as a CSV table.

    Args:
        column_headers: column keys (e.g. the matrix sides); each must be a
            key of every per-thread dict in *data*.
        filename: destination CSV path (overwritten).
        data: dict mapping thread count -> {column key -> value}.
        row_keys: thread counts to emit, one per row; defaults to the global
            ``threads`` list (preserving the original behavior).

    The output has a leading THREAD column followed by *column_headers*.
    The caller's row dicts are copied before the THREAD key is added, so
    *data* is never mutated (the original version injected "THREAD" into
    every ``data[thread]`` dict as a side effect).
    """
    if row_keys is None:
        row_keys = threads  # module-level list of thread counts
    headers = ["THREAD"] + list(column_headers)
    # newline='' is required by the csv module to avoid extra blank lines
    # on platforms with \r\n line endings
    with open(filename, mode='w', newline='') as f:
        writer = csv.DictWriter(f, headers)
        writer.writeheader()
        for thread in row_keys:
            row = dict(data[thread])  # copy: leave the caller's dict intact
            row["THREAD"] = thread
            writer.writerow(row)
# Dump the averaged timings, speedups and strong-scaling efficiencies:
# one CSV per metric, one row per thread count.
save(matrix_sides, timings_filename, avg_exe_time)
save(matrix_sides, speedup_filename, speedup)
save(matrix_sides, strong_filename, strong_scaling)

# The weak-scaling table has a different shape (no matrix-side columns),
# so it is written by hand: one row per thread count p.
with open(weak_filename, mode='w') as weak_file:
    weak_writer = csv.DictWriter(weak_file, ['p', 'WEAK'])
    weak_writer.writeheader()
    for p in range(1, 17):
        weak_writer.writerow({"p": p, "WEAK": weak_scaling[p]})