IOR
md-workbench.c
Go to the documentation of this file.
1 #include <mpi.h>
2 
3 #include <time.h>
4 #include <stdio.h>
5 #include <errno.h>
6 #include <string.h>
7 #include <stdlib.h>
8 #include <math.h>
9 #include <assert.h>
10 
11 #include "md-workbench.h"
12 #include "config.h"
13 #include "aiori.h"
14 #include "utilities.h"
15 #include "parse_options.h"
16 
17 /*
18 This is the modified version md-workbench-fs that can utilize AIORI.
19 It follows the hierarchical file system semantics in contrast to the md-workbench (without -fs) which has dataset and object semantics.
20  */
21 
22 #define DIRMODE S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IWGRP|S_IXGRP|S_IROTH|S_IXOTH
23 
24 #define CHECK_MPI_RET(ret) if (ret != MPI_SUCCESS){ printf("Unexpected error in MPI on Line %d\n", __LINE__);}
25 #define LLU (long long unsigned)
26 #define min(a,b) (a < b ? a : b)
27 
28 #define oprintf(...) do { fprintf(o.logfile, __VA_ARGS__); fflush(o.logfile); } while(0);
29 
30 // successfull, errors
31 typedef struct {
32  int suc;
33  int err;
34 } op_stat_t;
35 
36 // A runtime for an operation and when the operation was started
37 typedef struct{
39  float runtime;
41 
42 
43 // statistics for running a single phase
44 typedef struct{ // NOTE: if this type is changed, adjust end_phase() !!!
45  double t; // maximum time
46  double * t_all;
47 
50 
55 
56  // time measurements of individual runs, these are not returned for now by the API!
57  uint64_t repeats;
62 
67 
68  // the maximum time for any single operation
69  double max_op_time;
72 } phase_stat_t;
73 
78  MPI_Comm com;
79  FILE * logfile;
80 
81  char * interface;
82  int num;
83  int precreate;
85 
86  mdworkbench_results_t * results; // the results
87 
88  int offset;
91  int file_size;
92  int read_only;
95  int gpu_memory_flags; /* use the GPU to store the data */
96 
99 
103 
104  //int limit_memory;
105  //int limit_memory_between_phases;
106 
109 
112 
114  char * prefix; // directory to work on
115 
117  int rank;
118  int size;
121 
124 
126 };
127 
129 
130 static void def_dset_name(char * out_name, int n, int d){
131  sprintf(out_name, "%s/%d_%d", o.prefix, n, d);
132 }
133 
134 static void def_obj_name(char * out_name, int n, int d, int i){
135  sprintf(out_name, "%s/%d_%d/file-%d", o.prefix, n, d, i);
136 }
137 
139  o = (struct benchmark_options){
140  .interface = "POSIX",
141  .prefix = "./out",
142  .num = 1000,
143  .random_buffer_offset = -1,
144  .precreate = 3000,
145  .dset_count = 10,
146  .offset = 1,
147  .iterations = 3,
148  .file_size = 3901,
149  .run_info_file = "md-workbench.status"};
150 }
151 
152 static void mdw_wait(double runtime){
153  double waittime = runtime * o.relative_waiting_factor;
154  //printf("waittime: %e\n", waittime);
155  if(waittime < 0.01){
156  double start;
157  start = GetTimeStamp();
158  double cur = GetTimeStamp();
159  double end = cur + waittime;
160  while (cur < end){
161  cur = GetTimeStamp();
162  }
163  }else{
164  struct timespec w;
165  w.tv_sec = (time_t) (waittime);
166  w.tv_nsec = (long) ((waittime - w.tv_sec) * 1000 * 1000 * 1000);
167  nanosleep(& w, NULL);
168  }
169 }
170 
/* Initialize a phase_stat_t for a phase with up to `repeats` timed iterations:
 * zero all counters, then allocate one time_result_t slot per repeat for each
 * of the four timed operation types (create/read/stat/delete).
 * NOTE(review): the malloc results are not checked; callers rely on the
 * allocations succeeding — consider adding OOM handling. With repeats == 0
 * malloc(0) may legally return NULL, which end_phase() treats as "nothing to
 * free" via its p->time_create check. */
static void init_stats(phase_stat_t * p, size_t repeats){
  memset(p, 0, sizeof(phase_stat_t));
  p->repeats = repeats;
  size_t timer_size = repeats * sizeof(time_result_t);
  p->time_create = (time_result_t *) malloc(timer_size);
  p->time_read = (time_result_t *) malloc(timer_size);
  p->time_stat = (time_result_t *) malloc(timer_size);
  p->time_delete = (time_result_t *) malloc(timer_size);
}
180 
181 static float add_timed_result(double start, double phase_start_timer, time_result_t * results, size_t pos, double * max_time, double * out_op_time){
182  float curtime = start - phase_start_timer;
183  double op_time = GetTimeStamp() - start;
184  results[pos].runtime = (float) op_time;
185  results[pos].time_since_app_start = curtime;
186  if (op_time > *max_time){
187  *max_time = op_time;
188  }
189  *out_op_time = op_time;
190  return curtime;
191 }
192 
194  printf("phase\t\td name\tcreate\tdelete\tob nam\tcreate\tread\tstat\tdelete\tt_inc_b\tt_no_bar\tthp\tmax_t\n");
195 }
196 
197 static int sum_err(phase_stat_t * p){
198  return p->dset_create.err + p->dset_delete.err + p->obj_create.err + p->obj_read.err + p->obj_stat.err + p->obj_delete.err;
199 }
200 
/* Arithmetic mean of the first `count` entries of arr.
 * Fix: the function previously ignored its `count` parameter and iterated
 * over the global o.size instead. All current callers pass o.size, so
 * behavior is unchanged for them, but the function is now correct for any
 * caller and no longer depends on global state.
 * Precondition: count > 0. */
static double statistics_mean(int count, double * arr){
  double sum = 0;
  for(int i=0; i < count; i++){
    sum += arr[i];
  }
  return sum / count;
}
208 
209 static double statistics_std_dev(int count, double * arr){
210  double mean = statistics_mean(count, arr);
211  double sum = 0;
212  for(int i=0; i < o.size; i++){
213  sum += (mean - arr[i])*(mean - arr[i]);
214  }
215  return sqrt(sum / (o.size-1));
216 }
217 
/* Minimum and maximum of the first `count` entries of arr, returned through
 * out_min / out_max. With count <= 0 both outputs are set to 0.
 * Fixes: (1) the function previously ignored its `count` parameter and
 * iterated over the global o.size; (2) the maximum was seeded with 0, which
 * is wrong if all values are negative — both extrema are now seeded from
 * arr[0]. All current callers pass o.size and non-negative runtimes, so
 * behavior is unchanged for them. */
static void statistics_minmax(int count, double * arr, double * out_min, double * out_max){
  if(count <= 0){
    *out_min = 0;
    *out_max = 0;
    return;
  }
  double min = arr[0];
  double max = arr[0];
  for(int i=1; i < count; i++){
    min = (arr[i] < min) ? arr[i] : min;
    max = (arr[i] > max) ? arr[i] : max;
  }
  *out_min = min;
  *out_max = max;
}
228 
/* Format a human-readable report for one phase into buff.
 *   name         - phase name; its first character selects the report layout
 *                  ('b'enchmark, 'p'recreate, 'c'leanup).
 *   p            - the (possibly rank-0 aggregated) phase statistics.
 *   t            - phase runtime used for all rate computations.
 *   print_global - nonzero when p holds the global aggregate; also copies the
 *                  numbers into the API result structure.
 * NOTE(review): this dump is missing several original source lines inside
 * this function (marked below); verify against the repository before editing. */
static void print_p_stat(char * buff, const char * name, phase_stat_t * p, double t, int print_global){
  // aggregate throughput: created + read objects, o.file_size bytes each, in MiB/s
  const double tp = (double)(p->obj_create.suc + p->obj_read.suc) * o.file_size / t / 1024 / 1024;

  const int errs = sum_err(p);
  double r_min = 0;
  double r_max = 0;
  double r_mean = 0;
  double r_std = 0;

  if(p->t_all){
    // we can compute several derived values that provide insight about quality of service, latency distribution and load balancing
    statistics_minmax(o.size, p->t_all, & r_min, & r_max);
    r_mean = statistics_mean(o.size, p->t_all);
    r_std = statistics_std_dev(o.size, p->t_all);
  }

  if (o.print_detailed_stats){
    // machine-parsable tab-separated layout (matches the header row printed at startup)
    sprintf(buff, "%s \t%d\t%d\t%d\t%d\t%d\t%d\t%.3fs\t%.3fs\t%.2f MiB/s %.4e", name, p->dset_create.suc, p->dset_delete.suc, p->obj_create.suc, p->obj_read.suc, p->obj_stat.suc, p->obj_delete.suc, p->t, t, tp, p->max_op_time);

    if (errs > 0){
      sprintf(buff, "%s err\t%d\t%d\t%d\t%d\t%d\t%d", name, p->dset_create.err, p->dset_delete.err, p->obj_create.err, p->obj_read.err, p->obj_stat.err, p->obj_delete.err);
    }
  }else{
    int pos = 0;
    // single line
    pos += sprintf(buff, "%s process max:%.2fs ", name, t);
    if(print_global){
      pos += sprintf(buff + pos, "min:%.2fs mean: %.2fs balance:%.1f stddev:%.1f ", r_min, r_mean, r_min/r_max * 100.0, r_std);
    }
    // each benchmark iteration performs create+stat+read+delete, or only stat+read when read-only
    int ioops_per_iter = 4;
    if(o.read_only){
      ioops_per_iter = 2;
    }

    double rate;

    switch(name[0]){
    case('b'):
      rate = p->obj_read.suc * ioops_per_iter / t;
      pos += sprintf(buff + pos, "rate:%.1f iops/s objects:%d rate:%.1f obj/s tp:%.1f MiB/s op-max:%.4es",
        rate, // write, stat, read, delete
        p->obj_read.suc,
        p->obj_read.suc / t,
        tp,
        p->max_op_time);

      if(o.relative_waiting_factor > 1e-9){
        pos += sprintf(buff + pos, " waiting_factor:%.2f", o.relative_waiting_factor);
      }
      break;
    case('p'):
      rate = (p->dset_create.suc + p->obj_create.suc) / t;
      pos += sprintf(buff + pos, "rate:%.1f iops/s dsets: %d objects:%d rate:%.3f dset/s rate:%.1f obj/s tp:%.1f MiB/s op-max:%.4es",
        rate,
        p->dset_create.suc,
        p->obj_create.suc,
        p->dset_create.suc / t,
        p->obj_create.suc / t,
        tp,
        p->max_op_time);
      break;
    case('c'):
      rate = (p->obj_delete.suc + p->dset_delete.suc) / t;
      pos += sprintf(buff + pos, "rate:%.1f iops/s objects:%d dsets: %d rate:%.1f obj/s rate:%.3f dset/s op-max:%.4es",
        rate,
        p->obj_delete.suc,
        p->dset_delete.suc,
        p->obj_delete.suc / t,
        p->dset_delete.suc / t,
        p->max_op_time);
      break;
    default:
      pos = sprintf(buff, "%s: unknown phase", name);
      break;
    }

    if(print_global){
      /* NOTE(review): the declaration of `res` is missing from this dump
       * (presumably a pointer into o.results); confirm against the repository. */
      res->errors = errs;
      o.results->errors += errs;
      res->rate = rate;
      res->max_op_time = p->max_op_time;
      res->runtime = t;
      res->iterations_done = p->repeats;
    }

    if(! o.quiet_output || errs > 0){
      pos += sprintf(buff + pos, " (%d errs", errs);
      if(errs > 0){
        pos += sprintf(buff + pos, "!!!)" );
      }else{
        pos += sprintf(buff + pos, ")" );
      }
    }
    if(! o.quiet_output && p->stonewall_iterations){
      pos += sprintf(buff + pos, " stonewall-iter:%d", p->stonewall_iterations);
    }

    // append latency quantiles (min, q1, median, q3, q90, q99, max) per op type when present
    if(p->stats_read.max > 1e-9){
      time_statistics_t stat = p->stats_read;
      pos += sprintf(buff + pos, " read(%.4es, %.4es, %.4es, %.4es, %.4es, %.4es, %.4es)", stat.min, stat.q1, stat.median, stat.q3, stat.q90, stat.q99, stat.max);
    }
    if(p->stats_stat.max > 1e-9){
      time_statistics_t stat = p->stats_stat;
      pos += sprintf(buff + pos, " stat(%.4es, %.4es, %.4es, %.4es, %.4es, %.4es, %.4es)", stat.min, stat.q1, stat.median, stat.q3, stat.q90, stat.q99, stat.max);
    }
    if(p->stats_create.max > 1e-9){
      /* NOTE(review): the local `stat` declaration line (cf. the read/stat
       * branches above) is missing from this dump. */
      pos += sprintf(buff + pos, " create(%.4es, %.4es, %.4es, %.4es, %.4es, %.4es, %.4es)", stat.min, stat.q1, stat.median, stat.q3, stat.q90, stat.q99, stat.max);
    }
    if(p->stats_delete.max > 1e-9){
      /* NOTE(review): the local `stat` declaration line is missing here too. */
      pos += sprintf(buff + pos, " delete(%.4es, %.4es, %.4es, %.4es, %.4es, %.4es, %.4es)", stat.min, stat.q1, stat.median, stat.q3, stat.q90, stat.q99, stat.max);
    }
  }
}
345 
347  return x->runtime < y->runtime ? -1 : (x->runtime > y->runtime ? +1 : 0);
348 }
349 
350 static double runtime_quantile(int repeats, time_result_t * times, float quantile){
351  int pos = round(quantile * (repeats - 1) + 0.49);
352  assert(pos < repeats);
353  return times[pos].runtime;
354 }
355 
/* Gather the per-operation timing arrays from all ranks onto rank 0.
 *   repeats      - number of valid entries in this rank's `times` array
 *                  (may differ per rank due to the stonewall).
 *   max_repeats  - upper bound on entries any rank can send (receive size).
 *   global_times - rank 0 output buffer (must hold max_repeats * o.size entries).
 * Returns the total number of collected entries on rank 0, and 0 elsewhere.
 * NOTE(review): the transfer treats each time_result_t as exactly two floats
 * (runtime, time_since_app_start) — this silently depends on the struct
 * containing no other members or padding; confirm against md-workbench.h. */
static uint64_t aggregate_timers(int repeats, int max_repeats, time_result_t * times, time_result_t * global_times){
  uint64_t count = 0;
  int ret;
  // due to stonewall, the number of repeats may be different per process
  if(o.rank == 0){
    MPI_Status status;
    memcpy(global_times, times, repeats * 2 * sizeof(float)); // rank 0's own entries first
    count += repeats;
    for(int i=1; i < o.size; i++){
      int cnt;
      ret = MPI_Recv(& global_times[count], max_repeats*2, MPI_FLOAT, i, 888, o.com, & status);
      CHECK_MPI_RET(ret)
      MPI_Get_count(& status, MPI_FLOAT, & cnt); // actual floats received from rank i
      count += cnt / 2; // two floats per recorded operation
    }
  }else{
    ret = MPI_Send(times, repeats * 2, MPI_FLOAT, 0, 888, o.com);
    CHECK_MPI_RET(ret)
  }

  return count;
}
378 
379 static void compute_histogram(const char * name, time_result_t * times, time_statistics_t * stats, size_t repeats, int writeLatencyFile){
380  if(writeLatencyFile && o.latency_file_prefix ){
381  char file[MAX_PATHLEN];
382  sprintf(file, "%s-%.2f-%d-%s.csv", o.latency_file_prefix, o.relative_waiting_factor, o.global_iteration, name);
383  FILE * f = fopen(file, "w+");
384  if(f == NULL){
385  ERRF("%d: Error writing to latency file: %s", o.rank, file);
386  return;
387  }
388  fprintf(f, "time,runtime\n");
389  for(size_t i = 0; i < repeats; i++){
390  fprintf(f, "%.7f,%.4e\n", times[i].time_since_app_start, times[i].runtime);
391  }
392  fclose(f);
393  }
394  // now sort the times and pick the quantiles
395  qsort(times, repeats, sizeof(time_result_t), (int (*)(const void *, const void *)) compare_floats);
396  stats->min = times[0].runtime;
397  stats->q1 = runtime_quantile(repeats, times, 0.25);
398  if(repeats % 2 == 0){
399  stats->median = (times[repeats/2].runtime + times[repeats/2 - 1].runtime)/2.0;
400  }else{
401  stats->median = times[repeats/2].runtime;
402  }
403  stats->q3 = runtime_quantile(repeats, times, 0.75);
404  stats->q90 = runtime_quantile(repeats, times, 0.90);
405  stats->q99 = runtime_quantile(repeats, times, 0.99);
406  stats->max = times[repeats - 1].runtime;
407 }
408 
/* Finish one phase: synchronize all ranks, reduce the per-rank statistics to
 * a global aggregate on rank 0, compute latency histograms, print the report,
 * copy the aggregate into the API results, and free the phase buffers.
 * NOTE(review): a few original source lines are missing from this dump
 * (marked below); verify against the repository before editing. */
static void end_phase(const char * name, phase_stat_t * p){
  int ret;
  char buff[MAX_PATHLEN];

  //char * limit_memory_P = NULL;
  MPI_Barrier(o.com);

  // upper bound on per-rank measurements; the benchmark phase is sized differently
  int max_repeats = o.precreate * o.dset_count;
  if(strcmp(name,"benchmark") == 0){
    max_repeats = o.num * o.dset_count;
  }

  // prepare the summarized report
  phase_stat_t g_stat;
  // only rank 0 allocates aggregation space (size 0 elsewhere)
  init_stats(& g_stat, (o.rank == 0 ? 1 : 0) * ((size_t) max_repeats) * o.size);
  // reduce timers
  ret = MPI_Reduce(& p->t, & g_stat.t, 2, MPI_DOUBLE, MPI_MAX, 0, o.com);
  CHECK_MPI_RET(ret)
  if(o.rank == 0) {
    g_stat.t_all = (double*) malloc(sizeof(double) * o.size);
  }
  ret = MPI_Gather(& p->t, 1, MPI_DOUBLE, g_stat.t_all, 1, MPI_DOUBLE, 0, o.com);
  CHECK_MPI_RET(ret)
  // sums the 2*(2+4) consecutive op_stat_t int counters starting at dset_create
  ret = MPI_Reduce(& p->dset_create, & g_stat.dset_create, 2*(2+4), MPI_INT, MPI_SUM, 0, o.com);
  CHECK_MPI_RET(ret)
  ret = MPI_Reduce(& p->max_op_time, & g_stat.max_op_time, 1, MPI_DOUBLE, MPI_MAX, 0, o.com);
  CHECK_MPI_RET(ret)
  if( p->stonewall_iterations ){
    ret = MPI_Reduce(& p->repeats, & g_stat.repeats, 1, MPI_UINT64_T, MPI_MIN, 0, o.com);
    CHECK_MPI_RET(ret)
    /* NOTE(review): one original line is missing from this dump here. */
  }
  int write_rank0_latency_file = (o.rank == 0) && ! o.latency_keep_all;

  // per-phase latency histograms: global (rank 0) and local
  if(strcmp(name,"precreate") == 0){
    uint64_t repeats = aggregate_timers(p->repeats, max_repeats, p->time_create, g_stat.time_create);
    if(o.rank == 0){
      compute_histogram("precreate-all", g_stat.time_create, & g_stat.stats_create, repeats, o.latency_keep_all);
    }
    compute_histogram("precreate", p->time_create, & p->stats_create, p->repeats, write_rank0_latency_file);
  }else if(strcmp(name,"cleanup") == 0){
    uint64_t repeats = aggregate_timers(p->repeats, max_repeats, p->time_delete, g_stat.time_delete);
    if(o.rank == 0) {
      compute_histogram("cleanup-all", g_stat.time_delete, & g_stat.stats_delete, repeats, o.latency_keep_all);
    }
    compute_histogram("cleanup", p->time_delete, & p->stats_delete, p->repeats, write_rank0_latency_file);
  }else if(strcmp(name,"benchmark") == 0){
    uint64_t repeats = aggregate_timers(p->repeats, max_repeats, p->time_read, g_stat.time_read);
    if(o.rank == 0) {
      compute_histogram("read-all", g_stat.time_read, & g_stat.stats_read, repeats, o.latency_keep_all);
    }
    compute_histogram("read", p->time_read, & p->stats_read, p->repeats, write_rank0_latency_file);

    repeats = aggregate_timers(p->repeats, max_repeats, p->time_stat, g_stat.time_stat);
    if(o.rank == 0) {
      compute_histogram("stat-all", g_stat.time_stat, & g_stat.stats_stat, repeats, o.latency_keep_all);
    }
    compute_histogram("stat", p->time_stat, & p->stats_stat, p->repeats, write_rank0_latency_file);

    if(! o.read_only){
      repeats = aggregate_timers(p->repeats, max_repeats, p->time_create, g_stat.time_create);
      if(o.rank == 0) {
        compute_histogram("create-all", g_stat.time_create, & g_stat.stats_create, repeats, o.latency_keep_all);
      }
      compute_histogram("create", p->time_create, & p->stats_create, p->repeats, write_rank0_latency_file);

      repeats = aggregate_timers(p->repeats, max_repeats, p->time_delete, g_stat.time_delete);
      if(o.rank == 0) {
        compute_histogram("delete-all", g_stat.time_delete, & g_stat.stats_delete, repeats, o.latency_keep_all);
      }
      compute_histogram("delete", p->time_delete, & p->stats_delete, p->repeats, write_rank0_latency_file);
    }
  }

  if (o.rank == 0){
    //print the stats:
    print_p_stat(buff, name, & g_stat, g_stat.t, 1);
    oprintf("%s\n", buff);
  }

  // optional per-process report: rank 0 prints its own, then receives the rest in order
  if(o.process_report){
    if(o.rank == 0){
      print_p_stat(buff, name, p, p->t, 0);
      oprintf("0: %s\n", buff);
      for(int i=1; i < o.size; i++){
        MPI_Recv(buff, MAX_PATHLEN, MPI_CHAR, i, 4711, o.com, MPI_STATUS_IGNORE);
        oprintf("%d: %s\n", i, buff);
      }
    }else{
      print_p_stat(buff, name, p, p->t, 0);
      MPI_Send(buff, MAX_PATHLEN, MPI_CHAR, 0, 4711, o.com);
    }
  }

  if(g_stat.t_all){
    free(g_stat.t_all);
  }
  if(p->time_create){
    free(p->time_create);
    free(p->time_read);
    free(p->time_stat);
    free(p->time_delete);
  }
  if(g_stat.time_create){
    free(g_stat.time_create);
    free(g_stat.time_read);
    free(g_stat.time_stat);
    free(g_stat.time_delete);
  }

  // copy the result back for the API
  /* NOTE(review): the declaration of `res` is missing from this dump
   * (presumably a pointer into o.results); confirm against the repository. */
  memcpy(& res->stats_create, & g_stat.stats_create, sizeof(time_statistics_t));
  memcpy(& res->stats_read, & g_stat.stats_read, sizeof(time_statistics_t));
  memcpy(& res->stats_stat, & g_stat.stats_stat, sizeof(time_statistics_t));
  memcpy(& res->stats_delete, & g_stat.stats_delete, sizeof(time_statistics_t));

  o.results->count++;

  // allocate memory if necessary
  // ret = mem_preallocate(& limit_memory_P, o.limit_memory_between_phases, o.verbosity >= 3);
  // if( ret != 0){
  //   printf("%d: Error allocating memory!\n", o.rank);
  // }
  // mem_free_preallocated(& limit_memory_P);
}
535 
/* Pre-creation phase: each rank creates its o.dset_count dataset directories,
 * then fills each with (o.precreate - current_index) objects of o.file_size
 * bytes, timing every create into s->time_create.
 * NOTE(review): several original source lines are missing from this dump
 * (marked below) — including the declaration of the I/O buffer `buf`;
 * verify against the repository before editing. */
void run_precreate(phase_stat_t * s, int current_index){
  char dset[MAX_PATHLEN];
  char obj_name[MAX_PATHLEN];
  int ret;

  for(int i=0; i < o.dset_count; i++){
    def_dset_name(dset, o.rank, i);

    ret = o.backend->mkdir(dset, DIRMODE, o.backend_options);
    if (ret == 0){
      s->dset_create.suc++;
    }else{
      s->dset_create.err++;
      if (! o.ignore_precreate_errors){
        ERRF("%d: Error while creating the dset: %s", o.rank, dset);
      }
    }
  }

  /* NOTE(review): missing lines here — presumably the allocation/initialization
   * of the write buffer `buf` used below. */
  double op_timer; // timer for individual operations
  size_t pos = -1; // position inside the individual measurement array
  double op_time;

  // create the obj
  for(int f=current_index; f < o.precreate; f++){
    for(int d=0; d < o.dset_count; d++){
      pos++;
      def_obj_name(obj_name, o.rank, d, f);

      op_timer = GetTimeStamp();
      aiori_fd_t * aiori_fh = o.backend->create(obj_name, IOR_WRONLY | IOR_CREAT, o.backend_options);
      if (NULL == aiori_fh){
        FAIL("Unable to open file %s", obj_name);
      }
      /* NOTE(review): one missing line here — presumably filling `buf` with
       * the verification pattern before the write. */
      if ( o.file_size == (int) o.backend->xfer(WRITE, aiori_fh, (IOR_size_t *) buf, o.file_size, 0, o.backend_options)) {
        s->obj_create.suc++;
      }else{
        s->obj_create.err++;
        if (! o.ignore_precreate_errors){
          ERRF("%d: Error while creating the obj: %s", o.rank, obj_name);
        }
      }
      o.backend->close(aiori_fh, o.backend_options);

      add_timed_result(op_timer, s->phase_start_timer, s->time_create, pos, & s->max_op_time, & op_time);

      if (o.verbosity >= 2){
        oprintf("%d: write %s:%s (%d) pretend: %d\n", o.rank, dset, obj_name, ret, o.rank);
      }
    }
  }
  /* NOTE(review): one missing line here — presumably freeing `buf`. */
}
592 
593 /* FIFO: create a new file, write to it. Then read from the first created file, delete it... */
594 void run_benchmark(phase_stat_t * s, int * current_index_p){
595  char obj_name[MAX_PATHLEN];
596  int ret;
598  memset(buf, o.rank % 256, o.file_size);
599  double op_timer; // timer for individual operations
600  size_t pos = -1; // position inside the individual measurement array
601  int start_index = *current_index_p;
602  int total_num = o.num;
603  int armed_stone_wall = (o.stonewall_timer > 0);
604  int f;
605  double phase_allreduce_time = 0;
606  aiori_fd_t * aiori_fh;
607 
608  for(f=0; f < total_num; f++){
609  float bench_runtime = 0; // the time since start
610  for(int d=0; d < o.dset_count; d++){
611  double op_time;
612  struct stat stat_buf;
613  const int prevFile = f + start_index;
614  pos++;
615 
616  int readRank = (o.rank - o.offset * (d+1)) % o.size;
617  readRank = readRank < 0 ? readRank + o.size : readRank;
618  def_obj_name(obj_name, readRank, d, prevFile);
619 
620  op_timer = GetTimeStamp();
621 
622  ret = o.backend->stat(obj_name, & stat_buf, o.backend_options);
623  // TODO potentially check return value must be identical to o.file_size
624 
625  bench_runtime = add_timed_result(op_timer, s->phase_start_timer, s->time_stat, pos, & s->max_op_time, & op_time);
626  if(o.relative_waiting_factor > 1e-9) {
627  mdw_wait(op_time);
628  }
629 
630  if (o.verbosity >= 2){
631  oprintf("%d: stat %s (%d)\n", o.rank, obj_name, ret);
632  }
633 
634  if(ret != 0){
635  if (o.verbosity)
636  ERRF("%d: Error while stating the obj: %s", o.rank, obj_name);
637  s->obj_stat.err++;
638  continue;
639  }
640  s->obj_stat.suc++;
641 
642  if (o.verbosity >= 2){
643  oprintf("%d: read %s pretend: %d\n", o.rank, obj_name, readRank);
644  }
645 
646  op_timer = GetTimeStamp();
647  aiori_fh = o.backend->open(obj_name, IOR_RDONLY, o.backend_options);
648  if (NULL == aiori_fh){
649  FAIL("Unable to open file %s", obj_name);
650  }
651  if ( o.file_size == (int) o.backend->xfer(READ, aiori_fh, (IOR_size_t *) buf, o.file_size, 0, o.backend_options) ) {
652  if(o.verify_read){
653  if(verify_memory_pattern(prevFile * o.dset_count + d, buf, o.file_size, o.random_buffer_offset, readRank) == 0){
654  s->obj_read.suc++;
655  }else{
656  s->obj_read.err++;
657  }
658  }else{
659  s->obj_read.suc++;
660  }
661  }else{
662  s->obj_read.err++;
663  EWARNF("%d: Error while reading the obj: %s", o.rank, obj_name);
664  }
665  o.backend->close(aiori_fh, o.backend_options);
666 
667  bench_runtime = add_timed_result(op_timer, s->phase_start_timer, s->time_read, pos, & s->max_op_time, & op_time);
668  if(o.relative_waiting_factor > 1e-9) {
669  mdw_wait(op_time);
670  }
671  if(o.read_only){
672  continue;
673  }
674 
675  op_timer = GetTimeStamp();
676  o.backend->delete(obj_name, o.backend_options);
677  bench_runtime = add_timed_result(op_timer, s->phase_start_timer, s->time_delete, pos, & s->max_op_time, & op_time);
678  if(o.relative_waiting_factor > 1e-9) {
679  mdw_wait(op_time);
680  }
681 
682  if (o.verbosity >= 2){
683  oprintf("%d: delete %s\n", o.rank, obj_name);
684  }
685  s->obj_delete.suc++;
686 
687  int writeRank = (o.rank + o.offset * (d+1)) % o.size;
688  const int newFileIndex = o.precreate + prevFile;
689  def_obj_name(obj_name, writeRank, d, newFileIndex);
690 
691  op_timer = GetTimeStamp();
692  aiori_fh = o.backend->create(obj_name, IOR_WRONLY | IOR_CREAT, o.backend_options);
693  if (NULL != aiori_fh){
695  update_write_memory_pattern(newFileIndex * o.dset_count + d, buf, o.file_size, o.random_buffer_offset, writeRank);
696 
697  if ( o.file_size == (int) o.backend->xfer(WRITE, aiori_fh, (IOR_size_t *) buf, o.file_size, 0, o.backend_options)) {
698  s->obj_create.suc++;
699  }else{
700  s->obj_create.err++;
701  if (! o.ignore_precreate_errors){
702  ERRF("%d: Error while creating the obj: %s\n", o.rank, obj_name);
703  }
704  }
705  o.backend->close(aiori_fh, o.backend_options);
706  }else{
707  if (! o.ignore_precreate_errors){
708  ERRF("%d: Error while creating the obj: %s", o.rank, obj_name);
709  }
710  EWARNF("Unable to open file %s", obj_name);
711  s->obj_create.err++;
712  }
713  bench_runtime = add_timed_result(op_timer, s->phase_start_timer, s->time_create, pos, & s->max_op_time, & op_time);
714  if(o.relative_waiting_factor > 1e-9) {
715  mdw_wait(op_time);
716  }
717 
718  if (o.verbosity >= 2){
719  oprintf("%d: write %s (%d) pretend: %d\n", o.rank, obj_name, ret, writeRank);
720  }
721  } // end loop
722 
723  if(armed_stone_wall && bench_runtime >= o.stonewall_timer){
724  if(o.verbosity){
725  oprintf("%d: stonewall runtime %fs (%ds)\n", o.rank, bench_runtime, o.stonewall_timer);
726  }
728  s->stonewall_iterations = f;
729  break;
730  }
731  armed_stone_wall = 0;
732  // wear out mode, now reduce the maximum
733  int cur_pos = f + 1;
734  phase_allreduce_time = GetTimeStamp() - s->phase_start_timer;
735  int ret = MPI_Allreduce(& cur_pos, & total_num, 1, MPI_INT, MPI_MAX, o.com);
736  CHECK_MPI_RET(ret)
738  s->stonewall_iterations = total_num;
739  if(o.rank == 0){
740  oprintf("stonewall wear out %fs (%d iter)\n", bench_runtime, total_num);
741  }
742  if(f == total_num){
743  break;
744  }
745  }
746  }
747  s->t = GetTimeStamp() - s->phase_start_timer + phase_allreduce_time;
748  if(armed_stone_wall && o.stonewall_timer_wear_out){
749  int f = total_num;
750  int ret = MPI_Allreduce(& f, & total_num, 1, MPI_INT, MPI_MAX, o.com);
751  CHECK_MPI_RET(ret)
752  s->stonewall_iterations = total_num;
753  }
755  // TODO FIXME
756  int sh = s->stonewall_iterations;
757  int ret = MPI_Allreduce(& sh, & s->stonewall_iterations, 1, MPI_INT, MPI_MAX, o.com);
758  CHECK_MPI_RET(ret)
759  }
760 
761  if(! o.read_only) {
762  *current_index_p += f;
763  }
764  s->repeats = pos + 1;
766 }
767 
768 void run_cleanup(phase_stat_t * s, int start_index){
769  char dset[MAX_PATHLEN];
770  char obj_name[MAX_PATHLEN];
771  double op_timer; // timer for individual operations
772  size_t pos = -1; // position inside the individual measurement array
773 
774  for(int d=0; d < o.dset_count; d++){
775  for(int f=0; f < o.precreate; f++){
776  double op_time;
777  pos++;
778  def_obj_name(obj_name, o.rank, d, f + start_index);
779 
780  op_timer = GetTimeStamp();
781  o.backend->delete(obj_name, o.backend_options);
782  add_timed_result(op_timer, s->phase_start_timer, s->time_delete, pos, & s->max_op_time, & op_time);
783 
784  if (o.verbosity >= 2){
785  oprintf("%d: delete %s\n", o.rank, obj_name);
786  }
787  s->obj_delete.suc++;
788  }
789 
790  def_dset_name(dset, o.rank, d);
791  if (o.backend->rmdir(dset, o.backend_options) == 0) {
792  s->dset_delete.suc++;
793  }else{
794  oprintf("Unable to remove directory %s\n", dset);
795  }
796  if (o.verbosity >= 2){
797  oprintf("%d: delete dset %s\n", o.rank, dset);
798  }
799  }
800 }
801 
802 
/* Command-line option table consumed by option_parse(); each entry maps a
 * short/long flag to a field of the global `o` benchmark options.
 * NOTE(review): the terminating entry (LAST_OPTION) is missing from this
 * dump but is referenced by the Doxygen index; confirm against the repository. */
static option_help options [] = {
  {'O', "offset", "Offset in o.ranks between writers and readers. Writers and readers should be located on different nodes.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.offset},
  {'a', "api", "The API (plugin) to use for the benchmark, use list to show all compiled plugins.", OPTION_OPTIONAL_ARGUMENT, 's', & o.interface},
  {'I', "obj-per-proc", "Number of I/O operations per data set.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.num},
  {'L', "latency", "Measure the latency for individual operations, prefix the result files with the provided filename.", OPTION_OPTIONAL_ARGUMENT, 's', & o.latency_file_prefix},
  {0, "latency-all", "Keep the latency files from all ranks.", OPTION_FLAG, 'd', & o.latency_keep_all},
  {'P', "precreate-per-set", "Number of object to precreate per data set.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.precreate},
  {'D', "data-sets", "Number of data sets covered per process and iteration.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.dset_count},
  {'G', NULL, "Offset for the data in the read/write buffer, if not set, a random value is used", OPTION_OPTIONAL_ARGUMENT, 'd', & o.random_buffer_offset},
  {'o', NULL, "Output directory", OPTION_OPTIONAL_ARGUMENT, 's', & o.prefix},
  {'q', "quiet", "Avoid irrelevant printing.", OPTION_FLAG, 'd', & o.quiet_output},
  //{'m', "lim-free-mem", "Allocate memory until this limit (in MiB) is reached.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.limit_memory},
  // {'M', "lim-free-mem-phase", "Allocate memory until this limit (in MiB) is reached between the phases, but free it before starting the next phase; the time is NOT included for the phase.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.limit_memory_between_phases},
  {'S', "object-size", "Size for the created objects.", OPTION_OPTIONAL_ARGUMENT, 'd', & o.file_size},
  {'R', "iterations", "Number of times to rerun the main phase", OPTION_OPTIONAL_ARGUMENT, 'd', & o.iterations},
  {'t', "waiting-time", "Waiting time relative to runtime (1.0 is 100%%)", OPTION_OPTIONAL_ARGUMENT, 'f', & o.relative_waiting_factor},
  {'T', "adaptive-waiting", "Compute an adaptive waiting time", OPTION_FLAG, 'd', & o.adaptive_waiting_mode},
  {'1', "run-precreate", "Run precreate phase", OPTION_FLAG, 'd', & o.phase_precreate},
  {'2', "run-benchmark", "Run benchmark phase", OPTION_FLAG, 'd', & o.phase_benchmark},
  {'3', "run-cleanup", "Run cleanup phase (only run explicit phases)", OPTION_FLAG, 'd', & o.phase_cleanup},
  {'w', "stonewall-timer", "Stop each benchmark iteration after the specified seconds (if not used with -W this leads to process-specific progress!)", OPTION_OPTIONAL_ARGUMENT, 'd', & o.stonewall_timer},
  {'W', "stonewall-wear-out", "Stop with stonewall after specified time and use a soft wear-out phase -- all processes perform the same number of iterations", OPTION_FLAG, 'd', & o.stonewall_timer_wear_out},
  {'X', "verify-read", "Verify the data on read", OPTION_FLAG, 'd', & o.verify_read},
  {0, "allocateBufferOnGPU", "Allocate the buffer on the GPU.", OPTION_FLAG, 'd', & o.gpu_memory_flags},
  {0, "start-item", "The iteration number of the item to start with, allowing to offset the operations", OPTION_OPTIONAL_ARGUMENT, 'l', & o.start_item_number},
  {0, "print-detailed-stats", "Print detailed machine parsable statistics.", OPTION_FLAG, 'd', & o.print_detailed_stats},
  {0, "read-only", "Run read-only during benchmarking phase (no deletes/writes), probably use with -2", OPTION_FLAG, 'd', & o.read_only},
  {0, "ignore-precreate-errors", "Ignore errors occuring during the pre-creation phase", OPTION_FLAG, 'd', & o.ignore_precreate_errors},
  {0, "process-reports", "Independent report per process/rank", OPTION_FLAG, 'd', & o.process_report},
  {'v', "verbose", "Increase the verbosity level", OPTION_FLAG, 'd', & o.verbosity},
  {0, "run-info-file", "The log file for resuming a previous run", OPTION_OPTIONAL_ARGUMENT, 's', & o.run_info_file},
  /* NOTE(review): missing LAST_OPTION terminator line in this dump. */
  };
836 
/* Print the current local wall-clock time as "YYYY-MM-DD HH:MM:SS" to the log. */
static void printTime(){
  char formatted[100];
  time_t now = time(NULL);
  struct tm * local = localtime(& now);
  strftime(formatted, sizeof(formatted), "%Y-%m-%d %H:%M:%S", local);
  oprintf("%s\n", formatted);
}
843 
/* Restore the iteration offset of a previous run for resuming.
 * Rank 0 parses "pos: <int>" from o.run_info_file and the value is broadcast
 * to all ranks; the run aborts if the file is missing or malformed.
 * NOTE(review): the MPI_Bcast return code is stored in ret but never checked
 * (cf. CHECK_MPI_RET used elsewhere in this file); `position` is only
 * initialized on rank 0 before the broadcast, which is correct since
 * MPI_Bcast overwrites it on all other ranks. */
static int return_position(){
  int position, ret;
  if( o.rank == 0){
    FILE * f = fopen(o.run_info_file, "r");
    if(! f){
      ERRF("[ERROR] Could not open %s for restart", o.run_info_file);
      exit(1);
    }
    ret = fscanf(f, "pos: %d", & position);
    if (ret != 1){
      ERRF("Could not read from %s for restart", o.run_info_file);
      exit(1);
    }
    fclose(f);
  }
  ret = MPI_Bcast( & position, 1, MPI_INT, 0, o.com );
  return position;
}
862 
863 static void store_position(int position){
864  if (o.rank != 0){
865  return;
866  }
867  FILE * f = fopen(o.run_info_file, "w");
868  if(! f){
869  ERRF("[ERROR] Could not open %s for saving data", o.run_info_file);
870  exit(1);
871  }
872  fprintf(f, "pos: %d\n", position);
873  fclose(f);
874 }
875 
/* Entry point of the md-workbench benchmark (called from the IOR frontend).
 * Parses options, selects the AIORI backend, then runs the requested phases
 * (precreate, benchmark x iterations, cleanup) and returns the allocated
 * results structure (ownership transfers to the caller; o.results).
 * NOTE(review): numerous original source lines are missing from this dump
 * (marked below); verify against the repository before editing. */
mdworkbench_results_t* md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logfile){
  int ret;
  int printhelp = 0;
  char * limit_memory_P = NULL;
  init_options();
  init_clock(world_com);

  o.com = world_com;
  /* NOTE(review): missing line — presumably assigning o.logfile from out_logfile. */
  MPI_Comm_rank(o.com, & o.rank);
  MPI_Comm_size(o.com, & o.size);

  // echo the command line on rank 0 for reproducibility
  if (o.rank == 0 && ! o.quiet_output){
    oprintf("Args: %s", argv[0]);
    for(int i=1; i < argc; i++){
      oprintf(" \"%s\"", argv[i]);
    }
    oprintf("\n");
  }

  memset(& o.hints, 0, sizeof(o.hints));
  /* NOTE(review): missing line — presumably building `global_options` from
   * the options table and the AIORI plugin options. */
  int parsed = option_parse(argc, argv, global_options);
  /* NOTE(review): missing line — presumably resolving o.backend from o.interface. */
  if (o.backend == NULL){
    ERR("Unrecognized I/O API");
  }
  if (! o.backend->enable_mdtest){
    ERR("Backend doesn't support MDWorbench");
  }
  /* NOTE(review): missing lines here — presumably phase-selection defaults. */

  // enable all phases
  /* NOTE(review): missing line(s) inside this block. */
  }
  /* NOTE(review): missing line — presumably the condition for the invalid
   * stonewall/-2 combination checked below. */
  if(o.rank == 0)
    ERR("Invalid options, if running only the benchmark phase using -2 with stonewall option then use stonewall wear-out");
    exit(1);
  }
  // pick a random buffer offset once and share it so all ranks verify consistently
  if( o.random_buffer_offset == -1 ){
    o.random_buffer_offset = time(NULL);
    MPI_Bcast(& o.random_buffer_offset, 1, MPI_INT, 0, o.com);
  }

  if(o.backend->xfer_hints){
    o.backend->xfer_hints(& o.hints);
  }
  if(o.backend->check_params){
    /* NOTE(review): missing line — presumably the check_params call. */
  }
  if (o.backend->initialize){
    /* NOTE(review): missing line — presumably the initialize call. */
  }

  int current_index = 0;

  /* NOTE(review): missing line — presumably the condition for resuming from
   * the run-info file. */
  current_index = return_position();
  }

  if(o.start_item_number){
    oprintf("Using start position %lld\n", (long long) o.start_item_number);
    current_index = o.start_item_number;
  }

  size_t total_obj_count = o.dset_count * (size_t) (o.num * o.iterations + o.precreate) * o.size;
  if (o.rank == 0 && ! o.quiet_output){
    oprintf("MD-Workbench total objects: %zu workingset size: %.3f MiB (version: %s) time: ", total_obj_count, ((double) o.size) * o.dset_count * o.precreate * o.file_size / 1024.0 / 1024.0, PACKAGE_VERSION);
    printTime();
    if(o.num > o.precreate){
      oprintf("WARNING: num > precreate, this may cause the situation that no objects are available to read\n");
    }
  }

  if ( o.rank == 0 && ! o.quiet_output ){
    // print the set output options
    // option_print_current(options);
    // oprintf("\n");
  }

  // preallocate memory if necessary
  //ret = mem_preallocate(& limit_memory_P, o.limit_memory, o.verbosity >= 3);
  //if(ret != 0){
  //  printf("%d: Error allocating memory\n", o.rank);
  //  MPI_Abort(o.com, 1);
  //}

  double t_bench_start;
  t_bench_start = GetTimeStamp();
  phase_stat_t phase_stats;
  // room for precreate + cleanup + iterations results (x7 when adaptive waiting sweeps factors)
  size_t result_count = (2 + o.iterations) * (o.adaptive_waiting_mode ? 7 : 1);
  o.results = malloc(sizeof(mdworkbench_results_t) + sizeof(mdworkbench_result_t) * result_count);
  memset(o.results, 0, sizeof(mdworkbench_results_t) + sizeof(mdworkbench_result_t) * result_count);
  o.results->count = 0;

  if(o.rank == 0 && o.print_detailed_stats && ! o.quiet_output){
    /* NOTE(review): missing line — presumably printing the stats header row. */
  }

  if (o.phase_precreate){
    if (o.rank == 0){
      if (o.backend->mkdir(o.prefix, DIRMODE, o.backend_options) != 0) {
        EWARNF("Unable to create test directory %s", o.prefix);
      }
    }
    init_stats(& phase_stats, o.precreate * o.dset_count);
    MPI_Barrier(o.com);

    // pre-creation phase
    phase_stats.phase_start_timer = GetTimeStamp();
    run_precreate(& phase_stats, current_index);
    phase_stats.t = GetTimeStamp() - phase_stats.phase_start_timer;
    end_phase("precreate", & phase_stats);
  }

  if (o.phase_benchmark){
    // benchmark phase
    /* NOTE(review): missing lines here — presumably the per-iteration loop
     * header and/or global_iteration bookkeeping. */
    }
    init_stats(& phase_stats, o.num * o.dset_count);
    MPI_Barrier(o.com);
    phase_stats.phase_start_timer = GetTimeStamp();
    run_benchmark(& phase_stats, & current_index);
    end_phase("benchmark", & phase_stats);

    /* NOTE(review): missing line — presumably the adaptive-waiting-mode
     * condition guarding the sweep below. */
    o.relative_waiting_factor = 0.0625;
    for(int r=0; r <= 6; r++){
      init_stats(& phase_stats, o.num * o.dset_count);
      MPI_Barrier(o.com);
      phase_stats.phase_start_timer = GetTimeStamp();
      run_benchmark(& phase_stats, & current_index);
      end_phase("benchmark", & phase_stats);
      /* NOTE(review): missing lines — presumably doubling the waiting factor. */
    }
    }
  }
  }

  // cleanup phase
  if (o.phase_cleanup){
    init_stats(& phase_stats, o.precreate * o.dset_count);
    phase_stats.phase_start_timer = GetTimeStamp();
    run_cleanup(& phase_stats, current_index);
    phase_stats.t = GetTimeStamp() - phase_stats.phase_start_timer;
    end_phase("cleanup", & phase_stats);

    if (o.rank == 0){
      if (o.backend->rmdir(o.prefix, o.backend_options) != 0) {
        oprintf("Unable to remove directory %s\n", o.prefix);
      }
    }
  }else{
    // no cleanup: remember where we stopped so a later run can resume
    store_position(current_index);
  }

  double t_all = GetTimeStamp() - t_bench_start;
  if(o.backend->finalize){
    /* NOTE(review): missing line — presumably the finalize call. */
  }
  if (o.rank == 0 && ! o.quiet_output){
    oprintf("Total runtime: %.0fs time: ", t_all);
    printTime();
  }
  //mem_free_preallocated(& limit_memory_P);
  return o.results;
}
#define ERRF(FORMAT,...)
Definition: aiori-debug.h:77
mdworkbench_results_t * md_workbench_run(int argc, char **argv, MPI_Comm world_com, FILE *out_logfile)
Definition: md-workbench.c:876
void run_benchmark(phase_stat_t *s, int *current_index_p)
Definition: md-workbench.c:594
static void def_dset_name(char *out_name, int n, int d)
Definition: md-workbench.c:130
time_statistics_t stats_read
Definition: md-workbench.c:64
time_statistics_t stats_create
Definition: md-workbench.c:63
static int sum_err(phase_stat_t *p)
Definition: md-workbench.c:197
static float add_timed_result(double start, double phase_start_timer, time_result_t *results, size_t pos, double *max_time, double *out_op_time)
Definition: md-workbench.c:181
#define LAST_OPTION
Definition: option.h:39
op_stat_t obj_read
Definition: md-workbench.c:52
void * airoi_update_module_options(const ior_aiori_t *backend, options_all_t *opt)
Definition: aiori.c:93
FILE * out_logfile
Definition: utilities.c:72
int option_parse(int argc, char **argv, options_all_t *opt_all)
Definition: option.c:414
float relative_waiting_factor
Definition: md-workbench.c:122
struct benchmark_options o
Definition: md-workbench.c:128
time_statistics_t stats_delete
Definition: md-workbench.c:66
static void statistics_minmax(int count, double *arr, double *out_min, double *out_max)
Definition: md-workbench.c:218
op_stat_t obj_stat
Definition: md-workbench.c:53
time_result_t * time_create
Definition: md-workbench.c:58
void run_precreate(phase_stat_t *s, int current_index)
Definition: md-workbench.c:536
void(* delete)(char *, aiori_mod_opt_t *module_options)
Definition: aiori.h:100
static void printTime()
Definition: md-workbench.c:837
op_stat_t obj_delete
Definition: md-workbench.c:54
int(* mkdir)(const char *path, mode_t mode, aiori_mod_opt_t *module_options)
Definition: aiori.h:105
#define min(a, b)
Definition: md-workbench.c:26
double max_op_time
Definition: md-workbench.c:69
op_stat_t obj_create
Definition: md-workbench.c:51
#define FAIL(...)
Definition: aiori-debug.h:12
static uint64_t aggregate_timers(int repeats, int max_repeats, time_result_t *times, time_result_t *global_times)
Definition: md-workbench.c:356
time_statistics_t stats_stat
Definition: md-workbench.h:23
double * t_all
Definition: md-workbench.c:46
static void compute_histogram(const char *name, time_result_t *times, time_statistics_t *stats, size_t repeats, int writeLatencyFile)
Definition: md-workbench.c:379
#define IOR_RDONLY
Definition: aiori.h:28
int stonewall_timer_wear_out
Definition: md-workbench.c:94
#define WRITE
Definition: iordef.h:86
void * backend_options
Definition: md-workbench.c:76
static option_help options[]
Definition: md-workbench.c:803
static int compare_floats(time_result_t *x, time_result_t *y)
Definition: md-workbench.c:346
int(* rmdir)(const char *path, aiori_mod_opt_t *module_options)
Definition: aiori.h:106
#define READ
Definition: iordef.h:88
static double statistics_mean(int count, double *arr)
Definition: md-workbench.c:201
#define IOR_CREAT
Definition: aiori.h:32
const ior_aiori_t * aiori_select(const char *api)
Definition: aiori.c:237
time_result_t * time_stat
Definition: md-workbench.c:60
double phase_start_timer
Definition: md-workbench.c:70
int(* check_params)(aiori_mod_opt_t *)
Definition: aiori.h:113
void init_options()
Definition: md-workbench.c:138
static double statistics_std_dev(int count, double *arr)
Definition: md-workbench.c:209
void run_cleanup(phase_stat_t *s, int start_index)
Definition: md-workbench.c:768
int verify_memory_pattern(int item, char *buffer, size_t bytes, int buff_offset, int pretendRank)
Definition: utilities.c:100
uint64_t iterations_done
Definition: md-workbench.h:30
void init_clock(MPI_Comm com)
Definition: utilities.c:772
void(* initialize)(aiori_mod_opt_t *options)
Definition: aiori.h:109
float time_since_app_start
Definition: md-workbench.c:38
#define DIRMODE
Definition: md-workbench.c:22
time_statistics_t stats_create
Definition: md-workbench.h:21
void(* xfer_hints)(aiori_xfer_hint_t *params)
Definition: aiori.h:96
void(* close)(aiori_fd_t *, aiori_mod_opt_t *module_options)
Definition: aiori.h:99
time_result_t * time_read
Definition: md-workbench.c:59
op_stat_t dset_delete
Definition: md-workbench.c:49
int(* stat)(const char *path, struct stat *buf, aiori_mod_opt_t *module_options)
Definition: aiori.h:108
mdworkbench_results_t * results
Definition: md-workbench.c:86
options_all_t * airoi_create_all_module_options(option_help *global_options)
Definition: aiori.c:107
double GetTimeStamp(void)
Definition: utilities.c:731
#define EWARNF(FORMAT,...)
Definition: aiori-debug.h:45
static int return_position()
Definition: md-workbench.c:844
static void store_position(int position)
Definition: md-workbench.c:863
#define CHECK_MPI_RET(ret)
Definition: md-workbench.c:24
ior_aiori_t const * backend
Definition: md-workbench.c:75
void generate_memory_pattern(char *buf, size_t bytes, int buff_offset, int rank)
Definition: utilities.c:86
aiori_fd_t *(* create)(char *, int iorflags, aiori_mod_opt_t *)
Definition: aiori.h:90
IOR_offset_t(* xfer)(int access, aiori_fd_t *, IOR_size_t *, IOR_offset_t size, IOR_offset_t offset, aiori_mod_opt_t *module_options)
Definition: aiori.h:97
static void def_obj_name(char *out_name, int n, int d, int i)
Definition: md-workbench.c:134
#define IOR_WRONLY
Definition: aiori.h:29
static void end_phase(const char *name, phase_stat_t *p)
Definition: md-workbench.c:409
int stonewall_iterations
Definition: md-workbench.c:71
static options_all_t * global_options
Definition: parse_options.c:41
time_statistics_t stats_delete
Definition: md-workbench.h:24
time_result_t * time_delete
Definition: md-workbench.c:61
long long int IOR_size_t
Definition: iordef.h:110
mdworkbench_result_t result[]
Definition: md-workbench.h:36
void(* finalize)(aiori_mod_opt_t *options)
Definition: aiori.h:110
static double runtime_quantile(int repeats, time_result_t *times, float quantile)
Definition: md-workbench.c:350
aiori_xfer_hint_t hints
Definition: md-workbench.c:77
#define oprintf(...)
Definition: md-workbench.c:28
bool enable_mdtest
Definition: aiori.h:115
static void print_detailed_stat_header()
Definition: md-workbench.c:193
static void mdw_wait(double runtime)
Definition: md-workbench.c:152
time_statistics_t stats_stat
Definition: md-workbench.c:65
#define MAX_PATHLEN
Definition: utilities.h:31
aiori_fd_t *(* open)(char *, int iorflags, aiori_mod_opt_t *)
Definition: aiori.h:92
#define ERR(MSG)
Definition: aiori-debug.h:92
uint64_t repeats
Definition: md-workbench.c:57
static void print_p_stat(char *buff, const char *name, phase_stat_t *p, double t, int print_global)
Definition: md-workbench.c:229
void aligned_buffer_free(void *buf, ior_memory_flags gpu)
Definition: utilities.c:973
uint64_t start_item_number
Definition: md-workbench.c:125
static void init_stats(phase_stat_t *p, size_t repeats)
Definition: md-workbench.c:171
void update_write_memory_pattern(uint64_t item, char *buf, size_t bytes, int buff_offset, int rank)
Definition: utilities.c:78
time_statistics_t stats_read
Definition: md-workbench.h:22
op_stat_t dset_create
Definition: md-workbench.c:48
#define NULL
Definition: iordef.h:70
char * latency_file_prefix
Definition: md-workbench.c:97
void * aligned_buffer_alloc(size_t size, ior_memory_flags type)
Definition: utilities.c:924