diff --git a/examples/mapping_tester/run.sh b/examples/mapping_tester/run.sh
index 62b2c4e6..4d0d6a75 100755
--- a/examples/mapping_tester/run.sh
+++ b/examples/mapping_tester/run.sh
@@ -12,10 +12,10 @@ MAPPING_TESTER="${TEST_LOCATION}"/../../tools/mapping-tester/
 TEST_CASE_LOCATION="${TEST_LOCATION}"/case
 
 # Generate the run scripts
-python3 "${MAPPING_TESTER}"/generate.py --setup "${TEST_LOCATION}"/setup-test.json --outdir "${TEST_CASE_LOCATION}" --template "${MAPPING_TESTER}"/config-template.xml
+python3 "${MAPPING_TESTER}"/generate_mapping_test.py --setup "${TEST_LOCATION}"/setup-test.json --outdir "${TEST_CASE_LOCATION}" --template "${MAPPING_TESTER}"/config-template.xml
 
 # Prepare the meshes
-python3 "${MAPPING_TESTER}"/preparemeshes.py --setup "${TEST_LOCATION}"/setup-test.json --outdir "${TEST_CASE_LOCATION}" --force
+python3 "${MAPPING_TESTER}"/prepare_mapping_test_meshes.py --setup "${TEST_LOCATION}"/setup-test.json --outdir "${TEST_CASE_LOCATION}" --force
 
 export ASTE_A_MPIARGS=""
 export ASTE_B_MPIARGS=""
@@ -29,4 +29,4 @@ bash ./postprocessall.sh
 cd "${TEST_LOCATION}"
 
 # Gather the generated statistics
-python3 "${MAPPING_TESTER}"/gatherstats.py --outdir "${TEST_CASE_LOCATION}" --file test-statistics.csv
+python3 "${MAPPING_TESTER}"/gather_stats.py --outdir "${TEST_CASE_LOCATION}" --file test-statistics.csv
diff --git a/examples/weak_scaling_tester/clean.sh b/examples/weak_scaling_tester/clean.sh
new file mode 100755
index 00000000..5e2369c5
--- /dev/null
+++ b/examples/weak_scaling_tester/clean.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+rm -f test-statistics.csv
+rm -fr ./case/
diff --git a/examples/weak_scaling_tester/run.sh b/examples/weak_scaling_tester/run.sh
new file mode 100755
index 00000000..d2e4b2ce
--- /dev/null
+++ b/examples/weak_scaling_tester/run.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+set -e -x
+
+# Get the test location
+TEST_LOCATION="$(pwd)"
+export TEST_LOCATION
+
+# The mapping-scaling-tester location
+WEAK_SCALING_TESTER="${TEST_LOCATION}"/../../tools/mapping-scaling-tester/
+
+GENERATOR="${TEST_LOCATION}"/../../tools/mesh-generators/generate_halton_mesh.py
+
+# The case directory
+TEST_CASE_LOCATION="${TEST_LOCATION}"/case
+
+# Generate the run scripts
+python3 "${WEAK_SCALING_TESTER}"/generate_scale_test.py --setup "${TEST_LOCATION}"/setup-test.json --outdir "${TEST_CASE_LOCATION}" --template "${WEAK_SCALING_TESTER}"/config-template.xml
+
+# Prepare the meshes
+python3 "${WEAK_SCALING_TESTER}"/prepare_scale_meshes.py --setup "${TEST_LOCATION}"/setup-test.json --outdir "${TEST_CASE_LOCATION}" --force -g "${GENERATOR}"
+export ASTE_A_MPIARGS=""
+export ASTE_B_MPIARGS=""
+
+# Run the actual cases
+cd "${TEST_CASE_LOCATION}" && bash ./runall.sh
+
+# Postprocess the test cases
+bash ./postprocessall.sh
+
+cd "${TEST_LOCATION}"
+
+# Gather the generated statistics
+python3 "${WEAK_SCALING_TESTER}"/gather_stats.py --outdir "${TEST_CASE_LOCATION}" --file test-statistics.csv
+
+# Plot the results
+python3 "${WEAK_SCALING_TESTER}"/create_plots.py --file test-statistics.csv --mode weak_scale
diff --git a/examples/weak_scaling_tester/setup-test.json b/examples/weak_scaling_tester/setup-test.json
new file mode 100644
index 00000000..f21a8a18
--- /dev/null
+++ b/examples/weak_scaling_tester/setup-test.json
@@ -0,0 +1,35 @@
+{
+  "general": {
+    "testlocation": "${TEST_LOCATION}",
+    "function": "x+y^2+z^3",
+    "ranks": {
+      "A": [
+        1,2,3,4
+      ],
+      "B": [
+        1,2,3,4
+      ]
+    },
+    "network": "lo0",
+    "syncmode": false,
+    "numberofpointsperrank": {
+      "A": 1000,
+      "B": 1200
+    }
+  },
"groups": [ + { + "mapping": { + "constraints": [ + "consistent" + ], + "cases": { + "tps": { + "kind": "rbf-thin-plate-splines", + "options": "use-qr-decomposition=\"1\"" + } + } + } + } + ] +} diff --git a/tools/mapping-tester/generate.py b/tools/mapping-scaling-tester/common.py old mode 100755 new mode 100644 similarity index 55% rename from tools/mapping-tester/generate.py rename to tools/mapping-scaling-tester/common.py index 747599b3..d331f9e1 --- a/tools/mapping-tester/generate.py +++ b/tools/mapping-scaling-tester/common.py @@ -1,13 +1,11 @@ -#! /usr/bin/env python3 +#!/usr/bin/env python3 -import argparse -import json import os from jinja2 import Template -def generateConfig(template, setup): +def generate_config(template, setup): template = Template(template) return template.render(setup) @@ -20,55 +18,7 @@ def as_iter(something): return [something] -def generateCases(setup): - meshes = setup["general"]["meshes"] - network = setup["general"].get("network", "lo") - syncmode = setup["general"].get("syncmode", "false") - - cases = [] - for group in setup["groups"]: - for name, mapping in group["mapping"]["cases"].items(): - for constraint in group["mapping"]["constraints"]: - for inname in group["meshes"]["A"]: - infile = meshes["A"][inname] - for outname in group["meshes"]["B"]: - outfile = meshes["B"][outname] - for ranksA, ranksB in zip( - as_iter(setup["general"]["ranks"].get("A", 1)), - as_iter(setup["general"]["ranks"].get("B", 1)), - ): - cases.append( - { - "function": setup["general"]["function"], - "mapping": { - "name": name, - "kind": mapping["kind"], - "constraint": constraint, - "options": mapping.get("options", ""), - }, - "A": { - "ranks": ranksA, - "mesh": { - "name": inname, - "file": infile, - }, - }, - "B": { - "ranks": ranksB, - "mesh": { - "name": outname, - "file": outfile, - }, - }, - "network": network, - "syncmode": syncmode, - } - ) - - return cases - - -def getCaseFolders(case): +def get_case_folder(case): return [ case["mapping"]["name"], case["mapping"]["constraint"], @@ -77,21 +27,21 @@ def getCaseFolders(case): ] -def caseToSortable(case): +def case_to_sortable(case): parts = case.split(os.path.sep) kind = parts[0] mesha, meshb = map(float, parts[-2].split("-")) - kindCost = 0 + kind_cost = 0 if kind.startswith("gaussian"): - kindCost = 1 + kind_cost = 1 elif kind.startswith("tps"): - kindCost = 2 + kind_cost = 2 - return (kindCost, -mesha, -meshb) + return (kind_cost, -mesha, -meshb) -def createMasterRunScripts(casemap, dir): +def create_master_run_scripts(casemap, dir): common = [ "#!/bin/bash", "", @@ -136,28 +86,28 @@ def createMasterRunScripts(casemap, dir): ) -def createRunScript(outdir, path, case): +def create_run_script(outdir, path, case): amesh = case["A"]["mesh"]["name"] aranks = case["A"]["ranks"] - ameshLocation = os.path.relpath( + amesh_location = os.path.relpath( os.path.join(outdir, "meshes", amesh, str(aranks), amesh), path ) # Generate runner script acmd = '/usr/bin/time -f %M -a -o memory-A.log precice-aste-run -v -a -p A --data "{}" --mesh {} || kill 0 &'.format( - case["function"], ameshLocation + case["function"], amesh_location ) if aranks > 1: acmd = "mpirun -n {} $ASTE_A_MPIARGS {}".format(aranks, acmd) bmesh = case["B"]["mesh"]["name"] branks = case["B"]["ranks"] - bmeshLocation = os.path.relpath( + bmesh_location = os.path.relpath( os.path.join(outdir, "meshes", bmesh, str(branks), bmesh), path ) mapped_data_name = case["function"] + "(mapped)" bcmd = '/usr/bin/time -f %M -a -o memory-B.log precice-aste-run -v -a -p B 
--data "{}" --mesh {} --output mapped || kill 0 &'.format( - mapped_data_name, bmeshLocation + mapped_data_name, bmesh_location ) if branks > 1: bcmd = "mpirun -n {} $ASTE_B_MPIARGS {}".format(branks, bcmd) @@ -218,12 +168,10 @@ def createRunScript(outdir, path, case): ) post_content += [joincmd, diffcmd] else: - [recoveryFileLocation, tmpPrefix] = os.path.split( - os.path.normpath(bmeshLocation) - ) - tmprecoveryFile = recoveryFileLocation + "/{}_recovery.json".format(bmesh) + [recovery_file_location, _] = os.path.split(os.path.normpath(bmesh_location)) + tmp_recovery_file = recovery_file_location + "/{}_recovery.json".format(bmesh) joincmd = "precice-aste-join --mesh mapped -r {} -o result.vtk".format( - tmprecoveryFile + tmp_recovery_file ) diffcmd = 'precice-aste-evaluate --data error --diffdata "{1}" --diff --stats --mesh result.vtk --function "{0}" | tee diff.log'.format( case["function"], mapped_data_name @@ -234,10 +182,10 @@ def createRunScript(outdir, path, case): ) -def setupCases(outdir, template, cases): +def setup_cases(outdir, template, cases): casemap = {} for case in cases: - folders = getCaseFolders(case) + folders = get_case_folder(case) casemap.setdefault(folders[0], []).append(folders[1:]) name = [outdir] + folders path = os.path.join(*name) @@ -246,58 +194,9 @@ def setupCases(outdir, template, cases): print(f"Generating {path}") os.makedirs(path, exist_ok=True) with open(config, "w") as config: - config.write(generateConfig(template, case)) - createRunScript(outdir, path, case) + config.write(generate_config(template, case)) + create_run_script(outdir, path, case) print(f"Generated {len(cases)} cases") print(f"Generating master scripts") - createMasterRunScripts(casemap, outdir) - - -def parseArguments(args): - parser = argparse.ArgumentParser(description="Generator for a mapping test suite") - parser.add_argument( - "-o", - "--outdir", - default="cases", - help="Directory to generate the test suite in.", - ) - parser.add_argument( - "-s", - "--setup", - type=argparse.FileType("r"), - default="setup.json", - help="The test setup file to use.", - ) - parser.add_argument( - "-t", - "--template", - type=argparse.FileType("r"), - default="config-template.xml", - help="The precice config template to use.", - ) - return parser.parse_args(args) - - -def main(argv): - # Parse the input arguments - args = parseArguments(argv[1:]) - # Parse the json file using the json module - setup = json.load(args.setup) - # Read the xml-template file - template = args.template.read() - # Generate the actual cases - cases = generateCases(setup) - outdir = os.path.normpath(args.outdir) - if os.path.isdir(outdir): - print('Warning: outdir "{}" already exisits.'.format(outdir)) - - setupCases(outdir, template, cases) - - return 0 - - -if __name__ == "__main__": - import sys - - sys.exit(main(sys.argv)) + create_master_run_scripts(casemap, outdir) diff --git a/tools/mapping-tester/config-template.xml b/tools/mapping-scaling-tester/config-template.xml similarity index 100% rename from tools/mapping-tester/config-template.xml rename to tools/mapping-scaling-tester/config-template.xml diff --git a/tools/mapping-tester/plotconv.py b/tools/mapping-scaling-tester/create_plots.py similarity index 56% rename from tools/mapping-tester/plotconv.py rename to tools/mapping-scaling-tester/create_plots.py index 8e20201e..b4017cfa 100755 --- a/tools/mapping-tester/plotconv.py +++ b/tools/mapping-scaling-tester/create_plots.py @@ -2,16 +2,29 @@ import argparse import math +import sys import matplotlib.pyplot as 
plt -import numpy as np import pandas +style_colours = [ + "#0173b2", + "#de8f05", + "#029e73", + "#d55e00", + "#cc78bc", + "#ca9161", + "#fbafe4", + "#949494", + "#ece133", + "#56b4e9", +] +style_markers = ["o", "D", "s"] +styles = [(c, m) for m in style_markers for c in style_colours] -def parseArguments(args): - parser = argparse.ArgumentParser( - description="Creates convergence plots from gathered stats" - ) + +def parse_arguments(args): + parser = argparse.ArgumentParser(description="Creates plots from gathered stats") parser.add_argument( "-f", "--file", @@ -25,6 +38,13 @@ def parseArguments(args): default="result", help="The prefix for all generated PDF plots.", ) + parser.add_argument( + "-m", + "--mode", + default="mapping", + choices=["weak_scale", "mapping"], + help="The test mode to plot", + ) return parser.parse_args(args) @@ -32,32 +52,12 @@ def lavg(l): return math.exp(sum(map(math.log, l)) / len(l)) -# seaborn.color_palette("colorblind", 10).as_hex() -style_colours = [ - "#0173b2", - "#de8f05", - "#029e73", - "#d55e00", - "#cc78bc", - "#ca9161", - "#fbafe4", - "#949494", - "#ece133", - "#56b4e9", -] -style_markers = ["o", "D", "s"] -styles = [(c, m) for m in style_markers for c in style_colours] - - -def plotConv(ax, df, yname): +def plot_convergence(ax, df, yname): xmin = df["mesh A"].min() xmax = df["mesh A"].max() ymin = df[yname].min() ymax = df[yname].max() - print(xmin, xmax) - print(ymin, ymax) - # 1st order line fox = [xmax, xmin] foy1 = ymax @@ -68,7 +68,7 @@ def plotConv(ax, df, yname): ) ax.annotate("1st order", xy=(lavg(fox), lavg(foy)), color="gray", zorder=-1) - # # 2nd order line + # 2nd order line sox = [xmin, xmax] soy1 = ymin soy2 = soy1 * ((sox[1] / sox[0]) ** 2) @@ -80,7 +80,7 @@ def plotConv(ax, df, yname): ax.annotate("2nd order", xy=(lavg(sox), lavg(soy)), color="gray", zorder=-1) -def plotError(df, prefix): +def plot_error(df, prefix): yname = "relative-l2" fig, ax = plt.subplots(sharex=True, sharey=True) series = df.groupby("mapping") @@ -102,14 +102,14 @@ def plotError(df, prefix): ax.set_xlabel("edge length(h) of mesh A") ax.set_ylabel("relative-l2 error mapping to mesh B") - plotConv(ax, df, yname) + plot_convergence(ax, df, yname) plt.gca().invert_xaxis() plt.grid() plt.savefig(prefix + "-error.pdf") -def plotMemory(df, prefix): +def plot_memory_usage(df, prefix): yname = "peakMemB" fig, ax = plt.subplots(sharex=True, sharey=True) series = df.groupby("mapping") @@ -131,14 +131,12 @@ def plotMemory(df, prefix): ax.set_xlabel("edge length(h) of mesh A") ax.set_ylabel("peak memory of participant B [bytes]") - # plotConv(ax, df, yname) - plt.gca().invert_xaxis() plt.grid() plt.savefig(prefix + "-peakMemB.pdf") -def plotComputeMappingTime(df, prefix): +def plot_compute_mapping_time(df, prefix): yname = "computeMappingTime" fig, ax = plt.subplots(sharex=True, sharey=True) series = df.groupby("mapping") @@ -161,14 +159,12 @@ def plotComputeMappingTime(df, prefix): ax.set_xlabel("edge length(h) of mesh A") ax.set_ylabel("time to compute mapping [ms]") - # plotConv(ax, df, yname) - plt.gca().invert_xaxis() plt.grid() plt.savefig(prefix + "-computet.pdf") -def plotMapDataTime(df, prefix): +def plot_map_data_time(df, prefix): yname = "mapDataTime" fig, ax = plt.subplots(sharex=True, sharey=True) series = df.groupby("mapping") @@ -191,34 +187,115 @@ def plotMapDataTime(df, prefix): ax.set_xlabel("edge length(h) of mesh A") ax.set_ylabel("time to map Data [ms]") - # plotConv(ax, df, yname) - plt.gca().invert_xaxis() plt.grid() plt.savefig(prefix + 
"-mapt.pdf") +def plot_scale_memory(df, prefix, participant): + yname = "peakMem" + participant + _, ax = plt.subplots(sharex=True, sharey=True) + series = df.groupby("mapping") + + for grouped, style in zip(series, styles): + name, group = grouped + if group[yname].max() == 0: + print(f"Dropping {yname}-series {name} as all 0") + continue + color, marker = style + group.plot( + ax=ax, + x=f"mesh {participant}", + y=yname, + label=name, + marker=marker, + color=color, + ) + ax.set_xlabel(f"ranks {participant}") + ax.set_ylabel(f"peak memory of participant {participant} [bytes]") + + plt.grid() + plt.savefig(f"{prefix}-peakMem{participant}.pdf") + + +def plot_scale_map_time(df, prefix, participant): + yname = "mapDataTime" + _, ax = plt.subplots(sharex=True, sharey=True) + series = df.groupby("mapping") + for grouped, style in zip(series, styles): + name, group = grouped + if group[yname].max() == 0: + print(f"Dropping {yname}-series {name} as all 0") + continue + color, marker = style + group.plot( + ax=ax, + x=f"ranks {participant}", + y=yname, + label=name, + marker=marker, + color=color, + ) + + ax.set_xlabel(f"ranks {participant}") + ax.set_ylabel("time to map Data [ms]") + + plt.grid() + plt.savefig(f"{prefix}-mapt-{participant}.pdf") + + +def plot_scale_compute_mapping_time(df, prefix, participant): + yname = "computeMappingTime" + _, ax = plt.subplots(sharex=True, sharey=True) + series = df.groupby("mapping") + for grouped, style in zip(series, styles): + name, group = grouped + if group[yname].max() == 0: + print(f"Dropping {yname}-series {name} as all 0") + continue + color, marker = style + group.plot( + ax=ax, + x=f"ranks {participant}", + y=yname, + label=name, + marker=marker, + color=color, + ) + + ax.set_xlabel(f"ranks {participant}") + ax.set_ylabel("time to compute mapping [ms]") + + plt.grid() + plt.savefig(f"{prefix}-computet-{participant}.pdf") + + def main(argv): - args = parseArguments(argv[1:]) + args = parse_arguments(argv[1:]) plt.rcParams["legend.fontsize"] = "small" plt.rcParams["figure.figsize"] = "8, 8" plt.rcParams["figure.autolayout"] = "true" df = pandas.read_csv(args.file) - toMeshes = df["mesh B"].unique() - assert ( - len(toMeshes) == 1 - ), f"There are {len(toMeshes)} to-meshes but only 1 is allowed. Fix your dataset!" - df.sort_values("mesh A", inplace=True) - plotError(df, args.prefix) - plotMemory(df, args.prefix) - plotMapDataTime(df, args.prefix) - plotComputeMappingTime(df, args.prefix) + if args.mode == "mapping": + to_meshes = df["mesh B"].unique() + assert ( + len(to_meshes) == 1 + ), f"There are {len(to_meshes)} to-meshes but only 1 is allowed. Fix your dataset!" 
+ df.sort_values("mesh A", inplace=True) + plot_error(df, args.prefix) + plot_memory_usage(df, args.prefix) + plot_map_data_time(df, args.prefix) + plot_compute_mapping_time(df, args.prefix) + elif args.mode == "weak_scale": + for participant in ["A", "B"]: + df.sort_values(f"ranks {participant}", inplace=True) + plot_scale_memory(df, args.prefix, participant) + plot_scale_map_time(df, args.prefix, participant) + plot_scale_compute_mapping_time(df, args.prefix, participant) return 0 if __name__ == "__main__": - import sys - sys.exit(main(sys.argv)) diff --git a/tools/mapping-tester/gatherstats.py b/tools/mapping-scaling-tester/gather_stats.py similarity index 90% rename from tools/mapping-tester/gatherstats.py rename to tools/mapping-scaling-tester/gather_stats.py index 24397c13..3d33adce 100755 --- a/tools/mapping-tester/gatherstats.py +++ b/tools/mapping-scaling-tester/gather_stats.py @@ -5,9 +5,10 @@ import glob import json import os +import sys -def parseArguments(args): +def parse_arguments(args): parser = argparse.ArgumentParser(description="Gathers stats after a run") parser.add_argument( "-o", @@ -25,7 +26,7 @@ def parseArguments(args): return parser.parse_args(args) -def statsFromTimings(dir): +def stats_from_timings(dir): stats = {} assert os.path.isdir(dir) file = os.path.join(dir, "precice-B-events.json") @@ -55,7 +56,7 @@ def statsFromTimings(dir): return stats -def memoryStats(dir): +def memory_stats(dir): stats = {} assert os.path.isdir(dir) for P in "A", "B": @@ -73,23 +74,23 @@ def memoryStats(dir): def main(argv): - args = parseArguments(argv[1:]) + args = parse_arguments(argv[1:]) globber = os.path.join(args.outdir, "**", "*.stats.json") - statFiles = [ + stat_files = [ os.path.relpath(path, args.outdir) for path in glob.iglob(globber, recursive=True) ] allstats = [] fields = [] - for file in statFiles: + for file in stat_files: print("Found: " + file) casedir = os.path.join(args.outdir, os.path.dirname(file)) parts = os.path.normpath(file).split(os.sep) assert len(parts) >= 5 mapping, constraint, meshes, ranks, _ = parts[-5:] meshA, meshB = meshes.split("-") - ranksA, ranksB = meshes.split("-") + ranksA, ranksB = ranks.split("-") with open(os.path.join(args.outdir, file), "r") as jsonfile: stats = json.load(jsonfile) @@ -99,8 +100,8 @@ def main(argv): stats["mesh B"] = meshB stats["ranks A"] = ranksA stats["ranks B"] = ranksB - stats.update(statsFromTimings(casedir)) - stats.update(memoryStats(casedir)) + stats.update(stats_from_timings(casedir)) + stats.update(memory_stats(casedir)) allstats.append(stats) if not fields: fields += stats.keys() @@ -113,6 +114,4 @@ def main(argv): if __name__ == "__main__": - import sys - sys.exit(main(sys.argv)) diff --git a/tools/mapping-scaling-tester/generate_mapping_test.py b/tools/mapping-scaling-tester/generate_mapping_test.py new file mode 100755 index 00000000..3b17f06c --- /dev/null +++ b/tools/mapping-scaling-tester/generate_mapping_test.py @@ -0,0 +1,103 @@ +#! 
/usr/bin/env python3
+
+import argparse
+import json
+import os
+import sys
+
+from common import *
+
+
+def generate_cases(setup):
+    meshes = setup["general"]["meshes"]
+    network = setup["general"].get("network", "lo")
+    syncmode = setup["general"].get("syncmode", "false")
+
+    cases = []
+    for group in setup["groups"]:
+        for name, mapping in group["mapping"]["cases"].items():
+            for constraint in group["mapping"]["constraints"]:
+                for inname in group["meshes"]["A"]:
+                    infile = meshes["A"][inname]
+                    for outname in group["meshes"]["B"]:
+                        outfile = meshes["B"][outname]
+                        for ranksA, ranksB in zip(
+                            as_iter(setup["general"]["ranks"].get("A", 1)),
+                            as_iter(setup["general"]["ranks"].get("B", 1)),
+                        ):
+                            cases.append(
+                                {
+                                    "function": setup["general"]["function"],
+                                    "mapping": {
+                                        "name": name,
+                                        "kind": mapping["kind"],
+                                        "constraint": constraint,
+                                        "options": mapping.get("options", ""),
+                                    },
+                                    "A": {
+                                        "ranks": ranksA,
+                                        "mesh": {
+                                            "name": inname,
+                                            "file": infile,
+                                        },
+                                    },
+                                    "B": {
+                                        "ranks": ranksB,
+                                        "mesh": {
+                                            "name": outname,
+                                            "file": outfile,
+                                        },
+                                    },
+                                    "network": network,
+                                    "syncmode": syncmode,
+                                }
+                            )
+
+    return cases
+
+
+def parse_arguments(args):
+    parser = argparse.ArgumentParser(description="Generator for a mapping test suite")
+    parser.add_argument(
+        "-o",
+        "--outdir",
+        default="cases",
+        help="Directory to generate the test suite in.",
+    )
+    parser.add_argument(
+        "-s",
+        "--setup",
+        type=argparse.FileType("r"),
+        default="setup.json",
+        help="The test setup file to use.",
+    )
+    parser.add_argument(
+        "-t",
+        "--template",
+        type=argparse.FileType("r"),
+        default="config-template.xml",
+        help="The precice config template to use.",
+    )
+    return parser.parse_args(args)
+
+
+def main(argv):
+    # Parse the input arguments
+    args = parse_arguments(argv[1:])
+    # Parse the json file using the json module
+    setup = json.load(args.setup)
+    # Read the xml-template file
+    template = args.template.read()
+    # Generate the actual cases
+    cases = generate_cases(setup)
+    outdir = os.path.normpath(args.outdir)
+    if os.path.isdir(outdir):
+        print('Warning: outdir "{}" already exists.'.format(outdir))
+
+    setup_cases(outdir, template, cases)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
diff --git a/tools/mapping-scaling-tester/generate_scale_test.py b/tools/mapping-scaling-tester/generate_scale_test.py
new file mode 100644
index 00000000..fd2ad72b
--- /dev/null
+++ b/tools/mapping-scaling-tester/generate_scale_test.py
@@ -0,0 +1,116 @@
+#! /usr/bin/env python3
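+# Builds the weak-scaling case list: the mesh size per rank stays fixed while
+# the rank counts of participants A and B grow together (zip of the rank lists).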
+
+import argparse
+import json
+import os
+import sys
+
+from common import *
+
+
+def generate_cases(setup):
+    network = setup["general"].get("network", "lo")
+    syncmode = setup["general"].get("syncmode", "false")
+    test_location = setup["general"]["testlocation"]
+    cases = []
+    for group in setup["groups"]:
+        for name, mapping in group["mapping"]["cases"].items():
+            for constraint in group["mapping"]["constraints"]:
+                for number_of_points_per_rank_A in as_iter(
+                    setup["general"]["numberofpointsperrank"]["A"]
+                ):
+                    for number_of_points_per_rank_B in as_iter(
+                        setup["general"]["numberofpointsperrank"]["B"]
+                    ):
+                        for ranksA, ranksB in zip(
+                            as_iter(setup["general"]["ranks"].get("A", 1)),
+                            as_iter(setup["general"]["ranks"].get("B", 1)),
+                        ):
+                            cases.append(
+                                {
+                                    "function": setup["general"]["function"],
+                                    "mapping": {
+                                        "name": name,
+                                        "kind": mapping["kind"],
+                                        "constraint": constraint,
+                                        "options": mapping.get("options", ""),
+                                    },
+                                    "A": {
+                                        "ranks": ranksA,
+                                        "mesh": {
+                                            "name": str(
+                                                number_of_points_per_rank_A * ranksA
+                                            ),
+                                            "file": os.path.join(
+                                                test_location,
+                                                f"meshA-{number_of_points_per_rank_A*ranksA}.vtk",
+                                            ),
+                                        },
+                                    },
+                                    "B": {
+                                        "ranks": ranksB,
+                                        "mesh": {
+                                            "name": str(
+                                                number_of_points_per_rank_B * ranksB
+                                            ),
+                                            "file": os.path.join(
+                                                test_location,
+                                                f"meshB-{number_of_points_per_rank_B*ranksB}.vtk",
+                                            ),
+                                        },
+                                    },
+                                    "network": network,
+                                    "syncmode": syncmode,
+                                }
+                            )
+
+    return cases
+
+
+def parse_arguments(args):
+    parser = argparse.ArgumentParser(description="Generator for a weak scaling test suite")
+    parser.add_argument(
+        "-o",
+        "--outdir",
+        default="cases",
+        help="Directory to generate the test suite in.",
+    )
+    parser.add_argument(
+        "-s",
+        "--setup",
+        type=argparse.FileType("r"),
+        default="setup.json",
+        help="The test setup file to use.",
+    )
+    parser.add_argument(
+        "-t",
+        "--template",
+        type=argparse.FileType("r"),
+        default="config-template.xml",
+        help="The precice config template to use.",
+    )
+    return parser.parse_args(args)
+
+
+def main(argv):
+    # Parse the input arguments
+    args = parse_arguments(argv[1:])
+    # Parse the json file using the json module
+    setup = json.load(args.setup)
+    # Read the xml-template file
+    template = args.template.read()
+    # Generate the actual cases
+    cases = generate_cases(setup)
+    outdir = os.path.normpath(args.outdir)
+    if os.path.isdir(outdir):
+        print('Warning: outdir "{}" already exists.'.format(outdir))
+
+    setup_cases(outdir, template, cases)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
diff --git a/tools/mapping-tester/local_rbf.py b/tools/mapping-scaling-tester/local_rbf.py
similarity index 100%
rename from tools/mapping-tester/local_rbf.py
rename to tools/mapping-scaling-tester/local_rbf.py
diff --git a/tools/mapping-tester/plots/paperplot.py b/tools/mapping-scaling-tester/plots/paperplot.py
similarity index 100%
rename from tools/mapping-tester/plots/paperplot.py
rename to tools/mapping-scaling-tester/plots/paperplot.py
diff --git a/tools/mapping-tester/plots/plot.py b/tools/mapping-scaling-tester/plots/plot.py
similarity index 100%
rename from tools/mapping-tester/plots/plot.py
rename to tools/mapping-scaling-tester/plots/plot.py
diff --git a/tools/mapping-tester/preparemeshes.py b/tools/mapping-scaling-tester/prepare_mapping_test_meshes.py
similarity index 69%
rename from tools/mapping-tester/preparemeshes.py
rename to tools/mapping-scaling-tester/prepare_mapping_test_meshes.py
index 78e77d99..0481a0ac 100755
--- a/tools/mapping-tester/preparemeshes.py
+++ b/tools/mapping-scaling-tester/prepare_mapping_test_meshes.py
@@ -8,7 +8,7 @@ import subprocess
 
 
-def parseArguments(args):
+def parse_arguments(args):
     parser = argparse.ArgumentParser(description="Prepares meshes for a test suite")
     parser.add_argument(
         "-o",
@@ -30,23 +30,23 @@ def parseArguments(args):
     return parser.parse_args(args)
 
 
-def prepareMainMesh(meshdir, name, file, function, force=False):
-    mainDir = os.path.join(meshdir, name, "1")
-    mainMesh = os.path.join(mainDir, name + ".vtu")
-    print("Preparing Mesh {} in {}".format(name, mainDir))
+def prepare_main_mesh(meshdir, name, file, function, force=False):
+    main_dir = os.path.join(meshdir, name, "1")
+    main_mesh = os.path.join(main_dir, name + ".vtu")
+    print("Preparing Mesh {} in {}".format(name, main_dir))
 
-    if os.path.isdir(mainDir):
+    if os.path.isdir(main_dir):
         if force:
             print(" Regenerating the mesh.")
-            shutil.rmtree(mainDir)
+            shutil.rmtree(main_dir)
         else:
             print(" Mesh already exists.")
             return
 
-    os.makedirs(mainDir, exist_ok=True)
+    os.makedirs(main_dir, exist_ok=True)
     data_name = "{}".format(function)
-    [pathName, tmpfilename] = os.path.split(os.path.normpath(mainMesh))
+    [path_name, tmpfilename] = os.path.split(os.path.normpath(main_mesh))
     subprocess.run(
         [
             "precice-aste-evaluate",
@@ -57,45 +57,45 @@ def prepareMainMesh(meshdir, name, file, function, force=False):
             "--data",
             data_name,
             "--directory",
-            pathName,
+            path_name,
             "-o",
             tmpfilename,
         ]
     )
 
 
-def preparePartMesh(meshdir, name, p, force=False):
+def prepare_part_mesh(meshdir, name, p, force=False):
     if p == 1:
         return
 
-    mainMesh = os.path.join(meshdir, name, "1", name + ".vtu")
-    partDir = os.path.join(meshdir, name, str(p))
-    partMesh = os.path.join(partDir, name)
-    print("Preparing Mesh {} with {} paritions in {}".format(name, p, partDir))
+    main_mesh = os.path.join(meshdir, name, "1", name + ".vtu")
+    part_dir = os.path.join(meshdir, name, str(p))
+    part_mesh = os.path.join(part_dir, name)
+    print("Preparing Mesh {} with {} partitions in {}".format(name, p, part_dir))
 
-    if os.path.isdir(partDir):
+    if os.path.isdir(part_dir):
         if force:
             print(" Regenerating the partitioned mesh.")
-            shutil.rmtree(partDir)
+            shutil.rmtree(part_dir)
         else:
             print(" Partitioned mesh already exists.")
             return
 
-    os.makedirs(partDir, exist_ok=True)
-    [pathName, tmpfilename] = os.path.split(os.path.normpath(partMesh))
+    os.makedirs(part_dir, exist_ok=True)
+    [path_name, tmpfilename] = os.path.split(os.path.normpath(part_mesh))
     subprocess.run(
         [
            "precice-aste-partition",
            "--mesh",
-            mainMesh,
+            main_mesh,
            "--algorithm",
            "meshfree",
            "-o",
-            partMesh,
+            part_mesh,
            "--directory",
-            pathName,
+            path_name,
            "-n",
            str(p),
        ]
    )
@@ -103,7 +103,7 @@ def main(argv):
-    args = parseArguments(argv[1:])
+    args = parse_arguments(argv[1:])
     setup = json.load(args.setup)
     outdir = os.path.normpath(args.outdir)
@@ -125,10 +125,10 @@ def main(argv):
         if not os.path.isfile(os.path.expandvars(file)):
             raise Exception(f'\033[91m Unable to open file called "{file}".\033[0m')
 
-        prepareMainMesh(meshdir, name, file, function, args.force)
+        prepare_main_mesh(meshdir, name, file, function, args.force)
 
         for p in partitions:
-            preparePartMesh(meshdir, name, p, args.force)
+            prepare_part_mesh(meshdir, name, p, args.force)
 
     return 0
diff --git a/tools/mapping-scaling-tester/prepare_scale_meshes.py b/tools/mapping-scaling-tester/prepare_scale_meshes.py
new file mode 100644
index 00000000..00bc1ac3
--- /dev/null
+++ b/tools/mapping-scaling-tester/prepare_scale_meshes.py
@@ -0,0 +1,166 @@
+#! /usr/bin/env python3
+
+import argparse
+import json
+import os
+import shutil
+import subprocess
+import sys
+
+
+def parse_arguments(args):
+    parser = argparse.ArgumentParser(description="Prepares meshes for a test suite")
+    parser.add_argument(
+        "-o",
+        "--outdir",
+        default="cases",
+        help="Directory to generate the test suite in.",
+    )
+    parser.add_argument(
+        "-s",
+        "--setup",
+        type=argparse.FileType("r"),
+        default="setup.json",
+        help="The test setup file to use.",
+    )
+    parser.add_argument(
+        "-f", "--force", action="store_true", help="Remove existing meshes."
+    )
+    parser.add_argument("-g", "--generator", help="The generator to use.")
+    parser.add_argument("-d", "--dim", default=3, help="The dimension of the mesh.")
+    parser.add_argument("--seed", default=0, help="The seed for the mesh generator.")
+    return parser.parse_args(args)
+
+
+def prepare_main_mesh(meshdir, name, file, function, force=False):
+    main_dir = os.path.join(meshdir, name, "1")
+    main_mesh = os.path.join(main_dir, name + ".vtu")
+    print("Preparing Mesh {} in {}".format(name, main_dir))
+
+    if os.path.isdir(main_dir):
+        if force:
+            print(" Regenerating the mesh.")
+            shutil.rmtree(main_dir)
+        else:
+            print(" Mesh already exists.")
+
+            return
+
+    os.makedirs(main_dir, exist_ok=True)
+    data_name = "{}".format(function)
+    [path_name, tmpfilename] = os.path.split(os.path.normpath(main_mesh))
+    subprocess.run(
+        [
+            "precice-aste-evaluate",
+            "--mesh",
+            os.path.expandvars(file),
+            "--function",
+            function,
+            "--data",
+            data_name,
+            "--directory",
+            path_name,
+            "-o",
+            tmpfilename,
+        ]
+    )
+
+
+def prepare_part_mesh(meshdir, name, p, force=False):
+
+    if p == 1:
+        return
+
+    main_mesh = os.path.join(meshdir, name, "1", name + ".vtu")
+    part_dir = os.path.join(meshdir, name, str(p))
+    part_mesh = os.path.join(part_dir, name)
+    print("Preparing Mesh {} with {} partitions in {}".format(name, p, part_dir))
+
+    if os.path.isdir(part_dir):
+        if force:
+            print(" Regenerating the partitioned mesh.")
+            shutil.rmtree(part_dir)
+        else:
+            print(" Partitioned mesh already exists.")
+
+            return
+
+    os.makedirs(part_dir, exist_ok=True)
+    [path_name, _] = os.path.split(os.path.normpath(part_mesh))
+    subprocess.run(
+        [
+            "precice-aste-partition",
+            "--mesh",
+            main_mesh,
+            "--algorithm",
+            "meshfree",
+            "-o",
+            part_mesh,
+            "--directory",
+            path_name,
+            "-n",
+            str(p),
+        ]
+    )
+
+
+def create_mesh(halton_generator_script, file, points, dim=3, seed=0):
+    # Ensure the output directory exists
+    directory = os.path.dirname(file)
+    if not os.path.exists(directory):
+        os.makedirs(directory, exist_ok=True)
+
+    subprocess.run(
+        [
+            "python3",
+            os.path.abspath(halton_generator_script),
+            "--mesh",
+            file,
+            "--numpoints",
+            str(points),
+            "--dimension",
+            str(dim),
+            "--seed",
+            str(seed),
+        ]
+    )
+
+
+def main(argv):
+    args = parse_arguments(argv[1:])
+    setup = json.load(args.setup)
+    outdir = os.path.normpath(args.outdir)
+
+    if os.path.isdir(outdir):
+        print('Warning: outdir "{}" already exists.'.format(outdir))
+    meshdir = os.path.join(outdir, "meshes")
+    function = setup["general"]["function"]
+
+    partitions = set(
+        [int(rank) for pranks in setup["general"]["ranks"].values() for rank in pranks]
+    )
+    for points in set(
+        [
+            setup["general"]["numberofpointsperrank"]["A"],
+            setup["general"]["numberofpointsperrank"]["B"],
+        ]
+    ):
+        for partition in partitions:
+            filepoints = int(points) * int(partition)
+            file = os.path.join(meshdir, f"{filepoints}.vtk")
+
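+            # Weak scaling keeps the load per rank constant: the full mesh for
+            # this rank count therefore has filepoints = points * partition nodes.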
+            create_mesh(args.generator, file, filepoints, args.dim, args.seed)
+
+            if not os.path.isfile(os.path.expandvars(file)):
+                raise Exception(f'\033[91m Unable to open file called "{file}".\033[0m')
+            prepare_main_mesh(meshdir, str(filepoints), file, function, args.force)
+
+            prepare_part_mesh(meshdir, str(filepoints), partition, args.force)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
diff --git a/tools/mapping-tester/setup-turbine-big.json b/tools/mapping-scaling-tester/setup-turbine-big.json
similarity index 100%
rename from tools/mapping-tester/setup-turbine-big.json
rename to tools/mapping-scaling-tester/setup-turbine-big.json
diff --git a/tools/mapping-tester/setup-turbine-small-tps.json b/tools/mapping-scaling-tester/setup-turbine-small-tps.json
similarity index 100%
rename from tools/mapping-tester/setup-turbine-small-tps.json
rename to tools/mapping-scaling-tester/setup-turbine-small-tps.json
diff --git a/tools/mapping-tester/setup-turbine-small.json b/tools/mapping-scaling-tester/setup-turbine-small.json
similarity index 100%
rename from tools/mapping-tester/setup-turbine-small.json
rename to tools/mapping-scaling-tester/setup-turbine-small.json
diff --git a/tools/mapping-tester/setup.json b/tools/mapping-scaling-tester/setup.json
similarity index 100%
rename from tools/mapping-tester/setup.json
rename to tools/mapping-scaling-tester/setup.json