@@ -3,38 +3,38 @@ import os
 import pandas as pd
 import seaborn as sns
 import matplotlib.pyplot as plt
+import matplotlib.ticker
+from scipy import stats
+import numpy as np

 import plot_utils

-def validate_and_cleanup_df(df):
-    # rename_dict = {
-    #     "checkpoint_enabled": "ckpt",
-    #     "voltage_check_enabled": "VC",
-    #     "pass_counter_enabled": "PC"
-    # }
-    # df = df.rename(columns=rename_dict)
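+# Fixed display order for configs and benchmarks, shared across all figures.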
+config_order = ["counter", "unroll", "adaptive"]
+bench_order = ["fft", "basicmath", "crc", "sha", "stringsearch", "aes", "conv2d", "matmul"]

-    mask = df["is_correct"] == False
-    if mask.any():
-        print("drop rows with incorrect output")
-        print(df[mask])
+def validate_and_cleanup_df(df, config_name):
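+    # Cleans one (benchmark, config) run set: outlier removal and name normalization.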

-    df = df[~mask]
-    assert(df["is_correct"].all())
-
-    # def category_mapper(row):
-    #     if row["ckpt"] == False:
-    #         return "Baseline"
-    #     else:
-    #         if row["VC"] == False:
-    #             assert(row["PC"] == True)
-    #             return "PC"
-    #         else:
-    #             assert(row["PC"] == False)
-    #             return "VC"
-
-    # category_column_name = "config"
-    # df[category_column_name] = df.apply(category_mapper, axis=1)
+    # mask = df["is_correct"] == False
+    # if mask.any():
+    #     print(f"({config_name}) drop {mask.sum()} rows with incorrect output")
+    #     df = df[~mask]
+    # assert(df["is_correct"].all())
+
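+    # Flag runs whose time_taken z-score exceeds 4.35 as outliers.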
+    bench_name = df.iloc[0, 0]
+    z_score = stats.zscore(df["time_taken"])
+    mask2 = np.abs(z_score) > 4.35
+    if mask2.any():
+        print(f"({bench_name}, {config_name}) remove {mask2.sum()} outliers")
+        time = float(df[mask2]["time_taken"].iloc[0])
+        average = df["time_taken"].mean()
+        print(f"time: {time:.2f}, average: {average:.2f}")
+
+    df = df[~mask2].copy()
+
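+    # Raw names look like "vFFT": drop the leading prefix character and lowercase.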
+    def rename_bench_name(row):
+        return row["bench_name"].lower()[1:]
+
+    df["bench_name"] = df.apply(rename_bench_name, axis=1)

     def process_stats(row):
         lines = row["stats"]
@@ -47,7 +47,7 @@ def validate_and_cleanup_df(df):
             if key.startswith("checkpoint executed"):
                 result["ckpt_exec"] = val
         return result
-
+
     df_stats = df.apply(process_stats, axis=1, result_type="expand")
     df = pd.concat([df, df_stats], axis=1)

@@ -80,8 +80,8 @@ def get_base_df():
     output_dir = "/home/ybkim/workspace/imc/imc_freertos_app_m33/imc/exprs/date2025/3_adaptive/output"

     all_dfs = []
-    configs = ["pass_count", "adaptive"]
-    drop_index = list(range(0, 3))
+    configs = ["pass_count", "adaptive", "unroll"]
+    drop_index = list(range(0, 1))
     drop_index = None
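+    # drop_index = None overrides the range above, so no runs are dropped by index.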

     for benchmark in benchmarks:
@@ -94,73 +94,243 @@ def get_base_df():
                 orig_df = pickle.load(f)
             if drop_index and config_name == "adaptive":
                 orig_df = orig_df.drop(drop_index)
-            df = validate_and_cleanup_df(orig_df)
+            df = validate_and_cleanup_df(orig_df, config_name)
             df["config"] = config_name
             all_dfs.append(df)

     orig_df = pd.concat(all_dfs)
+
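+    # Map raw config identifiers to the display names used in the figures.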
+    def rename_config(row):
+        name = row["config"]
+        d = {
+            "adaptive": "adaptive",
+            "pass_count": "counter",
+            "unroll": "unroll"
+        }
+        return d[name]
+    orig_df["config"] = orig_df.apply(rename_config, axis=1)
+
     return orig_df


-def draw_checkpoint_count():
-    plot_utils.set_theme_seaborn(kind="line")
-    orig_df = get_base_df()
+def remove_error_stats(orig_df):
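+    # Filter out rows whose checkpoint counters are implausibly large or negative.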
     mask = orig_df["ckpt_trig"].abs() > 1e7
     df = orig_df[~mask]
-    mask = orig_df["ckpt_exec"].abs() > 1e6
-    df = orig_df[~mask]
-    benchmarks = list(df["bench_name"].unique())
-    print(benchmarks)
-    fig_size = (12, 3.5 * len(benchmarks))
-    n_rows = len(benchmarks)
-    n_cols = 1
-    hspace = 0.08
+    mask = df["ckpt_exec"].abs() > 1e6
+    df = df[~mask]
+    mask = df["ckpt_trig"] < 0
+    df = df[~mask]
+    mask = df["ckpt_exec"] < 0
+    df = df[~mask]
+    return df
+
+
+def draw_checkpoint_count():
+    rc = {
+        "lines.linewidth": 1.5,
+        "axes.titlepad": 10,
+        "ytick.major.pad": -4,
+        "ytick.labelsize": 20,
+        "legend.fontsize": 23,
+        "axes.labelsize": 25,
+    }
+    plot_utils.set_theme_seaborn(kind="line", rc_custom=rc)
+
+    orig_df = get_base_df()
+    # mask = orig_df["ckpt_trig"].abs() > 1e7
+    # df = orig_df[~mask]
+    # mask = orig_df["ckpt_exec"].abs() > 1e6
+    # df = orig_df[~mask]
+    # mask = orig_df["ckpt_trig"] < 0
+    # df = orig_df[~mask]
+    # mask = orig_df["ckpt_exec"] < 0
+    # df = orig_df[~mask]
+    df = remove_error_stats(orig_df)
+
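+    # One subplot per benchmark on a fixed 2x4 grid, adaptive config only.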
+    benchmarks = bench_order
+    fig_size = (13, 5.5)
+    n_rows = 2
+    n_cols = 4
+    hspace = 0.05
     fig = plt.figure(figsize=fig_size)
-    axes = fig.subplots(n_rows, n_cols, sharex=False, gridspec_kw={"hspace": hspace})
-    for (i, (key, d_orig)) in enumerate(df.groupby(["bench_name"])):
-        d = d_orig.reset_index()
-        # mask = d["config"] == "adaptive"
-        # d = d[mask]
-        y = "ckpt_exec"
-        # y = "ckpt_trig"
+    axes = fig.subplots(n_rows, n_cols, sharex=True, gridspec_kw={"hspace": hspace})
+    df = df[df["config"] == "adaptive"]
+    df = df.rename(columns={
+        "ckpt_trig": "trig",
+        "ckpt_exec": "exec",
+    })
+    df = df.drop(columns=["config", "time_taken"])
+    for i, benchmark in enumerate(benchmarks):
+        d = df[df["bench_name"] == benchmark].reset_index()
+        print(d)
+
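+        # Melt to long format: one row per (execution index, counter type) for the hue split.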
+        id_vars = ["index", "bench_name"]
+        value_vars = ["trig", "exec"]
+        d = d.melt(id_vars=id_vars, value_vars=value_vars, value_name="count", var_name="type")
+        # d = d.reset_index().melt(id_vars=["bench_name"], var_name="config", value_name="normalized")
         print(d)

         ax = axes.reshape(-1)[i]
         sns.lineplot(
             data=d,
             x="index",
-            y=y,
-            hue="config",
-            ax=ax
+            y="count",
+            hue="type",
+            ax=ax,
         )
-        ax.set_title(key[0])
+        ax.set_title(benchmark)
+        ax.set_xlabel("")
+        ax.set_ylabel("")
+        if i >= n_cols:
+            ax.set_xlabel("Execution #")
+        if i in [0, 4]:
+            ax.set_ylabel("Count (k)")
+        ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos: f"{x/1000:.0f}"))
+        ax.yaxis.grid(visible=True, which="both")
+        ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(3))
+        ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(3))
+
+        ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(2))
+        ax.xaxis.grid(visible=True, which="both")
+
+        if i == 0:
+            ax.legend(
+                ncol=1, loc="upper left", bbox_to_anchor=(0.33, 0.995), labelspacing=0.3,
+                handlelength=1,
+            )
+            # ax.get_legend().remove()
+        else:
+            ax.get_legend().remove()
+
+    return fig


 def draw_graph():
     orig_df = get_base_df()
-    df = orig_df.groupby(["bench_name", "config"]).mean()
-    df = df.reset_index()
+    # df = orig_df.groupby(["bench_name", "config"]).mean()
+    df = orig_df.reset_index()
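+    # Keep per-run rows; catplot aggregates itself and the min/max whiskers need raw values.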

-    # df = df.pivot(index="bench_name", columns="config", values="time_taken")

-    # df2 = pd.DataFrame()
-    # df2["PC"] = df["PC"] / df["Baseline"]
-    # df2["VC"] = df["VC"] / df["Baseline"]
+    err_kws = {
+        "linewidth": 1.2,
+        "color": "black",
+    }

-    # df2 = df2.reset_index().melt(id_vars=["bench_name"], var_name="config", value_name="normalized")
-    # df2 = df2.sort_values(by="bench_name")
-    # df2 = df2[df2["config"] == "PC"]
-    # df2 = df2[df2["config"] == "VC"]
-    # df2 = df2[df2["bench_name"] == "vFFT"]
+    rc = {
+        "legend.fontsize": 24,
+    }
+
+    plot_utils.set_theme_seaborn(rc_custom=rc)

-    plot_utils.set_theme_seaborn()
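+    # The errorbar callable reduces each group to (min, max), so whiskers span the full run range.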
     g = sns.catplot(
         data=df,
         kind="bar",
         x="bench_name",
-        y = "time_taken",
+        y="time_taken",
         hue="config",
-        aspect=2.3
+        legend="brief",
+        legend_out=False,
+        aspect=2.5,
+        errorbar=lambda x: (x.min(), x.max()),
+        hue_order=config_order,
+        order=bench_order,
+        err_kws=err_kws,
+        capsize=0.2,
     )
+    ax = g.ax
+    ax.set_ylim([0, 35])
+    ax.set_xlabel("Benchmark")
+    ax.set_ylabel("Time (s)")
+    g.ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
+
+    ax = g.facet_axis(0, 0)
+
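+    # Label only bars above 50 s, whose tops are cut off by the 35 s y-limit.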
+    for i, c in enumerate(ax.containers):
+        labels = [f"{(v.get_height()):.0f}" if v.get_height() > 50 else "" for v in c]
+        padding = 125
+        ax.bar_label(c, labels=labels, label_type="center", padding=padding, size=16)
+
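+    # Hatch bars per config so the grouped bars stay distinguishable.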
+    num_categories = len(df.bench_name.unique())
+    num_hues = len(df.config.unique())
+    plot_utils.draw_hatch(ax, num_categories, num_hues)
+
+    ax.legend(ncol=1, loc="upper right", bbox_to_anchor=(0.265, 0.995), labelspacing=0.3)
+
+    sns.despine(g.fig, right=False, top=False)

     return g
+
+def draw_checkpoint_frequency_comparison():
+    orig_df = get_base_df()
+    df = remove_error_stats(orig_df)
+    df = df.drop(columns=["time_taken"])
+    df = df.groupby(["bench_name", "config"]).mean()
+    df = df.reset_index()
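+    # df now holds mean ckpt_trig/ckpt_exec counts per (benchmark, config).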
+
+    print(df)
+    base_df = df
+    adaptive_df = base_df[base_df["config"] == "adaptive"]
+    adaptive_df = adaptive_df.set_index("bench_name")
+
+    rc = {
+        "legend.fontsize": 24,
+    }
+    plot_utils.set_theme_seaborn(rc_custom=rc)
+
+    benchmarks = bench_order
+    fig_size = (10, 6.2)
+    n_rows = 2
+    n_cols = 1
+    hspace = 0.05
+    fig = plt.figure(figsize=fig_size)
+    axes = fig.subplots(n_rows, n_cols, squeeze=True, sharex=False, gridspec_kw={"hspace": hspace})
+
+    configs = ["counter", "unroll"]
+    for i, config in enumerate(configs):
+        d = base_df[base_df["config"] == config]
+        d = d.set_index("bench_name")
+
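+        # Normalize adaptive counts by this config's counts: values below 1 mean
+        # the adaptive scheme triggers/executes fewer checkpoints.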
+        df2 = pd.DataFrame()
+        df2["trig"] = adaptive_df["ckpt_trig"] / d["ckpt_trig"]
+        df2["exec"] = adaptive_df["ckpt_exec"] / d["ckpt_exec"]
+        df2 = df2.reset_index()
+        df2 = df2.melt(id_vars=["bench_name"], value_vars=["trig", "exec"], var_name="type", value_name="normalized")
+
+        print(axes)
+        ax = axes[i]
+
+        g = sns.barplot(
+            data=df2,
+            x="bench_name",
+            y="normalized",
+            hue="type",
+            legend="brief",
+            order=bench_order,
+            ax=ax,
+        )
+        ax.set_title(f"vs. {config}", fontsize=22)
+        ax.set_ylim([0, 0.8])
+        ax.set_xlabel("Benchmark")
+        ax.set_ylabel("Ratio")
+        # ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
+        ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(5))
+        ax.yaxis.grid(visible=True, which="both")
+
+        for c in ax.containers:
+            labels = [f"{(v.get_height()):.2f}" if v.get_height() > 0.8 else "" for v in c]
+            padding = 80
+            ax.bar_label(c, labels=labels, label_type="center", padding=padding, size=16)
+
+        num_categories = len(df.bench_name.unique())
+        num_hues = len(df.config.unique())
+        plot_utils.draw_hatch(ax, num_categories, num_hues)
+
+        if i == 0:
+            ax.set_xlabel("")
+            ax.legend(ncol=1, loc="upper right", bbox_to_anchor=(1, 1), labelspacing=0.3)
+        else:
+            ax.get_legend().remove()
+
+    sns.despine(fig, right=False, top=False)
+
+    return fig