| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153 |
import pickle
import tempfile
import time
from pathlib import Path

import pandas as pd

import imc_utils.serial_device
from imc_utils.build_config.cortex_m33 import BuildConfigM33
from imc_utils.build_config.test_env import TestEnv
from imc_utils.pps_e36311a import PPS_E36311A
from imc_utils.serial_watch import SerialWatcher
# Firmware workspace root, NVM-erase helper ELF, and OpenOCD board config used
# to build and flash the M33 application (paths are specific to this host).
WORKSPACE_ROOT = "/home/ybkim/workspace/imc/imc_freertos_app_m33"
NVM_RESET_BIN = f"{WORKSPACE_ROOT}/imc/utils/nvm_reset.elf"
OPENOCD_SCRIPT = f"{WORKSPACE_ROOT}/imc_freertos_app_m33.cfg"
def main():
    """Run every benchmark on the board and save the measured records.

    For each benchmark: build a firmware image with count-based compiler
    checkpoints, flash it onto a freshly erased NVM at full supply power,
    then starve the supply current so the board repeatedly browns out and
    exercises the checkpoint/recovery path while SerialWatcher collects
    timing records from the serial output.
    """
    pps = PPS_E36311A()
    config = get_default_build_config()
    benchmarks = [
        "vBasicMath", "vCrc", "vFFT", "vSha", "vStringSearch",
        "vMatMul", "vConv2d", "vAes",
    ]
    for benchmark in benchmarks:
        # Override the defaults: checkpoint every N passes instead of on a
        # voltage threshold (see get_default_build_config for the defaults).
        config.bench_name = benchmark
        config.insert_compiler_checkpoints = True
        config.use_checkpoint_pass_counter = True
        config.use_checkpoint_voltage_check = False
        config.checkpoint_pass_count = 1000
        config.bench_infinite_loop = True

        # Full power (3.3 V, 100 mA limit) on channel 1 while flashing.
        pps.set_voltage(3.3, 1)
        pps.set_current(0.1, 1)
        pps.output_on(1)

        env = TestEnv(WORKSPACE_ROOT, NVM_RESET_BIN, OPENOCD_SCRIPT)
        with tempfile.TemporaryDirectory() as build_dir:
            binary = env.build_binary(config, build_dir)
            env.clear_nvm_and_load_binary(binary, resume=False)

        # Drop the current limit to 15 mA so the board intermittently loses
        # power during the run, then detach the debugger and let it run.
        pps.set_current(0.015, 1)
        time.sleep(1)
        env.resume_board(terminate=True)

        total_iterations = 5
        records = SerialWatcher(benchmark, total_iterations).run()
        df = pd.DataFrame(records)
        save_records(benchmark, df)
        print(df)
def measure_execution_time(bench_name, total_iterations):
    """Time benchmark runs by parsing the board's serial output.

    Blocks until ``total_iterations`` "Start benchmark" / "End benchmark"
    pairs have been observed on the serial port.  For each completed run a
    record is collected with the wall-clock timing, the number of power-loss
    recoveries seen during that run, the "(OUT)" payload lines, and a
    correctness flag from check_output_is_correct().

    Args:
        bench_name: benchmark identifier stored in each record.
        total_iterations: number of completed runs to wait for.

    Returns:
        list[dict]: one record per finished benchmark iteration.
    """
    ser = imc_utils.serial_device.get_serial()
    num_finished = 0
    start_detected = False
    time_takens = []   # per-run durations (kept for interactive inspection)
    time_total = 0
    outputs = []       # "(OUT) ..." payloads of the run in progress
    records = []
    num_recovery = 0   # recoveries seen during the run in progress
    ser.reset_input_buffer()  # discard stale bytes from a previous session
    while num_finished < total_iterations:
        if not ser.readable():
            continue
        res = ser.readline()
        try:
            # Drop the trailing newline character.  [:-1] replaces the
            # original [: len(res) - 1], which sliced the decoded *string*
            # by the raw *byte* length and mis-truncated multi-byte input.
            line = res.decode()[:-1]
        except UnicodeDecodeError:
            # fix: was a bare `except:` that also swallowed KeyboardInterrupt
            print("readline() exception")
            continue
        if line.startswith("hardfault"):
            print("\nHARD FAULT")
        if not start_detected:
            if line.startswith("Start benchmark"):
                print("\nbenchmark start detected")
                t_start = time.time()
                start_detected = True
        elif line.startswith("End benchmark"):
            t_end = time.time()
            t_diff = t_end - t_start
            time_takens.append(t_diff)
            time_total += t_diff
            num_finished += 1
            print(
                f"\nbenchmark end detected, time: {t_diff:.2f} secs, finished: {num_finished}, average: {time_total/num_finished:.2f} secs"
            )
            start_detected = False
            record = {
                "bench_name": bench_name,
                "start": t_start,
                "end": t_end,
                "time_taken": t_diff,
                "recovery": num_recovery,
                "outputs": outputs,
                "is_correct": check_output_is_correct(bench_name, outputs),
            }
            records.append(record)
            # Reset per-run accumulators for the next iteration.
            outputs = []
            num_recovery = 0
        elif line.startswith("(OUT)"):
            print(line)
            outputs.append(line[6:].strip())
        elif line.startswith("Start recovery"):
            num_recovery += 1
            print(f"recovery: #{num_recovery}", end="\r")
    return records
def get_default_build_config():
    """Return a BuildConfigM33 populated with the project's default options.

    Defaults use voltage-triggered checkpoints; callers override individual
    fields as needed.
    """
    defaults = {
        "bench_name": "vBasicMath",
        "insert_compiler_checkpoints": True,
        "enable_extension": True,
        "use_checkpoint_pass_counter": False,
        "use_checkpoint_voltage_check": True,
        "bench_infinite_loop": True,
        "checkpoint_pass_count": 100,
        "print_recovery_message": True,
    }
    config = BuildConfigM33()
    for attr, value in defaults.items():
        setattr(config, attr, value)
    return config
# Expected single-line output per benchmark.  Benchmarks absent from this
# table can never be judged correct.
correct_outputs = {
    "vBasicMath": "Sum: -9313",
    "vCrc": "210692533",
    "vFFT": "2807, 915",
    "vSha": "4926a88d 0ca714f4 a9ebc1eb def37b8e 3911ee0f",
}


def check_output_is_correct(bench_name, outputs):
    """Return True iff *outputs* is exactly one line that matches the
    expected output for *bench_name* (unknown benchmarks are never correct).
    """
    if not outputs or bench_name not in correct_outputs:
        return False
    return len(outputs) == 1 and outputs[0] == correct_outputs[bench_name]
def save_records(bench_name, df):
    """Pickle the records DataFrame to ``output/<bench_name>.pickle``.

    Creates the ``output`` directory if it does not already exist — the
    original open() crashed with FileNotFoundError on a fresh checkout.
    """
    out_dir = Path("output")
    out_dir.mkdir(parents=True, exist_ok=True)
    with open(out_dir / f"{bench_name}.pickle", "wb") as f:
        pickle.dump(df, f)
# Script entry point: run the full benchmark sweep when executed directly.
if __name__ == "__main__":
    main()
|