Provide an initial version of the parse_libcamera_tuning script. This currently only operates on an existing PiSP tuning file and outputs a set of images combined into a PDF report. Signed-off-by: Kieran Bingham # Please enter the commit message for your changes. Lines starting # with '#' will be ignored, and an empty message aborts the commit. # # Date: Wed Jun 4 14:19:30 2025 +0100 # # interactive rebase in progress; onto 6394313648bb # Last command done (1 command done): # reword 4bb39a901225 Initial version supporting AWB graph # Next command to do (1 remaining command): # pick 313cd427d5cd Fix output directory # You are currently editing a commit while rebasing branch 'main' on '6394313648bb'. # # Changes to be committed: # new file: parse_libcamera_tuning.py # # Untracked files: # output-imx219/ # output-imx335 pisp/ # output-imx519/ # output-ov5647/ # output-ov5647_noir/ # output-ov64a40/ # output-pisp_imx415/ # # ------------------------ >8 ------------------------ # Do not modify or remove the line above. # Everything below it will be ignored. 
"""Parse a libcamera PiSP tuning file and generate a PDF tuning report.

Reads the JSON tuning file named on the command line, renders one PNG
per recognised algorithm block (rpi.awb, rpi.alsc, rpi.agc, rpi.noise,
rpi.denoise) into an output directory, then collects the images into a
single PDF report.
"""

import glob
import json
import math
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fpdf import FPDF
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib

# All PNGs and the final PDF are written below this directory.
OUTPUT_DIR = "output"


def plot_awb_ct_curve(awb_data):
    """Plot red and blue AWB gains against colour temperature.

    awb_data is the "rpi.awb" object; its "ct_curve" entry is a flat
    list of (colour temperature, red gain, blue gain) triples.
    """
    ct_curve = awb_data.get("ct_curve", [])
    # Stop at len - 2 so a truncated curve cannot raise IndexError.
    ct_data = [
        {
            "Colour Temperature (K)": ct_curve[i],
            "Red Gain": ct_curve[i + 1],
            "Blue Gain": ct_curve[i + 2],
        }
        for i in range(0, len(ct_curve) - 2, 3)
    ]
    if not ct_data:
        return  # Empty or malformed curve: nothing to plot.
    df_ct = pd.DataFrame(ct_data)
    plt.figure()
    plt.plot(df_ct["Colour Temperature (K)"], df_ct["Red Gain"],
             label="Red Gain", marker='o')
    plt.plot(df_ct["Colour Temperature (K)"], df_ct["Blue Gain"],
             label="Blue Gain", marker='o')
    plt.title("AWB Gains vs Colour Temperature")
    plt.xlabel("Colour Temperature (K)")
    plt.ylabel("Gain")
    plt.grid(True)
    plt.legend()
    plt.tight_layout()
    # Kept for backwards compatibility with standalone callers; main()
    # also creates the directory before any plotter runs.
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    plt.savefig(os.path.join(OUTPUT_DIR, "awb_ct_curve.png"))
    plt.close()


def _alsc_table_shape(length):
    """Return a (rows, cols) shape for a flat ALSC table of `length` cells.

    Prefers a square grid when `length` is a perfect square; otherwise
    falls back to the 16x12 grid used by Raspberry Pi ALSC calibration
    tables (192 cells), else a single row.
    NOTE(review): confirm the grid dimensions against the tuning format
    for each supported platform.
    """
    side = math.isqrt(length)
    if side * side == length:
        return side, side
    if length == 16 * 12:
        return 12, 16
    return 1, length


def plot_alsc_shading_tables(alsc_data):
    """Render each Cr calibration table from rpi.alsc as a 3D surface."""
    for calibration in alsc_data.get("calibrations_Cr", []):
        ct = calibration["ct"]
        table = calibration["table"]
        # The original square reshape raised ValueError for the common
        # non-square (16x12) ALSC grid; derive a valid shape instead.
        rows, cols = _alsc_table_shape(len(table))
        table_2d = np.array(table).reshape((rows, cols))
        x, y = np.meshgrid(np.arange(cols), np.arange(rows))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(x, y, table_2d, cmap='viridis')
        ax.set_title(f'ALSC Shading Table at CT={ct}K')
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Gain')
        plt.tight_layout()
        plt.savefig(os.path.join(OUTPUT_DIR, f"alsc_shading_table_{int(ct)}K.png"))
        plt.close(fig)


def plot_agc_exposure_modes(agc_data):
    """Plot the shutter/gain ramp of every AGC exposure mode.

    Newer tuning files wrap the AGC configuration in a "channels" list;
    older files keep "exposure_modes" at the top level, so fall back to
    treating agc_data itself as the only channel. All channels are
    plotted; the first keeps the historical filename.
    """
    channels = agc_data.get("channels", [agc_data])
    for index, channel in enumerate(channels):
        for mode_name, mode_data in channel.get("exposure_modes", {}).items():
            plt.figure()
            plt.plot(mode_data["shutter"], mode_data["gain"], marker='o')
            plt.title(f"AGC Exposure Mode: {mode_name.capitalize()}")
            plt.xlabel("Shutter (microseconds)")
            plt.ylabel("Gain")
            plt.grid(True)
            plt.tight_layout()
            suffix = "" if index == 0 else f"_ch{index}"
            plt.savefig(os.path.join(OUTPUT_DIR,
                                     f"agc_exposure_mode_{mode_name}{suffix}.png"))
            plt.close()


def plot_noise_model(noise_data):
    """Plot the linear noise model (variance vs signal) from rpi.noise."""
    slope = noise_data.get("reference_slope", 0)
    constant = noise_data.get("reference_constant", 0)
    signal = np.linspace(0, 1024, 100)
    variance = slope * signal + constant
    plt.figure()
    plt.plot(signal, variance)
    plt.title("Noise Model: Variance vs Signal")
    plt.xlabel("Signal Level")
    plt.ylabel("Variance")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, "noise_model.png"))
    plt.close()


def plot_grouped_bars(data_dict, title, keys):
    """Draw a grouped bar chart of per-mode parameter values.

    data_dict maps mode name -> list of values ordered to match `keys`.
    The output filename is derived from `title`.
    """
    df = pd.DataFrame(data_dict, index=keys).T
    df.plot(kind='bar')
    plt.title(title)
    plt.ylabel("Value")
    plt.grid(True)
    plt.xticks(rotation=0)
    plt.tight_layout()
    plt.savefig(os.path.join(OUTPUT_DIR, f"{title.replace(' ', '_').lower()}.png"))
    plt.close()


def plot_denoise_parameters(denoise_data):
    """Chart SDN/CDN/TDN denoise parameters for each denoise mode.

    Modes absent from the tuning file are skipped instead of raising
    KeyError as the original hard-coded lookup did.
    """
    modes = [m for m in ("normal", "hdr", "night") if m in denoise_data]
    sdn_keys = ["deviation", "strength", "deviation2", "deviation_no_tdn", "strength_no_tdn"]
    cdn_keys = ["deviation", "strength"]
    tdn_keys = ["deviation", "threshold"]
    sdn_data = {mode: [denoise_data[mode]["sdn"][k] for k in sdn_keys] for mode in modes}
    cdn_data = {mode: [denoise_data[mode]["cdn"][k] for k in cdn_keys] for mode in modes}
    tdn_data = {mode: [denoise_data[mode]["tdn"][k] for k in tdn_keys] for mode in modes}
    plot_grouped_bars(sdn_data, "Spatial Denoise (SDN) Parameters", sdn_keys)
    plot_grouped_bars(cdn_data, "Colour Denoise (CDN) Parameters", cdn_keys)
    plot_grouped_bars(tdn_data, "Temporal Denoise (TDN) Parameters", tdn_keys)


def generate_pdf_report(output_path="output/libcamera_tuning_report.pdf"):
    """Collect every generated PNG into a single PDF report.

    Images are added one per page in sorted (alphabetical) order, each
    captioned with its filename.
    """
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()
    pdf.set_font("Arial", "B", 16)
    pdf.cell(0, 10, "Libcamera Tuning File Report", ln=True, align='C')
    pdf.ln(10)
    for image in sorted(glob.glob(os.path.join(OUTPUT_DIR, "*.png"))):
        pdf.add_page()
        pdf.set_font("Arial", size=12)
        pdf.cell(0, 10, os.path.basename(image), ln=True)
        pdf.image(image, x=10, w=180)
    pdf.output(output_path)


def main(json_path):
    """Parse the tuning file and emit per-algorithm graphs plus a PDF."""
    with open(json_path, "r") as f:
        data = json.load(f)
    # Create the output directory up front so every plotter can save
    # regardless of which algorithm appears first in the tuning file
    # (previously only the AWB plotter created it).
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    # Map algorithm key -> plotter; unrecognised algorithms are ignored.
    plotters = {
        "rpi.awb": plot_awb_ct_curve,
        "rpi.alsc": plot_alsc_shading_tables,
        "rpi.agc": plot_agc_exposure_modes,
        "rpi.noise": plot_noise_model,
        "rpi.denoise": plot_denoise_parameters,
    }
    for algo in data["algorithms"]:
        for key, plotter in plotters.items():
            if key in algo:
                plotter(algo[key])
                break
    generate_pdf_report()


if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        print("Usage: python parse_libcamera_tuning.py <tuning_file.json>")
        sys.exit(1)
    main(sys.argv[1])