"""
Intermodulation Distortion (IMD) Analyzer for Vinyl Playback
=============================================================
Measures IMD products from two-tone test tracks on the
CBS Laboratories STR-112 (or compatible) test record.

Both channels (L and R) are always analysed independently.
The report contains separate sections for each channel, and
four plots are generated: spectrum and bar chart for L and R.

One or two WAV files may be supplied:
  -fileVertical   : recorded from a vertically modulated track
                    (for VTA / SRA optimisation)
  -fileHorizontal : recorded from a laterally modulated track
                    (for LTA / zenith-error optimisation)

When both files are provided the analyser runs both analyses.
The options-file key 'mode' controls which analyses are performed:
  BOTH       -- both files required (default)
  VERTICAL   -- only -fileVertical is required
  HORIZONTAL -- only -fileHorizontal is required

The CBS STR-112 IMD test tracks are mono signals recorded
identically on both channels.  The analyser verifies this and
warns if the channels differ significantly.

Each test track contains exactly two tones: one low-frequency
test tone (fL) and one high-frequency pilot tone (fH).  The fL
tone is always louder than the fH tone.

The vertical and horizontal files may have different fundamental
tone frequencies, and L and R channels may have different prominent
frequencies (e.g. on old worn records).  Therefore f_low and f_high
accept 1, 2, or 4 comma-separated values:

  1 value  -- used for all directions and channels
  2 values -- vertical, horizontal  (L and R get the same value)
  4 values -- verticalL, verticalR, horizontalL, horizontalR

If only one value is given it is used for both directions and
both channels.

The script performs robust automatic tone discovery:
  - It scans the spectrum to find the two most prominent tones.
  - If two distinct tones cannot be safely identified, the script
    exits with a clear diagnostic message.
  - If two tones are found but they are far from the user-specified
    fL and fH, the script exits and reports the detected frequencies
    for both the vertical and horizontal files (and separately for
    L and R if they differ) so the user can rerun with corrected
    values if needed.

Frequencies need not be exact -- old records with speed error are
accommodated by a configurable acceptance tolerance.

By default, all reported levels are normalized so that the tallest
test tone (fL or fH) sits at 0 dB.  This removes gain-chain
dependence and makes results comparable across different recordings.
Set normalize_levels = false in the options file to report raw
(absolute) dB levels instead.

Usage:
  python IMDImproved.py
         -fileVertical = <vertical.wav>
         -fileHorizontal = <horizontal.wav>
         [-equipment = <equipment.txt>]
         [-options = <options.txt>]

  # Vertical only (mode = VERTICAL in options file):
  python IMDImproved.py
         -fileVertical = <vertical.wav>
         [-equipment = <equipment.txt>]
         [-options = <options.txt>]

  # Horizontal only (mode = HORIZONTAL in options file):
  python IMDImproved.py
         -fileHorizontal = <horizontal.wav>
         [-equipment = <equipment.txt>]
         [-options = <options.txt>]

Arguments:
  -fileVertical = <file>    WAV from vertical track
  -fileHorizontal = <file>  WAV from lateral track
  -equipment = <file>       Equipment description   (optional)
  -options = <file>         Analysis options file    (optional)

Spaces around '=' are allowed and ignored.

When mode = BOTH, -fileVertical must come before -fileHorizontal.
When mode = VERTICAL or HORIZONTAL, only the relevant file argument
is required; the other is ignored if given.

If -options is not given, built-in defaults are used.
If -equipment is not given, no equipment info is shown on the plot.
"""

__version__ = "2.0"

import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator, FuncFormatter
from scipy.io import wavfile
from scipy.signal import windows
from datetime import datetime


# ---------------------------------------------------------------------------
# Constants  (defaults -- may be overridden by an options file)
# ---------------------------------------------------------------------------

# Analysis mode: BOTH, VERTICAL, HORIZONTAL
MODE = 'BOTH'

# Expected test-tone frequencies (CBS STR-112)
# Nested dict: direction -> channel -> frequency.
# Allows L and R to have independently specified expected frequencies.
F_HIGH = {
    'vertical':   {'L': 4000.0, 'R': 4000.0},
    'horizontal': {'L': 4000.0, 'R': 4000.0},
}
F_LOW = {
    'vertical':   {'L': 200.0, 'R': 200.0},
    'horizontal': {'L': 200.0, 'R': 200.0},
}

# Nominal fH recording level relative to standard (CBS STR-112: -6 dB)
FH_NOMINAL_DB = -6.0

# Known CBS STR-112 fL level variants (dB above standard recording level).
CBS_VERTICAL_LEVELS   = [6, 9, 12]
CBS_HORIZONTAL_LEVELS = [6, 9, 12, 15, 18]

# User-specified track levels (None = auto-detect).
TRACK_LEVEL_VERTICAL   = None
TRACK_LEVEL_HORIZONTAL = None

# Minimum separation (dB) between a detected tone and the noise floor.
TONE_PROMINENCE_DB = 20.0

# Maximum allowed deviation (Hz) between a detected tone and the
# user-specified frequency in blind discovery before refusing.
TONE_ACCEPTANCE_TOLERANCE = 50.0

# Frequency search tolerance for STFT windows (Hz).
FREQ_TOLERANCE = 30.0

# Wide search tolerance for initial tone-pair validation (Hz).
FREQ_TOLERANCE_WIDE = 200.0

# STFT parameters: window length (seconds), fractional overlap between
# consecutive windows, and the minimum pilot level (dB) for a window
# to be used for IMD measurement.
STFT_WINDOW_SEC = 0.2
STFT_OVERLAP = 0.75
MIN_PILOT_DB = -40.0

# dB floor: lower clamp applied to all dB conversions (avoids log(0)).
DB_FLOOR = -100.0

# Analysis channel (kept for options-file compatibility)
CHANNEL = 'L'

# IMD product orders to report
MAX_IMD_ORDER = 5

# Plot frequency range (per direction)
PLOT_FREQ_MAX_VERTICAL   = 12000.0
PLOT_FREQ_MAX_HORIZONTAL = 12000.0

# Channel-identity check threshold: linear RMS difference between L and R
# below which the two channels are treated as identical (0.001 = -60 dB).
CHANNEL_IDENTITY_THRESHOLD = 0.001

# Level normalization: when True the tallest test tone is reported at 0 dB;
# when False raw (absolute) dB levels are reported.
NORMALIZE_LEVELS = True

# Error report files
USAGE_ERROR_FILE = "imd_usage_error_report.txt"
ANALYSIS_ERROR_FILE = "imd_analysis_errors.txt"

# Valid modes
_VALID_MODES = ('BOTH', 'VERTICAL', 'HORIZONTAL')


# ---------------------------------------------------------------------------
# Direction / channel helpers
# ---------------------------------------------------------------------------
def _f_high_for(direction, channel='L'):
    """Expected high (pilot) tone frequency for *direction* / *channel*."""
    per_channel = F_HIGH[direction]
    return per_channel[channel]


def _f_low_for(direction, channel='L'):
    """Expected low test-tone frequency for *direction* / *channel*."""
    per_channel = F_LOW[direction]
    return per_channel[channel]


def _freq_summary(fd):
    """Format a frequency dict for display."""
    vl, vr = fd['vertical']['L'], fd['vertical']['R']
    hl, hr = fd['horizontal']['L'], fd['horizontal']['R']
    if vl == vr and hl == hr:
        return f"[{vl:.0f}, {hl:.0f}]  [vert, horiz]"
    return (f"[{vl:.0f}, {vr:.0f}, {hl:.0f}, {hr:.0f}]  "
            f"[vertL, vertR, horizL, horizR]")


def _plot_freq_max_for(direction):
    """Return the plot frequency limit for the given direction label.

    Both the bare 'vertical' key and the decorated plot-title form are
    treated as vertical; anything else falls back to horizontal.
    """
    vertical_labels = ('vertical', 'VERTICAL -- VTA/SRA')
    if direction in vertical_labels:
        return PLOT_FREQ_MAX_VERTICAL
    return PLOT_FREQ_MAX_HORIZONTAL


# ---------------------------------------------------------------------------
# Error handling
# ---------------------------------------------------------------------------
# Non-fatal per-file errors accumulated by _log_analysis_error and
# flushed to ANALYSIS_ERROR_FILE by _flush_analysis_errors.
_analysis_errors = []
_tone_location_suggestions = {}   # direction -> channel -> (det_fL, det_fH)


def _die(message):
    """Report a fatal error on the console and in USAGE_ERROR_FILE, then exit(1)."""
    text = f"ERROR: {message}"
    print(text)
    # Persist the message so it survives the console window closing.
    with open(USAGE_ERROR_FILE, 'w', encoding='utf-8') as out:
        out.write(text + "\n")
    sys.exit(1)


def _log_analysis_error(wav_path, message):
    """Print and accumulate a non-fatal, per-file analysis error."""
    name = os.path.basename(wav_path)
    entry = f"ANALYSIS ERROR [{name}]: {message}"
    print(f"\n  *** {entry}")
    _analysis_errors.append(entry)


def _log_tone_suggestion(direction, channel, detected_fL, detected_fH):
    """Record a tone-location suggestion for the combined rerun block.

    Stores the detected pair (rounded to whole Hz) under
    _tone_location_suggestions[direction][channel]; the nested dict is
    created on demand via setdefault.
    """
    _tone_location_suggestions.setdefault(direction, {})[channel] = (
        round(detected_fL), round(detected_fH))


def _format_rerun_suggestion():
    """Build the combined rerun suggestion text.

    Reads the module-level _tone_location_suggestions collected by
    _log_tone_suggestion and returns "" when there is nothing to report.
    The layout depends on whether any direction detected different
    frequencies on L and R (4-value syntax) or not (1/2-value syntax).
    """
    if not _tone_location_suggestions:
        return ""

    lines = []
    lines.append("-" * 60)
    lines.append("SUGGESTED OPTIONS-FILE VALUES FOR RERUN")
    lines.append("-" * 60)
    lines.append("")

    # Check whether any direction has L != R
    any_lr_diff = False
    for d in _tone_location_suggestions:
        ch_dict = _tone_location_suggestions[d]
        if 'L' in ch_dict and 'R' in ch_dict:
            if ch_dict['L'] != ch_dict['R']:
                any_lr_diff = True

    if any_lr_diff:
        lines.append("  NOTE: L and R channels have different prominent")
        lines.append("  frequencies.  You may need two separate runs,")
        lines.append("  or use the 4-value syntax (see below).")
        lines.append("")

        # Per-direction, per-channel listing of the detected pairs.
        for d in sorted(_tone_location_suggestions.keys()):
            d_label = d.capitalize()
            ch_dict = _tone_location_suggestions[d]
            for ch in ('L', 'R'):
                if ch in ch_dict:
                    det_fL, det_fH = ch_dict[ch]
                    lines.append(
                        f"  {d_label} file, channel {ch}:")
                    lines.append(f"    f_low  = {det_fL}")
                    lines.append(f"    f_high = {det_fH}")
                    lines.append("")

        # Also show the 4-value syntax
        lines.append("  Or use the 4-value syntax for a single run")
        lines.append("  (verticalL, verticalR, horizontalL, horizontalR):")
        lines.append("")
        fL_parts = []
        fH_parts = []
        for d in ('vertical', 'horizontal'):
            for ch in ('L', 'R'):
                if d in _tone_location_suggestions and \
                   ch in _tone_location_suggestions[d]:
                    det_fL, det_fH = _tone_location_suggestions[d][ch]
                else:
                    # No detection recorded for this slot -- fall back to
                    # the currently configured expected frequencies.
                    det_fL = int(F_LOW[d][ch])
                    det_fH = int(F_HIGH[d][ch])
                fL_parts.append(str(det_fL))
                fH_parts.append(str(det_fH))
        lines.append(f"    f_low  = {', '.join(fL_parts)}")
        lines.append(f"    f_high = {', '.join(fH_parts)}")
        lines.append("")

    else:
        # All channels that reported agree -- simple output
        active_dirs = sorted(_tone_location_suggestions.keys())

        if len(active_dirs) == 1:
            # Single direction -- a single f_low/f_high pair suffices.
            d = active_dirs[0]
            ch_dict = _tone_location_suggestions[d]
            ch = next(iter(ch_dict))
            det_fL, det_fH = ch_dict[ch]
            lines.append(
                f"  Rerun with the following in the options file "
                f"({d} file):")
            lines.append("")
            lines.append(f"    f_low  = {det_fL}")
            lines.append(f"    f_high = {det_fH}")
            lines.append("")
        else:
            # Both directions reported -- use the 2-value (vertical,
            # horizontal) syntax, filling in expected defaults if needed.
            det_v = _tone_location_suggestions.get('vertical', {})
            det_h = _tone_location_suggestions.get('horizontal', {})
            v_ch = next(iter(det_v)) if det_v else 'L'
            h_ch = next(iter(det_h)) if det_h else 'L'
            v_fL, v_fH = det_v.get(v_ch,
                (int(F_LOW['vertical']['L']), int(F_HIGH['vertical']['L'])))
            h_fL, h_fH = det_h.get(h_ch,
                (int(F_LOW['horizontal']['L']), int(F_HIGH['horizontal']['L'])))
            lines.append(
                "  Rerun with the following in the options file "
                "(vertical, horizontal):")
            lines.append("")
            lines.append(f"    f_low  = {v_fL}, {h_fL}")
            lines.append(f"    f_high = {v_fH}, {h_fH}")
            lines.append("")

    return "\n".join(lines)


def _flush_analysis_errors():
    """Write all accumulated analysis errors to ANALYSIS_ERROR_FILE.

    Returns True when errors were written, False when there were none.
    With no errors, a stale report file from a previous run is removed.
    """
    if not _analysis_errors:
        if os.path.isfile(ANALYSIS_ERROR_FILE):
            os.remove(ANALYSIS_ERROR_FILE)
        return False

    with open(ANALYSIS_ERROR_FILE, 'w', encoding='utf-8') as out:
        stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        out.write(f"IMD Analysis Errors  ({stamp})\n")
        out.write("=" * 60 + "\n\n")
        for entry in _analysis_errors:
            out.write(entry + "\n\n")

        rerun_text = _format_rerun_suggestion()
        if rerun_text:
            out.write(rerun_text + "\n")

    print(f"\nAnalysis errors written to: {ANALYSIS_ERROR_FILE}")
    return True


def _print_usage():
    """Print usage help to console AND to a file, then exit.

    The same help text is written to USAGE_ERROR_FILE so it can be
    reviewed after the console closes.  Always exits with status 1.
    """
    text = (
        "Usage:\n"
        "  python IMDImproved.py\n"
        "         -fileVertical = <vertical.wav>\n"
        "         -fileHorizontal = <horizontal.wav>\n"
        "         [-equipment = <equipment.txt>]\n"
        "         [-options = <options.txt>]\n"
        "\n"
        "File arguments:\n"
        "  -fileVertical = <file>    WAV from vertical-modulation track\n"
        "                            (for VTA / SRA optimisation)\n"
        "  -fileHorizontal = <file>  WAV from lateral-modulation track\n"
        "                            (for LTA / zenith-error optimisation)\n"
        "\n"
        "  Which files are required depends on the 'mode' option:\n"
        "    mode = BOTH        both files required (default)\n"
        "    mode = VERTICAL    only -fileVertical required\n"
        "    mode = HORIZONTAL  only -fileHorizontal required\n"
        "\n"
        "Optional (any order):\n"
        "  -equipment = <file>   Equipment info file (shown on plots)\n"
        "  -options = <file>     Options file (overrides built-in defaults)\n"
        "\n"
        "Spaces around '=' are allowed:\n"
        "  -fileVertical=track01.wav\n"
        "  -fileVertical = track01.wav\n"
        "\n"
        "Both L and R channels are always analysed independently.\n"
        "Each WAV should contain a single test track from the record.\n"
        "Each track has exactly two tones: one fL and one fH.\n"
        "\n"
        "f_low and f_high accept 1, 2, or 4 comma-separated values:\n"
        "  1 value  -- used for all directions and channels\n"
        "  2 values -- vertical, horizontal  (L and R same)\n"
        "  4 values -- verticalL, verticalR, horizontalL, horizontalR\n"
        "\n"
        "The fL level variant (+6, +9, +12 dB etc.) is auto-detected\n"
        "or can be set explicitly via the options file.\n"
        "\n"
        "If -options is not given, built-in defaults are used.\n"
        "See imd_analysis_options.txt for available options.\n"
    )
    print(text)
    # Persist the help text for post-mortem review.
    with open(USAGE_ERROR_FILE, 'w', encoding='utf-8') as fh:
        fh.write("No valid arguments provided.\n\n")
        fh.write(text)
    sys.exit(1)


# ---------------------------------------------------------------------------
# Analysis failure exception
# ---------------------------------------------------------------------------
class AnalysisError(Exception):
    """Raised when a WAV file cannot be analysed (non-fatal).

    Base class for ToneLocationError; intended to be caught per file so
    the run can continue with the remaining files/channels.
    """
    pass


class ToneLocationError(AnalysisError):
    """
    Raised when two tones are found but far from expected frequencies.

    Attributes:
        detected_fL : float  -- low tone actually found in the spectrum
        detected_fH : float  -- high tone actually found in the spectrum
        direction   : str    -- 'vertical' or 'horizontal'
    """
    def __init__(self, message, detected_fL, detected_fH, direction):
        super().__init__(message)
        self.direction = direction
        self.detected_fH = detected_fH
        self.detected_fL = detected_fL


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _to_float64(audio_data):
    """Convert integer PCM samples to float64 in [-1, 1]."""
    if audio_data.dtype == np.int16:
        return audio_data.astype(np.float64) / 32768.0
    elif audio_data.dtype == np.int32:
        return audio_data.astype(np.float64) / 2147483648.0
    elif audio_data.dtype in (np.float32, np.float64):
        return audio_data.astype(np.float64)
    else:
        raise AnalysisError(f"Unsupported sample format: {audio_data.dtype}")


def _safe_db(linear):
    """Convert linear magnitude to dB, clamped to DB_FLOOR."""
    floor_lin = 10.0 ** (DB_FLOOR / 20.0)
    with np.errstate(divide='ignore', invalid='ignore'):
        level_db = 20.0 * np.log10(np.maximum(linear, floor_lin))
    return np.maximum(level_db, DB_FLOOR)


def _check_channel_identity(audio):
    """Report how closely the two channels of a stereo array match.

    Mono input trivially reports equal channels with zero difference.
    """
    if audio.ndim == 1:
        return {
            'is_stereo': False,
            'channels_equal': True,
            'rms_diff': 0.0,
            'rms_diff_db': DB_FLOOR,
            'peak_diff': 0.0,
        }

    residual = audio[:, 0] - audio[:, 1]
    rms = float(np.sqrt(np.mean(residual ** 2)))
    peak = float(np.max(np.abs(residual)))
    # Tiny offset avoids log10(0) for byte-identical channels.
    rms_db = 20.0 * np.log10(rms + 1e-30)

    return {
        'is_stereo': True,
        'channels_equal': rms < CHANNEL_IDENTITY_THRESHOLD,
        'rms_diff': rms,
        'rms_diff_db': max(rms_db, DB_FLOOR),
        'peak_diff': peak,
    }


def _select_channel(audio, channel):
    """Select a specific channel from a stereo (or mono) array."""
    if audio.ndim == 1:
        return audio
    if audio.shape[1] != 2:
        raise AnalysisError(
            f"Expected mono or stereo, got {audio.shape[1]} channels")

    if channel == 'L':
        return audio[:, 0]
    elif channel == 'R':
        return audio[:, 1]
    else:  # MIX
        return (audio[:, 0] + audio[:, 1]) / 2.0


def _estimate_track_level(fl_level_db, fh_level_db, known_levels):
    """Estimate which CBS STR-112 track level variant was recorded.

    Compares the measured fL-fH level difference with each variant's
    expected difference (level - FH_NOMINAL_DB) and picks the closest.

    Returns:
        (best_level, expected_diff, measured_diff, is_reliable) where
        is_reliable is True when the match is within 3 dB.

    Raises:
        ValueError: if *known_levels* is empty.
    """
    if not known_levels:
        raise ValueError("known_levels must not be empty")

    measured_diff = fl_level_db - fh_level_db

    # Closest variant by expected fL-fH difference (first wins on ties,
    # matching the original strict-< scan).
    best_level = min(
        known_levels,
        key=lambda level: abs(measured_diff - (level - FH_NOMINAL_DB)))

    expected_diff = best_level - FH_NOMINAL_DB
    best_dist = abs(measured_diff - expected_diff)
    # Within 3 dB of a known variant counts as a confident match.
    is_reliable = best_dist <= 3.0

    return best_level, expected_diff, measured_diff, is_reliable


def _file_mod_date(path):
    """Return the modification date of *path* as 'YYYY-MM-DD HH:MM', or None."""
    if path is None or not os.path.isfile(path):
        return None
    ts = os.path.getmtime(path)
    return datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M')


def _build_file_date_info(file_v, file_h, options_path, equipment_path,
                          run_timestamp):
    """Build a dict of file-date information for report and plots."""
    files = []
    if file_v is not None:
        files.append(('Vertical WAV',
                       os.path.basename(file_v),
                       _file_mod_date(file_v)))
    if file_h is not None:
        files.append(('Horizontal WAV',
                       os.path.basename(file_h),
                       _file_mod_date(file_h)))
    if options_path is not None:
        files.append(('Options',
                       os.path.basename(options_path),
                       _file_mod_date(options_path)))
    if equipment_path is not None:
        files.append(('Equipment',
                       os.path.basename(equipment_path),
                       _file_mod_date(equipment_path)))

    return {
        'run_date': run_timestamp,
        'files': files,
    }


def _format_date_info_text(date_info):
    """Format the file-date info block as a multi-line string."""
    lines = [f"Analysis run:  {date_info['run_date']}"]
    for label, basename, mod_date in date_info['files']:
        date_str = mod_date if mod_date else '(unknown)'
        lines.append(f"{label}:  {basename}  [{date_str}]")
    return "\n".join(lines)


# ---------------------------------------------------------------------------
# IMD product definitions
# ---------------------------------------------------------------------------
def _build_imd_products(f_low, f_high, max_order):
    """Build a list of IMD product definitions up to *max_order*."""
    products = []

    for n in range(1, max_order):
        f_plus = f_high + n * f_low
        f_minus = f_high - n * f_low
        order = n + 1

        if f_minus > 0:
            products.append((f"fH-{n}fL", f_minus, order))
        products.append((f"fH+{n}fL", f_plus, order))

    for n in range(2, max_order + 1):
        products.append((f"{n}fL", n * f_low, n))

    for n in range(2, 4):
        products.append((f"{n}fH", n * f_high, n))

    return products


# ---------------------------------------------------------------------------
# Spectral analysis -- windowed FFT of a single block
# ---------------------------------------------------------------------------
def _spectrum_of_block(signal, sample_rate):
    """Compute single-sided amplitude spectrum using a Hann window."""
    n = len(signal)
    win = windows.hann(n, sym=False)
    coherent_gain = np.sum(win) / n

    fft_result = np.fft.rfft(signal * win)
    magnitude = np.abs(fft_result) * 2.0 / (n * coherent_gain)
    frequencies = np.fft.rfftfreq(n, 1.0 / sample_rate)

    magnitude[0] /= 2.0
    if n % 2 == 0:
        magnitude[-1] /= 2.0

    return frequencies, magnitude


def _find_peak_near(frequencies, magnitude_db, target_freq, tolerance):
    """Find the spectral peak closest to *target_freq* within +/- *tolerance*."""
    mask = (frequencies >= target_freq - tolerance) & \
           (frequencies <= target_freq + tolerance)
    if not np.any(mask):
        return None, None

    sub_db = magnitude_db[mask]
    sub_freq = frequencies[mask]
    idx = np.argmax(sub_db)
    return sub_freq[idx], sub_db[idx]


# ---------------------------------------------------------------------------
# Robust automatic tone discovery
# ---------------------------------------------------------------------------
def _estimate_noise_floor_db(magnitude_db, percentile=50):
    """Estimate the noise floor as the given percentile of the spectrum."""
    return float(np.percentile(magnitude_db, percentile))


def _find_prominent_peaks(frequencies, magnitude_db, noise_floor_db,
                          min_prominence_db, min_freq=50.0,
                          max_freq=None, min_separation_hz=50.0):
    """Find all spectral peaks above min_prominence_db over the noise floor."""
    if max_freq is None:
        max_freq = frequencies[-1]

    band_mask = (frequencies >= min_freq) & (frequencies <= max_freq)
    band_freqs = frequencies[band_mask]
    band_db = magnitude_db[band_mask]

    if len(band_db) < 3:
        return []

    peaks = []
    for i in range(1, len(band_db) - 1):
        if band_db[i] > band_db[i - 1] and band_db[i] > band_db[i + 1]:
            prominence = band_db[i] - noise_floor_db
            if prominence >= min_prominence_db:
                peaks.append((band_freqs[i], band_db[i], prominence))

    peaks.sort(key=lambda p: p[1], reverse=True)

    merged = []
    for freq, level, prom in peaks:
        too_close = False
        for mf, _, _ in merged:
            if abs(freq - mf) < min_separation_hz:
                too_close = True
                break
        if not too_close:
            merged.append((freq, level, prom))

    return merged


def _discover_tone_pair(signal, sample_rate, wav_path, direction, channel):
    """
    Scan the spectrum to discover the two test tones.

    *direction* is 'vertical' or 'horizontal'.
    *channel* is 'L' or 'R'.

    Strategy (two stages):
      1. GUIDED: find the prominent peak closest in frequency to each
         expected value (within FREQ_TOLERANCE_WIDE).  If both found,
         accept them unconditionally.
      2. BLIND: if guided fails, pick the two loudest peaks.  If those
         are far from expected, raise ToneLocationError with a rerun
         suggestion.

    Returns a dict with keys: fL_detected, fL_db, fH_detected, fH_db,
    speed_pct, noise_floor_db, all_peaks.

    Raises:
        AnalysisError     : no tones / only one prominent tone found.
        ToneLocationError : blind pair found but far from expected values.
    """
    expected_fL = _f_low_for(direction, channel)
    expected_fH = _f_high_for(direction, channel)
    dir_label = direction.capitalize()

    # Discovery uses up to the first 2 s (but at least 16384 samples).
    n_detect = min(len(signal), max(int(2.0 * sample_rate), 16384))
    block = signal[:n_detect]
    freqs, mag = _spectrum_of_block(block, sample_rate)
    mag_db = _safe_db(mag)

    noise_floor_db = _estimate_noise_floor_db(mag_db)
    nyquist = sample_rate / 2.0

    print(f"    Tone discovery ({dir_label}, ch {channel}): "
          f"analysing {n_detect} samples "
          f"({n_detect / sample_rate:.2f} s), "
          f"noise floor ~ {noise_floor_db:.1f} dB")
    print(f"    Expected: fL = {expected_fL:.0f} Hz, "
          f"fH = {expected_fH:.0f} Hz")

    # Candidate tones: prominent local maxima up to the plot range
    # (capped at Nyquist).
    peaks = _find_prominent_peaks(
        freqs, mag_db, noise_floor_db,
        min_prominence_db=TONE_PROMINENCE_DB,
        min_freq=50.0,
        max_freq=min(_plot_freq_max_for(direction), nyquist),
        min_separation_hz=50.0)

    if len(peaks) == 0:
        raise AnalysisError(
            f"No prominent tones found in the spectrum.  "
            f"Noise floor at {noise_floor_db:.1f} dB; required prominence "
            f">= {TONE_PROMINENCE_DB:.0f} dB.  "
            f"The file may not contain an IMD test signal.")

    # Show the strongest candidates for diagnostics.
    for i, (pf, pl, pp) in enumerate(peaks[:6]):
        print(f"    Peak #{i+1}: {pf:.1f} Hz  level {pl:.1f} dB  "
              f"prominence {pp:.1f} dB")

    # ------------------------------------------------------------------
    # STAGE 1 - GUIDED: find the prominent peak closest in frequency
    #           to each expected value (within FREQ_TOLERANCE_WIDE).
    # ------------------------------------------------------------------
    wide_tol = FREQ_TOLERANCE_WIDE
    guided_fL = None
    guided_fH = None
    best_fL_dist = wide_tol + 1
    best_fH_dist = wide_tol + 1

    for pf, pl, pp in peaks:
        dist_fL = abs(pf - expected_fL)
        if dist_fL <= wide_tol and dist_fL < best_fL_dist:
            guided_fL = (pf, pl, pp)
            best_fL_dist = dist_fL

        dist_fH = abs(pf - expected_fH)
        if dist_fH <= wide_tol and dist_fH < best_fH_dist:
            guided_fH = (pf, pl, pp)
            best_fH_dist = dist_fH

    if guided_fL is not None and guided_fH is not None:
        fL_det, fL_db = guided_fL[0], guided_fL[1]
        fH_det, fH_db = guided_fH[0], guided_fH[1]

        # Keep fL as the lower of the two, in case expectations crossed.
        if fL_det > fH_det:
            fL_det, fL_db, fH_det, fH_db = fH_det, fH_db, fL_det, fL_db

        fL_deviation = abs(fL_det - expected_fL)
        fH_deviation = abs(fH_det - expected_fH)
        speed_pct = ((fH_det - expected_fH) / expected_fH) * 100.0

        if fL_deviation > TONE_ACCEPTANCE_TOLERANCE or \
           fH_deviation > TONE_ACCEPTANCE_TOLERANCE:
            print(f"    Guided match (with warning): "
                  f"fL = {fL_det:.1f} Hz (dev {fL_deviation:.1f} Hz), "
                  f"fH = {fH_det:.1f} Hz (dev {fH_deviation:.1f} Hz)")
            print(f"    NOTE: deviation exceeds acceptance tolerance "
                  f"({TONE_ACCEPTANCE_TOLERANCE:.0f} Hz) but peaks are "
                  f"within wide tolerance ({wide_tol:.0f} Hz).  Accepted.")
        else:
            print(f"    Guided match: "
                  f"fL = {fL_det:.1f} Hz ({fL_db:.1f} dB), "
                  f"fH = {fH_det:.1f} Hz ({fH_db:.1f} dB)")

        print(f"    Tone pair accepted (guided).  "
              f"Speed error from fH: {speed_pct:+.2f}%")

        return {
            'fL_detected': fL_det,
            'fL_db': fL_db,
            'fH_detected': fH_det,
            'fH_db': fH_db,
            'speed_pct': speed_pct,
            'noise_floor_db': noise_floor_db,
            'all_peaks': peaks,
        }
    else:
        missing = []
        if guided_fL is None:
            missing.append(f"fL~{expected_fL:.0f}")
        if guided_fH is None:
            missing.append(f"fH~{expected_fH:.0f}")
        print(f"    No guided match for {', '.join(missing)} "
              f"(wide tol {wide_tol:.0f} Hz).  "
              f"Falling back to blind discovery ...")

    # ------------------------------------------------------------------
    # STAGE 2 - BLIND: pick the two loudest peaks
    # ------------------------------------------------------------------
    if len(peaks) < 2:
        raise AnalysisError(
            f"Only one prominent tone found at {peaks[0][0]:.1f} Hz "
            f"({peaks[0][1]:.1f} dB).  "
            f"An IMD test signal requires two clearly distinguishable "
            f"tones.  Check the file or lower tone_prominence_db.")

    tone_a_freq, tone_a_db, tone_a_prom = peaks[0]
    tone_b_freq, tone_b_db, tone_b_prom = peaks[1]

    # Ambiguity check: warn when a 3rd peak rivals the chosen pair.
    if len(peaks) >= 3:
        tone_c_db = peaks[2][1]
        gap_ab = min(tone_a_db, tone_b_db) - tone_c_db
        if gap_ab < 6.0:
            top3_desc = "; ".join(
                f"{p[0]:.1f} Hz at {p[1]:.1f} dB" for p in peaks[:3])
            print(f"    WARNING: 3rd peak is close in level to the top two.  "
                  f"Top 3: {top3_desc}")
            print(f"    Proceeding with the two strongest peaks.")

    # Assign fL/fH by frequency order, not loudness.
    if tone_a_freq < tone_b_freq:
        fL_det, fL_db = tone_a_freq, tone_a_db
        fH_det, fH_db = tone_b_freq, tone_b_db
    else:
        fL_det, fL_db = tone_b_freq, tone_b_db
        fH_det, fH_db = tone_a_freq, tone_a_db

    print(f"    Identified tone pair (blind): "
          f"fL = {fL_det:.1f} Hz ({fL_db:.1f} dB), "
          f"fH = {fH_det:.1f} Hz ({fH_db:.1f} dB)")

    # Per the record description, fL should be the louder tone.
    if fL_db < fH_db:
        print(f"    NOTE: fL ({fL_db:.1f} dB) is quieter than fH "
              f"({fH_db:.1f} dB).  Unusual but proceeding.")

    fL_deviation = abs(fL_det - expected_fL)
    fH_deviation = abs(fH_det - expected_fH)

    if fL_deviation > TONE_ACCEPTANCE_TOLERANCE or \
       fH_deviation > TONE_ACCEPTANCE_TOLERANCE:
        # Record for the combined rerun-suggestion block, then refuse.
        _log_tone_suggestion(direction, channel, fL_det, fH_det)

        msg = (
            f"[ch {channel}] Two distinct tones found in {dir_label} file "
            f"but they are far from the expected frequencies.\n"
            f"\n"
            f"  Expected ({dir_label}, ch {channel}):  "
            f"fL = {expected_fL:.0f} Hz,  fH = {expected_fH:.0f} Hz\n"
            f"  Detected:  "
            f"fL = {fL_det:.1f} Hz (deviation {fL_deviation:.1f} Hz),  "
            f"fH = {fH_det:.1f} Hz (deviation {fH_deviation:.1f} Hz)\n"
            f"  Acceptance tolerance: {TONE_ACCEPTANCE_TOLERANCE:.0f} Hz\n"
            f"\n"
            f"  To analyse with the detected frequencies, rerun with:\n"
            f"    f_low  = {round(fL_det)}\n"
            f"    f_high = {round(fH_det)}\n"
            f"  in the options file (or adjust tone_acceptance_tolerance).")
        raise ToneLocationError(msg, fL_det, fH_det, direction)

    speed_pct = ((fH_det - expected_fH) / expected_fH) * 100.0

    print(f"    Tone pair accepted (within tolerance).  "
          f"Speed error from fH: {speed_pct:+.2f}%")

    return {
        'fL_detected': fL_det,
        'fL_db': fL_db,
        'fH_detected': fH_det,
        'fH_db': fH_db,
        'speed_pct': speed_pct,
        'noise_floor_db': noise_floor_db,
        'all_peaks': peaks,
    }


def _compute_stft_tolerance(tone_info):
    """Frequency tolerance for STFT peak searches, widened by speed error.

    The base tolerance FREQ_TOLERANCE is scaled in proportion to the
    detected speed error (in percent), capped at FREQ_TOLERANCE_WIDE,
    and never allowed below the base tolerance.
    """
    speed_error_pct = abs(tone_info['speed_pct'])
    scaled = FREQ_TOLERANCE * max(1.0, speed_error_pct / 1.0)
    capped = min(scaled, FREQ_TOLERANCE_WIDE)
    return max(capped, FREQ_TOLERANCE)


# ---------------------------------------------------------------------------
# STFT-based IMD measurement
# ---------------------------------------------------------------------------
def measure_imd_stft(signal, sample_rate, f_low, f_high, tolerance):
    """Measure IMD across the duration of the recording using STFT.

    A window of STFT_WINDOW_SEC seconds slides over *signal* with
    STFT_OVERLAP overlap.  Every window contributes to the RMS-averaged
    spectrum, but only windows whose pilot tone (near *f_high*) reaches
    MIN_PILOT_DB produce a per-window IMD measurement.
    """
    win_len = max(int(STFT_WINDOW_SEC * sample_rate), 2048)
    step = max(int(win_len * (1.0 - STFT_OVERLAP)), 1)
    products = _build_imd_products(f_low, f_high, MAX_IMD_ORDER)
    nyquist = sample_rate / 2.0

    results = []
    power_sum = None
    n_blocks = 0

    for start in range(0, len(signal) - win_len + 1, step):
        freqs, mag = _spectrum_of_block(signal[start:start + win_len],
                                        sample_rate)
        mag_db = _safe_db(mag)

        # Power-average every window, even those skipped for IMD below.
        if power_sum is None:
            power_sum = np.zeros_like(mag)
        power_sum += mag ** 2
        n_blocks += 1

        # A window only counts when the pilot tone is present and strong.
        fH_actual, fH_db = _find_peak_near(freqs, mag_db, f_high, tolerance)
        if fH_actual is None or fH_db < MIN_PILOT_DB:
            continue

        fL_actual, fL_db = _find_peak_near(freqs, mag_db, f_low, tolerance)

        imd_here = {}
        for label, expected_f, order in products:
            if not (0 < expected_f < nyquist):
                continue
            f_act, f_db = _find_peak_near(freqs, mag_db, expected_f,
                                          tolerance)
            if f_act is None:
                continue
            imd_here[label] = {
                'expected_freq': expected_f,
                'actual_freq': f_act,
                'abs_db': f_db,
                'dbc': f_db - fH_db,   # relative to the pilot tone
                'order': order,
            }

        results.append({
            'fH_actual': fH_actual,
            'fH_db': fH_db,
            'fL_actual': fL_actual,
            'fL_db': fL_db,
            'imd': imd_here,
        })

    if n_blocks > 0:
        avg_mag = np.sqrt(power_sum / n_blocks)
    else:
        avg_mag = np.zeros(1)

    return {
        'window_results': results,
        'avg_freqs': np.fft.rfftfreq(win_len, 1.0 / sample_rate),
        'avg_magnitude': avg_mag,
        'n_windows': len(results),
        'win_samples': win_len,
    }


# ---------------------------------------------------------------------------
# Aggregate per-window IMD results
# ---------------------------------------------------------------------------
def aggregate_imd(stft_result):
    """Compute median dBc for each IMD product across all valid windows.

    Returns a dict keyed by product label with median/mean/std of the
    dBc values, the median detected frequency, the occurrence count,
    and the product order.  Empty input yields an empty dict.
    """
    if stft_result['n_windows'] == 0:
        return {}

    # Single pass: gather per-label samples from every window.
    samples = {}
    for wr in stft_result['window_results']:
        for label, info in wr['imd'].items():
            entry = samples.setdefault(
                label, {'dbc': [], 'freq': [], 'order': None})
            entry['dbc'].append(info['dbc'])
            entry['freq'].append(info['actual_freq'])
            # Last-seen order wins (identical across windows in practice).
            entry['order'] = info['order']

    agg = {}
    for label in sorted(samples):
        entry = samples[label]
        dbc_arr = np.array(entry['dbc'])
        agg[label] = {
            'median_dbc': float(np.median(dbc_arr)),
            'mean_dbc': float(np.mean(dbc_arr)),
            'std_dbc': float(np.std(dbc_arr)),
            'count': len(entry['dbc']),
            'median_freq': float(np.median(entry['freq'])),
            'order': entry['order'],
        }

    return agg


def compute_total_imd(agg):
    """Compute the total IMD percentage from the aggregated results.

    Each product's median dBc is converted to a linear amplitude ratio
    and the total is the root-sum-square of those ratios.  Returns
    (percent, dBc); an empty aggregation yields (0.0, DB_FLOOR).
    """
    if not agg:
        return 0.0, DB_FLOOR

    amplitudes = [10.0 ** (info['median_dbc'] / 20.0)
                  for info in agg.values()]
    total_lin = np.sqrt(sum(a * a for a in amplitudes))

    # Tiny epsilon keeps log10 defined for a vanishing total.
    return total_lin * 100.0, 20.0 * np.log10(total_lin + 1e-30)


# ---------------------------------------------------------------------------
# Level normalization
# ---------------------------------------------------------------------------
def _normalize_result(res):
    """Normalize all levels so the tallest test tone is at 0 dB."""
    fL_nom = res['fL_nominal']
    fL_db = res['fl_detected'][fL_nom]['level_db']
    fH_db = res['fH_level_db']

    ref_db = max(fL_db, fH_db)

    res['normalize_offset_db'] = ref_db
    res['fH_level_db_raw'] = fH_db
    res['fL_level_db_raw'] = fL_db

    res['fH_level_db'] = fH_db - ref_db
    res['fl_detected'][fL_nom]['level_db'] = fL_db - ref_db

    if ref_db != 0.0:
        linear_divisor = 10.0 ** (ref_db / 20.0)
        res['avg_magnitude'] = res['avg_magnitude'] / linear_divisor

    return res


# ---------------------------------------------------------------------------
# Per-file, per-channel analysis
# ---------------------------------------------------------------------------
def analyze_file(wav_path, direction, user_track_level, channel):
    """Analyse one WAV file for IMD on a specific channel.

    Pipeline: read WAV -> verify L/R identity -> discover the two test
    tones -> STFT IMD measurement -> aggregate products -> estimate the
    track level -> optionally normalize levels.

    Parameters
    ----------
    wav_path : str
        Path of the recorded WAV file.
    direction : str
        'vertical' or 'horizontal'; selects expected tone frequencies
        and the known CBS track levels.
    user_track_level : int or None
        Track level from the options file; None triggers auto-detection.
    channel : str
        Channel to analyse ('L' or 'R').

    Returns
    -------
    dict
        Per-channel result consumed by the report and plotting code.

    Raises
    ------
    AnalysisError
        If the WAV cannot be read, or no STFT window contains a strong
        enough pilot tone.
    """
    expected_fL = _f_low_for(direction, channel)
    expected_fH = _f_high_for(direction, channel)

    try:
        sample_rate, raw = wavfile.read(wav_path)
    except Exception as exc:
        raise AnalysisError(
            f"Cannot read WAV file: {exc}") from exc

    audio = _to_float64(raw)

    # The CBS IMD tracks are mono; warn when L and R actually differ.
    ch_info = _check_channel_identity(audio)
    if ch_info['is_stereo']:
        if ch_info['channels_equal']:
            print(f"    Channels: L and R are identical (mono test signal)")
        else:
            print(f"    WARNING: L and R differ!  "
                  f"RMS(L-R) = {ch_info['rms_diff_db']:.1f} dBFS, "
                  f"peak |L-R| = {ch_info['peak_diff']:.6f}")
            print(f"    This is unexpected for a CBS STR-112 mono test track.")
    else:
        print(f"    Channels: mono file")

    signal = _select_channel(audio, channel)
    duration = len(signal) / sample_rate

    print(f"  {os.path.basename(wav_path)}: "
          f"{sample_rate} Hz, {duration:.2f} s, "
          f"channel = {channel}")

    # --- Robust tone discovery ---
    tone_info = _discover_tone_pair(signal, sample_rate, wav_path,
                                    direction, channel)

    fL_detected = tone_info['fL_detected']
    fH_detected = tone_info['fH_detected']
    adj_tolerance = _compute_stft_tolerance(tone_info)

    # The detected fL becomes the nominal key for all later lookups.
    fL_nominal = fL_detected

    print(f"    Analysing as two-tone pair: "
          f"fL = {fL_nominal:.1f} Hz, fH = {fH_detected:.1f} Hz")
    print(f"    Using tolerance = {adj_tolerance:.0f} Hz for STFT.")

    stft_res = measure_imd_stft(signal, sample_rate, fL_nominal,
                                fH_detected, adj_tolerance)

    if stft_res['n_windows'] == 0:
        raise AnalysisError(
            f"No valid STFT windows found.  "
            f"The pilot tone ({expected_fH:.0f} Hz) was detected in the "
            f"initial scan at {fH_detected:.1f} Hz "
            f"({tone_info['fH_db']:.1f} dB) but fell below the "
            f"minimum level ({MIN_PILOT_DB:.0f} dB) in all "
            f"{STFT_WINDOW_SEC*1000:.0f} ms windows.  "
            f"Try lowering min_pilot_db in the options file.")

    # Median over windows is robust against dropouts and clicks.
    fH_vals = [w['fH_actual'] for w in stft_res['window_results']]
    fH_db_vals = [w['fH_db'] for w in stft_res['window_results']]

    fH_median = float(np.median(fH_vals)) if fH_vals else expected_fH
    fH_level = float(np.median(fH_db_vals)) if fH_db_vals else DB_FLOOR

    # fL may be missing in some windows (stored as None); filter those.
    fL_vals = [w['fL_actual'] for w in stft_res['window_results']
               if w['fL_actual'] is not None]
    fL_db_vals = [w['fL_db'] for w in stft_res['window_results']
                  if w['fL_actual'] is not None]

    fL_detected_freq = float(np.median(fL_vals)) if fL_vals else fL_nominal
    fL_detected_db = float(np.median(fL_db_vals)) if fL_db_vals else DB_FLOOR

    fl_detected = {
        fL_nominal: {
            'freq': fL_detected_freq,
            'level_db': fL_detected_db,
        }
    }

    # Platter speed error inferred from the pilot-tone offset.
    speed_error_pct = ((fH_median - expected_fH) / expected_fH) * 100.0

    known_levels = (CBS_VERTICAL_LEVELS if direction == 'vertical'
                    else CBS_HORIZONTAL_LEVELS)

    # User-supplied track level overrides auto-estimation.
    if user_track_level is not None:
        track_level = user_track_level
        track_level_source = 'options file'
        track_level_reliable = True
    else:
        track_level, _, measured_diff, track_level_reliable = (
            _estimate_track_level(fL_detected_db, fH_level,
                                  known_levels))
        track_level_source = 'auto-detected'

    agg = aggregate_imd(stft_res)
    total_pct, total_db = compute_total_imd(agg)

    reliable_str = "" if track_level_reliable else " (uncertain)"
    print(f"    Detected fH = {fH_median:.1f} Hz "
          f"(expected {expected_fH:.0f}, speed error {speed_error_pct:+.2f}%)")
    print(f"    Detected fL = {fL_detected_freq:.1f} Hz "
          f"(expected {expected_fL:.0f}, level {fL_detected_db:.1f} dB)")
    print(f"    Track level: fL = +{track_level} dB "
          f"({track_level_source}){reliable_str}")
    print(f"    Valid STFT windows: {stft_res['n_windows']}")
    print(f"    Total IMD: {total_pct:.3f}% ({total_db:.1f} dBc)")

    res = {
        'wav_path': wav_path,
        'sample_rate': sample_rate,
        'duration': duration,
        'channel': channel,
        'n_windows': stft_res['n_windows'],
        'fH_detected': fH_median,
        'fH_level_db': fH_level,
        'fL_nominal': fL_nominal,
        'fl_detected': fl_detected,
        'speed_error_pct': speed_error_pct,
        'track_level': track_level,
        'track_level_source': track_level_source,
        'track_level_reliable': track_level_reliable,
        'channel_info': ch_info,
        'agg_imd': agg,
        'total_imd_pct': total_pct,
        'total_imd_db': total_db,
        'normalize_offset_db': 0.0,
        'fH_level_db_raw': fH_level,
        'fL_level_db_raw': fL_detected_db,
        'avg_freqs': stft_res['avg_freqs'],
        'avg_magnitude': stft_res['avg_magnitude'],
    }

    # Optionally rescale so the tallest test tone sits at 0 dB.
    if NORMALIZE_LEVELS:
        _normalize_result(res)
        print(f"    Levels normalized: tallest tone -> 0 dB "
              f"(offset {res['normalize_offset_db']:+.1f} dB)")

    return res


# ---------------------------------------------------------------------------
# Text report
# ---------------------------------------------------------------------------
def _track_level_label(res):
    """Format a short label for the track level."""
    s = f"fL +{res['track_level']} dB"
    if not res['track_level_reliable']:
        s += " (uncertain)"
    return s


def _channel_identity_text(ch_info):
    """Format a one-line channel identity summary for the report."""
    if not ch_info['is_stereo']:
        return "mono file"
    if ch_info['channels_equal']:
        return "stereo, L=R (mono test signal, as expected)"
    return (f"stereo, L\u2260R  WARNING  "
            f"RMS(L-R) = {ch_info['rms_diff_db']:.1f} dBFS, "
            f"peak |L-R| = {ch_info['peak_diff']:.6f}")


def _report_section(L, w, fname, res, heading, guidance):
    """Append one analysis section to report lines list *L*.

    *w* is the report width; *fname* the source file path; *res* a
    per-channel result dict from analyze_file().
    """
    fL_nom = res['fL_nominal']
    fl_info = res['fl_detected'][fL_nom]
    rel_db = fl_info['level_db'] - res['fH_level_db']
    rule = "=" * w

    L.extend([
        rule,
        f"  {heading}",
        f"  Track level:  {_track_level_label(res)}  ({res['track_level_source']})",
        f"  Source: {fname}",
        rule,
        f"  {guidance}",
        "",
        f"  Sample rate:       {res['sample_rate']} Hz",
        f"  Duration:          {res['duration']:.2f} s",
        f"  Channels:          {_channel_identity_text(res['channel_info'])}",
        f"  Analysis channel:  {res['channel']}",
        f"  Valid windows:     {res['n_windows']}",
    ])

    if NORMALIZE_LEVELS:
        L.append(f"  Level reference:   tallest tone = 0 dB "
                 f"(offset {res['normalize_offset_db']:+.1f} dB)")

    L.extend([
        f"  Detected fH:       {res['fH_detected']:.1f} Hz "
        f"(level {res['fH_level_db']:.1f} dB)",
        f"  Detected fL:       {fl_info['freq']:.1f} Hz "
        f"(expected {fL_nom:.0f}, "
        f"level {fl_info['level_db']:.1f} dB, "
        f"{rel_db:+.1f} dBc)",
        f"  Speed error:       {res['speed_error_pct']:+.02f}%",
        f"  Track level:       +{res['track_level']} dB  "
        f"({res['track_level_source']})"
        f"{'  ** uncertain **' if not res['track_level_reliable'] else ''}",
        "",
        # Product table header.
        f"  {'Product':>14s}  {'Freq (Hz)':>10s}  "
        f"{'Level (dBc)':>12s}  {'Order':>5s}  {'Windows':>7s}",
        f"  {'-'*14}  {'-'*10}  {'-'*12}  {'-'*5}  {'-'*7}",
    ])

    # One row per IMD product, ordered by frequency.
    for label, info in sorted(res['agg_imd'].items(),
                              key=lambda kv: kv[1]['median_freq']):
        L.append(f"  {label:>14s}  {info['median_freq']:10.1f}  "
                 f"{info['median_dbc']:12.2f}  "
                 f"{info['order']:5d}  {info['count']:7d}")

    L.extend([
        "",
        f"  Total IMD:  {res['total_imd_pct']:.3f}%  "
        f"({res['total_imd_db']:.1f} dBc)",
        "",
    ])


def _report_failed_section(L, w, fname, error_msg, heading):
    """Append a section for a file that failed analysis."""
    L.append("=" * w)
    L.append(f"  {heading}")
    L.append(f"  Source: {fname}")
    L.append("=" * w)
    L.append("")
    L.append(f"  *** ANALYSIS FAILED ***")
    L.append("")
    for line in error_msg.splitlines():
        L.append(f"  {line}")
    L.append("")


def _summary_line(res, label_prefix):
    """Format one summary line for a result (or None)."""
    if res is not None:
        return (f"  {label_prefix}  fL +{res['track_level']:>2d} dB:  "
                f"IMD = {res['total_imd_pct']:.3f}%  "
                f"({res['total_imd_db']:.1f} dBc)")
    return None


def build_report(file_v, file_h,
                 res_vL, res_vR, res_hL, res_hR,
                 err_vL=None, err_vR=None,
                 err_hL=None, err_hR=None,
                 date_info=None):
    """Return the full IMD analysis report as a single string.

    Parameters
    ----------
    file_v, file_h : str or None
        Paths of the vertical / horizontal WAV files.
    res_vL, res_vR, res_hL, res_hR : dict or None
        Per-direction, per-channel results from analyze_file();
        None if that analysis was skipped or failed.
    err_vL, err_vR, err_hL, err_hR : str or None
        Error text for failed analyses (used when the matching res_*
        is None).
    date_info : object or None
        Optional file-date info rendered via _format_date_info_text().
    """
    w = 72  # report width in characters
    L = []  # accumulated report lines

    v_levels_str = ", ".join(f"+{v}" for v in CBS_VERTICAL_LEVELS)
    h_levels_str = ", ".join(f"+{v}" for v in CBS_HORIZONTAL_LEVELS)

    fL_str = _freq_summary(F_LOW)
    fH_str = _freq_summary(F_HIGH)

    # ---- Header with the effective configuration ----
    L.append("=" * w)
    L.append("VINYL INTERMODULATION DISTORTION (IMD) ANALYZER")
    L.append("=" * w)
    L.append(f"Date:              {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    L.append(f"Test record:       CBS Laboratories STR-112 (or compatible)")
    L.append(f"Pilot tone (fH):   {fH_str}"
             f"  (nominal {FH_NOMINAL_DB:+.0f} dB)")
    L.append(f"Test tone (fL):    {fL_str}")
    L.append(f"Tone prominence:   {TONE_PROMINENCE_DB:.0f} dB above noise floor")
    L.append(f"Acceptance tol:    {TONE_ACCEPTANCE_TOLERANCE:.0f} Hz")
    L.append(f"Vertical tracks:   fL at {v_levels_str} dB  (3 tracks)")
    L.append(f"Horizontal tracks: fL at {h_levels_str} dB  (5 tracks)")
    L.append(f"Analysis mode:     {MODE}")
    L.append(f"Analysis channels: L and R (both)")
    L.append(f"Normalize levels:  {'yes (tallest tone = 0 dB)' if NORMALIZE_LEVELS else 'no (raw dB)'}")
    L.append(f"STFT window:       {STFT_WINDOW_SEC*1000:.0f} ms, "
             f"{STFT_OVERLAP*100:.0f}% overlap")
    L.append(f"Freq tolerance:    +/-{FREQ_TOLERANCE:.0f} Hz "
             f"(wide: +/-{FREQ_TOLERANCE_WIDE:.0f} Hz)")
    L.append(f"Max IMD order:     {MAX_IMD_ORDER}")
    L.append("")

    # --- Vertical sections ---
    vert_heading_v = "VERTICAL MODULATION -- VTA / SRA Assessment"
    vert_guidance = ("Optimise the Vertical Tracking Angle (VTA) or Stylus Rake\n"
                     "  Angle (SRA) to minimise the IMD products below.")

    # Each channel gets either a full section or a failure section.
    if res_vL is not None:
        _report_section(L, w, file_v, res_vL,
                        f"{vert_heading_v}  [LEFT channel]",
                        vert_guidance)
    elif err_vL is not None:
        _report_failed_section(L, w, file_v, err_vL,
                               f"{vert_heading_v}  [LEFT channel]")

    if res_vR is not None:
        _report_section(L, w, file_v, res_vR,
                        f"{vert_heading_v}  [RIGHT channel]",
                        vert_guidance)
    elif err_vR is not None:
        _report_failed_section(L, w, file_v, err_vR,
                               f"{vert_heading_v}  [RIGHT channel]")

    # --- Horizontal sections ---
    horiz_heading = "HORIZONTAL (LATERAL) MODULATION -- LTA / Zenith Assessment"
    horiz_guidance = ("Optimise the Lateral Tracking Angle (LTA) or cartridge zenith\n"
                      "  rotation in the headshell to minimise the IMD products below.")

    if res_hL is not None:
        _report_section(L, w, file_h, res_hL,
                        f"{horiz_heading}  [LEFT channel]",
                        horiz_guidance)
    elif err_hL is not None:
        _report_failed_section(L, w, file_h, err_hL,
                               f"{horiz_heading}  [LEFT channel]")

    if res_hR is not None:
        _report_section(L, w, file_h, res_hR,
                        f"{horiz_heading}  [RIGHT channel]",
                        horiz_guidance)
    elif err_hR is not None:
        _report_failed_section(L, w, file_h, err_hR,
                               f"{horiz_heading}  [RIGHT channel]")

    # ---- Summary ----
    L.append("=" * w)
    L.append("SUMMARY")
    L.append("=" * w)
    L.append("")

    # One line per direction/channel: result, failure, or not analysed.
    for lbl, res, err in [
        ("Vertical   (VTA/SRA)    L", res_vL, err_vL),
        ("Vertical   (VTA/SRA)    R", res_vR, err_vR),
        ("Horizontal (LTA/zenith) L", res_hL, err_hL),
        ("Horizontal (LTA/zenith) R", res_hR, err_hR),
    ]:
        s = _summary_line(res, lbl)
        if s is not None:
            L.append(s)
        elif err is not None:
            L.append(f"  {lbl}:  FAILED (see above)")
        else:
            L.append(f"  {lbl}:  not analysed")

    L.append("")
    L.append("  Lower IMD values are better.")
    if res_vL is not None or res_vR is not None:
        L.append("  Adjust VTA/SRA to minimise the vertical result.")
    if res_hL is not None or res_hR is not None:
        L.append("  Adjust zenith/LTA to minimise the horizontal result.")
    L.append("")

    all_res = [r for r in (res_vL, res_vR, res_hL, res_hR)
               if r is not None]

    # Warn when any auto-detected track level was uncertain.
    any_uncertain = any(not r['track_level_reliable'] for r in all_res)
    if any_uncertain:
        L.append("  WARNING: One or more track levels could not be")
        L.append("  reliably determined.  Use track_level_vertical and/or")
        L.append("  track_level_horizontal in the options file to override.")
        L.append("")

    # Warn when a supposedly-mono source had differing channels.
    any_ch_mismatch = any(
        r['channel_info']['is_stereo'] and not r['channel_info']['channels_equal']
        for r in all_res)
    if any_ch_mismatch:
        L.append("  WARNING: One or both WAV files have L and R channels")
        L.append("  that are NOT identical.  The CBS STR-112 IMD tracks are")
        L.append("  mono signals -- differing channels may indicate a")
        L.append("  recording issue or a non-standard source.")
        L.append("")

    all_err = [e for e in (err_vL, err_vR, err_hL, err_hR)
               if e is not None]
    if all_err:
        L.append(f"  See {ANALYSIS_ERROR_FILE} for detailed error information.")
        L.append("")

    # Optional file-date footer (indent every line by two spaces).
    if date_info is not None:
        L.append("-" * w)
        L.append("FILE DATES")
        L.append("-" * w)
        L.append(f"  {_format_date_info_text(date_info).replace(chr(10), chr(10) + '  ')}")
        L.append("")

    L.append("=" * w)
    return "\n".join(L)


# ---------------------------------------------------------------------------
# Plotting
# ---------------------------------------------------------------------------
def _format_freq(value, _pos):
    """Format frequency tick: use 'k' suffix for >= 1000 Hz."""
    if value >= 1000:
        return f'{value/1000:g}k'
    return f'{value:g}'


def _plot_spectrum_panel(ax, res, title, color_main, color_imd,
                         major_ticks, plot_freq_max):
    """Draw one spectrum panel on axes *ax*.

    Parameters
    ----------
    ax : matplotlib axes to draw into.
    res : per-channel result dict (averaged spectrum, detected tones,
        aggregated IMD products, totals).
    title : panel title string.
    color_main : NOTE(review): not referenced in this body -- presumably
        kept for signature symmetry with the caller; confirm before removing.
    color_imd : colour for the IMD-product markers and annotations.
    major_ticks : candidate x-axis tick positions in Hz.
    plot_freq_max : upper x-axis limit in Hz.
    """
    freqs = res['avg_freqs']
    mag_db = _safe_db(res['avg_magnitude'])

    # Averaged spectrum as a faint background trace.
    mask = freqs <= plot_freq_max
    ax.plot(freqs[mask], mag_db[mask],
            color='gray', linewidth=0.5, alpha=0.7,
            label='Average spectrum')

    # Pilot tone fH: vertical line + marker at the nearest bin.
    fH_freq = res['fH_detected']
    fH_level = res['fH_level_db']
    ax.axvline(fH_freq, color='red', linestyle='--',
               linewidth=1.5, alpha=0.8,
               label=f'fH = {fH_freq:.0f} Hz')

    idx_fH = np.argmin(np.abs(freqs - fH_freq))
    y_fH = mag_db[idx_fH]
    ax.plot(fH_freq, y_fH, 'v', color='red', markersize=6, zorder=5)

    # Skip the level annotation when fH is the 0 dB normalization reference.
    fH_is_ref = (NORMALIZE_LEVELS and fH_level == 0.0)
    if not fH_is_ref:
        ax.annotate(f'fH\n{fH_level:.1f} dB',
                    xy=(fH_freq, y_fH),
                    xytext=(0, 12), textcoords='offset points',
                    fontsize=5.5, ha='center', color='red',
                    fontweight='bold')

    # Test tone fL: same treatment in blue.
    fL_nom = res['fL_nominal']
    fl_info = res['fl_detected'][fL_nom]
    fL_freq = fl_info['freq']
    fL_level = fl_info['level_db']
    ax.axvline(fL_freq, color='blue', linestyle='--',
               linewidth=1.5, alpha=0.8,
               label=f'fL = {fL_freq:.0f} Hz')

    idx_fL = np.argmin(np.abs(freqs - fL_freq))
    y_fL = mag_db[idx_fL]
    ax.plot(fL_freq, y_fL, 'v', color='blue', markersize=6, zorder=5)

    fL_is_ref = (NORMALIZE_LEVELS and fL_level == 0.0)
    if not fL_is_ref:
        ax.annotate(f'fL\n{fL_level:.1f} dB',
                    xy=(fL_freq, y_fL),
                    xytext=(0, 12), textcoords='offset points',
                    fontsize=5.5, ha='center', color='blue',
                    fontweight='bold')

    # Mark every aggregated IMD product within the plotted range.
    for label, info in sorted(res['agg_imd'].items(),
                              key=lambda x: x[1]['median_freq']):
        f_imd = info['median_freq']
        if f_imd <= plot_freq_max:
            ax.axvline(f_imd, color=color_imd, linestyle=':',
                       linewidth=0.8, alpha=0.6)
            idx = np.argmin(np.abs(freqs - f_imd))
            y_val = mag_db[idx]
            ax.plot(f_imd, y_val, 'v', color=color_imd,
                    markersize=5, zorder=5)
            ax.annotate(f'{label}\n{info["median_dbc"]:.1f} dBc',
                        xy=(f_imd, y_val),
                        xytext=(0, 12), textcoords='offset points',
                        fontsize=5.5, ha='center', color=color_imd,
                        fontweight='bold')

    # Total IMD badge above the top-right corner of the axes.
    ax.text(1.0, 1.02,
            f'Total IMD: {res["total_imd_pct"]:.3f}% '
            f'({res["total_imd_db"]:.1f} dBc)',
            transform=ax.transAxes, fontsize=9,
            verticalalignment='bottom', horizontalalignment='right',
            bbox=dict(boxstyle='round,pad=0.3', facecolor='wheat',
                      alpha=0.8),
            clip_on=False)

    ax.set_title(title, fontsize=11, pad=20)
    y_label = "Magnitude (dB, normalized)" if NORMALIZE_LEVELS else "Magnitude (dB)"
    ax.set_ylabel(y_label)
    ax.set_xlim(0, plot_freq_max)
    ax.xaxis.set_major_locator(FixedLocator(
        [t for t in major_ticks if t <= plot_freq_max]))
    ax.xaxis.set_major_formatter(FuncFormatter(_format_freq))
    ax.grid(True, which='major', alpha=0.3)
    ax.legend(loc='upper right', fontsize=7)


def _compute_bottom_margin(equipment_text, date_info):
    """Compute bottom margin and extra figure height for footer text."""
    has_equip = equipment_text is not None and equipment_text.strip()
    equip_lines = (equipment_text.strip().splitlines()
                   if has_equip else [])

    date_text = _format_date_info_text(date_info) if date_info else ''
    date_lines = date_text.splitlines() if date_text else []

    n_lines = max(len(equip_lines), len(date_lines))

    if n_lines == 0:
        return 0.0, 0.0

    bottom_margin = 0.06 + n_lines * 0.016
    extra_height = n_lines * 0.15
    return bottom_margin, extra_height


def _add_footer_text(fig, bottom_margin, equipment_text, date_info):
    """Add equipment info (left) and date info (right) below the chart area."""
    y = bottom_margin - 0.015
    shared = dict(fontsize=7.5, fontfamily='monospace',
                  verticalalignment='top', color='#444444')

    if equipment_text is not None and equipment_text.strip():
        fig.text(0.03, y, equipment_text.strip(), **shared)

    if date_info is not None:
        fig.text(0.97, y, _format_date_info_text(date_info),
                 horizontalalignment='right', **shared)


def _generate_spectrum_plot(panels, output_path, direction_label,
                            plot_freq_max,
                            equipment_text=None, date_info=None):
    """Generate one spectrum plot (one or two panels) for a direction.

    *panels* is a list of (result, title, color_main, color_imd)
    tuples; an empty list produces no output file.
    """
    if not panels:
        return

    tick_positions = [200, 500, 1000, 2000, 4000, 6000, 8000, 10000, 12000]

    bottom_margin, extra_height = _compute_bottom_margin(
        equipment_text, date_info)

    n_panels = len(panels)
    fig, axes = plt.subplots(n_panels, 1,
                             figsize=(14, 5 * n_panels + extra_height),
                             sharex=True, squeeze=False)

    title = f"IMD Spectrum Analysis  [{direction_label}]"
    if NORMALIZE_LEVELS:
        title += "  [normalized]"
    fig.suptitle(title, fontsize=14, fontweight='bold')

    for row, (res, panel_title, color_main, color_imd) in enumerate(panels):
        _plot_spectrum_panel(axes[row, 0], res, panel_title, color_main,
                             color_imd, tick_positions, plot_freq_max)

    axes[-1, 0].set_xlabel("Frequency (Hz)")

    # Layout first, then footer text in the reserved bottom band.
    fig.tight_layout(rect=[0, bottom_margin, 1, 0.96])
    _add_footer_text(fig, bottom_margin, equipment_text, date_info)

    fig.savefig(output_path, dpi=150)
    plt.close(fig)
    print(f"Plot saved to:   {output_path}")


def _generate_bar_plot(res_L, res_R, wav_file, output_path,
                       direction_label, equipment_text=None,
                       date_info=None):
    """Generate one bar chart comparing L and R channels for a direction.

    Draws grouped bars when both channels are present, or a single bar
    series when only one channel succeeded.  Produces no file when
    neither channel has any IMD products.

    NOTE(review): *wav_file* is never referenced in this body --
    presumably kept for call-site symmetry with the spectrum plot;
    confirm before removing.
    """
    # Collect product labels from whichever channels are available.
    label_sources = []
    if res_L is not None:
        label_sources.append(res_L['agg_imd'])
    if res_R is not None:
        label_sources.append(res_R['agg_imd'])

    all_keys = set()
    for src in label_sources:
        all_keys |= src.keys()

    # Sort labels by the first channel that knows their frequency.
    def _sort_key(k):
        for src in label_sources:
            if k in src:
                return src[k]['median_freq']
        return 0

    all_labels = sorted(all_keys, key=_sort_key)

    if not all_labels:
        return

    bottom_margin, extra_height = _compute_bottom_margin(
        equipment_text, date_info)
    fig_height = 7 + extra_height

    fig, ax = plt.subplots(1, 1, figsize=(14, fig_height))

    x = np.arange(len(all_labels))
    have_both = res_L is not None and res_R is not None

    if have_both:
        # Grouped bars: L and R side by side per product.
        track_level = res_L['track_level']
        fig.suptitle(
            f"IMD Product Comparison  [{direction_label}]:  "
            f"LEFT vs RIGHT  fL +{track_level} dB",
            fontsize=14, fontweight='bold')

        bar_w = 0.35
        # Missing products render as a zero-height bar.
        l_vals = [res_L['agg_imd'][lb]['median_dbc']
                  if lb in res_L['agg_imd'] else 0
                  for lb in all_labels]
        r_vals = [res_R['agg_imd'][lb]['median_dbc']
                  if lb in res_R['agg_imd'] else 0
                  for lb in all_labels]

        ax.bar(x - bar_w / 2, l_vals, bar_w, color='royalblue',
               alpha=0.8, label='LEFT channel')
        ax.bar(x + bar_w / 2, r_vals, bar_w, color='crimson',
               alpha=0.8, label='RIGHT channel')

        summary_text = (
            f'LEFT  total (fL +{res_L["track_level"]} dB):  '
            f'{res_L["total_imd_pct"]:.3f}% '
            f'({res_L["total_imd_db"]:.1f} dBc)\n'
            f'RIGHT total (fL +{res_R["track_level"]} dB):  '
            f'{res_R["total_imd_pct"]:.3f}% '
            f'({res_R["total_imd_db"]:.1f} dBc)')

    elif res_L is not None:
        # Left channel only.
        fig.suptitle(
            f"IMD Products  [{direction_label}] -- LEFT channel  "
            f"fL +{res_L['track_level']} dB",
            fontsize=14, fontweight='bold')

        bar_w = 0.5
        l_vals = [res_L['agg_imd'][lb]['median_dbc']
                  if lb in res_L['agg_imd'] else 0
                  for lb in all_labels]
        ax.bar(x, l_vals, bar_w, color='royalblue', alpha=0.8,
               label=f'LEFT channel fL +{res_L["track_level"]} dB')

        summary_text = (
            f'LEFT total (fL +{res_L["track_level"]} dB):  '
            f'{res_L["total_imd_pct"]:.3f}% '
            f'({res_L["total_imd_db"]:.1f} dBc)')

    else:  # res_R only
        fig.suptitle(
            f"IMD Products  [{direction_label}] -- RIGHT channel  "
            f"fL +{res_R['track_level']} dB",
            fontsize=14, fontweight='bold')

        bar_w = 0.5
        r_vals = [res_R['agg_imd'][lb]['median_dbc']
                  if lb in res_R['agg_imd'] else 0
                  for lb in all_labels]
        ax.bar(x, r_vals, bar_w, color='crimson', alpha=0.8,
               label=f'RIGHT channel fL +{res_R["track_level"]} dB')

        summary_text = (
            f'RIGHT total (fL +{res_R["track_level"]} dB):  '
            f'{res_R["total_imd_pct"]:.3f}% '
            f'({res_R["total_imd_db"]:.1f} dBc)')

    ax.set_xticks(x)
    ax.set_xticklabels(all_labels, rotation=45, ha='right', fontsize=7)
    ax.set_ylabel("Level (dBc relative to pilot tone)")
    ax.set_xlabel("IMD Product")
    ax.grid(True, axis='y', alpha=0.3)
    ax.legend(fontsize=10)

    # Totals box in the lower-left corner of the axes.
    ax.text(0.02, 0.02, summary_text,
            transform=ax.transAxes, fontsize=9,
            verticalalignment='bottom',
            fontfamily='monospace',
            bbox=dict(boxstyle='round,pad=0.4', facecolor='lightyellow',
                      alpha=0.9))

    # Layout first, then footer text in the reserved bottom band.
    fig.tight_layout(rect=[0, bottom_margin, 1, 0.95])

    _add_footer_text(fig, bottom_margin, equipment_text, date_info)

    fig.savefig(output_path, dpi=150)
    plt.close(fig)
    print(f"Plot saved to:   {output_path}")


def generate_plots(file_v, file_h,
                   res_vL, res_vR, res_hL, res_hR,
                   output_base, equipment_text=None,
                   date_info=None):
    """Generate spectrum and bar-chart plots for each direction."""
    jobs = (
        ('VERTICAL -- VTA/SRA', 'vertical', file_v, res_vL, res_vR),
        ('HORIZONTAL -- LTA/Zenith', 'horizontal', file_h, res_hL, res_hR),
    )

    for direction, dir_tag, wav_file, res_L, res_R in jobs:
        if wav_file is None:
            continue

        pfm = _plot_freq_max_for(dir_tag)

        # Build one spectrum panel per successfully analysed channel.
        panels = []
        for res, ch_tag, c_main, c_imd in (
                (res_L, 'L', 'royalblue', 'navy'),
                (res_R, 'R', 'crimson', 'darkred')):
            if res is None:
                continue
            panel_title = (f"{direction}  |  fL +{res['track_level']} dB  |  "
                           f"{os.path.basename(wav_file)}  |  ch {ch_tag}")
            panels.append((res, panel_title, c_main, c_imd))

        if panels:
            _generate_spectrum_plot(
                panels, f"{output_base}_{dir_tag}_spectrum.png",
                direction, pfm, equipment_text, date_info)

        if res_L is not None or res_R is not None:
            suffix = ('_LR_comparison'
                      if res_L is not None and res_R is not None
                      else '_products')
            _generate_bar_plot(res_L, res_R, wav_file,
                               f"{output_base}_{dir_tag}{suffix}.png",
                               direction, equipment_text, date_info)

    if all(r is None for r in (res_vL, res_vR, res_hL, res_hR)):
        print("  No successful analyses -- skipping plots.")


# ---------------------------------------------------------------------------
# Options file reader
# ---------------------------------------------------------------------------
def _parse_freq_list(raw, key_name):
    """
    Parse a comma-separated frequency value.

    Allowed counts:
      1 value  -> used for all (vertL, vertR, horizL, horizR)
      2 values -> (vertical, horizontal) -- L and R get same value
      4 values -> (verticalL, verticalR, horizontalL, horizontalR)

    Returns a dict: {'vertical': {'L': v, 'R': v},
                     'horizontal': {'L': v, 'R': v}}
    """
    parts = [s.strip() for s in raw.split(',') if s.strip()]
    parsed = []
    for p in parts:
        try:
            v = float(p)
        except ValueError:
            _die(f"'{key_name}' contains a non-numeric value: '{p}'.  "
                 f"Check for inline comments (use a separate line "
                 f"starting with '#' for inline comments instead).")
        if v <= 0:
            _die(f"'{key_name}' values must be > 0 (got {v})")
        parsed.append(v)

    if len(parsed) == 0:
        _die(f"'{key_name}' must contain at least one frequency.")
    elif len(parsed) == 1:
        v = parsed[0]
        return {
            'vertical':   {'L': v, 'R': v},
            'horizontal': {'L': v, 'R': v},
        }
    elif len(parsed) == 2:
        return {
            'vertical':   {'L': parsed[0], 'R': parsed[0]},
            'horizontal': {'L': parsed[1], 'R': parsed[1]},
        }
    elif len(parsed) == 4:
        return {
            'vertical':   {'L': parsed[0], 'R': parsed[1]},
            'horizontal': {'L': parsed[2], 'R': parsed[3]},
        }
    else:
        _die(f"'{key_name}' must have 1, 2, or 4 comma-separated values "
             f"(got {len(parsed)}).  "
             f"Format: value | vert,horiz | vertL,vertR,horizL,horizR")


def _opt_float(opts, key):
    """Return opts[key] converted to float; exit via _die on a bad value.

    Keeps option-file diagnostics consistent: a non-numeric value produces
    a clear message instead of an uncaught ValueError traceback.
    """
    try:
        return float(opts[key])
    except ValueError:
        _die(f"'{key}' must be a number (got '{opts[key]}')")


def _opt_int(opts, key):
    """Return opts[key] converted to int; exit via _die on a bad value."""
    try:
        return int(opts[key])
    except ValueError:
        _die(f"'{key}' must be an integer (got '{opts[key]}')")


def load_options(options_path):
    """Read an options file and apply values to module-level constants.

    File format: 'key = value', one pair per line.  Blank lines and lines
    starting with '#' are ignored; lines without '=' produce a warning and
    are skipped.  Keys are case-insensitive.  Unknown keys are retained in
    the returned dict but otherwise ignored.

    Returns the raw dict of parsed key -> value strings.
    """
    global MODE
    global F_HIGH, F_LOW, FH_NOMINAL_DB
    global FREQ_TOLERANCE, FREQ_TOLERANCE_WIDE
    global STFT_WINDOW_SEC, STFT_OVERLAP, MIN_PILOT_DB
    global DB_FLOOR, CHANNEL, MAX_IMD_ORDER
    global TRACK_LEVEL_VERTICAL, TRACK_LEVEL_HORIZONTAL
    global CHANNEL_IDENTITY_THRESHOLD, NORMALIZE_LEVELS
    global TONE_PROMINENCE_DB, TONE_ACCEPTANCE_TOLERANCE
    global PLOT_FREQ_MAX_VERTICAL, PLOT_FREQ_MAX_HORIZONTAL

    # Pass 1: read the file into a flat key -> raw-string dict.
    opts = {}
    with open(options_path, 'r', encoding='utf-8') as fh:
        for line_num, raw_line in enumerate(fh, 1):
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue
            if '=' not in line:
                print(f"  WARNING: options file line {line_num} ignored "
                      f"(no '='): {raw_line.rstrip()}")
                continue
            key, _, val = line.partition('=')
            key = key.strip().lower()
            val = val.strip()
            opts[key] = val

    # Pass 2: validate and apply recognised keys.
    if 'mode' in opts:
        val = opts['mode'].upper()
        if val not in _VALID_MODES:
            _die(f"'mode' must be one of {list(_VALID_MODES)} "
                 f"(got '{opts['mode']}')")
        MODE = val
    if 'f_high' in opts:
        F_HIGH = _parse_freq_list(opts['f_high'], 'f_high')
    if 'f_low' in opts:
        F_LOW = _parse_freq_list(opts['f_low'], 'f_low')
    if 'f_low_list' in opts and 'f_low' not in opts:
        # Backward compatibility: old key name, honoured only when the
        # new 'f_low' key is absent.
        F_LOW = _parse_freq_list(opts['f_low_list'], 'f_low_list')
        print("  NOTE: 'f_low_list' is deprecated.  Use 'f_low' instead.")
    if 'fh_nominal_db' in opts:
        FH_NOMINAL_DB = _opt_float(opts, 'fh_nominal_db')
    if 'tone_prominence_db' in opts:
        v = _opt_float(opts, 'tone_prominence_db')
        if v < 0:
            _die(f"'tone_prominence_db' must be >= 0 (got {v})")
        TONE_PROMINENCE_DB = v
    if 'tone_acceptance_tolerance' in opts:
        v = _opt_float(opts, 'tone_acceptance_tolerance')
        if v <= 0:
            _die(f"'tone_acceptance_tolerance' must be > 0 (got {v})")
        TONE_ACCEPTANCE_TOLERANCE = v
    if 'freq_tolerance' in opts:
        FREQ_TOLERANCE = _opt_float(opts, 'freq_tolerance')
    if 'freq_tolerance_wide' in opts:
        FREQ_TOLERANCE_WIDE = _opt_float(opts, 'freq_tolerance_wide')
    if 'stft_window_sec' in opts:
        STFT_WINDOW_SEC = _opt_float(opts, 'stft_window_sec')
    if 'stft_overlap' in opts:
        STFT_OVERLAP = _opt_float(opts, 'stft_overlap')
    if 'min_pilot_db' in opts:
        MIN_PILOT_DB = _opt_float(opts, 'min_pilot_db')
    if 'db_floor' in opts:
        DB_FLOOR = _opt_float(opts, 'db_floor')
    if 'channel' in opts:
        val = opts['channel'].upper()
        if val not in ('L', 'R', 'MIX'):
            _die(f"'channel' must be L, R, or MIX (got '{opts['channel']}')")
        CHANNEL = val
    if 'max_imd_order' in opts:
        v = _opt_int(opts, 'max_imd_order')
        if v < 2 or v > 10:
            _die(f"'max_imd_order' must be 2..10 (got {v})")
        MAX_IMD_ORDER = v
    if 'plot_freq_max' in opts:
        # Legacy single value -- apply to both
        v = _opt_float(opts, 'plot_freq_max')
        PLOT_FREQ_MAX_VERTICAL = v
        PLOT_FREQ_MAX_HORIZONTAL = v
    if 'plot_freq_max_vertical' in opts:
        PLOT_FREQ_MAX_VERTICAL = _opt_float(opts, 'plot_freq_max_vertical')
    if 'plot_freq_max_horizontal' in opts:
        PLOT_FREQ_MAX_HORIZONTAL = _opt_float(opts, 'plot_freq_max_horizontal')
    if 'track_level_vertical' in opts:
        TRACK_LEVEL_VERTICAL = _opt_int(opts, 'track_level_vertical')
    if 'track_level_horizontal' in opts:
        TRACK_LEVEL_HORIZONTAL = _opt_int(opts, 'track_level_horizontal')
    if 'channel_identity_threshold' in opts:
        CHANNEL_IDENTITY_THRESHOLD = _opt_float(
            opts, 'channel_identity_threshold')
    if 'normalize_levels' in opts:
        val = opts['normalize_levels'].lower()
        if val in ('true', 'yes', '1'):
            NORMALIZE_LEVELS = True
        elif val in ('false', 'no', '0'):
            NORMALIZE_LEVELS = False
        else:
            _die(f"'normalize_levels' must be true/false "
                 f"(got '{opts['normalize_levels']}')")

    return opts


# ---------------------------------------------------------------------------
# Argument parsing
# ---------------------------------------------------------------------------
def _parse_named_arg(tokens, index):
    """Parse a named argument at tokens[index]."""
    tok = tokens[index]
    if not tok.startswith('-'):
        return None, None, index

    rest = tok[1:]

    if '=' in rest:
        k, _, v = rest.partition('=')
        k = k.strip()
        v = v.strip()
        next_i = index + 1
        if not v and next_i < len(tokens) and not tokens[next_i].startswith('-'):
            v = tokens[next_i].strip()
            next_i += 1
        return k.lower(), v, next_i

    next_i = index + 1
    if next_i < len(tokens):
        nxt = tokens[next_i]
        if nxt.startswith('='):
            k = rest.strip()
            v = nxt[1:].strip()
            next_i += 1
            if not v and next_i < len(tokens) and not tokens[next_i].startswith('-'):
                v = tokens[next_i].strip()
                next_i += 1
            return k.lower(), v, next_i

    return None, None, index


def parse_args(argv):
    """Parse command-line tokens into the analyser's argument dict.

    Recognised flags (case-insensitive, '-'/'_' ignored):
    -fileVertical, -fileHorizontal (also the 'horisontal' spelling),
    -equipment, -options.  Any other named flag aborts with an error;
    tokens that are not named arguments are skipped.
    """
    dispatch = {
        'filevertical': 'fileVertical',
        'filehorizontal': 'fileHorizontal',
        'filehorisontal': 'fileHorizontal',  # tolerate common misspelling
        'equipment': 'equipment',
        'options': 'options',
    }

    parsed = {
        'fileVertical': None,
        'fileHorizontal': None,
        'equipment': None,
        'options': None,
    }

    pos = 0
    while pos < len(argv):
        key, val, advance_to = _parse_named_arg(argv, pos)
        if key is None:
            pos += 1
            continue

        canon = key.lower().replace('-', '').replace('_', '')
        if canon not in dispatch:
            _die(f"Unknown argument '-{key}'.")
        parsed[dispatch[canon]] = val
        pos = advance_to

    return parsed


# ---------------------------------------------------------------------------
# Helper: analyse one file on both channels
# ---------------------------------------------------------------------------
def _analyze_both_channels(wav_path, direction, user_track_level):
    """
    Analyse one WAV file on both L and R channels.

    Both channels are always analysed independently -- a tone-location
    error on L does not skip R, because L and R may have different
    prominent frequencies.

    ToneLocationError and AnalysisError are handled identically (the
    original code duplicated the handler; it is merged here): the message
    is recorded for the report and logged against the file.

    Returns (res_L, err_L, res_R, err_R).
    """
    results = {'L': None, 'R': None}
    errors = {'L': None, 'R': None}

    for ch in ('L', 'R'):
        print(f"  --- Channel {ch} ---")
        try:
            results[ch] = analyze_file(wav_path, direction,
                                       user_track_level, ch)
        except (ToneLocationError, AnalysisError) as exc:
            # Record the failure but keep going with the other channel.
            errors[ch] = str(exc)
            _log_analysis_error(wav_path, f"[ch {ch}] {errors[ch]}")

    return results['L'], errors['L'], results['R'], errors['R']


# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------
def main():
    """Entry point: parse CLI args, load options, analyse, report, plot."""
    args = parse_args(sys.argv[1:])

    # Load the options file first: it may change MODE and the module-level
    # constants that everything below depends on.
    if args['options'] is not None:
        opt_path = args['options']
        if not os.path.isfile(opt_path):
            _die(f"-options file not found: {opt_path}")
        print(f"Loading options: {opt_path}")
        load_options(opt_path)

    # Which directions the configured mode requires.
    need_v = MODE in ('BOTH', 'VERTICAL')
    need_h = MODE in ('BOTH', 'HORIZONTAL')

    file_v = args['fileVertical']
    file_h = args['fileHorizontal']

    # If a required file is missing and the user gave no files at all,
    # show usage instead of a single-file error message.
    if need_v and file_v is None:
        if file_h is None:
            _print_usage()
        _die(f"mode = {MODE}: -fileVertical is required.")

    if need_h and file_h is None:
        if file_v is None:
            _print_usage()
        _die(f"mode = {MODE}: -fileHorizontal is required.")

    if file_v is not None and not os.path.isfile(file_v):
        _die(f"-fileVertical file not found: {file_v}")
    if file_h is not None and not os.path.isfile(file_h):
        _die(f"-fileHorizontal file not found: {file_h}")

    # Optional free-form equipment description, embedded in the plots.
    equipment_text = None
    equipment_path = args['equipment']
    if equipment_path is not None:
        if not os.path.isfile(equipment_path):
            _die(f"-equipment file not found: {equipment_path}")
        with open(equipment_path, 'r', encoding='utf-8') as fh:
            equipment_text = fh.read()
        print(f"Equipment info:  {equipment_path}")

    run_timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Per-file date metadata for the report/plot headers
    # (built by a helper defined elsewhere in this file).
    date_info = _build_file_date_info(
        file_v, file_h,
        args['options'], equipment_path,
        run_timestamp)

    fL_str = _freq_summary(F_LOW)
    fH_str = _freq_summary(F_HIGH)

    # Echo the effective configuration so a saved console log documents
    # exactly how the analysis was run.
    print(f"\nAnalysis mode:     {MODE}")
    print(f"Pilot tone (fH):   {fH_str}"
          f"  (nominal {FH_NOMINAL_DB:+.0f} dB)")
    print(f"Test tone (fL):    {fL_str}")
    print(f"Tone prominence:   {TONE_PROMINENCE_DB:.0f} dB above noise floor")
    print(f"Acceptance tol:    {TONE_ACCEPTANCE_TOLERANCE:.0f} Hz")
    print(f"Analysis channels: L and R (both)")
    print(f"Normalize levels:  {'yes (tallest tone = 0 dB)' if NORMALIZE_LEVELS else 'no (raw dB)'}")
    print(f"Freq tolerance:    +/-{FREQ_TOLERANCE:.0f} Hz "
          f"(wide initial: +/-{FREQ_TOLERANCE_WIDE:.0f} Hz)")
    print()

    # -- Vertical analysis (both channels) --
    res_vL = err_vL = res_vR = err_vR = None
    if need_v:
        print("Analysing VERTICAL modulation (VTA/SRA) ...")
        res_vL, err_vL, res_vR, err_vR = _analyze_both_channels(
            file_v, 'vertical', TRACK_LEVEL_VERTICAL)
        print()

    # -- Horizontal analysis (both channels) --
    res_hL = err_hL = res_hR = err_hR = None
    if need_h:
        print("Analysing HORIZONTAL modulation (LTA/Zenith) ...")
        res_hL, err_hL, res_hR, err_hR = _analyze_both_channels(
            file_h, 'horizontal', TRACK_LEVEL_HORIZONTAL)
        print()

    _flush_analysis_errors()

    # -- Report --
    # Per-channel errors are passed through so the report can show why a
    # channel produced no result.
    report = build_report(file_v, file_h,
                          res_vL, res_vR, res_hL, res_hR,
                          err_vL=err_vL, err_vR=err_vR,
                          err_hL=err_hL, err_hR=err_hR,
                          date_info=date_info)
    print(report)

    # Output names are derived from the vertical file when present,
    # otherwise from the horizontal file.
    base_file = file_v if file_v is not None else file_h
    base = os.path.splitext(os.path.basename(base_file))[0]
    report_path = f"imd_report_{base}.txt"
    with open(report_path, 'w', encoding='utf-8') as fh:
        fh.write(report + "\n")
    print(f"\nReport saved to: {report_path}")

    # -- Plots --
    plot_base = f"imd_plot_{base}"
    generate_plots(file_v, file_h,
                   res_vL, res_vR, res_hL, res_hR,
                   plot_base, equipment_text=equipment_text,
                   date_info=date_info)


# Run the analyser only when executed as a script (not on import).
if __name__ == "__main__":
    main()