Automate resolution measurement

Taylor, first of all, thank you for the article. It will take me some time to follow, but it is very interesting.
You gave me some code to get to the individual pixels.

import numpy as np

def extract_bayer_channel(image: np.ndarray, channel: str) -> np.ndarray:
    """Return the sub-image for one channel of a BGGR Bayer mosaic.

    Args:
        image: Full-resolution (H, W) Bayer array.
        channel: One of 'red', 'green1', 'green2', 'blue'.

    Returns:
        (H/2, W/2) array holding only that channel's pixels.

    Raises:
        ValueError: If ``channel`` is not a recognised name.
    """
    if channel == 'blue':
        return image[0::2, 0::2]
    elif channel == 'green1':  # green at blue row
        return image[0::2, 1::2]
    elif channel == 'green2':  # green at red row
        return image[1::2, 0::2]
    elif channel == 'red':
        return image[1::2, 1::2]
    else:
        raise ValueError("Channel must be one of: 'red', 'green1', 'green2', 'blue'.")

I have tried but no luck. How can I use this code to get say the blue pixels from the camera. Should I be able to get a 2MP grayscale image of say the blue pixels only, then do the same with each of the 4 color pixels.

What’s happening when you try to use the code to extract the bayer channels? An exception?
If you’re saying that the issue is that extracting the red, green or blue pixels using the code you posted above ends up with a grayscale image, that’s expected as well. Even though it’s gray, it’s the intensity of all the red, green or blue pixels (whichever you selected to extract). If something else is wrong with the code, I’ll need more info.

Here’s an example of how to use that function. I haven’t actually tested this, but at least the gist of it should be right.

def unpack(raw_bytes: np.ndarray):
    """Decode packed 10-bit Bayer data into a 16-bit image.

    Takes a 2D uint8 array of shape (H, W*2) whose little-endian byte
    pairs each encode one 10-bit pixel, and returns an (H, W) uint16
    array with the 10-bit values shifted into the full 16-bit range.
    """
    height, n_bytes = raw_bytes.shape
    if n_bytes % 2:
        raise ValueError("Expected even number of bytes per row")

    # Group each row into (low, high) byte pairs and recombine them.
    pairs = raw_bytes.reshape(height, n_bytes // 2, 2).astype(np.uint16)
    words = pairs[..., 0] | (pairs[..., 1] << 8)

    # Keep only the 10 valid bits, then scale up to 16-bit.
    return (words & 0x03FF) << 6

def extract_bayer_channel(image: np.ndarray, channel: str) -> np.ndarray:
    """Slice one channel out of a BGGR Bayer mosaic.

    Returns the (H/2, W/2) plane of the requested channel; raises
    ValueError for an unknown channel name.
    """
    if channel == 'blue':
        return image[0::2, 0::2]
    elif channel == 'green1':  # green at blue row
        return image[0::2, 1::2]
    elif channel == 'green2':  # green at red row
        return image[1::2, 0::2]
    elif channel == 'red':
        return image[1::2, 1::2]
    else:
        raise ValueError("Channel must be one of: 'red', 'green1', 'green2', 'blue'.")

def main():
    """Capture one raw frame; save the full mosaic and the blue plane as PNGs."""
    picam2 = Picamera2()
    capture_config = picam2.create_still_configuration(
        raw={"format": 'SBGGR10'})
    picam2.configure(capture_config)
    picam2.start()
    print(picam2.stream_configuration("raw"))
    raw_data = picam2.capture_array("raw")
    data = unpack(raw_data)

    img = Image.fromarray(data, mode='I;16')
    img.save('bayer_preserved.png', format='PNG')

    # Extract blue pixels from the numpy array -- extract_bayer_channel
    # slices an ndarray, so passing the PIL image here would fail.
    blue_pixels = extract_bayer_channel(data, 'blue')
    blue_img = Image.fromarray(blue_pixels, mode='I;16')
    blue_img.save('blue.png', format='PNG')

if __name__ == '__main__':
    main()

Taylor,
I had to modify the code a bit (not me but ChatGPT), I tried.
I was able to take an image and separate out the 4 channels.
import numpy as np
from picamera2 import Picamera2
from PIL import Image

def unpack(raw_bytes: np.ndarray):
    """Convert 2D uint8 image with shape (H, W*2) to 10-bit uint16 (H, W).

    Each little-endian byte pair encodes one 10-bit pixel; the result
    is scaled into the full 16-bit range.
    """
    h, w_bytes = raw_bytes.shape
    if w_bytes % 2 != 0:
        raise ValueError("Expected even number of bytes per row")

    w_pixels = w_bytes // 2
    raw_words = raw_bytes.reshape(h, w_pixels, 2)
    raw_10bit = (raw_words[:, :, 1].astype(np.uint16) << 8) | raw_words[:, :, 0].astype(np.uint16)
    return (raw_10bit & 0x03FF) << 6  # Scale 10-bit to full 16-bit range

def extract_bayer_channel(image: np.ndarray, channel: str) -> np.ndarray:
    """Return one (H/2, W/2) channel plane from a BGGR Bayer mosaic."""
    if channel == 'blue':
        return image[0::2, 0::2]  # Top-left of BGGR
    elif channel == 'green1':  # green at blue row
        return image[0::2, 1::2]
    elif channel == 'green2':  # green at red row
        return image[1::2, 0::2]
    elif channel == 'red':
        return image[1::2, 1::2]
    else:
        raise ValueError("Channel must be one of: 'red', 'green1', 'green2', 'blue'.")

def main():
    """Capture one raw frame; save the Bayer mosaic and the green1 plane."""
    picam2 = Picamera2()
    capture_config = picam2.create_still_configuration(raw={"format": 'SBGGR10'})
    picam2.configure(capture_config)
    picam2.start()
    raw_data = picam2.capture_array("raw")
    data = unpack(raw_data)

    # Save full Bayer data
    bayer_img = Image.fromarray(data, mode='I;16')
    bayer_img.save('bayer_preserved.png', format='PNG')

    # Extract blue channel and save
    #blue_pixels = extract_bayer_channel(data, 'blue')
    #blue_img = Image.fromarray(blue_pixels, mode='I;16')
    #blue_img.save('blue_052825_1.png', format='PNG')

    # Extract green1 channel and save
    green1_pixels = extract_bayer_channel(data, 'green1')
    green1_img = Image.fromarray(green1_pixels, mode='I;16')
    green1_img.save('green1.png', format='PNG')

    # Extract green2 channel and save
    #green2_pixels = extract_bayer_channel(data, 'green2')
    #green2_img = Image.fromarray(green2_pixels, mode='I;16')
    #green2_img.save('green2_052825_3.png', format='PNG')

    # Extract red channel and save
    #red_pixels = extract_bayer_channel(data, 'red')
    #red_img = Image.fromarray(red_pixels, mode='I;16')
    #red_img.save('red_052825_4.png', format='PNG')

if __name__ == '__main__':
    main()

I first did a Full Auto-Calibrate with a white LED. Then replaced the white with a green LED. I switched to the second operating system and took the png image 4 times, saving a Blue, Green1, Green2, and a Red image. The Blue and Red images were much lower intensity as expected.
Next I ran the Auto Resolution program. The Green1 and Green2 both gave the results below. The Blue and Red gave an error “No edge line detected”, which sounds right to me.



I suppose the next step would be to add the 2 green images together. According to ImageJ they are 2MP images with 8 bits per pixel. The other thing is the pixel depth, could it be changed to a higher depth maybe 16 bits per pixel. It looks like the original image is 16bit.
One last thing: if we add the 2 green pixel sets, should we end up with a 4MP image, or are the pixels averaged on a 2MP image?

Correction, the images are 16 bits per pixel.

The G1 and G2 channels are sampled at different spatial locations (see the link on CFA filters I posted earlier). Adding them is equivalent to applying a misaligned blur kernel, which artificially smooths sharp transitions. Since the MTF is derived from the sharpness of these transitions, this blending will distort your results. It lowers the apparent resolution (not because the system is worse, but because you’re introducing blur in preprocessing). For edge-based resolution testing, you should only use a single, spatially consistent green channel. This means you’d have two different resolution estimates per image (I suppose you could average the estimates if you wanted to).

It’s just interesting that the 2 channels show a difference. The resolution program did pick a different region for each. Maybe an improvement would be to pick the center of the image after finding the edge.
Could I make a 4MP image out of G1 & G2, would that possibly improve the resolution. I assume any averaging wouldn’t help.

I modified the code (ChatGPT) to combine the 2 green channels, didn’t work.

Here is the code:

import numpy as np
from picamera2 import Picamera2
from PIL import Image

def unpack(raw_bytes: np.ndarray):
    """Convert 2D uint8 Bayer data with shape (H, W*2) to 10-bit uint16 (H, W).

    Each pair of bytes encodes one 10-bit pixel (little-endian); values
    are shifted into the full 16-bit range.
    """
    h, w_bytes = raw_bytes.shape
    if w_bytes % 2 != 0:
        raise ValueError("Expected even number of bytes per row")

    w_pixels = w_bytes // 2
    raw_words = raw_bytes.reshape(h, w_pixels, 2)
    raw_10bit = (raw_words[:, :, 1].astype(np.uint16) << 8) | raw_words[:, :, 0].astype(np.uint16)

    return (raw_10bit & 0x03FF) << 6  # Scale to 16-bit

def extract_bayer_channel(image: np.ndarray, channel: str) -> np.ndarray:
    """Return one (H/2, W/2) channel plane from a BGGR Bayer mosaic."""
    if channel == 'blue':
        return image[0::2, 0::2]
    elif channel == 'green1':  # Green on blue rows
        return image[0::2, 1::2]
    elif channel == 'green2':  # Green on red rows
        return image[1::2, 0::2]
    elif channel == 'red':
        return image[1::2, 1::2]
    else:
        raise ValueError("Channel must be one of: 'red', 'green1', 'green2', 'blue'.")

def interleave_green_channels(g1: np.ndarray, g2: np.ndarray) -> np.ndarray:
    """Combine Green1 and Green2 into a full-resolution green channel image.

    Args:
        g1: Green pixels from even rows (Green1).
        g2: Green pixels from odd rows (Green2).

    Returns:
        (2H, 2W) uint16 array with the green samples restored to their
        original BGGR positions; non-green positions are left at zero.
    """
    h, w = g1.shape  # g1 and g2 should be the same shape
    full_h = h * 2
    full_w = w * 2

    green_full = np.zeros((full_h, full_w), dtype=np.uint16)

    # Place green1 at even rows, odd columns
    green_full[0::2, 1::2] = g1

    # Place green2 at odd rows, even columns
    green_full[1::2, 0::2] = g2

    return green_full

def save_image(array: np.ndarray, filename: str):
    """Save a uint16 array as a 16-bit grayscale PNG and report the path."""
    img = Image.fromarray(array, mode='I;16')
    img.save(filename, format='PNG')
    print(f"Saved: {filename}")

def main():
    """Capture one raw frame; save every Bayer plane plus the combined green."""
    picam2 = Picamera2()
    capture_config = picam2.create_still_configuration(raw={"format": 'SBGGR10'})
    picam2.configure(capture_config)
    picam2.start()

    raw_data = picam2.capture_array("raw")
    data = unpack(raw_data)

    # Save full Bayer image
    save_image(data, "bayer_preserved.png")

    # Extract Bayer components
    g1 = extract_bayer_channel(data, 'green1')
    g2 = extract_bayer_channel(data, 'green2')
    red = extract_bayer_channel(data, 'red')
    blue = extract_bayer_channel(data, 'blue')

    save_image(g1, "green1.png")
    save_image(g2, "green2.png")
    save_image(red, "red.png")
    save_image(blue, "blue.png")

    # Combine green1 + green2 into full-res (sparse) green image
    green_full = interleave_green_channels(g1, g2)
    save_image(green_full, "green_full_res.png")

if __name__ == '__main__':
    main()

I tried this code with the white LED, separating all 4 channels and combining the 2 green channels. The 4 individual channels gave good results. But the combined green channel didn’t give any good results. The combined image is 3280x2464, image seems fine, but the automated pgm didn’t give any reasonable pixel values?
I tried the same with the green LED, again no reasonable results from the combined green image. I assume the interleave function isn’t correct
green_full = interleave_green_channels(g1, g2)
Here is the combined green channels image with the white LED:


The results are 9.00px, N/A, 7.00px, should I just stick with the individual channels or is there something else I can try?
I haven’t yet tried to use the center of the edge, thinking that would make the program more accurate.

I added Bilinear Interpolation in the combined image. I used a green LED. The combined image went to 8MP, this is a suggestion from (ChatGPT). The Green1 shows good results


The Green2 picked near the edge, not as good

Each of those images are 2MP
The combined image of the 2 green channels with Bilinear interpolation on the other 2 channels, suggested by (ChatGPT) seems to give better results? Note: (ChatGPT) has also suggested trying a bicubic or a gradient-based green estimation.

I’m still unclear on where the Edge Spread Function selects the 3 lines, it may be better to select the center or close to the center of the edge, any thoughts?

Forgot to provide the code, help from (ChatGPT)
You need to install scipy: this is for the pixel averaging/interpolation
called Bilinear Interpolation it fills the missing pixels with the average of the 4 connected neighbors

sudo apt update
sudo apt install python3-scipy

import numpy as np
from picamera2 import Picamera2
from PIL import Image
from scipy.ndimage import convolve

def unpack(raw_bytes: np.ndarray):
    """Unpack (H, W*2) uint8 Bayer bytes into (H, W) uint16, scaled to 16-bit."""
    h, w_bytes = raw_bytes.shape
    if w_bytes % 2 != 0:
        raise ValueError("Expected even number of bytes per row")
    w_pixels = w_bytes // 2
    raw_words = raw_bytes.reshape(h, w_pixels, 2)
    raw_10bit = (raw_words[:, :, 1].astype(np.uint16) << 8) | raw_words[:, :, 0].astype(np.uint16)
    return (raw_10bit & 0x03FF) << 6  # 10-bit to 16-bit

def extract_bayer_channel(image: np.ndarray, channel: str) -> np.ndarray:
    """Return one (H/2, W/2) channel plane from a BGGR Bayer mosaic."""
    if channel == 'blue':
        return image[0::2, 0::2]
    elif channel == 'green1':
        return image[0::2, 1::2]
    elif channel == 'green2':
        return image[1::2, 0::2]
    elif channel == 'red':
        return image[1::2, 1::2]
    else:
        raise ValueError("Channel must be one of: 'red', 'green1', 'green2', 'blue'.")

def interleave_green_channels(g1: np.ndarray, g2: np.ndarray) -> np.ndarray:
    """Restore Green1/Green2 planes to their BGGR positions in a (2H, 2W) array.

    Non-green positions are left at zero.
    """
    h, w = g1.shape
    full_h = h * 2
    full_w = w * 2
    green_full = np.zeros((full_h, full_w), dtype=np.uint16)
    green_full[0::2, 1::2] = g1
    green_full[1::2, 0::2] = g2
    return green_full

def interpolate_missing_pixels(image: np.ndarray) -> np.ndarray:
    """Fill zero-valued pixels with the average of their 4-connected neighbors.

    Existing (non-zero) pixels are left untouched; a missing pixel with
    no non-zero neighbor also stays zero.
    """
    # Create a mask of valid data
    valid = (image > 0).astype(np.uint8)

    # Use a kernel to sum neighbor values (bilinear-style 4-neighborhood)
    kernel = np.array([[0, 1, 0],
                       [1, 0, 1],
                       [0, 1, 0]], dtype=np.uint8)

    # uint32 accumulator avoids overflow when summing four uint16 values
    value_sum = convolve(image.astype(np.uint32), kernel, mode='constant', cval=0)
    weight_sum = convolve(valid, kernel, mode='constant', cval=0)

    # Avoid divide-by-zero: only fill where at least one neighbor is valid
    interpolated = image.copy()
    missing = (valid == 0) & (weight_sum > 0)
    interpolated[missing] = (value_sum[missing] // weight_sum[missing]).astype(np.uint16)

    return interpolated

def save_image(array: np.ndarray, filename: str):
    """Save a uint16 array as a 16-bit grayscale PNG and report the path."""
    img = Image.fromarray(array, mode='I;16')
    img.save(filename, format='PNG')
    print(f"Saved: {filename}")

def main():
    """Capture one raw frame; save green planes, their sparse combination,
    and a bilinearly interpolated full-resolution green image."""
    picam2 = Picamera2()
    capture_config = picam2.create_still_configuration(raw={"format": 'SBGGR10'})
    picam2.configure(capture_config)
    picam2.start()

    raw_data = picam2.capture_array("raw")
    data = unpack(raw_data)

    save_image(data, "bayer_preserved.png")

    g1 = extract_bayer_channel(data, 'green1')
    g2 = extract_bayer_channel(data, 'green2')

    save_image(g1, "green1.png")
    save_image(g2, "green2.png")

    green_full = interleave_green_channels(g1, g2)
    save_image(green_full, "green_full_raw.png")

    green_interp = interpolate_missing_pixels(green_full)
    save_image(green_interp, "green_full_interpolated.png")

if __name__ == '__main__':
    main()

I have been looking for ways to measure and then improve the resolution of the images collected by the microscope. I was using Taylor’s program for automated resolution. I noticed that I was getting changes in the resolution measurement for the same image — not big changes, but changes of a couple pixels. So I modified Taylor’s program (with ChatGPT). The program now detects the edge of the razor blade, then extends that edge to the full image. It determines the pixel resolution based on a fraction of that range. For example, (0.4, 0.6) would sample in the middle of the image. Next, NUM_SAMPLES sets how many profiles to extract — 3, for example. Now you get three profiles in the center of the image.

Here is the code:

Code for Edge-Based Resolution Sampling In the Middle, Taylor 4/29/25+

#Extending the edge to the end of the image.
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, feature
from skimage.transform import probabilistic_hough_line
from skimage.color import rgb2gray
from scipy.ndimage import gaussian_filter1d
from shapely.geometry import LineString, box

# ---------------------
# CONFIGURATION
# ---------------------

FRACTION_RANGE = (0.4, 0.6)  # e.g. (0.4, 0.6) = center portion of the edge
NUM_SAMPLES = 3  # number of profiles to extract in that range

# ---------------------
# LOGIC
# ---------------------

def load_image(path):
    """Load an image from disk, converting RGB to grayscale if needed."""
    img = io.imread(path)
    if img.ndim == 3:
        img = rgb2gray(img)
    return img

def detect_edge_line(image):
    """Find the dominant straight edge and extend it across the image.

    Returns the clipped line as ((x1, y1), (x2, y2)) in pixel coordinates.

    Raises:
        ValueError: If no Hough line is found, or the extended line does
            not intersect the image bounds.
    """
    edges = feature.canny(image, sigma=2)
    lines = probabilistic_hough_line(edges, threshold=10, line_length=50, line_gap=10)
    if not lines:
        raise ValueError("No edge line detected.")

    # Use the longest line detected
    lines.sort(key=lambda l: np.linalg.norm(np.subtract(l[0], l[1])), reverse=True)
    (x1, y1), (x2, y2) = lines[0]

    # Extend line to image bounds using Shapely
    dx, dy = x2 - x1, y2 - y1
    line = LineString([(x1 - 10000 * dx, y1 - 10000 * dy), (x2 + 10000 * dx, y2 + 10000 * dy)])
    image_box = box(0, 0, image.shape[1], image.shape[0])
    cropped = line.intersection(image_box)

    if cropped.is_empty or not isinstance(cropped, LineString):
        raise ValueError("Extended line does not intersect image bounds.")

    x1c, y1c, x2c, y2c = *cropped.coords[0], *cropped.coords[-1]
    return ((int(x1c), int(y1c)), (int(x2c), int(y2c)))

def get_perpendicular_lines(line, length=50):
    """Build NUM_SAMPLES sample lines perpendicular to `line`.

    Sample positions are spread over FRACTION_RANGE of the edge; each
    returned (start, end) line is `length` pixels long, centered on the edge.
    """
    (x1, y1), (x2, y2) = line
    dx, dy = x2 - x1, y2 - y1
    normal = np.array([-dy, dx], dtype=np.float64)
    normal /= np.linalg.norm(normal)
    sample_fracs = np.linspace(FRACTION_RANGE[0], FRACTION_RANGE[1], NUM_SAMPLES)
    perp_lines = []  # the `[]` was lost in the forum paste
    for f in sample_fracs:
        mx = x1 + dx * f
        my = y1 + dy * f
        start = (int(mx - normal[0] * length / 2), int(my - normal[1] * length / 2))
        end = (int(mx + normal[0] * length / 2), int(my + normal[1] * length / 2))
        perp_lines.append((start, end))
    return perp_lines

def extract_profile(image, line):
    """Sample pixel values along a line, roughly one sample per pixel of length.

    Coordinates are rounded to integers and clipped to the image bounds.
    """
    (x0, y0), (x1, y1) = line
    length = int(np.hypot(x1 - x0, y1 - y0))
    x_vals = np.linspace(x0, x1, length)
    y_vals = np.linspace(y0, y1, length)
    x_vals = np.clip(x_vals.astype(int), 0, image.shape[1] - 1)
    y_vals = np.clip(y_vals.astype(int), 0, image.shape[0] - 1)
    profile = image[y_vals, x_vals]
    return profile

def compute_lsf(esf):
    """Differentiate an edge-spread function into a smoothed line-spread function.

    The ESF is flipped if it is descending so the edge always rises
    before the gradient is taken; the LSF is Gaussian-smoothed (sigma=1).
    """
    if np.mean(esf[:5]) > np.mean(esf[-5:]):
        esf = esf[::-1]
    lsf = np.gradient(esf)
    return gaussian_filter1d(lsf, sigma=1.0)

def compute_fwhm(lsf):
    """Full width at half maximum of the LSF in pixels, or None if undefined."""
    peak = np.max(lsf)
    half_max = peak / 2
    indices = np.where(lsf >= half_max)[0]
    return indices[-1] - indices[0] if len(indices) > 1 else None

# ---------------------
# VISUALIZATION
# ---------------------

def visualize_results(image, edge_line, perp_lines, profiles, lsfs, fwhms):
    """Plot the detected edge, the sample lines, the ESFs/LSFs and FWHM text."""
    fig, axs = plt.subplots(2, 2, figsize=(12, 10))

    axs[0, 0].imshow(image, cmap='gray')
    axs[0, 0].set_title("Detected Edge and Sampled Lines")
    axs[0, 0].plot([edge_line[0][0], edge_line[1][0]],
                   [edge_line[0][1], edge_line[1][1]], 'r-', linewidth=2, label='Edge')
    for idx, line in enumerate(perp_lines):
        axs[0, 0].plot([line[0][0], line[1][0]],
                       [line[0][1], line[1][1]], '--', linewidth=1.5, label=f'Perp {idx+1}')
    axs[0, 0].legend()

    axs[0, 1].set_title("Edge Spread Functions (ESF)")
    for i, p in enumerate(profiles):
        axs[0, 1].plot(p, label=f'ESF {i+1}')
    axs[0, 1].legend()

    axs[1, 0].set_title("Line Spread Functions (LSF)")
    for i, lsf in enumerate(lsfs):
        axs[1, 0].plot(lsf, label=f'LSF {i+1}')
    axs[1, 0].legend()

    axs[1, 1].axis('off')
    text = "\n".join([f"FWHM {i+1}: {fwhm:.2f} px" if fwhm else f"FWHM {i+1}: N/A"
                      for i, fwhm in enumerate(fwhms)])
    axs[1, 1].text(0.1, 0.6, text, fontsize=14)

    plt.tight_layout()
    plt.show()

# ---------------------
# MAIN FUNCTION
# ---------------------

def main(image_path):
    """Run the full edge-based resolution pipeline on one image file."""
    image = load_image(image_path)
    edge_line = detect_edge_line(image)
    perp_lines = get_perpendicular_lines(edge_line)
    profiles = [extract_profile(image, line) for line in perp_lines]
    lsfs = [compute_lsf(profile) for profile in profiles]
    fwhms = [compute_fwhm(lsf) for lsf in lsfs]
    visualize_results(image, edge_line, perp_lines, profiles, lsfs, fwhms)

# ---------------------
# RUN
# ---------------------

if __name__ == "__main__":
    main("/content/green2_G_060425_1.png")  # Replace with actual image file path

I used this code on the razor blade with white LED imaging on the jpeg image.

I did the same position sampling the green channel of the raw png image:

Finally I sampled across the full image using a range 0,1 looking at 6 profiles

The center images show the best resolution as expected.

My plan is to use this tool to try to make changes to the visible light, maybe the position or the lens or type of LED.

Anybody have any ideas for improving the resolution?

I have a program for capturing raw images from the camera (help from ChatGPT). I have this program on a separate MicroSD. I use the standard program to focus and take the .jpeg image. Next I shut down then install the other MicroSD card to take raw images from the camera. I use Thonny to run the program. This program has a preview and allows you to set the exposure time then take all the available images in .png. Red, Green1, Green2, Blue and a combined Green along with a full bayer_preserved.

My plan is to compare all these images against the .jpeg with different LED lighting and different heights for the lighting, looking for the best resolution.

Here is the Python program for taking raw images from the camera.

import numpy as np
import cv2
import time
from picamera2 import Picamera2
from PIL import Image
import os

# ------------------- Utility Functions -------------------

def unpack(raw_bytes: np.ndarray):
    """Unpack 10-bit Bayer data into a 16-bit numpy array."""
    h, w_bytes = raw_bytes.shape
    if w_bytes % 2 != 0:
        raise ValueError("Expected even number of bytes per row")
    w_pixels = w_bytes // 2
    raw_words = raw_bytes.reshape(h, w_pixels, 2)
    raw_10bit = (raw_words[:, :, 1].astype(np.uint16) << 8) | raw_words[:, :, 0].astype(np.uint16)
    return (raw_10bit & 0x03FF) << 6  # Scale to 16-bit

def extract_bayer_channel(image: np.ndarray, channel: str) -> np.ndarray:
    """Return one (H/2, W/2) channel plane from a BGGR Bayer mosaic."""
    if channel == 'blue':
        return image[0::2, 0::2]
    elif channel == 'green1':
        return image[0::2, 1::2]
    elif channel == 'green2':
        return image[1::2, 0::2]
    elif channel == 'red':
        return image[1::2, 1::2]
    else:
        raise ValueError("Channel must be one of: 'red', 'green1', 'green2', 'blue'.")

def interleave_green_channels(g1: np.ndarray, g2: np.ndarray) -> np.ndarray:
    """Restore Green1/Green2 planes to their BGGR positions in a (2H, 2W) array.

    Non-green positions are left at zero.
    """
    h, w = g1.shape
    full_h = h * 2
    full_w = w * 2
    green_full = np.zeros((full_h, full_w), dtype=np.uint16)
    green_full[0::2, 1::2] = g1
    green_full[1::2, 0::2] = g2
    return green_full

def save_16bit_png(array: np.ndarray, path: str):
    """Save a uint16 array as a 16-bit grayscale PNG (ICC profile stripped)."""
    img = Image.fromarray(array, mode='I;16')
    img.info.pop('icc_profile', None)
    img.save(path)
    print(f"Saved: {path}")

def save_as_jpeg(array: np.ndarray, path: str):
    """Normalize 16-bit data to 8-bit and save as JPEG for quick viewing."""
    # Guard against an all-zero frame to avoid dividing by zero.
    peak = int(array.max()) or 1
    normalized = (array / peak * 255.0).astype(np.uint8)
    img = Image.fromarray(normalized, mode='L')
    img.save(path, format='JPEG')
    print(f"Saved: {path}")

def save_preview_all_formats(bgr_array: np.ndarray, folder: str):
    """Save the preview frame as PNG, TIFF and JPEG with corrected colors."""
    # Convert OpenCV BGR to RGB for accurate color
    rgb_array = cv2.cvtColor(bgr_array, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(rgb_array)
    img.info.pop('icc_profile', None)
    img.save(os.path.join(folder, "preview_rgb.png"))
    img.save(os.path.join(folder, "preview_rgb.tiff"))
    img.save(os.path.join(folder, "preview_rgb.jpeg"))
    print("Saved preview in PNG, TIFF, and JPEG (color-corrected)")

# ------------------- Main Program -------------------

def main():
    """Interactive raw-capture loop.

    Shows a live preview; 's' saves the preview plus all Bayer planes
    (red, green1, green2, blue, combined green) into a timestamped
    folder, 'q' quits.
    """
    # Prompt user for exposure time
    try:
        exposure_ms = int(input("Enter exposure time in milliseconds (e.g., 1000 for 1s): "))
    except ValueError:
        print("Invalid input, using default 1000ms")
        exposure_ms = 1000
    exposure_us = exposure_ms * 1000

    # Setup camera
    picam2 = Picamera2()
    config = picam2.create_preview_configuration(raw={"format": 'SBGGR10'}, main={"format": 'RGB888'})
    picam2.configure(config)

    picam2.set_controls({
        "ExposureTime": exposure_us,
        "AnalogueGain": 4.0,
        "AeEnable": False,
        "AwbEnable": False
    })

    picam2.start()
    time.sleep(2)
    print("Preview started. Press 's' to save images, 'q' to quit.")

    while True:
        frame = picam2.capture_array("main")  # preview RGB frame (BGR format)
        cv2.imshow("Preview", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('s'):
            timestamp = int(time.time())
            folder = f"capture_{timestamp}"
            os.makedirs(folder, exist_ok=True)

            # Save preview RGB image (color-corrected)
            save_preview_all_formats(frame, folder)

            # Capture raw Bayer data
            raw_data = picam2.capture_array("raw")
            bayer = unpack(raw_data)
            save_16bit_png(bayer, os.path.join(folder, "bayer_preserved.png"))

            # Extract and save Bayer channels
            g1 = extract_bayer_channel(bayer, 'green1')
            g2 = extract_bayer_channel(bayer, 'green2')
            red = extract_bayer_channel(bayer, 'red')
            blue = extract_bayer_channel(bayer, 'blue')
            green_full = interleave_green_channels(g1, g2)

            for name, arr in {
                "blue": blue,
                "green1": g1,
                "green2": g2,
                "red": red,
                "green_full_res": green_full
            }.items():
                save_16bit_png(arr, os.path.join(folder, f"{name}.png"))
                save_as_jpeg(arr, os.path.join(folder, f"{name}.jpeg"))

            print(f"✅ All data saved in: {folder}")

        elif key == ord('q'):
            break

    picam2.stop()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()