How to process gyro, accell, depth and color frames?

here is my code where i am trying to use information from all the 4 sensors but i am getting an error:

code:

# Get device product line for setting a supporting resolution
ctx= Context()
device_list= ctx.query_devices()
if device_list.get_count() == 0:
    print("No device connected")
device= device_list.get_device_by_index(0)
sensor_list= device.get_sensor_list()


config = Config()
pipeline = Pipeline(device)


# Start depth streaming
profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
assert profile_list is not None
try:
    depth_profile = profile_list.get_video_stream_profile(640, 0, OBFormat.Y16, 30)
except OBError as e:
    print("Error: ", e)
    depth_profile = profile_list.get_default_video_stream_profile()
assert depth_profile is not None

profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
try:
    color_profile = profile_list.get_video_stream_profile(640, 0, OBFormat.RGB, 30)
except OBError as e:
    print(e)
    color_profile = profile_list.get_default_video_stream_profile()


gyro_senor = sensor_list.get_sensor_by_type(OBSensorType.GYRO_SENSOR)
if gyro_senor is None:
    print("No gyro sensor")
gyro_profile_list = gyro_senor.get_stream_profile_list()
gyro_profile = gyro_profile_list.get_stream_profile_by_index(0)
assert gyro_profile is not None


accel_sensor = sensor_list.get_sensor_by_type(OBSensorType.ACCEL_SENSOR)
if accel_sensor is None:
    print("No accel sensor")
accel_profile_list = accel_sensor.get_stream_profile_list()
accel_profile = accel_profile_list.get_stream_profile_by_index(0)
assert accel_profile is not None


config.enable_stream(color_profile)
config.enable_stream(depth_profile)

pipeline.start(config)


try:
    timer = 0
    times = []
    while True:
        hfov = 91 # Horizontal Field of View
        vfov = 66 # Vertical Field of View
        pitch_stabilization = False # Enable or Disable Pitch correction

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames(100)
        if frames is None:
            continue
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        if depth_frame is None:
            continue
        w = depth_frame.get_width()
        h = depth_frame.get_height()
        scale = depth_frame.get_depth_scale()
        
        if color_frame is None:
            continue
        # covert to RGB format
        color_image = frame_to_bgr_image(color_frame)
        if color_image is None:
            print("failed to convert frame to image")
            continue

        depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
        depth_data = depth_data.reshape((h, w))

        depth_data = depth_data.astype(np.float32) * scale
        depth_image = np.asanyarray(depth_data)

        #print(depth_image.shape)

        vertical_angular_scale = h/vfov

        if pitch_stabilization == True:
            gyro_frame = gyro_senor.start(gyro_profile, on_gyro_frame_callback)
            accel_frame = accel_sensor.start(accel_profile, on_accel_frame_callback)
            pixel_change = pitch_correction(vertical_angular_scale, accel_frame, gyro_frame)
        else:
            pixel_change = 0.0

        # Cropping the Depth image to h/64 thickeness Horizontally
        pixels = int(480*12/vfov)
        top = int(h/2 - pixels + pixel_change) if int(h/2-pixels + pixel_change) >= 0 else 0
        bottom = int(h/2 + pixels + pixel_change) if int(h/2 + pixels + pixel_change) >= 0 else 480
        depth_image_2 = depth_image[top:bottom,:]
        
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_BONE)
        depth_colormap_dim = depth_colormap.shape

        # Entering the coordinates and parameters for drawing the horizon line
        start_point = (0, top)
        end_point = (w, bottom)
        thickness = 2
        color = (255,0,0)
        
        resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
        images = np.hstack((depth_colormap, resized_color_image))
        

        #start = time.time()
        plot_data(depth_image_2, images)    
        
        #end = time.time()
        #times.append(end - start)
        #if timer == 30:
            #break
        #timer += 1
    #print("The time of execution of above program is :", np.average(times) * 10**3, "ms")

finally:

    # Stop streaming
    pipeline.stop()

I repetitively getting the following in the log and I am not able to get any plot for the images:
[2023-08-31 14:30:51.154532][warning][116087][Pipeline.cpp:289] Pipeline source frameset queue fulled, drop the oldest frame!

When I do set pitch_stabilisation to false which basically means that I will not be using the accell and gyro frames data, then everything is working fine, I am getting the required plots.
Can someone tell me how to use all the frames simultaneously?