mirror of
https://github.com/raspberrypi/linux.git
synced 2025-12-09 11:30:02 +00:00
Sometimes setting up a DisplayPort tunnel may take quite a long time. The reason is that the graphics driver (DPRX) is expected to issue a read of certain monitor capabilities over the AUX channel, and the "suggested" timeout from VESA is 5 seconds. If no graphics driver is loaded this does not happen, and currently we time out and tear the tunnel down. The reason for this is that at least Intel discrete USB4 controllers do not send plug/unplug events about whether the DisplayPort cable from the GPU to the controller is connected or not, so in order to "release" the DisplayPort OUT adapter (the one that has the monitor connected) we must tear the tunnel down after this timeout has elapsed. In typical cases a graphics driver is always loaded and all the cables are connected, but for instance the Intel graphics CI only loads the graphics driver after the system is fully booted up. This makes the driver tear down the DisplayPort tunnel. To help this case we allow passing a bigger or indefinite timeout through a new module parameter (dprx_timeout). To keep the driver a bit more responsive during that time we change the way DisplayPort tunnels get activated. We first do the normal tunnel setup and then poll for completion of the DPRX capabilities read in a separate worker. This also makes the driver accept bandwidth requests to already established DisplayPort tunnels more responsively. If the tunnel still fails to establish, we tear it down and remove the DisplayPort IN adapter from the dp_resource list to avoid using it again (unless we get a hotplug event on that adapter). Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
219 lines
8.2 KiB
C
219 lines
8.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Thunderbolt driver - Tunneling support
|
|
*
|
|
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
|
|
* Copyright (C) 2019, Intel Corporation
|
|
*/
|
|
|
|
#ifndef TB_TUNNEL_H_
|
|
#define TB_TUNNEL_H_
|
|
|
|
#include "tb.h"
|
|
|
|
/**
 * enum tb_tunnel_type - Type of a tunnel
 * @TB_TUNNEL_PCI: PCIe tunnel
 * @TB_TUNNEL_DP: DisplayPort tunnel
 * @TB_TUNNEL_DMA: Software DMA tunnel
 * @TB_TUNNEL_USB3: USB3 tunnel
 */
enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};
|
|
|
|
/**
 * enum tb_tunnel_state - State of a tunnel
 * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() is not called for the tunnel
 * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the
 *			  tunnel but it is not yet fully active (for DP this
 *			  means the DPRX capabilities read poll is still
 *			  running)
 * @TB_TUNNEL_ACTIVE: The tunnel is fully active
 */
enum tb_tunnel_state {
	TB_TUNNEL_INACTIVE,
	TB_TUNNEL_ACTIVATING,
	TB_TUNNEL_ACTIVE,
};
|
|
|
|
/**
 * struct tb_tunnel - Tunnel between two ports
 * @kref: Reference count
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered incomplete
 *	      tunnels may be %NULL or null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @pre_activate: Optional tunnel specific initialization called before
 *		  activation. Can touch hardware.
 * @activate: Optional tunnel specific activation/deactivation
 * @post_deactivate: Optional tunnel specific de-initialization called
 *		     after deactivation. Can touch hardware.
 * @destroy: Optional tunnel specific callback called when the tunnel
 *	     memory is being released. Should not touch hardware.
 * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
 * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
 * @alloc_bandwidth: Change tunnel bandwidth allocation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @state: Current state of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 * @bw_mode: DP bandwidth allocation mode registers can be used to
 *	     determine consumed and allocated bandwidth
 * @dprx_canceled: Was DPRX capabilities read poll canceled
 * @dprx_timeout: If set DPRX capabilities read poll work will timeout
 *		  after this passes
 * @dprx_work: Worker that is scheduled to poll completion of DPRX
 *	       capabilities read
 * @callback: Optional callback called when DP tunnel is fully activated
 * @callback_data: Optional data for @callback
 */
struct tb_tunnel {
	struct kref kref;
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*pre_activate)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	void (*post_deactivate)(struct tb_tunnel *tunnel);
	void (*destroy)(struct tb_tunnel *tunnel);
	int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
				 int *max_down);
	int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
				   int *allocated_down);
	int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
			       int *alloc_down);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	enum tb_tunnel_state state;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
	bool bw_mode;
	bool dprx_canceled;
	ktime_t dprx_timeout;
	struct delayed_work dprx_work;
	void (*callback)(struct tb_tunnel *tunnel, void *data);
	void *callback_data;
};
|
|
|
|
/* Tunnel discovery and allocation, one set per tunnel type */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down,
				     void (*callback)(struct tb_tunnel *, void *),
				     void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

/* Tunnel lifecycle: activation, deactivation and reference dropping */
void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
|
|
|
|
/**
|
|
* tb_tunnel_is_active() - Is tunnel fully activated
|
|
* @tunnel: Tunnel to check
|
|
*
|
|
* Returns %true if @tunnel is fully activated. For other than DP
|
|
* tunnels this is pretty much once tb_tunnel_activate() returns
|
|
* successfully. However, for DP tunnels this returns %true only once the
|
|
* DPRX capabilities read has been issued successfully.
|
|
*/
|
|
static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
|
|
{
|
|
return tunnel->state == TB_TUNNEL_ACTIVE;
|
|
}
|
|
|
|
/* Tunnel introspection */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);

/* Bandwidth management (dispatched to the per-type callbacks in struct tb_tunnel) */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down);
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down);
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);
|
|
|
|
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
|
|
{
|
|
return tunnel->type == TB_TUNNEL_PCI;
|
|
}
|
|
|
|
static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
|
|
{
|
|
return tunnel->type == TB_TUNNEL_DP;
|
|
}
|
|
|
|
static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
|
|
{
|
|
return tunnel->type == TB_TUNNEL_DMA;
|
|
}
|
|
|
|
static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
|
|
{
|
|
return tunnel->type == TB_TUNNEL_USB3;
|
|
}
|
|
|
|
/**
 * tb_tunnel_direction_downstream() - Is the tunnel directed downstream
 * @tunnel: Tunnel to check
 *
 * Returns the result of tb_port_path_direction_downstream() for the
 * tunnel's @src_port/@dst_port pair — presumably %true when the path
 * runs away from the host router; the exact semantics are defined by
 * that helper (declared elsewhere, not visible here).
 */
static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
{
	return tb_port_path_direction_downstream(tunnel->src_port,
						 tunnel->dst_port);
}
|
|
|
|
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);

/*
 * Logging helper: prefixes the message with both tunnel endpoints
 * (route and adapter number of the source and destination ports) and
 * the tunnel type name. The do/while(0) wrapper makes the macro safe
 * to use as a single statement.
 */
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_type_name(__tunnel),                    \
		      ## arg);                                          \
	} while (0)

/* Per-level wrappers around __TB_TUNNEL_PRINT() */
#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

#endif /* TB_TUNNEL_H_ */
|