xen: branch for v5.10-rc1c

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCX5VVeQAKCRCAXGG7T9hj
 voI0AQD3ol/EN9uHW1qKduBI/nl5tgv325Zri8CMu60kS45pgAD/ccUXRcHojs3l
 YIfgcgT4qKQFWzv57Fc9FUBQJMahJgM=
 =6ZgH
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-5.10b-rc1c-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull more xen updates from Juergen Gross:

 - a series for the Xen pv block drivers adding module parameters for
   better control of resource usage

 - a cleanup series for the Xen event driver

* tag 'for-linus-5.10b-rc1c-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  Documentation: add xen.fifo_events kernel parameter description
  xen/events: unmask a fifo event channel only if it was masked
  xen/events: only register debug interrupt for 2-level events
  xen/events: make struct irq_info private to events_base.c
  xen: remove no longer used functions
  xen-blkfront: Apply changed parameter name to the document
  xen-blkfront: add a parameter for disabling of persistent grants
  xen-blkback: add a parameter for disabling of persistent grants
Linus Torvalds 2020-10-25 10:55:35 -07:00
commit bd6aabc7ca
12 changed files with 152 additions and 126 deletions

Documentation/ABI/testing/sysfs-driver-xen-blkback

@@ -35,3 +35,12 @@ Description:
controls the duration in milliseconds that blkback will not
cache any page not backed by a grant mapping.
The default is 10ms.
What: /sys/module/xen_blkback/parameters/feature_persistent
Date: September 2020
KernelVersion: 5.10
Contact: SeongJae Park <sjpark@amazon.de>
Description:
Whether to enable the persistent grants feature or not. Note
that this option only takes effect on newly created backends.
The default is Y (enable).

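The "only takes effect on newly created backends" wording above follows from how the knob is consumed: the module parameter is copied into per-device state at creation time, so flipping the sysfs file later only influences devices created afterwards. A minimal sketch of that pattern, with a hypothetical demo_backend standing in for blkback's struct xen_vbd (the actual blkback/blkfront hunks are further down in this diff):

  #include <linux/module.h>
  #include <linux/slab.h>

  /* Writable at runtime via /sys/module/<module>/parameters/feature_persistent. */
  static bool feature_persistent = true;
  module_param(feature_persistent, bool, 0644);
  MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");

  /* Hypothetical per-device state, standing in for struct xen_vbd. */
  struct demo_backend {
          bool feature_gnt_persistent;
  };

  static struct demo_backend *demo_backend_create(void)
  {
          struct demo_backend *be = kzalloc(sizeof(*be), GFP_KERNEL);

          if (!be)
                  return NULL;
          /*
           * Snapshot the parameter at creation time: changing the sysfs
           * knob later does not affect backends that already exist.
           */
          be->feature_gnt_persistent = feature_persistent;
          return be;
  }

The frontend side takes the same snapshot in blkfront_probe(), which is why the blkfront entry below carries the same "newly created" caveat.
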
Documentation/ABI/testing/sysfs-driver-xen-blkfront

@@ -1,4 +1,4 @@
What: /sys/module/xen_blkfront/parameters/max
What: /sys/module/xen_blkfront/parameters/max_indirect_segments
Date: June 2013
KernelVersion: 3.11
Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
@@ -8,3 +8,12 @@ Description:
is 32 - higher value means more potential throughput but more
memory usage. The backend picks the minimum of the frontend
and its default backend value.
What: /sys/module/xen_blkfront/parameters/feature_persistent
Date: September 2020
KernelVersion: 5.10
Contact: SeongJae Park <sjpark@amazon.de>
Description:
Whether to enable the persistent grants feature or not. Note
that this option only takes effect on newly created frontends.
The default is Y (enable).

Documentation/admin-guide/kernel-parameters.txt

@@ -5978,6 +5978,13 @@
After which time (jiffies) the event handling loop
should start to delay EOI handling. Default is 2.
xen.fifo_events= [XEN]
Boolean parameter to disable using fifo event handling
even if available. Normally fifo event handling is
preferred over the 2-level event handling, as it is
fairer and the number of possible event channels is
much higher. Default is on (use fifo events).
nopv= [X86,XEN,KVM,HYPER_V,VMWARE]
Disables the PV optimizations forcing the guest to run
as generic guest with no PV drivers. Currently support

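For reference, the "xen." prefix on a parameter defined in built-in code comes from overriding MODULE_PARAM_PREFIX before declaring it, and the documented "use fifo events unless disabled or unavailable" behaviour is a simple try-then-fall-back at init time. A condensed sketch of that wiring, assuming the init helpers declared in events_internal.h and mirroring the events_base.c hunks further down (demo_init_events is an illustrative stand-in, not the real xen_init_IRQ()):

  #include <linux/errno.h>
  #include <linux/init.h>
  #include <linux/moduleparam.h>

  #undef MODULE_PARAM_PREFIX
  #define MODULE_PARAM_PREFIX "xen."

  int xen_evtchn_fifo_init(void);         /* declared in events_internal.h */
  void xen_evtchn_2l_init(void);

  /* Parsed from the kernel command line as xen.fifo_events=<bool>. */
  bool xen_fifo_events = true;
  module_param_named(fifo_events, xen_fifo_events, bool, 0);

  /* Stand-in for the relevant part of xen_init_IRQ(). */
  static void __init demo_init_events(void)
  {
          int ret = -EINVAL;

          if (xen_fifo_events)
                  ret = xen_evtchn_fifo_init();
          if (ret < 0) {
                  /* FIFO ABI disabled or unsupported: fall back to 2-level. */
                  xen_evtchn_2l_init();
                  xen_fifo_events = false;
          }
  }

Clearing xen_fifo_events on fallback is what lets other code, such as the smp.c hunk below, key off the event-channel ABI actually in use.
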
arch/x86/xen/smp.c

@@ -88,14 +88,17 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_callfunc_irq, cpu).irq = rc;
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
IRQF_PERCPU | IRQF_NOBALANCING,
debug_name, NULL);
if (rc < 0)
goto fail;
per_cpu(xen_debug_irq, cpu).irq = rc;
per_cpu(xen_debug_irq, cpu).name = debug_name;
if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt,
IRQF_PERCPU | IRQF_NOBALANCING,
debug_name, NULL);
if (rc < 0)
goto fail;
per_cpu(xen_debug_irq, cpu).irq = rc;
per_cpu(xen_debug_irq, cpu).name = debug_name;
}
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,

arch/x86/xen/xen-ops.h

@@ -29,6 +29,8 @@ extern struct start_info *xen_start_info;
extern struct shared_info xen_dummy_shared_info;
extern struct shared_info *HYPERVISOR_shared_info;
extern bool xen_fifo_events;
void xen_setup_mfn_list_list(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);

drivers/block/xen-blkback/xenbus.c

@@ -473,6 +473,12 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
"Enables the persistent grants feature");
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -518,6 +524,8 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && blk_queue_secure_erase(q))
vbd->discard_secure = true;
vbd->feature_gnt_persistent = feature_persistent;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -905,7 +913,8 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
be->blkif->vbd.feature_gnt_persistent);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1066,7 +1075,6 @@ static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
unsigned int pers_grants;
char protocol[64] = "";
int err, i;
char *xspath;
@@ -1092,9 +1100,11 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
0);
blkif->vbd.feature_gnt_persistent = pers_grants;
if (blkif->vbd.feature_gnt_persistent)
blkif->vbd.feature_gnt_persistent =
xenbus_read_unsigned(dev->otherend,
"feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
/*
@@ -1117,7 +1127,7 @@ static int connect_ring(struct backend_info *be)
pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
blkif->nr_rings, blkif->blk_protocol, protocol,
pers_grants ? "persistent grants" : "");
blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
ring_page_order = xenbus_read_unsigned(dev->otherend,
"ring-page-order", 0);

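Taken together with the mirrored blkfront change below, the negotiation becomes symmetric: each side advertises feature-persistent according to its own setting, and treats the feature as active only if it is enabled locally and the other end advertises it too. A condensed sketch of the backend half, using the real xenbus helpers but with the transaction retry and error handling of the actual connect()/connect_ring() stripped out (negotiate_persistent_grants and enabled_locally are illustrative names):

  #include <linux/types.h>
  #include <xen/xenbus.h>

  /*
   * Simplified backend-side negotiation; 'enabled_locally' stands in for
   * vbd->feature_gnt_persistent as initialised from the module parameter.
   */
  static bool negotiate_persistent_grants(struct xenbus_device *dev,
                                          bool enabled_locally)
  {
          /* Advertise our own setting so the frontend can see it. */
          xenbus_printf(XBT_NIL, dev->nodename, "feature-persistent", "%u",
                        enabled_locally);

          /* Only consult the other end if we want the feature at all. */
          if (!enabled_locally)
                  return false;

          /* A missing key counts as "not supported" (default 0). */
          return xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
  }
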
drivers/block/xen-blkfront.c

@@ -1866,8 +1866,8 @@ again:
message = "writing protocol";
goto abort_transaction;
}
err = xenbus_printf(xbt, dev->nodename,
"feature-persistent", "%u", 1);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
info->feature_persistent);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1941,6 +1941,13 @@ static int negotiate_mq(struct blkfront_info *info)
}
return 0;
}
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
"Enables the persistent grants feature");
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2007,6 +2014,8 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
info->feature_persistent = feature_persistent;
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2316,9 +2325,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
if (info->feature_persistent)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);

drivers/xen/events/events_2l.c

@@ -47,10 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
return EVTCHN_2L_NR_CHANNELS;
}
static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
unsigned int old_cpu)
{
clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}
static void evtchn_2l_clear_pending(evtchn_port_t port)

drivers/xen/events/events_base.c

@@ -70,6 +70,57 @@
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."
/* Interrupt types. */
enum xen_irq_type {
IRQT_UNBOUND = 0,
IRQT_PIRQ,
IRQT_VIRQ,
IRQT_IPI,
IRQT_EVTCHN
};
/*
* Packed IRQ information:
* type - enum xen_irq_type
* event channel - irq->event channel mapping
* cpu - cpu this event channel is bound to
* index - type-specific information:
* PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
* guest, or GSI (real passthrough IRQ) of the device.
* VIRQ - virq number
* IPI - IPI vector
* EVTCHN -
*/
struct irq_info {
struct list_head list;
struct list_head eoi_list;
short refcnt;
short spurious_cnt;
enum xen_irq_type type; /* type */
unsigned irq;
evtchn_port_t evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */
union {
unsigned short virq;
enum ipi_vector ipi;
struct {
unsigned short pirq;
unsigned short gsi;
unsigned char vector;
unsigned char flags;
uint16_t domid;
} pirq;
} u;
};
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
#define PIRQ_MSI_GROUP (1 << 2)
static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);
@@ -110,7 +161,7 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
int **evtchn_to_irq;
static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
@@ -190,7 +241,7 @@ int get_evtchn_to_irq(evtchn_port_t evtchn)
}
/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
static struct irq_info *info_for_irq(unsigned irq)
{
if (irq < nr_legacy_irqs())
return legacy_info_ptrs[irq];
@@ -228,7 +279,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
return xen_evtchn_port_setup(info);
return xen_evtchn_port_setup(evtchn);
}
static int xen_irq_info_evtchn_setup(unsigned irq,
@@ -351,7 +402,7 @@ static enum xen_irq_type type_from_irq(unsigned irq)
return info_for_irq(irq)->type;
}
unsigned cpu_from_irq(unsigned irq)
static unsigned cpu_from_irq(unsigned irq)
{
return info_for_irq(irq)->cpu;
}
@@ -391,7 +442,7 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
#ifdef CONFIG_SMP
cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
xen_evtchn_port_bind_to_cpu(info, cpu);
xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
info->cpu = cpu;
}
@@ -745,7 +796,7 @@ static unsigned int __startup_pirq(unsigned int irq)
info->evtchn = evtchn;
bind_evtchn_to_cpu(evtchn, 0);
rc = xen_evtchn_port_setup(info);
rc = xen_evtchn_port_setup(evtchn);
if (rc)
goto err;
@@ -1145,14 +1196,6 @@ static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
chip);
}
int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
evtchn_port_t remote_port)
{
return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
&xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port)
{
@@ -1320,19 +1363,6 @@ static int bind_interdomain_evtchn_to_irqhandler_chip(
return irq;
}
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
void *dev_id)
{
return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
remote_port, handler, irqflags, devname,
dev_id, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port,
irq_handler_t handler,
@@ -2020,8 +2050,8 @@ void xen_setup_callback_vector(void) {}
static inline void xen_alloc_callback_vector(void) {}
#endif
static bool fifo_events = true;
module_param(fifo_events, bool, 0);
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);
static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
@@ -2050,10 +2080,12 @@ void __init xen_init_IRQ(void)
int ret = -EINVAL;
evtchn_port_t evtchn;
if (fifo_events)
if (xen_fifo_events)
ret = xen_evtchn_fifo_init();
if (ret < 0)
if (ret < 0) {
xen_evtchn_2l_init();
xen_fifo_events = false;
}
xen_cpu_init_eoi(smp_processor_id());

drivers/xen/events/events_fifo.c

@@ -138,9 +138,8 @@ static void init_array_page(event_word_t *array_page)
array_page[i] = 1 << EVTCHN_FIFO_MASKED;
}
static int evtchn_fifo_setup(struct irq_info *info)
static int evtchn_fifo_setup(evtchn_port_t port)
{
evtchn_port_t port = info->evtchn;
unsigned new_array_pages;
int ret;
@@ -186,7 +185,8 @@ static int evtchn_fifo_setup(struct irq_info *info)
return ret;
}
static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
static void evtchn_fifo_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
unsigned int old_cpu)
{
/* no-op */
}
@@ -237,6 +237,9 @@ static bool clear_masked_cond(volatile event_word_t *word)
w = *word;
do {
if (!(w & (1 << EVTCHN_FIFO_MASKED)))
return true;
if (w & (1 << EVTCHN_FIFO_PENDING))
return false;

drivers/xen/events/events_internal.h

@@ -7,65 +7,15 @@
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
/* Interrupt types. */
enum xen_irq_type {
IRQT_UNBOUND = 0,
IRQT_PIRQ,
IRQT_VIRQ,
IRQT_IPI,
IRQT_EVTCHN
};
/*
* Packed IRQ information:
* type - enum xen_irq_type
* event channel - irq->event channel mapping
* cpu - cpu this event channel is bound to
* index - type-specific information:
* PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
* guest, or GSI (real passthrough IRQ) of the device.
* VIRQ - virq number
* IPI - IPI vector
* EVTCHN -
*/
struct irq_info {
struct list_head list;
struct list_head eoi_list;
short refcnt;
short spurious_cnt;
enum xen_irq_type type; /* type */
unsigned irq;
evtchn_port_t evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
unsigned short eoi_cpu; /* EOI must happen on this cpu */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */
union {
unsigned short virq;
enum ipi_vector ipi;
struct {
unsigned short pirq;
unsigned short gsi;
unsigned char vector;
unsigned char flags;
uint16_t domid;
} pirq;
} u;
};
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
#define PIRQ_MSI_GROUP (1 << 2)
struct evtchn_loop_ctrl;
struct evtchn_ops {
unsigned (*max_channels)(void);
unsigned (*nr_channels)(void);
int (*setup)(struct irq_info *info);
void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
int (*setup)(evtchn_port_t port);
void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu,
unsigned int old_cpu);
void (*clear_pending)(evtchn_port_t port);
void (*set_pending)(evtchn_port_t port);
@@ -83,12 +33,9 @@ struct evtchn_ops {
extern const struct evtchn_ops *evtchn_ops;
extern int **evtchn_to_irq;
int get_evtchn_to_irq(evtchn_port_t evtchn);
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
static inline unsigned xen_evtchn_max_channels(void)
@@ -100,17 +47,18 @@ static inline unsigned xen_evtchn_max_channels(void)
* Do any ABI specific setup for a bound event channel before it can
* be unmasked and used.
*/
static inline int xen_evtchn_port_setup(struct irq_info *info)
static inline int xen_evtchn_port_setup(evtchn_port_t evtchn)
{
if (evtchn_ops->setup)
return evtchn_ops->setup(info);
return evtchn_ops->setup(evtchn);
return 0;
}
static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
unsigned cpu)
static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn,
unsigned int cpu,
unsigned int old_cpu)
{
evtchn_ops->bind_to_cpu(info, cpu);
evtchn_ops->bind_to_cpu(evtchn, cpu, old_cpu);
}
static inline void clear_evtchn(evtchn_port_t port)

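With struct irq_info now private to events_base.c, an event-channel ABI backend only ever deals in evtchn_port_t; the two reworked hooks end up with signatures like the stubs below. This is purely illustrative (demo_* names are hypothetical), not the real 2-level/FIFO implementations, which are in events_2l.c and events_fifo.c above:

  #include <xen/interface/event_channel.h>        /* evtchn_port_t */

  /* Per-port ABI setup; the FIFO backend grows its event array here. */
  static int demo_evtchn_setup(evtchn_port_t port)
  {
          return 0;
  }

  /* The 2-level backend moves the port between per-cpu bitmasks here. */
  static void demo_evtchn_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
                                      unsigned int old_cpu)
  {
  }

Such callbacks are wired up through the .setup and .bind_to_cpu members of evtchn_ops, as the events_internal.h hunk above shows.
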
include/xen/events.h

@@ -35,16 +35,8 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
evtchn_port_t remote_port);
int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
void *dev_id);
int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port,
irq_handler_t handler,