2018-03-14 23:13:07 +00:00
|
|
|
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
|
2005-04-16 22:20:36 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
|
|
|
|
*
|
2023-04-05 13:38:21 +00:00
|
|
|
* Copyright (C) 2000 - 2023, Intel Corp.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
2018-03-14 23:13:07 +00:00
|
|
|
*****************************************************************************/
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#include <acpi/acpi.h>
|
2009-01-09 05:30:03 +00:00
|
|
|
#include "accommon.h"
|
|
|
|
#include "acevents.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#define _COMPONENT ACPI_HARDWARE
|
2005-08-05 04:44:28 +00:00
|
|
|
ACPI_MODULE_NAME("hwgpe")
|
2012-02-14 10:14:27 +00:00
|
|
|
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
|
2005-04-19 02:49:35 +00:00
|
|
|
/* Local prototypes */
|
|
|
|
static acpi_status
|
2005-08-05 04:44:28 +00:00
|
|
|
acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
2008-12-30 01:45:17 +00:00
|
|
|
struct acpi_gpe_block_info *gpe_block,
|
|
|
|
void *context);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-02-05 07:19:48 +00:00
|
|
|
static acpi_status
|
|
|
|
acpi_hw_gpe_enable_write(u8 enable_mask,
|
|
|
|
struct acpi_gpe_register_info *gpe_register_info);
|
|
|
|
|
2020-09-04 16:27:43 +00:00
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_gpe_read
 *
 * PARAMETERS:  value               - Where the value is returned
 *              reg                 - GPE register structure
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Read from a GPE register in either memory or IO space.
 *
 * LIMITATIONS: <These limitations also apply to acpi_hw_gpe_write>
 *              space_ID must be system_memory or system_IO.
 *
 ******************************************************************************/

acpi_status acpi_hw_gpe_read(u64 *value, struct acpi_gpe_address *reg)
{
	acpi_status status;
	u32 value32;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
		/*
		 * The host OS has mapped the GPE block upfront and stored its
		 * logical address in reg->address, so the register can be
		 * accessed directly. This avoids a memory-mapping lookup that
		 * would be unsafe from interrupt context.
		 */
		*value = (u64)ACPI_GET8((unsigned long)reg->address);

		return_ACPI_STATUS(AE_OK);
#else
		/* reg->address is a physical address; go through the OSL */

		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   value, ACPI_GPE_REGISTER_WIDTH);
#endif
	}

	/* Otherwise system_IO: read the port and widen the result to u64 */

	status = acpi_os_read_port((acpi_io_address)reg->address,
				   &value32, ACPI_GPE_REGISTER_WIDTH);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);

	*value = (u64)value32;

	return_ACPI_STATUS(AE_OK);
}
|
|
|
|
|
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_gpe_write
 *
 * PARAMETERS:  value               - Value to be written
 *              reg                 - GPE register structure
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Write to a GPE register in either memory or IO space.
 *
 ******************************************************************************/

acpi_status acpi_hw_gpe_write(u64 value, struct acpi_gpe_address *reg)
{
	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
		/*
		 * reg->address is a logical address pre-mapped by the host OS,
		 * so the register byte can be stored directly (safe from
		 * interrupt context, no on-the-fly mapping needed).
		 */
		ACPI_SET8((unsigned long)reg->address, value);

		return_ACPI_STATUS(AE_OK);
#else
		/* reg->address is a physical address; go through the OSL */

		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    value, ACPI_GPE_REGISTER_WIDTH);
#endif
	}

	/* Otherwise system_IO: narrow to u32 for the port interface */

	return acpi_os_write_port((acpi_io_address)reg->address, (u32)value,
				  ACPI_GPE_REGISTER_WIDTH);
}
|
|
|
|
|
2010-06-08 08:48:26 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
2010-07-01 02:07:17 +00:00
|
|
|
* FUNCTION: acpi_hw_get_gpe_register_bit
|
2010-06-08 08:48:26 +00:00
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_event_info - Info block for the GPE
|
|
|
|
*
|
2010-12-13 05:39:37 +00:00
|
|
|
* RETURN: Register mask with a one in the GPE bit position
|
2010-06-08 08:48:26 +00:00
|
|
|
*
|
2010-12-13 05:39:37 +00:00
|
|
|
* DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
|
|
|
|
* correct position for the input GPE.
|
2010-06-08 08:48:26 +00:00
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
2012-08-17 03:10:02 +00:00
|
|
|
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
|
2010-06-08 08:48:26 +00:00
|
|
|
{
|
2012-12-19 05:37:21 +00:00
|
|
|
|
|
|
|
return ((u32)1 <<
|
|
|
|
(gpe_event_info->gpe_number -
|
|
|
|
gpe_event_info->register_info->base_gpe_number));
|
2010-06-08 08:48:26 +00:00
|
|
|
}
|
|
|
|
|
2008-06-13 00:28:55 +00:00
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_low_set_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info block for the GPE to be disabled
 *              action              - Enable or disable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
 *              The enable_mask field of the involved GPE register must be
 *              updated by the caller if necessary.
 *
 ******************************************************************************/

acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
{
	struct acpi_gpe_register_info *gpe_register_info;
	acpi_status status = AE_OK;
	u64 enable_mask;
	u32 register_bit;

	ACPI_FUNCTION_ENTRY();

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return (AE_NOT_EXIST);
	}

	/* Get current value of the enable register that contains this GPE */

	status = acpi_hw_gpe_read(&enable_mask,
				  &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Set or clear just the bit that corresponds to this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
	switch (action) {
	case ACPI_GPE_CONDITIONAL_ENABLE:

		/* Only enable if the corresponding enable_mask bit is set */

		if (!(register_bit & gpe_register_info->enable_mask)) {
			return (AE_BAD_PARAMETER);
		}

		/* Condition satisfied - proceed exactly like ACPI_GPE_ENABLE */

		ACPI_FALLTHROUGH;

	case ACPI_GPE_ENABLE:

		ACPI_SET_BIT(enable_mask, register_bit);
		break;

	case ACPI_GPE_DISABLE:

		ACPI_CLEAR_BIT(enable_mask, register_bit);
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u", action));
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Only touch the hardware if this GPE is not masked for runtime;
	 * masked GPEs keep their updated software state but the register
	 * write is suppressed.
	 */
	if (!(register_bit & gpe_register_info->mask_for_run)) {

		/* Write the updated enable mask */

		status = acpi_hw_gpe_write(enable_mask,
					   &gpe_register_info->enable_address);
	}
	return (status);
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_clear_gpe
|
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_event_info - Info block for the GPE to be cleared
|
|
|
|
*
|
|
|
|
* RETURN: Status
|
|
|
|
*
|
|
|
|
* DESCRIPTION: Clear the status bit for a single GPE.
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
2016-05-05 04:57:53 +00:00
|
|
|
acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-06-08 08:48:26 +00:00
|
|
|
struct acpi_gpe_register_info *gpe_register_info;
|
2005-08-05 04:44:28 +00:00
|
|
|
acpi_status status;
|
2010-06-08 08:48:26 +00:00
|
|
|
u32 register_bit;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-08-05 04:44:28 +00:00
|
|
|
ACPI_FUNCTION_ENTRY();
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-06-08 08:48:26 +00:00
|
|
|
/* Get the info block for the entire GPE register */
|
|
|
|
|
|
|
|
gpe_register_info = gpe_event_info->register_info;
|
|
|
|
if (!gpe_register_info) {
|
|
|
|
return (AE_NOT_EXIST);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Write a one to the appropriate bit in the status register to
|
|
|
|
* clear this GPE.
|
|
|
|
*/
|
2012-08-17 03:10:02 +00:00
|
|
|
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
|
2010-12-13 05:39:37 +00:00
|
|
|
|
2020-09-04 16:27:43 +00:00
|
|
|
status = acpi_hw_gpe_write(register_bit,
|
|
|
|
&gpe_register_info->status_address);
|
2005-04-16 22:20:36 +00:00
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
|
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_get_gpe_status
 *
 * PARAMETERS:  gpe_event_info      - Info block for the GPE to queried
 *              event_status        - Where the GPE status is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Return the status of a single GPE. The returned status is a
 *              bitmask combining software state (handler installed,
 *              runtime/wake enable bookkeeping) with the live hardware
 *              enable and status register bits.
 *
 ******************************************************************************/

acpi_status
acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
		       acpi_event_status *event_status)
{
	u64 in_byte;
	u32 register_bit;
	struct acpi_gpe_register_info *gpe_register_info;
	acpi_event_status local_event_status = 0;
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	if (!event_status) {
		return (AE_BAD_PARAMETER);
	}

	/* GPE currently handled? */

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
	    ACPI_GPE_DISPATCH_NONE) {
		local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
	}

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;

	/* Get the register bitmask for this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* GPE currently enabled? (enabled for runtime?) */

	if (register_bit & gpe_register_info->enable_for_run) {
		local_event_status |= ACPI_EVENT_FLAG_ENABLED;
	}

	/* GPE currently masked? (masked for runtime?) */

	if (register_bit & gpe_register_info->mask_for_run) {
		local_event_status |= ACPI_EVENT_FLAG_MASKED;
	}

	/* GPE enabled for wake? */

	if (register_bit & gpe_register_info->enable_for_wake) {
		local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
	}

	/* GPE currently enabled (enable bit == 1)? */

	status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (register_bit & in_byte) {
		local_event_status |= ACPI_EVENT_FLAG_ENABLE_SET;
	}

	/* GPE currently active (status bit == 1)? */

	status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->status_address);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (register_bit & in_byte) {
		local_event_status |= ACPI_EVENT_FLAG_STATUS_SET;
	}

	/* Set return value */

	(*event_status) = local_event_status;
	return (AE_OK);
}
|
|
|
|
|
2014-12-01 22:50:16 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_gpe_enable_write
|
|
|
|
*
|
|
|
|
* PARAMETERS: enable_mask - Bit mask to write to the GPE register
|
|
|
|
* gpe_register_info - Gpe Register info
|
|
|
|
*
|
|
|
|
* RETURN: Status
|
|
|
|
*
|
|
|
|
* DESCRIPTION: Write the enable mask byte to the given GPE register.
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
static acpi_status
|
|
|
|
acpi_hw_gpe_enable_write(u8 enable_mask,
|
|
|
|
struct acpi_gpe_register_info *gpe_register_info)
|
|
|
|
{
|
|
|
|
acpi_status status;
|
|
|
|
|
2015-04-15 02:00:27 +00:00
|
|
|
gpe_register_info->enable_mask = enable_mask;
|
2015-12-29 05:52:32 +00:00
|
|
|
|
2020-09-04 16:27:43 +00:00
|
|
|
status = acpi_hw_gpe_write(enable_mask,
|
|
|
|
&gpe_register_info->enable_address);
|
2014-12-01 22:50:16 +00:00
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_disable_gpe_block
|
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
|
|
|
* gpe_block - Gpe Block info
|
|
|
|
*
|
|
|
|
* RETURN: Status
|
|
|
|
*
|
2005-04-19 02:49:35 +00:00
|
|
|
* DESCRIPTION: Disable all GPEs within a single GPE block
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
acpi_status
|
2008-12-30 01:45:17 +00:00
|
|
|
acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|
|
|
struct acpi_gpe_block_info *gpe_block, void *context)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-08-05 04:44:28 +00:00
|
|
|
u32 i;
|
|
|
|
acpi_status status;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* Examine each GPE Register within the block */
|
|
|
|
|
|
|
|
for (i = 0; i < gpe_block->register_count; i++) {
|
2006-10-02 04:00:00 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Disable all GPEs in this register */
|
|
|
|
|
2008-12-30 18:55:32 +00:00
|
|
|
status =
|
2014-12-01 22:50:16 +00:00
|
|
|
acpi_hw_gpe_enable_write(0x00,
|
|
|
|
&gpe_block->register_info[i]);
|
2005-08-05 04:44:28 +00:00
|
|
|
if (ACPI_FAILURE(status)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (AE_OK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_clear_gpe_block
|
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
|
|
|
* gpe_block - Gpe Block info
|
|
|
|
*
|
|
|
|
* RETURN: Status
|
|
|
|
*
|
2005-04-19 02:49:35 +00:00
|
|
|
* DESCRIPTION: Clear status bits for all GPEs within a single GPE block
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
acpi_status
|
2008-12-30 01:45:17 +00:00
|
|
|
acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|
|
|
struct acpi_gpe_block_info *gpe_block, void *context)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-08-05 04:44:28 +00:00
|
|
|
u32 i;
|
|
|
|
acpi_status status;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* Examine each GPE Register within the block */
|
|
|
|
|
|
|
|
for (i = 0; i < gpe_block->register_count; i++) {
|
2006-10-02 04:00:00 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Clear status on all GPEs in this register */
|
|
|
|
|
2020-09-04 16:27:43 +00:00
|
|
|
status = acpi_hw_gpe_write(0xFF,
|
|
|
|
&gpe_block->register_info[i].status_address);
|
2005-08-05 04:44:28 +00:00
|
|
|
if (ACPI_FAILURE(status)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (AE_OK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_enable_runtime_gpe_block
|
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
|
|
|
* gpe_block - Gpe Block info
|
|
|
|
*
|
|
|
|
* RETURN: Status
|
|
|
|
*
|
2005-04-19 02:49:35 +00:00
|
|
|
* DESCRIPTION: Enable all "runtime" GPEs within a single GPE block. Includes
|
|
|
|
* combination wake/run GPEs.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
acpi_status
|
2008-12-30 01:45:17 +00:00
|
|
|
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
2016-05-05 04:57:53 +00:00
|
|
|
struct acpi_gpe_block_info *gpe_block,
|
2012-10-31 02:25:45 +00:00
|
|
|
void *context)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-08-05 04:44:28 +00:00
|
|
|
u32 i;
|
|
|
|
acpi_status status;
|
2014-12-01 22:50:16 +00:00
|
|
|
struct acpi_gpe_register_info *gpe_register_info;
|
2016-08-04 08:43:39 +00:00
|
|
|
u8 enable_mask;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* NOTE: assumes that all GPEs are currently disabled */
|
|
|
|
|
|
|
|
/* Examine each GPE Register within the block */
|
|
|
|
|
|
|
|
for (i = 0; i < gpe_block->register_count; i++) {
|
2014-12-01 22:50:16 +00:00
|
|
|
gpe_register_info = &gpe_block->register_info[i];
|
|
|
|
if (!gpe_register_info->enable_for_run) {
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable all "runtime" GPEs in this register */
|
|
|
|
|
2016-08-04 08:43:39 +00:00
|
|
|
enable_mask = gpe_register_info->enable_for_run &
|
|
|
|
~gpe_register_info->mask_for_run;
|
2009-06-24 01:44:06 +00:00
|
|
|
status =
|
2016-08-04 08:43:39 +00:00
|
|
|
acpi_hw_gpe_enable_write(enable_mask, gpe_register_info);
|
2005-08-05 04:44:28 +00:00
|
|
|
if (ACPI_FAILURE(status)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (AE_OK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_enable_wakeup_gpe_block
|
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
|
|
|
* gpe_block - Gpe Block info
|
|
|
|
*
|
|
|
|
* RETURN: Status
|
|
|
|
*
|
2005-04-19 02:49:35 +00:00
|
|
|
* DESCRIPTION: Enable all "wake" GPEs within a single GPE block. Includes
|
|
|
|
* combination wake/run GPEs.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
2005-04-19 02:49:35 +00:00
|
|
|
static acpi_status
|
2005-08-05 04:44:28 +00:00
|
|
|
acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
2008-12-30 01:45:17 +00:00
|
|
|
struct acpi_gpe_block_info *gpe_block,
|
|
|
|
void *context)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2005-08-05 04:44:28 +00:00
|
|
|
u32 i;
|
|
|
|
acpi_status status;
|
2014-12-01 22:50:16 +00:00
|
|
|
struct acpi_gpe_register_info *gpe_register_info;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* Examine each GPE Register within the block */
|
|
|
|
|
|
|
|
for (i = 0; i < gpe_block->register_count; i++) {
|
2014-12-01 22:50:16 +00:00
|
|
|
gpe_register_info = &gpe_block->register_info[i];
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-09-30 00:24:38 +00:00
|
|
|
/*
|
|
|
|
* Enable all "wake" GPEs in this register and disable the
|
|
|
|
* remaining ones.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-06-24 01:44:06 +00:00
|
|
|
status =
|
2014-12-01 22:50:16 +00:00
|
|
|
acpi_hw_gpe_enable_write(gpe_register_info->enable_for_wake,
|
|
|
|
gpe_register_info);
|
2005-08-05 04:44:28 +00:00
|
|
|
if (ACPI_FAILURE(status)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
return (status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (AE_OK);
|
|
|
|
}
|
|
|
|
|
2020-03-25 10:54:29 +00:00
|
|
|
/*
 * Walk context for acpi_hw_get_gpe_block_status(): identifies one GPE to
 * exclude from the check and accumulates the combined result.
 */
struct acpi_gpe_block_status_context {
	/* Register containing the GPE to skip, or NULL to skip nothing */
	struct acpi_gpe_register_info *gpe_skip_register_info;
	/* Bit of the skipped GPE within that register */
	u8 gpe_skip_mask;
	/* OR-combined "enabled and status set" bits from all registers */
	u8 retval;
};
|
|
|
|
|
2020-02-11 16:52:32 +00:00
|
|
|
/******************************************************************************
|
|
|
|
*
|
|
|
|
* FUNCTION: acpi_hw_get_gpe_block_status
|
|
|
|
*
|
|
|
|
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
|
|
|
* gpe_block - Gpe Block info
|
2020-03-25 10:54:29 +00:00
|
|
|
* context - GPE list walk context data
|
2020-02-11 16:52:32 +00:00
|
|
|
*
|
|
|
|
* RETURN: Success
|
|
|
|
*
|
|
|
|
* DESCRIPTION: Produce a combined GPE status bits mask for the given block.
|
|
|
|
*
|
|
|
|
******************************************************************************/
|
|
|
|
|
|
|
|
static acpi_status
|
|
|
|
acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|
|
|
struct acpi_gpe_block_info *gpe_block,
|
2020-03-25 10:54:29 +00:00
|
|
|
void *context)
|
2020-02-11 16:52:32 +00:00
|
|
|
{
|
2020-03-25 10:54:29 +00:00
|
|
|
struct acpi_gpe_block_status_context *c = context;
|
2020-02-11 16:52:32 +00:00
|
|
|
struct acpi_gpe_register_info *gpe_register_info;
|
|
|
|
u64 in_enable, in_status;
|
|
|
|
acpi_status status;
|
2020-03-25 10:54:29 +00:00
|
|
|
u8 ret_mask;
|
2020-02-11 16:52:32 +00:00
|
|
|
u32 i;
|
|
|
|
|
|
|
|
/* Examine each GPE Register within the block */
|
|
|
|
|
|
|
|
for (i = 0; i < gpe_block->register_count; i++) {
|
|
|
|
gpe_register_info = &gpe_block->register_info[i];
|
|
|
|
|
2020-09-04 16:27:43 +00:00
|
|
|
status = acpi_hw_gpe_read(&in_enable,
|
|
|
|
&gpe_register_info->enable_address);
|
2020-02-11 16:52:32 +00:00
|
|
|
if (ACPI_FAILURE(status)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-09-04 16:27:43 +00:00
|
|
|
status = acpi_hw_gpe_read(&in_status,
|
|
|
|
&gpe_register_info->status_address);
|
2020-02-11 16:52:32 +00:00
|
|
|
if (ACPI_FAILURE(status)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-03-25 10:54:29 +00:00
|
|
|
ret_mask = in_enable & in_status;
|
|
|
|
if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
|
|
|
|
ret_mask &= ~c->gpe_skip_mask;
|
|
|
|
}
|
|
|
|
c->retval |= ret_mask;
|
2020-02-11 16:52:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return (AE_OK);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_disable_all_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Disable all GPEs in all GPE blocks. (Status bits are not
 *              cleared here; acpi_hw_clear_gpe_block is a separate walk.)
 *
 ******************************************************************************/

acpi_status acpi_hw_disable_all_gpes(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_disable_all_gpes);

	/* Apply the per-block disable callback to every registered GPE block */

	status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
	return_ACPI_STATUS(status);
}
|
|
|
|
|
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_enable_all_runtime_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
 *
 ******************************************************************************/

acpi_status acpi_hw_enable_all_runtime_gpes(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);

	/* Apply the per-block runtime-enable callback to every GPE block */

	status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL);
	return_ACPI_STATUS(status);
}
|
|
|
|
|
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_enable_all_wakeup_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks
 *
 ******************************************************************************/

acpi_status acpi_hw_enable_all_wakeup_gpes(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);

	/* Apply the per-block wakeup-enable callback to every GPE block */

	status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
	return_ACPI_STATUS(status);
}
|
2012-02-14 10:14:27 +00:00
|
|
|
|
2020-02-11 16:52:32 +00:00
|
|
|
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_check_all_gpes
 *
 * PARAMETERS:  gpe_skip_device     - GPE device of the GPE to skip
 *              gpe_skip_number     - Number of the GPE to skip
 *
 * RETURN:      Combined status of all GPEs
 *
 * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
 *              represented by the "skip" arguments, and return TRUE if the
 *              status bit is set for at least one of them or FALSE otherwise.
 *
 ******************************************************************************/

u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
{
	struct acpi_gpe_block_status_context context = {
		.gpe_skip_register_info = NULL,
		.retval = 0,
	};
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);

	/* Resolve the "skip" GPE under the GPE lock so its info is stable */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
						    gpe_skip_number);
	if (gpe_event_info) {
		/* Record which register and bit the walk should ignore */
		context.gpe_skip_register_info = gpe_event_info->register_info;
		context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info);
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Walk status is ignored; unreadable registers are simply skipped */

	(void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
	return (context.retval != 0);
}
|
|
|
|
|
2012-02-14 10:14:27 +00:00
|
|
|
#endif /* !ACPI_REDUCED_HARDWARE */
|