Merge branch 'akpm' (updates from Andrew Morton)
Merge first patch-bomb from Andrew Morton:

 - various misc bits
 - I've been patchmonkeying ocfs2 for a while, as Joel and Mark have been
   distracted.  There has been quite a bit of activity.
 - About half the MM queue
 - Some backlight bits
 - Various lib/ updates
 - checkpatch updates
 - zillions more little rtc patches
 - ptrace
 - signals
 - exec
 - procfs
 - rapidio
 - nbd
 - aoe
 - pps
 - memstick
 - tools/testing/selftests updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (445 commits)
  tools/testing/selftests: don't assume the x bit is set on scripts
  selftests: add .gitignore for kcmp
  selftests: fix clean target in kcmp Makefile
  selftests: add .gitignore for vm
  selftests: add hugetlbfstest
  self-test: fix make clean
  selftests: exit 1 on failure
  kernel/resource.c: remove the unneeded assignment in function __find_resource
  aio: fix wrong comment in aio_complete()
  drivers/w1/slaves/w1_ds2408.c: add magic sequence to disable P0 test mode
  drivers/memstick/host/r592.c: convert to module_pci_driver
  drivers/memstick/host/jmb38x_ms: convert to module_pci_driver
  pps-gpio: add device-tree binding and support
  drivers/pps/clients/pps-gpio.c: convert to module_platform_driver
  drivers/pps/clients/pps-gpio.c: convert to devm_* helpers
  drivers/parport/share.c: use kzalloc
  Documentation/accounting/getdelays.c: avoid strncpy in accounting tool
  aoe: update internal version number to v83
  aoe: update copyright date
  aoe: perform I/O completions in parallel
  ...
commit 7f0ef0267e
@@ -389,7 +389,8 @@ Albeit deprecated by some people, the equivalent of the goto statement is
 used frequently by compilers in form of the unconditional jump instruction.
 
 The goto statement comes in handy when a function exits from multiple
-locations and some common work such as cleanup has to be done.
+locations and some common work such as cleanup has to be done. If there is no
+cleanup needed then just return directly.
 
 The rationale is:
 
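As context for the hunk above (not part of the patch): the centralized-exit pattern the text describes usually looks like the sketch below. struct widget, widget_init(), widget_register() and BUF_SIZE are made-up stand-ins; kmalloc()/kfree() are the real allocator calls.

#include <linux/slab.h>

static int setup_widget(struct widget *w)
{
	int err;
	char *buf;

	buf = kmalloc(BUF_SIZE, GFP_KERNEL);	/* BUF_SIZE: hypothetical */
	if (!buf)
		return -ENOMEM;		/* no cleanup needed: return directly */

	err = widget_init(w, buf);	/* hypothetical helpers */
	if (err)
		goto out_free;		/* common cleanup path */

	err = widget_register(w);
	if (err)
		goto out_free;

	return 0;

out_free:
	kfree(buf);
	return err;
}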
@@ -1955,12 +1955,17 @@ machines due to caching.
  </sect1>
  </chapter>
 
-  <chapter id="apiref">
+  <chapter id="apiref-mutex">
   <title>Mutex API reference</title>
 !Iinclude/linux/mutex.h
 !Ekernel/mutex.c
   </chapter>
 
+  <chapter id="apiref-futex">
+  <title>Futex API reference</title>
+!Ikernel/futex.c
+  </chapter>
+
   <chapter id="references">
   <title>Further reading</title>
 
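For orientation (not part of the patch): the mutex API that the new apiref-mutex chapter pulls in from include/linux/mutex.h and kernel/mutex.c is used roughly as in this minimal sketch; my_lock, shared_counter and update_counter() are made-up names.

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int shared_counter;

static void update_counter(void)
{
	mutex_lock(&my_lock);		/* may sleep: process context only */
	shared_counter++;
	mutex_unlock(&my_lock);
}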
@@ -272,7 +272,7 @@ int main(int argc, char *argv[])
 	char *logfile = NULL;
 	int loop = 0;
 	int containerset = 0;
-	char containerpath[1024];
+	char *containerpath = NULL;
 	int cfd = 0;
 	int forking = 0;
 	sigset_t sigset;
@@ -299,7 +299,7 @@ int main(int argc, char *argv[])
 			break;
 		case 'C':
 			containerset = 1;
-			strncpy(containerpath, optarg, strlen(optarg) + 1);
+			containerpath = optarg;
 			break;
 		case 'w':
 			logfile = strdup(optarg);
@@ -834,10 +834,9 @@ Test:
 
 12. TODO
 
-1. Add support for accounting huge pages (as a separate controller)
-2. Make per-cgroup scanner reclaim not-shared pages first
-3. Teach controller to account for shared-pages
-4. Start reclamation in the background when the limit is
+1. Make per-cgroup scanner reclaim not-shared pages first
+2. Teach controller to account for shared-pages
+3. Start reclamation in the background when the limit is
    not yet hit but the usage is getting closer
 
 Summary
@@ -222,5 +222,4 @@ drivers/dma/: location for offload engine drivers
 include/linux/async_tx.h: core header file for the async_tx api
 crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
 crypto/async_tx/async_memcpy.c: copy offload
-crypto/async_tx/async_memset.c: memory fill offload
 crypto/async_tx/async_xor.c: xor and xor zero sum offload
@@ -100,8 +100,7 @@ Your cooperation is appreciated.
		 10 = /dev/aio		Asynchronous I/O notification interface
		 11 = /dev/kmsg		Writes to this come out as printk's, reads
					export the buffered printk records.
-		 12 = /dev/oldmem	Used by crashdump kernels to access
-					the memory of the kernel that crashed.
+		 12 = /dev/oldmem	OBSOLETE - replaced by /proc/vmcore
 
   1 block	RAM disk
		  0 = /dev/ram0		First RAM disk
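A quick illustration of the /dev/kmsg behaviour described above (not part of the patch): a user-space write shows up as a printk record, and an optional "<level>" prefix selects the log level (6 = KERN_INFO).

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/dev/kmsg", "w");

	if (!f) {
		perror("/dev/kmsg");
		return 1;
	}
	/* The whole record must reach the kernel in one write; the short
	 * string below is flushed in a single chunk on fclose(). */
	fprintf(f, "<6>hello from user space\n");
	fclose(f);
	return 0;
}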
Documentation/devicetree/bindings/pps/pps-gpio.txt (new file, 20 lines)
@@ -0,0 +1,20 @@
+Device-Tree Bindings for a PPS Signal on GPIO
+
+These properties describe a PPS (pulse-per-second) signal connected to
+a GPIO pin.
+
+Required properties:
+- compatible: should be "pps-gpio"
+- gpios: one PPS GPIO in the format described by ../gpio/gpio.txt
+
+Optional properties:
+- assert-falling-edge: when present, assert is indicated by a falling edge
+  (instead of by a rising edge)
+
+Example:
+	pps {
+		compatible = "pps-gpio";
+		gpios = <&gpio2 6 0>;
+
+		assert-falling-edge;
+	};
@@ -473,7 +473,8 @@ This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
 The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG
-bits on both physical and virtual pages associated with a process.
+bits on both physical and virtual pages associated with a process, and the
+soft-dirty bit on pte (see Documentation/vm/soft-dirty.txt for details).
 To clear the bits for all the pages associated with the process
     > echo 1 > /proc/PID/clear_refs
 
@@ -482,6 +483,10 @@ To clear the bits for the anonymous pages associated with the process
 
 To clear the bits for the file mapped pages associated with the process
     > echo 3 > /proc/PID/clear_refs
+
+To clear the soft-dirty bit
+    > echo 4 > /proc/PID/clear_refs
+
 Any other value written to /proc/PID/clear_refs will have no effect.
 
 The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
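The interface above is driven by writing a single value. A minimal user-space sketch (not part of the patch; clear_refs_write() is a made-up helper, value is one of the documented 1-4 codes):

#include <stdio.h>
#include <sys/types.h>

static int clear_refs_write(pid_t pid, int value)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", value);	/* e.g. 4 clears the soft-dirty bits */
	return fclose(f);
}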
@ -559,7 +559,6 @@ your filesystem. The following members are defined:
|
||||
struct address_space_operations {
|
||||
int (*writepage)(struct page *page, struct writeback_control *wbc);
|
||||
int (*readpage)(struct file *, struct page *);
|
||||
int (*sync_page)(struct page *);
|
||||
int (*writepages)(struct address_space *, struct writeback_control *);
|
||||
int (*set_page_dirty)(struct page *page);
|
||||
int (*readpages)(struct file *filp, struct address_space *mapping,
|
||||
@ -581,6 +580,9 @@ struct address_space_operations {
|
||||
/* migrate the contents of a page to the specified target */
|
||||
int (*migratepage) (struct page *, struct page *);
|
||||
int (*launder_page) (struct page *);
|
||||
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
|
||||
unsigned long);
|
||||
void (*is_dirty_writeback) (struct page *, bool *, bool *);
|
||||
int (*error_remove_page) (struct mapping *mapping, struct page *page);
|
||||
int (*swap_activate)(struct file *);
|
||||
int (*swap_deactivate)(struct file *);
|
||||
@ -612,13 +614,6 @@ struct address_space_operations {
|
||||
In this case, the page will be relocated, relocked and if
|
||||
that all succeeds, ->readpage will be called again.
|
||||
|
||||
sync_page: called by the VM to notify the backing store to perform all
|
||||
queued I/O operations for a page. I/O operations for other pages
|
||||
associated with this address_space object may also be performed.
|
||||
|
||||
This function is optional and is called only for pages with
|
||||
PG_Writeback set while waiting for the writeback to complete.
|
||||
|
||||
writepages: called by the VM to write out pages associated with the
|
||||
address_space object. If wbc->sync_mode is WBC_SYNC_ALL, then
|
||||
the writeback_control will specify a range of pages that must be
|
||||
@ -747,6 +742,20 @@ struct address_space_operations {
|
||||
prevent redirtying the page, it is kept locked during the whole
|
||||
operation.
|
||||
|
||||
is_partially_uptodate: Called by the VM when reading a file through the
|
||||
pagecache when the underlying blocksize != pagesize. If the required
|
||||
block is up to date then the read can complete without needing the IO
|
||||
to bring the whole page up to date.
|
||||
|
||||
is_dirty_writeback: Called by the VM when attempting to reclaim a page.
|
||||
The VM uses dirty and writeback information to determine if it needs
|
||||
to stall to allow flushers a chance to complete some IO. Ordinarily
|
||||
it can use PageDirty and PageWriteback but some filesystems have
|
||||
more complex state (unstable pages in NFS prevent reclaim) or
|
||||
do not set those flags due to locking problems (jbd). This callback
|
||||
allows a filesystem to indicate to the VM if a page should be
|
||||
treated as dirty or writeback for the purposes of stalling.
|
||||
|
||||
error_remove_page: normally set to generic_error_remove_page if truncation
|
||||
is ok for this address space. Used for memory failure handling.
|
||||
Setting this implies you deal with pages going away under you,
|
||||
|
@ -47,19 +47,12 @@ parameter. Optionally the size of the ELF header can also be passed
|
||||
when using the elfcorehdr=[size[KMG]@]offset[KMG] syntax.
|
||||
|
||||
|
||||
With the dump-capture kernel, you can access the memory image, or "old
|
||||
memory," in two ways:
|
||||
|
||||
- Through a /dev/oldmem device interface. A capture utility can read the
|
||||
device file and write out the memory in raw format. This is a raw dump
|
||||
of memory. Analysis and capture tools must be intelligent enough to
|
||||
determine where to look for the right information.
|
||||
|
||||
- Through /proc/vmcore. This exports the dump as an ELF-format file that
|
||||
you can write out using file copy commands such as cp or scp. Further,
|
||||
you can use analysis tools such as the GNU Debugger (GDB) and the Crash
|
||||
tool to debug the dump file. This method ensures that the dump pages are
|
||||
correctly ordered.
|
||||
With the dump-capture kernel, you can access the memory image through
|
||||
/proc/vmcore. This exports the dump as an ELF-format file that you can
|
||||
write out using file copy commands such as cp or scp. Further, you can
|
||||
use analysis tools such as the GNU Debugger (GDB) and the Crash tool to
|
||||
debug the dump file. This method ensures that the dump pages are correctly
|
||||
ordered.
|
||||
|
||||
|
||||
Setup and Installation
|
||||
@ -423,18 +416,6 @@ the following command:
|
||||
|
||||
cp /proc/vmcore <dump-file>
|
||||
|
||||
You can also access dumped memory as a /dev/oldmem device for a linear
|
||||
and raw view. To create the device, use the following command:
|
||||
|
||||
mknod /dev/oldmem c 1 12
|
||||
|
||||
Use the dd command with suitable options for count, bs, and skip to
|
||||
access specific portions of the dump.
|
||||
|
||||
To see the entire memory, use the following command:
|
||||
|
||||
dd if=/dev/oldmem of=oldmem.001
|
||||
|
||||
|
||||
Analysis
|
||||
========
|
||||
|
@ -73,28 +73,44 @@ data structure. This structure includes lists of all devices and local master
|
||||
ports that form the same network. It also contains a pointer to the default
|
||||
master port that is used to communicate with devices within the network.
|
||||
|
||||
2.5 Device Drivers
|
||||
|
||||
RapidIO device-specific drivers follow Linux Kernel Driver Model and are
|
||||
intended to support specific RapidIO devices attached to the RapidIO network.
|
||||
|
||||
2.6 Subsystem Interfaces
|
||||
|
||||
RapidIO interconnect specification defines features that may be used to provide
|
||||
one or more common service layers for all participating RapidIO devices. These
|
||||
common services may act separately from device-specific drivers or be used by
|
||||
device-specific drivers. Example of such service provider is the RIONET driver
|
||||
which implements Ethernet-over-RapidIO interface. Because only one driver can be
|
||||
registered for a device, all common RapidIO services have to be registered as
|
||||
subsystem interfaces. This allows to have multiple common services attached to
|
||||
the same device without blocking attachment of a device-specific driver.
|
||||
|
||||
3. Subsystem Initialization
|
||||
---------------------------
|
||||
|
||||
In order to initialize the RapidIO subsystem, a platform must initialize and
|
||||
register at least one master port within the RapidIO network. To register mport
|
||||
within the subsystem controller driver initialization code calls function
|
||||
within the subsystem controller driver's initialization code calls function
|
||||
rio_register_mport() for each available master port.
|
||||
|
||||
RapidIO subsystem uses subsys_initcall() or device_initcall() to perform
|
||||
controller initialization (depending on controller device type).
|
||||
|
||||
After all active master ports are registered with a RapidIO subsystem,
|
||||
an enumeration and/or discovery routine may be called automatically or
|
||||
by user-space command.
|
||||
|
||||
RapidIO subsystem can be configured to be built as a statically linked or
|
||||
modular component of the kernel (see details below).
|
||||
|
||||
4. Enumeration and Discovery
|
||||
----------------------------
|
||||
|
||||
4.1 Overview
|
||||
------------
|
||||
|
||||
RapidIO subsystem configuration options allow users to specify enumeration and
|
||||
RapidIO subsystem configuration options allow users to build enumeration and
|
||||
discovery methods as statically linked components or loadable modules.
|
||||
An enumeration/discovery method implementation and available input parameters
|
||||
define how any given method can be attached to available RapidIO mports:
|
||||
@ -115,8 +131,8 @@ several methods to initiate an enumeration and/or discovery process:
|
||||
endpoint waits for enumeration to be completed. If the specified timeout
|
||||
expires the discovery process is terminated without obtaining RapidIO network
|
||||
information. NOTE: a timed out discovery process may be restarted later using
|
||||
a user-space command as it is described later if the given endpoint was
|
||||
enumerated successfully.
|
||||
a user-space command as it is described below (if the given endpoint was
|
||||
enumerated successfully).
|
||||
|
||||
(b) Statically linked enumeration and discovery process can be started by
|
||||
a command from user space. This initiation method provides more flexibility
|
||||
@ -138,15 +154,42 @@ When a network scan process is started it calls an enumeration or discovery
|
||||
routine depending on the configured role of a master port: host or agent.
|
||||
|
||||
Enumeration is performed by a master port if it is configured as a host port by
|
||||
assigning a host device ID greater than or equal to zero. A host device ID is
|
||||
assigned to a master port through the kernel command line parameter "riohdid=",
|
||||
or can be configured in a platform-specific manner. If the host device ID for
|
||||
a specific master port is set to -1, the discovery process will be performed
|
||||
for it.
|
||||
assigning a host destination ID greater than or equal to zero. The host
|
||||
destination ID can be assigned to a master port using various methods depending
|
||||
on RapidIO subsystem build configuration:
|
||||
|
||||
(a) For a statically linked RapidIO subsystem core use command line parameter
|
||||
"rapidio.hdid=" with a list of destination ID assignments in order of mport
|
||||
device registration. For example, in a system with two RapidIO controllers
|
||||
the command line parameter "rapidio.hdid=-1,7" will result in assignment of
|
||||
the host destination ID=7 to the second RapidIO controller, while the first
|
||||
one will be assigned destination ID=-1.
|
||||
|
||||
(b) If the RapidIO subsystem core is built as a loadable module, in addition
|
||||
to the method shown above, the host destination ID(s) can be specified using
|
||||
traditional methods of passing module parameter "hdid=" during its loading:
|
||||
- from command line: "modprobe rapidio hdid=-1,7", or
|
||||
- from modprobe configuration file using configuration command "options",
|
||||
like in this example: "options rapidio hdid=-1,7". An example of modprobe
|
||||
configuration file is provided in the section below.
|
||||
|
||||
NOTES:
|
||||
(i) if "hdid=" parameter is omitted all available mport will be assigned
|
||||
destination ID = -1;
|
||||
(ii) the "hdid=" parameter in systems with multiple mports can have
|
||||
destination ID assignments omitted from the end of list (default = -1).
|
||||
|
||||
If the host device ID for a specific master port is set to -1, the discovery
|
||||
process will be performed for it.
|
||||
|
||||
The enumeration and discovery routines use RapidIO maintenance transactions
|
||||
to access the configuration space of devices.
|
||||
|
||||
NOTE: If RapidIO switch-specific device drivers are built as loadable modules
|
||||
they must be loaded before enumeration/discovery process starts.
|
||||
This requirement is cased by the fact that enumeration/discovery methods invoke
|
||||
vendor-specific callbacks on early stages.
|
||||
|
||||
4.2 Automatic Start of Enumeration and Discovery
|
||||
------------------------------------------------
|
||||
|
||||
@ -266,7 +309,36 @@ method's module initialization routine calls rio_register_scan() to attach
|
||||
an enumerator to a specified mport device (or devices). The basic enumerator
|
||||
implementation demonstrates this process.
|
||||
|
||||
5. References
|
||||
4.6 Using Loadable RapidIO Switch Drivers
|
||||
-----------------------------------------
|
||||
|
||||
In the case when RapidIO switch drivers are built as loadable modules a user
|
||||
must ensure that they are loaded before the enumeration/discovery starts.
|
||||
This process can be automated by specifying pre- or post- dependencies in the
|
||||
RapidIO-specific modprobe configuration file as shown in the example below.
|
||||
|
||||
File /etc/modprobe.d/rapidio.conf:
|
||||
----------------------------------
|
||||
|
||||
# Configure RapidIO subsystem modules
|
||||
|
||||
# Set enumerator host destination ID (overrides kernel command line option)
|
||||
options rapidio hdid=-1,2
|
||||
|
||||
# Load RapidIO switch drivers immediately after rapidio core module was loaded
|
||||
softdep rapidio post: idt_gen2 idtcps tsi57x
|
||||
|
||||
# OR :
|
||||
|
||||
# Load RapidIO switch drivers just before rio-scan enumerator module is loaded
|
||||
softdep rio-scan pre: idt_gen2 idtcps tsi57x
|
||||
|
||||
--------------------------
|
||||
|
||||
NOTE: In the example above, one of "softdep" commands must be removed or
|
||||
commented out to keep required module loading sequence.
|
||||
|
||||
A. References
|
||||
-------------
|
||||
|
||||
[1] RapidIO Trade Association. RapidIO Interconnect Specifications.
|
||||
|
@@ -40,6 +40,7 @@ device_rev - returns the device revision level
 		(see 4.1 for switch specific details)
 lprev      - returns name of previous device (switch) on the path to the device
 		that that owns this attribute
+modalias   - returns the device modalias
 
 In addition to the files listed above, each device has a binary attribute file
 that allows read/write access to the device configuration registers using
@@ -153,9 +153,10 @@ since_epoch:	 The number of seconds since the epoch according to the RTC
 time:		 RTC-provided time
 wakealarm:	 The time at which the clock will generate a system wakeup
		 event. This is a one shot wakeup event, so must be reset
-		 after wake if a daily wakeup is required. Format is either
-		 seconds since the epoch or, if there's a leading +, seconds
-		 in the future.
+		 after wake if a daily wakeup is required. Format is seconds since
+		 the epoch by default, or if there's a leading +, seconds in the
+		 future, or if there is a leading +=, seconds ahead of the current
+		 alarm.
 
 IOCTL INTERFACE
 ---------------
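A minimal user-space sketch of the new relative format (not part of the patch; assumes the first RTC is exposed as rtc0):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/rtc/rtc0/wakealarm", "w");

	if (!f) {
		perror("wakealarm");
		return 1;
	}
	/* "+60": fire 60 seconds from now; "+=60" would instead mean
	 * 60 seconds past the currently programmed alarm. */
	fprintf(f, "+60\n");
	fclose(f);
	return 0;
}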
@@ -15,7 +15,8 @@ There are three components to pagemap:
    * Bits 0-54  page frame number (PFN) if present
    * Bits 0-4   swap type if swapped
    * Bits 5-54  swap offset if swapped
-   * Bits 55-60 page shift (page size = 1<<page shift)
+   * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
+   * Bits 56-60 zero
    * Bit  61    page is file-page or shared-anon
    * Bit  62    page swapped
    * Bit  63    page present
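A small sketch (not part of the patch) of decoding one 64-bit pagemap entry according to the updated layout above; pagemap itself holds one such entry per virtual page.

#include <stdint.h>
#include <stdio.h>

static void decode_pagemap_entry(uint64_t e)
{
	int present     = (e >> 63) & 1;
	int swapped     = (e >> 62) & 1;
	int file_shared = (e >> 61) & 1;
	int soft_dirty  = (e >> 55) & 1;		/* bit added by this series */
	uint64_t pfn    = e & ((1ULL << 55) - 1);	/* bits 0-54, valid if present */

	printf("present=%d swapped=%d file/shared-anon=%d soft-dirty=%d pfn=0x%llx\n",
	       present, swapped, file_shared, soft_dirty,
	       present ? (unsigned long long)pfn : 0ULL);
}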
Documentation/vm/soft-dirty.txt (new file, 36 lines)
@@ -0,0 +1,36 @@
+                            SOFT-DIRTY PTEs
+
+  The soft-dirty is a bit on a PTE which helps to track which pages a task
+writes to. In order to do this tracking one should
+
+  1. Clear soft-dirty bits from the task's PTEs.
+
+     This is done by writing "4" into the /proc/PID/clear_refs file of the
+     task in question.
+
+  2. Wait some time.
+
+  3. Read soft-dirty bits from the PTEs.
+
+     This is done by reading from the /proc/PID/pagemap. The bit 55 of the
+     64-bit qword is the soft-dirty one. If set, the respective PTE was
+     written to since step 1.
+
+
+  Internally, to do this tracking, the writable bit is cleared from PTEs
+when the soft-dirty bit is cleared. So, after this, when the task tries to
+modify a page at some virtual address the #PF occurs and the kernel sets
+the soft-dirty bit on the respective PTE.
+
+  Note, that although all the task's address space is marked as r/o after the
+soft-dirty bits clear, the #PF-s that occur after that are processed fast.
+This is so, since the pages are still mapped to physical memory, and thus all
+the kernel does is finds this fact out and puts both writable and soft-dirty
+bits on the PTE.
+
+  This feature is actively used by the checkpoint-restore project. You
+can find more details about it on http://criu.org
+
+
+-- Pavel Emelyanov, Apr 9, 2013
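A compact user-space sketch of the three-step procedure above, run against the calling process itself (not part of the patch; error handling trimmed, page_was_written() is a made-up helper):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int page_was_written(void *addr)
{
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);
	off_t off = ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) * sizeof(entry);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
		entry = 0;
	close(fd);
	return (entry >> 55) & 1;		/* bit 55: soft-dirty */
}

int main(void)
{
	static char buf[4096];
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	write(fd, "4", 1);			/* step 1: clear soft-dirty bits */
	close(fd);

	buf[0] = 1;				/* step 2: write to a page */

	printf("soft-dirty: %d\n", page_was_written(buf));	/* step 3 */
	return 0;
}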
MAINTAINERS (10 changed lines)
@@ -1617,6 +1617,7 @@ F:	drivers/net/wireless/b43legacy/
 
 BACKLIGHT CLASS/SUBSYSTEM
 M:	Richard Purdie <rpurdie@rpsys.net>
+M:	Jingoo Han <jg1.han@samsung.com>
 S:	Maintained
 F:	drivers/video/backlight/
 F:	include/linux/backlight.h
@@ -5974,8 +5975,10 @@ M:	Willem Riede <osst@riede.org>
 L:	osst-users@lists.sourceforge.net
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
-F:	drivers/scsi/osst*
-F:	drivers/scsi/st*
+F:	Documentation/scsi/osst.txt
+F:	drivers/scsi/osst.*
+F:	drivers/scsi/osst_*.h
+F:	drivers/scsi/st.h
 
 OPENCORES I2C BUS DRIVER
 M:	Peter Korsgaard <jacmet@sunsite.dk>
@@ -7133,7 +7136,8 @@ M:	Kai Mäkisara <Kai.Makisara@kolumbus.fi>
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	Documentation/scsi/st.txt
-F:	drivers/scsi/st*
+F:	drivers/scsi/st.*
+F:	drivers/scsi/st_*.h
 
 SCTP PROTOCOL
 M:	Vlad Yasevich <vyasevich@gmail.com>
@@ -365,6 +365,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	bool
 
+config HAVE_ARCH_SOFT_DIRTY
+	bool
+
 config HAVE_MOD_ARCH_SPECIFIC
 	bool
 	help
@ -71,8 +71,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
|
||||
|
||||
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
|
||||
|
||||
#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
|
||||
|
||||
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
|
||||
#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32))
|
||||
#define pte_pfn(pte) (pte_val(pte) >> 32)
|
||||
|
@ -238,8 +238,8 @@ nautilus_init_pci(void)
|
||||
if (pci_mem < memtop)
|
||||
memtop = pci_mem;
|
||||
if (memtop > alpha_mv.min_mem_address) {
|
||||
free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address),
|
||||
(unsigned long)__va(memtop), 0, NULL);
|
||||
free_reserved_area(__va(alpha_mv.min_mem_address),
|
||||
__va(memtop), -1, NULL);
|
||||
printk("nautilus_init_pci: %ldk freed\n",
|
||||
(memtop - alpha_mv.min_mem_address) >> 10);
|
||||
}
|
||||
|
@ -276,56 +276,25 @@ srm_paging_stop (void)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_DISCONTIGMEM
|
||||
static void __init
|
||||
printk_memory_info(void)
|
||||
{
|
||||
unsigned long codesize, reservedpages, datasize, initsize, tmp;
|
||||
extern int page_is_ram(unsigned long) __init;
|
||||
|
||||
/* printk all informations */
|
||||
reservedpages = 0;
|
||||
for (tmp = 0; tmp < max_low_pfn; tmp++)
|
||||
/*
|
||||
* Only count reserved RAM pages
|
||||
*/
|
||||
if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
|
||||
reservedpages++;
|
||||
|
||||
codesize = (unsigned long) &_etext - (unsigned long) &_text;
|
||||
datasize = (unsigned long) &_edata - (unsigned long) &_data;
|
||||
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
|
||||
|
||||
printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
|
||||
nr_free_pages() << (PAGE_SHIFT-10),
|
||||
max_mapnr << (PAGE_SHIFT-10),
|
||||
codesize >> 10,
|
||||
reservedpages << (PAGE_SHIFT-10),
|
||||
datasize >> 10,
|
||||
initsize >> 10);
|
||||
}
|
||||
|
||||
void __init
|
||||
mem_init(void)
|
||||
{
|
||||
max_mapnr = num_physpages = max_low_pfn;
|
||||
totalram_pages += free_all_bootmem();
|
||||
set_max_mapnr(max_low_pfn);
|
||||
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
|
||||
|
||||
printk_memory_info();
|
||||
free_all_bootmem();
|
||||
mem_init_print_info(NULL);
|
||||
}
|
||||
#endif /* CONFIG_DISCONTIGMEM */
|
||||
|
||||
void
|
||||
free_initmem(void)
|
||||
{
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void
|
||||
free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
}
|
||||
#endif
|
||||
|
@ -129,8 +129,6 @@ setup_memory_node(int nid, void *kernel_end)
|
||||
if (node_max_pfn > max_low_pfn)
|
||||
max_pfn = max_low_pfn = node_max_pfn;
|
||||
|
||||
num_physpages += node_max_pfn - node_min_pfn;
|
||||
|
||||
#if 0 /* we'll try this one again in a little while */
|
||||
/* Cute trick to make sure our local node data is on local memory */
|
||||
node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
|
||||
@ -321,41 +319,3 @@ void __init paging_init(void)
|
||||
/* Initialize the kernel's ZERO_PGE. */
|
||||
memset((void *)ZERO_PGE, 0, PAGE_SIZE);
|
||||
}
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
unsigned long codesize, reservedpages, datasize, initsize, pfn;
|
||||
extern int page_is_ram(unsigned long) __init;
|
||||
unsigned long nid, i;
|
||||
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
|
||||
|
||||
reservedpages = 0;
|
||||
for_each_online_node(nid) {
|
||||
/*
|
||||
* This will free up the bootmem, ie, slot 0 memory
|
||||
*/
|
||||
totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
|
||||
|
||||
pfn = NODE_DATA(nid)->node_start_pfn;
|
||||
for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
|
||||
if (page_is_ram(pfn) &&
|
||||
PageReserved(nid_page_nr(nid, i)))
|
||||
reservedpages++;
|
||||
}
|
||||
|
||||
codesize = (unsigned long) &_etext - (unsigned long) &_text;
|
||||
datasize = (unsigned long) &_edata - (unsigned long) &_data;
|
||||
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
|
||||
|
||||
printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
|
||||
"%luk data, %luk init)\n",
|
||||
nr_free_pages() << (PAGE_SHIFT-10),
|
||||
num_physpages << (PAGE_SHIFT-10),
|
||||
codesize >> 10,
|
||||
reservedpages << (PAGE_SHIFT-10),
|
||||
datasize >> 10,
|
||||
initsize >> 10);
|
||||
#if 0
|
||||
mem_stress();
|
||||
#endif
|
||||
}
|
||||
|
@ -74,7 +74,7 @@ void __init setup_arch_memory(void)
|
||||
/* Last usable page of low mem (no HIGHMEM yet for ARC port) */
|
||||
max_low_pfn = max_pfn = PFN_DOWN(end_mem);
|
||||
|
||||
max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
|
||||
max_mapnr = max_low_pfn - min_low_pfn;
|
||||
|
||||
/*------------- reserve kernel image -----------------------*/
|
||||
memblock_reserve(CONFIG_LINUX_LINK_BASE,
|
||||
@ -84,7 +84,7 @@ void __init setup_arch_memory(void)
|
||||
|
||||
/*-------------- node setup --------------------------------*/
|
||||
memset(zones_size, 0, sizeof(zones_size));
|
||||
zones_size[ZONE_NORMAL] = num_physpages;
|
||||
zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
|
||||
|
||||
/*
|
||||
* We can't use the helper free_area_init(zones[]) because it uses
|
||||
@ -106,39 +106,9 @@ void __init setup_arch_memory(void)
|
||||
*/
|
||||
void __init mem_init(void)
|
||||
{
|
||||
int codesize, datasize, initsize, reserved_pages, free_pages;
|
||||
int tmp;
|
||||
|
||||
high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
|
||||
|
||||
totalram_pages = free_all_bootmem();
|
||||
|
||||
/* count all reserved pages [kernel code/data/mem_map..] */
|
||||
reserved_pages = 0;
|
||||
for (tmp = 0; tmp < max_mapnr; tmp++)
|
||||
if (PageReserved(mem_map + tmp))
|
||||
reserved_pages++;
|
||||
|
||||
/* XXX: nr_free_pages() is equivalent */
|
||||
free_pages = max_mapnr - reserved_pages;
|
||||
|
||||
/*
|
||||
* For the purpose of display below, split the "reserve mem"
|
||||
* kernel code/data is already shown explicitly,
|
||||
* Show any other reservations (mem_map[ ] et al)
|
||||
*/
|
||||
reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
|
||||
PAGE_SHIFT);
|
||||
|
||||
codesize = _etext - _text;
|
||||
datasize = _end - _etext;
|
||||
initsize = __init_end - __init_begin;
|
||||
|
||||
pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
|
||||
PAGES_TO_MB(free_pages),
|
||||
TO_MB(arc_mem_sz),
|
||||
TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
|
||||
PAGES_TO_KB(reserved_pages));
|
||||
free_all_bootmem();
|
||||
mem_init_print_info(NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -146,13 +116,13 @@ void __init mem_init(void)
|
||||
*/
|
||||
void __init_refok free_initmem(void)
|
||||
{
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void __init free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -613,7 +613,7 @@
|
||||
};
|
||||
|
||||
rtc-iobg {
|
||||
compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus";
|
||||
compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
reg = <0x80030000 0x10000>;
|
||||
|
@ -610,7 +610,7 @@
|
||||
};
|
||||
|
||||
rtc-iobg {
|
||||
compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus";
|
||||
compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
reg = <0x80030000 0x10000>;
|
||||
|
@ -276,12 +276,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
|
||||
/*
|
||||
* Conversion between a struct page and a physical address.
|
||||
*
|
||||
* Note: when converting an unknown physical address to a
|
||||
* struct page, the resulting pointer must be validated
|
||||
* using VALID_PAGE(). It must return an invalid struct page
|
||||
* for any physical address not corresponding to a system
|
||||
* RAM address.
|
||||
*
|
||||
* page_to_pfn(page) convert a struct page * to a PFN number
|
||||
* pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
|
||||
*
|
||||
|
@ -469,7 +469,6 @@ void __init iop13xx_platform_init(void)
|
||||
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_XOR, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
|
||||
break;
|
||||
case IOP13XX_INIT_ADMA_1:
|
||||
@ -479,7 +478,6 @@ void __init iop13xx_platform_init(void)
|
||||
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_XOR, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
|
||||
break;
|
||||
case IOP13XX_INIT_ADMA_2:
|
||||
@ -489,7 +487,6 @@ void __init iop13xx_platform_init(void)
|
||||
dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_XOR, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_PQ, plat_data->cap_mask);
|
||||
dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
|
||||
|
@ -583,9 +583,6 @@ static void __init free_highpages(void)
|
||||
*/
|
||||
void __init mem_init(void)
|
||||
{
|
||||
unsigned long reserved_pages, free_pages;
|
||||
struct memblock_region *reg;
|
||||
int i;
|
||||
#ifdef CONFIG_HAVE_TCM
|
||||
/* These pointers are filled in on TCM detection */
|
||||
extern u32 dtcm_end;
|
||||
@ -596,57 +593,16 @@ void __init mem_init(void)
|
||||
|
||||
/* this will put all unused low memory onto the freelists */
|
||||
free_unused_memmap(&meminfo);
|
||||
|
||||
totalram_pages += free_all_bootmem();
|
||||
free_all_bootmem();
|
||||
|
||||
#ifdef CONFIG_SA1111
|
||||
/* now that our DMA memory is actually so designated, we can free it */
|
||||
free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
|
||||
free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, -1, NULL);
|
||||
#endif
|
||||
|
||||
free_highpages();
|
||||
|
||||
reserved_pages = free_pages = 0;
|
||||
|
||||
for_each_bank(i, &meminfo) {
|
||||
struct membank *bank = &meminfo.bank[i];
|
||||
unsigned int pfn1, pfn2;
|
||||
struct page *page, *end;
|
||||
|
||||
pfn1 = bank_pfn_start(bank);
|
||||
pfn2 = bank_pfn_end(bank);
|
||||
|
||||
page = pfn_to_page(pfn1);
|
||||
end = pfn_to_page(pfn2 - 1) + 1;
|
||||
|
||||
do {
|
||||
if (PageReserved(page))
|
||||
reserved_pages++;
|
||||
else if (!page_count(page))
|
||||
free_pages++;
|
||||
page++;
|
||||
} while (page < end);
|
||||
}
|
||||
|
||||
/*
|
||||
* Since our memory may not be contiguous, calculate the
|
||||
* real number of pages we have in this system
|
||||
*/
|
||||
printk(KERN_INFO "Memory:");
|
||||
num_physpages = 0;
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned long pages = memblock_region_memory_end_pfn(reg) -
|
||||
memblock_region_memory_base_pfn(reg);
|
||||
num_physpages += pages;
|
||||
printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
|
||||
}
|
||||
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
|
||||
|
||||
printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
|
||||
nr_free_pages() << (PAGE_SHIFT-10),
|
||||
free_pages << (PAGE_SHIFT-10),
|
||||
reserved_pages << (PAGE_SHIFT-10),
|
||||
totalhigh_pages << (PAGE_SHIFT-10));
|
||||
mem_init_print_info(NULL);
|
||||
|
||||
#define MLK(b, t) b, t, ((t) - (b)) >> 10
|
||||
#define MLM(b, t) b, t, ((t) - (b)) >> 20
|
||||
@ -712,7 +668,7 @@ void __init mem_init(void)
|
||||
BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
|
||||
#endif
|
||||
|
||||
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
|
||||
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
|
||||
extern int sysctl_overcommit_memory;
|
||||
/*
|
||||
* On a machine this small we won't get
|
||||
@ -729,12 +685,12 @@ void free_initmem(void)
|
||||
extern char __tcm_start, __tcm_end;
|
||||
|
||||
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
|
||||
free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
|
||||
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
|
||||
#endif
|
||||
|
||||
poison_init_mem(__init_begin, __init_end - __init_begin);
|
||||
if (!machine_is_integrator() && !machine_is_cintegrator())
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
@ -745,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
if (!keep_initrd) {
|
||||
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -192,12 +192,10 @@ static int __init iop3xx_adma_cap_init(void)
|
||||
|
||||
#ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */
|
||||
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
|
||||
dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
|
||||
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
|
||||
#else
|
||||
dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
|
||||
dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
|
||||
dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
|
||||
dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
|
||||
#endif
|
||||
|
||||
|
@ -666,14 +666,9 @@ void __init orion_xor0_init(unsigned long mapbase_low,
|
||||
orion_xor0_shared_resources[3].start = irq_1;
|
||||
orion_xor0_shared_resources[3].end = irq_1;
|
||||
|
||||
/*
|
||||
* two engines can't do memset simultaneously, this limitation
|
||||
* satisfied by removing memset support from one of the engines.
|
||||
*/
|
||||
dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask);
|
||||
dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask);
|
||||
|
||||
dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask);
|
||||
dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask);
|
||||
dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask);
|
||||
|
||||
@ -732,14 +727,9 @@ void __init orion_xor1_init(unsigned long mapbase_low,
|
||||
orion_xor1_shared_resources[3].start = irq_1;
|
||||
orion_xor1_shared_resources[3].end = irq_1;
|
||||
|
||||
/*
|
||||
* two engines can't do memset simultaneously, this limitation
|
||||
* satisfied by removing memset support from one of the engines.
|
||||
*/
|
||||
dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask);
|
||||
dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask);
|
||||
|
||||
dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask);
|
||||
dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask);
|
||||
dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask);
|
||||
|
||||
|
@ -197,14 +197,6 @@ void __init bootmem_init(void)
|
||||
max_pfn = max_low_pfn = max;
|
||||
}
|
||||
|
||||
/*
|
||||
* Poison init memory with an undefined instruction (0x0).
|
||||
*/
|
||||
static inline void poison_init_mem(void *s, size_t count)
|
||||
{
|
||||
memset(s, 0, count);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
||||
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
|
||||
{
|
||||
@ -280,59 +272,17 @@ static void __init free_unused_memmap(void)
|
||||
*/
|
||||
void __init mem_init(void)
|
||||
{
|
||||
unsigned long reserved_pages, free_pages;
|
||||
struct memblock_region *reg;
|
||||
|
||||
arm64_swiotlb_init();
|
||||
|
||||
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
|
||||
|
||||
#ifndef CONFIG_SPARSEMEM_VMEMMAP
|
||||
/* this will put all unused low memory onto the freelists */
|
||||
free_unused_memmap();
|
||||
#endif
|
||||
/* this will put all unused low memory onto the freelists */
|
||||
free_all_bootmem();
|
||||
|
||||
totalram_pages += free_all_bootmem();
|
||||
|
||||
reserved_pages = free_pages = 0;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned int pfn1, pfn2;
|
||||
struct page *page, *end;
|
||||
|
||||
pfn1 = __phys_to_pfn(reg->base);
|
||||
pfn2 = pfn1 + __phys_to_pfn(reg->size);
|
||||
|
||||
page = pfn_to_page(pfn1);
|
||||
end = pfn_to_page(pfn2 - 1) + 1;
|
||||
|
||||
do {
|
||||
if (PageReserved(page))
|
||||
reserved_pages++;
|
||||
else if (!page_count(page))
|
||||
free_pages++;
|
||||
page++;
|
||||
} while (page < end);
|
||||
}
|
||||
|
||||
/*
|
||||
* Since our memory may not be contiguous, calculate the real number
|
||||
* of pages we have in this system.
|
||||
*/
|
||||
pr_info("Memory:");
|
||||
num_physpages = 0;
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned long pages = memblock_region_memory_end_pfn(reg) -
|
||||
memblock_region_memory_base_pfn(reg);
|
||||
num_physpages += pages;
|
||||
printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
|
||||
}
|
||||
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
|
||||
|
||||
pr_notice("Memory: %luk/%luk available, %luk reserved\n",
|
||||
nr_free_pages() << (PAGE_SHIFT-10),
|
||||
free_pages << (PAGE_SHIFT-10),
|
||||
reserved_pages << (PAGE_SHIFT-10));
|
||||
mem_init_print_info(NULL);
|
||||
|
||||
#define MLK(b, t) b, t, ((t) - (b)) >> 10
|
||||
#define MLM(b, t) b, t, ((t) - (b)) >> 20
|
||||
@ -374,7 +324,7 @@ void __init mem_init(void)
|
||||
BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
|
||||
BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
|
||||
|
||||
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
|
||||
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
|
||||
extern int sysctl_overcommit_memory;
|
||||
/*
|
||||
* On a machine this small we won't get anywhere without
|
||||
@ -386,7 +336,6 @@ void __init mem_init(void)
|
||||
|
||||
void free_initmem(void)
|
||||
{
|
||||
poison_init_mem(__init_begin, __init_end - __init_begin);
|
||||
free_initmem_default(0);
|
||||
}
|
||||
|
||||
@ -396,10 +345,8 @@ static int keep_initrd;
|
||||
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
if (!keep_initrd) {
|
||||
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
}
|
||||
if (!keep_initrd)
|
||||
free_reserved_area((void *)start, (void *)end, 0, "initrd");
|
||||
}
|
||||
|
||||
static int __init keepinitrd_setup(char *__unused)
|
||||
|
@ -555,7 +555,7 @@ void __init setup_arch (char **cmdline_p)
|
||||
{
|
||||
struct clk *cpu_clk;
|
||||
|
||||
init_mm.start_code = (unsigned long)_text;
|
||||
init_mm.start_code = (unsigned long)_stext;
|
||||
init_mm.end_code = (unsigned long)_etext;
|
||||
init_mm.end_data = (unsigned long)_edata;
|
||||
init_mm.brk = (unsigned long)_end;
|
||||
|
@ -23,7 +23,7 @@ SECTIONS
|
||||
{
|
||||
. = CONFIG_ENTRY_ADDRESS;
|
||||
.init : AT(ADDR(.init) - LOAD_OFFSET) {
|
||||
_stext = .;
|
||||
_text = .;
|
||||
__init_begin = .;
|
||||
_sinittext = .;
|
||||
*(.text.reset)
|
||||
@ -46,7 +46,7 @@ SECTIONS
|
||||
|
||||
.text : AT(ADDR(.text) - LOAD_OFFSET) {
|
||||
_evba = .;
|
||||
_text = .;
|
||||
_stext = .;
|
||||
*(.ex.text)
|
||||
*(.irq.text)
|
||||
KPROBES_TEXT
|
||||
|
@ -100,60 +100,26 @@ void __init paging_init(void)
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
int codesize, reservedpages, datasize, initsize;
|
||||
int nid, i;
|
||||
pg_data_t *pgdat;
|
||||
|
||||
reservedpages = 0;
|
||||
high_memory = NULL;
|
||||
for_each_online_pgdat(pgdat)
|
||||
high_memory = max_t(void *, high_memory,
|
||||
__va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
|
||||
|
||||
/* this will put all low memory onto the freelists */
|
||||
for_each_online_node(nid) {
|
||||
pg_data_t *pgdat = NODE_DATA(nid);
|
||||
unsigned long node_pages = 0;
|
||||
void *node_high_memory;
|
||||
|
||||
num_physpages += pgdat->node_present_pages;
|
||||
|
||||
if (pgdat->node_spanned_pages != 0)
|
||||
node_pages = free_all_bootmem_node(pgdat);
|
||||
|
||||
totalram_pages += node_pages;
|
||||
|
||||
for (i = 0; i < node_pages; i++)
|
||||
if (PageReserved(pgdat->node_mem_map + i))
|
||||
reservedpages++;
|
||||
|
||||
node_high_memory = (void *)((pgdat->node_start_pfn
|
||||
+ pgdat->node_spanned_pages)
|
||||
<< PAGE_SHIFT);
|
||||
if (node_high_memory > high_memory)
|
||||
high_memory = node_high_memory;
|
||||
}
|
||||
|
||||
max_mapnr = MAP_NR(high_memory);
|
||||
|
||||
codesize = (unsigned long)_etext - (unsigned long)_text;
|
||||
datasize = (unsigned long)_edata - (unsigned long)_data;
|
||||
initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
|
||||
|
||||
printk ("Memory: %luk/%luk available (%dk kernel code, "
|
||||
"%dk reserved, %dk data, %dk init)\n",
|
||||
nr_free_pages() << (PAGE_SHIFT - 10),
|
||||
totalram_pages << (PAGE_SHIFT - 10),
|
||||
codesize >> 10,
|
||||
reservedpages << (PAGE_SHIFT - 10),
|
||||
datasize >> 10,
|
||||
initsize >> 10);
|
||||
set_max_mapnr(MAP_NR(high_memory));
|
||||
free_all_bootmem();
|
||||
mem_init_print_info(NULL);
|
||||
}
|
||||
|
||||
void free_initmem(void)
|
||||
{
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
}
|
||||
#endif
|
||||
|
@ -90,50 +90,24 @@ asmlinkage void __init init_pda(void)
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
unsigned int codek = 0, datak = 0, initk = 0;
|
||||
unsigned int reservedpages = 0, freepages = 0;
|
||||
unsigned long tmp;
|
||||
unsigned long start_mem = memory_start;
|
||||
unsigned long end_mem = memory_end;
|
||||
char buf[64];
|
||||
|
||||
end_mem &= PAGE_MASK;
|
||||
high_memory = (void *)end_mem;
|
||||
|
||||
start_mem = PAGE_ALIGN(start_mem);
|
||||
max_mapnr = num_physpages = MAP_NR(high_memory);
|
||||
printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);
|
||||
high_memory = (void *)(memory_end & PAGE_MASK);
|
||||
max_mapnr = MAP_NR(high_memory);
|
||||
printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", max_mapnr);
|
||||
|
||||
/* This will put all low memory onto the freelists. */
|
||||
totalram_pages = free_all_bootmem();
|
||||
free_all_bootmem();
|
||||
|
||||
reservedpages = 0;
|
||||
for (tmp = ARCH_PFN_OFFSET; tmp < max_mapnr; tmp++)
|
||||
if (PageReserved(pfn_to_page(tmp)))
|
||||
reservedpages++;
|
||||
freepages = max_mapnr - ARCH_PFN_OFFSET - reservedpages;
|
||||
|
||||
/* do not count in kernel image between _rambase and _ramstart */
|
||||
reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
|
||||
#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
|
||||
reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
|
||||
#endif
|
||||
|
||||
codek = (_etext - _stext) >> 10;
|
||||
initk = (__init_end - __init_begin) >> 10;
|
||||
datak = ((_ramstart - _rambase) >> 10) - codek - initk;
|
||||
|
||||
printk(KERN_INFO
|
||||
"Memory available: %luk/%luk RAM, "
|
||||
"(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n",
|
||||
(unsigned long) freepages << (PAGE_SHIFT-10), (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 10,
|
||||
initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
|
||||
snprintf(buf, sizeof(buf) - 1, "%uK DMA", DMA_UNCACHED_REGION >> 10);
|
||||
mem_init_print_info(buf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void __init free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
#ifndef CONFIG_MPU
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
@ -141,7 +115,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
|
||||
void __init_refok free_initmem(void)
|
||||
{
|
||||
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
if (memory_start == (unsigned long)(&__init_end))
|
||||
memory_start = (unsigned long)(&__init_begin);
|
||||
#endif
|
||||
|
@ -54,16 +54,15 @@ SECTIONS
|
||||
}
|
||||
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
__init_begin = .;
|
||||
.init :
|
||||
{
|
||||
_stext = .;
|
||||
_sinittext = .;
|
||||
HEAD_TEXT
|
||||
INIT_TEXT
|
||||
_einittext = .;
|
||||
}
|
||||
|
||||
__init_begin = _stext;
|
||||
INIT_DATA_SECTION(16)
|
||||
|
||||
PERCPU_SECTION(128)
|
||||
@ -74,6 +73,7 @@ SECTIONS
|
||||
.text :
|
||||
{
|
||||
_text = .;
|
||||
_stext = .;
|
||||
TEXT_TEXT
|
||||
SCHED_TEXT
|
||||
LOCK_TEXT
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/initrd.h>
|
||||
|
||||
#include <asm/sections.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
/*
|
||||
* ZERO_PAGE is a special page that is used for zero-initialized
|
||||
@ -57,31 +58,22 @@ void __init paging_init(void)
|
||||
|
||||
void __init mem_init(void)
|
||||
{
|
||||
int codek, datak;
|
||||
unsigned long tmp;
|
||||
unsigned long len = memory_end - memory_start;
|
||||
|
||||
high_memory = (void *)(memory_end & PAGE_MASK);
|
||||
|
||||
/* this will put all memory onto the freelists */
|
||||
totalram_pages = free_all_bootmem();
|
||||
free_all_bootmem();
|
||||
|
||||
codek = (_etext - _stext) >> 10;
|
||||
datak = (_end - _sdata) >> 10;
|
||||
|
||||
tmp = nr_free_pages() << PAGE_SHIFT;
|
||||
printk(KERN_INFO "Memory: %luk/%luk RAM (%dk kernel code, %dk data)\n",
|
||||
tmp >> 10, len >> 10, codek, datak);
|
||||
mem_init_print_info(NULL);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
void __init free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
free_reserved_area(start, end, 0, "initrd");
|
||||
free_reserved_area((void *)start, (void *)end, -1, "initrd");
|
||||
}
|
||||
#endif
|
||||
|
||||
void __init free_initmem(void)
|
||||
{
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
@ -51,7 +51,6 @@ typedef struct page *pgtable_t;
|
||||
*/
|
||||
|
||||
#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))
|
||||
#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
|
||||
#define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)
|
||||
|
||||
/* convert a page (based on mem_map and forward) to a physical address
|
||||
|
@ -19,9 +19,6 @@ unsigned long empty_zero_page;
|
||||
void __init
|
||||
mem_init(void)
|
||||
{
|
||||
int codesize, reservedpages, datasize, initsize;
|
||||
unsigned long tmp;
|
||||
|
||||
BUG_ON(!mem_map);
|
||||
|
||||
/* max/min_low_pfn was set by setup.c
|
||||
@ -29,35 +26,9 @@ mem_init(void)
|
||||
*
|
||||
* high_memory was also set in setup.c
|
||||
*/
|
||||
|
||||
max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
|
||||
|
||||
/* this will put all memory onto the freelists */
|
||||
totalram_pages = free_all_bootmem();
|
||||
|
||||
reservedpages = 0;
|
||||
for (tmp = 0; tmp < max_mapnr; tmp++) {
|
||||
/*
|
||||
* Only count reserved RAM pages
|
||||
*/
|
||||
if (PageReserved(mem_map + tmp))
|
||||
reservedpages++;
|
||||
}
|
||||
|
||||
codesize = (unsigned long) &_etext - (unsigned long) &_stext;
|
||||
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
|
||||
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
|
||||
|
||||
printk(KERN_INFO
|
||||
"Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, "
|
||||
"%dk init)\n" ,
|
||||
nr_free_pages() << (PAGE_SHIFT-10),
|
||||
max_mapnr << (PAGE_SHIFT-10),
|
||||
codesize >> 10,
|
||||
reservedpages << (PAGE_SHIFT-10),
|
||||
datasize >> 10,
|
||||
initsize >> 10
|
||||
);
|
||||
max_mapnr = max_low_pfn - min_low_pfn;
|
||||
free_all_bootmem();
|
||||
mem_init_print_info(NULL);
|
||||
}
|
||||
|
||||
/* free the pages occupied by initialization code */
|
||||
@ -65,5 +36,5 @@ mem_init(void)
|
||||
void
|
||||
free_initmem(void)
|
||||
{
|
||||
free_initmem_default(0);
|
||||
free_initmem_default(-1);
|
||||
}
|
||||
|
@ -735,7 +735,7 @@ static void __init parse_cmdline_early(char *cmdline)
|
||||
/* "mem=XXX[kKmM]" sets SDRAM size to <mem>, overriding the value we worked
|
||||
* out from the SDRAM controller mask register
|
||||
*/
|
||||
if (!memcmp(cmdline, "mem=", 4)) {
|
||||
if (!strncmp(cmdline, "mem=", 4)) {
|
||||
unsigned long long mem_size;
|
||||
|
||||
mem_size = memparse(cmdline + 4, &cmdline);
|
||||
@ -876,6 +876,7 @@ late_initcall(setup_arch_serial);
|
||||
static void __init setup_linux_memory(void)
|
||||
{
|
||||
unsigned long bootmap_size, low_top_pfn, kstart, kend, high_mem;
|
||||
unsigned long physpages;
|
||||
|
||||
kstart = (unsigned long) &__kernel_image_start - PAGE_OFFSET;
|
||||
kend = (unsigned long) &__kernel_image_end - PAGE_OFFSET;
|
||||
@ -893,19 +894,19 @@ static void __init setup_linux_memory(void)
|
||||
);
|
||||
|
||||
/* pass the memory that the kernel can immediately use over to the bootmem allocator */
|
||||
max_mapnr = num_physpages = (memory_end - memory_start) >> PAGE_SHIFT;
|
||||
max_mapnr = physpages = (memory_end - memory_start) >> PAGE_SHIFT;
|
||||
low_top_pfn = (KERNEL_LOWMEM_END - KERNEL_LOWMEM_START) >> PAGE_SHIFT;
|
||||
high_mem = 0;
|
||||
|
||||
if (num_physpages > low_top_pfn) {
|
||||
if (physpages > low_top_pfn) {
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
high_mem = num_physpages - low_top_pfn;
|
||||
high_mem = physpages - low_top_pfn;
|
||||
#else
|
||||
max_mapnr = num_physpages = low_top_pfn;
|
||||
max_mapnr = physpages = low_top_pfn;
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
low_top_pfn = num_physpages;
|
||||
low_top_pfn = physpages;
|
||||
}
|
||||
|
||||
min_low_pfn = memory_start >> PAGE_SHIFT;
|
||||
@ -979,7 +980,7 @@ static void __init setup_uclinux_memory(void)
|
||||
free_bootmem(memory_start, memory_end - memory_start);
|
||||
|
||||
high_memory = (void *) (memory_end & PAGE_MASK);
|
||||
max_mapnr = num_physpages = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
|
||||
max_mapnr = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
|
||||
|
||||
min_low_pfn = memory_start >> PAGE_SHIFT;
|
||||
max_low_pfn = memory_end >> PAGE_SHIFT;
|
||||
|
@ -523,7 +523,7 @@ void die_if_kernel(const char *str, ...)
return;

va_start(va, str);
vsprintf(buffer, str, va);
vsnprintf(buffer, sizeof(buffer), str, va);
va_end(va);

console_verbose();

@ -78,7 +78,7 @@ void __init paging_init(void)
memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
if (num_physpages - num_mappedpages) {
if (get_num_physpages() - num_mappedpages) {
pgd_t *pge;
pud_t *pue;
pmd_t *pme;
@ -96,7 +96,7 @@ void __init paging_init(void)
*/
zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
zones_size[ZONE_HIGHMEM] = get_num_physpages() - num_mappedpages;
#endif

free_area_init(zones_size);
@ -114,45 +114,24 @@ void __init paging_init(void)
*/
void __init mem_init(void)
{
unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
unsigned long tmp;
#ifdef CONFIG_MMU
unsigned long loop, pfn;
int datapages = 0;
#endif
int codek = 0, datak = 0;
unsigned long code_size = _etext - _stext;

/* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
free_all_bootmem();
#if defined(CONFIG_MMU) && defined(CONFIG_HIGHMEM)
{
unsigned long pfn;

#ifdef CONFIG_MMU
for (loop = 0 ; loop < npages ; loop++)
if (PageReserved(&mem_map[loop]))
datapages++;

#ifdef CONFIG_HIGHMEM
for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--)
free_highmem_page(&mem_map[pfn]);
for (pfn = get_num_physpages() - 1;
pfn >= num_mappedpages; pfn--)
free_highmem_page(&mem_map[pfn]);
}
#endif

codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
datak = datapages << (PAGE_SHIFT - 10);

#else
codek = (_etext - _stext) >> 10;
datak = 0; //(__bss_stop - _sdata) >> 10;
#endif

tmp = nr_free_pages() << PAGE_SHIFT;
printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
tmp >> 10,
npages << (PAGE_SHIFT - 10),
(rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
rom_length >> 10,
codek,
datak
);

mem_init_print_info(NULL);
if (rom_length > 0 && rom_length >= code_size)
printk("Memory available: %luKiB/%luKiB ROM\n",
(rom_length - code_size) >> 10, rom_length >> 10);
} /* end mem_init() */

/*****************************************************************************/
@ -162,7 +141,7 @@ void __init mem_init(void)
void free_initmem(void)
{
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
free_initmem_default(0);
free_initmem_default(-1);
#endif
} /* end free_initmem() */

@ -173,6 +152,6 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
} /* end free_initrd_mem() */
#endif
@ -79,7 +79,6 @@ static void error(char *m);

int puts(const char *);

extern int _text; /* Defined in vmlinux.lds.S */
extern int _end;
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;

@ -132,10 +132,12 @@ SECTIONS
{
. = ALIGN(0x4) ;
__sbss = . ;
___bss_start = . ;
*(.bss*)
. = ALIGN(0x4) ;
*(COMMON)
. = ALIGN(0x4) ;
___bss_stop = . ;
__ebss = . ;
__end = . ;
__ramstart = .;

@ -121,47 +121,27 @@ void __init paging_init(void)

void __init mem_init(void)
{
int codek = 0, datak = 0, initk = 0;
/* DAVIDM look at setup memory map generically with reserved area */
unsigned long tmp;
extern unsigned long _ramend, _ramstart;
unsigned long len = &_ramend - &_ramstart;
unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */
unsigned long codesize = _etext - _stext;

#ifdef DEBUG
printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
#endif
pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end);

end_mem &= PAGE_MASK;
high_memory = (void *) end_mem;

start_mem = PAGE_ALIGN(start_mem);
max_mapnr = num_physpages = MAP_NR(high_memory);
high_memory = (void *) (memory_end & PAGE_MASK);
max_mapnr = MAP_NR(high_memory);

/* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
free_all_bootmem();

codek = (_etext - _stext) >> 10;
datak = (__bss_stop - _sdata) >> 10;
initk = (__init_begin - __init_end) >> 10;

tmp = nr_free_pages() << PAGE_SHIFT;
printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
tmp >> 10,
len >> 10,
(rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
rom_length >> 10,
codek,
datak
);
mem_init_print_info(NULL);
if (rom_length > 0 && rom_length > codesize)
pr_info("Memory available: %luK/%luK ROM\n",
(rom_length - codesize) >> 10, rom_length >> 10);
}


#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

@ -169,7 +149,7 @@ void
free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
free_initmem_default(0);
free_initmem_default(-1);
#endif
}
@ -70,10 +70,8 @@ unsigned long long kmap_generation;
void __init mem_init(void)
{
/* No idea where this is actually declared. Seems to evade LXR. */
totalram_pages += free_all_bootmem();
num_physpages = bootmem_lastpg-ARCH_PFN_OFFSET;

printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);
free_all_bootmem();
mem_init_print_info(NULL);

/*
* To-Do: someone somewhere should wipe out the bootmem map
@ -1116,11 +1116,6 @@ efi_memmap_init(u64 *s, u64 *e)
if (!is_memory_available(md))
continue;

#ifdef CONFIG_CRASH_DUMP
/* saved_max_pfn should ignore max_addr= command line arg */
if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
#endif
/*
* Round ends inward to granule boundaries
* Give trimmings to uncached allocator

@ -294,14 +294,6 @@ find_memory (void)
alloc_per_cpu_data();
}

static int count_pages(u64 start, u64 end, void *arg)
{
unsigned long *count = arg;

*count += (end - start) >> PAGE_SHIFT;
return 0;
}

/*
* Set up the page tables.
*/
@ -312,9 +304,6 @@ paging_init (void)
unsigned long max_dma;
unsigned long max_zone_pfns[MAX_NR_ZONES];

num_physpages = 0;
efi_memmap_walk(count_pages, &num_physpages);

memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

@ -37,7 +37,6 @@ struct early_node_data {
struct ia64_node_data *node_data;
unsigned long pernode_addr;
unsigned long pernode_size;
unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
unsigned long num_dma_physpages;
#endif
@ -732,7 +731,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;

mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
@ -778,7 +776,6 @@ void __init paging_init(void)
#endif

for_each_online_node(node) {
num_physpages += mem_data[node].num_physpages;
pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
@ -154,9 +154,8 @@ ia64_init_addr_space (void)
void
free_initmem (void)
{
free_reserved_area((unsigned long)ia64_imva(__init_begin),
(unsigned long)ia64_imva(__init_end),
0, "unused kernel");
free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
-1, "unused kernel");
}

void __init
@ -546,19 +545,6 @@ int __init register_active_ranges(u64 start, u64 len, int nid)
return 0;
}

static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
unsigned long num_reserved = 0;
unsigned long *count = arg;

for (; start < end; start += PAGE_SIZE)
if (PageReserved(virt_to_page(start)))
++num_reserved;
*count += num_reserved;
return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
@ -597,8 +583,6 @@ __setup("nolwsys", nolwsys_setup);
void __init
mem_init (void)
{
long reserved_pages, codesize, datasize, initsize;
pg_data_t *pgdat;
int i;

BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
@ -616,27 +600,12 @@ mem_init (void)

#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
max_mapnr = max_low_pfn;
#endif

set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);

for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);

reserved_pages = 0;
efi_memmap_walk(count_reserved_pages, &reserved_pages);

codesize = (unsigned long) _etext - (unsigned long) _stext;
datasize = (unsigned long) _edata - (unsigned long) _etext;
initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
"%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

free_all_bootmem();
mem_init_print_info(NULL);

/*
* For fsyscall entrpoints with no light-weight handler, use the ordinary

@ -129,11 +129,10 @@ unsigned long __init setup_memory(void)
#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)

unsigned long __init zone_sizes_init(void)
void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
unsigned long low, start_pfn;
unsigned long holes = 0;
int nid, i;
mem_prof_t *mp;

@ -147,7 +146,6 @@ unsigned long __init zone_sizes_init(void)
low = MAX_LOW_PFN(nid);
zones_size[ZONE_DMA] = low - start_pfn;
zholes_size[ZONE_DMA] = mp->holes;
holes += zholes_size[ZONE_DMA];

node_set_state(nid, N_NORMAL_MEMORY);
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
@ -161,6 +159,4 @@ unsigned long __init zone_sizes_init(void)
NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;

return holes;
}

@ -40,7 +40,6 @@ unsigned long mmu_context_cache_dat;
#else
unsigned long mmu_context_cache_dat[NR_CPUS];
#endif
static unsigned long hole_pages;

/*
* function prototype
@ -57,7 +56,7 @@ void free_initrd_mem(unsigned long, unsigned long);
#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)

#ifndef CONFIG_DISCONTIGMEM
unsigned long __init zone_sizes_init(void)
void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
unsigned long max_dma;
@ -83,11 +82,9 @@ unsigned long __init zone_sizes_init(void)
#endif /* CONFIG_MMU */

free_area_init_node(0, zones_size, start_pfn, 0);

return 0;
}
#else /* CONFIG_DISCONTIGMEM */
extern unsigned long zone_sizes_init(void);
extern void zone_sizes_init(void);
#endif /* CONFIG_DISCONTIGMEM */

/*======================================================================*
@ -105,24 +102,7 @@ void __init paging_init(void)
for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++)
pgd_val(pg_dir[i]) = 0;
#endif /* CONFIG_MMU */
hole_pages = zone_sizes_init();
}

int __init reservedpages_count(void)
{
int reservedpages, nid, i;

reservedpages = 0;
for_each_online_node(nid) {
unsigned long flags;
pgdat_resize_lock(NODE_DATA(nid), &flags);
for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++)
if (PageReserved(nid_page_nr(nid, i)))
reservedpages++;
pgdat_resize_unlock(NODE_DATA(nid), &flags);
}

return reservedpages;
zone_sizes_init();
}

/*======================================================================*
@ -131,48 +111,20 @@ int __init reservedpages_count(void)
*======================================================================*/
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
int nid;
#ifndef CONFIG_MMU
extern unsigned long memory_end;
#endif

num_physpages = 0;
for_each_online_node(nid)
num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1;

num_physpages -= hole_pages;

#ifndef CONFIG_DISCONTIGMEM
max_mapnr = num_physpages;
#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_MMU
high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
#else
high_memory = (void *)(memory_end & PAGE_MASK);
#else
high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
#endif /* CONFIG_MMU */

/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);

/* this will put all low memory onto the freelists */
for_each_online_node(nid)
totalram_pages += free_all_bootmem_node(NODE_DATA(nid));

reservedpages = reservedpages_count() - hole_pages;
codesize = (unsigned long) &_etext - (unsigned long)&_text;
datasize = (unsigned long) &_edata - (unsigned long)&_etext;
initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin;

printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
"%dk reserved, %dk data, %dk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10);
set_max_mapnr(get_num_physpages());
free_all_bootmem();
mem_init_print_info(NULL);
}

/*======================================================================*
@ -181,7 +133,7 @@ void __init mem_init(void)
*======================================================================*/
void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
@ -191,6 +143,6 @@ void free_initmem(void)
*======================================================================*/
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
@ -110,7 +110,7 @@ void __init paging_init(void)
void free_initmem(void)
{
#ifndef CONFIG_MMU_SUN3
free_initmem_default(0);
free_initmem_default(-1);
#endif /* CONFIG_MMU_SUN3 */
}

@ -146,38 +146,11 @@ void __init print_memmap(void)
MLK_ROUNDUP(__bss_start, __bss_stop));
}

void __init mem_init(void)
static inline void init_pointer_tables(void)
{
pg_data_t *pgdat;
int codepages = 0;
int datapages = 0;
int initpages = 0;
#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
int i;

/* this will put all memory onto the freelists */
totalram_pages = num_physpages = 0;
for_each_online_pgdat(pgdat) {
num_physpages += pgdat->node_present_pages;

totalram_pages += free_all_bootmem_node(pgdat);
for (i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page = pgdat->node_mem_map + i;
char *addr = page_to_virt(page);

if (!PageReserved(page))
continue;
if (addr >= _text &&
addr < _etext)
codepages++;
else if (addr >= __init_begin &&
addr < __init_end)
initpages++;
else
datapages++;
}
}

#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
@ -189,19 +162,20 @@ void __init mem_init(void)
if (zero_pgtable)
init_pointer_table((unsigned long)zero_pgtable);
#endif
}

pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
totalram_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
void __init mem_init(void)
{
/* this will put all memory onto the freelists */
free_all_bootmem();
init_pointer_tables();
mem_init_print_info(NULL);
print_memmap();
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
@ -376,34 +376,21 @@ void __init paging_init(unsigned long mem_end)

void __init mem_init(void)
{
int nid;

#ifdef CONFIG_HIGHMEM
unsigned long tmp;

/*
* Explicitly reset zone->managed_pages because highmem pages are
* freed before calling free_all_bootmem();
*/
reset_all_zones_managed_pages();
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
num_physpages += totalhigh_pages;
#endif /* CONFIG_HIGHMEM */

for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long node_pages = 0;

num_physpages += pgdat->node_present_pages;

if (pgdat->node_spanned_pages)
node_pages = free_all_bootmem_node(pgdat);

totalram_pages += node_pages;
}

pr_info("Memory: %luk/%luk available\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10));

free_all_bootmem();
mem_init_print_info(NULL);
show_mem(0);

return;
}

void free_initmem(void)
@ -414,7 +401,8 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -168,7 +168,6 @@ extern int page_is_ram(unsigned long pfn);
# else /* CONFIG_MMU */
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
# define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
# endif /* CONFIG_MMU */

# endif /* __ASSEMBLY__ */

@ -71,24 +71,17 @@ static void __init highmem_init(void)
kmap_prot = PAGE_KERNEL;
}

static unsigned long highmem_setup(void)
static void highmem_setup(void)
{
unsigned long pfn;
unsigned long reservedpages = 0;

for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
struct page *page = pfn_to_page(pfn);

/* FIXME not sure about */
if (memblock_is_reserved(pfn << PAGE_SHIFT))
continue;
free_highmem_page(page);
reservedpages++;
if (!memblock_is_reserved(pfn << PAGE_SHIFT))
free_highmem_page(page);
}
pr_info("High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));

return reservedpages;
}
#endif /* CONFIG_HIGHMEM */

@ -167,13 +160,12 @@ void __init setup_memory(void)
* min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
* max_low_pfn
* max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
* num_physpages - number of all pages
*/

/* memory start is from the kernel end (aligned) to higher addr */
min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
/* RAM is assumed contiguous */
num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
max_mapnr = memory_size >> PAGE_SHIFT;
max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

@ -235,57 +227,26 @@ void __init setup_memory(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(-1);
}

void __init mem_init(void)
{
pg_data_t *pgdat;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

high_memory = (void *)__va(memory_start + lowmem_size - 1);

/* this will put all memory onto the freelists */
totalram_pages += free_all_bootmem();

for_each_online_pgdat(pgdat) {
unsigned long i;
struct page *page;

for (i = 0; i < pgdat->node_spanned_pages; i++) {
if (!pfn_valid(pgdat->node_start_pfn + i))
continue;
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
}

free_all_bootmem();
#ifdef CONFIG_HIGHMEM
reservedpages -= highmem_setup();
highmem_setup();
#endif

codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

pr_info("Memory: %luk/%luk available (%luk kernel code, ",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10);
pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n",
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
initsize >> 10);

mem_init_print_info(NULL);
#ifdef CONFIG_MMU
pr_info("Kernel virtual memory layout:\n");
pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
@ -4,16 +4,6 @@
#include <asm/uaccess.h>
#include <linux/slab.h>

static int __init parse_savemaxmem(char *p)
{
if (p)
saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

return 1;
}
__setup("savemaxmem=", parse_savemaxmem);


static void *kdump_buf_page;

/**
@ -359,11 +359,24 @@ void __init paging_init(void)
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;

for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = pfn_to_page(tmp);

if (!page_is_ram(tmp))
SetPageReserved(page);
else
free_highmem_page(page);
}
#endif
}

void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
@ -374,34 +387,10 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

totalram_pages += free_all_bootmem();
free_all_bootmem();
setup_zero_pages(); /* Setup zeroed pages. */

reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
if (page_is_ram(tmp) && pfn_valid(tmp)) {
ram++;
if (PageReserved(pfn_to_page(tmp)))
reservedpages++;
}
num_physpages = ram;

#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = pfn_to_page(tmp);

if (!page_is_ram(tmp)) {
SetPageReserved(page);
continue;
}
free_highmem_page(page);
}
num_physpages += totalhigh_pages;
#endif

codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
mem_init_free_highmem();
mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
@ -410,16 +399,6 @@ void __init mem_init(void)
kclist_add(&kcore_kseg0, (void *) CKSEG0,
0x80000000 - 4, KCORE_TEXT);
#endif

printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

@ -440,7 +419,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -89,7 +89,7 @@ static inline u32 ltq_calc_bar11mask(void)
u32 mem, bar11mask;

/* BAR11MASK value depends on available memory on system. */
mem = num_physpages * PAGE_SIZE;
mem = get_num_physpages() * PAGE_SIZE;
bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8;

return bar11mask;
@ -357,8 +357,6 @@ static void __init szmem(void)
int slot;
cnodeid_t node;

num_physpages = 0;

for_each_online_node(node) {
nodebytes = 0;
for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
@ -381,7 +379,6 @@ static void __init szmem(void)
slot = MAX_MEM_SLOTS;
continue;
}
num_physpages += slot_psize;
memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
PFN_PHYS(slot_psize), node);
}
@ -480,32 +477,8 @@ void __init paging_init(void)

void __init mem_init(void)
{
unsigned long codesize, datasize, initsize, tmp;
unsigned node;

high_memory = (void *) __va(num_physpages << PAGE_SHIFT);

for_each_online_node(node) {
/*
* This will free up the bootmem, ie, slot 0 memory.
*/
totalram_pages += free_all_bootmem_node(NODE_DATA(node));
}

high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
free_all_bootmem();
setup_zero_pages(); /* This comes from node 0 */

codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

tmp = nr_free_pages();
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
tmp << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
(num_physpages - tmp) << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
totalhigh_pages << (PAGE_SHIFT-10));
mem_init_print_info(NULL);
}
@ -99,43 +99,21 @@ void __init paging_init(void)
*/
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
int tmp;

BUG_ON(!mem_map);

#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)

max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
max_mapnr = MAX_LOW_PFN - START_PFN;
high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);

/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);

/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
free_all_bootmem();

reservedpages = 0;
for (tmp = 0; tmp < num_physpages; tmp++)
if (PageReserved(&mem_map[tmp]))
reservedpages++;

codesize = (unsigned long) &_etext - (unsigned long) &_stext;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

printk(KERN_INFO
"Memory: %luk/%luk available"
" (%dk kernel code, %dk reserved, %dk data, %dk init,"
" %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT - 10),
max_mapnr << (PAGE_SHIFT - 10),
codesize >> 10,
reservedpages << (PAGE_SHIFT - 10),
datasize >> 10,
initsize >> 10,
totalhigh_pages << (PAGE_SHIFT - 10));
mem_init_print_info(NULL);
}

/*
@ -152,6 +130,7 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -202,56 +202,20 @@ void __init paging_init(void)

/* References to section boundaries */

static int __init free_pages_init(void)
{
int reservedpages, pfn;

/* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();

reservedpages = 0;
for (pfn = 0; pfn < max_low_pfn; pfn++) {
/*
* Only count reserved RAM pages
*/
if (PageReserved(mem_map + pfn))
reservedpages++;
}

return reservedpages;
}

static void __init set_max_mapnr_init(void)
{
max_mapnr = num_physpages = max_low_pfn;
}

void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;

BUG_ON(!mem_map);

set_max_mapnr_init();

max_mapnr = max_low_pfn;
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);

reservedpages = free_pages_init();
/* this will put all low memory onto the freelists */
free_all_bootmem();

codesize = (unsigned long)&_etext - (unsigned long)&_stext;
datasize = (unsigned long)&_edata - (unsigned long)&_etext;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;

printk(KERN_INFO
"Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
max_mapnr << (PAGE_SHIFT - 10), codesize >> 10,
reservedpages << (PAGE_SHIFT - 10), datasize >> 10,
initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10))
);
mem_init_print_info(NULL);

printk("mem_init_done ...........................................\n");
mem_init_done = 1;
@ -261,11 +225,11 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(-1);
}
@ -214,7 +214,6 @@ static void __init setup_bootmem(void)
mem_limit_func(); /* check for "mem=" argument */

mem_max = 0;
num_physpages = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long rsize;

@ -229,10 +228,8 @@ static void __init setup_bootmem(void)
npmem_ranges = i + 1;
mem_max = mem_limit;
}
num_physpages += pmem_ranges[i].pages;
break;
}
num_physpages += pmem_ranges[i].pages;
mem_max += rsize;
}

@ -532,7 +529,7 @@ void free_initmem(void)
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);

num_physpages += free_initmem_default(0);
free_initmem_default(-1);

/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@ -580,8 +577,6 @@ unsigned long pcxl_dma_start __read_mostly;

void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;

/* Do sanity checks on page table constants */
BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
@ -590,45 +585,8 @@ void __init mem_init(void)
> BITS_PER_LONG);

high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
totalram_pages += free_all_bootmem();
#else
{
int i;

for (i = 0; i < npmem_ranges; i++)
totalram_pages += free_all_bootmem_node(NODE_DATA(i));
}
#endif

codesize = (unsigned long)_etext - (unsigned long)_text;
datasize = (unsigned long)_edata - (unsigned long)_etext;
initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

reservedpages = 0;
{
unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
int i;

for (i = 0; i < npmem_ranges; i++) {
for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
if (PageReserved(pfn_to_page(pfn)))
reservedpages++;
}
}
#else /* !CONFIG_DISCONTIGMEM */
for (pfn = 0; pfn < max_pfn; pfn++) {
/*
* Only count reserved RAM pages
*/
if (PageReserved(pfn_to_page(pfn)))
reservedpages++;
}
#endif
}
set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
free_all_bootmem();

#ifdef CONFIG_PA11
if (hppa_dma_ops == &pcxl_dma_ops) {
@ -643,15 +601,7 @@ void __init mem_init(void)
parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10
);

mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
printk("virtual kernel memory layout:\n"
" vmalloc : 0x%p - 0x%p (%4ld MB)\n"
@ -1101,6 +1051,6 @@ void flush_tlb_all(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
num_physpages += free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
@ -69,16 +69,6 @@ void __init setup_kdump_trampoline(void)
}
#endif /* CONFIG_NONSTATIC_KERNEL */

static int __init parse_savemaxmem(char *p)
{
if (p)
saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

return 1;
}
__setup("savemaxmem=", parse_savemaxmem);


static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
unsigned long offset, int userbuf)
{

@ -750,13 +750,8 @@ EXPORT_SYMBOL_GPL(kvm_hypercall);

static __init void kvm_free_tmp(void)
{
unsigned long start, end;

start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

/* Free the tmp space we don't need */
free_reserved_area(start, end, 0, NULL);
free_reserved_area(&kvm_tmp[kvm_tmp_index],
&kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)

@ -357,7 +357,7 @@ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
int alloc_bootmem_huge_page(struct hstate *hstate)
{
struct huge_bootmem_page *m;
int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
int idx = shift_to_mmu_psize(huge_page_shift(hstate));
int nr_gpages = gpage_freearray[idx].nr_gpages;

if (nr_gpages == 0)
@ -299,47 +299,13 @@ void __init paging_init(void)

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
pg_data_t *pgdat;
unsigned long i;
struct page *page;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
swiotlb_init(0);
#endif

num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %d\n", nid);
totalram_pages +=
free_all_bootmem_node(NODE_DATA(nid));
}
}
#else
max_mapnr = max_pfn;
totalram_pages += free_all_bootmem();
#endif
for_each_online_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
if (!pfn_valid(pgdat->node_start_pfn + i))
continue;
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
}

codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
set_max_mapnr(max_pfn);
free_all_bootmem();

#ifdef CONFIG_HIGHMEM
{
@ -349,13 +315,9 @@ void __init mem_init(void)
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
struct page *page = pfn_to_page(pfn);
if (memblock_is_reserved(paddr))
continue;
free_highmem_page(page);
reservedpages--;
if (!memblock_is_reserved(paddr))
free_highmem_page(page);
}
printk(KERN_DEBUG "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* CONFIG_HIGHMEM */

@ -368,16 +330,7 @@ void __init mem_init(void)
(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
initsize >> 10);

mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
pr_info("Kernel virtual memory layout:\n");
pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
@ -407,7 +360,7 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
@ -719,10 +719,6 @@ static void reserve_oldmem(void)
}
create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
if (OLDMEM_BASE + OLDMEM_SIZE == real_size)
saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
else
saved_max_pfn = PFN_DOWN(real_size) - 1;
#endif
}

@ -135,30 +135,17 @@ void __init paging_init(void)

void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;

max_mapnr = num_physpages = max_low_pfn;
max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

/* Setup guest page hinting */
cmma_init();

/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
free_all_bootmem();
setup_zero_pages(); /* Setup zeroed pages. */

reservedpages = 0;

codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >>10,
initsize >> 10);
mem_init_print_info(NULL);
printk("Write protected kernel read-only data: %#lx - %#lx\n",
(unsigned long)&_stext,
PFN_ALIGN((unsigned long)&_eshared) - 1);
@ -166,13 +153,14 @@ void __init mem_init(void)

void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -49,6 +49,7 @@ SECTIONS
}

. = ALIGN(16);
_sdata = .; /* Start of data section */
RODATA

EXCEPTION_TABLE(16)

@ -75,40 +75,19 @@ void __init paging_init(void)

void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
unsigned long tmp, ram = 0;

high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
free_all_bootmem();
setup_zero_page(); /* Setup zeroed pages. */
reservedpages = 0;

for (tmp = 0; tmp < max_low_pfn; tmp++)
if (page_is_ram(tmp)) {
ram++;
if (PageReserved(pfn_to_page(tmp)))
reservedpages++;
}

num_physpages = ram;
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10), codesize >> 10,
reservedpages << (PAGE_SHIFT-10), datasize >> 10,
initsize >> 10,
totalhigh_pages << (PAGE_SHIFT-10));
mem_init_print_info(NULL);
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -407,30 +407,16 @@ unsigned int mem_init_done = 0;

void __init mem_init(void)
{
int codesize, datasize, initsize;
int nid;
pg_data_t *pgdat;

iommu_init();

num_physpages = 0;
high_memory = NULL;
for_each_online_pgdat(pgdat)
high_memory = max_t(void *, high_memory,
__va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
void *node_high_memory;

num_physpages += pgdat->node_present_pages;

if (pgdat->node_spanned_pages)
totalram_pages += free_all_bootmem_node(pgdat);


node_high_memory = (void *)__va((pgdat->node_start_pfn +
pgdat->node_spanned_pages) <<
PAGE_SHIFT);
if (node_high_memory > high_memory)
high_memory = node_high_memory;
}
free_all_bootmem();

/* Set this up early, so we can take care of the zero page */
cpu_cache_init();
@ -441,19 +427,8 @@ void __init mem_init(void)

vsyscall_init();

codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
"%dk data, %dk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
datasize >> 10,
initsize >> 10);

printk(KERN_INFO "virtual kernel memory layout:\n"
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
@ -499,13 +474,13 @@ void __init mem_init(void)

void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
@ -254,15 +254,12 @@ void __init leon_smp_done(void)
/* Free unneeded trap tables */
if (!cpu_present(1)) {
free_reserved_page(virt_to_page(&trapbase_cpu1));
num_physpages++;
}
if (!cpu_present(2)) {
free_reserved_page(virt_to_page(&trapbase_cpu2));
num_physpages++;
}
if (!cpu_present(3)) {
free_reserved_page(virt_to_page(&trapbase_cpu3));
num_physpages++;
}
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;

@ -288,10 +288,6 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)

void __init mem_init(void)
{
int codepages = 0;
int datapages = 0;
int initpages = 0;
int reservedpages = 0;
int i;

if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
@ -323,15 +319,12 @@ void __init mem_init(void)

max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT);

totalram_pages = free_all_bootmem();
free_all_bootmem();

for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;

if (end_pfn <= highstart_pfn)
continue;

@ -341,39 +334,19 @@ void __init mem_init(void)
map_high_region(start_pfn, end_pfn);
}

codepages = (((unsigned long) &_etext) - ((unsigned long)&_start));
codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext));
datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

/* Ignore memory holes for the purpose of counting reserved pages */
for (i=0; i < max_low_pfn; i++)
if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
&& PageReserved(pfn_to_page(i)))
reservedpages++;

printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT - 10),
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT - 10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10));
mem_init_print_info(NULL);
}

void free_initmem (void)
{
num_physpages += free_initmem_default(POISON_FREE_INITMEM);
free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM,
"initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -2045,7 +2045,6 @@ static void __init register_page_bootmem_info(void)
}
void __init mem_init(void)
{
unsigned long codepages, datapages, initpages;
unsigned long addr, last;

addr = PAGE_OFFSET + kern_base;
@ -2061,12 +2060,7 @@ void __init mem_init(void)
high_memory = __va(last_valid_pfn << PAGE_SHIFT);

register_page_bootmem_info();
totalram_pages = free_all_bootmem();

/* We subtract one to account for the mem_map_zero page
* allocated below.
*/
num_physpages = totalram_pages - 1;
free_all_bootmem();

/*
* Set up the zero page, mark it reserved, so that page count
@ -2079,19 +2073,7 @@ void __init mem_init(void)
}
mark_page_reserved(mem_map_zero);

codepages = (((unsigned long) _etext) - ((unsigned long) _start));
codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
nr_free_pages() << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
mem_init_print_info(NULL);

if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_ecache_flush_init();
@ -2131,8 +2113,8 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM,
"initrd");
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#endif
@ -34,7 +34,7 @@ extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
#endif

/* Handle the discontiguity between _sdata and _stext. */
/* Handle the discontiguity between _sdata and _text. */
static inline int arch_is_kernel_data(unsigned long addr)
{
return addr >= (unsigned long)_sdata &&

@ -307,8 +307,8 @@ static void __cpuinit store_permanent_mappings(void)
hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
}

hv_store_mapping((HV_VirtAddr)_stext,
(uint32_t)(_einittext - _stext), 0);
hv_store_mapping((HV_VirtAddr)_text,
(uint32_t)(_einittext - _text), 0);
}

/*
@ -329,6 +329,7 @@ static void __init setup_memory(void)
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
long lowmem_pages;
#endif
unsigned long physpages = 0;

/* We are using a char to hold the cpu_2_node[] mapping */
BUILD_BUG_ON(MAX_NUMNODES > 127);
@ -388,8 +389,8 @@ static void __init setup_memory(void)
continue;
}
}
if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
int max_size = maxmem_pfn - num_physpages;
if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
int max_size = maxmem_pfn - physpages;
if (max_size > 0) {
pr_err("Maxmem reduced node %d to %d pages\n",
i, max_size);
@ -446,7 +447,7 @@ static void __init setup_memory(void)
node_start_pfn[i] = start;
node_end_pfn[i] = end;
node_controller[i] = range.controller;
num_physpages += size;
physpages += size;
max_pfn = end;

/* Mark node as online */
@ -465,7 +466,7 @@ static void __init setup_memory(void)
* we're willing to use at 8 million pages (32GB of 4KB pages).
*/
cap = 8 * 1024 * 1024; /* 8 million pages */
if (num_physpages > cap) {
if (physpages > cap) {
int num_nodes = num_online_nodes();
int cap_each = cap / num_nodes;
unsigned long dropped_pages = 0;
@ -476,10 +477,10 @@ static void __init setup_memory(void)
node_end_pfn[i] = node_start_pfn[i] + cap_each;
}
}
num_physpages -= dropped_pages;
physpages -= dropped_pages;
pr_warning("Only using %ldMB memory;"
" ignoring %ldMB.\n",
num_physpages >> (20 - PAGE_SHIFT),
physpages >> (20 - PAGE_SHIFT),
dropped_pages >> (20 - PAGE_SHIFT));
pr_warning("Consider using a larger page size.\n");
}
@ -497,7 +498,7 @@ static void __init setup_memory(void)

lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
MAXMEM_PFN : mappable_physpages;
highmem_pages = (long) (num_physpages - lowmem_pages);
highmem_pages = (long) (physpages - lowmem_pages);

pr_notice("%ldMB HIGHMEM available.\n",
pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
@ -514,7 +515,6 @@ static void __init setup_memory(void)
pr_warning("Use a HIGHMEM enabled kernel.\n");
max_low_pfn = MAXMEM_PFN;
max_pfn = MAXMEM_PFN;
num_physpages = MAXMEM_PFN;
node_end_pfn[0] = MAXMEM_PFN;
} else {
pr_notice("%ldMB memory available.\n",
@ -27,7 +27,6 @@ SECTIONS
.intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */
{
_text = .;
_stext = .;
*(.intrpt1)
} :intrpt1 =0

@ -36,6 +35,7 @@ SECTIONS

/* Now the real code */
. = ALIGN(0x20000);
_stext = .;
.text : AT (ADDR(.text) - LOAD_OFFSET) {
HEAD_TEXT
SCHED_TEXT
@ -58,11 +58,13 @@ SECTIONS
#define LOAD_OFFSET PAGE_OFFSET

. = ALIGN(PAGE_SIZE);
__init_begin = .;
VMLINUX_SYMBOL(_sinitdata) = .;
INIT_DATA_SECTION(16) :data =0
PERCPU_SECTION(L2_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_einitdata) = .;
__init_end = .;

_sdata = .; /* Start of data section */
@ -562,7 +562,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
prot = ktext_set_nocache(prot);
}

BUG_ON(address != (unsigned long)_stext);
BUG_ON(address != (unsigned long)_text);
pte = NULL;
for (; address < (unsigned long)_einittext;
pfn++, address += PAGE_SIZE) {
@ -720,7 +720,7 @@ static void __init init_free_pfn_range(unsigned long start, unsigned long end)
}
init_page_count(page);
__free_pages(page, order);
totalram_pages += count;
adjust_managed_page_count(page, count);

page += count;
pfn += count;
@ -821,7 +821,6 @@ static void __init set_max_mapnr_init(void)

void __init mem_init(void)
{
int codesize, datasize, initsize;
int i;
#ifndef __tilegx__
void *last;
@ -846,26 +845,14 @@ void __init mem_init(void)
set_max_mapnr_init();

/* this will put all bootmem onto the freelists */
totalram_pages += free_all_bootmem();
free_all_bootmem();

#ifndef CONFIG_64BIT
/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
set_non_bootmem_pages_init();
#endif

codesize = (unsigned long)&_etext - (unsigned long)&_text;
datasize = (unsigned long)&_end - (unsigned long)&_sdata;
initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
datasize >> 10,
initsize >> 10,
(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
);
mem_init_print_info(NULL);

/*
* In debug mode, dump some interesting memory mappings.
@ -1024,16 +1011,13 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
pte_clear(&init_mm, addr, ptep);
continue;
}
__ClearPageReserved(page);
init_page_count(page);
if (pte_huge(*ptep))
BUG_ON(!kdata_huge);
else
set_pte_at(&init_mm, addr, ptep,
pfn_pte(pfn, PAGE_KERNEL));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
free_reserved_page(page);
}
pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
@@ -57,7 +57,6 @@
*(.uml.initcall.init)
__uml_initcall_end = .;
}
__init_end = .;

SECURITY_INIT

@@ -14,8 +14,6 @@ SECTIONS
__binary_start = .;
. = ALIGN(4096); /* Init code and data */
_text = .;
_stext = .;
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)

. = ALIGN(PAGE_SIZE);
@@ -67,6 +65,7 @@ SECTIONS
} =0x90909090
.plt : { *(.plt) }
.text : {
_stext = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -91,7 +90,9 @@ SECTIONS

#include <asm/common.lds.S>

__init_begin = .;
init.data : { INIT_DATA }
__init_end = .;

/* Ensure the __preinit_array_start label is properly aligned. We
could instead move the label definition inside the section, but
@@ -155,6 +156,7 @@ SECTIONS
. = ALIGN(32 / 8);
. = ALIGN(32 / 8);
}
__bss_stop = .;
_end = .;
PROVIDE (end = .);

@@ -65,15 +65,13 @@ void __init mem_init(void)
uml_reserved = brk_end;

/* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
free_all_bootmem();
max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
setup_highmem(end_iomem, highmem);
#endif
num_physpages = totalram_pages;
max_pfn = totalram_pages;
printk(KERN_INFO "Memory: %luk available\n",
nr_free_pages() << (PAGE_SHIFT-10));
mem_init_print_info(NULL);
kmalloc_ok = 1;
}

@@ -244,7 +242,7 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

@@ -20,13 +20,12 @@ SECTIONS
. = START + SIZEOF_HEADERS;

_text = .;
_stext = .;
__init_begin = .;
INIT_TEXT_SECTION(0)
. = ALIGN(PAGE_SIZE);

.text :
{
_stext = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -62,7 +61,10 @@ SECTIONS

#include <asm/common.lds.S>

__init_begin = .;
init.data : { INIT_DATA }
__init_end = .;

.data :
{
INIT_TASK_DATA(KERNEL_STACK_SIZE)
@@ -97,6 +99,7 @@ SECTIONS
PROVIDE(_bss_start = .);
SBSS(0)
BSS(0)
__bss_stop = .;
_end = .;
PROVIDE (end = .);

@@ -98,12 +98,6 @@
/*
* Conversion between a struct page and a physical address.
*
* Note: when converting an unknown physical address to a
* struct page, the resulting pointer must be validated
* using VALID_PAGE(). It must return an invalid struct page
* for any physical address not corresponding to a system
* RAM address.
*
* page_to_pfn(page) convert a struct page * to a PFN number
* pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
*
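
The comment above documents the PFN/struct page conversion helpers that this diff touches. As a minimal illustrative sketch (not part of the patch), assuming a kernel context where pfn_valid(), pfn_to_page() and page_to_pfn() are available via <linux/mm.h>, a round trip looks like:

#include <linux/mm.h>	/* pfn_valid(), pfn_to_page(), page_to_pfn() */

/* Hypothetical helper, for illustration only. */
static inline bool pfn_round_trips(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))		/* reject addresses outside system RAM */
		return false;
	page = pfn_to_page(pfn);	/* valid PFN -> struct page * */
	return page_to_pfn(page) == pfn;	/* struct page * -> PFN */
}
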
@@ -383,59 +383,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
*/
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
int i;

max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

free_unused_memmap(&meminfo);

/* this will put all unused low memory onto the freelists */
totalram_pages += free_all_bootmem();

reserved_pages = free_pages = 0;

for_each_bank(i, &meminfo) {
struct membank *bank = &meminfo.bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;

pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);

page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;

do {
if (PageReserved(page))
reserved_pages++;
else if (!page_count(page))
free_pages++;
page++;
} while (page < end);
}

/*
* Since our memory may not be contiguous, calculate the
* real number of pages we have in this system
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
for_each_memblock(memory, reg) {
unsigned long pages = memblock_region_memory_end_pfn(reg) -
memblock_region_memory_base_pfn(reg);
num_physpages += pages;
printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
nr_free_pages() << (PAGE_SHIFT-10),
free_pages << (PAGE_SHIFT-10),
reserved_pages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10));
free_all_bootmem();

mem_init_print_info(NULL);
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
@@ -464,7 +419,7 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
BUG_ON(TASK_SIZE > MODULES_VADDR);

if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
/*
* On a machine this small we won't get
* anywhere without overcommit, so turn
@@ -476,7 +431,7 @@ void __init mem_init(void)

void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
@@ -486,7 +441,7 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}

static int __init keepinitrd_setup(char *__unused)

@@ -102,6 +102,7 @@ config X86
select HAVE_ARCH_SECCOMP_FILTER
select BUILDTIME_EXTABLE_SORT
select GENERIC_CMOS_UPDATE
select HAVE_ARCH_SOFT_DIRTY
select CLOCKSOURCE_WATCHDOG
select GENERIC_CLOCKEVENTS
select ARCH_CLOCKSOURCE_DATA if X86_64
@@ -2258,11 +2259,11 @@ source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"

config RAPIDIO
bool "RapidIO support"
tristate "RapidIO support"
depends on PCI
default n
help
If you say Y here, the kernel will include drivers and
If enabled this option will include drivers and the core
infrastructure code to support RapidIO interconnect devices.

source "drivers/rapidio/Kconfig"

@@ -207,7 +207,7 @@ static inline pte_t pte_mkexec(pte_t pte)

static inline pte_t pte_mkdirty(pte_t pte)
{
return pte_set_flags(pte, _PAGE_DIRTY);
return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
@@ -271,7 +271,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_DIRTY);
return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
@@ -294,6 +294,26 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
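
The hunks above add the soft-dirty PTE/PMD accessors (pte_soft_dirty(), pte_mksoft_dirty() and their pmd counterparts), backed by the _PAGE_SOFT_DIRTY bit defined below. A minimal sketch of how a caller could exercise them, for illustration only and assuming <linux/bug.h> is available for WARN_ON():

#include <linux/bug.h>		/* WARN_ON() */
#include <asm/pgtable.h>	/* pte_mksoft_dirty(), pte_soft_dirty() */

/* Hypothetical helper, for illustration only: mark a PTE soft-dirty
 * and verify the accessor reports the bit back. */
static inline pte_t mark_and_check_soft_dirty(pte_t pte)
{
	pte = pte_mksoft_dirty(pte);	/* sets _PAGE_SOFT_DIRTY */
	WARN_ON(!pte_soft_dirty(pte));	/* reads the same flag back */
	return pte;
}
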
@@ -55,6 +55,18 @@
#define _PAGE_HIDDEN (_AT(pteval_t, 0))
#endif

/*
* The same hidden bit is used by kmemcheck, but since kmemcheck
* works on kernel pages while soft-dirty engine on user space,
* they do not conflict with each other.
*/

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else

@@ -89,7 +89,6 @@ struct thread_info {
#define TIF_FORK 18 /* ret_from_fork */
#define TIF_NOHZ 19 /* in adaptive nohz mode */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_DEBUG 21 /* uses debug registers */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
@@ -113,7 +112,6 @@ struct thread_info {
#define _TIF_IA32 (1 << TIF_IA32)
#define _TIF_FORK (1 << TIF_FORK)
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_DEBUG (1 << TIF_DEBUG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
@@ -154,7 +152,7 @@ struct thread_info {
(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

#define PREEMPT_ACTIVE 0x10000000

@@ -90,7 +90,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
u32 l, h;
int mbytes = num_physpages >> (20-PAGE_SHIFT);
int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

if (c->x86_model < 6) {
/* Based on AMD doc 20734R - June 2000 */

@@ -1040,8 +1040,6 @@ void __init setup_arch(char **cmdline_p)
/* max_low_pfn get updated here */
find_low_pfn_range();
#else
num_physpages = max_pfn;

check_x2apic();

/* How many end-of-memory variables you have, grandma! */

@@ -1,6 +1,7 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
@@ -121,6 +122,11 @@ void __init set_highmem_pages_init(void)
struct zone *zone;
int nid;

/*
* Explicitly reset zone->managed_pages because set_highmem_pages_init()
* is invoked before free_all_bootmem()
*/
reset_all_zones_managed_pages();
for_each_zone(zone) {
unsigned long zone_start_pfn, zone_end_pfn;

@@ -494,7 +494,6 @@ int devmem_is_allowed(unsigned long pagenr)

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
unsigned long begin_aligned, end_aligned;

/* Make sure boundaries are page aligned */
@@ -509,8 +508,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
if (begin >= end)
return;

addr = begin;

/*
* If debugging page accesses then do not free this memory but
* mark them not present - any buggy init-section access will
@@ -529,18 +526,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

for (; addr < end; addr += PAGE_SIZE) {
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_reserved_page(virt_to_page(addr));
}
free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
#endif
}

void free_initmem(void)
{
free_init_pages("unused kernel memory",
free_init_pages("unused kernel",
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
}
@@ -566,7 +558,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
* - relocate_initrd()
* So here We can do PAGE_ALIGN() safely to get partial page to be freed
*/
free_init_pages("initrd memory", start, PAGE_ALIGN(end));
free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

@@ -660,10 +660,8 @@ void __init initmem_init(void)
highstart_pfn = max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
num_physpages = highend_pfn;
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

@@ -671,7 +669,7 @@ void __init initmem_init(void)
sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
max_mapnr = num_physpages;
max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
__vmalloc_start_set = true;

@@ -739,9 +737,6 @@ static void __init test_wp_bit(void)

void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
int tmp;

pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
@@ -759,32 +754,11 @@ void __init mem_init(void)
set_highmem_pages_init();

/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();

reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
/*
* Only count reserved RAM pages:
*/
if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
reservedpages++;
free_all_bootmem();

after_bootmem = 1;

codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
totalhigh_pages << (PAGE_SHIFT-10));

mem_init_print_info(NULL);
printk(KERN_INFO "virtual kernel memory layout:\n"
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM

@@ -712,36 +712,22 @@ EXPORT_SYMBOL_GPL(arch_add_memory);

static void __meminit free_pagetable(struct page *page, int order)
{
struct zone *zone;
bool bootmem = false;
unsigned long magic;
unsigned int nr_pages = 1 << order;

/* bootmem page has reserved flag */
if (PageReserved(page)) {
__ClearPageReserved(page);
bootmem = true;

magic = (unsigned long)page->lru.next;
if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
while (nr_pages--)
put_page_bootmem(page++);
} else
__free_pages_bootmem(page, order);
while (nr_pages--)
free_reserved_page(page++);
} else
free_pages((unsigned long)page_address(page), order);

/*
* SECTION_INFO pages and MIX_SECTION_INFO pages
* are all allocated by bootmem.
*/
if (bootmem) {
zone = page_zone(page);
zone_span_writelock(zone);
zone->present_pages += nr_pages;
zone_span_writeunlock(zone);
totalram_pages += nr_pages;
}
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
@@ -1058,9 +1044,6 @@ static void __init register_page_bootmem_info(void)

void __init mem_init(void)
{
long codesize, reservedpages, datasize, initsize;
unsigned long absent_pages;

pci_iommu_alloc();

/* clear_bss() already clear the empty_zero_page */
@@ -1068,29 +1051,14 @@ void __init mem_init(void)
register_page_bootmem_info();

/* this will put all memory onto the freelists */
totalram_pages = free_all_bootmem();

absent_pages = absent_pages_in_range(0, max_pfn);
reservedpages = max_pfn - totalram_pages - absent_pages;
free_all_bootmem();
after_bootmem = 1;

codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

/* Register memory areas for /proc/kcore */
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
max_pfn << (PAGE_SHIFT-10),
codesize >> 10,
absent_pages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10);
mem_init_print_info(NULL);
}

#ifdef CONFIG_DEBUG_RODATA
@@ -1166,11 +1134,10 @@ void mark_rodata_ro(void)
set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

free_init_pages("unused kernel memory",
free_init_pages("unused kernel",
(unsigned long) __va(__pa_symbol(text_end)),
(unsigned long) __va(__pa_symbol(rodata_start)));

free_init_pages("unused kernel memory",
free_init_pages("unused kernel",
(unsigned long) __va(__pa_symbol(rodata_end)),
(unsigned long) __va(__pa_symbol(_sdata)));
}

@@ -83,10 +83,8 @@ void __init initmem_init(void)
highstart_pfn = max_low_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
num_physpages = highend_pfn;
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
printk(KERN_NOTICE "%ldMB LOWMEM available.\n",

@@ -173,39 +173,16 @@ void __init zones_init(void)

void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize;
unsigned long highmemsize, tmp, ram;

max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET;
max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
highmemsize = 0;

#ifdef CONFIG_HIGHMEM
#error HIGHGMEM not implemented in init.c
#endif

totalram_pages += free_all_bootmem();
free_all_bootmem();

reservedpages = ram = 0;
for (tmp = 0; tmp < max_mapnr; tmp++) {
ram++;
if (PageReserved(mem_map+tmp))
reservedpages++;
}

codesize = (unsigned long) _etext - (unsigned long) _stext;
datasize = (unsigned long) _edata - (unsigned long) _sdata;
initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
"%ldk data, %ldk init %ldk highmem)\n",
nr_free_pages() << (PAGE_SHIFT-10),
ram << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
highmemsize >> 10);
mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
@@ -214,11 +191,11 @@ extern int initrd_is_mapped;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (initrd_is_mapped)
free_reserved_area(start, end, 0, "initrd");
free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

void free_initmem(void)
{
free_initmem_default(0);
free_initmem_default(-1);
}

@@ -59,6 +59,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
if (!disk->fops->getgeo)
return -ENOTTY;

memset(&geo, 0, sizeof(geo));
/*
* We need to set the startsect first, the driver may
* want to override it.

@@ -512,7 +512,7 @@ static void register_disk(struct gendisk *disk)

ddev->parent = disk->driverfs_dev;

dev_set_name(ddev, disk->disk_name);
dev_set_name(ddev, "%s", disk->disk_name);

/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);

@@ -495,7 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)

struct crypto_template *crypto_lookup_template(const char *name)
{
return try_then_request_module(__crypto_lookup_template(name), name);
return try_then_request_module(__crypto_lookup_template(name), "%s",
name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);

@@ -10,10 +10,6 @@ config ASYNC_XOR
select ASYNC_CORE
select XOR_BLOCKS

config ASYNC_MEMSET
tristate
select ASYNC_CORE

config ASYNC_PQ
tristate
select ASYNC_CORE

Some files were not shown because too many files have changed in this diff.