Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
"147 patches, based on 7d2a07b769.

Subsystems affected by this patch series: mm (memory-hotplug, rmap,
ioremap, highmem, cleanups, secretmem, kfence, damon, and vmscan),
alpha, percpu, procfs, misc, core-kernel, MAINTAINERS, lib,
checkpatch, epoll, init, nilfs2, coredump, fork, pids, criu, kconfig,
selftests, ipc, and scripts"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (94 commits)
scripts: check_extable: fix typo in user error message
mm/workingset: correct kernel-doc notations
ipc: replace costly bailout check in sysvipc_find_ipc()
selftests/memfd: remove unused variable
Kconfig.debug: drop selecting non-existing HARDLOCKUP_DETECTOR_ARCH
configs: remove the obsolete CONFIG_INPUT_POLLDEV
prctl: allow to setup brk for et_dyn executables
pid: cleanup the stale comment mentioning pidmap_init().
kernel/fork.c: unexport get_{mm,task}_exe_file
coredump: fix memleak in dump_vma_snapshot()
fs/coredump.c: log if a core dump is aborted due to changed file permissions
nilfs2: use refcount_dec_and_lock() to fix potential UAF
nilfs2: fix memory leak in nilfs_sysfs_delete_snapshot_group
nilfs2: fix memory leak in nilfs_sysfs_create_snapshot_group
nilfs2: fix memory leak in nilfs_sysfs_delete_##name##_group
nilfs2: fix memory leak in nilfs_sysfs_create_##name##_group
nilfs2: fix NULL pointer in nilfs_##name##_attr_release
nilfs2: fix memory leak in nilfs_sysfs_create_device_group
trap: cleanup trap_init()
init: move usermodehelper_enable() to populate_rootfs()
...
@@ -54,7 +54,7 @@ static bool asm_test_bit(long nr, const unsigned long *addr)
 
 static int do_for_each_set_bit(unsigned int num_bits)
 {
-	unsigned long *to_test = bitmap_alloc(num_bits);
+	unsigned long *to_test = bitmap_zalloc(num_bits);
 	struct timeval start, end, diff;
 	u64 runtime_us;
 	struct stats fb_time_stats, tb_time_stats;
@@ -139,11 +139,11 @@ static void *c2c_he_zalloc(size_t size)
 	if (!c2c_he)
 		return NULL;
 
-	c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
+	c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
 	if (!c2c_he->cpuset)
 		return NULL;
 
-	c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
+	c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
 	if (!c2c_he->nodeset)
 		return NULL;
 
@@ -2047,7 +2047,7 @@ static int setup_nodes(struct perf_session *session)
 		struct perf_cpu_map *map = n[node].map;
 		unsigned long *set;
 
-		set = bitmap_alloc(c2c.cpus_cnt);
+		set = bitmap_zalloc(c2c.cpus_cnt);
 		if (!set)
 			return -ENOMEM;
 
@@ -2757,7 +2757,7 @@ int cmd_record(int argc, const char **argv)
 
 	if (rec->opts.affinity != PERF_AFFINITY_SYS) {
 		rec->affinity_mask.nbits = cpu__max_cpu();
-		rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+		rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
 		if (!rec->affinity_mask.bits) {
 			pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
 			err = -ENOMEM;
@@ -14,7 +14,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 	unsigned long *bm = NULL;
 	int i;
 
-	bm = bitmap_alloc(nbits);
+	bm = bitmap_zalloc(nbits);
 
 	if (map && bm) {
 		for (i = 0; i < map->nr; i++)
@@ -27,7 +27,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 	unsigned long *bm = NULL;
 	int i;
 
-	bm = bitmap_alloc(nbits);
+	bm = bitmap_zalloc(nbits);
 
 	if (map && bm) {
 		for (i = 0; i < map->nr; i++) {
@@ -25,11 +25,11 @@ int affinity__setup(struct affinity *a)
 {
 	int cpu_set_size = get_cpu_set_size();
 
-	a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
+	a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
 	if (!a->orig_cpus)
 		return -1;
 	sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
-	a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
+	a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
 	if (!a->sched_cpus) {
 		zfree(&a->orig_cpus);
 		return -1;
@@ -278,7 +278,7 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
 	if (ret)
 		return ret;
 
-	set = bitmap_alloc(size);
+	set = bitmap_zalloc(size);
 	if (!set)
 		return -ENOMEM;
 
@@ -1294,7 +1294,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
 
 	size++;
 
-	n->set = bitmap_alloc(size);
+	n->set = bitmap_zalloc(size);
 	if (!n->set) {
 		closedir(dir);
 		return -ENOMEM;
@@ -313,7 +313,7 @@ static int metricgroup__setup_events(struct list_head *groups,
 	struct evsel *evsel, *tmp;
 	unsigned long *evlist_used;
 
-	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
+	evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
 	if (!evlist_used)
 		return -ENOMEM;
 
@@ -106,7 +106,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
 	data = map->aio.data[idx];
 	mmap_len = mmap__mmap_len(map);
 	node_index = cpu__get_node(cpu);
-	node_mask = bitmap_alloc(node_index + 1);
+	node_mask = bitmap_zalloc(node_index + 1);
 	if (!node_mask) {
 		pr_err("Failed to allocate node mask for mbind: error %m\n");
 		return -1;
@@ -258,7 +258,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
 {
 	map->affinity_mask.nbits = cpu__max_cpu();
-	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+	map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
 	if (!map->affinity_mask.bits)
 		return -1;
 
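The perf-tools hunks above are a mechanical rename at the call sites: bitmap_alloc() becomes bitmap_zalloc(), making the zero-initialization explicit in the helper's name. As a rough illustration only, a minimal userspace sketch of such a zeroing allocator could look like the following; the example_* names and the EXAMPLE_BITS_TO_LONGS macro are illustrative assumptions, not the actual tools/include implementation.

#include <stdlib.h>
#include <limits.h>

/* Bits held by one unsigned long word. */
#define EXAMPLE_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
/* Words needed to store nbits bits, rounding up. */
#define EXAMPLE_BITS_TO_LONGS(nbits) \
	(((nbits) + EXAMPLE_BITS_PER_LONG - 1) / EXAMPLE_BITS_PER_LONG)

/* Sketch of a zero-initializing bitmap allocator: calloc() returns
 * zeroed memory, so every bit of the new map starts out cleared. */
static unsigned long *example_bitmap_zalloc(unsigned int nbits)
{
	return calloc(EXAMPLE_BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

/* Matching free helper, for symmetry with the allocator. */
static void example_bitmap_free(unsigned long *bitmap)
{
	free(bitmap);
}

int main(void)
{
	unsigned long *map = example_bitmap_zalloc(128);

	if (!map)
		return 1;
	map[0] |= 1UL << 3;	/* set bit 3 by hand; all other bits stay 0 */
	example_bitmap_free(map);
	return 0;
}

The zeroed map is what call sites like those above generally depend on: they set individual bits after allocation and expect every untouched bit to read back as 0, which the _zalloc name now states up front.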