Pull out string hash to <linux/stringhash.h>

... so they can be used without the rest of <linux/dcache.h>

The hashlen_* macros will make sense next patch.

Signed-off-by: George Spelvin <linux@sciencehorizons.net>
Author: George Spelvin <linux@sciencehorizons.net>
Date:   2016-05-20 07:26:00 -04:00
Commit: f4bcbe792b (parent 0fed3ac866)

2 changed files, 73 insertions(+), 26 deletions(-)

--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h

@@ -10,6 +10,7 @@
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
 #include <linux/lockref.h>
+#include <linux/stringhash.h>
 
 struct path;
 struct vfsmount;
@@ -52,9 +53,6 @@ struct qstr {
 };
 
 #define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define hashlen_hash(hashlen) ((u32) (hashlen))
-#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
-#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
 
 struct dentry_stat_t {
 	long nr_dentry;
@@ -65,29 +63,6 @@ struct dentry_stat_t {
 };
 extern struct dentry_stat_t dentry_stat;
 
-/* Name hashing routines. Initial hash value */
-/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash()		0
-
-/* partial hash update function. Assume roughly 4 bits per character */
-static inline unsigned long
-partial_name_hash(unsigned long c, unsigned long prevhash)
-{
-	return (prevhash + (c << 4) + (c >> 4)) * 11;
-}
-
-/*
- * Finally: cut down the number of bits to a int value (and try to avoid
- * losing bits)
- */
-static inline unsigned long end_name_hash(unsigned long hash)
-{
-	return (unsigned int) hash;
-}
-
-/* Compute the hash for a name string. */
-extern unsigned int full_name_hash(const unsigned char *, unsigned int);
-
 /*
  * Try to keep struct dentry aligned on 64 byte cachelines (this will
  * give reasonable cacheline footprint with larger lines without the

--- /dev/null
+++ b/include/linux/stringhash.h

@@ -0,0 +1,72 @@
+#ifndef __LINUX_STRINGHASH_H
+#define __LINUX_STRINGHASH_H
+
+#include <linux/types.h>
+
+/*
+ * Routines for hashing strings of bytes to a 32-bit hash value.
+ *
+ * These hash functions are NOT GUARANTEED STABLE between kernel
+ * versions, architectures, or even repeated boots of the same kernel.
+ * (E.g. they may depend on boot-time hardware detection or be
+ * deliberately randomized.)
+ *
+ * They are also not intended to be secure against collisions caused by
+ * malicious inputs; much slower hash functions are required for that.
+ *
+ * They are optimized for pathname components, meaning short strings.
+ * Even if a majority of files have longer names, the dynamic profile of
+ * pathname components skews short due to short directory names.
+ * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
+ */
+
+/*
+ * Version 1: one byte at a time.  Example of use:
+ *
+ * unsigned long hash = init_name_hash;
+ * while (*p)
+ *	hash = partial_name_hash(tolower(*p++), hash);
+ * hash = end_name_hash(hash);
+ *
+ * Although this is designed for bytes, fs/hfsplus/unicode.c
+ * abuses it to hash 16-bit values.
+ */
+
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash()		0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+	return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to a int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+	return (unsigned int)hash;
+}
+
+/*
+ * Version 2: One word (32 or 64 bits) at a time.
+ * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
+ * exists, which describes major Linux platforms like x86 and ARM), then
+ * this computes a different hash function much faster.
+ *
+ * If not set, this falls back to a wrapper around the preceding.
+ */
+extern unsigned int full_name_hash(const unsigned char *, unsigned int);
+
+/*
+ * A hash_len is a u64 with the hash of a string in the low
+ * half and the length in the high half.
+ */
+#define hashlen_hash(hashlen) ((u32)(hashlen))
+#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
+
+#endif	/* __LINUX_STRINGHASH_H */
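Not part of the commit: the sketch below is a minimal, self-contained user-space restatement of the byte-at-a-time helpers and the hashlen_* macros from the new header, showing the "Version 1" usage pattern documented in the comment and how a hash/length pair packs into a u64. The typedefs, the sample name, and the main() harness are illustrative assumptions only, not kernel code.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define init_name_hash()	0

/* partial hash update function: mix one byte into the running hash */
static inline unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
	return (prevhash + (c << 4) + (c >> 4)) * 11;
}

/* fold the accumulated hash down to 32 bits */
static inline unsigned long end_name_hash(unsigned long hash)
{
	return (unsigned int)hash;
}

/* hash in the low 32 bits, length in the high 32 bits */
#define hashlen_hash(hashlen)	((u32)(hashlen))
#define hashlen_len(hashlen)	((u32)((hashlen) >> 32))
#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))

int main(void)
{
	const char *name = "libsesquipedalianism.so.3.141";	/* sample only */
	const char *p = name;
	unsigned long hash = init_name_hash();
	unsigned int len = 0;

	/* the "Version 1" loop from the header comment */
	while (*p) {
		hash = partial_name_hash(tolower((unsigned char)*p++), hash);
		len++;
	}
	hash = end_name_hash(hash);

	/* pack hash and length into one u64, then pull them back out */
	u64 hash_len = hashlen_create(hash, len);
	printf("hash=%08x len=%u\n",
	       (unsigned)hashlen_hash(hash_len),
	       (unsigned)hashlen_len(hash_len));
	return 0;
}

In the kernel this pairing is what the hashlen_* macros exist for: a name's hash and length travel together in one 64-bit value, which the following patch puts to use.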