libceph: add support for HASHPSPOOL pool flag

The legacy behavior adds the pgid seed and pool together as the input for
CRUSH.  That is problematic because each pool's PGs end up mapping to the
same OSDs: 1.5 == 2.4 == 3.3 == ...
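
For illustration, a minimal user-space sketch (not part of the kernel change; stable_mod() mirrors ceph_stable_mod() and the pool/PG numbers are made up) shows how addition collapses distinct PGs onto the same CRUSH input:

#include <stdio.h>
#include <stdint.h>

/* stable "mod": x mod b, where bmask is the next power of two minus one */
static uint32_t stable_mod(uint32_t x, uint32_t b, uint32_t bmask)
{
        if ((x & bmask) < b)
                return x & bmask;
        return x & (bmask >> 1);
}

int main(void)
{
        uint32_t pgp_num = 8, mask = 7;   /* hypothetical pools, pgp_num = 8 */
        struct { uint32_t pool, seed; } pgs[] = { {1, 5}, {2, 4}, {3, 3} };

        for (int i = 0; i < 3; i++) {
                /* legacy placement seed: stable_mod(seed) + pool id */
                uint32_t pps = stable_mod(pgs[i].seed, pgp_num, mask) + pgs[i].pool;
                printf("pg %u.%u -> pps %u\n", pgs[i].pool, pgs[i].seed, pps);
        }
        /* all three PGs print pps == 6, so CRUSH hands them the same OSD set */
        return 0;
}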

Instead, if the HASHPSPOOL flag is set, we hash the ps and pool together and
feed that into CRUSH.  This ensures that two adjacent pools will map to
an independent pseudorandom set of OSDs.
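
A matching sketch of the hashed path (again a user-space illustration only; mix32() is just a stand-in for the kernel's crush_hash32_2(CRUSH_HASH_RJENKINS1, ...), and the stable-mod step from the previous sketch is omitted for brevity):

#include <stdio.h>
#include <stdint.h>

/* stand-in 32-bit mixer; the real code uses the CRUSH rjenkins1 hash */
static uint32_t mix32(uint32_t a, uint32_t b)
{
        uint32_t h = a ^ (b * 0x9e3779b9u);
        h ^= h >> 16;
        h *= 0x7feb352du;
        h ^= h >> 15;
        h *= 0x846ca68bu;
        h ^= h >> 16;
        return h;
}

int main(void)
{
        struct { uint32_t pool, seed; } pgs[] = { {1, 5}, {2, 4}, {3, 3} };

        for (int i = 0; i < 3; i++) {
                /* HASHPSPOOL: hash seed and pool id together instead of adding them */
                uint32_t pps = mix32(pgs[i].seed, pgs[i].pool);
                printf("pg %u.%u -> pps %u\n", pgs[i].pool, pgs[i].seed, pps);
        }
        /* the three pps values now (almost certainly) differ, so each pool
         * gets its own pseudorandom placement */
        return 0;
}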

Advertise our support for this via a protocol feature flag.

Signed-off-by: Sage Weil <sage@inktank.com>
Reviewed-by: Alex Elder <elder@inktank.com>
Sage Weil 2013-02-26 10:39:09 -08:00
parent 1b83bef24c
commit 83ca14fdd3
3 changed files with 31 additions and 14 deletions


@@ -34,6 +34,7 @@
 #define CEPH_FEATURE_REPLY_CREATE_INODE (1<<27)
 #define CEPH_FEATURE_OSD_HBMSGS (1<<28)
 #define CEPH_FEATURE_MDSENC (1<<29)
+#define CEPH_FEATURE_OSDHASHPSPOOL (1<<30)
 
 /*
  * Features supported.
@@ -45,7 +46,8 @@
  CEPH_FEATURE_OSDENC | \
  CEPH_FEATURE_CRUSH_TUNABLES | \
  CEPH_FEATURE_CRUSH_TUNABLES2 | \
- CEPH_FEATURE_REPLY_CREATE_INODE)
+ CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_OSDHASHPSPOOL)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT \
  (CEPH_FEATURE_NOSRCADDR | \


@@ -23,6 +23,8 @@ struct ceph_pg {
         uint32_t seed;
 };
 
+#define CEPH_POOL_FLAG_HASHPSPOOL 1
+
 struct ceph_pg_pool_info {
         struct rb_node node;
         s64 id;


@@ -1127,18 +1127,16 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
         struct ceph_pg_mapping *pg;
         struct ceph_pg_pool_info *pool;
         int ruleno;
-        unsigned int poolid, ps, pps, t, r;
+        int r;
+        u32 pps;
 
-        poolid = pgid.pool;
-        ps = pgid.seed;
-
-        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+        pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
         if (!pool)
                 return NULL;
 
         /* pg_temp? */
-        t = ceph_stable_mod(ps, pool->pg_num, pool->pgp_num_mask);
-        pgid.seed = t;
+        pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
+                                    pool->pgp_num_mask);
         pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
         if (pg) {
                 *num = pg->len;
@@ -1149,20 +1147,35 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
         ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
                                  pool->type, pool->size);
         if (ruleno < 0) {
-                pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
-                       poolid, pool->crush_ruleset, pool->type,
+                pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
+                       pgid.pool, pool->crush_ruleset, pool->type,
                        pool->size);
                 return NULL;
         }
 
-        pps = ceph_stable_mod(ps, pool->pgp_num, pool->pgp_num_mask);
-        pps += poolid;
+        if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
+                /* hash pool id and seed so that pool PGs do not overlap */
+                pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
+                                     ceph_stable_mod(pgid.seed, pool->pgp_num,
+                                                     pool->pgp_num_mask),
+                                     pgid.pool);
+        } else {
+                /*
+                 * legacy behavior: add ps and pool together.  this is
+                 * not a great approach because the PGs from each pool
+                 * will overlap on top of each other: 0.5 == 1.4 ==
+                 * 2.3 == ...
+                 */
+                pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
+                                      pool->pgp_num_mask) +
+                        (unsigned)pgid.pool;
+        }
         r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
                           min_t(int, pool->size, *num),
                           osdmap->osd_weight);
         if (r < 0) {
-                pr_err("error %d from crush rule: pool %d ruleset %d type %d"
-                       " size %d\n", r, poolid, pool->crush_ruleset,
+                pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
+                       " size %d\n", r, pgid.pool, pool->crush_ruleset,
                        pool->type, pool->size);
                 return NULL;
         }