hashmap: allow rehash failure

There is a chance that rehash may fail even with a decent hash function
and no collision attack in progress.

Allow the hashmap to be rehashed for a second time during insert,
and ignore rehash failures during delete.
This commit is contained in:
CismonX 2025-04-03 09:51:09 +08:00
parent 52b7f87536
commit 5ad23ac8f6
No known key found for this signature in database
GPG key ID: 3094873E29A482FB

View file

@@ -33,7 +33,6 @@
#include "hashmap.h" #include "hashmap.h"
#include <limits.h> #include <limits.h>
#include <stdbool.h>
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> #include <stdlib.h>
@@ -110,7 +109,7 @@ struct hashmap {
static int count_tz (unsigned long); static int count_tz (unsigned long);
static int find_entry (struct hashmap const *, void const *, struct bucket **); static int find_entry (struct hashmap const *, void const *, struct bucket **);
static int make_room (struct bucket *, struct bucket const *, unsigned); static int make_room (struct bucket *, struct bucket const *, unsigned);
static int rehash (struct hashmap *, bool); static int rehash (struct hashmap *, unsigned);
// Forward declaration end // Forward declaration end
static int static int
@@ -240,16 +239,11 @@ make_room (
static int static int
rehash ( rehash (
struct hashmap *map, struct hashmap *map,
bool grow unsigned new_exp
) { ) {
unsigned new_exp = map->exp; if (unlikely(new_exp > EXP_MAX)) {
if (grow) { log_printf("%p: new size exceeds max limit", (void *)map);
if (unlikely(++new_exp > EXP_MAX)) { return -1;
log_puts("hashmap size exceeds max limit");
return -1;
}
} else {
--new_exp;
} }
size_t new_nbuckets = BUCKET_CNT(new_exp); size_t new_nbuckets = BUCKET_CNT(new_exp);
@@ -271,8 +265,9 @@ rehash (
int hop_idx = make_room(new_home, new_buckets + new_nbuckets, new_exp); int hop_idx = make_room(new_home, new_buckets + new_nbuckets, new_exp);
if (unlikely(hop_idx < 0)) { if (unlikely(hop_idx < 0)) {
log_puts("collision attack or poor hash function"); log_printf("%p: rehash failed", (void *)map);
goto fail; free(new_buckets);
return -1;
} }
BIT_SET(new_home->bits, hop_idx); BIT_SET(new_home->bits, hop_idx);
@@ -286,10 +281,6 @@ rehash (
map->num_buckets = new_nbuckets; map->num_buckets = new_nbuckets;
map->exp = new_exp; map->exp = new_exp;
return 0; return 0;
fail:
free(new_buckets);
return -1;
} }
struct hashmap * struct hashmap *
@@ -378,7 +369,7 @@ hashmap_delete (
if (buckets_used < (map->num_buckets >> 3)) { if (buckets_used < (map->num_buckets >> 3)) {
debug_printf("%p: rehashing: %zu / %zu", (void *)map, debug_printf("%p: rehashing: %zu / %zu", (void *)map,
buckets_used, map->num_buckets - (map->exp - 1)); buckets_used, map->num_buckets - (map->exp - 1));
xassert(0 == rehash(map, false)); rehash(map, map->exp - 1);
} }
} }
@@ -396,7 +387,9 @@ hashmap_insert (
if (unlikely(hop_idx < 0)) { if (unlikely(hop_idx < 0)) {
debug_printf("%p: rehashing: %zu / %zu", (void *)map, debug_printf("%p: rehashing: %zu / %zu", (void *)map,
map->num_used, map->num_buckets - (exp - 1)); map->num_used, map->num_buckets - (exp - 1));
xassert(0 == rehash(map, true)); if (unlikely(0 != rehash(map, ++exp))) {
xassert(0 == rehash(map, ++exp));
}
hashmap_insert(map, hashcode, entry); hashmap_insert(map, hashcode, entry);
return; return;
} }