Files
linux/security/selinux/ss/hashtab.c
Ondrej Mosnacek 24def7bb92 selinux: prepare for inlining of hashtab functions
Refactor searching and inserting into hashtabs to pave the way for
converting hashtab_search() and hashtab_insert() to inline functions in
the next patch. This will avoid indirect calls and allow the compiler to
better optimize individual callers, leading to a significant performance
improvement.

In order to avoid the indirect calls, the key hashing and comparison
callbacks need to be extracted from the hashtab struct and passed
directly to hashtab_search()/_insert() by the callers so that the
callback address is always known at compile time. The kernel's
rhashtable library (<linux/rhashtable*.h>) does the same thing.

This of course makes the hashtab functions slightly easier to misuse by
passing a wrong callback set, but unfortunately there is no better way
to implement a hash table that is both generic and efficient in C. This
patch tries to somewhat mitigate this by only calling the hashtab
functions in the same file where the corresponding callbacks are
defined (wrapping them into more specialized functions as needed).
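
As a concrete sketch of the resulting pattern (illustrative names only, assuming the hashtab_key_params callback signatures from hashtab.h, i.e. u32 (*hash)(const void *) and int (*cmp)(const void *, const void *)):

static u32 example_hash(const void *key)
{
        const char *p;
        u32 val = 0;

        for (p = key; *p; p++)
                val = (val << 4 | val >> 28) ^ *p;
        return val;
}

static int example_cmp(const void *key1, const void *key2)
{
        return strcmp(key1, key2);
}

static const struct hashtab_key_params example_key_params = {
        .hash = example_hash,
        .cmp  = example_cmp,
};

/* Specialized wrapper: callers in other files never see the callbacks,
 * and hashtab_insert() always receives them as compile-time constants.
 */
int example_insert(struct hashtab *h, char *name, void *datum)
{
        return hashtab_insert(h, name, datum, example_key_params);
}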

Note that this patch doesn't bring any benefit without also moving the
definitions of hashtab_search() and hashtab_insert() to the header file, which
is done in a follow-up patch for easier review of the hashtab.c changes
in this patch.

Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
Acked-by: Stephen Smalley <stephen.smalley.work@gmail.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
2020-07-09 19:05:36 -04:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the hash table type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include "hashtab.h"

static struct kmem_cache *hashtab_node_cachep;

/*
 * Here we simply round the number of elements up to the nearest power of two.
 * I also tried other options, like rounding down or rounding to the closest
 * power of two (up or down based on which is closer), but I was unable to
 * find any significant difference in lookup/insert performance that would
 * justify switching to a different (less intuitive) formula. It could be that
 * a different formula is actually better, but any future changes here should
 * be supported with performance/memory usage data.
 *
 * The total memory used by the htable arrays (only) with Fedora policy loaded
 * is approximately 163 KB at the time of writing.
 */
static u32 hashtab_compute_size(u32 nel)
{
        return nel == 0 ? 0 : roundup_pow_of_two(nel);
}
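
Since the table size is always a power of two (or zero), bucket selection below can use a cheap bitmask instead of a modulo. A brief worked example; note that the nel == 0 case must be special-cased because the kernel documents roundup_pow_of_two() as undefined for n == 0:

/*
 * nel_hint = 600 -> size = roundup_pow_of_two(600) = 1024
 *                   bucket = hash & (size - 1) = hash & 0x3ff
 * nel_hint = 0   -> size = 0 (no array is allocated; inserts fail
 *                   with -EINVAL and lookups return NULL)
 */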

int hashtab_init(struct hashtab *h, u32 nel_hint)
{
        h->size = hashtab_compute_size(nel_hint);
        h->nel = 0;

        if (!h->size)
                return 0;

        h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
        return h->htable ? 0 : -ENOMEM;
}

int hashtab_insert(struct hashtab *h, void *key, void *datum,
                   struct hashtab_key_params key_params)
{
        u32 hvalue;
        struct hashtab_node *prev, *cur, *newnode;

        cond_resched();

        if (!h->size || h->nel == HASHTAB_MAX_NODES)
                return -EINVAL;

        hvalue = key_params.hash(key) & (h->size - 1);
        prev = NULL;
        cur = h->htable[hvalue];
        /* Chains are kept sorted by key, so we can stop at the first
         * node that compares greater than the new key.
         */
        while (cur) {
                int cmp = key_params.cmp(key, cur->key);

                if (cmp == 0)
                        return -EEXIST;
                if (cmp < 0)
                        break;
                prev = cur;
                cur = cur->next;
        }

        newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
        if (!newnode)
                return -ENOMEM;
        newnode->key = key;
        newnode->datum = datum;
        /* Link the new node before "cur" to preserve the sort order. */
        if (prev) {
                newnode->next = prev->next;
                prev->next = newnode;
        } else {
                newnode->next = h->htable[hvalue];
                h->htable[hvalue] = newnode;
        }
        h->nel++;

        return 0;
}

void *hashtab_search(struct hashtab *h, const void *key,
                     struct hashtab_key_params key_params)
{
        u32 hvalue;
        struct hashtab_node *cur;

        if (!h->size)
                return NULL;

        hvalue = key_params.hash(key) & (h->size - 1);
        cur = h->htable[hvalue];
        while (cur) {
                int cmp = key_params.cmp(key, cur->key);

                if (cmp == 0)
                        return cur->datum;
                /* The chain is sorted; past this point the key cannot match. */
                if (cmp < 0)
                        break;
                cur = cur->next;
        }

        return NULL;
}
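
A minimal, hypothetical caller tying the pieces together (reusing the illustrative example_key_params from the sketch in the commit message above; none of these names exist in the tree):

static int example_usage(void)
{
        struct hashtab table;
        static char name[] = "alpha";
        static int value = 42;
        int rc;

        rc = hashtab_init(&table, 128);        /* hint: ~128 expected entries */
        if (rc)
                return rc;

        rc = hashtab_insert(&table, name, &value, example_key_params);
        if (rc)                /* -EEXIST if the key is already present */
                goto out;

        if (hashtab_search(&table, "alpha", example_key_params) != &value)
                rc = -ENOENT;
out:
        hashtab_destroy(&table);        /* frees the nodes, not keys/data */
        return rc;
}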

void hashtab_destroy(struct hashtab *h)
{
        u32 i;
        struct hashtab_node *cur, *temp;

        for (i = 0; i < h->size; i++) {
                cur = h->htable[i];
                while (cur) {
                        temp = cur;
                        cur = cur->next;
                        kmem_cache_free(hashtab_node_cachep, temp);
                }
                h->htable[i] = NULL;
        }

        kfree(h->htable);
        h->htable = NULL;
}

int hashtab_map(struct hashtab *h,
                int (*apply)(void *k, void *d, void *args),
                void *args)
{
        u32 i;
        int ret;
        struct hashtab_node *cur;

        for (i = 0; i < h->size; i++) {
                cur = h->htable[i];
                while (cur) {
                        ret = apply(cur->key, cur->datum, args);
                        if (ret)
                                return ret;
                        cur = cur->next;
                }
        }

        return 0;
}
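
For reference, a hypothetical hashtab_map() callback (not from the tree): it counts entries with a non-NULL datum, with args pointing at the running counter. Returning non-zero from the callback aborts the walk and is propagated to the caller:

static int count_present(void *k, void *d, void *args)
{
        u32 *count = args;

        if (d)
                (*count)++;
        return 0;
}

/* u32 n = 0; int rc = hashtab_map(&table, count_present, &n); */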

void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
{
        u32 i, chain_len, slots_used, max_chain_len;
        struct hashtab_node *cur;

        slots_used = 0;
        max_chain_len = 0;
        for (i = 0; i < h->size; i++) {
                cur = h->htable[i];
                if (cur) {
                        slots_used++;
                        chain_len = 0;
                        while (cur) {
                                chain_len++;
                                cur = cur->next;
                        }

                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
                }
        }

        info->slots_used = slots_used;
        info->max_chain_len = max_chain_len;
}
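
A small, hypothetical debug helper showing how the stats might be consumed (example_dump_stats is not in the tree):

static void example_dump_stats(struct hashtab *h)
{
        struct hashtab_info info;

        hashtab_stat(h, &info);
        pr_debug("%u entries, %u/%u buckets used, longest chain %u\n",
                 h->nel, info.slots_used, h->size, info.max_chain_len);
}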

void __init hashtab_cache_init(void)
{
        hashtab_node_cachep = kmem_cache_create("hashtab_node",
                                                sizeof(struct hashtab_node),
                                                0, SLAB_PANIC, NULL);
}