Mirror of https://git.proxmox.com/git/mirror_ubuntu-kernels.git
While running the high_systime workload of the AIM7 benchmark on a
2-socket 12-core Westmere x86-64 machine running a 3.10-rc4 kernel
(with HT on), it was found that a pretty sizable amount of time was
spent in the SELinux code. Below is the perf trace of a
"perf record -a -s" of a test run at 1500 users:

  5.04%  ls  [kernel.kallsyms]  [k] ebitmap_get_bit
  1.96%  ls  [kernel.kallsyms]  [k] mls_level_isvalid
  1.95%  ls  [kernel.kallsyms]  [k] find_next_bit

ebitmap_get_bit() was the hottest function in the perf-report output.
Both ebitmap_get_bit() and find_next_bit() were, in fact, called by
mls_level_isvalid(). As a result, the mls_level_isvalid() call consumed
8.95% of the total CPU time of all 24 virtual CPUs, which is quite a
lot. The majority of the mls_level_isvalid() invocations came from the
socket creation system call.

Looking at the mls_level_isvalid() function, it checks that all the
bits set in one ebitmap structure are also set in another, and that the
highest set bit is no bigger than the one specified by the given
policydb data structure. It does this in a bit-by-bit manner, so if the
ebitmap structure has many bits set, the iteration loop runs many
times.

The current code can be rewritten to use a similar algorithm as the
ebitmap_contains() function, with an additional check for the highest
set bit. The ebitmap_contains() function was extended to cover an
optional additional check for the highest set bit, and the
mls_level_isvalid() function was modified to call ebitmap_contains().
With that change, the perf trace showed that the CPU time used dropped
to just 0.08% (ebitmap_contains + mls_level_isvalid) of the total,
which is about 100X less than before.

  0.07%  ls  [kernel.kallsyms]  [k] ebitmap_contains
  0.05%  ls  [kernel.kallsyms]  [k] ebitmap_get_bit
  0.01%  ls  [kernel.kallsyms]  [k] mls_level_isvalid
  0.01%  ls  [kernel.kallsyms]  [k] find_next_bit

The remaining ebitmap_get_bit() and find_next_bit() calls are made by
other kernel routines, as the new mls_level_isvalid() function no
longer calls them.

This patch also improves the high_systime AIM7 benchmark result, though
the improvement is not as impressive as is suggested by the reduction
in CPU time spent in the ebitmap functions. The table below shows the
performance change on the 2-socket x86-64 system (with HT on) mentioned
above.

  +--------------+---------------+----------------+-----------------+
  |   Workload   | mean % change | mean % change  | mean % change   |
  |              | 10-100 users  | 200-1000 users | 1100-2000 users |
  +--------------+---------------+----------------+-----------------+
  | high_systime |     +0.1%     |     +0.9%      |     +2.6%       |
  +--------------+---------------+----------------+-----------------+

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
Signed-off-by: Paul Moore <pmoore@redhat.com>
Signed-off-by: Eric Paris <eparis@redhat.com>
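The patch itself is not shown on this page. As a rough, standalone illustration
of the idea, the sketch below (plain userspace C with invented names such as
contains_upto(), fixed-size word arrays instead of the kernel's linked ebitmap
nodes, and the GCC/Clang builtin __builtin_clzll()) performs the same kind of
check word-at-a-time: every set bit of one bitmap must also be set in the
other, and no set bit may reach the policy-imposed upper bound.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NWORDS 4				/* fixed-size demo bitmaps */
#define BITS_PER_WORD (8 * sizeof(uint64_t))

/* Word-at-a-time check: every bit set in e2 must also be set in e1,
 * and no bit set in e2 may be >= limit (one past the highest legal bit). */
static bool contains_upto(const uint64_t *e1, const uint64_t *e2, unsigned limit)
{
	for (unsigned i = 0; i < NWORDS; i++) {
		if (e2[i] & ~e1[i])
			return false;		/* a bit in e2 is missing from e1 */
		if (e2[i]) {
			/* index of the highest set bit in this word */
			unsigned hi = i * BITS_PER_WORD +
				(BITS_PER_WORD - 1 - __builtin_clzll(e2[i]));
			if (hi >= limit)
				return false;	/* bit beyond the allowed bound */
		}
	}
	return true;
}

int main(void)
{
	uint64_t valid[NWORDS] = { 0xffULL, 0, 0, 0 };	/* bits 0..7 allowed */
	uint64_t level[NWORDS] = { 0x05ULL, 0, 0, 0 };	/* bits 0 and 2 set */

	printf("%d\n", contains_upto(valid, level, 8));	/* 1: contained, within bound */
	level[1] = 1;		/* set bit 64: not in 'valid' and above the bound */
	printf("%d\n", contains_upto(valid, level, 8));	/* 0 */
	return 0;
}

The kernel version walks the two linked node lists rather than flat arrays, and
the bound corresponds to the last_e2bit parameter visible in the
ebitmap_contains() prototype in the header below.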
146 lines
4.1 KiB
C
/*
 * An extensible bitmap is a bitmap that supports an
 * arbitrary number of bits.  Extensible bitmaps are
 * used to represent sets of values, such as types,
 * roles, categories, and classes.
 *
 * Each extensible bitmap is implemented as a linked
 * list of bitmap nodes, where each bitmap node has
 * an explicitly specified starting bit position within
 * the total bitmap.
 *
 * Author : Stephen Smalley, <sds@epoch.ncsc.mil>
 */
#ifndef _SS_EBITMAP_H_
#define _SS_EBITMAP_H_

#include <net/netlabel.h>

#define EBITMAP_UNIT_NUMS	((32 - sizeof(void *) - sizeof(u32))	\
					/ sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE	BITS_PER_LONG
#define EBITMAP_SIZE		(EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
#define EBITMAP_BIT		1ULL
#define EBITMAP_SHIFT_UNIT_SIZE(x)	\
	(((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)

struct ebitmap_node {
	struct ebitmap_node *next;
	unsigned long maps[EBITMAP_UNIT_NUMS];
	u32 startbit;
};

struct ebitmap {
	struct ebitmap_node *node;	/* first node in the bitmap */
	u32 highbit;	/* highest position in the total bitmap */
};

#define ebitmap_length(e) ((e)->highbit)

static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
						  struct ebitmap_node **n)
{
	unsigned int ofs;

	for (*n = e->node; *n; *n = (*n)->next) {
		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
		if (ofs < EBITMAP_SIZE)
			return (*n)->startbit + ofs;
	}
	return ebitmap_length(e);
}

static inline void ebitmap_init(struct ebitmap *e)
{
	memset(e, 0, sizeof(*e));
}

static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
						 struct ebitmap_node **n,
						 unsigned int bit)
{
	unsigned int ofs;

	ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
	if (ofs < EBITMAP_SIZE)
		return ofs + (*n)->startbit;

	for (*n = (*n)->next; *n; *n = (*n)->next) {
		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
		if (ofs < EBITMAP_SIZE)
			return ofs + (*n)->startbit;
	}
	return ebitmap_length(e);
}

#define EBITMAP_NODE_INDEX(node, bit)	\
	(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
#define EBITMAP_NODE_OFFSET(node, bit)	\
	(((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)

static inline int ebitmap_node_get_bit(struct ebitmap_node *n,
				       unsigned int bit)
{
	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);

	BUG_ON(index >= EBITMAP_UNIT_NUMS);
	if ((n->maps[index] & (EBITMAP_BIT << ofs)))
		return 1;
	return 0;
}

static inline void ebitmap_node_set_bit(struct ebitmap_node *n,
					unsigned int bit)
{
	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);

	BUG_ON(index >= EBITMAP_UNIT_NUMS);
	n->maps[index] |= (EBITMAP_BIT << ofs);
}

static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
					unsigned int bit)
{
	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);

	BUG_ON(index >= EBITMAP_UNIT_NUMS);
	n->maps[index] &= ~(EBITMAP_BIT << ofs);
}

#define ebitmap_for_each_positive_bit(e, n, bit)	\
	for (bit = ebitmap_start_positive(e, &n);	\
	     bit < ebitmap_length(e);			\
	     bit = ebitmap_next_positive(e, &n, bit))	\

int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
int ebitmap_read(struct ebitmap *e, void *fp);
int ebitmap_write(struct ebitmap *e, void *fp);

#ifdef CONFIG_NETLABEL
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_secattr_catmap **catmap);
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_secattr_catmap *catmap);
#else
static inline int ebitmap_netlbl_export(struct ebitmap *ebmap,
					struct netlbl_lsm_secattr_catmap **catmap)
{
	return -ENOMEM;
}
static inline int ebitmap_netlbl_import(struct ebitmap *ebmap,
					struct netlbl_lsm_secattr_catmap *catmap)
{
	return -ENOMEM;
}
#endif

#endif	/* _SS_EBITMAP_H_ */
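For orientation, here is a minimal, hypothetical usage sketch (not part of
ebitmap.h) of how kernel code that includes this header might drive the API it
declares; the function name ebitmap_usage_demo() and the log message are
invented for illustration, and the usual kernel headers are assumed to be
available.

#include <linux/errno.h>
#include <linux/printk.h>
#include "ebitmap.h"

static int ebitmap_usage_demo(void)
{
	struct ebitmap e;
	struct ebitmap_node *n;
	unsigned int bit;
	int rc;

	ebitmap_init(&e);			/* empty bitmap, no nodes yet */

	rc = ebitmap_set_bit(&e, 5, 1);		/* may allocate a node, can fail */
	if (rc)
		goto out;
	rc = ebitmap_set_bit(&e, 300, 1);	/* lands in a later node */
	if (rc)
		goto out;

	/* Walk only the bits that are set, skipping empty nodes. */
	ebitmap_for_each_positive_bit(&e, n, bit)
		pr_info("bit %u is set\n", bit);

	rc = ebitmap_get_bit(&e, 5) ? 0 : -EINVAL;
out:
	ebitmap_destroy(&e);			/* frees the node list */
	return rc;
}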