Re: [PATCH] selinux: optimize MLS context to string conversion

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 7/30/19 8:48 AM, Ondrej Mosnacek wrote:
When mls_compute_context_len() or mls_sid_to_context() encounters a
context with large category ranges, it behaves suboptimally - it
traverses each positive bit of the category bitmap, each time calling
find_next_bit() again.

This has a large performance impact on UNIX datagram sockets with
SO_PASSSEC set, since here the peer context needs to be converted to
string for each received datagram. See [1] for more information.

This patch introduces a new helper for ebitmap traversal,
ebitmap_for_each_positive_range(), which makes it possible to iterate
efficiently over ranges of consecutive positive bits rather than over
individual bits, and converts the two mls_*() functions to use it.

[1] https://bugzilla.redhat.com/show_bug.cgi?id=1733259

Reported-by: Michal Sekletar <msekleta@xxxxxxxxxx>
Signed-off-by: Ondrej Mosnacek <omosnace@xxxxxxxxxx>
---
  security/selinux/ss/ebitmap.h | 46 +++++++++++++++++++++
  security/selinux/ss/mls.c     | 76 +++++++++++++----------------------
  2 files changed, 73 insertions(+), 49 deletions(-)

diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 6aa7cf6a2197..a415741cb206 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -42,6 +42,10 @@ struct ebitmap {
  	u32 highbit;	/* highest position in the total bitmap */
  };
+/*
+ * A contiguous range of bit positions, as the half-open interval
+ * [start, end): 'start' is the first bit in the range and 'end' is
+ * one past the last.
+ */
+struct ebitmap_range {
+	unsigned int start, end;
+};
+
  #define ebitmap_length(e) ((e)->highbit)
static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
@@ -80,6 +84,43 @@ static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
  	return ebitmap_length(e);
  }
+/*
+ * Find the first zero (negative) bit strictly after @bit, using and
+ * updating the node cursor @n (which must point at the node containing
+ * @bit on entry).  Returns ebitmap_length(e) if no zero bit is found
+ * before the end of the bitmap.
+ */
+static inline unsigned int ebitmap_next_negative(struct ebitmap *e,
+						 struct ebitmap_node **n,
+						 unsigned int bit)
+{
+	unsigned int ofs;
+
+	ofs = find_next_zero_bit((*n)->maps, EBITMAP_SIZE,
+				 bit - (*n)->startbit + 1);
+	if (ofs < EBITMAP_SIZE)
+		return ofs + (*n)->startbit;
+
+	while ((*n)->next) {
+		/*
+		 * A hole between adjacent nodes contains only zero bits,
+		 * so the next negative bit is the first bit past the end
+		 * of the current node's range.  Do not advance *n here:
+		 * the caller's next ebitmap_next_positive() lookup must
+		 * restart from this node so that the bit offset it
+		 * computes does not underflow.
+		 */
+		if ((*n)->next->startbit > (*n)->startbit + EBITMAP_SIZE)
+			return (*n)->startbit + EBITMAP_SIZE;
+
+		*n = (*n)->next;
+		ofs = find_first_zero_bit((*n)->maps, EBITMAP_SIZE);
+		if (ofs < EBITMAP_SIZE)
+			return ofs + (*n)->startbit;
+	}
+	return ebitmap_length(e);
+}

This is likely moot given that the patch was deferred on exploring the cache option, but if/when this patch is revisited, you don't seem to account for the possibility that there could be a hole between the bitmaps represented by (*n) and (*n)->next, and that might be where the next negative/zero bit is.

+
+/*
+ * Initialize @range to the first run of consecutive set bits in @e,
+ * positioning the node cursor @n.  The run is the half-open interval
+ * [range->start, range->end); if the bitmap contains no set bit,
+ * range->start (== range->end) is >= ebitmap_length(e).
+ */
+static inline void ebitmap_start_positive_range(struct ebitmap *e,
+						struct ebitmap_node **n,
+						struct ebitmap_range *range)
+{
+	range->end = range->start = ebitmap_start_positive(e, n);
+	if (range->start < ebitmap_length(e))
+		range->end = ebitmap_next_negative(e, n, range->start);
+}
+
+/*
+ * Advance @range to the next run of consecutive set bits after the
+ * current range->end, updating the node cursor @n.  The new run is the
+ * half-open interval [range->start, range->end); range->start reaches
+ * ebitmap_length(e) once no further set bits exist.
+ */
+static inline void ebitmap_next_positive_range(struct ebitmap *e,
+					       struct ebitmap_node **n,
+					       struct ebitmap_range *range)
+{
+	range->end = range->start = ebitmap_next_positive(e, n, range->end);
+	if (range->start < ebitmap_length(e))
+		range->end = ebitmap_next_negative(e, n, range->start);
+}
+
  #define EBITMAP_NODE_INDEX(node, bit)	\
  	(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
  #define EBITMAP_NODE_OFFSET(node, bit)	\
@@ -122,6 +163,11 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
  	     bit < ebitmap_length(e);			\
  	     bit = ebitmap_next_positive(e, &n, bit))	\
+/*
+ * Iterate over every maximal run of consecutive set bits in @e.
+ * @range is a struct ebitmap_range holding the current half-open run
+ * [range.start, range.end); @n is the struct ebitmap_node * cursor.
+ */
+#define ebitmap_for_each_positive_range(e, n, range) \
+	for (ebitmap_start_positive_range(e, &n, &range);	\
+	     range.start < ebitmap_length(e);			\
+	     ebitmap_next_positive_range(e, &n, &range))	\
+
  int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
  int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
  int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 5e05f5b902d7..3abd6b950c66 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -35,10 +35,12 @@
   */
  int mls_compute_context_len(struct policydb *p, struct context *context)
  {
-	int i, l, len, head, prev;
+	int l, len;
  	char *nm;
  	struct ebitmap *e;
  	struct ebitmap_node *node;
+	struct ebitmap_range range;
+	unsigned int rlen;
if (!p->mls_enabled)
  		return 0;
@@ -49,24 +51,14 @@ int mls_compute_context_len(struct policydb *p, struct context *context)
  		len += strlen(sym_name(p, SYM_LEVELS, index_sens - 1));
/* categories */
-		head = -2;
-		prev = -2;
  		e = &context->range.level[l].cat;
-		ebitmap_for_each_positive_bit(e, node, i) {
-			if (i - prev > 1) {
-				/* one or more negative bits are skipped */
-				if (head != prev) {
-					nm = sym_name(p, SYM_CATS, prev);
-					len += strlen(nm) + 1;
-				}
-				nm = sym_name(p, SYM_CATS, i);
+		ebitmap_for_each_positive_range(e, node, range) {
+			rlen = range.end - range.start;
+			if (rlen > 1) {
+				nm = sym_name(p, SYM_CATS, range.start);
  				len += strlen(nm) + 1;
-				head = i;
  			}
-			prev = i;
-		}
-		if (prev != head) {
-			nm = sym_name(p, SYM_CATS, prev);
+			nm = sym_name(p, SYM_CATS, range.end - 1);
  			len += strlen(nm) + 1;
  		}
  		if (l == 0) {
@@ -91,9 +83,11 @@ void mls_sid_to_context(struct policydb *p,
  			char **scontext)
  {
  	char *scontextp, *nm;
-	int i, l, head, prev;
+	int l, first;
  	struct ebitmap *e;
  	struct ebitmap_node *node;
+	struct ebitmap_range range;
+	unsigned int rlen;
if (!p->mls_enabled)
  		return;
@@ -109,43 +103,27 @@ void mls_sid_to_context(struct policydb *p,
  		scontextp += strlen(scontextp);
/* categories */
-		head = -2;
-		prev = -2;
+		first = 1;
  		e = &context->range.level[l].cat;
-		ebitmap_for_each_positive_bit(e, node, i) {
-			if (i - prev > 1) {
-				/* one or more negative bits are skipped */
-				if (prev != head) {
-					if (prev - head > 1)
-						*scontextp++ = '.';
-					else
-						*scontextp++ = ',';
-					nm = sym_name(p, SYM_CATS, prev);
-					strcpy(scontextp, nm);
-					scontextp += strlen(nm);
-				}
-				if (prev < 0)
-					*scontextp++ = ':';
-				else
-					*scontextp++ = ',';
-				nm = sym_name(p, SYM_CATS, i);
-				strcpy(scontextp, nm);
-				scontextp += strlen(nm);
-				head = i;
-			}
-			prev = i;
-		}
-
-		if (prev != head) {
-			if (prev - head > 1)
-				*scontextp++ = '.';
-			else
+		ebitmap_for_each_positive_range(e, node, range) {
+			if (first) {
+				first = 0;
+				*scontextp++ = ':';
+			} else {
  				*scontextp++ = ',';
-			nm = sym_name(p, SYM_CATS, prev);
+			}
+			nm = sym_name(p, SYM_CATS, range.start);
  			strcpy(scontextp, nm);
  			scontextp += strlen(nm);
-		}
+			rlen = range.end - range.start;
+			if (rlen > 1) {
+				*scontextp++ = rlen > 2 ? '.' : ',';
+				nm = sym_name(p, SYM_CATS, range.end - 1);
+				strcpy(scontextp, nm);
+				scontextp += strlen(nm);
+			}
+		}
  		if (l == 0) {
  			if (mls_level_eq(&context->range.level[0],
  					 &context->range.level[1]))





[Index of Archives]     [Selinux Refpolicy]     [Linux SGX]     [Fedora Users]     [Fedora Desktop]     [Yosemite Photos]     [Yosemite Camping]     [Yosemite Campsites]     [KDE Users]     [Gnome Users]

  Powered by Linux