Skip to content

Commit 62bb482

Browse files
Andreagit97 authored and poiana committed
fix(libscap): use the correct memory barrier for ARM64
Signed-off-by: Andrea Terzolo <andreaterzolo3@gmail.com>
1 parent 4c907c1 commit 62bb482

File tree

3 files changed

+63
-28
lines changed

3 files changed

+63
-28
lines changed

userspace/libpman/src/ringbuffer_definitions.h

Lines changed: 1 addition & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ limitations under the License.
2020

2121
#include "state.h"
2222
#include <linux/bpf.h>
23+
#include <libscap/scap_barrier.h>
2324

2425
/* Taken from libbpf: /src/ringbuf.c */
2526
struct ring {
@@ -50,23 +51,3 @@ static inline int roundup_len(uint32_t len) {
5051
/* round up to 8 byte alignment */
5152
return (len + 7) / 8 * 8;
5253
}
53-
54-
/* Taken from libbpf: `include/linux/compiler.h` */
55-
56-
#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
57-
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
58-
59-
#define barrier() asm volatile("" ::: "memory")
60-
61-
#define smp_store_release(p, v) \
62-
do { \
63-
barrier(); \
64-
WRITE_ONCE(*p, v); \
65-
} while(0)
66-
67-
#define smp_load_acquire(p) \
68-
({ \
69-
typeof(*p) ___p = READ_ONCE(*p); \
70-
barrier(); \
71-
___p; \
72-
})

userspace/libscap/engine/bpf/scap_bpf.h

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ limitations under the License.
2020

2121
#include <libscap/compat/bpf.h>
2222
#include <libscap/compat/perf_event.h>
23+
#include <libscap/scap_barrier.h>
2324

2425
struct perf_event_sample {
2526
struct perf_event_header header;
@@ -49,9 +50,7 @@ static inline void scap_bpf_get_buf_pointers(scap_device *dev,
4950
*phead = header->data_head;
5051
*ptail = header->data_tail;
5152

52-
// clang-format off
53-
asm volatile("" ::: "memory");
54-
// clang-format on
53+
mem_barrier();
5554

5655
uint64_t cons = *ptail % header->data_size; // consumer position
5756
uint64_t prod = *phead % header->data_size; // producer position
@@ -154,9 +153,7 @@ static inline void scap_bpf_advance_tail(struct scap_device *dev) {
154153

155154
header = (struct perf_event_mmap_page *)dev->m_buffer;
156155

157-
// clang-format off
158-
asm volatile("" ::: "memory");
159-
// clang-format on
156+
mem_barrier();
160157

161158
ASSERT(dev->m_lastreadsize > 0);
162159
/* `header->data_tail` is the consumer position. */

userspace/libscap/linux/barrier.h

Lines changed: 59 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
// SPDX-License-Identifier: Apache-2.0
22
/*
3-
Copyright (C) 2023 The Falco Authors.
3+
Copyright (C) 2024 The Falco Authors.
44
55
Licensed under the Apache License, Version 2.0 (the "License");
66
you may not use this file except in compliance with the License.
@@ -17,4 +17,61 @@ limitations under the License.
1717
*/
1818
#pragma once
1919

20-
#define mem_barrier() __sync_synchronize()
20+
// This is taken from kernel headers `/include/linux/compiler.h`
21+
// Used by libpman and scap_bpf engine
22+
23+
#define READ_ONCE(x) (*(volatile typeof(x) *)&x)
24+
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)
25+
26+
#define barrier() asm volatile("" ::: "memory")
27+
28+
#if defined(__x86_64__)
29+
30+
#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
31+
32+
#define smp_store_release(p, v) \
33+
do { \
34+
barrier(); \
35+
WRITE_ONCE(*p, v); \
36+
} while(0)
37+
38+
#define smp_load_acquire(p) \
39+
({ \
40+
typeof(*p) ___p = READ_ONCE(*p); \
41+
barrier(); \
42+
___p; \
43+
})
44+
45+
#elif defined(__aarch64__)
46+
47+
#define smp_mb() asm volatile("dmb ish" ::: "memory")
48+
49+
#endif
50+
51+
#ifndef smp_mb
52+
#define smp_mb() __sync_synchronize()
53+
#endif
54+
55+
#ifndef smp_store_release
56+
#define smp_store_release(p, v) \
57+
do { \
58+
smp_mb(); \
59+
WRITE_ONCE(*p, v); \
60+
} while(0)
61+
#endif
62+
63+
#ifndef smp_load_acquire
64+
#define smp_load_acquire(p) \
65+
({ \
66+
typeof(*p) ___p = READ_ONCE(*p); \
67+
smp_mb(); \
68+
___p; \
69+
})
70+
#endif
71+
72+
// This is defined by us
73+
#if defined(__x86_64__)
74+
#define mem_barrier() barrier()
75+
#else
76+
#define mem_barrier() smp_mb()
77+
#endif

0 commit comments

Comments
 (0)