Convert indentation to spaces

M. Mert Yildiran 2022-06-08 07:09:25 +03:00
parent 4315313743
commit c477e5bb57
No known key found for this signature in database
GPG Key ID: D42ADB236521BF7A
4 changed files with 133 additions and 133 deletions


@@ -12,159 +12,159 @@ Copyright (C) UP9 Inc.
static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info* info, __u64 id) {
    int returnValue = PT_REGS_RC(ctx);

    if (info->count_ptr == NULL) {
        // ssl_read and ssl_write return the number of bytes written/read
        //
        return returnValue;
    }

    // ssl_read_ex and ssl_write_ex return 1 for success
    //
    if (returnValue != 1) {
        return 0;
    }

    // ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
    //
    size_t countBytes;
    long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);

    if (err != 0) {
        log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, id, err, 0l);
        return 0;
    }

    return countBytes;
}
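
For reference, these are the two OpenSSL API families the helper reconciles (prototypes as in the public OpenSSL headers): SSL_read/SSL_write return the byte count directly, while the _ex variants return a success flag and report the count through their final out-parameter, which the entry probes stash in info->count_ptr.

    int SSL_read    (SSL *ssl, void *buf, int num);
    int SSL_write   (SSL *ssl, const void *buf, int num);
    int SSL_read_ex (SSL *ssl, void *buf, size_t num, size_t *readbytes);
    int SSL_write_ex(SSL *ssl, const void *buf, size_t num, size_t *written);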
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd) {
    __u32 pid = id >> 32;
    __u64 key = (__u64) pid << 32 | fd;

    struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);

    if (fdinfo == NULL) {
        return 0;
    }

    int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
    chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);

    if (err != 0) {
        log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
        return 0;
    }

    return 1;
}
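
The lookup key packs the PID into the upper 32 bits and the file descriptor into the lower 32, so the same fd number in different processes maps to distinct entries. A minimal sketch of the layout (hypothetical helper name, not part of this file):

    // Hypothetical helper illustrating the key layout of file_descriptor_to_ipv4.
    static __always_inline __u64 make_fd_key(__u32 pid, __u32 fd) {
        return ((__u64) pid << 32) | fd; // e.g. make_fd_key(1234, 7) == 0x000004D200000007
    }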
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
        struct tls_chunk* chunk, int start, int end) {
    size_t recorded = MIN(end - start, sizeof(chunk->data));

    if (recorded <= 0) {
        return;
    }

    chunk->recorded = recorded;
    chunk->start = start;

    // This ugly trick is to keep the eBPF verifier happy
    //
    long err = 0;
    if (chunk->recorded == sizeof(chunk->data)) {
        err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
    } else {
        recorded &= (sizeof(chunk->data) - 1); // Buffer size must be a power of 2
        err = bpf_probe_read(chunk->data, recorded, buffer + start);
    }

    if (err != 0) {
        log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, id, err, 0l);
        return;
    }

    bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tls_chunk));
}
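
The masking branch deserves a note: the verifier cannot track the arithmetic bound recorded < sizeof(chunk->data), but it can track a bitwise AND. This only works because the buffer size is a power of two; a rough illustration, assuming sizeof(chunk->data) == 4096:

    size_t recorded = 2080; // some in-range length
    recorded &= 4096 - 1;   // & 0xFFF: a no-op for in-range values,
                            // yet provably < 4096 to the verifier
    // With a non-power-of-two size the same trick silently corrupts data:
    // 2080 & (4000 - 1) == 2080 & 0xF9F == 2048, a wrong length.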
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk) {
    // eBPF loops must be bounded at compile time; we can't use (i < chunk->len / CHUNK_SIZE)
    //
    // https://lwn.net/Articles/794934/
    //
    // However, we want to run on kernels older than 5.3, hence we use "#pragma unroll" anyway
    //
    #pragma unroll
    for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
        if (chunk->len <= (CHUNK_SIZE * i)) {
            break;
        }

        send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
    }
}
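
On kernels 5.3 and newer the verifier accepts such bounded loops as-is, so the pragma only matters for the older-kernel support mentioned in the comment. Unrolled, the loop reduces to a fixed chain of guarded calls; a sketch assuming MAX_CHUNKS_PER_OPERATION == 2 (the real constant is defined elsewhere in the repository):

    // Equivalent unrolled form for MAX_CHUNKS_PER_OPERATION == 2 (illustrative):
    if (chunk->len > 0)
        send_chunk_part(ctx, buffer, id, chunk, 0, chunk->len);
    if (chunk->len > CHUNK_SIZE)
        send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE, chunk->len);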
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, int count_bytes, __u64 id, __u32 flags) {
    if (count_bytes <= 0) {
        return;
    }

    if (count_bytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
        log_error(ctx, LOG_ERROR_BUFFER_TOO_BIG, id, count_bytes, 0l);
        return;
    }

    struct tls_chunk* chunk;
    int zero = 0;

    // If another thread running on the same CPU gets to this point at the same time as us
    // (context switch), the data will be corrupted - protection may be added in the future
    //
    chunk = bpf_map_lookup_elem(&heap, &zero);

    if (!chunk) {
        log_error(ctx, LOG_ERROR_ALLOCATING_CHUNK, id, 0l, 0l);
        return;
    }

    chunk->flags = flags;
    chunk->pid = id >> 32;
    chunk->tgid = id;
    chunk->len = count_bytes;
    chunk->fd = info->fd;

    if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
        // Without an address, we drop the chunk because there is not much to do with it in Go
        //
        return;
    }

    send_chunk(ctx, info->buffer, id, chunk);
}
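
The chunk is taken from a map named heap rather than the stack because struct tls_chunk exceeds the 512-byte BPF stack limit. A plausible shape for that map (an assumption for illustration; the real definition lives elsewhere in the repository) is a single-entry per-CPU array, which also explains the same-CPU context-switch caveat above:

    // Sketch only: assumed definition, matching the legacy
    // struct bpf_map_def style this file uses elsewhere.
    struct bpf_map_def SEC("maps") heap = {
        .type        = BPF_MAP_TYPE_PERCPU_ARRAY, // one scratch buffer per CPU
        .key_size    = sizeof(int),               // looked up with the constant zero
        .value_size  = sizeof(struct tls_chunk),  // scratch larger than the BPF stack
        .max_entries = 1,
    };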
static __always_inline struct ssl_info lookup_ssl_info(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u64 pid_tgid) {
    struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &pid_tgid);
    struct ssl_info info = {};

    if (infoPtr == NULL) {
        info.fd = -1;
        info.created_at_nano = bpf_ktime_get_ns();
    } else {
        long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);

        if (err != 0) {
            log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
        }

        if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
            // If the ssl info is too old, we don't want to use it because it may be incorrect.
            //
            info.fd = -1;
            info.created_at_nano = bpf_ktime_get_ns();
        }
    }

    return info;
}
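
Taken together, an entry probe records where the TLS buffer lives and the matching return probe emits it. A hedged sketch of the read side (the names ssl_read and openssl_read_context are illustrative, not necessarily the exact ones in this repository):

    SEC("uprobe/ssl_read")
    int ssl_read(struct pt_regs *ctx) {
        __u64 id = bpf_get_current_pid_tgid();
        struct ssl_info info = lookup_ssl_info(ctx, &openssl_read_context, id);
        info.buffer = (__u8*) PT_REGS_PARM2(ctx); // buf argument of SSL_read
        info.count_ptr = NULL;                    // plain SSL_read has no *count out-param
        bpf_map_update_elem(&openssl_read_context, &id, &info, BPF_ANY);
        return 0;
    }
    // The matching return probe would look the info back up, call
    // get_count_bytes() and then output_ssl_chunk() with the read-direction flags.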


@@ -9,46 +9,46 @@ https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.
/* Scan the ARCH passed in from ARCH env variable */
#if defined(__TARGET_ARCH_x86)
    #define bpf_target_x86
    #define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
    #define bpf_target_s390
    #define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
    #define bpf_target_arm
    #define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
    #define bpf_target_arm64
    #define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
    #define bpf_target_mips
    #define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
    #define bpf_target_powerpc
    #define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
    #define bpf_target_sparc
    #define bpf_target_defined
#else
    #undef bpf_target_defined
#endif

/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
    #define bpf_target_x86
#elif defined(__s390__)
    #define bpf_target_s390
#elif defined(__arm__)
    #define bpf_target_arm
#elif defined(__aarch64__)
    #define bpf_target_arm64
#elif defined(__mips__)
    #define bpf_target_mips
#elif defined(__powerpc__)
    #define bpf_target_powerpc
#elif defined(__sparc__)
    #define bpf_target_sparc
#endif
#endif
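
Downstream, these bpf_target_* switches typically gate per-architecture register accessors for the Go internal ABI referenced in the header comment. An illustrative continuation (the macro name GO_ABI_INTERNAL_PT_REGS_R1 is assumed; on amd64 Go passes the first integer argument in RAX, which the kernel's struct pt_regs exposes as ax):

    #if defined(bpf_target_x86)
        #define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->ax)      /* Go amd64: arg 1 in RAX */
    #elif defined(bpf_target_arm64)
        #define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->regs[0]) /* Go arm64: arg 1 in R0 */
    #endif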

(The remaining two changed files are binaries and are not shown.)