Mirror of https://github.com/falcosecurity/falco.git (synced 2025-09-07 09:40:44 +00:00)
chore(userspace): addressing review comments and typos
Co-authored-by: Lorenzo Fontana <lo@linux.com>
Signed-off-by: Leonardo Di Donato <leodidonato@gmail.com>
committed by Leo Di Donato
parent 732965f973
commit 98cdc30aa3
@@ -290,7 +290,6 @@ int falco_formats::resolve_tokens(lua_State *ls)
		json_event_formatter json_formatter(s_engine->json_factory(), sformat);
		values = json_formatter.tomap((json_event*) evt);
	}
	// todo(leodido, fntlnz) > check explicitly for k8s_audit, otherwise throw exception

	lua_newtable(ls);
	for(auto const& v : values)
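For reference, a minimal sketch of how the loop shown above can populate the Lua table, assuming `values` maps field names to resolved strings (only lua_newtable() and the loop header appear in this hunk):

	// Sketch of the loop body: push each resolved field into the table created above.
	for(auto const& v : values)
	{
		lua_pushstring(ls, v.first.c_str());  // key: field name
		lua_pushstring(ls, v.second.c_str()); // value: resolved token
		lua_settable(ls, -3);                 // table[key] = value
	}
	return 1; // hand the table back to the Lua caller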
@@ -151,7 +151,6 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
	m_grpc_enabled = m_config->get_scalar<bool>("grpc", "enabled", false);
	m_grpc_bind_address = m_config->get_scalar<string>("grpc", "bind_address", "0.0.0.0:5060");
	m_grpc_threadiness = m_config->get_scalar<uint32_t>("grpc", "threadiness", 8); // todo > limit it to avoid oversubscription? std::thread::hardware_concurrency()
	// todo(fntlnz,leodido) > choose correct paths
	m_grpc_private_key = m_config->get_scalar<string>("grpc", "private_key", "/etc/falco/certs/server.key");
	m_grpc_cert_chain = m_config->get_scalar<string>("grpc", "cert_chain", "/etc/falco/certs/server.crt");
	m_grpc_root_certs = m_config->get_scalar<string>("grpc", "root_certs", "/etc/falco/certs/ca.crt");
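These scalars map one-to-one onto the gRPC server parameters; a minimal wiring sketch using the server::init() and server::run() signatures that appear further down in this diff (the wrapper function name is illustrative):

	// Illustrative wiring, not taken verbatim from the Falco sources.
	void wire_grpc_config_sketch(falco_configuration& config)
	{
		falco::grpc::server grpc_server;
		grpc_server.init(
			config.m_grpc_bind_address,  // "0.0.0.0:5060" unless overridden
			config.m_grpc_threadiness,   // 8 by default
			config.m_grpc_private_key,   // "/etc/falco/certs/server.key"
			config.m_grpc_cert_chain,    // "/etc/falco/certs/server.crt"
			config.m_grpc_root_certs);   // "/etc/falco/certs/ca.crt"
		grpc_server.run();               // run() appears later in this diff
	}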
@@ -60,9 +60,8 @@ public:
		SUCCESS,
		ERROR
	} m_status = STREAMING;
	// Request-specific stream data
-	mutable void* m_stream = nullptr;
-	// Are there more responses to stream?
+	mutable void* m_stream = nullptr; // todo(fntlnz, leodido) > useful in the future
	mutable bool m_has_more = false;
};
} // namespace grpc
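Taken together, the members touched here form a small per-stream state machine; a condensed sketch of the resulting shape (struct name and enum tag abbreviated for illustration):

	// Sketch of the stream_context state used by the streaming RPC handlers.
	struct stream_context_sketch
	{
		enum status
		{
			STREAMING = 0,
			SUCCESS,
			ERROR
		} m_status = STREAMING;

		mutable void* m_stream = nullptr; // per-stream scratch data, kept for future use
		mutable bool m_has_more = false;  // does the handler have more responses to write?
	};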
@@ -27,6 +27,15 @@ limitations under the License.
#include "grpc_context.h"
#include "utils.h"

+#define REGISTER_STREAM(req, res, svc, rpc, impl, num) \
+	std::vector<request_stream_context<req, res>> rpc##_contexts(num); \
+	for(request_stream_context<req, res> & ctx : rpc##_contexts) \
+	{ \
+		ctx.m_process_func = &server::impl; \
+		ctx.m_request_func = &svc::AsyncService::Request##rpc; \
+		ctx.start(this); \
+	}

template<>
void falco::grpc::request_stream_context<falco::output::request, falco::output::response>::start(server* srv)
{
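Expanded by hand with the arguments used at the call site later in this diff (svc = output::service, rpc = impl = subscribe), the new macro is roughly equivalent to the following; shown only to illustrate the token pasting:

	std::vector<request_stream_context<output::request, output::response>> subscribe_contexts(context_num);
	for(request_stream_context<output::request, output::response>& ctx : subscribe_contexts)
	{
		ctx.m_process_func = &server::subscribe;
		ctx.m_request_func = &output::service::AsyncService::Requestsubscribe;
		ctx.start(this);
	}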
@@ -52,7 +61,7 @@ void falco::grpc::request_stream_context<falco::output::request, falco::output::

	// Processing
	output::response res;
-	(srv->*m_process_func)(*m_stream_ctx, m_req, res);
+	(srv->*m_process_func)(*m_stream_ctx, m_req, res); // subscribe()

	// When there are still more responses to stream
	if(m_stream_ctx->m_has_more)
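When m_has_more is set, the context typically re-queues itself by writing the response on the asynchronous writer; a sketch assuming the context owns a grpc::ServerAsyncWriter<output::response> named m_res_writer (that member name is not shown in this diff):

	if(m_stream_ctx->m_has_more)
	{
		// Queue the response; completion comes back through the completion
		// queue with this context as the tag, so it gets scheduled again.
		m_res_writer->Write(res, this);
	}
	else
	{
		// Nothing left to stream: close the RPC with an OK status.
		m_res_writer->Finish(::grpc::Status::OK, this);
		m_stream_ctx->m_status = stream_context::SUCCESS;
	}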
@@ -86,14 +95,12 @@ void falco::grpc::request_stream_context<falco::output::request, falco::output::

void falco::grpc::server::thread_process(int thread_index)
{

	void* tag = nullptr;
	bool event_read_success = false;
	while(m_completion_queue->Next(&tag, &event_read_success))
	{
		if(tag == nullptr)
		{
			// TODO: empty tag returned, log "completion queue with empty tag"
			continue;
		}
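Every tag delivered by Next() is the address of a context registered via REGISTER_STREAM; a sketch of how the loop body can recover and dispatch it, consistent with the hunk that follows:

		// Recover the per-RPC context from the completion queue tag.
		auto* ctx = static_cast<request_context_base*>(tag);
		if(!event_read_success && ctx->m_state != request_context_base::REQUEST)
		{
			// A read failed while the RPC was in flight: end it with an error.
			ctx->end(this, true);
			continue;
		}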
@@ -105,7 +112,6 @@ void falco::grpc::server::thread_process(int thread_index)
		{
			if(ctx->m_state != request_context_base::REQUEST)
			{
				// todo > log "server completion queue failed to read event for tag `tag`"
				// End the context with error
				ctx->end(this, true);
			}
@@ -127,24 +133,11 @@ void falco::grpc::server::thread_process(int thread_index)
				break;
			default:
				// todo > log "unknown completion queue event"
				// todo > abort?
				break;
		}
	}
}
-
-//
-// Create array of contexts and start processing streaming RPC request.
-//
-#define REGISTER_STREAM(REQ, RESP, RPC, IMPL, CONTEXT_NUM) \
-	std::vector<request_stream_context<REQ, RESP>> RPC##_contexts(CONTEXT_NUM); \
-	for(request_stream_context<REQ, RESP> & ctx : RPC##_contexts) \
-	{ \
-		ctx.m_process_func = &server::IMPL; \
-		ctx.m_request_func = &output::service::AsyncService::Request##RPC; \
-		ctx.start(this); \
-	}
-
void falco::grpc::server::init(std::string server_addr, int threadiness, std::string private_key, std::string cert_chain, std::string root_certs)
{
	m_server_addr = server_addr;
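init() receives the bind address, thread count and TLS material; one common way to turn these into an asynchronous TLS server is the standard grpc::ServerBuilder API, sketched below. This mirrors stock grpc++, not necessarily the exact Falco implementation; the *_pem arguments are the contents of the configured files (gRPC wants PEM data, not paths), and the helper name is illustrative.

	#include <grpcpp/grpcpp.h>
	#include <memory>
	#include <string>

	// Sketch only: build an async TLS server from the init() parameters.
	std::unique_ptr<::grpc::Server> build_server_sketch(
		const std::string& server_addr,
		const std::string& key_pem,
		const std::string& cert_pem,
		const std::string& root_certs_pem,
		::grpc::Service* svc, // e.g. an output::service::AsyncService instance
		std::unique_ptr<::grpc::ServerCompletionQueue>& cq)
	{
		::grpc::SslServerCredentialsOptions ssl_opts;
		ssl_opts.pem_root_certs = root_certs_pem;
		ssl_opts.pem_key_cert_pairs.push_back({key_pem, cert_pem});

		::grpc::ServerBuilder builder;
		builder.AddListeningPort(server_addr, ::grpc::SslServerCredentials(ssl_opts));
		builder.RegisterService(svc);
		cq = builder.AddCompletionQueue(); // later drained by thread_process()
		return builder.BuildAndStart();
	}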
@@ -182,7 +175,9 @@ void falco::grpc::server::run()
	// This defines the number of simultaneous completion queue requests of the same type (service::AsyncService::Request##RPC)
	// For this approach to be sufficient, server::IMPL has to be fast
	int context_num = m_threadiness * 10;
-	REGISTER_STREAM(output::request, output::response, subscribe, subscribe, context_num)
+	REGISTER_STREAM(output::request, output::response, output::service, subscribe, subscribe, context_num)
+
+	// register_stream<output::request, output::response>(subscribe, context_num)

	m_threads.resize(m_threadiness);
	int thread_idx = 0;
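After the contexts are registered, run() sizes the worker pool from m_threadiness; a sketch of the loop that presumably follows, starting one worker per thread on thread_process():

	for(std::thread& t : m_threads)
	{
		// Each worker drains the shared completion queue in thread_process().
		t = std::thread(&server::thread_process, this, thread_idx++);
	}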
@@ -32,14 +32,11 @@ void falco::grpc::server_impl::subscribe(const stream_context& ctx, const output
{
	if(ctx.m_status == stream_context::SUCCESS || ctx.m_status == stream_context::ERROR)
	{
		// todo > logic

		ctx.m_stream = nullptr;
	}
	else
	{
		// Start (or continue) streaming
		// ctx.m_status == stream_context::STREAMING
		// Streaming
		if(output::queue::get().try_pop(res) && !req.keepalive())
		{
			ctx.m_has_more = true;
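On the client side, the subscribe RPC is a plain server-streaming call; a sketch assuming the stub generated from the output proto (message and method names follow the identifiers visible in this diff, the generated header name is assumed):

	#include <grpcpp/grpcpp.h>
	#include <memory>
	// #include "output.grpc.pb.h" // generated header name assumed

	void subscribe_client_sketch(const std::shared_ptr<::grpc::Channel>& channel)
	{
		std::unique_ptr<falco::output::service::Stub> stub =
			falco::output::service::NewStub(channel);

		::grpc::ClientContext client_ctx;
		falco::output::request req;
		req.set_keepalive(true); // keep the stream open between events

		std::unique_ptr<::grpc::ClientReader<falco::output::response>> reader(
			stub->subscribe(&client_ctx, req));

		falco::output::response res;
		while(reader->Read(&res))
		{
			// handle each streamed Falco output here
		}
		::grpc::Status status = reader->Finish();
		(void)status; // inspect status.ok() / status.error_message() as needed
	}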
@@ -170,12 +170,14 @@ function mod.http_reopen()
end

function mod.grpc(event, rule, source, priority, priority_num, msg, format, options)
   if options.enabled == true then
      fields = formats.resolve_tokens(event, source, format)
      c_outputs.handle_grpc(event, rule, source, priority, msg, fields, options)
   end
end

function mod.grpc_message(priority, priority_num, msg, options)
-   -- todo
+   -- todo(fntlnz, leodido) > gRPC does not support subscribing to dropped events yet
end
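The Lua layer only resolves the fields and delegates to c_outputs.handle_grpc; presumably the C++ binding behind it builds a falco::output::response and hands it to the same output::queue that subscribe() pops from. Only queue::get() and try_pop() appear in this diff, so everything below is an assumption for illustration:

	// Hypothetical sketch of the binding behind c_outputs.handle_grpc.
	falco::output::response res;
	// res.set_rule(rule);   // assumed setters: the response schema is
	// res.set_output(msg);  // not shown in this diff
	// ... copy the resolved fields map into the response ...
	falco::output::queue::get().push(res); // push() is assumed to mirror try_pop()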