From e5714b748543ffd21576db392bd9a8cf244aaa46 Mon Sep 17 00:00:00 2001
From: Hector
Date: Sun, 29 Aug 2021 16:02:31 +0100
Subject: [PATCH] fix: read socket response data in chunks

Read the response data from the socket in chunks to prevent errors when
processing large payloads. The previous implementation handled large
payloads by allocating a single oversized buffer, which only raises the
failure threshold instead of removing it: any response larger than the
buffer is still cut off. The new code reads from the socket in a loop,
appending each chunk to a single byte slice, until the command
terminator is found.

Reduce the read buffer size to `1024` bytes.
---
 src/socket/protocol.go | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/src/socket/protocol.go b/src/socket/protocol.go
index 9dc10f5..1cf533d 100644
--- a/src/socket/protocol.go
+++ b/src/socket/protocol.go
@@ -1,6 +1,7 @@
 package socket
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"github.com/nlpodyssey/gopickle/pickle"
@@ -9,7 +10,7 @@ import (
 const (
 	commandTerminator    = "<F2B_END_COMMAND>"
 	pingCommand          = "ping"
-	socketReadBufferSize = 10000
+	socketReadBufferSize = 1024
 )
 
 func (s *Fail2BanSocket) sendCommand(command []string) (interface{}, error) {
@@ -33,13 +34,23 @@ func (s *Fail2BanSocket) write(command []string) error {
 }
 
 func (s *Fail2BanSocket) read() (interface{}, error) {
-	buf := make([]byte, socketReadBufferSize)
-	_, err := s.socket.Read(buf)
-	if err != nil {
-		return nil, err
+	reader := bufio.NewReader(s.socket)
+
+	data := []byte{}
+	for {
+		buf := make([]byte, socketReadBufferSize)
+		_, err := reader.Read(buf)
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, buf...)
+		containsTerminator := bytes.Contains(data, []byte(commandTerminator))
+		if containsTerminator {
+			break
+		}
 	}
-	bufReader := bytes.NewReader(buf)
+	bufReader := bytes.NewReader(data)
 	unpickler := pickle.NewUnpickler(bufReader)
 	unpickler.FindClass = func(module, name string) (interface{}, error) {
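
Note (not part of the commit): the sketch below is a minimal, self-contained
illustration of the chunked-read approach described in the commit message,
run over an in-memory net.Pipe connection instead of the real fail2ban
socket. The terminator value mirrors the commandTerminator constant, and
readUntilTerminator is a hypothetical helper written only for this example.
It also appends only the bytes actually returned by Read (buf[:n]), a small
refinement over the patch, which appends the whole buffer including any
bytes that were never filled.

    // Standalone sketch of reading fixed-size chunks until a terminator appears.
    package main

    import (
    	"bufio"
    	"bytes"
    	"fmt"
    	"net"
    	"strings"
    )

    const (
    	terminator     = "<F2B_END_COMMAND>" // assumed to match commandTerminator
    	readBufferSize = 1024
    )

    // readUntilTerminator reads fixed-size chunks from conn and appends them to
    // a single slice until the terminator shows up in the accumulated data.
    func readUntilTerminator(conn net.Conn) ([]byte, error) {
    	reader := bufio.NewReader(conn)
    	data := []byte{}
    	for {
    		buf := make([]byte, readBufferSize)
    		n, err := reader.Read(buf)
    		if err != nil {
    			return nil, err
    		}
    		// Append only the bytes actually read, so no zero padding ends up in data.
    		data = append(data, buf[:n]...)
    		if bytes.Contains(data, []byte(terminator)) {
    			return data, nil
    		}
    	}
    }

    func main() {
    	client, server := net.Pipe()

    	// Simulate the server side: send a payload larger than one read buffer,
    	// followed by the terminator, then close the connection.
    	go func() {
    		payload := strings.Repeat("x", 5000) + terminator
    		server.Write([]byte(payload))
    		server.Close()
    	}()

    	data, err := readUntilTerminator(client)
    	if err != nil {
    		fmt.Println("read error:", err)
    		return
    	}
    	fmt.Printf("received %d bytes, terminator found: %v\n",
    		len(data), bytes.HasSuffix(data, []byte(terminator)))
    }

Because the loop only checks for the terminator after each chunk, the payload
size no longer matters: the buffer size simply controls how many iterations
the read takes, which is why it can safely drop from 10000 to 1024 bytes.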