interfaces.go

// This program consists of four conceptual components. The following diagram
// illustrates how they interact: the receiver accepts incoming data and
// forwards it to the aggregator, which uses the tokenizer to turn the data
// into tokens; the tokens are then handed to the forwarder.
//
// ┏━━━━━━━━━━┓      ┏━━━━━━━━━━━━┓      ┏━━━━━━━━━━━┓
// ┃ Receiver ┃  ━>  ┃ Aggregator ┃  ━>  ┃ Forwarder ┃
// ┗━━━━━━━━━━┛      ┗━━━━━━━━━━━━┛      ┗━━━━━━━━━━━┛
//                         ┃
//                         v
//                   ┏━━━━━━━━━━━┓
//                   ┃ Tokenizer ┃
//                   ┗━━━━━━━━━━━┛
package main

import (
	"time"

	uuid "github.com/google/uuid"
)

// blob is an arbitrary chunk of bytes that satisfies the serializer
// interface.
type blob []byte

// token is the result of running data through the tokenizer.
type token []byte

// bytes returns the blob's raw bytes, satisfying the serializer interface.
func (b blob) bytes() []byte {
	return b
}

// config stores the configuration for all components in a single data
// structure. Given that we only have a few, simple components for now,
// that's acceptable.
type config struct {
	kafkaConfig      *kafkaConfig
	fwdInterval      time.Duration
	keyExpiry        time.Duration
	port             uint16
	prometheusPort   uint16
	exposePrometheus bool
}

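// What follows is a purely illustrative example of how a config might be
// populated; the concrete values below are assumptions made for this sketch
// and do not reflect any real deployment (the Kafka settings are left at
// their zero value).
var exampleConfig = &config{
	fwdInterval:      time.Minute,
	keyExpiry:        24 * time.Hour,
	port:             8080,
	prometheusPort:   9090,
	exposePrometheus: true,
}
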
// components bundles the program's four components.
type components struct {
	r receiver
	a aggregator
	t tokenizer
	f forwarder
}

// keyID wraps a UUID that identifies a tokenizer key.
type keyID struct {
	uuid.UUID
}

// serializer allows for serializing data into a byte slice.
type serializer interface {
	bytes() []byte
}

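// blob (defined above) already satisfies serializer through its bytes
// method. The compile-time assertion below merely documents that
// relationship; it is illustrative and has no runtime effect.
var _ serializer = blob(nil)
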
// configurer allows for setting the configuration.
type configurer interface {
	setConfig(*config)
}

// startStopper allows for starting and stopping.
type startStopper interface {
	start()
	stop()
}

// receiver receives input data from somewhere. The data can be of arbitrary
// nature and come from anywhere as long as it supports the serializer
// interface.
type receiver interface {
	inbox() chan serializer
	startStopper
	configurer
}

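// stubReceiver is a hypothetical, minimal sketch showing how the receiver
// interface can be satisfied; it is not one of the real receivers used by
// this program.
type stubReceiver struct {
	in   chan serializer
	cfg  *config
	done chan struct{}
}

// inbox exposes the channel on which incoming data is delivered.
func (s *stubReceiver) inbox() chan serializer { return s.in }

// setConfig stores the shared configuration.
func (s *stubReceiver) setConfig(c *config) { s.cfg = c }

// start would begin receiving data; this sketch only prepares a stop signal.
func (s *stubReceiver) start() { s.done = make(chan struct{}) }

// stop signals the receiver to shut down.
func (s *stubReceiver) stop() { close(s.done) }

// Compile-time check that stubReceiver satisfies receiver.
var _ receiver = (*stubReceiver)(nil)
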
// aggregator aggregates and manages data by sitting in between the receiver
// and forwarder while using the tokenizer.
type aggregator interface {
	connect(inbox chan serializer, outbox chan token)
	use(tokenizer)
	startStopper
	configurer
}

// tokenizer turns a serializer object into tokens, which typically involves a
// secret key.
type tokenizer interface {
	keyID() *keyID
	tokenize(serializer) (token, error)
	tokenizeAndKeyID(serializer) (token, *keyID, error)
	resetKey() error
	preservesLen() bool
}

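// noopTokenizer is a hypothetical, illustrative tokenizer: instead of
// applying a keyed transformation, it simply copies its input. It exists
// only to show how the tokenizer interface can be implemented; real
// implementations are expected to involve a secret key.
type noopTokenizer struct {
	id keyID
}

// keyID returns the identifier of the (purely nominal) current key.
func (n *noopTokenizer) keyID() *keyID {
	return &n.id
}

// tokenize copies the serialized bytes verbatim; a real tokenizer would
// derive the token from the input and its current key.
func (n *noopTokenizer) tokenize(s serializer) (token, error) {
	b := s.bytes()
	t := make(token, len(b))
	copy(t, b)
	return t, nil
}

// tokenizeAndKeyID returns the token together with the key ID it was
// (nominally) created under.
func (n *noopTokenizer) tokenizeAndKeyID(s serializer) (token, *keyID, error) {
	t, err := n.tokenize(s)
	return t, n.keyID(), err
}

// resetKey rotates the key, which for this sketch just means picking a
// fresh key ID.
func (n *noopTokenizer) resetKey() error {
	n.id = keyID{UUID: uuid.New()}
	return nil
}

// preservesLen reports whether tokens have the same length as their input,
// which is trivially true for a copy.
func (n *noopTokenizer) preservesLen() bool {
	return true
}

// Compile-time check that noopTokenizer satisfies tokenizer.
var _ tokenizer = &noopTokenizer{}
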
// forwarder sends tokens somewhere. Anywhere, really.
type forwarder interface {
	outbox() chan token
	startStopper
	configurer
}

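// The function below is a hypothetical sketch of how the four components
// could be wired together, mirroring the diagram at the top of this file;
// the actual setup lives elsewhere in this package and may differ.
func wireComponents(c components, cfg *config) {
	// Hand the shared configuration to every configurable component.
	c.r.setConfig(cfg)
	c.a.setConfig(cfg)
	c.f.setConfig(cfg)

	// The aggregator sits between the receiver and the forwarder and relies
	// on the tokenizer to turn incoming data into tokens.
	c.a.connect(c.r.inbox(), c.f.outbox())
	c.a.use(c.t)

	// Start downstream components first so that tokens always have somewhere
	// to go once data begins to flow.
	c.f.start()
	c.a.start()
	c.r.start()
}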