-
Notifications
You must be signed in to change notification settings - Fork 38
/
server.go
199 lines (169 loc) · 5.31 KB
/
server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
package httpteleport
import (
"bufio"
"crypto/tls"
"fmt"
"github.com/valyala/fasthttp"
"github.com/valyala/fastrpc"
"github.com/valyala/tcplisten"
"net"
"time"
)
// Server accepts requests from httpteleport Client.
//
// The zero value is not usable as-is: Handler must be set before
// calling Serve or ListenAndServe (init panics otherwise).
type Server struct {
	// Handler must process incoming http requests.
	//
	// Handler mustn't use the following features:
	//
	//   - Connection hijacking, i.e. RequestCtx.Hijack
	//   - Streamed response bodies, i.e. RequestCtx.*BodyStream*
	Handler fasthttp.RequestHandler

	// CompressType is the compression type used for responses.
	//
	// CompressFlate is used by default.
	CompressType CompressType

	// Concurrency is the maximum number of concurrent goroutines
	// with Server.Handler the server may run.
	//
	// DefaultConcurrency is used by default.
	Concurrency int

	// TLSConfig is TLS (aka SSL) config used for accepting encrypted
	// client connections.
	//
	// Encrypted connections may be used for transferring sensitive
	// information over untrusted networks.
	//
	// By default server accepts only unencrypted connections.
	TLSConfig *tls.Config

	// MaxBatchDelay is the maximum duration before ready responses
	// are sent to the client.
	//
	// Responses' batching may reduce network bandwidth usage and CPU usage.
	//
	// By default responses are sent immediately to the client.
	MaxBatchDelay time.Duration

	// ReadTimeout is the maximum duration for reading the full request
	// (including body).
	//
	// This also limits the maximum lifetime for idle connections.
	//
	// By default request read timeout is unlimited.
	ReadTimeout time.Duration

	// WriteTimeout is the maximum duration for writing the full response
	// (including body).
	//
	// By default response write timeout is unlimited.
	WriteTimeout time.Duration

	// ReduceMemoryUsage leads to reduced memory usage at the cost
	// of higher CPU usage if set to true.
	//
	// Memory usage reduction is disabled by default.
	//
	// Note: this field is consumed per-connection in handlerCtx.Init,
	// not copied into the underlying fastrpc server by init().
	ReduceMemoryUsage bool

	// ReadBufferSize is the size for read buffer.
	//
	// DefaultReadBufferSize is used by default.
	ReadBufferSize int

	// WriteBufferSize is the size for write buffer.
	//
	// DefaultWriteBufferSize is used by default.
	WriteBufferSize int

	// Logger used for logging.
	//
	// Standard logger from log package is used by default.
	Logger fasthttp.Logger

	// PipelineRequests enables requests' pipelining.
	//
	// Requests from a single client are processed serially
	// if is set to true.
	//
	// Enabling requests' pipelining may be useful in the following cases:
	//
	//   - if requests from a single client must be processed serially;
	//   - if the Server.Handler doesn't block and maximum throughput
	//     must be achieved for requests' processing.
	//
	// By default requests from a single client are processed concurrently.
	PipelineRequests bool

	// s is the underlying fastrpc server doing the actual work.
	// It is configured from the exported fields above by init(),
	// which Serve calls before accepting connections.
	s fastrpc.Server
}
// ListenAndServe serves httpteleport requests accepted from the given
// TCP address.
//
// The listener is created via tcplisten with default options on the
// "tcp4" network; any listener-creation error is returned as-is.
func (s *Server) ListenAndServe(addr string) error {
	ln, err := (&tcplisten.Config{}).NewListener("tcp4", addr)
	if err != nil {
		return err
	}
	return s.Serve(ln)
}
// Serve serves httpteleport requests accepted from the given listener.
//
// It configures the underlying fastrpc server from the exported fields
// (via init, which panics if Handler is nil) and then blocks in the
// fastrpc accept loop, returning whatever error it reports.
func (s *Server) Serve(ln net.Listener) error {
	s.init()
	return s.s.Serve(ln)
}
// init wires the exported Server configuration into the embedded
// fastrpc.Server. It is called by Serve before accepting connections.
//
// It panics if Handler is unset, since the server cannot process
// requests without it.
func (s *Server) init() {
	if s.Handler == nil {
		panic("BUG: Server.Handler must be set")
	}
	s.s.SniffHeader = sniffHeader
	s.s.ProtocolVersion = protocolVersion
	s.s.NewHandlerCtx = s.newHandlerCtx
	s.s.Handler = s.requestHandler
	s.s.CompressType = fastrpc.CompressType(s.CompressType)
	s.s.Concurrency = s.Concurrency
	s.s.TLSConfig = s.TLSConfig
	s.s.MaxBatchDelay = s.MaxBatchDelay
	s.s.ReadTimeout = s.ReadTimeout
	s.s.WriteTimeout = s.WriteTimeout
	s.s.ReadBufferSize = s.ReadBufferSize
	s.s.WriteBufferSize = s.WriteBufferSize
	s.s.Logger = s.Logger
	s.s.PipelineRequests = s.PipelineRequests
	// ReduceMemoryUsage is deliberately not copied here: it is read
	// per-connection in handlerCtx.Init instead.
}
// handlerCtx adapts a fasthttp.RequestCtx to the fastrpc.HandlerCtx
// interface, letting the fastrpc transport carry plain HTTP
// requests/responses.
type handlerCtx struct {
	// ctx holds the HTTP request/response pair for one exchange.
	ctx *fasthttp.RequestCtx
	// s is the owning server, kept for per-connection settings
	// (ReduceMemoryUsage) and for allocating replacement contexts.
	s *Server
}
// newHandlerCtx returns a fresh handlerCtx bound to this server.
// It is registered as fastrpc's NewHandlerCtx factory in init.
func (s *Server) newHandlerCtx() fastrpc.HandlerCtx {
	hc := &handlerCtx{s: s}
	hc.ctx = new(fasthttp.RequestCtx)
	return hc
}
// Init prepares the context for serving the given connection.
//
// It delegates to fasthttp's Init2, passing the server-wide
// ReduceMemoryUsage flag — presumably binding conn and logger to the
// RequestCtx for the lifetime of the connection (see fasthttp docs
// for Init2's exact semantics).
func (ctx *handlerCtx) Init(conn net.Conn, logger fasthttp.Logger) {
	ctx.ctx.Init2(conn, logger, ctx.s.ReduceMemoryUsage)
}
// ReadRequest reads a single HTTP request from br into ctx,
// returning any read/parse error from fasthttp.
func (ctx *handlerCtx) ReadRequest(br *bufio.Reader) error {
	return ctx.ctx.Request.Read(br)
}
// WriteResponse writes the buffered HTTP response to bw and returns
// the write error, if any.
func (ctx *handlerCtx) WriteResponse(bw *bufio.Writer) error {
	// The response is no longer needed once written (successfully or
	// not), so release its resources on the way out.
	defer ctx.ctx.Response.Reset()
	return ctx.ctx.Response.Write(bw)
}
// ConcurrencyLimitError fills the response with a 429 status and a
// human-readable explanation; fastrpc invokes it when the configured
// concurrency limit is exceeded.
func (ctx *handlerCtx) ConcurrencyLimitError(concurrency int) {
	ctx.ctx.SetStatusCode(fasthttp.StatusTooManyRequests)
	fmt.Fprintf(ctx.ctx, "concurrency limit exceeded: %d. Increase Server.Concurrency or decrease load on the server", concurrency)
}
// requestHandler adapts Server.Handler to fastrpc's handler interface.
// The embedded fastrpc server calls it once per incoming request; the
// returned context carries the response to be written back.
func (s *Server) requestHandler(ctxv fastrpc.HandlerCtx) fastrpc.HandlerCtx {
	ctx := ctxv.(*handlerCtx)
	s.Handler(ctx.ctx)
	// Features that cannot be carried over the httpteleport transport
	// are rejected loudly: a panic here indicates a bug in the
	// user-supplied Handler (see the Server.Handler field docs).
	if ctx.ctx.IsBodyStream() {
		panic("chunked responses aren't supported")
	}
	if ctx.ctx.Hijacked() {
		panic("hijacking isn't supported")
	}
	timeoutResp := ctx.ctx.LastTimeoutErrorResponse()
	if timeoutResp != nil {
		// The current ctx may be still in use by the handler.
		// So create new one for passing to pendingResponses.
		ctxNew := s.newHandlerCtx().(*handlerCtx)
		timeoutResp.CopyTo(&ctxNew.ctx.Response)
		ctx = ctxNew
	}
	// Request is no longer needed, so reset it in order
	// to free up resources occupied by the request.
	ctx.ctx.Request.Reset()
	return ctx
}