Commit b320d3a
internal/jsonrpc2/servertest: support both TCP and pipe connection
Update the servertest package to support connecting to a jsonrpc2 server
using either TCP or io.Pipes. The latter is provided so that regtests can
more accurately mimic the current gopls execution mode, where gopls is run
as a sidecar and communicated with via a pipe.

Updates golang/go#36879

Change-Id: I0e14ed0e628333ba2cc7b088009f1887fcaa82a5
Reviewed-on: https://go-review.googlesource.com/c/tools/+/218777
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Heschi Kreinick <heschi@google.com>
1 parent 5916a50 commit b320d3a
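To make the new API concrete, here is a minimal sketch (not part of the commit) of how both transports might be exercised from test code inside x/tools. The constructors, lsprpc.NewStreamServer, and cache.New calls are taken from the diffs below; the package clause, function name, and comments are invented for illustration.

package regtestsketch // illustrative package; the imported packages are internal to x/tools

import (
    "context"

    "golang.org/x/tools/internal/jsonrpc2"
    "golang.org/x/tools/internal/jsonrpc2/servertest"
    "golang.org/x/tools/internal/lsp/cache"
    "golang.org/x/tools/internal/lsp/lsprpc"
)

// connectBoth starts one gopls StreamServer and connects to it twice: once
// over TCP and once over an in-process pipe. Both test servers satisfy the
// new servertest.Connector interface, so helpers can accept either.
func connectBoth(ctx context.Context) (tcpConn, pipeConn *jsonrpc2.Conn) {
    ss := lsprpc.NewStreamServer(cache.New(nil), false)

    // TCP, as before (NewServer is renamed NewTCPServer by this commit).
    tcpTS := servertest.NewTCPServer(ctx, ss)

    // io.Pipe-based, mimicking gopls running as a sidecar behind a pipe.
    pipeTS := servertest.NewPipeServer(ctx, ss)

    // In real tests the servers would be closed, e.g. defer tcpTS.Close().
    return tcpTS.Connect(ctx), pipeTS.Connect(ctx)
}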

6 files changed, +130 -41 lines changed


gopls/test/gopls_test.go

+1 -1

@@ -43,7 +43,7 @@ func testCommandLine(t *testing.T, exporter packagestest.Exporter) {
 	ctx := tests.Context(t)
 	cache := cache.New(commandLineOptions)
 	ss := lsprpc.NewStreamServer(cache, false)
-	ts := servertest.NewServer(ctx, ss)
+	ts := servertest.NewTCPServer(ctx, ss)
 	for _, data := range data {
 		defer data.Exported.Cleanup()
 		t.Run(data.Folder, func(t *testing.T) {

internal/jsonrpc2/servertest/servertest.go

+85 -13

@@ -9,46 +9,118 @@ package servertest
 import (
 	"context"
 	"fmt"
+	"io"
 	"net"
+	"sync"
 
 	"golang.org/x/tools/internal/jsonrpc2"
 )
 
-// Server is a helper for executing tests against a remote jsonrpc2 connection.
-// Once initialized, its Addr field may be used to connect a jsonrpc2 client.
-type Server struct {
+// Connector is the interface used to connect to a server.
+type Connector interface {
+	Connect(context.Context) *jsonrpc2.Conn
+}
+
+// TCPServer is a helper for executing tests against a remote jsonrpc2
+// connection. Once initialized, its Addr field may be used to connect a
+// jsonrpc2 client.
+type TCPServer struct {
 	Addr string
 
 	ln  net.Listener
+	cls *closerList
 }
 
-// NewServer returns a new test server listening on local tcp port and serving
-// incoming jsonrpc2 streams using the provided stream server. It panics on any
-// error.
-func NewServer(ctx context.Context, server jsonrpc2.StreamServer) *Server {
+// NewTCPServer returns a new test server listening on local tcp port and
+// serving incoming jsonrpc2 streams using the provided stream server. It
+// panics on any error.
+func NewTCPServer(ctx context.Context, server jsonrpc2.StreamServer) *TCPServer {
 	ln, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
 		panic(fmt.Sprintf("servertest: failed to listen: %v", err))
 	}
 	go jsonrpc2.Serve(ctx, ln, server)
-	return &Server{Addr: ln.Addr().String(), ln: ln}
+	return &TCPServer{Addr: ln.Addr().String(), ln: ln, cls: &closerList{}}
 }
 
 // Connect dials the test server and returns a jsonrpc2 Connection that is
 // ready for use.
-func (s *Server) Connect(ctx context.Context) *jsonrpc2.Conn {
+func (s *TCPServer) Connect(ctx context.Context) *jsonrpc2.Conn {
 	netConn, err := net.Dial("tcp", s.Addr)
 	if err != nil {
 		panic(fmt.Sprintf("servertest: failed to connect to test instance: %v", err))
 	}
+	s.cls.add(func() {
+		netConn.Close()
+	})
 	conn := jsonrpc2.NewConn(jsonrpc2.NewHeaderStream(netConn, netConn))
 	go conn.Run(ctx)
 	return conn
 }
 
-// Close is a placeholder for proper test server shutdown.
-// TODO: implement proper shutdown, which gracefully closes existing
-// connections to the test server.
-func (s *Server) Close() error {
+// Close closes all connected pipes.
+func (s *TCPServer) Close() error {
+	s.cls.closeAll()
 	return nil
 }
+
+// PipeServer is a test server that handles connections over io.Pipes.
+type PipeServer struct {
+	server jsonrpc2.StreamServer
+	cls    *closerList
+}
+
+// NewPipeServer returns a test server that can be connected to via io.Pipes.
+func NewPipeServer(ctx context.Context, server jsonrpc2.StreamServer) *PipeServer {
+	return &PipeServer{server: server, cls: &closerList{}}
+}
+
+// Connect creates new io.Pipes and binds them to the underlying StreamServer.
+func (s *PipeServer) Connect(ctx context.Context) *jsonrpc2.Conn {
+	// Pipes connect like this:
+	// Client🡒(sWriter)🡒(sReader)🡒Server
+	//   🡔(cReader)🡐(cWriter)🡗
+	sReader, sWriter := io.Pipe()
+	cReader, cWriter := io.Pipe()
+	s.cls.add(func() {
+		sReader.Close()
+		sWriter.Close()
+		cReader.Close()
+		cWriter.Close()
+	})
+	serverStream := jsonrpc2.NewStream(sReader, cWriter)
+	go s.server.ServeStream(ctx, serverStream)
+
+	clientStream := jsonrpc2.NewStream(cReader, sWriter)
+	clientConn := jsonrpc2.NewConn(clientStream)
+	go clientConn.Run(ctx)
+	return clientConn
+}
+
+// Close closes all connected pipes.
+func (s *PipeServer) Close() error {
+	s.cls.closeAll()
+	return nil
+}
+
+// closerList tracks closers to run when a testserver is closed. This is a
+// convenience, so that callers don't have to worry about closing each
+// connection.
+type closerList struct {
+	mu      sync.Mutex
+	closers []func()
+}
+
+func (l *closerList) add(closer func()) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.closers = append(l.closers, closer)
+}
+
+func (l *closerList) closeAll() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for _, closer := range l.closers {
+		closer()
+	}
+}
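The pipe wiring in PipeServer.Connect is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea using only the standard library (no jsonrpc2 involved, all names illustrative): two io.Pipe pairs form a full-duplex link, with each side reading from the pipe the other side writes.

package main

import (
    "fmt"
    "io"
)

func main() {
    // Client -> sWriter -> sReader -> Server, and back via cWriter -> cReader,
    // mirroring the diagram in PipeServer.Connect above.
    sReader, sWriter := io.Pipe() // client writes, server reads
    cReader, cWriter := io.Pipe() // server writes, client reads

    // "Server": echo whatever arrives back to the client, then close its side.
    go func() {
        defer cWriter.Close()
        io.Copy(cWriter, sReader)
    }()

    // "Client": send one request, then close its write side to signal EOF.
    go func() {
        defer sWriter.Close()
        fmt.Fprint(sWriter, "ping")
    }()

    reply, _ := io.ReadAll(cReader)
    fmt.Println(string(reply)) // prints "ping"
}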

internal/jsonrpc2/servertest/servertest_test.go

+25 -8

@@ -30,14 +30,31 @@ func (fakeHandler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered b
 func TestTestServer(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
-	ts := NewServer(ctx, jsonrpc2.HandlerServer(fakeHandler{}))
-	defer ts.Close()
-	conn := ts.Connect(ctx)
-	var got msg
-	if err := conn.Call(ctx, "ping", &msg{"ping"}, &got); err != nil {
-		t.Fatal(err)
+	server := jsonrpc2.HandlerServer(fakeHandler{})
+	tcpTS := NewTCPServer(ctx, server)
+	defer tcpTS.Close()
+	pipeTS := NewPipeServer(ctx, server)
+	defer pipeTS.Close()
+
+	tests := []struct {
+		name      string
+		connector Connector
+	}{
+		{"tcp", tcpTS},
+		{"pipe", pipeTS},
 	}
-	if want := "pong"; got.Msg != want {
-		t.Errorf("conn.Call(...): returned %q, want %q", got, want)
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			conn := test.connector.Connect(ctx)
+			var got msg
+			if err := conn.Call(ctx, "ping", &msg{"ping"}, &got); err != nil {
+				t.Fatal(err)
+			}
+			if want := "pong"; got.Msg != want {
+				t.Errorf("conn.Call(...): returned %q, want %q", got, want)
+			}
+		})
 	}
 }

internal/lsp/cmd/cmd_test.go

+2 -2

@@ -45,10 +45,10 @@ func testCommandLine(t *testing.T, exporter packagestest.Exporter) {
 	}
 }
 
-func testServer(ctx context.Context) *servertest.Server {
+func testServer(ctx context.Context) *servertest.TCPServer {
 	cache := cache.New(nil)
 	ss := lsprpc.NewStreamServer(cache, false)
-	return servertest.NewServer(ctx, ss)
+	return servertest.NewTCPServer(ctx, ss)
 }
 
 func TestDefinitionHelpExample(t *testing.T) {

internal/lsp/lsprpc/lsprpc_test.go

+4 -4

@@ -46,7 +46,7 @@ func TestClientLogging(t *testing.T) {
 			return server
 		},
 	}
-	ts := servertest.NewServer(ctx, ss)
+	ts := servertest.NewPipeServer(ctx, ss)
 	cc := ts.Connect(ctx)
 	cc.AddHandler(protocol.ClientHandler(client))
 
@@ -100,14 +100,14 @@ func TestRequestCancellation(t *testing.T) {
 		},
 	}
 	ctx := context.Background()
-	tsDirect := servertest.NewServer(ctx, ss)
+	tsDirect := servertest.NewTCPServer(ctx, ss)
 
 	forwarder := NewForwarder(tsDirect.Addr, false)
-	tsForwarded := servertest.NewServer(ctx, forwarder)
+	tsForwarded := servertest.NewPipeServer(ctx, forwarder)
 
 	tests := []struct {
 		serverType string
-		ts         *servertest.Server
+		ts         servertest.Connector
 	}{
 		{"direct", tsDirect},
 		{"forwarder", tsForwarded},

internal/lsp/regtest/env.go

+13 -13

@@ -40,7 +40,7 @@ const (
 // remote), any tests that execute on the same Runner will share the same
 // state.
 type Runner struct {
-	ts      *servertest.Server
+	ts      *servertest.TCPServer
 	modes   EnvMode
 	timeout time.Duration
 }
@@ -49,7 +49,7 @@ type Runner struct {
 // run tests.
 func NewTestRunner(modes EnvMode, testTimeout time.Duration) *Runner {
 	ss := lsprpc.NewStreamServer(cache.New(nil), false)
-	ts := servertest.NewServer(context.Background(), ss)
+	ts := servertest.NewTCPServer(context.Background(), ss)
 	return &Runner{
 		ts:    ts,
 		modes: modes,
@@ -69,9 +69,9 @@ func (r *Runner) Run(t *testing.T, filedata string, test func(context.Context, *
 	t.Helper()
 
 	tests := []struct {
-		name       string
-		mode       EnvMode
-		makeServer func(context.Context, *testing.T) (*servertest.Server, func())
+		name         string
+		mode         EnvMode
+		getConnector func(context.Context, *testing.T) (servertest.Connector, func())
 	}{
 		{"singleton", Singleton, r.singletonEnv},
 		{"shared", Shared, r.sharedEnv},
@@ -92,30 +92,30 @@ func (r *Runner) Run(t *testing.T, filedata string, test func(context.Context, *
 				t.Fatal(err)
 			}
 			defer ws.Close()
-			ts, cleanup := tc.makeServer(ctx, t)
+			ts, cleanup := tc.getConnector(ctx, t)
 			defer cleanup()
 			env := NewEnv(ctx, t, ws, ts)
 			test(ctx, t, env)
 		})
 	}
 }
 
-func (r *Runner) singletonEnv(ctx context.Context, t *testing.T) (*servertest.Server, func()) {
+func (r *Runner) singletonEnv(ctx context.Context, t *testing.T) (servertest.Connector, func()) {
 	ss := lsprpc.NewStreamServer(cache.New(nil), false)
-	ts := servertest.NewServer(ctx, ss)
+	ts := servertest.NewPipeServer(ctx, ss)
 	cleanup := func() {
 		ts.Close()
 	}
 	return ts, cleanup
 }
 
-func (r *Runner) sharedEnv(ctx context.Context, t *testing.T) (*servertest.Server, func()) {
+func (r *Runner) sharedEnv(ctx context.Context, t *testing.T) (servertest.Connector, func()) {
 	return r.ts, func() {}
 }
 
-func (r *Runner) forwardedEnv(ctx context.Context, t *testing.T) (*servertest.Server, func()) {
+func (r *Runner) forwardedEnv(ctx context.Context, t *testing.T) (servertest.Connector, func()) {
 	forwarder := lsprpc.NewForwarder(r.ts.Addr, false)
-	ts2 := servertest.NewServer(ctx, forwarder)
+	ts2 := servertest.NewTCPServer(ctx, forwarder)
 	cleanup := func() {
 		ts2.Close()
 	}
@@ -134,7 +134,7 @@ type Env struct {
 	// but they are available if needed.
 	W      *fake.Workspace
 	E      *fake.Editor
-	Server *servertest.Server
+	Server servertest.Connector
 
 	// mu guards the fields below, for the purpose of checking conditions on
 	// every change to diagnostics.
@@ -154,7 +154,7 @@ type diagnosticCondition struct {
 
 // NewEnv creates a new test environment using the given workspace and gopls
 // server.
-func NewEnv(ctx context.Context, t *testing.T, ws *fake.Workspace, ts *servertest.Server) *Env {
+func NewEnv(ctx context.Context, t *testing.T, ws *fake.Workspace, ts servertest.Connector) *Env {
 	t.Helper()
 	conn := ts.Connect(ctx)
 	editor, err := fake.NewConnectedEditor(ctx, ws, conn)
