
test: deflake tests

pull/1083/head
Steven Allen, 4 years ago
commit a6747e5b72

Changed files:
1. p2p/host/basic/basic_host_test.go (33 lines changed)
2. p2p/net/mock/mock_notif_test.go (11 lines changed)
3. p2p/protocol/identify/id_glass_test.go (19 lines changed)
4. p2p/protocol/identify/id_test.go (23 lines changed)
5. p2p/protocol/identify/obsaddr_test.go (8 lines changed)

p2p/host/basic/basic_host_test.go (33 lines changed)

@@ -202,11 +202,11 @@ func TestHostAddrsFactory(t *testing.T) {
t.Fatalf("expected %s, got %s", maddr.String(), addrs[0].String())
}
var err error
h.autoNat, err = autonat.New(ctx, h, autonat.WithReachability(network.ReachabilityPublic))
autoNat, err := autonat.New(ctx, h, autonat.WithReachability(network.ReachabilityPublic))
if err != nil {
t.Fatalf("should be able to attach autonat: %v", err)
}
h.SetAutoNat(autoNat)
addrs = h.Addrs()
if len(addrs) != 1 {
t.Fatalf("didn't expect change in returned addresses.")
@@ -283,7 +283,7 @@ func assertWait(t *testing.T, c chan protocol.ID, exp protocol.ID) {
select {
case proto := <-c:
if proto != exp {
t.Fatal("should have connected on ", exp)
t.Fatalf("should have connected on %s, got %s", exp, proto)
}
case <-time.After(time.Second * 5):
t.Fatal("timeout waiting for stream")
@@ -309,6 +309,10 @@ func TestHostProtoPreference(t *testing.T) {
s.Close()
}
// Prevent pushing identify information so this test works.
h2.RemoveStreamHandler(identify.IDPush)
h2.RemoveStreamHandler(identify.IDDelta)
h1.SetStreamHandler(protoOld, handler)
s, err := h2.NewStream(ctx, h1.ID(), protoMinor, protoNew, protoOld)
@@ -344,8 +348,6 @@ func TestHostProtoPreference(t *testing.T) {
t.Fatal(err)
}
// XXX: This is racy now that we push protocol updates. If this tests
// fails, try allowing both protoOld and protoMinor.
assertWait(t, connectedOn, protoOld)
s2.Close()
@@ -399,6 +401,10 @@ func TestHostProtoPreknowledge(t *testing.T) {
h1.SetStreamHandler("/super", handler)
// Prevent pushing identify information so this test actually _uses_ the super protocol.
h2.RemoveStreamHandler(identify.IDPush)
h2.RemoveStreamHandler(identify.IDDelta)
h2pi := h2.Peerstore().PeerInfo(h2.ID())
if err := h1.Connect(ctx, h2pi); err != nil {
t.Fatal(err)
@@ -507,7 +513,7 @@ func TestProtoDowngrade(t *testing.T) {
}
s.Close()
h1.Network().ConnsToPeer(h2.ID())[0].Close()
h1.Network().ClosePeer(h2.ID())
time.Sleep(time.Millisecond * 50) // allow notifications to propagate
h1.RemoveStreamHandler("/testing/1.0.0")
@@ -526,16 +532,16 @@ func TestProtoDowngrade(t *testing.T) {
t.Fatal(err)
}
if s2.Protocol() != "/testing" {
t.Fatal("shoould have gotten /testing")
}
_, err = s2.Write(nil)
if err != nil {
t.Fatal(err)
}
assertWait(t, connectedOn, "/testing")
if s2.Protocol() != "/testing" {
t.Fatal("shoould have gotten /testing")
}
s2.Close()
}
@@ -657,18 +663,19 @@ func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
ctx := context.Background()
taddrs := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")}
starting := make(chan struct{})
h := New(swarmt.GenSwarm(t, ctx), AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
<-starting
return taddrs
}))
defer h.Close()
sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{})
close(starting)
if err != nil {
t.Error(err)
}
defer sub.Close()
// wait for the host background thread to start
time.Sleep(1 * time.Second)
expected := event.EvtLocalAddressesUpdated{
Diffs: true,
@@ -837,7 +844,7 @@ func waitForAddrChangeEvent(ctx context.Context, sub event.Subscription, t *test
return evt.(event.EvtLocalAddressesUpdated)
case <-ctx.Done():
t.Fatal("context should not have cancelled")
case <-time.After(2 * time.Second):
case <-time.After(5 * time.Second):
t.Fatal("timed out waiting for address change event")
}
}
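
The TestAddrChangeImmediatelyIfAddressNonEmpty hunk above deflakes the test by holding the AddrsFactory on a `starting` channel until the event-bus subscription is in place, then closing the channel. A minimal sketch of that gating pattern, with hypothetical newHost/subscribe callbacks standing in for the real host and event-bus setup:

package sketch

// gateAddrsFactory illustrates the ordering trick: the address callback
// blocks on `starting` until the test closes it, so the host cannot
// publish its first address event before the subscription exists.
func gateAddrsFactory(newHost func(factory func() []string) error, subscribe func() error) error {
    starting := make(chan struct{})

    factory := func() []string {
        <-starting // held back until test setup is complete
        return []string{"/ip4/1.2.3.4/tcp/1234"}
    }

    if err := newHost(factory); err != nil {
        return err
    }
    if err := subscribe(); err != nil {
        return err
    }
    close(starting) // release the factory; the first event can now be observed
    return nil
}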

p2p/net/mock/mock_notif_test.go (11 lines changed)

@@ -139,8 +139,15 @@ func TestNotifications(t *testing.T) {
}
for _, n1 := range notifiees {
// Avoid holding this lock while waiting, otherwise we can deadlock.
streamStateCopy := map[network.Stream]chan struct{}{}
n1.streamState.Lock()
for str1, ch1 := range n1.streamState.m {
for str, ch := range n1.streamState.m {
streamStateCopy[str] = ch
}
n1.streamState.Unlock()
for str1, ch1 := range streamStateCopy {
<-ch1
str2 := StreamComplement(str1)
n2 := notifiees[str1.Conn().RemotePeer()]
@@ -151,8 +158,6 @@ func TestNotifications(t *testing.T) {
<-ch2
}
n1.streamState.Unlock()
}
}
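
The TestNotifications change above avoids a deadlock by copying the stream map while holding the lock and then waiting on the copied channels with the lock released. The same pattern in isolation, a sketch with hypothetical names rather than the mocknet types:

package sketch

import "sync"

// waiters pairs a mutex with a map of channels that other goroutines
// close when their stream notifications have fired.
type waiters struct {
    mu sync.Mutex
    m  map[string]chan struct{}
}

// waitAll snapshots the map under the lock, then blocks on the copied
// channels with the lock released, so goroutines that need the lock to
// signal completion are never blocked by the waiter.
func (w *waiters) waitAll() {
    w.mu.Lock()
    snapshot := make(map[string]chan struct{}, len(w.m))
    for k, ch := range w.m {
        snapshot[k] = ch
    }
    w.mu.Unlock()

    for _, ch := range snapshot {
        <-ch // safe: the lock is no longer held here
    }
}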

p2p/protocol/identify/id_glass_test.go (19 lines changed)

@@ -28,12 +28,17 @@ func TestFastDisconnect(t *testing.T) {
sync := make(chan struct{})
target.SetStreamHandler(ID, func(s network.Stream) {
// Wait till the stream is setup on both sides.
<-sync
select {
case <-sync:
case <-ctx.Done():
return
}
// Kill the connection, and make sure we're completely disconnected.
s.Conn().Close()
for target.Network().Connectedness(s.Conn().RemotePeer()) == network.Connected {
// wait till we're disconnected.
// let something else run
time.Sleep(time.Millisecond)
}
// Now try to handle the response.
// This should not block indefinitely, or panic, or anything like that.
@@ -42,7 +47,11 @@ func TestFastDisconnect(t *testing.T) {
ids.sendIdentifyResp(s)
// Ok, allow the outer test to continue.
<-sync
select {
case <-sync:
case <-ctx.Done():
return
}
})
source := blhost.NewBlankHost(swarmt.GenSwarm(t, ctx))
@@ -59,13 +68,15 @@ func TestFastDisconnect(t *testing.T) {
select {
case sync <- struct{}{}:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
s.Reset()
select {
case sync <- struct{}{}:
case <-ctx.Done():
t.Fatal(ctx.Err())
}
// Make sure we didn't timeout anywhere.
// double-check to make sure we didn't actually timeout somewhere.
if ctx.Err() != nil {
t.Fatal(ctx.Err())
}
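
The TestFastDisconnect changes replace bare `<-sync` receives with a select that also watches `ctx.Done()`, so neither the stream handler nor the test body can block forever if the other side gives up. A generic sketch of that pattern, with the channel and context passed in:

package sketch

import "context"

// recvOrCancel blocks until a signal arrives on sync or ctx is
// cancelled; it reports whether the signal was actually received.
// Using this instead of a bare `<-sync` keeps a test goroutine from
// hanging past the test's deadline.
func recvOrCancel(ctx context.Context, sync <-chan struct{}) bool {
    select {
    case <-sync:
        return true
    case <-ctx.Done():
        return false // test is shutting down; bail out instead of leaking
    }
}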

p2p/protocol/identify/id_test.go (23 lines changed)

@@ -9,6 +9,7 @@ import (
"testing"
"time"
detectrace "github.com/ipfs/go-detect-race"
ic "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/event"
"github.com/libp2p/go-libp2p-core/host"
@@ -266,6 +267,10 @@ func emitAddrChangeEvt(t *testing.T, h host.Host) {
// this is because it used to be concurrent. Now, Dial wait till the
// id service is done.
func TestIDService(t *testing.T) {
// This test is highly timing dependent, waiting on timeouts/expiration.
if detectrace.WithRace() {
t.Skip("skipping highly timing dependent test when race detector is enabled")
}
oldTTL := peerstore.RecentlyConnectedAddrTTL
peerstore.RecentlyConnectedAddrTTL = time.Second
defer func() {
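
The TestIDService hunk above skips the test under the race detector because its TTL-driven timing becomes unreliable when everything runs slower with -race. The same guard factored into a helper, a sketch using the go-detect-race module imported earlier in this file:

package sketch

import (
    "testing"

    detectrace "github.com/ipfs/go-detect-race"
)

// skipIfRace skips timing-sensitive tests when the binary was built
// with -race, where tight TTL windows are routinely missed.
func skipIfRace(t *testing.T) {
    t.Helper()
    if detectrace.WithRace() {
        t.Skip("skipping highly timing dependent test when race detector is enabled")
    }
}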
@@ -605,12 +610,12 @@ func TestIdentifyPushOnAddrChange(t *testing.T) {
h2pi := h2.Peerstore().PeerInfo(h2p)
require.NoError(t, h1.Connect(ctx, h2pi))
// h1 should immediately see a connection from h2
require.Len(t, h1.Network().ConnsToPeer(h2p), 1)
require.NotEmpty(t, h1.Network().ConnsToPeer(h2p))
// wait for h2 to Identify itself so we are sure h2 has seen the connection.
ids1.IdentifyConn(h1.Network().ConnsToPeer(h2p)[0])
// h2 should now see the connection and we should wait for h1 to Identify itself to h2.
require.Len(t, h2.Network().ConnsToPeer(h1p), 1)
require.NotEmpty(t, h2.Network().ConnsToPeer(h1p))
ids2.IdentifyConn(h2.Network().ConnsToPeer(h1p)[0])
testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p))
@@ -949,12 +954,12 @@ func TestLargePushMessage(t *testing.T) {
h2pi := h2.Peerstore().PeerInfo(h2p)
require.NoError(t, h1.Connect(ctx, h2pi))
// h1 should immediately see a connection from h2
require.Len(t, h1.Network().ConnsToPeer(h2p), 1)
require.NotEmpty(t, h1.Network().ConnsToPeer(h2p))
// wait for h2 to Identify itself so we are sure h2 has seen the connection.
ids1.IdentifyConn(h1.Network().ConnsToPeer(h2p)[0])
// h2 should now see the connection and we should wait for h1 to Identify itself to h2.
require.Len(t, h2.Network().ConnsToPeer(h1p), 1)
require.NotEmpty(t, h2.Network().ConnsToPeer(h1p))
ids2.IdentifyConn(h2.Network().ConnsToPeer(h1p)[0])
testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p))
@@ -1050,7 +1055,7 @@ func TestIdentifyResponseReadTimeout(t *testing.T) {
select {
case ev := <-sub.Out():
fev := ev.(event.EvtPeerIdentificationFailed)
require.EqualError(t, fev.Reason, "i/o deadline reached")
require.Contains(t, fev.Reason.Error(), "deadline")
case <-time.After(5 * time.Second):
t.Fatal("did not receive identify failure event")
}
@@ -1092,8 +1097,12 @@ func TestIncomingIDStreamsTimeout(t *testing.T) {
// remote peer should eventually reset stream
require.Eventually(t, func() bool {
c := h2.Network().ConnsToPeer(h1.ID())[0]
return len(c.GetStreams()) == 0
for _, c := range h2.Network().ConnsToPeer(h1.ID()) {
if len(c.GetStreams()) > 0 {
return false
}
}
return true
}, 1*time.Second, 200*time.Millisecond)
}
}
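
The TestIncomingIDStreamsTimeout hunk stops asserting on a single connection and instead polls every connection to the peer inside require.Eventually. A hedged sketch of that polling shape as a helper (the 5s/100ms budget here is illustrative, not the commit's values):

package sketch

import (
    "testing"
    "time"

    "github.com/libp2p/go-libp2p-core/host"
    "github.com/libp2p/go-libp2p-core/peer"
    "github.com/stretchr/testify/require"
)

// requireNoStreams polls until every connection to p has zero open
// streams, rather than asserting once on whichever connection happens
// to be first in the slice.
func requireNoStreams(t *testing.T, h host.Host, p peer.ID) {
    t.Helper()
    require.Eventually(t, func() bool {
        for _, c := range h.Network().ConnsToPeer(p) {
            if len(c.GetStreams()) > 0 {
                return false // a stream is still open; keep polling
            }
        }
        return true
    }, 5*time.Second, 100*time.Millisecond)
}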

p2p/protocol/identify/obsaddr_test.go (8 lines changed)

@@ -216,9 +216,9 @@ func TestObsAddrSet(t *testing.T) {
// force a refresh.
harness.oas.SetTTL(time.Millisecond * 200)
<-time.After(time.Millisecond * 210)
time.Sleep(time.Millisecond * 300)
if !addrsMatch(harness.oas.Addrs(), []ma.Multiaddr{a1, a2}) {
t.Error("addrs should only have a1, a2")
t.Errorf("addrs should only have %s, %s; have %s", a1, a2, harness.oas.Addrs())
}
// disconnect from all but b5.
@@ -230,7 +230,7 @@ func TestObsAddrSet(t *testing.T) {
}
// wait for all other addresses to time out.
<-time.After(time.Millisecond * 210)
time.Sleep(time.Millisecond * 300)
// Should still have a2
if !addrsMatch(harness.oas.Addrs(), []ma.Multiaddr{a2}) {
@@ -240,7 +240,7 @@ func TestObsAddrSet(t *testing.T) {
harness.host.Network().ClosePeer(pb5)
// wait for all addresses to timeout
<-time.After(time.Millisecond * 400)
time.Sleep(time.Millisecond * 400)
// Should still have a2
if !addrsMatch(harness.oas.Addrs(), nil) {

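The obsaddr changes swap `<-time.After(...)` for a plain `time.Sleep` and, more importantly, widen the slack over the 200ms TTL from 10ms to roughly 100ms so a slow CI machine does not race the expiry. A tiny helper capturing the idea; the 50% margin is an illustrative choice, not taken from the commit:

package sketch

import "time"

// sleepPastTTL waits comfortably longer than ttl so an assertion that
// depends on entries expiring does not race their garbage collection.
func sleepPastTTL(ttl time.Duration) {
    time.Sleep(ttl + ttl/2) // 50% slack; widen further for slow CI machines
}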