
Chore: unexported globals with _

Follow Uber's Go style guide: prefix unexported package-level globals with an underscore.

Ref: pull/76/head
Author: xjasonlyu, 4 years ago
Commit: b54d9936f9

Changed files:
  1. stats/connections.go (2 changed lines)
  2. stats/server.go (6 changed lines)
  3. tunnel/tunnel.go (16 changed lines)
  4. tunnel/udp.go (14 changed lines)
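
For context, the rule being applied is the Uber Go style guide convention that unexported top-level vars and consts carry a leading underscore, so that a reference inside a function is visibly a package global rather than a local. A minimal sketch of the shape, with hypothetical names that are not from this repository:

package example

import "time"

// Per the Uber Go style guide, unexported package-level globals are
// prefixed with _ so their scope is obvious at every use site.
var _defaultTimeout = 30 * time.Second // hypothetical global, not from this repo

func dialTimeout() time.Duration {
	// The underscore signals package state rather than a local variable.
	return _defaultTimeout
}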

stats/connections.go (2 changed lines)

@@ -31,7 +31,7 @@ func getConnections(w http.ResponseWriter, r *http.Request) {
 		return
 	}
-	conn, err := upgrader.Upgrade(w, r, nil)
+	conn, err := _upgrader.Upgrade(w, r, nil)
 	if err != nil {
 		return
 	}

stats/server.go (6 changed lines)

@@ -19,7 +19,7 @@ import (
 )

 var (
-	upgrader = websocket.Upgrader{
+	_upgrader = websocket.Upgrader{
 		CheckOrigin: func(r *http.Request) bool {
 			return true
 		},
@@ -114,7 +114,7 @@ func getLogs(w http.ResponseWriter, r *http.Request) {
 	var wsConn *websocket.Conn
 	if websocket.IsWebSocketUpgrade(r) {
-		wsConn, err = upgrader.Upgrade(w, r, nil)
+		wsConn, err = _upgrader.Upgrade(w, r, nil)
 		if err != nil {
 			return
 		}
@@ -163,7 +163,7 @@ func traffic(w http.ResponseWriter, r *http.Request) {
 	var wsConn *websocket.Conn
 	if websocket.IsWebSocketUpgrade(r) {
 		var err error
-		wsConn, err = upgrader.Upgrade(w, r, nil)
+		wsConn, err = _upgrader.Upgrade(w, r, nil)
 		if err != nil {
 			return
 		}

tunnel/tunnel.go (16 changed lines)

@@ -15,9 +15,9 @@ const (
 )

 var (
-	tcpQueue      = make(chan core.TCPConn) /* unbuffered */
-	udpQueue      = make(chan core.UDPPacket, maxUDPQueueSize)
-	numUDPWorkers = max(runtime.NumCPU(), 4 /* at least 4 workers */)
+	_tcpQueue      = make(chan core.TCPConn) /* unbuffered */
+	_udpQueue      = make(chan core.UDPPacket, maxUDPQueueSize)
+	_numUDPWorkers = max(runtime.NumCPU(), 4 /* at least 4 workers */)
 )

 func init() {
@@ -26,13 +26,13 @@ func init() {
 // Add adds tcpConn to tcpQueue.
 func Add(conn core.TCPConn) {
-	tcpQueue <- conn
+	_tcpQueue <- conn
 }

 // AddPacket adds udpPacket to udpQueue.
 func AddPacket(packet core.UDPPacket) {
 	select {
-	case udpQueue <- packet:
+	case _udpQueue <- packet:
 	default:
 		log.Warnf("queue is currently full, packet will be dropped")
 		packet.Drop()
@@ -47,8 +47,8 @@ func max(a, b int) int {
 }

 func process() {
-	for i := 0; i < numUDPWorkers; i++ {
-		queue := udpQueue
+	for i := 0; i < _numUDPWorkers; i++ {
+		queue := _udpQueue
 		go func() {
 			for packet := range queue {
 				handleUDP(packet)
@@ -56,7 +56,7 @@
 		}()
 	}

-	for conn := range tcpQueue {
+	for conn := range _tcpQueue {
 		go handleTCP(conn)
 	}
 }
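
The tunnel/tunnel.go hunks above rename the globals behind the dispatch loop: an unbuffered channel for TCP connections, and a bounded channel drained by a fixed pool of worker goroutines for UDP, with AddPacket dropping on overflow instead of blocking. A minimal standalone sketch of that pattern, using hypothetical tcpConn/udpPacket types, an assumed queue capacity, and a parameterized process signature that the real file does not have:

package tunnelsketch

import "log"

// Hypothetical stand-ins for core.TCPConn and core.UDPPacket; only the
// queueing pattern from the diff is reproduced here.
type tcpConn struct{}
type udpPacket struct{}

const _maxUDPQueueSize = 1 << 9 // assumed capacity for this sketch

var (
	// unbuffered: a send blocks until a handler goroutine receives the conn
	_tcpQueue = make(chan tcpConn)
	// bounded: AddPacket drops packets instead of blocking when this is full
	_udpQueue = make(chan udpPacket, _maxUDPQueueSize)
)

// AddPacket mirrors the non-blocking enqueue in the hunk above: if the
// buffered channel is full, the packet is dropped rather than stalling
// the caller that delivered it.
func AddPacket(p udpPacket) {
	select {
	case _udpQueue <- p:
	default:
		log.Println("queue is currently full, packet will be dropped")
	}
}

// process mirrors the worker layout: a fixed pool drains the UDP queue,
// while every TCP connection gets its own goroutine.
func process(numUDPWorkers int, handleTCP func(tcpConn), handleUDP func(udpPacket)) {
	for i := 0; i < numUDPWorkers; i++ {
		go func() {
			for p := range _udpQueue {
				handleUDP(p)
			}
		}()
	}
	for c := range _tcpQueue {
		go handleTCP(c)
	}
}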

tunnel/udp.go (14 changed lines)

@@ -21,9 +21,9 @@ const (
 )

 var (
-	// natTable uses source udp packet information
+	// _natTable uses source udp packet information
 	// as key to store destination udp packetConn.
-	natTable = nat.NewTable()
+	_natTable = nat.NewTable()
 )

 func newUDPTracker(conn net.PacketConn, metadata *M.Metadata) net.PacketConn {
@@ -46,7 +46,7 @@ func handleUDP(packet core.UDPPacket) {
 	key := generateNATKey(metadata)
 	handle := func(drop bool) bool {
-		pc := natTable.Get(key)
+		pc := _natTable.Get(key)
 		if pc != nil {
 			handleUDPToRemote(packet, pc, metadata /* as net.Addr */, drop)
 			return true
@@ -59,7 +59,7 @@ func handleUDP(packet core.UDPPacket) {
 	}
 	lockKey := key + "-lock"
-	cond, loaded := natTable.GetOrCreateLock(lockKey)
+	cond, loaded := _natTable.GetOrCreateLock(lockKey)
 	go func() {
 		if loaded {
 			cond.L.Lock()
@@ -70,7 +70,7 @@ func handleUDP(packet core.UDPPacket) {
 		}
 		defer func() {
-			natTable.Delete(lockKey)
+			_natTable.Delete(lockKey)
 			cond.Broadcast()
 		}()
@@ -95,12 +95,12 @@ func handleUDP(packet core.UDPPacket) {
 		go func() {
 			defer pc.Close()
 			defer packet.Drop()
-			defer natTable.Delete(key)
+			defer _natTable.Delete(key)
 			handleUDPToLocal(packet, pc, udpSessionTimeout)
 		}()

-		natTable.Set(key, pc)
+		_natTable.Set(key, pc)
 		handle(false /* drop */)
 	}()
 }
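
In tunnel/udp.go the renamed _natTable is the UDP NAT map: handleUDP keys it by the packet's source metadata, reuses a cached outbound net.PacketConn when one exists, and otherwise holds a per-key "-lock" condition variable while dialing a new one. A minimal sketch of just the Get/Set/Delete surface used in these hunks, assuming a string-keyed map guarded by a mutex; the project's real nat.Table also provides the GetOrCreateLock helper seen above, which this sketch omits:

package natsketch

import (
	"net"
	"sync"
)

// Table is a simplified stand-in for the keyed NAT map the diff renames to
// _natTable: source-address key -> outbound net.PacketConn.
type Table struct {
	mu    sync.Mutex
	conns map[string]net.PacketConn
}

func NewTable() *Table {
	return &Table{conns: make(map[string]net.PacketConn)}
}

// Get returns the cached PacketConn for key, or nil if none exists.
func (t *Table) Get(key string) net.PacketConn {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.conns[key]
}

// Set stores the outbound PacketConn for key.
func (t *Table) Set(key string, pc net.PacketConn) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.conns[key] = pc
}

// Delete removes the entry for key, typically when the session times out.
func (t *Table) Delete(key string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	delete(t.conns, key)
}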
