如何重用侦听器/连接? Golang

时间:2014-12-12 07:00:57

标签: go network-programming

我正在尝试通过第三方服务器(也就是反向连接)将NAT后面的计算机连接到互联网。我在监听两个端口:一个端口(dstNet)接受 NAT 后面机器的连接,另一个端口接受互联网客户端的连接。问题是我不知道如何处理 NAT 后面的机器断开连接的情况。即使机器重新连接,流量也不会被发送/写入……我只得到日志 “[DEBUG] socks: Copied 0 bytes to client”,这当然是我自己打印的警告。下面是代码。虽然很长,但我找不到可以删减的地方。

// Make a bridge between dstNet which is
// usually behind NAT and srcNet which is usually a client
// which wants to route the traffic though the NAT machine.
package main

import (
    "bufio"
    "errors"
    log "github.com/golang/glog"
    "io"
    "net"
    "time"
)

const (
    // listen on the dstNet so that we can
    // create a connection with the NAT client
    dstNet = "0.0.0.0:9000"
    // listen on srcNet so that we can get traffic
    // to forward to dstNet
    srcNet = "0.0.0.0:9001"
)

var errCh = make(chan error, 1)

// make a channel to send the reverse connections
var lrCh = make(chan net.Conn, 1)
// listenDst listens on dstNet for reverse connections from the machine
// behind NAT and hands each accepted connection to main over the lrCh
// channel. A failed Listen or Accept is reported on errCh (which the
// watcher goroutine in main turns into a panic) and the loop exits.
func listenDst() {
    // Listen on the dstNet
    lr, err := net.Listen("tcp", dstNet)
    if err != nil {
        log.Error(err)
        errCh <- err
        return
    }
    // accept the connection
    for {
        lrConn, err := lr.Accept()
        if err != nil {
            log.Error(err)
            errCh <- err
            return
        }
            // NOTE(review): this is debug output, not an error; glog's
            // Infof would be the conventional level here.
            log.Errorf("sent connection")
        //  lrConn.SetReadDeadline(time.Now().Add(10 * time.Second))
            // lrCh is buffered with capacity 1; a second pending
            // connection blocks here until main consumes the previous one.
            lrCh <- lrConn

    }

}

// main pairs each client connection accepted on srcNet with a reverse
// connection delivered by listenDst on lrCh, then proxies traffic in
// both directions until both copy goroutines report a result.
func main() {

    // Any non-nil error reported on errCh is treated as fatal.
    go func() {
        for err := range errCh {
            if err != nil {
                panic(err)
            }
        }
    }()
    // listen for the nat server
    go listenDst()

    // listen for clients to connect
    l, err := net.Listen("tcp", srcNet)
    if err != nil {
        log.Error(err)
        panic(err)
    }
    // accept the connection
    for {
        conn, err := l.Accept()
        if err != nil {
            log.Error(err)
            panic(err)
        }
        // serve the connection
        go func(conn net.Conn) {
            defer conn.Close()
            bufConn := bufio.NewReader(conn)
            // Blocks until listenDst delivers a reverse connection.
            dst := <-lrCh
            defer dst.Close()

            // Start proxying
            errCh2 := make(chan error, 2)
            go proxy("target", dst, bufConn, errCh2)
            go proxy("client", conn, dst, errCh2)

            // Wait
            // NOTE(review): `err` below assigns to the variable captured
            // from main's Accept loop instead of declaring a local one —
            // racy across concurrently running handler goroutines.
            var ei int
            for err = range errCh2 {
                switch {
                case err != nil && err.Error() == "no byte":
                    log.Error(err)
                case err != nil && err.Error() == "use of closed network connection":
                    // if the connection is closed we restart it.
                    log.Error(err)
                    // BUG() attempt to write again the bytes
                case err != nil:
                    log.Error(err)
                    errCh <- err
                }
                // Each proxy goroutine sends exactly once (errCh2 has
                // capacity 2); after the second message the channel is
                // closed from inside the range so the loop terminates.
                if ei == 1 {
                    log.Errorf("done with errors")
                    close(errCh2)
                }
                ei++

            }
        }(conn)

    }
}

// proxy shuffles data from src to dst and reports the outcome on the
// dedicated errCh2 channel: the copy error if one occurred, an error
// reading "no byte" when nothing was transferred, or nil on success.
func proxy(name string, dst io.Writer, src io.Reader, errCh2 chan error) {
    copied, copyErr := io.Copy(dst, src)
    // Log, then pause briefly. The sleep is jank, but it gives the
    // copy running in the other direction a chance to finish.
    log.Errorf("[DEBUG] socks: Copied %d bytes to %s", copied, name)
    time.Sleep(10 * time.Millisecond)
    // Report the result to the caller.
    if copyErr != nil {
        log.Error(copyErr)
        errCh2 <- copyErr
        return
    }
    if copied < 1 {
        errCh2 <- errors.New("no byte")
        return
    }
    errCh2 <- nil
}

2 个答案:

答案 0 :(得分:2)

错误后唯一可以重用连接的时间是临时条件。

if err, ok := err.(net.Error); ok && err.Temporary() {
}

如果您是在代理 TCP 连接,那么遇到任何其他错误时(此时检查 Temporary 可能都没有太大意义),您都需要丢弃整个连接并重新开始。您无法知道远程服务器处于什么状态、有多少数据包还在传输中或已经丢失;您越是试图挽救连接,只会制造出越难排查的错误。(提示:不要用 sleep 来掩盖并发或时序问题。从长远来看,这只会让问题变得更难解决。)

如果你想引用它,这是一个更简单的代理模式: https://gist.github.com/jbardin/821d08cb64c01c84b81a

// Proxy copies data between srvConn and cliConn in both directions and
// returns only after both halves of the transfer have shut down.
func Proxy(srvConn, cliConn *net.TCPConn) {
    // channels to wait on the close event for each connection
    serverClosed := make(chan struct{}, 1)
    clientClosed := make(chan struct{}, 1)

    go broker(srvConn, cliConn, clientClosed)
    go broker(cliConn, srvConn, serverClosed)

    // wait for one half of the proxy to exit, then trigger a shutdown of the
    // other half by calling CloseRead(). This will break the read loop in the
    // broker and allow us to fully close the connection cleanly without a
    // "use of closed network connection" error.
    var waitFor chan struct{}
    select {
    case <-clientClosed:
        // the client closed first and any more packets from the server aren't
        // useful, so we can optionally SetLinger(0) here to recycle the port
        // faster.
        // NOTE(review): errors from SetLinger/CloseRead are ignored here.
        srvConn.SetLinger(0)
        srvConn.CloseRead()
        waitFor = serverClosed
    case <-serverClosed:
        cliConn.CloseRead()
        waitFor = clientClosed
    }

    // Wait for the other connection to close.
    // This "waitFor" pattern isn't required, but gives us a way to track the
    // connection and ensure all copies terminate correctly; we can trigger
    // stats on entry and deferred exit of this function.
    <-waitFor
}

// This does the actual data transfer between dst and src.
// The broker only closes the Read side: it closes src after the copy
// ends and signals completion on srcClosed.
func broker(dst, src net.Conn, srcClosed chan struct{}) {
    // We can handle errors in a finer-grained manner by inlining io.Copy (it's
    // simple, and we drop the ReaderFrom or WriterTo checks for
    // net.Conn->net.Conn transfers, which aren't needed). This would also let
    // us adjust buffersize.
    _, err := io.Copy(dst, src)

    if err != nil {
        log.Printf("Copy error: %s", err)
    }
    if err := src.Close(); err != nil {
        log.Printf("Close error: %s", err)
    }
    // Proxy passes a channel with capacity 1, so this send never blocks.
    srcClosed <- struct{}{}
}

答案 1 :(得分:0)

事实证明,我必须重启监听器,而不仅仅是关闭连接。我修改了 proxy 函数:如果它无法向 src 写入任何数据(即写入了 0 字节),就重置 dstNet 的监听器。我仍然不确定这是否是正确的做法(在多连接场景下关闭监听器似乎不妥,因为这大概会重置所有连到该地址的客户端连接),但到目前为止这是我能想到的最好的解决办法。

 if n == 0 {
        lrNewCh <- 1
    }

这是所有代码。所有功劳都归功于@JimB

// Make a bridge between dstNet which is
// usually behind NAT and srcNet which is usually a client
// which wants to route the traffic though the NAT machine.
package main

import (
    log "github.com/golang/glog"
    "io"
    "net"
)

// listen on the dstNet so that we can
// create a connection with the NAT client
var dstNet *net.TCPAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 9000}

// listen on srcNet so that we can get traffic
// to forward to dstNet
var srcNet *net.TCPAddr = &net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 9001}

var errCh = make(chan error, 1)

// make a channel to send the reverse connections
var lrCh = make(chan *net.TCPConn, 1)
var lrNewCh = make(chan int, 1)

// listenDst accepts reverse connections from the NAT-side machine on
// dstNet. For every accepted connection it waits for a status request
// on lrNewCh: 0 means "hand the connection to a client" (it is sent on
// lrCh); 1 means the previous bridge went bad, so both the connection
// and the listener are torn down and recreated. Fatal listen errors go
// to errCh, which the watcher goroutine in main turns into a panic.
func listenDst() {
    // Listen on the dstNet
    lr, err := net.ListenTCP("tcp", dstNet)
    if err != nil {
        log.Error(err)
        errCh <- err
        return
    }
    // accept the connection
    for {
        lrConn, err := lr.AcceptTCP()
        if err != nil {
            // BUG FIX: the original only logged here and fell through,
            // later using a nil lrConn; skip this iteration instead.
            log.Error(err)
            continue
        }
        status := <-lrNewCh
        log.Errorf("status request is %v", status)
        if status == 1 {
            log.Errorf("we close and restart the listener and the connection")
            if err = lrConn.Close(); err != nil {
                log.Error(err)
            }
            if err = lr.Close(); err != nil {
                log.Error(err)
            }
            lr, err = net.ListenTCP("tcp", dstNet)
            if err != nil {
                log.Error(err)
                errCh <- err
                return
            }
            // NOTE(review): the connection accepted here is overwritten
            // on the next loop iteration without ever being sent on
            // lrCh — confirm this is the intended handshake.
            lrConn, err = lr.AcceptTCP()
            if err != nil {
                // errCh is fatal (main panics on it), matching the
                // original behavior.
                log.Error(err)
                errCh <- err
                continue
            }
        } else {
            log.Errorf("new connection on its way")
            lrCh <- lrConn
        }
    }
}

// main wires the two halves of the bridge together: it starts the
// reverse-connection listener, then accepts client connections on
// srcNet and pairs each one with a reverse connection from lrCh.
func main() {

    // Any non-nil error reported on errCh is fatal.
    go func() {
        for err := range errCh {
            if err != nil {
                panic(err)
            }
        }
    }()

    // Accept reverse connections from the NAT-side machine.
    go listenDst()

    // Accept client connections to be bridged.
    listener, err := net.ListenTCP("tcp", srcNet)
    if err != nil {
        log.Error(err)
        panic(err)
    }
    for {
        cliConn, err := listener.AcceptTCP()
        if err != nil {
            log.Error(err)
            panic(err)
        }
        // Bridge this client to the next reverse connection.
        go func(cliConn *net.TCPConn) {
            defer cliConn.Close()
            // Request a fresh reverse connection from listenDst
            // (status 0), then wait for it to arrive.
            lrNewCh <- 0
            natConn := <-lrCh
            defer natConn.Close()
            proxy(natConn, cliConn)
        }(cliConn)
    }
}

// proxy copies data between srvConn and cliConn in both directions and
// returns only after both halves of the transfer have shut down.
func proxy(srvConn, cliConn *net.TCPConn) {
    // channels to wait on the close event for each connection
    serverClosed := make(chan struct{}, 1)
    clientClosed := make(chan struct{}, 1)

    go broker(srvConn, cliConn, clientClosed)
    go broker(cliConn, srvConn, serverClosed)

    // wait for one half of the proxy to exit, then trigger a shutdown of the
    // other half by calling CloseRead(). This will break the read loop in the
    // broker and allow us to fully close the connection cleanly without a
    // "use of closed network connection" error.
    var waitFor chan struct{}
    select {
    case <-clientClosed:
        // the client closed first and any more packets from the server aren't
        // useful, so we can optionally SetLinger(0) here to recycle the port
        // faster.
        // NOTE(review): errors from SetLinger/CloseRead are ignored here.
        srvConn.SetLinger(0)
        srvConn.CloseRead()
        waitFor = serverClosed
    case <-serverClosed:
        cliConn.CloseRead()
        waitFor = clientClosed
    }

    // Wait for the other connection to close.
    // This "waitFor" pattern isn't required, but gives us a way to track the
    // connection and ensure all copies terminate correctly; we can trigger
    // stats on entry and deferred exit of this function.
    <-waitFor
}

// This does the actual data transfer between dst and src.
// The broker only closes the Read side: it closes src after the copy
// ends, signals completion on srcClosed, and — when zero bytes were
// copied — asks listenDst (via lrNewCh) to restart its listener.
func broker(dst, src net.Conn, srcClosed chan struct{}) {
    // We can handle errors in a finer-grained manner by inlining io.Copy (it's
    // simple, and we drop the ReaderFrom or WriterTo checks for
    // net.Conn->net.Conn transfers, which aren't needed). This would also let
    // us adjust buffersize.
    n, err := io.Copy(dst, src)
    log.Errorf(" %v bytes copied", n)
    if err != nil {
        log.Errorf("Copy error: %s", err)
        // errCh <- err
    }
    if err := src.Close(); err != nil {
        // NOTE(review): sending on errCh makes a Close failure fatal —
        // the watcher goroutine in main panics on any non-nil error.
        log.Errorf("Close error: %s", err)
        errCh <- err
    }
    // A zero-byte copy is taken to mean the reverse connection is dead;
    // status 1 tells listenDst to tear down and recreate its listener.
    if n == 0 {
        lrNewCh <- 1
    }
    srcClosed <- struct{}{}

}