Go's performance is famously high,

countless programmers have bowed in admiration.

In the cloud era it holds its own ground;

could Java really still be better?

Joking aside: why does Go perform so well when you build a network server with it?

Read on.

Go's excellent performance in building network services comes mainly from the I/O multiplexing technology used in the net package, which, combined with Go's goroutines, forms its characteristic goroutine-per-connection model.

Here is a simple TCP server; we'll use it to drive the analysis.

package main

import (
	"io"
	"log"
	"net"
)

func main() {
	// Listen on port 8080
	ln, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			continue
		}
		// One goroutine per accepted connection
		go read(conn)
	}
}

// read drains the connection and discards everything it receives.
func read(conn net.Conn) {
	io.Copy(io.Discard, conn)
}

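To watch the goroutine-per-connection model from the other side, here is a throwaway client sketch of my own (not from the original trace) that opens a batch of concurrent connections to the server above; each one causes the server to spawn its own reading goroutine. The count of 100 and the address 127.0.0.1:8080 are arbitrary assumptions.

package main

import (
	"fmt"
	"net"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	// Open 100 concurrent connections; the server above answers each
	// Accept by starting one reading goroutine.
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			conn, err := net.Dial("tcp", "127.0.0.1:8080")
			if err != nil {
				fmt.Println("dial:", err)
				return
			}
			defer conn.Close()
			fmt.Fprintf(conn, "hello from client %d\n", id)
		}(i)
	}
	wg.Wait()
}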

Starting from net.Listen and working down the call chain, we arrive at the following code.

// ------------------ net/sock_posix.go ------------------

// socket returns a network file descriptor that is ready for
// asynchronous I/O using the network poller.
func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only bool, laddr, raddr sockaddr, ctrlFn func(string, string, syscall.RawConn) error) (fd *netFD, err error) {
	// The underlying layer calls the operating system's socket() and
	// returns the socket file descriptor
	s, err := sysSocket(family, sotype, proto)
	if err != nil {
		return nil, err
	}
	// Set socket options
	if err = setDefaultSockopts(s, family, sotype, ipv6only); err != nil {
		poll.CloseFunc(s)
		return nil, err
	}
	// Create a netFD, which contains a poll.FD
	if fd, err = newFD(s, family, sotype, net); err != nil {
		poll.CloseFunc(s)
		return nil, err
	}

	if laddr != nil && raddr == nil {
		switch sotype {
		case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET:
			// The listener path: listenTCP sets sotype to syscall.SOCK_STREAM
			if err := fd.listenStream(laddr, listenerBacklog(), ctrlFn); err != nil {
				fd.Close()
				return nil, err
			}
			return fd, nil
		case syscall.SOCK_DGRAM:
			if err := fd.listenDatagram(laddr, ctrlFn); err != nil {
				fd.Close()
				return nil, err
			}
			return fd, nil
		}
	}
	// Client (dial) mode ends up here
	if err := fd.dial(ctx, laddr, raddr, ctrlFn); err != nil {
		fd.Close()
		return nil, err
	}
	return fd, nil
}


func (fd *netFD) listenStream(laddr sockaddr, backlog int, ctrlFn func(string, string, syscall.RawConn) error) error {
	...
	// Bind the socket to the local address
	if err = syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
		return os.NewSyscallError("bind", err)
	}
	// Invoke the operating system's listen
	if err = listenFunc(fd.pfd.Sysfd, backlog); err != nil {
		return os.NewSyscallError("listen", err)
	}
	// Initialize poll.FD, which registers this fd with the runtime poller
	if err = fd.init(); err != nil {
		return err
	}
	lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
	fd.setAddr(fd.addrFunc()(lsa), nil)
	return nil
}




// ------------------ net/fd_unix.go ------------------

func (fd *netFD) init() error {
	// netFD's initialization only calls pfd's Init, so poll.FD is where the
	// real work happens. Let's open it up and take a look.
	return fd.pfd.Init(fd.net, true)
}

// ------------------ internal/poll/fd_unix.go ------------------

func (fd *FD) Init(net string, pollable bool) error {
	// We don't actually care about the various network types.
	if net == "file" {
		fd.isFile = true
	}
	if !pollable {
		fd.isBlocking = 1
		return nil
	}
	// pd is a pollDesc, the wrapper around the runtime's poll descriptor.
	// Let's see how it is initialized.
	err := fd.pd.init(fd)
	if err != nil {
		// If we could not initialize the runtime poller,
		// assume we are using blocking mode.
		fd.isBlocking = 1
	}
	return err
}




//------------------ internal/poll/fd_poll_runtime.go ------------------

func runtime_pollServerInit()
func runtime_pollOpen(fd uintptr) (uintptr, int)
func runtime_pollClose(ctx uintptr)
func runtime_pollWait(ctx uintptr, mode int) int
func runtime_pollWaitCanceled(ctx uintptr, mode int) int
func runtime_pollReset(ctx uintptr, mode int) int
func runtime_pollSetDeadline(ctx uintptr, d int64, mode int)
func runtime_pollUnblock(ctx uintptr)
func runtime_isPollServerDescriptor(fd uintptr) bool

type pollDesc struct {
	runtimeCtx uintptr
}

var serverInit sync.Once

func (pd *pollDesc) init(fd *FD) error {
	// sync.Once ensures the poller is initialized only once globally
	serverInit.Do(runtime_pollServerInit)
	// Register the fd with the runtime poller; ctx is its runtime-side context
	ctx, errno := runtime_pollOpen(uintptr(fd.Sysfd))
	....
}


// ------------------ runtime/netpoll.go ------------------

// Here poll_runtime_pollOpen is bound to runtime_pollOpen
//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	...
	var errno int32
	// netpollopen has a different implementation per operating system: epoll on Linux, kqueue on BSD/macOS
	errno = netpollopen(fd, pd)
	return pd, int(errno)
}

//------------------ runtime/netpoll_epoll.go ------------------

// Taking the epoll implementation: netpollopen calls epoll_ctl, so from here on we
// are interacting with the operating system. The listening socket itself is added to
// epoll here; once registered, accept simply waits for epoll event notifications.
func netpollopen(fd uintptr, pd *pollDesc) int32 {
	var ev epollevent
	ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET
	*(**pollDesc)(unsafe.Pointer(&ev.data)) = pd
	return -epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev)
}

At this point, the TCP server's port is bound and the epoll listener is created. Sorting out the entire path:

net.Listen -> sysListener.listenTCP -> sysSocket -> netFD.listenStream -> syscall.Bind -> poll.pollDesc.init -> runtime_pollServerInit -> runtime_pollOpen 

Over this chain of calls, Go creates a socket descriptor, binds it to the address and port, creates a globally unique poller, and then adds the socket to that poller.
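To make that summary concrete, here is a hedged sketch of roughly the same setup using raw syscalls instead of the net package. It is Linux-only and simplified (the runtime actually registers with edge-triggered flags such as _EPOLLET and stores a pollDesc pointer rather than the fd), and the backlog of 128 is an arbitrary assumption.

package main

import (
	"log"
	"syscall"
)

func main() {
	// socket(): roughly what sysSocket does, a non-blocking TCP socket
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK, syscall.IPPROTO_TCP)
	if err != nil {
		log.Fatal(err)
	}
	// bind() + listen(): what netFD.listenStream performs
	if err := syscall.Bind(fd, &syscall.SockaddrInet4{Port: 8080}); err != nil {
		log.Fatal(err)
	}
	if err := syscall.Listen(fd, 128); err != nil {
		log.Fatal(err)
	}
	// epoll_create1(): what runtime_pollServerInit does, once per process
	epfd, err := syscall.EpollCreate1(0)
	if err != nil {
		log.Fatal(err)
	}
	// epoll_ctl(EPOLL_CTL_ADD): what netpollopen does, registering the listener
	ev := syscall.EpollEvent{Events: syscall.EPOLLIN, Fd: int32(fd)}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, fd, &ev); err != nil {
		log.Fatal(err)
	}
	log.Println("listening on :8080, listener registered with epoll")
}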

Now that initialization is done, it's time to wait for clients to connect.

// ------------------ net/tcpsock.go ------------------

func (ln *TCPListener) accept() (*TCPConn, error) {
	// The key call is still the fd's accept
	fd, err := ln.fd.accept()
	....
}


// ------------------ net/fd_unix.go ------------------

func (fd *netFD) accept() (netfd *netFD, err error) {
	// netFD.accept ultimately calls fd.pfd.Accept, i.e. poll.FD.Accept, continued below
	d, rsa, errcall, err := fd.pfd.Accept()
	if err != nil {
		if errcall != "" {
			err = wrapSyscallError(errcall, err)
		}
		return nil, err
	}
	// After receiving the client's fd, create a netFD object and initialize it.
	// The process is the same as above: initialize the poller and add the fd to it,
	// so client connections and the server listener are managed by the same poller.
	if netfd, err = newFD(d, fd.family, fd.sotype, fd.net); err != nil {
		poll.CloseFunc(d)
		return nil, err
	}
	if err = netfd.init(); err != nil {
		netfd.Close()
		return nil, err
	}
	lsa, _ := syscall.Getsockname(netfd.pfd.Sysfd)
	netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa))
	return netfd, nil
}

// ------------------ internal/poll/fd_unix.go ------------------

// Accept wraps the accept network call.
func (fd *FD) Accept() (int, syscall.Sockaddr, string, error) {
	...
	for {
		// If accept returns syscall.EAGAIN, poll_runtime_pollWait runs to wait
		// for the poller's state to change; once it does, Accept retries and
		// returns the client connection to the caller.
		s, rsa, errcall, err := accept(fd.Sysfd)
		if err == nil {
			return s, rsa, "", err
		}
		switch err {
		case syscall.EINTR:
			continue
		case syscall.EAGAIN:
			// accept above called the system accept on a non-blocking fd,
			// which is why it returned a "try again" error.
			// Our pd is pollable, so instead of spinning we wait for an
			// event notification from the system poller.
			if fd.pd.pollable() {
				if err = fd.pd.waitRead(fd.isFile); err == nil {
					continue
				}
			}
		case syscall.ECONNABORTED:
			// This means that a socket on the listen
			// queue was closed before we Accept()ed it;
			// it's a silly error, so try again.
			continue
		}
		return -1, nil, errcall, err
	}
}

// ------------------ runtime/netpoll.go ------------------
// Check whether the poll fd is ready. From here on it is all interaction with
// the operating system; dig further if you're interested.
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	// As for now only Solaris, illumos, and AIX use level-triggered IO.
	if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" {
		netpollarm(pd, mode)
	}

	for !netpollblock(pd, int32(mode), false) {
		errcode = netpollcheckerr(pd, int32(mode))
		if errcode != pollNoError {
			return errcode
		}
		// Can happen if timeout has fired and unblocked us,
		// but before we had a chance to run, timeout has been reset.
		// Pretend it has not happened and retry.
	}
	return pollNoError
}
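The EAGAIN-then-wait dance in FD.Accept can also be sketched with raw syscalls. Where the real code calls waitRead and the runtime parks the goroutine, this hypothetical acceptLoop (Linux-only, reusing the fd and epfd from the earlier sketch and the same log/syscall imports) simply blocks in epoll_wait:

// acceptLoop mirrors poll.FD.Accept: try a non-blocking accept, and on
// EAGAIN block in epoll_wait until the listener becomes readable.
func acceptLoop(fd, epfd int) {
	events := make([]syscall.EpollEvent, 8)
	for {
		nfd, _, err := syscall.Accept4(fd, syscall.SOCK_NONBLOCK)
		if err == nil {
			log.Println("accepted fd", nfd)
			continue
		}
		switch err {
		case syscall.EINTR:
			// Interrupted by a signal; just retry
			continue
		case syscall.EAGAIN:
			// Nothing pending yet: wait for the poller, like waitRead does
			if _, err := syscall.EpollWait(epfd, events, -1); err != nil && err != syscall.EINTR {
				log.Fatal(err)
			}
		default:
			log.Fatal(err)
		}
	}
}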

Through this series of operations, creating the server socket, creating the pollDesc, and adding it to the poller's watch list, Go's underlying network package achieves high performance.

This makes it much easier for us to implement a high-performance web service, WebSocket server, or plain TCP server ourselves. Of course, the performance is relative; if you want to keep squeezing the server, you need to interact with epoll and similar mechanisms directly, eliminating the dedicated goroutine that reads from each connection. A sketch of that approach follows.
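Here is a hedged sketch of that direct-epoll style: one event loop owns every connection, so no per-connection goroutine exists. Everything is hypothetical and Linux-only; listenFd and epfd are assumed to come from the setup sketch above, and the loop discards data just like the io.Copy(io.Discard, conn) server at the top.

// eventLoop handles the listener and all client connections in one goroutine.
func eventLoop(listenFd, epfd int) {
	events := make([]syscall.EpollEvent, 128)
	buf := make([]byte, 4096)
	for {
		n, err := syscall.EpollWait(epfd, events, -1)
		if err != nil {
			if err == syscall.EINTR {
				continue
			}
			log.Fatal(err)
		}
		for i := 0; i < n; i++ {
			fd := int(events[i].Fd)
			if fd == listenFd {
				// Readable listener: accept and register the new connection
				nfd, _, err := syscall.Accept4(listenFd, syscall.SOCK_NONBLOCK)
				if err != nil {
					continue
				}
				ev := syscall.EpollEvent{Events: syscall.EPOLLIN, Fd: int32(nfd)}
				syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, nfd, &ev)
				continue
			}
			// Readable connection: drain and discard, close on EOF or error
			m, err := syscall.Read(fd, buf)
			if err == syscall.EAGAIN {
				continue
			}
			if m <= 0 || err != nil {
				syscall.Close(fd)
			}
		}
	}
}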

My organization of these write-ups still needs work. Keep at it.