我们编写了最简单的TCP服务器(带有少量日志记录)来检查内存占用(参见下面的tcp-server.go )
服务器只是接受连接,什么也不做。它运行在带有go1.3 linux/amd64版本的Ubuntu 12.04.4 LTS服务器(内核3.2.0-61-generic)上。
在本例中,附加基准测试程序(pulse.go)创建10k个连接,在30秒后断开它们,重复该循环三次,然后连续重复1k个连接/断开的小脉冲。用于测试的命令是./pulse -big=10000 -bs=30。
第一个附图是在客户端数量每变化500时通过记录runtime.ReadMemStats获得的,第二个附图是服务器进程的“top”所看到的RES内存大小。
服务器启动时只有可以忽略不计的1.6KB内存。然后,内存由10k连接的“大”脉冲设置为~60MB (如top命令所示),或约为16MB的“SystemMemory”(如ReadMemStats所示)。正如预期的那样,当10K脉冲结束时,正在使用的内存下降,最终程序开始将内存释放回操作系统,如灰色的“已释放内存”行所示。
问题是系统内存(相应地,“top”看到的RES内存)从来没有显著下降(尽管它下降了一点,如第二张图所示)。
我们预计在10K脉冲结束后,内存将继续被释放,直到RES大小达到处理每个1k脉冲所需的最小值(即“top”所示的8MB RES和runtime.ReadMemStats报告的2MB in-use)。取而代之的是,RES保持在56MB左右,而in-use内存根本不会从60MB的最高值下降。
我们希望确保偶尔出现尖峰的不规则流量的可扩展性,以及能够在同一机器上运行在不同时间具有尖峰的多个服务器。是否有一种方法可以有效地确保在合理的时间范围内尽可能多地将内存释放回系统?
代码https://gist.github.com/eugene-bulkin/e8d690b4db144f468bc5:
server.go:
package main
import (
"net"
"log"
"runtime"
"sync"
)
var m sync.Mutex
var num_clients = 0
var cycle = 0
func printMem() {
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
log.Printf("Cycle #%3d: %5d clients | System: %8d Inuse: %8d Released: %8d Objects: %6d\n", cycle, num_clients, ms.HeapSys, ms.HeapInuse, ms.HeapReleased, ms.HeapObjects)
}
func handleConnection(conn net.Conn) {
//log.Println("Accepted connection:", conn.RemoteAddr())
m.Lock()
num_clients++
if num_clients % 500 == 0 {
printMem()
}
m.Unlock()
buffer := make([]byte, 256)
for {
_, err := conn.Read(buffer)
if err != nil {
//log.Println("Lost connection:", conn.RemoteAddr())
err := conn.Close()
if err != nil {
log.Println("Connection close error:", err)
}
m.Lock()
num_clients--
if num_clients % 500 == 0 {
printMem()
}
if num_clients == 0 {
cycle++
}
m.Unlock()
break
}
}
}
func main() {
printMem()
cycle++
listener, err := net.Listen("tcp", ":3033")
if err != nil {
log.Fatal("Could not listen.")
}
for {
conn, err := listener.Accept()
if err != nil {
log.Println("Could not listen to client:", err)
continue
}
go handleConnection(conn)
}
}
pulse.go:
package main
import (
"flag"
"net"
"sync"
"log"
"time"
)
var (
numBig = flag.Int("big", 4000, "Number of connections in big pulse")
bigIters = flag.Int("i", 3, "Number of iterations of big pulse")
bigSep = flag.Int("bs", 5, "Number of seconds between big pulses")
numSmall = flag.Int("small", 1000, "Number of connections in small pulse")
smallSep = flag.Int("ss", 20, "Number of seconds between small pulses")
linger = flag.Int("l", 4, "How long connections should linger before being disconnected")
)
var m sync.Mutex
var active_conns = 0
var connections = make(map[net.Conn] bool)
func pulse(n int, linger int) {
var wg sync.WaitGroup
log.Printf("Connecting %d client(s)...\n", n)
for i := 0; i < n; i++ {
wg.Add(1)
go func() {
m.Lock()
defer m.Unlock()
defer wg.Done()
active_conns++
conn, err := net.Dial("tcp", ":3033")
if err != nil {
log.Panicln("Unable to connect: ", err)
return
}
connections[conn] = true
}()
}
wg.Wait()
if len(connections) != n {
log.Fatalf("Unable to connect all %d client(s).\n", n)
}
log.Printf("Connected %d client(s).\n", n)
time.Sleep(time.Duration(linger) * time.Second)
for conn := range connections {
active_conns--
err := conn.Close()
if err != nil {
log.Panicln("Unable to close connection:", err)
conn = nil
continue
}
delete(connections, conn)
conn = nil
}
if len(connections) > 0 {
log.Fatalf("Unable to disconnect all %d client(s) [%d remain].\n", n, len(connections))
}
log.Printf("Disconnected %d client(s).\n", n)
}
func main() {
flag.Parse()
for i := 0; i < *bigIters; i++ {
pulse(*numBig, *linger)
time.Sleep(time.Duration(*bigSep) * time.Second)
}
for {
pulse(*numSmall, *linger)
time.Sleep(time.Duration(*smallSep) * time.Second)
}
}
https://stackoverflow.com/questions/24376817
复制相似问题