server.go

   1// Copyright 2014 The Go Authors. All rights reserved.
   2// Use of this source code is governed by a BSD-style
   3// license that can be found in the LICENSE file.
   4
   5// TODO: turn off the serve goroutine when idle, so
   6// an idle conn only has the readFrames goroutine active. (which could
    7// probably also be optimized to pin less memory in crypto/tls). This
   8// would involve tracking when the serve goroutine is active (atomic
   9// int32 read/CAS probably?) and starting it up when frames arrive,
  10// and shutting it down when all handlers exit. the occasional PING
  11// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
  12// (which is a no-op if already running) and then queue the PING write
  13// as normal. The serve loop would then exit in most cases (if no
  14// Handlers running) and not be woken up again until the PING packet
  15// returns.
  16
   17// TODO (maybe): add a mechanism for Handlers to go into
  18// half-closed-local mode (rw.(io.Closer) test?) but not exit their
  19// handler, and continue to be able to read from the
  20// Request.Body. This would be a somewhat semantic change from HTTP/1
  21// (or at least what we expose in net/http), so I'd probably want to
  22// add it there too. For now, this package says that returning from
  23// the Handler ServeHTTP function means you're both done reading and
  24// done writing, without a way to stop just one or the other.
  25
  26package http2
  27
  28import (
  29	"bufio"
  30	"bytes"
  31	"context"
  32	"crypto/rand"
  33	"crypto/tls"
  34	"errors"
  35	"fmt"
  36	"io"
  37	"log"
  38	"math"
  39	"net"
  40	"net/http"
  41	"net/textproto"
  42	"net/url"
  43	"os"
  44	"reflect"
  45	"runtime"
  46	"strconv"
  47	"strings"
  48	"sync"
  49	"time"
  50
  51	"golang.org/x/net/http/httpguts"
  52	"golang.org/x/net/http2/hpack"
  53	"golang.org/x/net/internal/httpcommon"
  54)
  55
  56const (
  57	prefaceTimeout        = 10 * time.Second
  58	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
  59	handlerChunkWriteSize = 4 << 10
  60	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
  61
  62	// maxQueuedControlFrames is the maximum number of control frames like
  63	// SETTINGS, PING and RST_STREAM that will be queued for writing before
  64	// the connection is closed to prevent memory exhaustion attacks.
  65	maxQueuedControlFrames = 10000
  66)
  67
  68var (
  69	errClientDisconnected = errors.New("client disconnected")
  70	errClosedBody         = errors.New("body closed by handler")
  71	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
  72	errStreamClosed       = errors.New("http2: stream closed")
  73)
  74
  75var responseWriterStatePool = sync.Pool{
  76	New: func() interface{} {
  77		rws := &responseWriterState{}
  78		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
  79		return rws
  80	},
  81}
  82
  83// Test hooks.
  84var (
  85	testHookOnConn        func()
  86	testHookGetServerConn func(*serverConn)
  87	testHookOnPanicMu     *sync.Mutex // nil except in tests
  88	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
  89)
  90
  91// Server is an HTTP/2 server.
  92type Server struct {
  93	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
  94	// which may run at a time over all connections.
   95	// Negative or zero means no limit.
  96	// TODO: implement
  97	MaxHandlers int
  98
  99	// MaxConcurrentStreams optionally specifies the number of
 100	// concurrent streams that each client may have open at a
 101	// time. This is unrelated to the number of http.Handler goroutines
 102	// which may be active globally, which is MaxHandlers.
 103	// If zero, MaxConcurrentStreams defaults to at least 100, per
 104	// the HTTP/2 spec's recommendations.
 105	MaxConcurrentStreams uint32
 106
 107	// MaxDecoderHeaderTableSize optionally specifies the http2
 108	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
 109	// informs the remote endpoint of the maximum size of the header compression
 110	// table used to decode header blocks, in octets. If zero, the default value
 111	// of 4096 is used.
 112	MaxDecoderHeaderTableSize uint32
 113
 114	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
 115	// header compression table used for encoding request headers. Received
 116	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
 117	// the default value of 4096 is used.
 118	MaxEncoderHeaderTableSize uint32
 119
 120	// MaxReadFrameSize optionally specifies the largest frame
 121	// this server is willing to read. A valid value is between
 122	// 16k and 16M, inclusive. If zero or otherwise invalid, a
 123	// default value is used.
 124	MaxReadFrameSize uint32
 125
 126	// PermitProhibitedCipherSuites, if true, permits the use of
 127	// cipher suites prohibited by the HTTP/2 spec.
 128	PermitProhibitedCipherSuites bool
 129
 130	// IdleTimeout specifies how long until idle clients should be
 131	// closed with a GOAWAY frame. PING frames are not considered
 132	// activity for the purposes of IdleTimeout.
 133	// If zero or negative, there is no timeout.
 134	IdleTimeout time.Duration
 135
 136	// ReadIdleTimeout is the timeout after which a health check using a ping
 137	// frame will be carried out if no frame is received on the connection.
 138	// If zero, no health check is performed.
 139	ReadIdleTimeout time.Duration
 140
 141	// PingTimeout is the timeout after which the connection will be closed
 142	// if a response to a ping is not received.
 143	// If zero, a default of 15 seconds is used.
 144	PingTimeout time.Duration
 145
 146	// WriteByteTimeout is the timeout after which a connection will be
 147	// closed if no data can be written to it. The timeout begins when data is
 148	// available to write, and is extended whenever any bytes are written.
 149	// If zero or negative, there is no timeout.
 150	WriteByteTimeout time.Duration
 151
 152	// MaxUploadBufferPerConnection is the size of the initial flow
  153	// control window for each connection. The HTTP/2 spec does not
 154	// allow this to be smaller than 65535 or larger than 2^32-1.
 155	// If the value is outside this range, a default value will be
 156	// used instead.
 157	MaxUploadBufferPerConnection int32
 158
 159	// MaxUploadBufferPerStream is the size of the initial flow control
 160	// window for each stream. The HTTP/2 spec does not allow this to
 161	// be larger than 2^32-1. If the value is zero or larger than the
 162	// maximum, a default value will be used instead.
 163	MaxUploadBufferPerStream int32
 164
 165	// NewWriteScheduler constructs a write scheduler for a connection.
 166	// If nil, a default scheduler is chosen.
 167	NewWriteScheduler func() WriteScheduler
 168
 169	// CountError, if non-nil, is called on HTTP/2 server errors.
 170	// It's intended to increment a metric for monitoring, such
 171	// as an expvar or Prometheus metric.
 172	// The errType consists of only ASCII word characters.
 173	CountError func(errType string)
 174
 175	// Internal state. This is a pointer (rather than embedded directly)
 176	// so that we don't embed a Mutex in this struct, which will make the
 177	// struct non-copyable, which might break some callers.
 178	state *serverInternalState
 179
 180	// Synchronization group used for testing.
 181	// Outside of tests, this is nil.
 182	group synctestGroupInterface
 183}
 184
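     // markNewGoroutine joins the current goroutine to the Server's
     // synchronization group, which is only set in tests. Outside of tests
     // it is a no-op.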
 185func (s *Server) markNewGoroutine() {
 186	if s.group != nil {
 187		s.group.Join()
 188	}
 189}
 190
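     // now returns the current time from the synchronization group's
     // synthetic clock in tests, or from time.Now otherwise.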
 191func (s *Server) now() time.Time {
 192	if s.group != nil {
 193		return s.group.Now()
 194	}
 195	return time.Now()
 196}
 197
 198// newTimer creates a new time.Timer, or a synthetic timer in tests.
 199func (s *Server) newTimer(d time.Duration) timer {
 200	if s.group != nil {
 201		return s.group.NewTimer(d)
 202	}
 203	return timeTimer{time.NewTimer(d)}
 204}
 205
 206// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
 207func (s *Server) afterFunc(d time.Duration, f func()) timer {
 208	if s.group != nil {
 209		return s.group.AfterFunc(d, f)
 210	}
 211	return timeTimer{time.AfterFunc(d, f)}
 212}
 213
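     // serverInternalState tracks this Server's active connections so that a
     // graceful shutdown of the wrapping http.Server (registered via
     // RegisterOnShutdown in ConfigureServer) can be propagated to each of them.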
 214type serverInternalState struct {
 215	mu          sync.Mutex
 216	activeConns map[*serverConn]struct{}
 217}
 218
 219func (s *serverInternalState) registerConn(sc *serverConn) {
 220	if s == nil {
 221		return // if the Server was used without calling ConfigureServer
 222	}
 223	s.mu.Lock()
 224	s.activeConns[sc] = struct{}{}
 225	s.mu.Unlock()
 226}
 227
 228func (s *serverInternalState) unregisterConn(sc *serverConn) {
 229	if s == nil {
 230		return // if the Server was used without calling ConfigureServer
 231	}
 232	s.mu.Lock()
 233	delete(s.activeConns, sc)
 234	s.mu.Unlock()
 235}
 236
 237func (s *serverInternalState) startGracefulShutdown() {
 238	if s == nil {
 239		return // if the Server was used without calling ConfigureServer
 240	}
 241	s.mu.Lock()
 242	for sc := range s.activeConns {
 243		sc.startGracefulShutdown()
 244	}
 245	s.mu.Unlock()
 246}
 247
 248// ConfigureServer adds HTTP/2 support to a net/http Server.
 249//
 250// The configuration conf may be nil.
 251//
 252// ConfigureServer must be called before s begins serving.
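     //
     // A minimal usage sketch (the handler variable and certificate file
     // names here are illustrative, not part of this package):
     //
     //	srv := &http.Server{Addr: ":443", Handler: handler}
     //	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
     //		log.Fatal(err)
     //	}
     //	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))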
 253func ConfigureServer(s *http.Server, conf *Server) error {
 254	if s == nil {
 255		panic("nil *http.Server")
 256	}
 257	if conf == nil {
 258		conf = new(Server)
 259	}
 260	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
 261	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
 262		if h1.IdleTimeout != 0 {
 263			h2.IdleTimeout = h1.IdleTimeout
 264		} else {
 265			h2.IdleTimeout = h1.ReadTimeout
 266		}
 267	}
 268	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
 269
 270	if s.TLSConfig == nil {
 271		s.TLSConfig = new(tls.Config)
 272	} else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
 273		// If they already provided a TLS 1.0–1.2 CipherSuite list, return an
 274		// error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
 275		// ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
 276		haveRequired := false
 277		for _, cs := range s.TLSConfig.CipherSuites {
 278			switch cs {
 279			case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
 280				// Alternative MTI cipher to not discourage ECDSA-only servers.
 281				// See http://golang.org/cl/30721 for further information.
 282				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
 283				haveRequired = true
 284			}
 285		}
 286		if !haveRequired {
 287			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
 288		}
 289	}
 290
 291	// Note: not setting MinVersion to tls.VersionTLS12,
 292	// as we don't want to interfere with HTTP/1.1 traffic
 293	// on the user's server. We enforce TLS 1.2 later once
 294	// we accept a connection. Ideally this should be done
 295	// during next-proto selection, but using TLS <1.2 with
 296	// HTTP/2 is still the client's bug.
 297
 298	s.TLSConfig.PreferServerCipherSuites = true
 299
 300	if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
 301		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
 302	}
 303	if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
 304		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
 305	}
 306
 307	if s.TLSNextProto == nil {
 308		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
 309	}
 310	protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) {
 311		if testHookOnConn != nil {
 312			testHookOnConn()
 313		}
 314		// The TLSNextProto interface predates contexts, so
 315		// the net/http package passes down its per-connection
 316		// base context via an exported but unadvertised
 317		// method on the Handler. This is for internal
 318		// net/http<=>http2 use only.
 319		var ctx context.Context
 320		type baseContexter interface {
 321			BaseContext() context.Context
 322		}
 323		if bc, ok := h.(baseContexter); ok {
 324			ctx = bc.BaseContext()
 325		}
 326		conf.ServeConn(c, &ServeConnOpts{
 327			Context:          ctx,
 328			Handler:          h,
 329			BaseConfig:       hs,
 330			SawClientPreface: sawClientPreface,
 331		})
 332	}
 333	s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
 334		protoHandler(hs, c, h, false)
 335	}
 336	// The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
 337	//
 338	// A connection passed in this method has already had the HTTP/2 preface read from it.
 339	s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
 340		nc, err := unencryptedNetConnFromTLSConn(c)
 341		if err != nil {
 342			if lg := hs.ErrorLog; lg != nil {
 343				lg.Print(err)
 344			} else {
 345				log.Print(err)
 346			}
 347			go c.Close()
 348			return
 349		}
 350		protoHandler(hs, nc, h, true)
 351	}
 352	return nil
 353}
 354
 355// ServeConnOpts are options for the Server.ServeConn method.
 356type ServeConnOpts struct {
 357	// Context is the base context to use.
 358	// If nil, context.Background is used.
 359	Context context.Context
 360
 361	// BaseConfig optionally sets the base configuration
 362	// for values. If nil, defaults are used.
 363	BaseConfig *http.Server
 364
 365	// Handler specifies which handler to use for processing
 366	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
 367	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
 368	Handler http.Handler
 369
 370	// UpgradeRequest is an initial request received on a connection
 371	// undergoing an h2c upgrade. The request body must have been
 372	// completely read from the connection before calling ServeConn,
 373	// and the 101 Switching Protocols response written.
 374	UpgradeRequest *http.Request
 375
 376	// Settings is the decoded contents of the HTTP2-Settings header
 377	// in an h2c upgrade request.
 378	Settings []byte
 379
 380	// SawClientPreface is set if the HTTP/2 connection preface
 381	// has already been read from the connection.
 382	SawClientPreface bool
 383}
 384
 385func (o *ServeConnOpts) context() context.Context {
 386	if o != nil && o.Context != nil {
 387		return o.Context
 388	}
 389	return context.Background()
 390}
 391
 392func (o *ServeConnOpts) baseConfig() *http.Server {
 393	if o != nil && o.BaseConfig != nil {
 394		return o.BaseConfig
 395	}
 396	return new(http.Server)
 397}
 398
 399func (o *ServeConnOpts) handler() http.Handler {
 400	if o != nil {
 401		if o.Handler != nil {
 402			return o.Handler
 403		}
 404		if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
 405			return o.BaseConfig.Handler
 406		}
 407	}
 408	return http.DefaultServeMux
 409}
 410
 411// ServeConn serves HTTP/2 requests on the provided connection and
 412// blocks until the connection is no longer readable.
 413//
 414// ServeConn starts speaking HTTP/2 assuming that c has not had any
 415// reads or writes. It writes its initial settings frame and expects
 416// to be able to read the preface and settings frame from the
 417// client. If c has a ConnectionState method like a *tls.Conn, the
 418// ConnectionState is used to verify the TLS ciphersuite and to set
 419// the Request.TLS field in Handlers.
 420//
 421// ServeConn does not support h2c by itself. Any h2c support must be
 422// implemented in terms of providing a suitably-behaving net.Conn.
 423//
 424// The opts parameter is optional. If nil, default values are used.
 425func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 426	s.serveConn(c, opts, nil)
 427}
 428
 429func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
 430	baseCtx, cancel := serverConnBaseContext(c, opts)
 431	defer cancel()
 432
 433	http1srv := opts.baseConfig()
 434	conf := configFromServer(http1srv, s)
 435	sc := &serverConn{
 436		srv:                         s,
 437		hs:                          http1srv,
 438		conn:                        c,
 439		baseCtx:                     baseCtx,
 440		remoteAddrStr:               c.RemoteAddr().String(),
 441		bw:                          newBufferedWriter(s.group, c, conf.WriteByteTimeout),
 442		handler:                     opts.handler(),
 443		streams:                     make(map[uint32]*stream),
 444		readFrameCh:                 make(chan readFrameResult),
 445		wantWriteFrameCh:            make(chan FrameWriteRequest, 8),
 446		serveMsgCh:                  make(chan interface{}, 8),
 447		wroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
 448		bodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way
 449		doneServing:                 make(chan struct{}),
 450		clientMaxStreams:            math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
 451		advMaxStreams:               conf.MaxConcurrentStreams,
 452		initialStreamSendWindowSize: initialWindowSize,
 453		initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
 454		maxFrameSize:                initialMaxFrameSize,
 455		pingTimeout:                 conf.PingTimeout,
 456		countErrorFunc:              conf.CountError,
 457		serveG:                      newGoroutineLock(),
 458		pushEnabled:                 true,
 459		sawClientPreface:            opts.SawClientPreface,
 460	}
 461	if newf != nil {
 462		newf(sc)
 463	}
 464
 465	s.state.registerConn(sc)
 466	defer s.state.unregisterConn(sc)
 467
 468	// The net/http package sets the write deadline from the
 469	// http.Server.WriteTimeout during the TLS handshake, but then
 470	// passes the connection off to us with the deadline already set.
 471	// Write deadlines are set per stream in serverConn.newStream.
 472	// Disarm the net.Conn write deadline here.
 473	if sc.hs.WriteTimeout > 0 {
 474		sc.conn.SetWriteDeadline(time.Time{})
 475	}
 476
 477	if s.NewWriteScheduler != nil {
 478		sc.writeSched = s.NewWriteScheduler()
 479	} else {
 480		sc.writeSched = newRoundRobinWriteScheduler()
 481	}
 482
 483	// These start at the RFC-specified defaults. If there is a higher
 484	// configured value for inflow, that will be updated when we send a
 485	// WINDOW_UPDATE shortly after sending SETTINGS.
 486	sc.flow.add(initialWindowSize)
 487	sc.inflow.init(initialWindowSize)
 488	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
 489	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
 490
 491	fr := NewFramer(sc.bw, c)
 492	if conf.CountError != nil {
 493		fr.countError = conf.CountError
 494	}
 495	fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
 496	fr.MaxHeaderListSize = sc.maxHeaderListSize()
 497	fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
 498	sc.framer = fr
 499
 500	if tc, ok := c.(connectionStater); ok {
 501		sc.tlsState = new(tls.ConnectionState)
 502		*sc.tlsState = tc.ConnectionState()
 503		// 9.2 Use of TLS Features
 504		// An implementation of HTTP/2 over TLS MUST use TLS
 505		// 1.2 or higher with the restrictions on feature set
 506		// and cipher suite described in this section. Due to
 507		// implementation limitations, it might not be
 508		// possible to fail TLS negotiation. An endpoint MUST
 509		// immediately terminate an HTTP/2 connection that
 510		// does not meet the TLS requirements described in
 511		// this section with a connection error (Section
 512		// 5.4.1) of type INADEQUATE_SECURITY.
 513		if sc.tlsState.Version < tls.VersionTLS12 {
 514			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
 515			return
 516		}
 517
 518		if sc.tlsState.ServerName == "" {
 519			// Client must use SNI, but we don't enforce that anymore,
 520			// since it was causing problems when connecting to bare IP
 521			// addresses during development.
 522			//
 523			// TODO: optionally enforce? Or enforce at the time we receive
 524			// a new request, and verify the ServerName matches the :authority?
 525			// But that precludes proxy situations, perhaps.
 526			//
  527			// So, again, for now: do nothing here.
 528		}
 529
 530		if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
 531			// "Endpoints MAY choose to generate a connection error
 532			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
 533			// the prohibited cipher suites are negotiated."
 534			//
 535			// We choose that. In my opinion, the spec is weak
 536			// here. It also says both parties must support at least
  537			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there are no
 538			// excuses here. If we really must, we could allow an
 539			// "AllowInsecureWeakCiphers" option on the server later.
 540			// Let's see how it plays out first.
 541			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
 542			return
 543		}
 544	}
 545
 546	if opts.Settings != nil {
 547		fr := &SettingsFrame{
 548			FrameHeader: FrameHeader{valid: true},
 549			p:           opts.Settings,
 550		}
 551		if err := fr.ForeachSetting(sc.processSetting); err != nil {
 552			sc.rejectConn(ErrCodeProtocol, "invalid settings")
 553			return
 554		}
 555		opts.Settings = nil
 556	}
 557
 558	if hook := testHookGetServerConn; hook != nil {
 559		hook(sc)
 560	}
 561
 562	if opts.UpgradeRequest != nil {
 563		sc.upgradeRequest(opts.UpgradeRequest)
 564		opts.UpgradeRequest = nil
 565	}
 566
 567	sc.serve(conf)
 568}
 569
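     // serverConnBaseContext derives the connection's base context from opts,
     // annotates it with the connection's local address and the owning
     // *http.Server, and returns it along with its cancel func.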
 570func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
 571	ctx, cancel = context.WithCancel(opts.context())
 572	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
 573	if hs := opts.baseConfig(); hs != nil {
 574		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
 575	}
 576	return
 577}
 578
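     // rejectConn sends a GOAWAY frame with the given error code and debug
     // message, then closes the connection. It is used to refuse a connection
     // before the serve loop starts.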
 579func (sc *serverConn) rejectConn(err ErrCode, debug string) {
 580	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
 581	// ignoring errors. hanging up anyway.
 582	sc.framer.WriteGoAway(0, err, []byte(debug))
 583	sc.bw.Flush()
 584	sc.conn.Close()
 585}
 586
 587type serverConn struct {
 588	// Immutable:
 589	srv              *Server
 590	hs               *http.Server
 591	conn             net.Conn
 592	bw               *bufferedWriter // writing to conn
 593	handler          http.Handler
 594	baseCtx          context.Context
 595	framer           *Framer
 596	doneServing      chan struct{}          // closed when serverConn.serve ends
 597	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
 598	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
 599	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
 600	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
 601	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
 602	flow             outflow                // conn-wide (not stream-specific) outbound flow control
 603	inflow           inflow                 // conn-wide inbound flow control
 604	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
 605	remoteAddrStr    string
 606	writeSched       WriteScheduler
 607	countErrorFunc   func(errType string)
 608
 609	// Everything following is owned by the serve loop; use serveG.check():
 610	serveG                      goroutineLock // used to verify funcs are on serve()
 611	pushEnabled                 bool
 612	sawClientPreface            bool // preface has already been read, used in h2c upgrade
 613	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
 614	needToSendSettingsAck       bool
 615	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
 616	queuedControlFrames         int    // control frames in the writeSched queue
 617	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
  618	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
 619	curClientStreams            uint32 // number of open streams initiated by the client
 620	curPushedStreams            uint32 // number of open streams initiated by server push
 621	curHandlers                 uint32 // number of running handler goroutines
 622	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
 623	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
 624	streams                     map[uint32]*stream
 625	unstartedHandlers           []unstartedHandler
 626	initialStreamSendWindowSize int32
 627	initialStreamRecvWindowSize int32
 628	maxFrameSize                int32
 629	peerMaxHeaderListSize       uint32            // zero means unknown (default)
 630	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
 631	canonHeaderKeysSize         int               // canonHeader keys size in bytes
 632	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
 633	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
 634	needsFrameFlush             bool              // last frame write wasn't a flush
 635	inGoAway                    bool              // we've started to or sent GOAWAY
 636	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
 637	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
 638	pingSent                    bool
 639	sentPingData                [8]byte
 640	goAwayCode                  ErrCode
 641	shutdownTimer               timer // nil until used
 642	idleTimer                   timer // nil if unused
 643	readIdleTimeout             time.Duration
 644	pingTimeout                 time.Duration
 645	readIdleTimer               timer // nil if unused
 646
 647	// Owned by the writeFrameAsync goroutine:
 648	headerWriteBuf bytes.Buffer
 649	hpackEncoder   *hpack.Encoder
 650
 651	// Used by startGracefulShutdown.
 652	shutdownOnce sync.Once
 653}
 654
 655func (sc *serverConn) maxHeaderListSize() uint32 {
 656	n := sc.hs.MaxHeaderBytes
 657	if n <= 0 {
 658		n = http.DefaultMaxHeaderBytes
 659	}
 660	return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
 661}
 662
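     // curOpenStreams reports how many streams are currently open, counting
     // both client-initiated and server-pushed streams.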
 663func (sc *serverConn) curOpenStreams() uint32 {
 664	sc.serveG.check()
 665	return sc.curClientStreams + sc.curPushedStreams
 666}
 667
 668// stream represents a stream. This is the minimal metadata needed by
 669// the serve goroutine. Most of the actual stream state is owned by
 670// the http.Handler's goroutine in the responseWriter. Because the
 671// responseWriter's responseWriterState is recycled at the end of a
 672// handler, this struct intentionally has no pointer to the
 673// *responseWriter{,State} itself, as the Handler ending nils out the
 674// responseWriter's state field.
 675type stream struct {
 676	// immutable:
 677	sc        *serverConn
 678	id        uint32
 679	body      *pipe       // non-nil if expecting DATA frames
  680	cw        closeWaiter // closed when the stream transitions to the closed state
 681	ctx       context.Context
 682	cancelCtx func()
 683
 684	// owned by serverConn's serve loop:
 685	bodyBytes        int64   // body bytes seen so far
 686	declBodyBytes    int64   // or -1 if undeclared
 687	flow             outflow // limits writing from Handler to client
 688	inflow           inflow  // what the client is allowed to POST/etc to us
 689	state            streamState
 690	resetQueued      bool  // RST_STREAM queued for write; set by sc.resetStream
 691	gotTrailerHeader bool  // HEADER frame for trailers was seen
 692	wroteHeaders     bool  // whether we wrote headers (not status 100)
 693	readDeadline     timer // nil if unused
 694	writeDeadline    timer // nil if unused
 695	closeErr         error // set before cw is closed
 696
 697	trailer    http.Header // accumulated trailers
 698	reqTrailer http.Header // handler's Request.Trailer
 699}
 700
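     // Accessors used by the frame write code elsewhere in this package.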
 701func (sc *serverConn) Framer() *Framer  { return sc.framer }
 702func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
 703func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
 704func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
 705	return sc.hpackEncoder, &sc.headerWriteBuf
 706}
 707
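     // state returns the state of the stream with the given ID, along with
     // its *stream if the connection is still tracking it. For untracked IDs
     // it reports stateClosed or stateIdle depending on whether the ID is at
     // or below the highest stream ID seen from that peer.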
 708func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
 709	sc.serveG.check()
 710	// http://tools.ietf.org/html/rfc7540#section-5.1
 711	if st, ok := sc.streams[streamID]; ok {
 712		return st.state, st
 713	}
 714	// "The first use of a new stream identifier implicitly closes all
 715	// streams in the "idle" state that might have been initiated by
 716	// that peer with a lower-valued stream identifier. For example, if
 717	// a client sends a HEADERS frame on stream 7 without ever sending a
 718	// frame on stream 5, then stream 5 transitions to the "closed"
 719	// state when the first frame for stream 7 is sent or received."
 720	if streamID%2 == 1 {
 721		if streamID <= sc.maxClientStreamID {
 722			return stateClosed, nil
 723		}
 724	} else {
 725		if streamID <= sc.maxPushPromiseID {
 726			return stateClosed, nil
 727		}
 728	}
 729	return stateIdle, nil
 730}
 731
 732// setConnState calls the net/http ConnState hook for this connection, if configured.
 733// Note that the net/http package does StateNew and StateClosed for us.
 734// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
 735func (sc *serverConn) setConnState(state http.ConnState) {
 736	if sc.hs.ConnState != nil {
 737		sc.hs.ConnState(sc.conn, state)
 738	}
 739}
 740
 741func (sc *serverConn) vlogf(format string, args ...interface{}) {
 742	if VerboseLogs {
 743		sc.logf(format, args...)
 744	}
 745}
 746
 747func (sc *serverConn) logf(format string, args ...interface{}) {
 748	if lg := sc.hs.ErrorLog; lg != nil {
 749		lg.Printf(format, args...)
 750	} else {
 751		log.Printf(format, args...)
 752	}
 753}
 754
 755// errno returns v's underlying uintptr, else 0.
 756//
 757// TODO: remove this helper function once http2 can use build
 758// tags. See comment in isClosedConnError.
 759func errno(v error) uintptr {
 760	if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
 761		return uintptr(rv.Uint())
 762	}
 763	return 0
 764}
 765
 766// isClosedConnError reports whether err is an error from use of a closed
 767// network connection.
 768func isClosedConnError(err error) bool {
 769	if err == nil {
 770		return false
 771	}
 772
 773	if errors.Is(err, net.ErrClosed) {
 774		return true
 775	}
 776
 777	// TODO(bradfitz): x/tools/cmd/bundle doesn't really support
 778	// build tags, so I can't make an http2_windows.go file with
 779	// Windows-specific stuff. Fix that and move this, once we
 780	// have a way to bundle this into std's net/http somehow.
 781	if runtime.GOOS == "windows" {
 782		if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
 783			if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
 784				const WSAECONNABORTED = 10053
 785				const WSAECONNRESET = 10054
 786				if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
 787					return true
 788				}
 789			}
 790		}
 791	}
 792	return false
 793}
 794
 795func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
 796	if err == nil {
 797		return
 798	}
 799	if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
 800		// Boring, expected errors.
 801		sc.vlogf(format, args...)
 802	} else {
 803		sc.logf(format, args...)
 804	}
 805}
 806
 807// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
 808// of the entries in the canonHeader cache.
 809// This should be larger than the size of unique, uncommon header keys likely to
 810// be sent by the peer, while not so high as to permit unreasonable memory usage
 811// if the peer sends an unbounded number of unique header keys.
 812const maxCachedCanonicalHeadersKeysSize = 2048
 813
 814func (sc *serverConn) canonicalHeader(v string) string {
 815	sc.serveG.check()
 816	cv, ok := httpcommon.CachedCanonicalHeader(v)
 817	if ok {
 818		return cv
 819	}
 820	cv, ok = sc.canonHeader[v]
 821	if ok {
 822		return cv
 823	}
 824	if sc.canonHeader == nil {
 825		sc.canonHeader = make(map[string]string)
 826	}
 827	cv = http.CanonicalHeaderKey(v)
 828	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
 829	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
 830		sc.canonHeader[v] = cv
 831		sc.canonHeaderKeysSize += size
 832	}
 833	return cv
 834}
 835
 836type readFrameResult struct {
 837	f   Frame // valid until readMore is called
 838	err error
 839
 840	// readMore should be called once the consumer no longer needs or
 841	// retains f. After readMore, f is invalid and more frames can be
 842	// read.
 843	readMore func()
 844}
 845
 846// readFrames is the loop that reads incoming frames.
 847// It takes care to only read one frame at a time, blocking until the
 848// consumer is done with the frame.
 849// It's run on its own goroutine.
 850func (sc *serverConn) readFrames() {
 851	sc.srv.markNewGoroutine()
 852	gate := make(chan struct{})
 853	gateDone := func() { gate <- struct{}{} }
 854	for {
 855		f, err := sc.framer.ReadFrame()
 856		select {
 857		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
 858		case <-sc.doneServing:
 859			return
 860		}
 861		select {
 862		case <-gate:
 863		case <-sc.doneServing:
 864			return
 865		}
 866		if terminalReadFrameError(err) {
 867			return
 868		}
 869	}
 870}
 871
 872// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
 873type frameWriteResult struct {
 874	_   incomparable
 875	wr  FrameWriteRequest // what was written (or attempted)
 876	err error             // result of the writeFrame call
 877}
 878
 879// writeFrameAsync runs in its own goroutine and writes a single frame
 880// and then reports when it's done.
 881// At most one goroutine can be running writeFrameAsync at a time per
 882// serverConn.
 883func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
 884	sc.srv.markNewGoroutine()
 885	var err error
 886	if wd == nil {
 887		err = wr.write.writeFrame(sc)
 888	} else {
 889		err = sc.framer.endWrite()
 890	}
 891	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
 892}
 893
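     // closeAllStreamsOnConnClose closes every remaining stream with
     // errClientDisconnected as the connection is torn down.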
 894func (sc *serverConn) closeAllStreamsOnConnClose() {
 895	sc.serveG.check()
 896	for _, st := range sc.streams {
 897		sc.closeStream(st, errClientDisconnected)
 898	}
 899}
 900
 901func (sc *serverConn) stopShutdownTimer() {
 902	sc.serveG.check()
 903	if t := sc.shutdownTimer; t != nil {
 904		t.Stop()
 905	}
 906}
 907
 908func (sc *serverConn) notePanic() {
 909	// Note: this is for serverConn.serve panicking, not http.Handler code.
 910	if testHookOnPanicMu != nil {
 911		testHookOnPanicMu.Lock()
 912		defer testHookOnPanicMu.Unlock()
 913	}
 914	if testHookOnPanic != nil {
 915		if e := recover(); e != nil {
 916			if testHookOnPanic(sc, e) {
 917				panic(e)
 918			}
 919		}
 920	}
 921}
 922
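     // serve runs the connection's main loop on the serve goroutine: it sends
     // the initial SETTINGS, reads the client preface, and then processes
     // incoming frames, write results, and serveMsgCh messages until the
     // connection is done.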
 923func (sc *serverConn) serve(conf http2Config) {
 924	sc.serveG.check()
 925	defer sc.notePanic()
 926	defer sc.conn.Close()
 927	defer sc.closeAllStreamsOnConnClose()
 928	defer sc.stopShutdownTimer()
 929	defer close(sc.doneServing) // unblocks handlers trying to send
 930
 931	if VerboseLogs {
 932		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
 933	}
 934
 935	settings := writeSettings{
 936		{SettingMaxFrameSize, conf.MaxReadFrameSize},
 937		{SettingMaxConcurrentStreams, sc.advMaxStreams},
 938		{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
 939		{SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
 940		{SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
 941	}
 942	if !disableExtendedConnectProtocol {
 943		settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
 944	}
 945	sc.writeFrame(FrameWriteRequest{
 946		write: settings,
 947	})
 948	sc.unackedSettings++
 949
 950	// Each connection starts with initialWindowSize inflow tokens.
 951	// If a higher value is configured, we add more tokens.
 952	if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
 953		sc.sendWindowUpdate(nil, int(diff))
 954	}
 955
 956	if err := sc.readPreface(); err != nil {
 957		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
 958		return
 959	}
 960	// Now that we've got the preface, get us out of the
 961	// "StateNew" state. We can't go directly to idle, though.
 962	// Active means we read some data and anticipate a request. We'll
 963	// do another Active when we get a HEADERS frame.
 964	sc.setConnState(http.StateActive)
 965	sc.setConnState(http.StateIdle)
 966
 967	if sc.srv.IdleTimeout > 0 {
 968		sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
 969		defer sc.idleTimer.Stop()
 970	}
 971
 972	if conf.SendPingTimeout > 0 {
 973		sc.readIdleTimeout = conf.SendPingTimeout
 974		sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
 975		defer sc.readIdleTimer.Stop()
 976	}
 977
 978	go sc.readFrames() // closed by defer sc.conn.Close above
 979
 980	settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
 981	defer settingsTimer.Stop()
 982
 983	lastFrameTime := sc.srv.now()
 984	loopNum := 0
 985	for {
 986		loopNum++
 987		select {
 988		case wr := <-sc.wantWriteFrameCh:
 989			if se, ok := wr.write.(StreamError); ok {
 990				sc.resetStream(se)
 991				break
 992			}
 993			sc.writeFrame(wr)
 994		case res := <-sc.wroteFrameCh:
 995			sc.wroteFrame(res)
 996		case res := <-sc.readFrameCh:
 997			lastFrameTime = sc.srv.now()
 998			// Process any written frames before reading new frames from the client since a
 999			// written frame could have triggered a new stream to be started.
1000			if sc.writingFrameAsync {
1001				select {
1002				case wroteRes := <-sc.wroteFrameCh:
1003					sc.wroteFrame(wroteRes)
1004				default:
1005				}
1006			}
1007			if !sc.processFrameFromReader(res) {
1008				return
1009			}
1010			res.readMore()
1011			if settingsTimer != nil {
1012				settingsTimer.Stop()
1013				settingsTimer = nil
1014			}
1015		case m := <-sc.bodyReadCh:
1016			sc.noteBodyRead(m.st, m.n)
1017		case msg := <-sc.serveMsgCh:
1018			switch v := msg.(type) {
1019			case func(int):
1020				v(loopNum) // for testing
1021			case *serverMessage:
1022				switch v {
1023				case settingsTimerMsg:
1024					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
1025					return
1026				case idleTimerMsg:
1027					sc.vlogf("connection is idle")
1028					sc.goAway(ErrCodeNo)
1029				case readIdleTimerMsg:
1030					sc.handlePingTimer(lastFrameTime)
1031				case shutdownTimerMsg:
1032					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
1033					return
1034				case gracefulShutdownMsg:
1035					sc.startGracefulShutdownInternal()
1036				case handlerDoneMsg:
1037					sc.handlerDone()
1038				default:
1039					panic("unknown timer")
1040				}
1041			case *startPushRequest:
1042				sc.startPush(v)
1043			case func(*serverConn):
1044				v(sc)
1045			default:
1046				panic(fmt.Sprintf("unexpected type %T", v))
1047			}
1048		}
1049
1050		// If the peer is causing us to generate a lot of control frames,
1051		// but not reading them from us, assume they are trying to make us
1052		// run out of memory.
1053		if sc.queuedControlFrames > maxQueuedControlFrames {
1054			sc.vlogf("http2: too many control frames in send queue, closing connection")
1055			return
1056		}
1057
1058		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
1059		// with no error code (graceful shutdown), don't start the timer until
1060		// all open streams have been completed.
1061		sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
1062		gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
1063		if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
1064			sc.shutDownIn(goAwayTimeout)
1065		}
1066	}
1067}
1068
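     // handlePingTimer runs on the serve goroutine when the read-idle timer
     // fires. It closes the connection if a previously sent PING is still
     // unanswered, re-arms the timer if frames arrived since it was set, and
     // otherwise sends a PING and waits pingTimeout for the reply.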
1069func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
1070	if sc.pingSent {
1071		sc.logf("timeout waiting for PING response")
1072		if f := sc.countErrorFunc; f != nil {
1073			f("conn_close_lost_ping")
1074		}
1075		sc.conn.Close()
1076		return
1077	}
1078
1079	pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
1080	now := sc.srv.now()
1081	if pingAt.After(now) {
1082		// We received frames since arming the ping timer.
1083		// Reset it for the next possible timeout.
1084		sc.readIdleTimer.Reset(pingAt.Sub(now))
1085		return
1086	}
1087
1088	sc.pingSent = true
1089	// Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
1090	// is we send a PING frame containing 0s.
1091	_, _ = rand.Read(sc.sentPingData[:])
1092	sc.writeFrame(FrameWriteRequest{
1093		write: &writePing{data: sc.sentPingData},
1094	})
1095	sc.readIdleTimer.Reset(sc.pingTimeout)
1096}
1097
1098type serverMessage int
1099
1100// Message values sent to serveMsgCh.
1101var (
1102	settingsTimerMsg    = new(serverMessage)
1103	idleTimerMsg        = new(serverMessage)
1104	readIdleTimerMsg    = new(serverMessage)
1105	shutdownTimerMsg    = new(serverMessage)
1106	gracefulShutdownMsg = new(serverMessage)
1107	handlerDoneMsg      = new(serverMessage)
1108)
1109
1110func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
1111func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
1112func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
1113func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
1114
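     // sendServeMsg delivers msg to the serve loop, or drops it if the
     // connection has already finished serving. It must not be called from
     // the serve goroutine itself.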
1115func (sc *serverConn) sendServeMsg(msg interface{}) {
1116	sc.serveG.checkNotOn() // NOT
1117	select {
1118	case sc.serveMsgCh <- msg:
1119	case <-sc.doneServing:
1120	}
1121}
1122
1123var errPrefaceTimeout = errors.New("timeout waiting for client preface")
1124
1125// readPreface reads the ClientPreface greeting from the peer or
1126// returns errPrefaceTimeout on timeout, or an error if the greeting
1127// is invalid.
1128func (sc *serverConn) readPreface() error {
1129	if sc.sawClientPreface {
1130		return nil
1131	}
1132	errc := make(chan error, 1)
1133	go func() {
1134		// Read the client preface
1135		buf := make([]byte, len(ClientPreface))
1136		if _, err := io.ReadFull(sc.conn, buf); err != nil {
1137			errc <- err
1138		} else if !bytes.Equal(buf, clientPreface) {
1139			errc <- fmt.Errorf("bogus greeting %q", buf)
1140		} else {
1141			errc <- nil
1142		}
1143	}()
1144	timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
1145	defer timer.Stop()
1146	select {
1147	case <-timer.C():
1148		return errPrefaceTimeout
1149	case err := <-errc:
1150		if err == nil {
1151			if VerboseLogs {
1152				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
1153			}
1154		}
1155		return err
1156	}
1157}
1158
1159var errChanPool = sync.Pool{
1160	New: func() interface{} { return make(chan error, 1) },
1161}
1162
1163var writeDataPool = sync.Pool{
1164	New: func() interface{} { return new(writeData) },
1165}
1166
1167// writeDataFromHandler writes DATA response frames from a handler on
1168// the given stream.
1169func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
1170	ch := errChanPool.Get().(chan error)
1171	writeArg := writeDataPool.Get().(*writeData)
1172	*writeArg = writeData{stream.id, data, endStream}
1173	err := sc.writeFrameFromHandler(FrameWriteRequest{
1174		write:  writeArg,
1175		stream: stream,
1176		done:   ch,
1177	})
1178	if err != nil {
1179		return err
1180	}
1181	var frameWriteDone bool // the frame write is done (successfully or not)
1182	select {
1183	case err = <-ch:
1184		frameWriteDone = true
1185	case <-sc.doneServing:
1186		return errClientDisconnected
1187	case <-stream.cw:
1188		// If both ch and stream.cw were ready (as might
1189		// happen on the final Write after an http.Handler
1190		// ends), prefer the write result. Otherwise this
1191		// might just be us successfully closing the stream.
1192		// The writeFrameAsync and serve goroutines guarantee
1193		// that the ch send will happen before the stream.cw
1194		// close.
1195		select {
1196		case err = <-ch:
1197			frameWriteDone = true
1198		default:
1199			return errStreamClosed
1200		}
1201	}
1202	errChanPool.Put(ch)
1203	if frameWriteDone {
1204		writeDataPool.Put(writeArg)
1205	}
1206	return err
1207}
1208
1209// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
1210// if the connection has gone away.
1211//
1212// This must not be run from the serve goroutine itself, else it might
1213// deadlock writing to sc.wantWriteFrameCh (which is only mildly
1214// buffered and is read by serve itself). If you're on the serve
1215// goroutine, call writeFrame instead.
1216func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
1217	sc.serveG.checkNotOn() // NOT
1218	select {
1219	case sc.wantWriteFrameCh <- wr:
1220		return nil
1221	case <-sc.doneServing:
1222		// Serve loop is gone.
1223		// Client has closed their connection to the server.
1224		return errClientDisconnected
1225	}
1226}
1227
1228// writeFrame schedules a frame to write and sends it if there's nothing
1229// already being written.
1230//
1231// There is no pushback here (the serve goroutine never blocks). It's
1232// the http.Handlers that block, waiting for their previous frames to
 1233// make it onto the wire.
1234//
1235// If you're not on the serve goroutine, use writeFrameFromHandler instead.
1236func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
1237	sc.serveG.check()
1238
1239	// If true, wr will not be written and wr.done will not be signaled.
1240	var ignoreWrite bool
1241
1242	// We are not allowed to write frames on closed streams. RFC 7540 Section
1243	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
1244	// a closed stream." Our server never sends PRIORITY, so that exception
1245	// does not apply.
1246	//
1247	// The serverConn might close an open stream while the stream's handler
1248	// is still running. For example, the server might close a stream when it
1249	// receives bad data from the client. If this happens, the handler might
1250	// attempt to write a frame after the stream has been closed (since the
1251	// handler hasn't yet been notified of the close). In this case, we simply
1252	// ignore the frame. The handler will notice that the stream is closed when
1253	// it waits for the frame to be written.
1254	//
1255	// As an exception to this rule, we allow sending RST_STREAM after close.
1256	// This allows us to immediately reject new streams without tracking any
1257	// state for those streams (except for the queued RST_STREAM frame). This
1258	// may result in duplicate RST_STREAMs in some cases, but the client should
1259	// ignore those.
1260	if wr.StreamID() != 0 {
1261		_, isReset := wr.write.(StreamError)
1262		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
1263			ignoreWrite = true
1264		}
1265	}
1266
1267	// Don't send a 100-continue response if we've already sent headers.
1268	// See golang.org/issue/14030.
1269	switch wr.write.(type) {
1270	case *writeResHeaders:
1271		wr.stream.wroteHeaders = true
1272	case write100ContinueHeadersFrame:
1273		if wr.stream.wroteHeaders {
1274			// We do not need to notify wr.done because this frame is
1275			// never written with wr.done != nil.
1276			if wr.done != nil {
1277				panic("wr.done != nil for write100ContinueHeadersFrame")
1278			}
1279			ignoreWrite = true
1280		}
1281	}
1282
1283	if !ignoreWrite {
1284		if wr.isControl() {
1285			sc.queuedControlFrames++
1286			// For extra safety, detect wraparounds, which should not happen,
1287			// and pull the plug.
1288			if sc.queuedControlFrames < 0 {
1289				sc.conn.Close()
1290			}
1291		}
1292		sc.writeSched.Push(wr)
1293	}
1294	sc.scheduleFrameWrite()
1295}
1296
1297// startFrameWrite starts a goroutine to write wr (in a separate
 1298// goroutine since that might block on the network), and updates the
 1299// serve goroutine's state about the world from the info in wr.
1300func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
1301	sc.serveG.check()
1302	if sc.writingFrame {
1303		panic("internal error: can only be writing one frame at a time")
1304	}
1305
1306	st := wr.stream
1307	if st != nil {
1308		switch st.state {
1309		case stateHalfClosedLocal:
1310			switch wr.write.(type) {
1311			case StreamError, handlerPanicRST, writeWindowUpdate:
1312				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
1313				// in this state. (We never send PRIORITY from the server, so that is not checked.)
1314			default:
1315				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
1316			}
1317		case stateClosed:
1318			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
1319		}
1320	}
1321	if wpp, ok := wr.write.(*writePushPromise); ok {
1322		var err error
1323		wpp.promisedID, err = wpp.allocatePromisedID()
1324		if err != nil {
1325			sc.writingFrameAsync = false
1326			wr.replyToWriter(err)
1327			return
1328		}
1329	}
1330
1331	sc.writingFrame = true
1332	sc.needsFrameFlush = true
1333	if wr.write.staysWithinBuffer(sc.bw.Available()) {
1334		sc.writingFrameAsync = false
1335		err := wr.write.writeFrame(sc)
1336		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
1337	} else if wd, ok := wr.write.(*writeData); ok {
1338		// Encode the frame in the serve goroutine, to ensure we don't have
1339		// any lingering asynchronous references to data passed to Write.
1340		// See https://go.dev/issue/58446.
1341		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
1342		sc.writingFrameAsync = true
1343		go sc.writeFrameAsync(wr, wd)
1344	} else {
1345		sc.writingFrameAsync = true
1346		go sc.writeFrameAsync(wr, nil)
1347	}
1348}
1349
1350// errHandlerPanicked is the error given to any callers blocked in a read from
1351// Request.Body when the main goroutine panics. Since most handlers read in the
1352// main ServeHTTP goroutine, this will show up rarely.
1353var errHandlerPanicked = errors.New("http2: handler panicked")
1354
1355// wroteFrame is called on the serve goroutine with the result of
1356// whatever happened on writeFrameAsync.
1357func (sc *serverConn) wroteFrame(res frameWriteResult) {
1358	sc.serveG.check()
1359	if !sc.writingFrame {
1360		panic("internal error: expected to be already writing a frame")
1361	}
1362	sc.writingFrame = false
1363	sc.writingFrameAsync = false
1364
1365	if res.err != nil {
1366		sc.conn.Close()
1367	}
1368
1369	wr := res.wr
1370
1371	if writeEndsStream(wr.write) {
1372		st := wr.stream
1373		if st == nil {
1374			panic("internal error: expecting non-nil stream")
1375		}
1376		switch st.state {
1377		case stateOpen:
1378			// Here we would go to stateHalfClosedLocal in
1379			// theory, but since our handler is done and
1380			// the net/http package provides no mechanism
1381			// for closing a ResponseWriter while still
1382			// reading data (see possible TODO at top of
1383			// this file), we go into closed state here
1384			// anyway, after telling the peer we're
1385			// hanging up on them. We'll transition to
1386			// stateClosed after the RST_STREAM frame is
1387			// written.
1388			st.state = stateHalfClosedLocal
1389			// Section 8.1: a server MAY request that the client abort
1390			// transmission of a request without error by sending a
1391			// RST_STREAM with an error code of NO_ERROR after sending
1392			// a complete response.
1393			sc.resetStream(streamError(st.id, ErrCodeNo))
1394		case stateHalfClosedRemote:
1395			sc.closeStream(st, errHandlerComplete)
1396		}
1397	} else {
1398		switch v := wr.write.(type) {
1399		case StreamError:
1400			// st may be unknown if the RST_STREAM was generated to reject bad input.
1401			if st, ok := sc.streams[v.StreamID]; ok {
1402				sc.closeStream(st, v)
1403			}
1404		case handlerPanicRST:
1405			sc.closeStream(wr.stream, errHandlerPanicked)
1406		}
1407	}
1408
1409	// Reply (if requested) to unblock the ServeHTTP goroutine.
1410	wr.replyToWriter(res.err)
1411
1412	sc.scheduleFrameWrite()
1413}
1414
1415// scheduleFrameWrite tickles the frame writing scheduler.
1416//
1417// If a frame is already being written, nothing happens. This will be called again
1418// when the frame is done being written.
1419//
1420// If a frame isn't being written and we need to send one, the best frame
1421// to send is selected by writeSched.
1422//
1423// If a frame isn't being written and there's nothing else to send, we
1424// flush the write buffer.
1425func (sc *serverConn) scheduleFrameWrite() {
1426	sc.serveG.check()
1427	if sc.writingFrame || sc.inFrameScheduleLoop {
1428		return
1429	}
1430	sc.inFrameScheduleLoop = true
1431	for !sc.writingFrameAsync {
1432		if sc.needToSendGoAway {
1433			sc.needToSendGoAway = false
1434			sc.startFrameWrite(FrameWriteRequest{
1435				write: &writeGoAway{
1436					maxStreamID: sc.maxClientStreamID,
1437					code:        sc.goAwayCode,
1438				},
1439			})
1440			continue
1441		}
1442		if sc.needToSendSettingsAck {
1443			sc.needToSendSettingsAck = false
1444			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
1445			continue
1446		}
1447		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
1448			if wr, ok := sc.writeSched.Pop(); ok {
1449				if wr.isControl() {
1450					sc.queuedControlFrames--
1451				}
1452				sc.startFrameWrite(wr)
1453				continue
1454			}
1455		}
1456		if sc.needsFrameFlush {
1457			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
1458			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
1459			continue
1460		}
1461		break
1462	}
1463	sc.inFrameScheduleLoop = false
1464}
1465
1466// startGracefulShutdown gracefully shuts down a connection. This
1467// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
1468// shutting down. The connection isn't closed until all current
1469// streams are done.
1470//
1471// startGracefulShutdown returns immediately; it does not wait until
1472// the connection has shut down.
1473func (sc *serverConn) startGracefulShutdown() {
1474	sc.serveG.checkNotOn() // NOT
1475	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
1476}
1477
1478// After sending GOAWAY with an error code (non-graceful shutdown), the
1479// connection will close after goAwayTimeout.
1480//
1481// If we close the connection immediately after sending GOAWAY, there may
1482// be unsent data in our kernel receive buffer, which will cause the kernel
1483// to send a TCP RST on close() instead of a FIN. This RST will abort the
1484// connection immediately, whether or not the client had received the GOAWAY.
1485//
1486// Ideally we should delay for at least 1 RTT + epsilon so the client has
1487// a chance to read the GOAWAY and stop sending messages. Measuring RTT
1488// is hard, so we approximate with 1 second. See golang.org/issue/18701.
1489//
 1490// This is a var so it can be shorter in tests, where all requests use the
 1491// loopback interface, making the expected RTT very small.
1492//
1493// TODO: configurable?
1494var goAwayTimeout = 1 * time.Second
1495
1496func (sc *serverConn) startGracefulShutdownInternal() {
1497	sc.goAway(ErrCodeNo)
1498}
1499
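     // goAway begins connection shutdown by scheduling a GOAWAY frame with
     // the given error code. It must run on the serve goroutine. If a GOAWAY
     // is already pending, only the error code may be upgraded from ErrCodeNo.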
1500func (sc *serverConn) goAway(code ErrCode) {
1501	sc.serveG.check()
1502	if sc.inGoAway {
1503		if sc.goAwayCode == ErrCodeNo {
1504			sc.goAwayCode = code
1505		}
1506		return
1507	}
1508	sc.inGoAway = true
1509	sc.needToSendGoAway = true
1510	sc.goAwayCode = code
1511	sc.scheduleFrameWrite()
1512}
1513
1514func (sc *serverConn) shutDownIn(d time.Duration) {
1515	sc.serveG.check()
1516	sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
1517}
1518
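     // resetStream queues an RST_STREAM frame for the stream described by se
     // and, if the stream is still tracked, marks it as having a reset queued.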
1519func (sc *serverConn) resetStream(se StreamError) {
1520	sc.serveG.check()
1521	sc.writeFrame(FrameWriteRequest{write: se})
1522	if st, ok := sc.streams[se.StreamID]; ok {
1523		st.resetQueued = true
1524	}
1525}
1526
1527// processFrameFromReader processes the serve loop's read from readFrameCh from the
1528// frame-reading goroutine.
1529// processFrameFromReader returns whether the connection should be kept open.
1530func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
1531	sc.serveG.check()
1532	err := res.err
1533	if err != nil {
1534		if err == ErrFrameTooLarge {
1535			sc.goAway(ErrCodeFrameSize)
1536			return true // goAway will close the loop
1537		}
1538		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
1539		if clientGone {
1540			// TODO: could we also get into this state if
1541			// the peer does a half close
1542			// (e.g. CloseWrite) because they're done
1543			// sending frames but they're still wanting
1544			// our open replies?  Investigate.
1545			// TODO: add CloseWrite to crypto/tls.Conn first
1546			// so we have a way to test this? I suppose
1547			// just for testing we could have a non-TLS mode.
1548			return false
1549		}
1550	} else {
1551		f := res.f
1552		if VerboseLogs {
1553			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
1554		}
1555		err = sc.processFrame(f)
1556		if err == nil {
1557			return true
1558		}
1559	}
1560
1561	switch ev := err.(type) {
1562	case StreamError:
1563		sc.resetStream(ev)
1564		return true
1565	case goAwayFlowError:
1566		sc.goAway(ErrCodeFlowControl)
1567		return true
1568	case ConnectionError:
1569		if res.f != nil {
1570			if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
1571				sc.maxClientStreamID = id
1572			}
1573		}
1574		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
1575		sc.goAway(ErrCode(ev))
1576		return true // goAway will handle shutdown
1577	default:
1578		if res.err != nil {
1579			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
1580		} else {
1581			sc.logf("http2: server closing client connection: %v", err)
1582		}
1583		return false
1584	}
1585}
1586
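// processFrame dispatches a single frame from the client: it enforces that
// the first frame is SETTINGS, discards frames on streams beyond the GOAWAY
// point, and otherwise hands the frame to the per-type process function.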
1587func (sc *serverConn) processFrame(f Frame) error {
1588	sc.serveG.check()
1589
1590	// First frame received must be SETTINGS.
1591	if !sc.sawFirstSettings {
1592		if _, ok := f.(*SettingsFrame); !ok {
1593			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
1594		}
1595		sc.sawFirstSettings = true
1596	}
1597
1598	// Discard frames for streams initiated after the identified last
1599	// stream sent in a GOAWAY, or all frames after sending an error.
1600	// We still need to return connection-level flow control for DATA frames.
1601	// RFC 9113 Section 6.8.
1602	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
1603
1604		if f, ok := f.(*DataFrame); ok {
1605			if !sc.inflow.take(f.Length) {
1606				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
1607			}
1608			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
1609		}
1610		return nil
1611	}
1612
1613	switch f := f.(type) {
1614	case *SettingsFrame:
1615		return sc.processSettings(f)
1616	case *MetaHeadersFrame:
1617		return sc.processHeaders(f)
1618	case *WindowUpdateFrame:
1619		return sc.processWindowUpdate(f)
1620	case *PingFrame:
1621		return sc.processPing(f)
1622	case *DataFrame:
1623		return sc.processData(f)
1624	case *RSTStreamFrame:
1625		return sc.processResetStream(f)
1626	case *PriorityFrame:
1627		return sc.processPriority(f)
1628	case *GoAwayFrame:
1629		return sc.processGoAway(f)
1630	case *PushPromiseFrame:
1631		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
1632		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
1633		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
1634	default:
1635		sc.vlogf("http2: server ignoring frame: %v", f.Header())
1636		return nil
1637	}
1638}
1639
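// processPing handles a PING frame: an ACK matching our last keepalive PING
// resets the read idle timer; any other PING on stream 0 is answered with a
// PING ACK.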
1640func (sc *serverConn) processPing(f *PingFrame) error {
1641	sc.serveG.check()
1642	if f.IsAck() {
1643		if sc.pingSent && sc.sentPingData == f.Data {
1644			// This is a response to a PING we sent.
1645			sc.pingSent = false
1646			sc.readIdleTimer.Reset(sc.readIdleTimeout)
1647		}
1648		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
1649		// containing this flag."
1650		return nil
1651	}
1652	if f.StreamID != 0 {
1653		// "PING frames are not associated with any individual
1654		// stream. If a PING frame is received with a stream
1655		// identifier field value other than 0x0, the recipient MUST
1656		// respond with a connection error (Section 5.4.1) of type
1657		// PROTOCOL_ERROR."
1658		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
1659	}
1660	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
1661	return nil
1662}
1663
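// processWindowUpdate applies a WINDOW_UPDATE to the stream-level or
// connection-level send flow-control window and reschedules frame writes.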
1664func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
1665	sc.serveG.check()
1666	switch {
1667	case f.StreamID != 0: // stream-level flow control
1668		state, st := sc.state(f.StreamID)
1669		if state == stateIdle {
1670			// Section 5.1: "Receiving any frame other than HEADERS
1671			// or PRIORITY on a stream in this state MUST be
1672			// treated as a connection error (Section 5.4.1) of
1673			// type PROTOCOL_ERROR."
1674			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
1675		}
1676		if st == nil {
1677			// "WINDOW_UPDATE can be sent by a peer that has sent a
1678			// frame bearing the END_STREAM flag. This means that a
1679			// receiver could receive a WINDOW_UPDATE frame on a "half
1680			// closed (remote)" or "closed" stream. A receiver MUST
1681			// NOT treat this as an error, see Section 5.1."
1682			return nil
1683		}
1684		if !st.flow.add(int32(f.Increment)) {
1685			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
1686		}
1687	default: // connection-level flow control
1688		if !sc.flow.add(int32(f.Increment)) {
1689			return goAwayFlowError{}
1690		}
1691	}
1692	sc.scheduleFrameWrite()
1693	return nil
1694}
1695
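// processResetStream handles a RST_STREAM from the client by canceling the
// stream's context and closing the stream.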
1696func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
1697	sc.serveG.check()
1698
1699	state, st := sc.state(f.StreamID)
1700	if state == stateIdle {
1701		// 6.4 "RST_STREAM frames MUST NOT be sent for a
1702		// stream in the "idle" state. If a RST_STREAM frame
1703		// identifying an idle stream is received, the
1704		// recipient MUST treat this as a connection error
1705		// (Section 5.4.1) of type PROTOCOL_ERROR.
1706		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
1707	}
1708	if st != nil {
1709		st.cancelCtx()
1710		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
1711	}
1712	return nil
1713}
1714
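// closeStream transitions st to stateClosed, removes it from the connection's
// stream map, returns any buffered connection-level flow control, and closes
// the stream's body pipe and close-waiter so blocked handlers are released.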
1715func (sc *serverConn) closeStream(st *stream, err error) {
1716	sc.serveG.check()
1717	if st.state == stateIdle || st.state == stateClosed {
1718		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
1719	}
1720	st.state = stateClosed
1721	if st.readDeadline != nil {
1722		st.readDeadline.Stop()
1723	}
1724	if st.writeDeadline != nil {
1725		st.writeDeadline.Stop()
1726	}
1727	if st.isPushed() {
1728		sc.curPushedStreams--
1729	} else {
1730		sc.curClientStreams--
1731	}
1732	delete(sc.streams, st.id)
1733	if len(sc.streams) == 0 {
1734		sc.setConnState(http.StateIdle)
1735		if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
1736			sc.idleTimer.Reset(sc.srv.IdleTimeout)
1737		}
1738		if h1ServerKeepAlivesDisabled(sc.hs) {
1739			sc.startGracefulShutdownInternal()
1740		}
1741	}
1742	if p := st.body; p != nil {
1743		// Return any buffered unread bytes worth of conn-level flow control.
1744		// See golang.org/issue/16481
1745		sc.sendWindowUpdate(nil, p.Len())
1746
1747		p.CloseWithError(err)
1748	}
1749	if e, ok := err.(StreamError); ok {
1750		if e.Cause != nil {
1751			err = e.Cause
1752		} else {
1753			err = errStreamClosed
1754		}
1755	}
1756	st.closeErr = err
1757	st.cancelCtx()
1758	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
1759	sc.writeSched.CloseStream(st.id)
1760}
1761
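// processSettings handles a SETTINGS frame: an ACK just decrements the count
// of our unacknowledged SETTINGS; otherwise each setting is applied and an
// ACK is queued.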
1762func (sc *serverConn) processSettings(f *SettingsFrame) error {
1763	sc.serveG.check()
1764	if f.IsAck() {
1765		sc.unackedSettings--
1766		if sc.unackedSettings < 0 {
1767			// Why is the peer ACKing settings we never sent?
1768			// The spec doesn't mention this case, but
1769			// hang up on them anyway.
1770			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
1771		}
1772		return nil
1773	}
1774	if f.NumSettings() > 100 || f.HasDuplicates() {
1775		// This isn't actually in the spec, but hang up on
1776		// suspiciously large settings frames or those with
1777		// duplicate entries.
1778		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
1779	}
1780	if err := f.ForeachSetting(sc.processSetting); err != nil {
1781		return err
1782	}
1783	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
1784	// acknowledged individually, even if multiple are received before the ACK.
1785	sc.needToSendSettingsAck = true
1786	sc.scheduleFrameWrite()
1787	return nil
1788}
1789
1790func (sc *serverConn) processSetting(s Setting) error {
1791	sc.serveG.check()
1792	if err := s.Valid(); err != nil {
1793		return err
1794	}
1795	if VerboseLogs {
1796		sc.vlogf("http2: server processing setting %v", s)
1797	}
1798	switch s.ID {
1799	case SettingHeaderTableSize:
1800		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
1801	case SettingEnablePush:
1802		sc.pushEnabled = s.Val != 0
1803	case SettingMaxConcurrentStreams:
1804		sc.clientMaxStreams = s.Val
1805	case SettingInitialWindowSize:
1806		return sc.processSettingInitialWindowSize(s.Val)
1807	case SettingMaxFrameSize:
1808		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
1809	case SettingMaxHeaderListSize:
1810		sc.peerMaxHeaderListSize = s.Val
1811	case SettingEnableConnectProtocol:
1812		// Receipt of this parameter by a server does not
1813		// have any impact
1814	default:
1815		// Unknown setting: "An endpoint that receives a SETTINGS
1816		// frame with any unknown or unsupported identifier MUST
1817		// ignore that setting."
1818		if VerboseLogs {
1819			sc.vlogf("http2: server ignoring unknown setting %v", s)
1820		}
1821	}
1822	return nil
1823}
1824
1825func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
1826	sc.serveG.check()
1827	// Note: val already validated to be within range by
1828	// processSetting's Valid call.
1829
1830	// "A SETTINGS frame can alter the initial flow control window
1831	// size for all current streams. When the value of
1832	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
1833	// adjust the size of all stream flow control windows that it
1834	// maintains by the difference between the new value and the
1835	// old value."
1836	old := sc.initialStreamSendWindowSize
1837	sc.initialStreamSendWindowSize = int32(val)
1838	growth := int32(val) - old // may be negative
1839	for _, st := range sc.streams {
1840		if !st.flow.add(growth) {
1841			// 6.9.2 Initial Flow Control Window Size
1842			// "An endpoint MUST treat a change to
1843			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
1844			// control window to exceed the maximum size as a
1845			// connection error (Section 5.4.1) of type
1846			// FLOW_CONTROL_ERROR."
1847			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
1848		}
1849	}
1850	return nil
1851}
1852
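// processData handles a DATA frame: it validates stream state and flow
// control, copies the payload into the stream's request body pipe, and
// returns flow-control credit for padding and for discarded data.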
1853func (sc *serverConn) processData(f *DataFrame) error {
1854	sc.serveG.check()
1855	id := f.Header().StreamID
1856
1857	data := f.Data()
1858	state, st := sc.state(id)
1859	if id == 0 || state == stateIdle {
1860		// Section 6.1: "DATA frames MUST be associated with a
1861		// stream. If a DATA frame is received whose stream
1862		// identifier field is 0x0, the recipient MUST respond
1863		// with a connection error (Section 5.4.1) of type
1864		// PROTOCOL_ERROR."
1865		//
1866		// Section 5.1: "Receiving any frame other than HEADERS
1867		// or PRIORITY on a stream in this state MUST be
1868		// treated as a connection error (Section 5.4.1) of
1869		// type PROTOCOL_ERROR."
1870		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
1871	}
1872
1873	// "If a DATA frame is received whose stream is not in "open"
1874	// or "half closed (local)" state, the recipient MUST respond
1875	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
1876	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
1877		// This includes sending a RST_STREAM if the stream is
1878		// in stateHalfClosedLocal (which currently means that
1879		// the http.Handler returned, so it's done reading &
1880		// done writing). Try to stop the client from sending
1881		// more DATA.
1882
1883		// But still enforce their connection-level flow control,
1884		// and return any flow control bytes since we're not going
1885		// to consume them.
1886		if !sc.inflow.take(f.Length) {
1887			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
1888		}
1889		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
1890
1891		if st != nil && st.resetQueued {
1892			// Already have a stream error in flight. Don't send another.
1893			return nil
1894		}
1895		return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
1896	}
1897	if st.body == nil {
1898		panic("internal error: should have a body in this state")
1899	}
1900
1901	// Sender sending more than they'd declared?
1902	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
1903		if !sc.inflow.take(f.Length) {
1904			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
1905		}
1906		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
1907
1908		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
1909		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
1910		// value of a content-length header field does not equal the sum of the
1911		// DATA frame payload lengths that form the body.
1912		return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
1913	}
1914	if f.Length > 0 {
1915		// Check whether the client has flow control quota.
1916		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
1917			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
1918		}
1919
1920		if len(data) > 0 {
1921			st.bodyBytes += int64(len(data))
1922			wrote, err := st.body.Write(data)
1923			if err != nil {
1924				// The handler has closed the request body.
1925				// Return the connection-level flow control for the discarded data,
1926				// but not the stream-level flow control.
1927				sc.sendWindowUpdate(nil, int(f.Length)-wrote)
1928				return nil
1929			}
1930			if wrote != len(data) {
1931				panic("internal error: bad Writer")
1932			}
1933		}
1934
1935		// Return any padded flow control now, since we won't
1936		// refund it later on body reads.
1937		// Call sendWindowUpdate even if there is no padding,
1938		// to return buffered flow control credit if the sent
1939		// window has shrunk.
1940		pad := int32(f.Length) - int32(len(data))
1941		sc.sendWindowUpdate32(nil, pad)
1942		sc.sendWindowUpdate32(st, pad)
1943	}
1944	if f.StreamEnded() {
1945		st.endStream()
1946	}
1947	return nil
1948}
1949
1950func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
1951	sc.serveG.check()
1952	if f.ErrCode != ErrCodeNo {
1953		sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
1954	} else {
1955		sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
1956	}
1957	sc.startGracefulShutdownInternal()
1958	// http://tools.ietf.org/html/rfc7540#section-6.8
1959	// We should not create any new streams, which means we should disable push.
1960	sc.pushEnabled = false
1961	return nil
1962}
1963
1964// isPushed reports whether the stream is server-initiated.
1965func (st *stream) isPushed() bool {
1966	return st.id%2 == 0
1967}
1968
1969// endStream closes a Request.Body's pipe. It is called when a DATA
1970// frame says a request body is over (or after trailers).
1971func (st *stream) endStream() {
1972	sc := st.sc
1973	sc.serveG.check()
1974
1975	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
1976		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
1977			st.declBodyBytes, st.bodyBytes))
1978	} else {
1979		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
1980		st.body.CloseWithError(io.EOF)
1981	}
1982	st.state = stateHalfClosedRemote
1983}
1984
1985// copyTrailersToHandlerRequest is run in the Handler's goroutine in
1986// its Request.Body.Read just before it gets io.EOF.
1987func (st *stream) copyTrailersToHandlerRequest() {
1988	for k, vv := range st.trailer {
1989		if _, ok := st.reqTrailer[k]; ok {
1990			// Only copy it over if it was pre-declared.
1991			st.reqTrailer[k] = vv
1992		}
1993	}
1994}
1995
1996// onReadTimeout is run on its own goroutine (from time.AfterFunc)
1997// when the stream's ReadTimeout has fired.
1998func (st *stream) onReadTimeout() {
1999	if st.body != nil {
2000		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
2001		// returning the bare error.
2002		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
2003	}
2004}
2005
2006// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
2007// when the stream's WriteTimeout has fired.
2008func (st *stream) onWriteTimeout() {
2009	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
2010		StreamID: st.id,
2011		Code:     ErrCodeInternal,
2012		Cause:    os.ErrDeadlineExceeded,
2013	}})
2014}
2015
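// processHeaders handles a HEADERS frame, either opening a new stream and
// scheduling its handler or, for an existing stream, processing the frame as
// trailers.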
2016func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
2017	sc.serveG.check()
2018	id := f.StreamID
2019	// http://tools.ietf.org/html/rfc7540#section-5.1.1
2020	// Streams initiated by a client MUST use odd-numbered stream
2021	// identifiers. [...] An endpoint that receives an unexpected
2022	// stream identifier MUST respond with a connection error
2023	// (Section 5.4.1) of type PROTOCOL_ERROR.
2024	if id%2 != 1 {
2025		return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
2026	}
2027	// A HEADERS frame can be used to create a new stream or
2028	// send a trailer for an open one. If we already have a stream
2029	// open, let it process its own HEADERS frame (trailers at this
2030	// point, if it's valid).
2031	if st := sc.streams[f.StreamID]; st != nil {
2032		if st.resetQueued {
2033			// We're sending RST_STREAM to close the stream, so don't bother
2034			// processing this frame.
2035			return nil
2036		}
2037		// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
2038		// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
2039		// this state, it MUST respond with a stream error (Section 5.4.2) of
2040		// type STREAM_CLOSED.
2041		if st.state == stateHalfClosedRemote {
2042			return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
2043		}
2044		return st.processTrailerHeaders(f)
2045	}
2046
2047	// [...] The identifier of a newly established stream MUST be
2048	// numerically greater than all streams that the initiating
2049	// endpoint has opened or reserved. [...]  An endpoint that
2050	// receives an unexpected stream identifier MUST respond with
2051	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
2052	if id <= sc.maxClientStreamID {
2053		return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
2054	}
2055	sc.maxClientStreamID = id
2056
2057	if sc.idleTimer != nil {
2058		sc.idleTimer.Stop()
2059	}
2060
2061	// http://tools.ietf.org/html/rfc7540#section-5.1.2
2062	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
2063	// endpoint that receives a HEADERS frame that causes their
2064	// advertised concurrent stream limit to be exceeded MUST treat
2065	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
2066	// or REFUSED_STREAM.
2067	if sc.curClientStreams+1 > sc.advMaxStreams {
2068		if sc.unackedSettings == 0 {
2069			// They should know better.
2070			return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
2071		}
2072		// Assume it's a network race, where they just haven't
2073		// received our last SETTINGS update. But actually
2074		// this can't happen yet, because we don't yet provide
2075		// a way for users to adjust server parameters at
2076		// runtime.
2077		return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
2078	}
2079
2080	initialState := stateOpen
2081	if f.StreamEnded() {
2082		initialState = stateHalfClosedRemote
2083	}
2084	st := sc.newStream(id, 0, initialState)
2085
2086	if f.HasPriority() {
2087		if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
2088			return err
2089		}
2090		sc.writeSched.AdjustStream(st.id, f.Priority)
2091	}
2092
2093	rw, req, err := sc.newWriterAndRequest(st, f)
2094	if err != nil {
2095		return err
2096	}
2097	st.reqTrailer = req.Trailer
2098	if st.reqTrailer != nil {
2099		st.trailer = make(http.Header)
2100	}
2101	st.body = req.Body.(*requestBody).pipe // may be nil
2102	st.declBodyBytes = req.ContentLength
2103
2104	handler := sc.handler.ServeHTTP
2105	if f.Truncated {
2106		// Their header list was too long. Send a 431 error.
2107		handler = handleHeaderListTooLong
2108	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
2109		handler = new400Handler(err)
2110	}
2111
2112	// The net/http package sets the read deadline from the
2113	// http.Server.ReadTimeout during the TLS handshake, but then
2114	// passes the connection off to us with the deadline already
2115	// set. Disarm it here after the request headers are read,
2116	// similar to how the http1 server works. Here it's
2117	// technically more like the http1 Server's ReadHeaderTimeout
2118	// (in Go 1.8), though. That's a more sane option anyway.
2119	if sc.hs.ReadTimeout > 0 {
2120		sc.conn.SetReadDeadline(time.Time{})
2121		st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
2122	}
2123
2124	return sc.scheduleHandler(id, rw, req, handler)
2125}
2126
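// upgradeRequest starts handling a request that arrived as an HTTP/1 upgrade
// to HTTP/2: the request becomes stream 1, already half closed (remote), and
// its handler is started directly.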
2127func (sc *serverConn) upgradeRequest(req *http.Request) {
2128	sc.serveG.check()
2129	id := uint32(1)
2130	sc.maxClientStreamID = id
2131	st := sc.newStream(id, 0, stateHalfClosedRemote)
2132	st.reqTrailer = req.Trailer
2133	if st.reqTrailer != nil {
2134		st.trailer = make(http.Header)
2135	}
2136	rw := sc.newResponseWriter(st, req)
2137
2138	// Disable any read deadline set by the net/http package
2139	// prior to the upgrade.
2140	if sc.hs.ReadTimeout > 0 {
2141		sc.conn.SetReadDeadline(time.Time{})
2142	}
2143
2144	// This is the first request on the connection,
2145	// so start the handler directly rather than going
2146	// through scheduleHandler.
2147	sc.curHandlers++
2148	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
2149}
2150
2151func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
2152	sc := st.sc
2153	sc.serveG.check()
2154	if st.gotTrailerHeader {
2155		return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
2156	}
2157	st.gotTrailerHeader = true
2158	if !f.StreamEnded() {
2159		return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
2160	}
2161
2162	if len(f.PseudoFields()) > 0 {
2163		return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
2164	}
2165	if st.trailer != nil {
2166		for _, hf := range f.RegularFields() {
2167			key := sc.canonicalHeader(hf.Name)
2168			if !httpguts.ValidTrailerHeader(key) {
2169				// TODO: send more details to the peer somehow. But http2 has
2170				// no way to send debug data at a stream level. Discuss with
2171				// HTTP folk.
2172				return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
2173			}
2174			st.trailer[key] = append(st.trailer[key], hf.Value)
2175		}
2176	}
2177	st.endStream()
2178	return nil
2179}
2180
2181func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
2182	if streamID == p.StreamDep {
2183		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
2184		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
2185		// Section 5.3.3 says that a stream can depend on one of its dependencies,
2186		// so it's only self-dependencies that are forbidden.
2187		return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
2188	}
2189	return nil
2190}
2191
2192func (sc *serverConn) processPriority(f *PriorityFrame) error {
2193	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
2194		return err
2195	}
2196	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
2197	return nil
2198}
2199
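// newStream creates and registers a stream with the given id and initial
// state, initializing its flow-control windows, write deadline, and write
// scheduler entry.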
2200func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
2201	sc.serveG.check()
2202	if id == 0 {
2203		panic("internal error: cannot create stream with id 0")
2204	}
2205
2206	ctx, cancelCtx := context.WithCancel(sc.baseCtx)
2207	st := &stream{
2208		sc:        sc,
2209		id:        id,
2210		state:     state,
2211		ctx:       ctx,
2212		cancelCtx: cancelCtx,
2213	}
2214	st.cw.Init()
2215	st.flow.conn = &sc.flow // link to conn-level counter
2216	st.flow.add(sc.initialStreamSendWindowSize)
2217	st.inflow.init(sc.initialStreamRecvWindowSize)
2218	if sc.hs.WriteTimeout > 0 {
2219		st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
2220	}
2221
2222	sc.streams[id] = st
2223	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
2224	if st.isPushed() {
2225		sc.curPushedStreams++
2226	} else {
2227		sc.curClientStreams++
2228	}
2229	if sc.curOpenStreams() == 1 {
2230		sc.setConnState(http.StateActive)
2231	}
2232
2233	return st
2234}
2235
2236func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
2237	sc.serveG.check()
2238
2239	rp := httpcommon.ServerRequestParam{
2240		Method:    f.PseudoValue("method"),
2241		Scheme:    f.PseudoValue("scheme"),
2242		Authority: f.PseudoValue("authority"),
2243		Path:      f.PseudoValue("path"),
2244		Protocol:  f.PseudoValue("protocol"),
2245	}
2246
2247	// extended connect is disabled, so we should not see :protocol
2248	if disableExtendedConnectProtocol && rp.Protocol != "" {
2249		return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
2250	}
2251
2252	isConnect := rp.Method == "CONNECT"
2253	if isConnect {
2254		if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
2255			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
2256		}
2257	} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
2258		// See 8.1.2.6 Malformed Requests and Responses:
2259		//
2260		// Malformed requests or responses that are detected
2261		// MUST be treated as a stream error (Section 5.4.2)
2262		// of type PROTOCOL_ERROR."
2263		//
2264		// 8.1.2.3 Request Pseudo-Header Fields
2265		// "All HTTP/2 requests MUST include exactly one valid
2266		// value for the :method, :scheme, and :path
2267		// pseudo-header fields"
2268		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
2269	}
2270
2271	header := make(http.Header)
2272	rp.Header = header
2273	for _, hf := range f.RegularFields() {
2274		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
2275	}
2276	if rp.Authority == "" {
2277		rp.Authority = header.Get("Host")
2278	}
2279	if rp.Protocol != "" {
2280		header.Set(":protocol", rp.Protocol)
2281	}
2282
2283	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
2284	if err != nil {
2285		return nil, nil, err
2286	}
2287	bodyOpen := !f.StreamEnded()
2288	if bodyOpen {
2289		if vv, ok := rp.Header["Content-Length"]; ok {
2290			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
2291				req.ContentLength = int64(cl)
2292			} else {
2293				req.ContentLength = 0
2294			}
2295		} else {
2296			req.ContentLength = -1
2297		}
2298		req.Body.(*requestBody).pipe = &pipe{
2299			b: &dataBuffer{expected: req.ContentLength},
2300		}
2301	}
2302	return rw, req, nil
2303}
2304
2305func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
2306	sc.serveG.check()
2307
2308	var tlsState *tls.ConnectionState // nil if not scheme https
2309	if rp.Scheme == "https" {
2310		tlsState = sc.tlsState
2311	}
2312
2313	res := httpcommon.NewServerRequest(rp)
2314	if res.InvalidReason != "" {
2315		return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
2316	}
2317
2318	body := &requestBody{
2319		conn:          sc,
2320		stream:        st,
2321		needsContinue: res.NeedsContinue,
2322	}
2323	req := (&http.Request{
2324		Method:     rp.Method,
2325		URL:        res.URL,
2326		RemoteAddr: sc.remoteAddrStr,
2327		Header:     rp.Header,
2328		RequestURI: res.RequestURI,
2329		Proto:      "HTTP/2.0",
2330		ProtoMajor: 2,
2331		ProtoMinor: 0,
2332		TLS:        tlsState,
2333		Host:       rp.Authority,
2334		Body:       body,
2335		Trailer:    res.Trailer,
2336	}).WithContext(st.ctx)
2337	rw := sc.newResponseWriter(st, req)
2338	return rw, req, nil
2339}
2340
2341func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter {
2342	rws := responseWriterStatePool.Get().(*responseWriterState)
2343	bwSave := rws.bw
2344	*rws = responseWriterState{} // zero all the fields
2345	rws.conn = sc
2346	rws.bw = bwSave
2347	rws.bw.Reset(chunkWriter{rws})
2348	rws.stream = st
2349	rws.req = req
2350	return &responseWriter{rws: rws}
2351}
2352
2353type unstartedHandler struct {
2354	streamID uint32
2355	rw       *responseWriter
2356	req      *http.Request
2357	handler  func(http.ResponseWriter, *http.Request)
2358}
2359
2360// scheduleHandler starts a handler goroutine,
2361// or schedules one to start as soon as an existing handler finishes.
2362func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
2363	sc.serveG.check()
2364	maxHandlers := sc.advMaxStreams
2365	if sc.curHandlers < maxHandlers {
2366		sc.curHandlers++
2367		go sc.runHandler(rw, req, handler)
2368		return nil
2369	}
2370	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
2371		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
2372	}
2373	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
2374		streamID: streamID,
2375		rw:       rw,
2376		req:      req,
2377		handler:  handler,
2378	})
2379	return nil
2380}
2381
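// handlerDone runs on the serve goroutine after a handler goroutine exits;
// it starts handlers that were queued while the concurrency limit was hit.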
2382func (sc *serverConn) handlerDone() {
2383	sc.serveG.check()
2384	sc.curHandlers--
2385	i := 0
2386	maxHandlers := sc.advMaxStreams
2387	for ; i < len(sc.unstartedHandlers); i++ {
2388		u := sc.unstartedHandlers[i]
2389		if sc.streams[u.streamID] == nil {
2390			// This stream was reset before its goroutine had a chance to start.
2391			continue
2392		}
2393		if sc.curHandlers >= maxHandlers {
2394			break
2395		}
2396		sc.curHandlers++
2397		go sc.runHandler(u.rw, u.req, u.handler)
2398		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
2399	}
2400	sc.unstartedHandlers = sc.unstartedHandlers[i:]
2401	if len(sc.unstartedHandlers) == 0 {
2402		sc.unstartedHandlers = nil
2403	}
2404}
2405
2406// Run on its own goroutine.
2407func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
2408	sc.srv.markNewGoroutine()
2409	defer sc.sendServeMsg(handlerDoneMsg)
2410	didPanic := true
2411	defer func() {
2412		rw.rws.stream.cancelCtx()
2413		if req.MultipartForm != nil {
2414			req.MultipartForm.RemoveAll()
2415		}
2416		if didPanic {
2417			e := recover()
2418			sc.writeFrameFromHandler(FrameWriteRequest{
2419				write:  handlerPanicRST{rw.rws.stream.id},
2420				stream: rw.rws.stream,
2421			})
2422			// Same as net/http:
2423			if e != nil && e != http.ErrAbortHandler {
2424				const size = 64 << 10
2425				buf := make([]byte, size)
2426				buf = buf[:runtime.Stack(buf, false)]
2427				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
2428			}
2429			return
2430		}
2431		rw.handlerDone()
2432	}()
2433	handler(rw, req)
2434	didPanic = false
2435}
2436
2437func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
2438	// 10.5.1 Limits on Header Block Size:
2439	// .. "A server that receives a larger header block than it is
2440	// willing to handle can send an HTTP 431 (Request Header Fields Too
2441	// Large) status code"
2442	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
2443	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
2444	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
2445}
2446
2447// called from handler goroutines.
2448// h may be nil.
2449func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
2450	sc.serveG.checkNotOn() // NOT on
2451	var errc chan error
2452	if headerData.h != nil {
2453		// If there's a header map (which we don't own), we have to block
2454		// until this frame is written, so that an http.Flush mid-handler
2455		// writes out the correct value of keys before a handler later
2456		// potentially mutates it.
2457		errc = errChanPool.Get().(chan error)
2458	}
2459	if err := sc.writeFrameFromHandler(FrameWriteRequest{
2460		write:  headerData,
2461		stream: st,
2462		done:   errc,
2463	}); err != nil {
2464		return err
2465	}
2466	if errc != nil {
2467		select {
2468		case err := <-errc:
2469			errChanPool.Put(errc)
2470			return err
2471		case <-sc.doneServing:
2472			return errClientDisconnected
2473		case <-st.cw:
2474			return errStreamClosed
2475		}
2476	}
2477	return nil
2478}
2479
2480// called from handler goroutines.
2481func (sc *serverConn) write100ContinueHeaders(st *stream) {
2482	sc.writeFrameFromHandler(FrameWriteRequest{
2483		write:  write100ContinueHeadersFrame{st.id},
2484		stream: st,
2485	})
2486}
2487
2488// A bodyReadMsg tells the server loop that the http.Handler read n
2489// bytes of the DATA from the client on the given stream.
2490type bodyReadMsg struct {
2491	st *stream
2492	n  int
2493}
2494
2495// called from handler goroutines.
2496// Notes that the handler for the given stream ID read n bytes of its body
2497// and schedules flow control tokens to be sent.
2498func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
2499	sc.serveG.checkNotOn() // NOT on
2500	if n > 0 {
2501		select {
2502		case sc.bodyReadCh <- bodyReadMsg{st, n}:
2503		case <-sc.doneServing:
2504		}
2505	}
2506}
2507
2508func (sc *serverConn) noteBodyRead(st *stream, n int) {
2509	sc.serveG.check()
2510	sc.sendWindowUpdate(nil, n) // conn-level
2511	if st.state != stateHalfClosedRemote && st.state != stateClosed {
2512		// Don't send this WINDOW_UPDATE if the stream is closed
2513		// remotely.
2514		sc.sendWindowUpdate(st, n)
2515	}
2516}
2517
2518// st may be nil for conn-level
2519func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
2520	sc.sendWindowUpdate(st, int(n))
2521}
2522
2523// st may be nil for conn-level
2524func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
2525	sc.serveG.check()
2526	var streamID uint32
2527	var send int32
2528	if st == nil {
2529		send = sc.inflow.add(n)
2530	} else {
2531		streamID = st.id
2532		send = st.inflow.add(n)
2533	}
2534	if send == 0 {
2535		return
2536	}
2537	sc.writeFrame(FrameWriteRequest{
2538		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
2539		stream: st,
2540	})
2541}
2542
2543// requestBody is the Handler's Request.Body type.
2544// Read and Close may be called concurrently.
2545type requestBody struct {
2546	_             incomparable
2547	stream        *stream
2548	conn          *serverConn
2549	closeOnce     sync.Once // for use by Close only
2550	sawEOF        bool      // for use by Read only
2551	pipe          *pipe     // non-nil if we have an HTTP entity message body
2552	needsContinue bool      // need to send a 100-continue
2553}
2554
2555func (b *requestBody) Close() error {
2556	b.closeOnce.Do(func() {
2557		if b.pipe != nil {
2558			b.pipe.BreakWithError(errClosedBody)
2559		}
2560	})
2561	return nil
2562}
2563
2564func (b *requestBody) Read(p []byte) (n int, err error) {
2565	if b.needsContinue {
2566		b.needsContinue = false
2567		b.conn.write100ContinueHeaders(b.stream)
2568	}
2569	if b.pipe == nil || b.sawEOF {
2570		return 0, io.EOF
2571	}
2572	n, err = b.pipe.Read(p)
2573	if err == io.EOF {
2574		b.sawEOF = true
2575	}
2576	if b.conn == nil && inTests {
2577		return
2578	}
2579	b.conn.noteBodyReadFromHandler(b.stream, n, err)
2580	return
2581}
2582
2583// responseWriter is the http.ResponseWriter implementation. It's
2584// intentionally small (1 pointer wide) to minimize garbage. The
2585// responseWriterState pointer inside is zeroed at the end of a
2586// request (in handlerDone) and calls on the responseWriter thereafter
2587// simply crash (caller's mistake), but the much larger responseWriterState
2588// and buffers are reused between multiple requests.
2589type responseWriter struct {
2590	rws *responseWriterState
2591}
2592
2593// Optional http.ResponseWriter interfaces implemented.
2594var (
2595	_ http.CloseNotifier = (*responseWriter)(nil)
2596	_ http.Flusher       = (*responseWriter)(nil)
2597	_ stringWriter       = (*responseWriter)(nil)
2598)
2599
2600type responseWriterState struct {
2601	// immutable within a request:
2602	stream *stream
2603	req    *http.Request
2604	conn   *serverConn
2605
2606	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
2607	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
2608
2609	// mutated by http.Handler goroutine:
2610	handlerHeader http.Header // nil until called
2611	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
2612	trailers      []string    // set in writeChunk
2613	status        int         // status code passed to WriteHeader
2614	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
2615	sentHeader    bool        // have we sent the header frame?
2616	handlerDone   bool        // handler has finished
2617
2618	sentContentLen int64 // non-zero if handler set a Content-Length header
2619	wroteBytes     int64
2620
2621	closeNotifierMu sync.Mutex // guards closeNotifierCh
2622	closeNotifierCh chan bool  // nil until first used
2623}
2624
2625type chunkWriter struct{ rws *responseWriterState }
2626
2627func (cw chunkWriter) Write(p []byte) (n int, err error) {
2628	n, err = cw.rws.writeChunk(p)
2629	if err == errStreamClosed {
2630		// If writing failed because the stream has been closed,
2631		// return the reason it was closed.
2632		err = cw.rws.stream.closeErr
2633	}
2634	return n, err
2635}
2636
2637func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
2638
2639func (rws *responseWriterState) hasNonemptyTrailers() bool {
2640	for _, trailer := range rws.trailers {
2641		if _, ok := rws.handlerHeader[trailer]; ok {
2642			return true
2643		}
2644	}
2645	return false
2646}
2647
2648// declareTrailer is called for each Trailer header when the
2649// response header is written. It notes that a header will need to be
2650// written in the trailers at the end of the response.
2651func (rws *responseWriterState) declareTrailer(k string) {
2652	k = http.CanonicalHeaderKey(k)
2653	if !httpguts.ValidTrailerHeader(k) {
2654		// Forbidden by RFC 7230, section 4.1.2.
2655		rws.conn.logf("ignoring invalid trailer %q", k)
2656		return
2657	}
2658	if !strSliceContains(rws.trailers, k) {
2659		rws.trailers = append(rws.trailers, k)
2660	}
2661}
2662
2663// writeChunk writes chunks from the bufio.Writer. But because
2664// bufio.Writer may bypass its chunking, sometimes p may be
2665// arbitrarily large.
2666//
2667// writeChunk is also responsible (on the first chunk) for sending the
2668// HEADER response.
2669func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
2670	if !rws.wroteHeader {
2671		rws.writeHeader(200)
2672	}
2673
2674	if rws.handlerDone {
2675		rws.promoteUndeclaredTrailers()
2676	}
2677
2678	isHeadResp := rws.req.Method == "HEAD"
2679	if !rws.sentHeader {
2680		rws.sentHeader = true
2681		var ctype, clen string
2682		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
2683			rws.snapHeader.Del("Content-Length")
2684			if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
2685				rws.sentContentLen = int64(cl)
2686			} else {
2687				clen = ""
2688			}
2689		}
2690		_, hasContentLength := rws.snapHeader["Content-Length"]
2691		if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
2692			clen = strconv.Itoa(len(p))
2693		}
2694		_, hasContentType := rws.snapHeader["Content-Type"]
2695		// If the Content-Encoding is non-blank, we shouldn't
2696		// sniff the body. See Issue golang.org/issue/31753.
2697		ce := rws.snapHeader.Get("Content-Encoding")
2698		hasCE := len(ce) > 0
2699		if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
2700			ctype = http.DetectContentType(p)
2701		}
2702		var date string
2703		if _, ok := rws.snapHeader["Date"]; !ok {
2704			// TODO(bradfitz): be faster here, like net/http? measure.
2705			date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
2706		}
2707
2708		for _, v := range rws.snapHeader["Trailer"] {
2709			foreachHeaderElement(v, rws.declareTrailer)
2710		}
2711
2712		// "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
2713		// but respect "Connection" == "close" to mean sending a GOAWAY and tearing
2714		// down the TCP connection when idle, like we do for HTTP/1.
2715		// TODO: remove more Connection-specific header fields here, in addition
2716		// to "Connection".
2717		if _, ok := rws.snapHeader["Connection"]; ok {
2718			v := rws.snapHeader.Get("Connection")
2719			delete(rws.snapHeader, "Connection")
2720			if v == "close" {
2721				rws.conn.startGracefulShutdown()
2722			}
2723		}
2724
2725		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
2726		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
2727			streamID:      rws.stream.id,
2728			httpResCode:   rws.status,
2729			h:             rws.snapHeader,
2730			endStream:     endStream,
2731			contentType:   ctype,
2732			contentLength: clen,
2733			date:          date,
2734		})
2735		if err != nil {
2736			return 0, err
2737		}
2738		if endStream {
2739			return 0, nil
2740		}
2741	}
2742	if isHeadResp {
2743		return len(p), nil
2744	}
2745	if len(p) == 0 && !rws.handlerDone {
2746		return 0, nil
2747	}
2748
2749	// only send trailers if they have actually been defined by the
2750	// server handler.
2751	hasNonemptyTrailers := rws.hasNonemptyTrailers()
2752	endStream := rws.handlerDone && !hasNonemptyTrailers
2753	if len(p) > 0 || endStream {
2754		// only send a 0 byte DATA frame if we're ending the stream.
2755		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
2756			return 0, err
2757		}
2758	}
2759
2760	if rws.handlerDone && hasNonemptyTrailers {
2761		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
2762			streamID:  rws.stream.id,
2763			h:         rws.handlerHeader,
2764			trailers:  rws.trailers,
2765			endStream: true,
2766		})
2767		return len(p), err
2768	}
2769	return len(p), nil
2770}
2771
2772// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
2773// that, if present, signals that the map entry is actually for
2774// the response trailers, and not the response headers. The prefix
2775// is stripped after the ServeHTTP call finishes and the values are
2776// sent in the trailers.
2777//
2778// This mechanism is intended only for trailers that are not known
2779// prior to the headers being written. If the set of trailers is fixed
2780// or known before the header is written, the normal Go trailers mechanism
2781// is preferred:
2782//
2783//	https://golang.org/pkg/net/http/#ResponseWriter
2784//	https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
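//
// For example, a handler could set a (hypothetical) trailer field after the
// response body has started being written:
//
//	w.Header().Set("Trailer:Grpc-Status", "0") // sent as the Grpc-Status trailer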
2785const TrailerPrefix = "Trailer:"
2786
2787// promoteUndeclaredTrailers permits http.Handlers to set trailers
2788// after the header has already been flushed. Because the Go
2789// ResponseWriter interface has no way to set Trailers (only the
2790// Header), and because we didn't want to expand the ResponseWriter
2791// interface, and because nobody used trailers, and because RFC 7230
2792// says you SHOULD (but not must) predeclare any trailers in the
2793// header, the official ResponseWriter rules said trailers in Go must
2794// be predeclared, and then we reuse the same ResponseWriter.Header()
2795// map to mean both Headers and Trailers. When it's time to write the
2796// Trailers, we pick out the fields of Headers that were declared as
2797// trailers. That worked for a while, until we found the first major
2798// user of Trailers in the wild: gRPC (using them only over http2),
2799// and gRPC libraries permit setting trailers mid-stream without
2800// predeclaring them. So: change of plans. We still permit the old
2801// way, but we also permit this hack: if a Header() key begins with
2802// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
2803// invalid token byte anyway, there is no ambiguity. (And it's already
2804// filtered out.) It's mildly hacky, but not terrible.
2805//
2806// This method runs after the Handler is done and promotes any Header
2807// fields to be trailers.
2808func (rws *responseWriterState) promoteUndeclaredTrailers() {
2809	for k, vv := range rws.handlerHeader {
2810		if !strings.HasPrefix(k, TrailerPrefix) {
2811			continue
2812		}
2813		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
2814		rws.declareTrailer(trailerKey)
2815		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
2816	}
2817
2818	if len(rws.trailers) > 1 {
2819		sorter := sorterPool.Get().(*sorter)
2820		sorter.SortStrings(rws.trailers)
2821		sorterPool.Put(sorter)
2822	}
2823}
2824
2825func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
2826	st := w.rws.stream
2827	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
2828		// If we're setting a deadline in the past, reset the stream immediately
2829		// so reads after SetReadDeadline returns will fail.
2830		st.onReadTimeout()
2831		return nil
2832	}
2833	w.rws.conn.sendServeMsg(func(sc *serverConn) {
2834		if st.readDeadline != nil {
2835			if !st.readDeadline.Stop() {
2836				// Deadline already exceeded, or stream has been closed.
2837				return
2838			}
2839		}
2840		if deadline.IsZero() {
2841			st.readDeadline = nil
2842		} else if st.readDeadline == nil {
2843			st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
2844		} else {
2845			st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
2846		}
2847	})
2848	return nil
2849}
2850
2851func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
2852	st := w.rws.stream
2853	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
2854		// If we're setting a deadline in the past, reset the stream immediately
2855		// so writes after SetWriteDeadline returns will fail.
2856		st.onWriteTimeout()
2857		return nil
2858	}
2859	w.rws.conn.sendServeMsg(func(sc *serverConn) {
2860		if st.writeDeadline != nil {
2861			if !st.writeDeadline.Stop() {
2862				// Deadline already exceeded, or stream has been closed.
2863				return
2864			}
2865		}
2866		if deadline.IsZero() {
2867			st.writeDeadline = nil
2868		} else if st.writeDeadline == nil {
2869			st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
2870		} else {
2871			st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
2872		}
2873	})
2874	return nil
2875}
2876
2877func (w *responseWriter) EnableFullDuplex() error {
2878	// We always support full duplex responses, so this is a no-op.
2879	return nil
2880}
2881
2882func (w *responseWriter) Flush() {
2883	w.FlushError()
2884}
2885
2886func (w *responseWriter) FlushError() error {
2887	rws := w.rws
2888	if rws == nil {
2889		panic("Flush called after Handler finished")
2890	}
2891	var err error
2892	if rws.bw.Buffered() > 0 {
2893		err = rws.bw.Flush()
2894	} else {
2895		// The bufio.Writer won't call chunkWriter.Write
2896		// (writeChunk with zero bytes), so we have to do it
2897		// ourselves to force the HTTP response header and/or
2898		// final DATA frame (with END_STREAM) to be sent.
2899		_, err = chunkWriter{rws}.Write(nil)
2900		if err == nil {
2901			select {
2902			case <-rws.stream.cw:
2903				err = rws.stream.closeErr
2904			default:
2905			}
2906		}
2907	}
2908	return err
2909}
2910
2911func (w *responseWriter) CloseNotify() <-chan bool {
2912	rws := w.rws
2913	if rws == nil {
2914		panic("CloseNotify called after Handler finished")
2915	}
2916	rws.closeNotifierMu.Lock()
2917	ch := rws.closeNotifierCh
2918	if ch == nil {
2919		ch = make(chan bool, 1)
2920		rws.closeNotifierCh = ch
2921		cw := rws.stream.cw
2922		go func() {
2923			cw.Wait() // wait for close
2924			ch <- true
2925		}()
2926	}
2927	rws.closeNotifierMu.Unlock()
2928	return ch
2929}
2930
2931func (w *responseWriter) Header() http.Header {
2932	rws := w.rws
2933	if rws == nil {
2934		panic("Header called after Handler finished")
2935	}
2936	if rws.handlerHeader == nil {
2937		rws.handlerHeader = make(http.Header)
2938	}
2939	return rws.handlerHeader
2940}
2941
2942// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
2943func checkWriteHeaderCode(code int) {
2944	// Issue 22880: require valid WriteHeader status codes.
2945	// For now we only enforce that it's three digits.
2946	// In the future we might block things over 599 (600 and above aren't defined
2947	// at http://httpwg.org/specs/rfc7231.html#status.codes).
2948	// But for now any three digits.
2949	//
2950	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
2951	// no equivalent bogus thing we can realistically send in HTTP/2,
2952	// so we'll consistently panic instead and help people find their bugs
2953	// early. (We can't return an error from WriteHeader even if we wanted to.)
2954	if code < 100 || code > 999 {
2955		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
2956	}
2957}
2958
2959func (w *responseWriter) WriteHeader(code int) {
2960	rws := w.rws
2961	if rws == nil {
2962		panic("WriteHeader called after Handler finished")
2963	}
2964	rws.writeHeader(code)
2965}
2966
2967func (rws *responseWriterState) writeHeader(code int) {
2968	if rws.wroteHeader {
2969		return
2970	}
2971
2972	checkWriteHeaderCode(code)
2973
2974	// Handle informational headers
2975	if code >= 100 && code <= 199 {
2976		// Per RFC 8297 we must not clear the current header map
2977		h := rws.handlerHeader
2978
2979		_, cl := h["Content-Length"]
2980		_, te := h["Transfer-Encoding"]
2981		if cl || te {
2982			h = h.Clone()
2983			h.Del("Content-Length")
2984			h.Del("Transfer-Encoding")
2985		}
2986
2987		rws.conn.writeHeaders(rws.stream, &writeResHeaders{
2988			streamID:    rws.stream.id,
2989			httpResCode: code,
2990			h:           h,
2991			endStream:   rws.handlerDone && !rws.hasTrailers(),
2992		})
2993
2994		return
2995	}
2996
2997	rws.wroteHeader = true
2998	rws.status = code
2999	if len(rws.handlerHeader) > 0 {
3000		rws.snapHeader = cloneHeader(rws.handlerHeader)
3001	}
3002}
3003
3004func cloneHeader(h http.Header) http.Header {
3005	h2 := make(http.Header, len(h))
3006	for k, vv := range h {
3007		vv2 := make([]string, len(vv))
3008		copy(vv2, vv)
3009		h2[k] = vv2
3010	}
3011	return h2
3012}
3013
3014// The Life Of A Write is like this:
3015//
3016// * Handler calls w.Write or w.WriteString ->
3017// * -> rws.bw (*bufio.Writer) ->
3018// * (Handler might call Flush)
3019// * -> chunkWriter{rws}
3020// * -> responseWriterState.writeChunk(p []byte) (most of the magic; see comment there)
3022func (w *responseWriter) Write(p []byte) (n int, err error) {
3023	return w.write(len(p), p, "")
3024}
3025
3026func (w *responseWriter) WriteString(s string) (n int, err error) {
3027	return w.write(len(s), nil, s)
3028}
3029
3030// at most one of dataB and dataS is non-empty.
3031func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
3032	rws := w.rws
3033	if rws == nil {
3034		panic("Write called after Handler finished")
3035	}
3036	if !rws.wroteHeader {
3037		w.WriteHeader(200)
3038	}
3039	if !bodyAllowedForStatus(rws.status) {
3040		return 0, http.ErrBodyNotAllowed
3041	}
3042	rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
3043	if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
3044		// TODO: send a RST_STREAM
3045		return 0, errors.New("http2: handler wrote more than declared Content-Length")
3046	}
3047
3048	if dataB != nil {
3049		return rws.bw.Write(dataB)
3050	} else {
3051		return rws.bw.WriteString(dataS)
3052	}
3053}
3054
3055func (w *responseWriter) handlerDone() {
3056	rws := w.rws
3057	rws.handlerDone = true
3058	w.Flush()
3059	w.rws = nil
3060	responseWriterStatePool.Put(rws)
3061}
3062
3063// Push errors.
3064var (
3065	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
3066	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
3067)
3068
3069var _ http.Pusher = (*responseWriter)(nil)
3070
3071func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
3072	st := w.rws.stream
3073	sc := st.sc
3074	sc.serveG.checkNotOn()
3075
3076	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
3077	// http://tools.ietf.org/html/rfc7540#section-6.6
3078	if st.isPushed() {
3079		return ErrRecursivePush
3080	}
3081
3082	if opts == nil {
3083		opts = new(http.PushOptions)
3084	}
3085
3086	// Default options.
3087	if opts.Method == "" {
3088		opts.Method = "GET"
3089	}
3090	if opts.Header == nil {
3091		opts.Header = http.Header{}
3092	}
3093	wantScheme := "http"
3094	if w.rws.req.TLS != nil {
3095		wantScheme = "https"
3096	}
3097
3098	// Validate the request.
3099	u, err := url.Parse(target)
3100	if err != nil {
3101		return err
3102	}
3103	if u.Scheme == "" {
3104		if !strings.HasPrefix(target, "/") {
3105			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
3106		}
3107		u.Scheme = wantScheme
3108		u.Host = w.rws.req.Host
3109	} else {
3110		if u.Scheme != wantScheme {
3111			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
3112		}
3113		if u.Host == "" {
3114			return errors.New("URL must have a host")
3115		}
3116	}
3117	for k := range opts.Header {
3118		if strings.HasPrefix(k, ":") {
3119			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
3120		}
3121		// These headers are meaningful only if the request has a body,
3122		// but PUSH_PROMISE requests cannot have a body.
3123		// http://tools.ietf.org/html/rfc7540#section-8.2
3124		// Also disallow Host, since the promised URL must be absolute.
3125		if asciiEqualFold(k, "content-length") ||
3126			asciiEqualFold(k, "content-encoding") ||
3127			asciiEqualFold(k, "trailer") ||
3128			asciiEqualFold(k, "te") ||
3129			asciiEqualFold(k, "expect") ||
3130			asciiEqualFold(k, "host") {
3131			return fmt.Errorf("promised request headers cannot include %q", k)
3132		}
3133	}
3134	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
3135		return err
3136	}
3137
3138	// The RFC effectively limits promised requests to GET and HEAD:
3139	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
3140	// http://tools.ietf.org/html/rfc7540#section-8.2
3141	if opts.Method != "GET" && opts.Method != "HEAD" {
3142		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
3143	}
3144
3145	msg := &startPushRequest{
3146		parent: st,
3147		method: opts.Method,
3148		url:    u,
3149		header: cloneHeader(opts.Header),
3150		done:   errChanPool.Get().(chan error),
3151	}
3152
3153	select {
3154	case <-sc.doneServing:
3155		return errClientDisconnected
3156	case <-st.cw:
3157		return errStreamClosed
3158	case sc.serveMsgCh <- msg:
3159	}
3160
3161	select {
3162	case <-sc.doneServing:
3163		return errClientDisconnected
3164	case <-st.cw:
3165		return errStreamClosed
3166	case err := <-msg.done:
3167		errChanPool.Put(msg.done)
3168		return err
3169	}
3170}
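
// The function below is an illustrative sketch, not part of the package: it
// shows how a handler typically reaches the Push implementation above through
// the standard http.Pusher interface. The function name and the pushed path
// "/static/style.css" are hypothetical examples.
func examplePushUsage(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		// Push is best-effort: it returns http.ErrNotSupported when the client
		// has disabled server push, ErrRecursivePush when called on a pushed
		// stream, and ErrPushLimitReached when the peer's concurrent-stream
		// limit would be exceeded.
		if err := pusher.Push("/static/style.css", nil); err != nil {
			log.Printf("http2: push failed: %v", err)
		}
	}
	io.WriteString(w, `<html><link rel="stylesheet" href="/static/style.css"></html>`)
}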
3171
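// startPushRequest carries a validated push from responseWriter.Push to the
// serve goroutine; the outcome of writing the PUSH_PROMISE is delivered on done.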
3172type startPushRequest struct {
3173	parent *stream
3174	method string
3175	url    *url.URL
3176	header http.Header
3177	done   chan error
3178}
3179
3180func (sc *serverConn) startPush(msg *startPushRequest) {
3181	sc.serveG.check()
3182
3183	// http://tools.ietf.org/html/rfc7540#section-6.6.
3184	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
3185	// is in either the "open" or "half-closed (remote)" state.
3186	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
3187		// responseWriter.Push checks that the stream is peer-initiated.
3188		msg.done <- errStreamClosed
3189		return
3190	}
3191
3192	// http://tools.ietf.org/html/rfc7540#section-6.6.
3193	if !sc.pushEnabled {
3194		msg.done <- http.ErrNotSupported
3195		return
3196	}
3197
3198	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
3199	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
3200	// is written. Once the ID is allocated, we start the request handler.
3201	allocatePromisedID := func() (uint32, error) {
3202		sc.serveG.check()
3203
3204		// Check this again, just in case. Technically, we might have received
3205		// an updated SETTINGS by the time we got around to writing this frame.
3206		if !sc.pushEnabled {
3207			return 0, http.ErrNotSupported
3208		}
3209		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
3210		if sc.curPushedStreams+1 > sc.clientMaxStreams {
3211			return 0, ErrPushLimitReached
3212		}
3213
3214		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
3215		// Streams initiated by the server MUST use even-numbered identifiers.
3216		// A server that is unable to establish a new stream identifier can send a GOAWAY
3217		// frame so that the client is forced to open a new connection for new streams.
3218		if sc.maxPushPromiseID+2 >= 1<<31 {
3219			sc.startGracefulShutdownInternal()
3220			return 0, ErrPushLimitReached
3221		}
3222		sc.maxPushPromiseID += 2
3223		promisedID := sc.maxPushPromiseID
3224
3225		// http://tools.ietf.org/html/rfc7540#section-8.2.
3226		// Strictly speaking, the new stream should start in "reserved (local)", then
3227		// transition to "half closed (remote)" after sending the initial HEADERS, but
3228		// we start in "half closed (remote)" for simplicity.
3229		// See further comments at the definition of stateHalfClosedRemote.
3230		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
3231		rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
3232			Method:    msg.method,
3233			Scheme:    msg.url.Scheme,
3234			Authority: msg.url.Host,
3235			Path:      msg.url.RequestURI(),
3236			Header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
3237		})
3238		if err != nil {
3239			// Should not happen, since we've already validated msg.url.
3240			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
3241		}
3242
3243		sc.curHandlers++
3244		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
3245		return promisedID, nil
3246	}
3247
3248	sc.writeFrame(FrameWriteRequest{
3249		write: &writePushPromise{
3250			streamID:           msg.parent.id,
3251			method:             msg.method,
3252			url:                msg.url,
3253			h:                  msg.header,
3254			allocatePromisedID: allocatePromisedID,
3255		},
3256		stream: msg.parent,
3257		done:   msg.done,
3258	})
3259}
3260
3261// foreachHeaderElement splits v according to the "#rule" construction
3262// in RFC 7230 section 7 and calls fn for each non-empty element.
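// For example, v = " trailers , deflate ,, gzip " results in the calls
// fn("trailers"), fn("deflate"), and fn("gzip"), in that order.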
3263func foreachHeaderElement(v string, fn func(string)) {
3264	v = textproto.TrimString(v)
3265	if v == "" {
3266		return
3267	}
3268	if !strings.Contains(v, ",") {
3269		fn(v)
3270		return
3271	}
3272	for _, f := range strings.Split(v, ",") {
3273		if f = textproto.TrimString(f); f != "" {
3274			fn(f)
3275		}
3276	}
3277}
3278
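// connHeaders lists the connection-specific header fields that must not
// appear in HTTP/2 requests.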
3279// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
3280var connHeaders = []string{
3281	"Connection",
3282	"Keep-Alive",
3283	"Proxy-Connection",
3284	"Transfer-Encoding",
3285	"Upgrade",
3286}
3287
3288// checkValidHTTP2RequestHeaders checks whether h contains only request
3289// headers that are valid in HTTP/2, per RFC 7540 Section 8.1.2.2.
3290// The returned error is reported to users.
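// For example, a header set containing "Connection: keep-alive" or "TE: gzip"
// is rejected, while "TE: trailers" is accepted.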
3291func checkValidHTTP2RequestHeaders(h http.Header) error {
3292	for _, k := range connHeaders {
3293		if _, ok := h[k]; ok {
3294			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
3295		}
3296	}
3297	te := h["Te"]
3298	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
3299		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
3300	}
3301	return nil
3302}
3303
3304func new400Handler(err error) http.HandlerFunc {
3305	return func(w http.ResponseWriter, r *http.Request) {
3306		http.Error(w, err.Error(), http.StatusBadRequest)
3307	}
3308}
3309
3310// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
3311// disabled. See comments on h1ServerShutdownChan above for why
3312// the code is written this way.
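// Note: the unexported doKeepAlives method can only satisfy the interface
// declared below when this code is compiled into the same package that
// declares the method (as happens when this package is bundled into
// net/http); in the standalone package the assertion fails and false is
// returned.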
3313func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
3314	var x interface{} = hs
3315	type I interface {
3316		doKeepAlives() bool
3317	}
3318	if hs, ok := x.(I); ok {
3319		return !hs.doKeepAlives()
3320	}
3321	return false
3322}
3323
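// countError reports connection- and stream-level protocol errors to the
// sc.countErrorFunc hook, if one is set, under a name of the form
// "<type>_<code>_<name>" where type is "conn" or "stream", and returns err
// unchanged.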
3324func (sc *serverConn) countError(name string, err error) error {
3325	if sc == nil || sc.srv == nil {
3326		return err
3327	}
3328	f := sc.countErrorFunc
3329	if f == nil {
3330		return err
3331	}
3332	var typ string
3333	var code ErrCode
3334	switch e := err.(type) {
3335	case ConnectionError:
3336		typ = "conn"
3337		code = ErrCode(e)
3338	case StreamError:
3339		typ = "stream"
3340		code = ErrCode(e.Code)
3341	default:
3342		return err
3343	}
3344	codeStr := errCodeName[code]
3345	if codeStr == "" {
3346		codeStr = strconv.Itoa(int(code))
3347	}
3348	f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
3349	return err
3350}