# -*- test-case-name: twisted.web.test.test_http2 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
HTTP2 Implementation

This is the basic server-side protocol implementation used by the Twisted
Web server for HTTP2. This functionality is intended to be combined with the
HTTP/1.1 and HTTP/1.0 functionality in twisted.web.http to provide complete
protocol support for HTTP-type protocols.

This API is currently considered private because it's in early draft form. When
it has stabilised, it'll be made public.
"""

import io
from collections import deque
from typing import List

from zope.interface import implementer

import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import priority

from twisted.internet._producer_helpers import _PullToPush
from twisted.internet.defer import Deferred
from twisted.internet.error import ConnectionLost
from twisted.internet.interfaces import (
    IConsumer,
    IProtocol,
    IPushProducer,
    ISSLTransport,
    ITransport,
)
from twisted.internet.protocol import Protocol
from twisted.logger import Logger
from twisted.protocols.policies import TimeoutMixin
from twisted.python.failure import Failure
from twisted.web.error import ExcessiveBufferingError

# This API is currently considered private.
__all__: List[str] = []

_END_STREAM_SENTINEL = object()
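
# Illustrative sketch (not part of this module's API): one way a Twisted Web
# server ends up on this code path. HTTP/2 is only selected when the optional
# http2 dependencies (h2, priority) are installed and the TLS transport
# negotiates "h2" via ALPN; the port and certificate paths below are
# hypothetical.
#
#     from twisted.internet import endpoints, reactor
#     from twisted.web import server, static
#
#     site = server.Site(static.File("/srv/www"))
#     endpoint = endpoints.serverFromString(
#         reactor, "ssl:443:privateKey=server.key:certKey=server.crt"
#     )
#     endpoint.listen(site)
#     reactor.run()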


@implementer(IProtocol, IPushProducer)
class H2Connection(Protocol, TimeoutMixin):
    """
    A class representing a single HTTP/2 connection.

    This implementation of L{IProtocol} works hand in hand with L{H2Stream}.
    This is because we have the requirement to register multiple producers for
    a single HTTP/2 connection, one for each stream. The standard Twisted
    interfaces don't really allow for this, so instead there's a custom
    interface between the two objects that allows them to work hand-in-hand here.

    @ivar conn: The HTTP/2 connection state machine.
    @type conn: L{h2.connection.H2Connection}

    @ivar streams: A mapping of stream IDs to L{H2Stream} objects, used to call
        specific methods on streams when events occur.
    @type streams: L{dict}, mapping L{int} stream IDs to L{H2Stream} objects.

    @ivar priority: An HTTP/2 priority tree used to ensure that responses are
        prioritised appropriately.
    @type priority: L{priority.PriorityTree}

    @ivar _consumerBlocked: A flag tracking whether or not the L{IConsumer}
        that is consuming this data has asked us to stop producing.
    @type _consumerBlocked: L{bool}

    @ivar _sendingDeferred: A L{Deferred} used to restart the data-sending loop
        when more response data has been produced. Will not be present if there
        is outstanding data still to send.
    @type _sendingDeferred: A L{twisted.internet.defer.Deferred}, or L{None}

    @ivar _outboundStreamQueues: A map of stream IDs to queues, used to store
        data blocks that are yet to be sent on the connection. These are used
        both to handle producers that do not respect L{IConsumer} but also to
        allow priority to multiplex data appropriately.
    @type _outboundStreamQueues: A L{dict} mapping L{int} stream IDs to
        L{collections.deque} queues, which contain either L{bytes} objects or
        C{_END_STREAM_SENTINEL}.

    @ivar _sender: A handle to the data-sending loop, allowing it to be
        terminated if needed.
    @type _sender: L{twisted.internet.task.LoopingCall}

    @ivar abortTimeout: The number of seconds to wait after we attempt to shut
        the transport down cleanly to give up and forcibly terminate it. This
        is only used when we time a connection out, to prevent errors causing
        the FD to get leaked. If this is L{None}, we will wait forever.
    @type abortTimeout: L{int}

    @ivar _abortingCall: The L{twisted.internet.base.DelayedCall} that will be
        used to forcibly close the transport if it doesn't close cleanly.
    @type _abortingCall: L{twisted.internet.base.DelayedCall}
    """

    factory = None
    site = None
    abortTimeout = 15

    _log = Logger()
    _abortingCall = None

    def __init__(self, reactor=None):
        config = h2.config.H2Configuration(client_side=False, header_encoding=None)
        self.conn = h2.connection.H2Connection(config=config)
        self.streams = {}

        self.priority = priority.PriorityTree()
        self._consumerBlocked = None
        self._sendingDeferred = None
        self._outboundStreamQueues = {}
        self._streamCleanupCallbacks = {}
        self._stillProducing = True

        # Limit the number of buffered control frame (e.g. PING and
        # SETTINGS) bytes.
        self._maxBufferedControlFrameBytes = 1024 * 17
        self._bufferedControlFrames = deque()
        self._bufferedControlFrameBytes = 0

        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # Start the data sending function.
        self._reactor.callLater(0, self._sendPrioritisedData)
    # Implementation of IProtocol
    def connectionMade(self):
        """
        Called by the reactor when a connection is received. May also be called
        by the L{twisted.web.http._GenericHTTPChannelProtocol} during upgrade
        to HTTP/2.
        """
        self.setTimeout(self.timeOut)
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())

    def dataReceived(self, data):
        """
        Called whenever a chunk of data is received from the transport.

        @param data: The data received from the transport.
        @type data: L{bytes}
        """
        try:
            events = self.conn.receive_data(data)
        except h2.exceptions.ProtocolError:
            stillActive = self._tryToWriteControlData()
            if stillActive:
                self.transport.loseConnection()
                self.connectionLost(Failure(), _cancelTimeouts=False)
            return

        # Only reset the timeout if we've received an actual H2
        # protocol message
        self.resetTimeout()

        for event in events:
            if isinstance(event, h2.events.RequestReceived):
                self._requestReceived(event)
            elif isinstance(event, h2.events.DataReceived):
                self._requestDataReceived(event)
            elif isinstance(event, h2.events.StreamEnded):
                self._requestEnded(event)
            elif isinstance(event, h2.events.StreamReset):
                self._requestAborted(event)
            elif isinstance(event, h2.events.WindowUpdated):
                self._handleWindowUpdate(event)
            elif isinstance(event, h2.events.PriorityUpdated):
                self._handlePriorityUpdate(event)
            elif isinstance(event, h2.events.ConnectionTerminated):
                self.transport.loseConnection()
                self.connectionLost(
                    Failure(ConnectionLost("Remote peer sent GOAWAY")),
                    _cancelTimeouts=False,
                )

        self._tryToWriteControlData()

    def timeoutConnection(self):
        """
        Called when the connection has been inactive for
        L{self.timeOut<twisted.protocols.policies.TimeoutMixin.timeOut>}
        seconds. Cleanly tears the connection down, attempting to notify the
        peer if needed.

        We override this method to add two extra bits of functionality:

         - We want to log the timeout.
         - We want to send a GOAWAY frame indicating that the connection is
           being terminated, and whether it was clean or not. We have to do
           this before the connection is torn down.
        """
        self._log.info("Timing out client {client}", client=self.transport.getPeer())

        # Check whether there are open streams. If there are, we're going to
        # want to use the error code PROTOCOL_ERROR. If there aren't, use
        # NO_ERROR.
        if self.conn.open_outbound_streams > 0 or self.conn.open_inbound_streams > 0:
            error_code = h2.errors.ErrorCodes.PROTOCOL_ERROR
        else:
            error_code = h2.errors.ErrorCodes.NO_ERROR

        self.conn.close_connection(error_code=error_code)
        self.transport.write(self.conn.data_to_send())

        # Don't let the client hold this connection open too long.
        if self.abortTimeout is not None:
            # We use self.callLater because that's what TimeoutMixin does, even
            # though we have a perfectly good reactor sitting around. See
            # https://twistedmatrix.com/trac/ticket/8488.
            self._abortingCall = self.callLater(
                self.abortTimeout, self.forceAbortClient
            )

        # We're done, throw the connection away.
        self.transport.loseConnection()

    def forceAbortClient(self):
        """
        Called if C{abortTimeout} seconds have passed since the timeout fired,
        and the connection still hasn't gone away. This can really only happen
        on extremely bad connections or when clients are maliciously attempting
        to keep connections open.
        """
        self._log.info(
            "Forcibly timing out client: {client}", client=self.transport.getPeer()
        )
        # We want to lose track of the _abortingCall so that no-one tries to
        # cancel it.
        self._abortingCall = None
        self.transport.abortConnection()

    def connectionLost(self, reason, _cancelTimeouts=True):
        """
        Called when the transport connection is lost.

        Informs all outstanding response handlers that the connection
        has been lost, and cleans up all internal state.

        @param reason: See L{IProtocol.connectionLost}

        @param _cancelTimeouts: Propagate the C{reason} to this
            connection's streams but don't cancel any timers, so that
            peers who never read the data we've written are eventually
            timed out.
        """
        self._stillProducing = False
        if _cancelTimeouts:
            self.setTimeout(None)

        for stream in self.streams.values():
            stream.connectionLost(reason)

        for streamID in list(self.streams.keys()):
            self._requestDone(streamID)

        # If we were going to force-close the transport, we don't have to now.
        if _cancelTimeouts and self._abortingCall is not None:
            self._abortingCall.cancel()
            self._abortingCall = None
    # Implementation of IPushProducer
    #
    # Here's how we handle IPushProducer. We have multiple outstanding
    # H2Streams. Each of these exposes an IConsumer interface to the response
    # handler that allows it to push data into the H2Stream. The H2Stream then
    # writes the data into the H2Connection object.
    #
    # The H2Connection needs to manage these writes to account for:
    #
    # - flow control
    # - priority
    #
    # We manage each of these in different ways.
    #
    # For flow control, we simply use the equivalent of the IPushProducer
    # interface. We simply tell the H2Stream: "Hey, you can't send any data
    # right now, sorry!". When that stream becomes unblocked, we free it up
    # again. This allows the H2Stream to propagate this backpressure up the
    # chain.
    #
    # For priority, we need to keep a backlog of data frames that we can send,
    # and interleave them appropriately. This backlog is most sensibly kept in
    # the H2Connection object itself. We keep one queue per stream, which is
    # where the writes go, and then we have a loop that manages popping these
    # streams off in priority order.
    #
    # Logically then, we go as follows:
    #
    # 1. Stream calls writeDataToStream(). This causes a DataFrame to be placed
    #    on the queue for that stream. It also informs the priority
    #    implementation that this stream is unblocked.
    # 2. The _sendPrioritisedData() function spins in a tight loop. Each
    #    iteration it asks the priority implementation which stream should send
    #    next, and pops a data frame off that stream's queue. If, after sending
    #    that frame, there is no data left on that stream's queue, the function
    #    informs the priority implementation that the stream is blocked.
    #
    # If all streams are blocked, or if there are no outstanding streams, the
    # _sendPrioritisedData function waits to be awoken when more data is ready
    # to send.
    #
    # Note that all of this only applies to *data*. Headers and other control
    # frames deliberately skip this processing as they are not subject to flow
    # control or priority constraints. Instead, they are stored in their own
    # buffer which is used primarily to detect excessive buffering.
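    #
    # Sketched end-to-end example of that write path (illustrative only; the
    # stream ID and payload are made up, and in real use these calls are made
    # by an H2Stream rather than by hand):
    #
    #     conn.writeDataToStream(1, b"hello")   # queue data, unblock stream 1
    #     conn.writeDataToStream(1, b"world")
    #     conn.endRequest(1)                    # queue _END_STREAM_SENTINEL
    #     # _sendPrioritisedData() then pops frames off stream 1's queue in
    #     # priority order, writes them to the transport, and calls
    #     # _requestDone(1) when it reaches the sentinel.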
    def stopProducing(self):
        """
        Stop producing data.

        This tells the L{H2Connection} that its consumer has died, so it must
        stop producing data for good.
        """
        self.connectionLost(Failure(ConnectionLost("Producing stopped")))

    def pauseProducing(self):
        """
        Pause producing data.

        Tells the L{H2Connection} that it has produced too much data to process
        for the time being, and to stop until resumeProducing() is called.
        """
        self._consumerBlocked = Deferred()
        # Ensure pending control data (if any) are sent first.
        self._consumerBlocked.addCallback(self._flushBufferedControlData)

    def resumeProducing(self):
        """
        Resume producing data.

        This tells the L{H2Connection} to re-add itself to the main loop and
        produce more data for the consumer.
        """
        if self._consumerBlocked is not None:
            d = self._consumerBlocked
            self._consumerBlocked = None
            d.callback(None)
    def _sendPrioritisedData(self, *args):
        """
        The data sending loop. This function repeatedly calls itself, either
        from L{Deferred}s or from
        L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}.

        This function sends data on streams according to the rules of HTTP/2
        priority. It ensures that the data from each stream is interleaved
        according to the priority signalled by the client, making sure that the
        connection is used with maximal efficiency.

        This function will execute if data is available: if all data is
        exhausted, the function will place a deferred onto the L{H2Connection}
        object and wait until it is called to resume executing.
        """
        # If producing has stopped, we're done. Don't reschedule ourselves
        if not self._stillProducing:
            return

        stream = None

        while stream is None:
            try:
                stream = next(self.priority)
            except priority.DeadlockError:
                # All streams are currently blocked or not progressing. Wait
                # until a new one becomes available.
                assert self._sendingDeferred is None
                self._sendingDeferred = Deferred()
                self._sendingDeferred.addCallback(self._sendPrioritisedData)
                return

        # Wait behind the transport.
        if self._consumerBlocked is not None:
            self._consumerBlocked.addCallback(self._sendPrioritisedData)
            return

        self.resetTimeout()

        remainingWindow = self.conn.local_flow_control_window(stream)
        frameData = self._outboundStreamQueues[stream].popleft()
        maxFrameSize = min(self.conn.max_outbound_frame_size, remainingWindow)

        if frameData is _END_STREAM_SENTINEL:
            # There's no error handling here even though this can throw
            # ProtocolError because we really shouldn't encounter this problem.
            # If we do, that's a nasty bug.
            self.conn.end_stream(stream)
            self.transport.write(self.conn.data_to_send())

            # Clean up the stream
            self._requestDone(stream)
        else:
            # Respect the max frame size.
            if len(frameData) > maxFrameSize:
                excessData = frameData[maxFrameSize:]
                frameData = frameData[:maxFrameSize]
                self._outboundStreamQueues[stream].appendleft(excessData)

            # There's deliberately no error handling here, because this just
            # absolutely should not happen.
            # If for whatever reason the max frame length is zero and so we
            # have no frame data to send, don't send any.
            if frameData:
                self.conn.send_data(stream, frameData)
                self.transport.write(self.conn.data_to_send())

            # If there's no data left, this stream is now blocked.
            if not self._outboundStreamQueues[stream]:
                self.priority.block(stream)

            # Also, if the stream's flow control window is exhausted, tell it
            # to stop.
            if self.remainingOutboundWindow(stream) <= 0:
                self.streams[stream].flowControlBlocked()

        self._reactor.callLater(0, self._sendPrioritisedData)
    # Internal functions.
    def _requestReceived(self, event):
        """
        Internal handler for when a request has been received.

        @param event: The Hyper-h2 event that encodes information about the
            received request.
        @type event: L{h2.events.RequestReceived}
        """
        stream = H2Stream(
            event.stream_id,
            self,
            event.headers,
            self.requestFactory,
            self.site,
            self.factory,
        )
        self.streams[event.stream_id] = stream
        self._streamCleanupCallbacks[event.stream_id] = Deferred()
        self._outboundStreamQueues[event.stream_id] = deque()

        # Add the stream to the priority tree but immediately block it.
        try:
            self.priority.insert_stream(event.stream_id)
        except priority.DuplicateStreamError:
            # Stream already in the tree. This can happen if we received a
            # PRIORITY frame before a HEADERS frame. Just move on: we set the
            # stream up properly in _handlePriorityUpdate.
            pass
        else:
            self.priority.block(event.stream_id)

    def _requestDataReceived(self, event):
        """
        Internal handler for when a chunk of data is received for a given
        request.

        @param event: The Hyper-h2 event that encodes information about the
            received data.
        @type event: L{h2.events.DataReceived}
        """
        stream = self.streams[event.stream_id]
        stream.receiveDataChunk(event.data, event.flow_controlled_length)

    def _requestEnded(self, event):
        """
        Internal handler for when a request is complete, and we expect no
        further data for that request.

        @param event: The Hyper-h2 event that encodes information about the
            completed stream.
        @type event: L{h2.events.StreamEnded}
        """
        stream = self.streams[event.stream_id]
        stream.requestComplete()

    def _requestAborted(self, event):
        """
        Internal handler for when a request is aborted by a remote peer.

        @param event: The Hyper-h2 event that encodes information about the
            reset stream.
        @type event: L{h2.events.StreamReset}
        """
        stream = self.streams[event.stream_id]
        stream.connectionLost(
            Failure(ConnectionLost("Stream reset with code %s" % event.error_code))
        )
        self._requestDone(event.stream_id)

    def _handlePriorityUpdate(self, event):
        """
        Internal handler for when a stream priority is updated.

        @param event: The Hyper-h2 event that encodes information about the
            stream reprioritization.
        @type event: L{h2.events.PriorityUpdated}
        """
        try:
            self.priority.reprioritize(
                stream_id=event.stream_id,
                depends_on=event.depends_on or None,
                weight=event.weight,
                exclusive=event.exclusive,
            )
        except priority.MissingStreamError:
            # A PRIORITY frame arrived before the HEADERS frame that would
            # trigger us to insert the stream into the tree. That's fine: we
            # can create the stream here and mark it as blocked.
            self.priority.insert_stream(
                stream_id=event.stream_id,
                depends_on=event.depends_on or None,
                weight=event.weight,
                exclusive=event.exclusive,
            )
            self.priority.block(event.stream_id)
    def writeHeaders(self, version, code, reason, headers, streamID):
        """
        Called by L{twisted.web.http.Request} objects to write a complete set
        of HTTP headers to a stream.

        @param version: The HTTP version in use. Unused in HTTP/2.
        @type version: L{bytes}

        @param code: The HTTP status code to write.
        @type code: L{bytes}

        @param reason: The HTTP reason phrase to write. Unused in HTTP/2.
        @type reason: L{bytes}

        @param headers: The headers to write to the stream.
        @type headers: L{twisted.web.http_headers.Headers}

        @param streamID: The ID of the stream to write the headers to.
        @type streamID: L{int}
        """
        headers.insert(0, (b":status", code))

        try:
            self.conn.send_headers(streamID, headers)
        except h2.exceptions.StreamClosedError:
            # Stream was closed by the client at some point. We need to not
            # explode here: just swallow the error. That's what write() does
            # when a connection is lost, so that's what we do too.
            return
        else:
            self._tryToWriteControlData()

    def writeDataToStream(self, streamID, data):
        """
        May be called by L{H2Stream} objects to write response data to a given
        stream. Writes a single data frame.

        @param streamID: The ID of the stream to write the data to.
        @type streamID: L{int}

        @param data: The data chunk to write to the stream.
        @type data: L{bytes}
        """
        self._outboundStreamQueues[streamID].append(data)

        # There's obviously no point unblocking this stream and the sending
        # loop if the data can't actually be sent, so confirm that there's
        # some room to send data.
        if self.conn.local_flow_control_window(streamID) > 0:
            self.priority.unblock(streamID)
            if self._sendingDeferred is not None:
                d = self._sendingDeferred
                self._sendingDeferred = None
                d.callback(streamID)

        if self.remainingOutboundWindow(streamID) <= 0:
            self.streams[streamID].flowControlBlocked()

    def endRequest(self, streamID):
        """
        Called by L{H2Stream} objects to signal completion of a response.

        @param streamID: The ID of the stream whose response is complete.
        @type streamID: L{int}
        """
        self._outboundStreamQueues[streamID].append(_END_STREAM_SENTINEL)
        self.priority.unblock(streamID)
        if self._sendingDeferred is not None:
            d = self._sendingDeferred
            self._sendingDeferred = None
            d.callback(streamID)

    def abortRequest(self, streamID):
        """
        Called by L{H2Stream} objects to request early termination of a stream.
        This emits a RstStream frame and then removes all stream state.

        @param streamID: The ID of the stream to abort.
        @type streamID: L{int}
        """
        self.conn.reset_stream(streamID)
        stillActive = self._tryToWriteControlData()
        if stillActive:
            self._requestDone(streamID)

    def _requestDone(self, streamID):
        """
        Called internally by the data sending loop to clean up state that was
        being used for the stream. Called when the stream is complete.

        @param streamID: The ID of the stream to clean up state for.
        @type streamID: L{int}
        """
        del self._outboundStreamQueues[streamID]
        self.priority.remove_stream(streamID)
        del self.streams[streamID]
        cleanupCallback = self._streamCleanupCallbacks.pop(streamID)
        cleanupCallback.callback(streamID)

    def remainingOutboundWindow(self, streamID):
        """
        Called to determine how much room is left in the send window for a
        given stream. Allows us to handle blocking and unblocking producers.

        @param streamID: The ID of the stream whose flow control window we'll
            check.
        @type streamID: L{int}

        @return: The amount of room remaining in the send window for the given
            stream, including the data queued to be sent.
        @rtype: L{int}
        """
        # TODO: This involves a fair bit of looping and computation for
        # something that is called a lot. Consider caching values somewhere.
        windowSize = self.conn.local_flow_control_window(streamID)
        sendQueue = self._outboundStreamQueues[streamID]
        alreadyConsumed = sum(
            len(chunk) for chunk in sendQueue if chunk is not _END_STREAM_SENTINEL
        )

        return windowSize - alreadyConsumed
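
    # For example (figures are hypothetical): with a 65,535-byte flow control
    # window reported by h2 and two 16,384-byte chunks already sitting in the
    # stream's outbound queue, remainingOutboundWindow() returns
    # 65535 - 2 * 16384 = 32767 bytes.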
    def _handleWindowUpdate(self, event):
        """
        Manage flow control windows.

        Streams that are blocked on flow control will register themselves with
        the connection. This will fire deferreds that wake those streams up and
        allow them to continue processing.

        @param event: The Hyper-h2 event that encodes information about the
            flow control window change.
        @type event: L{h2.events.WindowUpdated}
        """
        streamID = event.stream_id

        if streamID:
            if not self._streamIsActive(streamID):
                # We may have already cleaned up our stream state, making this
                # a late WINDOW_UPDATE frame. That's fine: the update is
                # unnecessary but benign. We'll ignore it.
                return

            # If we haven't got any data to send, don't unblock the stream. If
            # we do, we'll eventually get an exception inside the
            # _sendPrioritisedData loop some time later.
            if self._outboundStreamQueues.get(streamID):
                self.priority.unblock(streamID)
            self.streams[streamID].windowUpdated()
        else:
            # Update strictly applies to all streams.
            for stream in self.streams.values():
                stream.windowUpdated()

                # If we still have data to send for this stream, unblock it.
                if self._outboundStreamQueues.get(stream.streamID):
                    self.priority.unblock(stream.streamID)

    def getPeer(self):
        """
        Get the remote address of this connection.

        Treat this method with caution. It is the unfortunate result of the
        CGI and Jabber standards, but should not be considered reliable for
        the usual host of reasons; port forwarding, proxying, firewalls, IP
        masquerading, etc.

        @return: An L{IAddress} provider.
        """
        return self.transport.getPeer()

    def getHost(self):
        """
        Similar to getPeer, but returns an address describing this side of the
        connection.

        @return: An L{IAddress} provider.
        """
        return self.transport.getHost()

    def openStreamWindow(self, streamID, increment):
        """
        Open the stream window by a given increment.

        @param streamID: The ID of the stream whose window needs to be opened.
        @type streamID: L{int}

        @param increment: The amount by which the stream window must be
            incremented.
        @type increment: L{int}
        """
        self.conn.acknowledge_received_data(increment, streamID)
        self._tryToWriteControlData()

    def _isSecure(self):
        """
        Returns L{True} if this channel is using a secure transport.

        @returns: L{True} if this channel is secure.
        @rtype: L{bool}
        """
        # A channel is secure if its transport is ISSLTransport.
        return ISSLTransport(self.transport, None) is not None
    def _send100Continue(self, streamID):
        """
        Sends a 100 Continue response, used to signal to clients that further
        processing will be performed.

        @param streamID: The ID of the stream that needs the 100 Continue
            response.
        @type streamID: L{int}
        """
        headers = [(b":status", b"100")]
        self.conn.send_headers(headers=headers, stream_id=streamID)
        self._tryToWriteControlData()

    def _respondToBadRequestAndDisconnect(self, streamID):
        """
        This is a quick and dirty way of responding to bad requests.

        As described by the HTTP standard we should be patient and accept the
        whole request from the client before sending a polite bad request
        response, even in the case when clients send tons of data.

        Unlike in the HTTP/1.1 case, this does not actually disconnect the
        underlying transport: there's no need. This instead just sends a 400
        response and terminates the stream.

        @param streamID: The ID of the stream that needs the 400 Bad Request
            response.
        @type streamID: L{int}
        """
        headers = [(b":status", b"400")]
        self.conn.send_headers(headers=headers, stream_id=streamID, end_stream=True)
        stillActive = self._tryToWriteControlData()
        if stillActive:
            stream = self.streams[streamID]
            stream.connectionLost(Failure(ConnectionLost("Invalid request")))
            self._requestDone(streamID)

    def _streamIsActive(self, streamID):
        """
        Checks whether Twisted has still got state for a given stream and so
        can process events for that stream.

        @param streamID: The ID of the stream that needs processing.
        @type streamID: L{int}

        @return: Whether the stream still has state allocated.
        @rtype: L{bool}
        """
        return streamID in self.streams

    def _tryToWriteControlData(self):
        """
        Checks whether the connection is blocked on flow control and,
        if it isn't, writes any buffered control data.

        @return: L{True} if the connection is still active and
            L{False} if it was aborted because too many bytes have
            been written but not consumed by the other end.
        """
        bufferedBytes = self.conn.data_to_send()
        if not bufferedBytes:
            return True

        if self._consumerBlocked is None and not self._bufferedControlFrames:
            # The consumer isn't blocked, and we don't have any buffered
            # frames: write this directly.
            self.transport.write(bufferedBytes)
            return True
        else:
            # Either the consumer is blocked or we have buffered frames. If the
            # consumer is blocked, we'll write this when we unblock. If we have
            # buffered frames, we have presumably been re-entered from
            # transport.write, and so to avoid reordering issues we'll buffer
            # anyway.
            self._bufferedControlFrames.append(bufferedBytes)
            self._bufferedControlFrameBytes += len(bufferedBytes)

            if self._bufferedControlFrameBytes >= self._maxBufferedControlFrameBytes:
                maxBuffCtrlFrameBytes = self._maxBufferedControlFrameBytes
                self._log.error(
                    "Maximum number of control frame bytes buffered: "
                    "{bufferedControlFrameBytes} >= "
                    "{maxBufferedControlFrameBytes}. "
                    "Aborting connection to client: {client}",
                    bufferedControlFrameBytes=self._bufferedControlFrameBytes,
                    maxBufferedControlFrameBytes=maxBuffCtrlFrameBytes,
                    client=self.transport.getPeer(),
                )
                # We've exceeded a reasonable buffer size for max buffered
                # control frames. This is a denial of service risk, so we're
                # going to drop this connection.
                self.transport.abortConnection()
                self.connectionLost(Failure(ExcessiveBufferingError()))
                return False
            return True

    def _flushBufferedControlData(self, *args):
        """
        Called when the connection is marked writable again after being marked
        unwritable. Attempts to flush buffered control data if there is any.
        """
        # To respect backpressure here we send each write in order, paying
        # attention to whether we got blocked.
        while self._consumerBlocked is None and self._bufferedControlFrames:
            nextWrite = self._bufferedControlFrames.popleft()
            self._bufferedControlFrameBytes -= len(nextWrite)
            self.transport.write(nextWrite)


@implementer(ITransport, IConsumer, IPushProducer)
class H2Stream:
    """
    A class representing a single HTTP/2 stream.

    This class works hand-in-hand with L{H2Connection}. It acts to provide an
    implementation of L{ITransport}, L{IConsumer}, and L{IProducer} that work
    for a single HTTP/2 connection, while tightly cleaving to the interface
    provided by those interfaces. It does this by having a tight coupling to
    L{H2Connection}, which allows associating many of the functions of
    L{ITransport}, L{IConsumer}, and L{IProducer} to objects on a
    stream-specific level.

    @ivar streamID: The numerical stream ID that this object corresponds to.
    @type streamID: L{int}

    @ivar producing: Whether this stream is currently allowed to produce data
        to its consumer.
    @type producing: L{bool}

    @ivar command: The HTTP verb used on the request.
    @type command: L{unicode}

    @ivar path: The HTTP path used on the request.
    @type path: L{unicode}

    @ivar producer: The object producing the response, if any.
    @type producer: L{IProducer}

    @ivar site: The L{twisted.web.server.Site} object this stream belongs to,
        if any.
    @type site: L{twisted.web.server.Site}

    @ivar factory: The L{twisted.web.http.HTTPFactory} object that constructed
        this stream's parent connection.
    @type factory: L{twisted.web.http.HTTPFactory}

    @ivar _producerProducing: Whether the producer stored in producer is
        currently producing data.
    @type _producerProducing: L{bool}

    @ivar _inboundDataBuffer: Any data that has been received from the network
        but has not yet been received by the consumer.
    @type _inboundDataBuffer: A L{collections.deque} containing L{bytes}

    @ivar _conn: A reference to the connection this stream belongs to.
    @type _conn: L{H2Connection}

    @ivar _request: A request object that this stream corresponds to.
    @type _request: L{twisted.web.iweb.IRequest}

    @ivar _buffer: A buffer containing data produced by the producer that could
        not be sent on the network at this time.
    @type _buffer: L{io.BytesIO}
    """

    # We need a transport property for t.w.h.Request, but HTTP/2 doesn't want
    # to expose it. So we just set it to None.
    transport = None

    def __init__(self, streamID, connection, headers, requestFactory, site, factory):
        """
        Initialize this HTTP/2 stream.

        @param streamID: The numerical stream ID that this object corresponds
            to.
        @type streamID: L{int}

        @param connection: The HTTP/2 connection this stream belongs to.
        @type connection: L{H2Connection}

        @param headers: The HTTP/2 request headers.
        @type headers: A L{list} of L{tuple}s of header name and header value,
            both as L{bytes}.

        @param requestFactory: A function that builds appropriate request
            objects.
        @type requestFactory: A callable that returns a
            L{twisted.web.iweb.IRequest}.

        @param site: The L{twisted.web.server.Site} object this stream belongs
            to, if any.
        @type site: L{twisted.web.server.Site}

        @param factory: The L{twisted.web.http.HTTPFactory} object that
            constructed this stream's parent connection.
        @type factory: L{twisted.web.http.HTTPFactory}
        """
        self.streamID = streamID
        self.site = site
        self.factory = factory
        self.producing = True
        self.command = None
        self.path = None
        self.producer = None
        self._producerProducing = False
        self._hasStreamingProducer = None
        self._inboundDataBuffer = deque()
        self._conn = connection
        self._request = requestFactory(self, queued=False)
        self._buffer = io.BytesIO()

        self._convertHeaders(headers)

    def _convertHeaders(self, headers):
        """
        This method converts the HTTP/2 header set into something that looks
        like HTTP/1.1. In particular, it strips the 'special' headers and adds
        a Host: header.

        @param headers: The HTTP/2 header set.
        @type headers: A L{list} of L{tuple}s of header name and header value,
            both as L{bytes}.
        """
        gotLength = False

        for header in headers:
            if not header[0].startswith(b":"):
                gotLength = _addHeaderToRequest(self._request, header) or gotLength
            elif header[0] == b":method":
                self.command = header[1]
            elif header[0] == b":path":
                self.path = header[1]
            elif header[0] == b":authority":
                # This is essentially the Host: header from HTTP/1.1
                _addHeaderToRequest(self._request, (b"host", header[1]))

        if not gotLength:
            if self.command in (b"GET", b"HEAD"):
                self._request.gotLength(0)
            else:
                self._request.gotLength(None)

        self._request.parseCookies()
        expectContinue = self._request.requestHeaders.getRawHeaders(b"expect")
        if expectContinue and expectContinue[0].lower() == b"100-continue":
            self._send100Continue()
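
    # As an illustration only (the header block below is made up), a request
    # carrying
    #     [(b":method", b"GET"), (b":path", b"/index"),
    #      (b":authority", b"example.com"), (b"user-agent", b"demo")]
    # ends up with command=b"GET", path=b"/index", Host: example.com and
    # User-Agent: demo headers on the request, and gotLength(0) because there
    # is no content-length header and the method is GET.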
    # Methods called by the H2Connection
    def receiveDataChunk(self, data, flowControlledLength):
        """
        Called when the connection has received a chunk of data from the
        underlying transport. If the stream has been registered with a
        consumer, and is currently able to push data, immediately passes it
        through. Otherwise, buffers the chunk until we can start producing.

        @param data: The chunk of data that was received.
        @type data: L{bytes}

        @param flowControlledLength: The total flow controlled length of this
            chunk, which is used when we want to re-open the window. May be
            different to C{len(data)}.
        @type flowControlledLength: L{int}
        """
        if not self.producing:
            # Buffer data.
            self._inboundDataBuffer.append((data, flowControlledLength))
        else:
            self._request.handleContentChunk(data)
            self._conn.openStreamWindow(self.streamID, flowControlledLength)

    def requestComplete(self):
        """
        Called by the L{H2Connection} when all the data for a request has been
        received. Currently, with the legacy L{twisted.web.http.Request}
        object, just calls requestReceived unless the producer wants us to be
        quiet.
        """
        if self.producing:
            self._request.requestReceived(self.command, self.path, b"HTTP/2")
        else:
            self._inboundDataBuffer.append((_END_STREAM_SENTINEL, None))

    def connectionLost(self, reason):
        """
        Called by the L{H2Connection} when a connection is lost or a stream is
        reset.

        @param reason: The reason the connection was lost.
        @type reason: L{twisted.python.failure.Failure}
        """
        self._request.connectionLost(reason)

    def windowUpdated(self):
        """
        Called by the L{H2Connection} when this stream's flow control window
        has been opened.
        """
        # If we don't have a producer, we have no-one to tell.
        if not self.producer:
            return

        # If we're not blocked on flow control, we don't care.
        if self._producerProducing:
            return

        # We check whether the stream's flow control window is actually above
        # 0, and then, if a producer is registered and we still have space in
        # the window, we unblock it.
        remainingWindow = self._conn.remainingOutboundWindow(self.streamID)
        if not remainingWindow > 0:
            return

        # We have a producer and space in the window, so that producer can
        # start producing again!
        self._producerProducing = True
        self.producer.resumeProducing()

    def flowControlBlocked(self):
        """
        Called by the L{H2Connection} when this stream's flow control window
        has been exhausted.
        """
        if not self.producer:
            return

        if self._producerProducing:
            self.producer.pauseProducing()
            self._producerProducing = False
    # Methods called by the consumer (usually an IRequest).
    def writeHeaders(self, version, code, reason, headers):
        """
        Called by the consumer to write headers to the stream.

        @param version: The HTTP version.
        @type version: L{bytes}

        @param code: The status code.
        @type code: L{int}

        @param reason: The reason phrase. Ignored in HTTP/2.
        @type reason: L{bytes}

        @param headers: The HTTP response headers.
        @type headers: Any iterable of two-tuples of L{bytes}, representing
            header names and header values.
        """
        self._conn.writeHeaders(version, code, reason, headers, self.streamID)

    def requestDone(self, request):
        """
        Called by a consumer to clean up whatever permanent state is in use.

        @param request: The request calling the method.
        @type request: L{twisted.web.iweb.IRequest}
        """
        self._conn.endRequest(self.streamID)

    def _send100Continue(self):
        """
        Sends a 100 Continue response, used to signal to clients that further
        processing will be performed.
        """
        self._conn._send100Continue(self.streamID)

    def _respondToBadRequestAndDisconnect(self):
        """
        This is a quick and dirty way of responding to bad requests.

        As described by the HTTP standard we should be patient and accept the
        whole request from the client before sending a polite bad request
        response, even in the case when clients send tons of data.

        Unlike in the HTTP/1.1 case, this does not actually disconnect the
        underlying transport: there's no need. This instead just sends a 400
        response and terminates the stream.
        """
        self._conn._respondToBadRequestAndDisconnect(self.streamID)

    # Implementation: ITransport
    def write(self, data):
        """
        Write a single chunk of data into a data frame.

        @param data: The data chunk to send.
        @type data: L{bytes}
        """
        self._conn.writeDataToStream(self.streamID, data)
        return

    def writeSequence(self, iovec):
        """
        Write a sequence of chunks of data into data frames.

        @param iovec: A sequence of chunks to send.
        @type iovec: An iterable of L{bytes} chunks.
        """
        for chunk in iovec:
            self.write(chunk)

    def loseConnection(self):
        """
        Close the connection after writing all pending data.
        """
        self._conn.endRequest(self.streamID)

    def abortConnection(self):
        """
        Forcefully abort the connection by sending a RstStream frame.
        """
        self._conn.abortRequest(self.streamID)

    def getPeer(self):
        """
        Get information about the peer.
        """
        return self._conn.getPeer()

    def getHost(self):
        """
        Similar to getPeer, but for this side of the connection.
        """
        return self._conn.getHost()

    def isSecure(self):
        """
        Returns L{True} if this channel is using a secure transport.

        @returns: L{True} if this channel is secure.
        @rtype: L{bool}
        """
        return self._conn._isSecure()
    # Implementation: IConsumer
    def registerProducer(self, producer, streaming):
        """
        Register to receive data from a producer.

        This sets self to be a consumer for a producer. When this object runs
        out of data (as when a send(2) call on a socket succeeds in moving the
        last data from a userspace buffer into a kernelspace buffer), it will
        ask the producer to resumeProducing().

        For L{IPullProducer} providers, C{resumeProducing} will be called once
        each time data is required.

        For L{IPushProducer} providers, C{pauseProducing} will be called
        whenever the write buffer fills up and C{resumeProducing} will only be
        called when it empties.

        @param producer: The producer to register.
        @type producer: L{IProducer} provider

        @param streaming: L{True} if C{producer} provides L{IPushProducer},
            L{False} if C{producer} provides L{IPullProducer}.
        @type streaming: L{bool}

        @raise ValueError: If a producer is already registered.

        @return: L{None}
        """
        if self.producer:
            raise ValueError(
                "registering producer %s before previous one (%s) was "
                "unregistered" % (producer, self.producer)
            )

        if not streaming:
            self.hasStreamingProducer = False
            producer = _PullToPush(producer, self)
            producer.startStreaming()
        else:
            self.hasStreamingProducer = True

        self.producer = producer
        self._producerProducing = True

    def unregisterProducer(self):
        """
        @see: L{IConsumer.unregisterProducer}
        """
        # When the producer is unregistered, we're done.
        if self.producer is not None and not self.hasStreamingProducer:
            self.producer.stopStreaming()

        self._producerProducing = False
        self.producer = None
        self.hasStreamingProducer = None

    # Implementation: IPushProducer
    def stopProducing(self):
        """
        @see: L{IProducer.stopProducing}
        """
        self.producing = False
        self.abortConnection()

    def pauseProducing(self):
        """
        @see: L{IPushProducer.pauseProducing}
        """
        self.producing = False

    def resumeProducing(self):
        """
        @see: L{IPushProducer.resumeProducing}
        """
        self.producing = True
        consumedLength = 0

        while self.producing and self._inboundDataBuffer:
            # Allow for pauseProducing to be called in response to a call to
            # resumeProducing.
            chunk, flowControlledLength = self._inboundDataBuffer.popleft()

            if chunk is _END_STREAM_SENTINEL:
                self.requestComplete()
            else:
                consumedLength += flowControlledLength
                self._request.handleContentChunk(chunk)

        self._conn.openStreamWindow(self.streamID, consumedLength)


def _addHeaderToRequest(request, header):
    """
    Add a header tuple to a request header object.

    @param request: The request to add the header tuple to.
    @type request: L{twisted.web.http.Request}

    @param header: The header tuple to add to the request.
    @type header: A L{tuple} with two elements, the header name and header
        value, both as L{bytes}.

    @return: If the header being added was the C{Content-Length} header.
    @rtype: L{bool}
    """
    requestHeaders = request.requestHeaders
    name, value = header
    values = requestHeaders.getRawHeaders(name)

    if values is not None:
        values.append(value)
    else:
        requestHeaders.setRawHeaders(name, [value])

    if name == b"content-length":
        request.gotLength(int(value))
        return True

    return False
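

# A minimal sketch of how the helper above behaves, assuming a request object
# constructed elsewhere (the calls below are illustrative, not part of how
# this module invokes it):
#
#     _addHeaderToRequest(request, (b"content-length", b"42"))
#     # returns True and calls request.gotLength(42)
#
#     _addHeaderToRequest(request, (b"accept", b"*/*"))
#     # returns False; the header is simply appended to request.requestHeaders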