
Source Code for Module cherrypy.wsgiserver.wsgiserver3

   1  """A high-speed, production ready, thread pooled, generic HTTP server. 
   2   
   3  Simplest example on how to use this module directly 
   4  (without using CherryPy's application machinery):: 
   5   
   6      from cherrypy import wsgiserver 
   7   
   8      def my_crazy_app(environ, start_response): 
   9          status = '200 OK' 
  10          response_headers = [('Content-type','text/plain')] 
  11          start_response(status, response_headers) 
  12          return ['Hello world!'] 
  13   
  14      server = wsgiserver.CherryPyWSGIServer( 
  15                  ('0.0.0.0', 8070), my_crazy_app, 
  16                  server_name='www.cherrypy.example') 
  17      server.start() 
  18   
  19  The CherryPy WSGI server can serve as many WSGI applications 
  20  as you want in one instance by using a WSGIPathInfoDispatcher:: 
  21   
  22      d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app}) 
  23      server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d) 
  24   
  25  Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance. 
  26   
  27  This won't call the CherryPy engine (application side) at all, only the 
  28  HTTP server, which is independent from the rest of CherryPy. Don't 
  29  let the name "CherryPyWSGIServer" throw you; the name merely reflects 
  30  its origin, not its coupling. 
  31   
  32  For those of you wanting to understand internals of this module, here's the 
  33  basic call flow. The server's listening thread runs a very tight loop, 
  34  sticking incoming connections onto a Queue:: 
  35   
  36      server = CherryPyWSGIServer(...) 
  37      server.start() 
  38      while True: 
  39          tick() 
  40          # This blocks until a request comes in: 
  41          child = socket.accept() 
  42          conn = HTTPConnection(child, ...) 
  43          server.requests.put(conn) 
  44   
  45  Worker threads are kept in a pool and poll the Queue, popping off and then 
  46  handling each connection in turn. Each connection can consist of an arbitrary 
  47  number of requests and their responses, so we run a nested loop:: 
  48   
  49      while True: 
  50          conn = server.requests.get() 
  51          conn.communicate() 
  52          ->  while True: 
  53                  req = HTTPRequest(...) 
  54                  req.parse_request() 
  55                  ->  # Read the Request-Line, e.g. "GET /page HTTP/1.1" 
  56                      req.rfile.readline() 
  57                      read_headers(req.rfile, req.inheaders) 
  58                  req.respond() 
  59                  ->  response = app(...) 
  60                      try: 
  61                          for chunk in response: 
  62                              if chunk: 
  63                                  req.write(chunk) 
  64                      finally: 
  65                          if hasattr(response, "close"): 
  66                              response.close() 
  67                  if req.close_connection: 
  68                      return 
  69  """ 
  70   
  71  __all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer', 
  72             'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile', 
  73             'CP_makefile', 
  74             'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert', 
  75             'WorkerThread', 'ThreadPool', 'SSLAdapter', 
  76             'CherryPyWSGIServer', 
  77             'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0', 
  78             'WSGIPathInfoDispatcher', 'get_ssl_adapter_class'] 
  79   
  80  import os 
  81  try: 
  82      import queue 
  83  except: 
  84      import Queue as queue 
  85  import re 
  86  import email.utils 
  87  import socket 
  88  import sys 
  89  if 'win' in sys.platform and hasattr(socket, "AF_INET6"): 
  90      if not hasattr(socket, 'IPPROTO_IPV6'): 
  91          socket.IPPROTO_IPV6 = 41 
  92      if not hasattr(socket, 'IPV6_V6ONLY'): 
  93          socket.IPV6_V6ONLY = 27 
  94  if sys.version_info < (3, 1): 
  95      import io 
  96  else: 
  97      import _pyio as io 
  98  DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE 
  99   
 100  import threading 
 101  import time 
 102  from traceback import format_exc 
 103   
 104  if sys.version_info >= (3, 0): 
 105      bytestr = bytes 
 106      unicodestr = str 
 107      basestring = (bytes, str) 
 108   
 109      def ntob(n, encoding='ISO-8859-1'):
 110          """Return the given native string as a byte string in the given
 111          encoding.
 112          """
 113          # In Python 3, the native string type is unicode
 114          return n.encode(encoding)
 115  else:
 116      bytestr = str
 117      unicodestr = unicode
 118      basestring = basestring
 119
 120      def ntob(n, encoding='ISO-8859-1'):
 121          """Return the given native string as a byte string in the given
 122          encoding.
 123          """
 124          # In Python 2, the native string type is bytes. Assume it's already
 125          # in the given encoding, which for ISO-8859-1 is almost always what
 126          # was intended.
 127          return n
 128
 129  LF = ntob('\n')
 130  CRLF = ntob('\r\n')
 131  TAB = ntob('\t')
 132  SPACE = ntob(' ')
 133  COLON = ntob(':')
 134  SEMICOLON = ntob(';')
 135  EMPTY = ntob('')
 136  NUMBER_SIGN = ntob('#')
 137  QUESTION_MARK = ntob('?')
 138  ASTERISK = ntob('*')
 139  FORWARD_SLASH = ntob('/')
 140  quoted_slash = re.compile(ntob("(?i)%2F"))
 141
 142  import errno
 143
 144
 145  def plat_specific_errors(*errnames):
 146      """Return error numbers for all errors in errnames on this platform.
 147
 148      The 'errno' module contains different global constants depending on
 149      the specific platform (OS). This function will return the list of
 150      numeric values for a given list of potential names.
 151      """
 152      errno_names = dir(errno)
 153      nums = [getattr(errno, k) for k in errnames if k in errno_names]
 154      # de-dupe the list
 155      return list(dict.fromkeys(nums).keys())
 156
 157  socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
 158
 159  socket_errors_to_ignore = plat_specific_errors(
 160      "EPIPE",
 161      "EBADF", "WSAEBADF",
 162      "ENOTSOCK", "WSAENOTSOCK",
 163      "ETIMEDOUT", "WSAETIMEDOUT",
 164      "ECONNREFUSED", "WSAECONNREFUSED",
 165      "ECONNRESET", "WSAECONNRESET",
 166      "ECONNABORTED", "WSAECONNABORTED",
 167      "ENETRESET", "WSAENETRESET",
 168      "EHOSTDOWN", "EHOSTUNREACH",
 169  )
 170  socket_errors_to_ignore.append("timed out")
 171  socket_errors_to_ignore.append("The read operation timed out")
 172
 173  socket_errors_nonblocking = plat_specific_errors(
 174      'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
 175
 176  comma_separated_headers = [
 177      ntob(h) for h in
 178      ['Accept', 'Accept-Charset', 'Accept-Encoding',
 179       'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
 180       'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
 181       'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
 182       'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
 183       'WWW-Authenticate']
 184  ]
 185
 186
 187  import logging
 188  if not hasattr(logging, 'statistics'):
 189      logging.statistics = {}
 190
 191
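# A minimal usage sketch for plat_specific_errors (editor's example, not part
# of the original module): names that do not exist on the running platform are
# simply skipped, so Windows-only WSA* constants are safe to pass everywhere.
# The "NO_SUCH_ERRNO" name below is made up purely to show that behavior.
_example_eintr_codes = plat_specific_errors("EINTR", "WSAEINTR", "NO_SUCH_ERRNO")
# On a POSIX system this is typically [errno.EINTR]; on Windows it would also
# include the WSAEINTR value. Unknown names contribute nothing to the result.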
 192  def read_headers(rfile, hdict=None):
 193      """Read headers from the given stream into the given header dict.
 194
 195      If hdict is None, a new header dict is created. Returns the populated
 196      header dict.
 197
 198      Headers which are repeated are folded together using a comma if their
 199      specification so dictates.
 200
 201      This function raises ValueError when the read bytes violate the HTTP spec.
 202      You should probably return "400 Bad Request" if this happens.
 203      """
 204      if hdict is None:
 205          hdict = {}
 206
 207      while True:
 208          line = rfile.readline()
 209          if not line:
 210              # No more data--illegal end of headers
 211              raise ValueError("Illegal end of headers.")
 212
 213          if line == CRLF:
 214              # Normal end of headers
 215              break
 216          if not line.endswith(CRLF):
 217              raise ValueError("HTTP requires CRLF terminators")
 218
 219          if line[0] in (SPACE, TAB):
 220              # It's a continuation line.
 221              v = line.strip()
 222          else:
 223              try:
 224                  k, v = line.split(COLON, 1)
 225              except ValueError:
 226                  raise ValueError("Illegal header line.")
 227              # TODO: what about TE and WWW-Authenticate?
 228              k = k.strip().title()
 229              v = v.strip()
 230              hname = k
 231
 232          if k in comma_separated_headers:
 233              existing = hdict.get(hname)
 234              if existing:
 235                  v = b", ".join((existing, v))
 236          hdict[hname] = v
 237
 238      return hdict
239 240
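# A small usage sketch for read_headers (editor's example, not part of the
# original module). It feeds an in-memory byte stream; under Python 3 the keys
# and values come back as bytes, and repeated headers that appear in
# comma_separated_headers are folded into a single comma-joined value.
_example_headers = read_headers(io.BytesIO(
    b"Host: www.example.com\r\n"
    b"Accept: text/html\r\n"
    b"Accept: application/json\r\n"
    b"\r\n"))
# _example_headers == {b'Host': b'www.example.com',
#                      b'Accept': b'text/html, application/json'}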
 241  class MaxSizeExceeded(Exception):
 242      pass
 243
 244
245 -class SizeCheckWrapper(object):
246 247 """Wraps a file-like object, raising MaxSizeExceeded if too large.""" 248
249 - def __init__(self, rfile, maxlen):
250 self.rfile = rfile 251 self.maxlen = maxlen 252 self.bytes_read = 0
253
254 - def _check_length(self):
255 if self.maxlen and self.bytes_read > self.maxlen: 256 raise MaxSizeExceeded()
257
258 - def read(self, size=None):
259 data = self.rfile.read(size) 260 self.bytes_read += len(data) 261 self._check_length() 262 return data
263
264 - def readline(self, size=None):
265 if size is not None: 266 data = self.rfile.readline(size) 267 self.bytes_read += len(data) 268 self._check_length() 269 return data 270 271 # User didn't specify a size ... 272 # We read the line in chunks to make sure it's not a 100MB line ! 273 res = [] 274 while True: 275 data = self.rfile.readline(256) 276 self.bytes_read += len(data) 277 self._check_length() 278 res.append(data) 279 # See https://bitbucket.org/cherrypy/cherrypy/issue/421 280 if len(data) < 256 or data[-1:] == LF: 281 return EMPTY.join(res)
282
283 - def readlines(self, sizehint=0):
284 # Shamelessly stolen from StringIO 285 total = 0 286 lines = [] 287 line = self.readline() 288 while line: 289 lines.append(line) 290 total += len(line) 291 if 0 < sizehint <= total: 292 break 293 line = self.readline() 294 return lines
295
296 - def close(self):
297 self.rfile.close()
298
299 - def __iter__(self):
300 return self
301
302 - def __next__(self):
303 data = next(self.rfile) 304 self.bytes_read += len(data) 305 self._check_length() 306 return data
307
308 - def next(self):
309 data = self.rfile.next() 310 self.bytes_read += len(data) 311 self._check_length() 312 return data
313 314
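# A minimal sketch of SizeCheckWrapper on its own (editor's example, not part
# of the original module): once more than maxlen bytes have been pulled from
# the underlying stream, MaxSizeExceeded is raised, which parse_request later
# turns into a 413/414 response.
_example_checked = SizeCheckWrapper(io.BytesIO(b"x" * 100), maxlen=10)
try:
    _example_checked.read(50)
except MaxSizeExceeded:
    pass  # more than the allowed 10 bytes were consumed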
315 -class KnownLengthRFile(object):
316 317 """Wraps a file-like object, returning an empty string when exhausted.""" 318
319 - def __init__(self, rfile, content_length):
320 self.rfile = rfile 321 self.remaining = content_length
322
323 - def read(self, size=None):
324 if self.remaining == 0: 325 return b'' 326 if size is None: 327 size = self.remaining 328 else: 329 size = min(size, self.remaining) 330 331 data = self.rfile.read(size) 332 self.remaining -= len(data) 333 return data
334
335 - def readline(self, size=None):
336 if self.remaining == 0: 337 return b'' 338 if size is None: 339 size = self.remaining 340 else: 341 size = min(size, self.remaining) 342 343 data = self.rfile.readline(size) 344 self.remaining -= len(data) 345 return data
346
347 - def readlines(self, sizehint=0):
348 # Shamelessly stolen from StringIO 349 total = 0 350 lines = [] 351 line = self.readline(sizehint) 352 while line: 353 lines.append(line) 354 total += len(line) 355 if 0 < sizehint <= total: 356 break 357 line = self.readline(sizehint) 358 return lines
359
360 - def close(self):
361 self.rfile.close()
362
363 - def __iter__(self):
364 return self
365
366 - def __next__(self):
367 data = next(self.rfile) 368 self.remaining -= len(data) 369 return data
370 371
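# A minimal sketch of KnownLengthRFile (editor's example, not part of the
# original module): reads never run past the declared Content-Length, and an
# exhausted body returns b'' instead of blocking on the socket.
_example_body = KnownLengthRFile(io.BytesIO(b"hello world"), content_length=5)
# _example_body.read()  -> b'hello'
# _example_body.read()  -> b''   (remaining is now 0, so the wrapper reports EOF)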
372 -class ChunkedRFile(object):
373 374 """Wraps a file-like object, returning an empty string when exhausted. 375 376 This class is intended to provide a conforming wsgi.input value for 377 request entities that have been encoded with the 'chunked' transfer 378 encoding. 379 """ 380
381 - def __init__(self, rfile, maxlen, bufsize=8192):
382 self.rfile = rfile 383 self.maxlen = maxlen 384 self.bytes_read = 0 385 self.buffer = EMPTY 386 self.bufsize = bufsize 387 self.closed = False
388
389 - def _fetch(self):
390 if self.closed: 391 return 392 393 line = self.rfile.readline() 394 self.bytes_read += len(line) 395 396 if self.maxlen and self.bytes_read > self.maxlen: 397 raise MaxSizeExceeded("Request Entity Too Large", self.maxlen) 398 399 line = line.strip().split(SEMICOLON, 1) 400 401 try: 402 chunk_size = line.pop(0) 403 chunk_size = int(chunk_size, 16) 404 except ValueError: 405 raise ValueError("Bad chunked transfer size: " + repr(chunk_size)) 406 407 if chunk_size <= 0: 408 self.closed = True 409 return 410 411 ## if line: chunk_extension = line[0] 412 413 if self.maxlen and self.bytes_read + chunk_size > self.maxlen: 414 raise IOError("Request Entity Too Large") 415 416 chunk = self.rfile.read(chunk_size) 417 self.bytes_read += len(chunk) 418 self.buffer += chunk 419 420 crlf = self.rfile.read(2) 421 if crlf != CRLF: 422 raise ValueError( 423 "Bad chunked transfer coding (expected '\\r\\n', " 424 "got " + repr(crlf) + ")")
425
426 - def read(self, size=None):
427 data = EMPTY 428 while True: 429 if size and len(data) >= size: 430 return data 431 432 if not self.buffer: 433 self._fetch() 434 if not self.buffer: 435 # EOF 436 return data 437 438 if size: 439 remaining = size - len(data) 440 data += self.buffer[:remaining] 441 self.buffer = self.buffer[remaining:] 442 else: 443 data += self.buffer
444
445 - def readline(self, size=None):
446 data = EMPTY 447 while True: 448 if size and len(data) >= size: 449 return data 450 451 if not self.buffer: 452 self._fetch() 453 if not self.buffer: 454 # EOF 455 return data 456 457 newline_pos = self.buffer.find(LF) 458 if size: 459 if newline_pos == -1: 460 remaining = size - len(data) 461 data += self.buffer[:remaining] 462 self.buffer = self.buffer[remaining:] 463 else: 464 remaining = min(size - len(data), newline_pos) 465 data += self.buffer[:remaining] 466 self.buffer = self.buffer[remaining:] 467 else: 468 if newline_pos == -1: 469 data += self.buffer 470 else: 471 data += self.buffer[:newline_pos] 472 self.buffer = self.buffer[newline_pos:]
473
474 - def readlines(self, sizehint=0):
475 # Shamelessly stolen from StringIO 476 total = 0 477 lines = [] 478 line = self.readline(sizehint) 479 while line: 480 lines.append(line) 481 total += len(line) 482 if 0 < sizehint <= total: 483 break 484 line = self.readline(sizehint) 485 return lines
486
487 - def read_trailer_lines(self):
488 if not self.closed: 489 raise ValueError( 490 "Cannot read trailers until the request body has been read.") 491 492 while True: 493 line = self.rfile.readline() 494 if not line: 495 # No more data--illegal end of headers 496 raise ValueError("Illegal end of headers.") 497 498 self.bytes_read += len(line) 499 if self.maxlen and self.bytes_read > self.maxlen: 500 raise IOError("Request Entity Too Large") 501 502 if line == CRLF: 503 # Normal end of headers 504 break 505 if not line.endswith(CRLF): 506 raise ValueError("HTTP requires CRLF terminators") 507 508 yield line
509
510 - def close(self):
511 self.rfile.close()
512
513 - def __iter__(self):
514 # Shamelessly stolen from StringIO 515 total = 0 516 line = self.readline(sizehint) 517 while line: 518 yield line 519 total += len(line) 520 if 0 < sizehint <= total: 521 break 522 line = self.readline(sizehint)
523 524
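# A minimal sketch of ChunkedRFile (editor's example, not part of the original
# module): the wrapper consumes the chunk-size lines and their trailing CRLFs
# itself, handing back only the entity bytes. Bounded reads are used here.
_example_chunked = ChunkedRFile(
    io.BytesIO(b"5\r\nhello\r\n0\r\n\r\n"), maxlen=None)
# _example_chunked.read(5)  -> b'hello'
# _example_chunked.read(5)  -> b''   (the zero-sized chunk marks end of body)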
525 -class HTTPRequest(object):
526 527 """An HTTP Request (and response). 528 529 A single HTTP connection may consist of multiple request/response pairs. 530 """ 531 532 server = None 533 """The HTTPServer object which is receiving this request.""" 534 535 conn = None 536 """The HTTPConnection object on which this request connected.""" 537 538 inheaders = {} 539 """A dict of request headers.""" 540 541 outheaders = [] 542 """A list of header tuples to write in the response.""" 543 544 ready = False 545 """When True, the request has been parsed and is ready to begin generating 546 the response. When False, signals the calling Connection that the response 547 should not be generated and the connection should close.""" 548 549 close_connection = False 550 """Signals the calling Connection that the request should close. This does 551 not imply an error! The client and/or server may each request that the 552 connection be closed.""" 553 554 chunked_write = False 555 """If True, output will be encoded with the "chunked" transfer-coding. 556 557 This value is set automatically inside send_headers.""" 558
559 - def __init__(self, server, conn):
560 self.server = server 561 self.conn = conn 562 563 self.ready = False 564 self.started_request = False 565 self.scheme = ntob("http") 566 if self.server.ssl_adapter is not None: 567 self.scheme = ntob("https") 568 # Use the lowest-common protocol in case read_request_line errors. 569 self.response_protocol = 'HTTP/1.0' 570 self.inheaders = {} 571 572 self.status = "" 573 self.outheaders = [] 574 self.sent_headers = False 575 self.close_connection = self.__class__.close_connection 576 self.chunked_read = False 577 self.chunked_write = self.__class__.chunked_write
578
579 - def parse_request(self):
580 """Parse the next HTTP request start-line and message-headers.""" 581 self.rfile = SizeCheckWrapper(self.conn.rfile, 582 self.server.max_request_header_size) 583 try: 584 success = self.read_request_line() 585 except MaxSizeExceeded: 586 self.simple_response( 587 "414 Request-URI Too Long", 588 "The Request-URI sent with the request exceeds the maximum " 589 "allowed bytes.") 590 return 591 else: 592 if not success: 593 return 594 595 try: 596 success = self.read_request_headers() 597 except MaxSizeExceeded: 598 self.simple_response( 599 "413 Request Entity Too Large", 600 "The headers sent with the request exceed the maximum " 601 "allowed bytes.") 602 return 603 else: 604 if not success: 605 return 606 607 self.ready = True
608
609 - def read_request_line(self):
610 # HTTP/1.1 connections are persistent by default. If a client 611 # requests a page, then idles (leaves the connection open), 612 # then rfile.readline() will raise socket.error("timed out"). 613 # Note that it does this based on the value given to settimeout(), 614 # and doesn't need the client to request or acknowledge the close 615 # (although your TCP stack might suffer for it: cf Apache's history 616 # with FIN_WAIT_2). 617 request_line = self.rfile.readline() 618 619 # Set started_request to True so communicate() knows to send 408 620 # from here on out. 621 self.started_request = True 622 if not request_line: 623 return False 624 625 if request_line == CRLF: 626 # RFC 2616 sec 4.1: "...if the server is reading the protocol 627 # stream at the beginning of a message and receives a CRLF 628 # first, it should ignore the CRLF." 629 # But only ignore one leading line! else we enable a DoS. 630 request_line = self.rfile.readline() 631 if not request_line: 632 return False 633 634 if not request_line.endswith(CRLF): 635 self.simple_response( 636 "400 Bad Request", "HTTP requires CRLF terminators") 637 return False 638 639 try: 640 method, uri, req_protocol = request_line.strip().split(SPACE, 2) 641 # The [x:y] slicing is necessary for byte strings to avoid getting 642 # ord's 643 rp = int(req_protocol[5:6]), int(req_protocol[7:8]) 644 except ValueError: 645 self.simple_response("400 Bad Request", "Malformed Request-Line") 646 return False 647 648 self.uri = uri 649 self.method = method 650 651 # uri may be an abs_path (including "http://host.domain.tld"); 652 scheme, authority, path = self.parse_request_uri(uri) 653 if NUMBER_SIGN in path: 654 self.simple_response("400 Bad Request", 655 "Illegal #fragment in Request-URI.") 656 return False 657 658 if scheme: 659 self.scheme = scheme 660 661 qs = EMPTY 662 if QUESTION_MARK in path: 663 path, qs = path.split(QUESTION_MARK, 1) 664 665 # Unquote the path+params (e.g. "/this%20path" -> "/this path"). 666 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 667 # 668 # But note that "...a URI must be separated into its components 669 # before the escaped characters within those components can be 670 # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2 671 # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path". 672 try: 673 atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)] 674 except ValueError: 675 ex = sys.exc_info()[1] 676 self.simple_response("400 Bad Request", ex.args[0]) 677 return False 678 path = b"%2F".join(atoms) 679 self.path = path 680 681 # Note that, like wsgiref and most other HTTP servers, 682 # we "% HEX HEX"-unquote the path but not the query string. 683 self.qs = qs 684 685 # Compare request and server HTTP protocol versions, in case our 686 # server does not support the requested protocol. Limit our output 687 # to min(req, server). We want the following output: 688 # request server actual written supported response 689 # protocol protocol response protocol feature set 690 # a 1.0 1.0 1.0 1.0 691 # b 1.0 1.1 1.1 1.0 692 # c 1.1 1.0 1.0 1.0 693 # d 1.1 1.1 1.1 1.1 694 # Notice that, in (b), the response will be "HTTP/1.1" even though 695 # the client only understands 1.0. RFC 2616 10.5.6 says we should 696 # only return 505 if the _major_ version is different. 
697 # The [x:y] slicing is necessary for byte strings to avoid getting 698 # ord's 699 sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8]) 700 701 if sp[0] != rp[0]: 702 self.simple_response("505 HTTP Version Not Supported") 703 return False 704 705 self.request_protocol = req_protocol 706 self.response_protocol = "HTTP/%s.%s" % min(rp, sp) 707 return True
708
709 - def read_request_headers(self):
710 """Read self.rfile into self.inheaders. Return success.""" 711 712 # then all the http headers 713 try: 714 read_headers(self.rfile, self.inheaders) 715 except ValueError: 716 ex = sys.exc_info()[1] 717 self.simple_response("400 Bad Request", ex.args[0]) 718 return False 719 720 mrbs = self.server.max_request_body_size 721 if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs: 722 self.simple_response( 723 "413 Request Entity Too Large", 724 "The entity sent with the request exceeds the maximum " 725 "allowed bytes.") 726 return False 727 728 # Persistent connection support 729 if self.response_protocol == "HTTP/1.1": 730 # Both server and client are HTTP/1.1 731 if self.inheaders.get(b"Connection", b"") == b"close": 732 self.close_connection = True 733 else: 734 # Either the server or client (or both) are HTTP/1.0 735 if self.inheaders.get(b"Connection", b"") != b"Keep-Alive": 736 self.close_connection = True 737 738 # Transfer-Encoding support 739 te = None 740 if self.response_protocol == "HTTP/1.1": 741 te = self.inheaders.get(b"Transfer-Encoding") 742 if te: 743 te = [x.strip().lower() for x in te.split(b",") if x.strip()] 744 745 self.chunked_read = False 746 747 if te: 748 for enc in te: 749 if enc == b"chunked": 750 self.chunked_read = True 751 else: 752 # Note that, even if we see "chunked", we must reject 753 # if there is an extension we don't recognize. 754 self.simple_response("501 Unimplemented") 755 self.close_connection = True 756 return False 757 758 # From PEP 333: 759 # "Servers and gateways that implement HTTP 1.1 must provide 760 # transparent support for HTTP 1.1's "expect/continue" mechanism. 761 # This may be done in any of several ways: 762 # 1. Respond to requests containing an Expect: 100-continue request 763 # with an immediate "100 Continue" response, and proceed normally. 764 # 2. Proceed with the request normally, but provide the application 765 # with a wsgi.input stream that will send the "100 Continue" 766 # response if/when the application first attempts to read from 767 # the input stream. The read request must then remain blocked 768 # until the client responds. 769 # 3. Wait until the client decides that the server does not support 770 # expect/continue, and sends the request body on its own. 771 # (This is suboptimal, and is not recommended.) 772 # 773 # We used to do 3, but are now doing 1. Maybe we'll do 2 someday, 774 # but it seems like it would be a big slowdown for such a rare case. 775 if self.inheaders.get(b"Expect", b"") == b"100-continue": 776 # Don't use simple_response here, because it emits headers 777 # we don't want. See 778 # https://bitbucket.org/cherrypy/cherrypy/issue/951 779 msg = self.server.protocol.encode( 780 'ascii') + b" 100 Continue\r\n\r\n" 781 try: 782 self.conn.wfile.write(msg) 783 except socket.error: 784 x = sys.exc_info()[1] 785 if x.args[0] not in socket_errors_to_ignore: 786 raise 787 return True
788
789 - def parse_request_uri(self, uri):
790 """Parse a Request-URI into (scheme, authority, path). 791 792 Note that Request-URI's must be one of:: 793 794 Request-URI = "*" | absoluteURI | abs_path | authority 795 796 Therefore, a Request-URI which starts with a double forward-slash 797 cannot be a "net_path":: 798 799 net_path = "//" authority [ abs_path ] 800 801 Instead, it must be interpreted as an "abs_path" with an empty first 802 path segment:: 803 804 abs_path = "/" path_segments 805 path_segments = segment *( "/" segment ) 806 segment = *pchar *( ";" param ) 807 param = *pchar 808 """ 809 if uri == ASTERISK: 810 return None, None, uri 811 812 scheme, sep, remainder = uri.partition(b'://') 813 if sep and QUESTION_MARK not in scheme: 814 # An absoluteURI. 815 # If there's a scheme (and it must be http or https), then: 816 # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query 817 # ]] 818 authority, path_a, path_b = remainder.partition(FORWARD_SLASH) 819 return scheme.lower(), authority, path_a + path_b 820 821 if uri.startswith(FORWARD_SLASH): 822 # An abs_path. 823 return None, None, uri 824 else: 825 # An authority. 826 return None, uri, None
827
828 - def unquote_bytes(self, path):
829 """takes quoted string and unquotes % encoded values""" 830 res = path.split(b'%') 831 832 for i in range(1, len(res)): 833 item = res[i] 834 try: 835 res[i] = bytes([int(item[:2], 16)]) + item[2:] 836 except ValueError: 837 raise 838 return b''.join(res)
839
840 - def respond(self):
841 """Call the gateway and write its iterable output.""" 842 mrbs = self.server.max_request_body_size 843 if self.chunked_read: 844 self.rfile = ChunkedRFile(self.conn.rfile, mrbs) 845 else: 846 cl = int(self.inheaders.get(b"Content-Length", 0)) 847 if mrbs and mrbs < cl: 848 if not self.sent_headers: 849 self.simple_response( 850 "413 Request Entity Too Large", 851 "The entity sent with the request exceeds the " 852 "maximum allowed bytes.") 853 return 854 self.rfile = KnownLengthRFile(self.conn.rfile, cl) 855 856 self.server.gateway(self).respond() 857 858 if (self.ready and not self.sent_headers): 859 self.sent_headers = True 860 self.send_headers() 861 if self.chunked_write: 862 self.conn.wfile.write(b"0\r\n\r\n")
863
864 - def simple_response(self, status, msg=""):
865 """Write a simple response back to the client.""" 866 status = str(status) 867 buf = [bytes(self.server.protocol, "ascii") + SPACE + 868 bytes(status, "ISO-8859-1") + CRLF, 869 bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"), 870 b"Content-Type: text/plain\r\n"] 871 872 if status[:3] in ("413", "414"): 873 # Request Entity Too Large / Request-URI Too Long 874 self.close_connection = True 875 if self.response_protocol == 'HTTP/1.1': 876 # This will not be true for 414, since read_request_line 877 # usually raises 414 before reading the whole line, and we 878 # therefore cannot know the proper response_protocol. 879 buf.append(b"Connection: close\r\n") 880 else: 881 # HTTP/1.0 had no 413/414 status nor Connection header. 882 # Emit 400 instead and trust the message body is enough. 883 status = "400 Bad Request" 884 885 buf.append(CRLF) 886 if msg: 887 if isinstance(msg, unicodestr): 888 msg = msg.encode("ISO-8859-1") 889 buf.append(msg) 890 891 try: 892 self.conn.wfile.write(b"".join(buf)) 893 except socket.error: 894 x = sys.exc_info()[1] 895 if x.args[0] not in socket_errors_to_ignore: 896 raise
897
898 - def write(self, chunk):
899 """Write unbuffered data to the client.""" 900 if self.chunked_write and chunk: 901 buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF] 902 self.conn.wfile.write(EMPTY.join(buf)) 903 else: 904 self.conn.wfile.write(chunk)
905
906 - def send_headers(self):
907 """Assert, process, and send the HTTP response message-headers. 908 909 You must set self.status, and self.outheaders before calling this. 910 """ 911 hkeys = [key.lower() for key, value in self.outheaders] 912 status = int(self.status[:3]) 913 914 if status == 413: 915 # Request Entity Too Large. Close conn to avoid garbage. 916 self.close_connection = True 917 elif b"content-length" not in hkeys: 918 # "All 1xx (informational), 204 (no content), 919 # and 304 (not modified) responses MUST NOT 920 # include a message-body." So no point chunking. 921 if status < 200 or status in (204, 205, 304): 922 pass 923 else: 924 if (self.response_protocol == 'HTTP/1.1' 925 and self.method != b'HEAD'): 926 # Use the chunked transfer-coding 927 self.chunked_write = True 928 self.outheaders.append((b"Transfer-Encoding", b"chunked")) 929 else: 930 # Closing the conn is the only way to determine len. 931 self.close_connection = True 932 933 if b"connection" not in hkeys: 934 if self.response_protocol == 'HTTP/1.1': 935 # Both server and client are HTTP/1.1 or better 936 if self.close_connection: 937 self.outheaders.append((b"Connection", b"close")) 938 else: 939 # Server and/or client are HTTP/1.0 940 if not self.close_connection: 941 self.outheaders.append((b"Connection", b"Keep-Alive")) 942 943 if (not self.close_connection) and (not self.chunked_read): 944 # Read any remaining request body data on the socket. 945 # "If an origin server receives a request that does not include an 946 # Expect request-header field with the "100-continue" expectation, 947 # the request includes a request body, and the server responds 948 # with a final status code before reading the entire request body 949 # from the transport connection, then the server SHOULD NOT close 950 # the transport connection until it has read the entire request, 951 # or until the client closes the connection. Otherwise, the client 952 # might not reliably receive the response message. However, this 953 # requirement is not be construed as preventing a server from 954 # defending itself against denial-of-service attacks, or from 955 # badly broken client implementations." 956 remaining = getattr(self.rfile, 'remaining', 0) 957 if remaining > 0: 958 self.rfile.read(remaining) 959 960 if b"date" not in hkeys: 961 self.outheaders.append(( 962 b"Date", 963 email.utils.formatdate(usegmt=True).encode('ISO-8859-1') 964 )) 965 966 if b"server" not in hkeys: 967 self.outheaders.append( 968 (b"Server", self.server.server_name.encode('ISO-8859-1'))) 969 970 buf = [self.server.protocol.encode( 971 'ascii') + SPACE + self.status + CRLF] 972 for k, v in self.outheaders: 973 buf.append(k + COLON + SPACE + v + CRLF) 974 buf.append(CRLF) 975 self.conn.wfile.write(EMPTY.join(buf))
976 977
 978  class NoSSLError(Exception):
 979
 980      """Exception raised when a client speaks HTTP to an HTTPS socket."""
 981      pass
 982
 983
 984  class FatalSSLAlert(Exception):
 985
 986      """Exception raised when the SSL implementation signals a fatal alert."""
 987      pass
 988
 989
 990  class CP_BufferedWriter(io.BufferedWriter):
 991
 992      """Faux file object attached to a socket object."""
 993
 994      def write(self, b):
 995          self._checkClosed()
 996          if isinstance(b, str):
 997              raise TypeError("can't write str to binary stream")
 998
 999          with self._write_lock:
1000              self._write_buf.extend(b)
1001              self._flush_unlocked()
1002              return len(b)
1003
1004      def _flush_unlocked(self):
1005          self._checkClosed("flush of closed file")
1006          while self._write_buf:
1007              try:
1008                  # ssl sockets only except 'bytes', not bytearrays
1009                  # so perhaps we should conditionally wrap this for perf?
1010                  n = self.raw.write(bytes(self._write_buf))
1011              except io.BlockingIOError as e:
1012                  n = e.characters_written
1013              del self._write_buf[:n]
1014
1015
1016  def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
1017      if 'r' in mode:
1018          return io.BufferedReader(socket.SocketIO(sock, mode), bufsize)
1019      else:
1020          return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize)
1021
1022
1023 -class HTTPConnection(object):
1024 1025 """An HTTP connection (active socket). 1026 1027 server: the Server object which received this connection. 1028 socket: the raw socket object (usually TCP) for this connection. 1029 makefile: a fileobject class for reading from the socket. 1030 """ 1031 1032 remote_addr = None 1033 remote_port = None 1034 ssl_env = None 1035 rbufsize = DEFAULT_BUFFER_SIZE 1036 wbufsize = DEFAULT_BUFFER_SIZE 1037 RequestHandlerClass = HTTPRequest 1038
1039 - def __init__(self, server, sock, makefile=CP_makefile):
1040 self.server = server 1041 self.socket = sock 1042 self.rfile = makefile(sock, "rb", self.rbufsize) 1043 self.wfile = makefile(sock, "wb", self.wbufsize) 1044 self.requests_seen = 0
1045
1046 - def communicate(self):
1047 """Read each request and respond appropriately.""" 1048 request_seen = False 1049 try: 1050 while True: 1051 # (re)set req to None so that if something goes wrong in 1052 # the RequestHandlerClass constructor, the error doesn't 1053 # get written to the previous request. 1054 req = None 1055 req = self.RequestHandlerClass(self.server, self) 1056 1057 # This order of operations should guarantee correct pipelining. 1058 req.parse_request() 1059 if self.server.stats['Enabled']: 1060 self.requests_seen += 1 1061 if not req.ready: 1062 # Something went wrong in the parsing (and the server has 1063 # probably already made a simple_response). Return and 1064 # let the conn close. 1065 return 1066 1067 request_seen = True 1068 req.respond() 1069 if req.close_connection: 1070 return 1071 except socket.error: 1072 e = sys.exc_info()[1] 1073 errnum = e.args[0] 1074 # sadly SSL sockets return a different (longer) time out string 1075 if ( 1076 errnum == 'timed out' or 1077 errnum == 'The read operation timed out' 1078 ): 1079 # Don't error if we're between requests; only error 1080 # if 1) no request has been started at all, or 2) we're 1081 # in the middle of a request. 1082 # See https://bitbucket.org/cherrypy/cherrypy/issue/853 1083 if (not request_seen) or (req and req.started_request): 1084 # Don't bother writing the 408 if the response 1085 # has already started being written. 1086 if req and not req.sent_headers: 1087 try: 1088 req.simple_response("408 Request Timeout") 1089 except FatalSSLAlert: 1090 # Close the connection. 1091 return 1092 elif errnum not in socket_errors_to_ignore: 1093 self.server.error_log("socket.error %s" % repr(errnum), 1094 level=logging.WARNING, traceback=True) 1095 if req and not req.sent_headers: 1096 try: 1097 req.simple_response("500 Internal Server Error") 1098 except FatalSSLAlert: 1099 # Close the connection. 1100 return 1101 return 1102 except (KeyboardInterrupt, SystemExit): 1103 raise 1104 except FatalSSLAlert: 1105 # Close the connection. 1106 return 1107 except NoSSLError: 1108 if req and not req.sent_headers: 1109 # Unwrap our wfile 1110 self.wfile = CP_makefile( 1111 self.socket._sock, "wb", self.wbufsize) 1112 req.simple_response( 1113 "400 Bad Request", 1114 "The client sent a plain HTTP request, but this server " 1115 "only speaks HTTPS on this port.") 1116 self.linger = True 1117 except Exception: 1118 e = sys.exc_info()[1] 1119 self.server.error_log(repr(e), level=logging.ERROR, traceback=True) 1120 if req and not req.sent_headers: 1121 try: 1122 req.simple_response("500 Internal Server Error") 1123 except FatalSSLAlert: 1124 # Close the connection. 1125 return
1126 1127 linger = False 1128
1129 - def close(self):
1130 """Close the socket underlying this connection.""" 1131 self.rfile.close() 1132 1133 if not self.linger: 1134 # Python's socket module does NOT call close on the kernel 1135 # socket when you call socket.close(). We do so manually here 1136 # because we want this server to send a FIN TCP segment 1137 # immediately. Note this must be called *before* calling 1138 # socket.close(), because the latter drops its reference to 1139 # the kernel socket. 1140 # Python 3 *probably* fixed this with socket._real_close; 1141 # hard to tell. 1142 # self.socket._sock.close() 1143 self.socket.close() 1144 else: 1145 # On the other hand, sometimes we want to hang around for a bit 1146 # to make sure the client has a chance to read our entire 1147 # response. Skipping the close() calls here delays the FIN 1148 # packet until the socket object is garbage-collected later. 1149 # Someday, perhaps, we'll do the full lingering_close that 1150 # Apache does, but not today. 1151 pass
1152 1153
1154  class TrueyZero(object):
1155
1156      """An object which equals and does math like the integer 0 but evals True.
1157      """
1158
1159      def __add__(self, other):
1160          return other
1161
1162      def __radd__(self, other):
1163          return other
1164  trueyzero = TrueyZero()
1165
1166
1167  _SHUTDOWNREQUEST = None
1168
1169
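# A tiny sketch of why TrueyZero exists (editor's example, not part of the
# original module): it adds like 0 but is truthy, which keeps the
# "cond and trueyzero or fallback" idiom in WorkerThread.stats from
# short-circuiting the way a literal 0 would.
assert trueyzero + 5 == 5
assert 5 + trueyzero == 5
assert bool(trueyzero) is True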
1170 -class WorkerThread(threading.Thread):
1171 1172 """Thread which continuously polls a Queue for Connection objects. 1173 1174 Due to the timing issues of polling a Queue, a WorkerThread does not 1175 check its own 'ready' flag after it has started. To stop the thread, 1176 it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue 1177 (one for each running WorkerThread). 1178 """ 1179 1180 conn = None 1181 """The current connection pulled off the Queue, or None.""" 1182 1183 server = None 1184 """The HTTP Server which spawned this thread, and which owns the 1185 Queue and is placing active connections into it.""" 1186 1187 ready = False 1188 """A simple flag for the calling server to know when this thread 1189 has begun polling the Queue.""" 1190
1191 - def __init__(self, server):
1192 self.ready = False 1193 self.server = server 1194 1195 self.requests_seen = 0 1196 self.bytes_read = 0 1197 self.bytes_written = 0 1198 self.start_time = None 1199 self.work_time = 0 1200 self.stats = { 1201 'Requests': lambda s: self.requests_seen + ( 1202 (self.start_time is None) and 1203 trueyzero or 1204 self.conn.requests_seen 1205 ), 1206 'Bytes Read': lambda s: self.bytes_read + ( 1207 (self.start_time is None) and 1208 trueyzero or 1209 self.conn.rfile.bytes_read 1210 ), 1211 'Bytes Written': lambda s: self.bytes_written + ( 1212 (self.start_time is None) and 1213 trueyzero or 1214 self.conn.wfile.bytes_written 1215 ), 1216 'Work Time': lambda s: self.work_time + ( 1217 (self.start_time is None) and 1218 trueyzero or 1219 time.time() - self.start_time 1220 ), 1221 'Read Throughput': lambda s: s['Bytes Read'](s) / ( 1222 s['Work Time'](s) or 1e-6), 1223 'Write Throughput': lambda s: s['Bytes Written'](s) / ( 1224 s['Work Time'](s) or 1e-6), 1225 } 1226 threading.Thread.__init__(self)
1227
1228 - def run(self):
1229 self.server.stats['Worker Threads'][self.getName()] = self.stats 1230 try: 1231 self.ready = True 1232 while True: 1233 conn = self.server.requests.get() 1234 if conn is _SHUTDOWNREQUEST: 1235 return 1236 1237 self.conn = conn 1238 if self.server.stats['Enabled']: 1239 self.start_time = time.time() 1240 try: 1241 conn.communicate() 1242 finally: 1243 conn.close() 1244 if self.server.stats['Enabled']: 1245 self.requests_seen += self.conn.requests_seen 1246 self.bytes_read += self.conn.rfile.bytes_read 1247 self.bytes_written += self.conn.wfile.bytes_written 1248 self.work_time += time.time() - self.start_time 1249 self.start_time = None 1250 self.conn = None 1251 except (KeyboardInterrupt, SystemExit): 1252 exc = sys.exc_info()[1] 1253 self.server.interrupt = exc
1254 1255
1256 -class ThreadPool(object):
1257 1258 """A Request Queue for an HTTPServer which pools threads. 1259 1260 ThreadPool objects must provide min, get(), put(obj), start() 1261 and stop(timeout) attributes. 1262 """ 1263
1264 - def __init__(self, server, min=10, max=-1, 1265 accepted_queue_size=-1, accepted_queue_timeout=10):
1266 self.server = server 1267 self.min = min 1268 self.max = max 1269 self._threads = [] 1270 self._queue = queue.Queue(maxsize=accepted_queue_size) 1271 self._queue_put_timeout = accepted_queue_timeout 1272 self.get = self._queue.get
1273
1274 - def start(self):
1275 """Start the pool of threads.""" 1276 for i in range(self.min): 1277 self._threads.append(WorkerThread(self.server)) 1278 for worker in self._threads: 1279 worker.setName("CP Server " + worker.getName()) 1280 worker.start() 1281 for worker in self._threads: 1282 while not worker.ready: 1283 time.sleep(.1)
1284
1285 - def _get_idle(self):
1286 """Number of worker threads which are idle. Read-only.""" 1287 return len([t for t in self._threads if t.conn is None])
1288 idle = property(_get_idle, doc=_get_idle.__doc__) 1289
1290 - def put(self, obj):
1291 self._queue.put(obj, block=True, timeout=self._queue_put_timeout) 1292 if obj is _SHUTDOWNREQUEST: 1293 return
1294
1295 - def grow(self, amount):
1296 """Spawn new worker threads (not above self.max).""" 1297 if self.max > 0: 1298 budget = max(self.max - len(self._threads), 0) 1299 else: 1300 # self.max <= 0 indicates no maximum 1301 budget = float('inf') 1302 1303 n_new = min(amount, budget) 1304 1305 workers = [self._spawn_worker() for i in range(n_new)] 1306 while not all(worker.ready for worker in workers): 1307 time.sleep(.1) 1308 self._threads.extend(workers)
1309
1310 - def _spawn_worker(self):
1311 worker = WorkerThread(self.server) 1312 worker.setName("CP Server " + worker.getName()) 1313 worker.start() 1314 return worker
1315
1316 - def shrink(self, amount):
1317 """Kill off worker threads (not below self.min).""" 1318 # Grow/shrink the pool if necessary. 1319 # Remove any dead threads from our list 1320 for t in self._threads: 1321 if not t.isAlive(): 1322 self._threads.remove(t) 1323 amount -= 1 1324 1325 # calculate the number of threads above the minimum 1326 n_extra = max(len(self._threads) - self.min, 0) 1327 1328 # don't remove more than amount 1329 n_to_remove = min(amount, n_extra) 1330 1331 # put shutdown requests on the queue equal to the number of threads 1332 # to remove. As each request is processed by a worker, that worker 1333 # will terminate and be culled from the list. 1334 for n in range(n_to_remove): 1335 self._queue.put(_SHUTDOWNREQUEST)
1336
1337 - def stop(self, timeout=5):
1338 # Must shut down threads here so the code that calls 1339 # this method can know when all threads are stopped. 1340 for worker in self._threads: 1341 self._queue.put(_SHUTDOWNREQUEST) 1342 1343 # Don't join currentThread (when stop is called inside a request). 1344 current = threading.currentThread() 1345 if timeout and timeout >= 0: 1346 endtime = time.time() + timeout 1347 while self._threads: 1348 worker = self._threads.pop() 1349 if worker is not current and worker.isAlive(): 1350 try: 1351 if timeout is None or timeout < 0: 1352 worker.join() 1353 else: 1354 remaining_time = endtime - time.time() 1355 if remaining_time > 0: 1356 worker.join(remaining_time) 1357 if worker.isAlive(): 1358 # We exhausted the timeout. 1359 # Forcibly shut down the socket. 1360 c = worker.conn 1361 if c and not c.rfile.closed: 1362 try: 1363 c.socket.shutdown(socket.SHUT_RD) 1364 except TypeError: 1365 # pyOpenSSL sockets don't take an arg 1366 c.socket.shutdown() 1367 worker.join() 1368 except (AssertionError, 1369 # Ignore repeated Ctrl-C. 1370 # See 1371 # https://bitbucket.org/cherrypy/cherrypy/issue/691. 1372 KeyboardInterrupt): 1373 pass
1374
1375 - def _get_qsize(self):
1376 return self._queue.qsize()
1377 qsize = property(_get_qsize)
1378 1379 1380 try: 1381 import fcntl 1382 except ImportError: 1383 try: 1384 from ctypes import windll, WinError 1385 import ctypes.wintypes 1386 _SetHandleInformation = windll.kernel32.SetHandleInformation 1387 _SetHandleInformation.argtypes = [ 1388 ctypes.wintypes.HANDLE, 1389 ctypes.wintypes.DWORD, 1390 ctypes.wintypes.DWORD, 1391 ] 1392 _SetHandleInformation.restype = ctypes.wintypes.BOOL 1393 except ImportError:
1394 - def prevent_socket_inheritance(sock):
1395 """Dummy function, since neither fcntl nor ctypes are available.""" 1396 pass
1397 else:
1398 - def prevent_socket_inheritance(sock):
1399 """Mark the given socket fd as non-inheritable (Windows).""" 1400 if not _SetHandleInformation(sock.fileno(), 1, 0): 1401 raise WinError()
1402 else:
1403 - def prevent_socket_inheritance(sock):
1404 """Mark the given socket fd as non-inheritable (POSIX).""" 1405 fd = sock.fileno() 1406 old_flags = fcntl.fcntl(fd, fcntl.F_GETFD) 1407 fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
1408 1409
1410  class SSLAdapter(object):
1411
1412      """Base class for SSL driver library adapters.
1413
1414      Required methods:
1415
1416      * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
1417      * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) ->
1418        socket file object``
1419      """
1420
1421      def __init__(self, certificate, private_key, certificate_chain=None):
1422          self.certificate = certificate
1423          self.private_key = private_key
1424          self.certificate_chain = certificate_chain
1425
1426      def wrap(self, sock):
1427          raise NotImplemented
1428
1429      def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
1430          raise NotImplemented
1431 1432
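# A hedged sketch of the SSLAdapter contract (editor's example, not part of
# the original module): a concrete adapter, such as the builtin one in
# cherrypy.wsgiserver.ssl_builtin, must return the wrapped socket plus a
# WSGI-style ssl environ dict from wrap(), and a buffered file object from
# makefile(). The class below does no TLS at all and exists only to show the
# required method shapes.
class _ExampleNullSSLAdapter(SSLAdapter):

    def wrap(self, sock):
        return sock, {}  # no handshake performed, empty ssl environ

    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        return CP_makefile(sock, mode, bufsize)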
1433 -class HTTPServer(object):
1434 1435 """An HTTP server.""" 1436 1437 _bind_addr = "127.0.0.1" 1438 _interrupt = None 1439 1440 gateway = None 1441 """A Gateway instance.""" 1442 1443 minthreads = None 1444 """The minimum number of worker threads to create (default 10).""" 1445 1446 maxthreads = None 1447 """The maximum number of worker threads to create (default -1 = no limit). 1448 """ 1449 1450 server_name = None 1451 """The name of the server; defaults to socket.gethostname().""" 1452 1453 protocol = "HTTP/1.1" 1454 """The version string to write in the Status-Line of all HTTP responses. 1455 1456 For example, "HTTP/1.1" is the default. This also limits the supported 1457 features used in the response.""" 1458 1459 request_queue_size = 5 1460 """The 'backlog' arg to socket.listen(); max queued connections 1461 (default 5). 1462 """ 1463 1464 shutdown_timeout = 5 1465 """The total time, in seconds, to wait for worker threads to cleanly exit. 1466 """ 1467 1468 timeout = 10 1469 """The timeout in seconds for accepted connections (default 10).""" 1470 1471 version = "CherryPy/3.5.0" 1472 """A version string for the HTTPServer.""" 1473 1474 software = None 1475 """The value to set for the SERVER_SOFTWARE entry in the WSGI environ. 1476 1477 If None, this defaults to ``'%s Server' % self.version``.""" 1478 1479 ready = False 1480 """An internal flag which marks whether the socket is accepting 1481 connections. 1482 """ 1483 1484 max_request_header_size = 0 1485 """The maximum size, in bytes, for request headers, or 0 for no limit.""" 1486 1487 max_request_body_size = 0 1488 """The maximum size, in bytes, for request bodies, or 0 for no limit.""" 1489 1490 nodelay = True 1491 """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" 1492 1493 ConnectionClass = HTTPConnection 1494 """The class to use for handling HTTP connections.""" 1495 1496 ssl_adapter = None 1497 """An instance of SSLAdapter (or a subclass). 1498 1499 You must have the corresponding SSL driver library installed.""" 1500
1501 - def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1, 1502 server_name=None):
1503 self.bind_addr = bind_addr 1504 self.gateway = gateway 1505 1506 self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads) 1507 1508 if not server_name: 1509 server_name = socket.gethostname() 1510 self.server_name = server_name 1511 self.clear_stats()
1512
1513 - def clear_stats(self):
1514 self._start_time = None 1515 self._run_time = 0 1516 self.stats = { 1517 'Enabled': False, 1518 'Bind Address': lambda s: repr(self.bind_addr), 1519 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(), 1520 'Accepts': 0, 1521 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 1522 'Queue': lambda s: getattr(self.requests, "qsize", None), 1523 'Threads': lambda s: len(getattr(self.requests, "_threads", [])), 1524 'Threads Idle': lambda s: getattr(self.requests, "idle", None), 1525 'Socket Errors': 0, 1526 'Requests': lambda s: (not s['Enabled']) and -1 or sum( 1527 [w['Requests'](w) for w in s['Worker Threads'].values()], 0), 1528 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum( 1529 [w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0), 1530 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum( 1531 [w['Bytes Written'](w) for w in s['Worker Threads'].values()], 1532 0), 1533 'Work Time': lambda s: (not s['Enabled']) and -1 or sum( 1534 [w['Work Time'](w) for w in s['Worker Threads'].values()], 0), 1535 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum( 1536 [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6) 1537 for w in s['Worker Threads'].values()], 0), 1538 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum( 1539 [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6) 1540 for w in s['Worker Threads'].values()], 0), 1541 'Worker Threads': {}, 1542 } 1543 logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
1544
1545 - def runtime(self):
1546 if self._start_time is None: 1547 return self._run_time 1548 else: 1549 return self._run_time + (time.time() - self._start_time)
1550
1551 - def __str__(self):
1552 return "%s.%s(%r)" % (self.__module__, self.__class__.__name__, 1553 self.bind_addr)
1554
1555 - def _get_bind_addr(self):
1556 return self._bind_addr
1557
1558 - def _set_bind_addr(self, value):
1559 if isinstance(value, tuple) and value[0] in ('', None): 1560 # Despite the socket module docs, using '' does not 1561 # allow AI_PASSIVE to work. Passing None instead 1562 # returns '0.0.0.0' like we want. In other words: 1563 # host AI_PASSIVE result 1564 # '' Y 192.168.x.y 1565 # '' N 192.168.x.y 1566 # None Y 0.0.0.0 1567 # None N 127.0.0.1 1568 # But since you can get the same effect with an explicit 1569 # '0.0.0.0', we deny both the empty string and None as values. 1570 raise ValueError("Host values of '' or None are not allowed. " 1571 "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead " 1572 "to listen on all active interfaces.") 1573 self._bind_addr = value
1574 bind_addr = property( 1575 _get_bind_addr, 1576 _set_bind_addr, 1577 doc="""The interface on which to listen for connections. 1578 1579 For TCP sockets, a (host, port) tuple. Host values may be any IPv4 1580 or IPv6 address, or any valid hostname. The string 'localhost' is a 1581 synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). 1582 The string '0.0.0.0' is a special IPv4 entry meaning "any active 1583 interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for 1584 IPv6. The empty string or None are not allowed. 1585 1586 For UNIX sockets, supply the filename as a string.""") 1587
1588 - def start(self):
1589 """Run the server forever.""" 1590 # We don't have to trap KeyboardInterrupt or SystemExit here, 1591 # because cherrpy.server already does so, calling self.stop() for us. 1592 # If you're using this server with another framework, you should 1593 # trap those exceptions in whatever code block calls start(). 1594 self._interrupt = None 1595 1596 if self.software is None: 1597 self.software = "%s Server" % self.version 1598 1599 # Select the appropriate socket 1600 if isinstance(self.bind_addr, basestring): 1601 # AF_UNIX socket 1602 1603 # So we can reuse the socket... 1604 try: 1605 os.unlink(self.bind_addr) 1606 except: 1607 pass 1608 1609 # So everyone can access the socket... 1610 try: 1611 os.chmod(self.bind_addr, 511) # 0777 1612 except: 1613 pass 1614 1615 info = [ 1616 (socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)] 1617 else: 1618 # AF_INET or AF_INET6 socket 1619 # Get the correct address family for our host (allows IPv6 1620 # addresses) 1621 host, port = self.bind_addr 1622 try: 1623 info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, 1624 socket.SOCK_STREAM, 0, 1625 socket.AI_PASSIVE) 1626 except socket.gaierror: 1627 if ':' in self.bind_addr[0]: 1628 info = [(socket.AF_INET6, socket.SOCK_STREAM, 1629 0, "", self.bind_addr + (0, 0))] 1630 else: 1631 info = [(socket.AF_INET, socket.SOCK_STREAM, 1632 0, "", self.bind_addr)] 1633 1634 self.socket = None 1635 msg = "No socket could be created" 1636 for res in info: 1637 af, socktype, proto, canonname, sa = res 1638 try: 1639 self.bind(af, socktype, proto) 1640 except socket.error as serr: 1641 msg = "%s -- (%s: %s)" % (msg, sa, serr) 1642 if self.socket: 1643 self.socket.close() 1644 self.socket = None 1645 continue 1646 break 1647 if not self.socket: 1648 raise socket.error(msg) 1649 1650 # Timeout so KeyboardInterrupt can be caught on Win32 1651 self.socket.settimeout(1) 1652 self.socket.listen(self.request_queue_size) 1653 1654 # Create worker threads 1655 self.requests.start() 1656 1657 self.ready = True 1658 self._start_time = time.time() 1659 while self.ready: 1660 try: 1661 self.tick() 1662 except (KeyboardInterrupt, SystemExit): 1663 raise 1664 except: 1665 self.error_log("Error in HTTPServer.tick", level=logging.ERROR, 1666 traceback=True) 1667 if self.interrupt: 1668 while self.interrupt is True: 1669 # Wait for self.stop() to complete. See _set_interrupt. 1670 time.sleep(0.1) 1671 if self.interrupt: 1672 raise self.interrupt
1673
1674 - def error_log(self, msg="", level=20, traceback=False):
1675 # Override this in subclasses as desired 1676 sys.stderr.write(msg + '\n') 1677 sys.stderr.flush() 1678 if traceback: 1679 tblines = format_exc() 1680 sys.stderr.write(tblines) 1681 sys.stderr.flush()
1682
1683 - def bind(self, family, type, proto=0):
1684 """Create (or recreate) the actual socket object.""" 1685 self.socket = socket.socket(family, type, proto) 1686 prevent_socket_inheritance(self.socket) 1687 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 1688 if self.nodelay and not isinstance(self.bind_addr, str): 1689 self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 1690 1691 if self.ssl_adapter is not None: 1692 self.socket = self.ssl_adapter.bind(self.socket) 1693 1694 # If listening on the IPV6 any address ('::' = IN6ADDR_ANY), 1695 # activate dual-stack. See 1696 # https://bitbucket.org/cherrypy/cherrypy/issue/871. 1697 if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 1698 and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')): 1699 try: 1700 self.socket.setsockopt( 1701 socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) 1702 except (AttributeError, socket.error): 1703 # Apparently, the socket option is not available in 1704 # this machine's TCP stack 1705 pass 1706 1707 self.socket.bind(self.bind_addr)
1708
1709 - def tick(self):
1710 """Accept a new connection and put it on the Queue.""" 1711 try: 1712 s, addr = self.socket.accept() 1713 if self.stats['Enabled']: 1714 self.stats['Accepts'] += 1 1715 if not self.ready: 1716 return 1717 1718 prevent_socket_inheritance(s) 1719 if hasattr(s, 'settimeout'): 1720 s.settimeout(self.timeout) 1721 1722 makefile = CP_makefile 1723 ssl_env = {} 1724 # if ssl cert and key are set, we try to be a secure HTTP server 1725 if self.ssl_adapter is not None: 1726 try: 1727 s, ssl_env = self.ssl_adapter.wrap(s) 1728 except NoSSLError: 1729 msg = ("The client sent a plain HTTP request, but " 1730 "this server only speaks HTTPS on this port.") 1731 buf = ["%s 400 Bad Request\r\n" % self.protocol, 1732 "Content-Length: %s\r\n" % len(msg), 1733 "Content-Type: text/plain\r\n\r\n", 1734 msg] 1735 1736 wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE) 1737 try: 1738 wfile.write("".join(buf).encode('ISO-8859-1')) 1739 except socket.error: 1740 x = sys.exc_info()[1] 1741 if x.args[0] not in socket_errors_to_ignore: 1742 raise 1743 return 1744 if not s: 1745 return 1746 makefile = self.ssl_adapter.makefile 1747 # Re-apply our timeout since we may have a new socket object 1748 if hasattr(s, 'settimeout'): 1749 s.settimeout(self.timeout) 1750 1751 conn = self.ConnectionClass(self, s, makefile) 1752 1753 if not isinstance(self.bind_addr, basestring): 1754 # optional values 1755 # Until we do DNS lookups, omit REMOTE_HOST 1756 if addr is None: # sometimes this can happen 1757 # figure out if AF_INET or AF_INET6. 1758 if len(s.getsockname()) == 2: 1759 # AF_INET 1760 addr = ('0.0.0.0', 0) 1761 else: 1762 # AF_INET6 1763 addr = ('::', 0) 1764 conn.remote_addr = addr[0] 1765 conn.remote_port = addr[1] 1766 1767 conn.ssl_env = ssl_env 1768 1769 try: 1770 self.requests.put(conn) 1771 except queue.Full: 1772 # Just drop the conn. TODO: write 503 back? 1773 conn.close() 1774 return 1775 except socket.timeout: 1776 # The only reason for the timeout in start() is so we can 1777 # notice keyboard interrupts on Win32, which don't interrupt 1778 # accept() by default 1779 return 1780 except socket.error: 1781 x = sys.exc_info()[1] 1782 if self.stats['Enabled']: 1783 self.stats['Socket Errors'] += 1 1784 if x.args[0] in socket_error_eintr: 1785 # I *think* this is right. EINTR should occur when a signal 1786 # is received during the accept() call; all docs say retry 1787 # the call, and I *think* I'm reading it right that Python 1788 # will then go ahead and poll for and handle the signal 1789 # elsewhere. See 1790 # https://bitbucket.org/cherrypy/cherrypy/issue/707. 1791 return 1792 if x.args[0] in socket_errors_nonblocking: 1793 # Just try again. See 1794 # https://bitbucket.org/cherrypy/cherrypy/issue/479. 1795 return 1796 if x.args[0] in socket_errors_to_ignore: 1797 # Our socket was closed. 1798 # See https://bitbucket.org/cherrypy/cherrypy/issue/686. 1799 return 1800 raise
1801
1802      def _get_interrupt(self):
1803          return self._interrupt
1804
1805      def _set_interrupt(self, interrupt):
1806          self._interrupt = True
1807          self.stop()
1808          self._interrupt = interrupt
1809      interrupt = property(_get_interrupt, _set_interrupt,
1810                           doc="Set this to an Exception instance to "
1811                               "interrupt the server.")
1812  
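Assigning to the ``interrupt`` property is how another thread can break a server out of
its serving loop: the setter calls stop() and keeps the exception so it can propagate
from the serving thread. A minimal sketch, not part of the module (the helper name and
the choice of KeyboardInterrupt are illustrative; ``server`` is assumed to be a
CherryPyWSGIServer started elsewhere)::

    import threading

    def interrupt_after(server, delay):
        def _interrupt():
            # Triggers server.stop() and asks the serving thread to re-raise.
            server.interrupt = KeyboardInterrupt('external shutdown request')
        threading.Timer(delay, _interrupt).start()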
1813      def stop(self):
1814          """Gracefully shutdown a server that is serving forever."""
1815          self.ready = False
1816          if self._start_time is not None:
1817              self._run_time += (time.time() - self._start_time)
1818          self._start_time = None
1819  
1820          sock = getattr(self, "socket", None)
1821          if sock:
1822              if not isinstance(self.bind_addr, basestring):
1823                  # Touch our own socket to make accept() return immediately.
1824                  try:
1825                      host, port = sock.getsockname()[:2]
1826                  except socket.error:
1827                      x = sys.exc_info()[1]
1828                      if x.args[0] not in socket_errors_to_ignore:
1829                          # Changed to use error code and not message
1830                          # See
1831                          # https://bitbucket.org/cherrypy/cherrypy/issue/860.
1832                          raise
1833                  else:
1834                      # Note that we're explicitly NOT using AI_PASSIVE,
1835                      # here, because we want an actual IP to touch.
1836                      # localhost won't work if we've bound to a public IP,
1837                      # but it will if we bound to '0.0.0.0' (INADDR_ANY).
1838                      for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
1839                                                    socket.SOCK_STREAM):
1840                          af, socktype, proto, canonname, sa = res
1841                          s = None
1842                          try:
1843                              s = socket.socket(af, socktype, proto)
1844                              # See
1845                              # http://groups.google.com/group/cherrypy-users/
1846                              # browse_frm/thread/bbfe5eb39c904fe0
1847                              s.settimeout(1.0)
1848                              s.connect((host, port))
1849                              s.close()
1850                          except socket.error:
1851                              if s:
1852                                  s.close()
1853              if hasattr(sock, "close"):
1854                  sock.close()
1855              self.socket = None
1856  
1857          self.requests.stop(self.shutdown_timeout)
1858  
1859  
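stop() unblocks a pending accept() by connecting to its own listening socket, closes the
socket, and then gives the worker pool up to shutdown_timeout seconds to finish. A sketch
of wiring this to SIGTERM, not part of the module (``server`` is assumed to be a running
CherryPyWSGIServer; the handler name is illustrative)::

    import signal

    def install_sigterm_handler(server):
        def _handle(signum, frame):
            # Graceful: stop accepting and let in-flight requests drain.
            server.stop()
        signal.signal(signal.SIGTERM, _handle)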
1860  class Gateway(object):
1861  
1862      """A base class to interface HTTPServer with other systems, such as WSGI.
1863      """
1864  
1865      def __init__(self, req):
1866          self.req = req
1867
1868      def respond(self):
1869          """Process the current request. Must be overridden in a subclass."""
1870          raise NotImplementedError
1871  
1872  
1873  # These may either be wsgiserver.SSLAdapter subclasses or the string names
1874  # of such classes (in which case they will be lazily loaded).
1875  ssl_adapters = {
1876      'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
1877  }
1878  
1879  
1880  def get_ssl_adapter_class(name='builtin'):
1881      """Return an SSL adapter class for the given name."""
1882      adapter = ssl_adapters[name.lower()]
1883      if isinstance(adapter, basestring):
1884          last_dot = adapter.rfind(".")
1885          attr_name = adapter[last_dot + 1:]
1886          mod_path = adapter[:last_dot]
1887  
1888          try:
1889              mod = sys.modules[mod_path]
1890              if mod is None:
1891                  raise KeyError()
1892          except KeyError:
1893              # The last [''] is important.
1894              mod = __import__(mod_path, globals(), locals(), [''])
1895  
1896          # Let an AttributeError propagate outward.
1897          try:
1898              adapter = getattr(mod, attr_name)
1899          except AttributeError:
1900              raise AttributeError("'%s' object has no attribute '%s'"
1901                                   % (mod_path, attr_name))
1902  
1903      return adapter
1904  
1905  # ------------------------------- WSGI Stuff -------------------------------- #
1906  
1907  
1908  class CherryPyWSGIServer(HTTPServer):
1909  
1910      """A subclass of HTTPServer which calls a WSGI application."""
1911  
1912      wsgi_version = (1, 0)
1913      """The version of WSGI to produce."""
1914  
1915      def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
1916                   max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5,
1917                   accepted_queue_size=-1, accepted_queue_timeout=10):
1918          self.requests = ThreadPool(self, min=numthreads or 1, max=max,
1919                                     accepted_queue_size=accepted_queue_size,
1920                                     accepted_queue_timeout=accepted_queue_timeout)
1921          self.wsgi_app = wsgi_app
1922          self.gateway = wsgi_gateways[self.wsgi_version]
1923  
1924          self.bind_addr = bind_addr
1925          if not server_name:
1926              server_name = socket.gethostname()
1927          self.server_name = server_name
1928          self.request_queue_size = request_queue_size
1929  
1930          self.timeout = timeout
1931          self.shutdown_timeout = shutdown_timeout
1932          self.clear_stats()
1933
1934      def _get_numthreads(self):
1935          return self.requests.min
1936
1937      def _set_numthreads(self, value):
1938          self.requests.min = value
1939      numthreads = property(_get_numthreads, _set_numthreads)
1940  
1941  
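numthreads simply proxies the request pool's ``min`` attribute, so assigning to it after
construction only adjusts that minimum; whether the pool actually grows or shrinks is
left to the ThreadPool itself. A sketch (assuming ``server`` is an existing
CherryPyWSGIServer)::

    server.numthreads = 30  # raise the pool's minimum worker count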
1942  class WSGIGateway(Gateway):
1943  
1944      """A base class to interface HTTPServer with WSGI."""
1945  
1946      def __init__(self, req):
1947          self.req = req
1948          self.started_response = False
1949          self.env = self.get_environ()
1950          self.remaining_bytes_out = None
1951
1952      def get_environ(self):
1953          """Return a new environ dict targeting the given wsgi.version"""
1954          raise NotImplementedError
1955
1956      def respond(self):
1957          """Process the current request."""
1958          response = self.req.server.wsgi_app(self.env, self.start_response)
1959          try:
1960              for chunk in response:
1961                  # "The start_response callable must not actually transmit
1962                  # the response headers. Instead, it must store them for the
1963                  # server or gateway to transmit only after the first
1964                  # iteration of the application return value that yields
1965                  # a NON-EMPTY string, or upon the application's first
1966                  # invocation of the write() callable." (PEP 333)
1967                  if chunk:
1968                      if isinstance(chunk, unicodestr):
1969                          chunk = chunk.encode('ISO-8859-1')
1970                      self.write(chunk)
1971          finally:
1972              if hasattr(response, "close"):
1973                  response.close()
1974
1975      def start_response(self, status, headers, exc_info=None):
1976          """WSGI callable to begin the HTTP response."""
1977          # "The application may call start_response more than once,
1978          # if and only if the exc_info argument is provided."
1979          if self.started_response and not exc_info:
1980              raise AssertionError("WSGI start_response called a second "
1981                                   "time with no exc_info.")
1982          self.started_response = True
1983  
1984          # "if exc_info is provided, and the HTTP headers have already been
1985          # sent, start_response must raise an error, and should raise the
1986          # exc_info tuple."
1987          if self.req.sent_headers:
1988              try:
1989                  raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
1990              finally:
1991                  exc_info = None
1992  
1993          # According to PEP 3333, when using Python 3, the response status
1994          # and headers must be bytes masquerading as unicode; that is, they
1995          # must be of type "str" but are restricted to code points in the
1996          # "latin-1" set.
1997          if not isinstance(status, str):
1998              raise TypeError("WSGI response status is not of type str.")
1999          self.req.status = status.encode('ISO-8859-1')
2000  
2001          for k, v in headers:
2002              if not isinstance(k, str):
2003                  raise TypeError(
2004                      "WSGI response header key %r is not of type str." % k)
2005              if not isinstance(v, str):
2006                  raise TypeError(
2007                      "WSGI response header value %r is not of type str." % v)
2008              if k.lower() == 'content-length':
2009                  self.remaining_bytes_out = int(v)
2010              self.req.outheaders.append(
2011                  (k.encode('ISO-8859-1'), v.encode('ISO-8859-1')))
2012  
2013          return self.write
2014
2015      def write(self, chunk):
2016          """WSGI callable to write unbuffered data to the client.
2017  
2018          This method is also used internally by start_response (to write
2019          data from the iterable returned by the WSGI application).
2020          """
2021          if not self.started_response:
2022              raise AssertionError("WSGI write called before start_response.")
2023  
2024          chunklen = len(chunk)
2025          rbo = self.remaining_bytes_out
2026          if rbo is not None and chunklen > rbo:
2027              if not self.req.sent_headers:
2028                  # Whew. We can send a 500 to the client.
2029                  self.req.simple_response("500 Internal Server Error",
2030                                           "The requested resource returned "
2031                                           "more bytes than the declared "
2032                                           "Content-Length.")
2033              else:
2034                  # Dang. We have probably already sent data. Truncate the chunk
2035                  # to fit (so the client doesn't hang) and raise an error later.
2036                  chunk = chunk[:rbo]
2037  
2038          if not self.req.sent_headers:
2039              self.req.sent_headers = True
2040              self.req.send_headers()
2041  
2042          self.req.write(chunk)
2043  
2044          if rbo is not None:
2045              rbo -= chunklen
2046              if rbo < 0:
2047                  raise ValueError(
2048                      "Response body exceeds the declared Content-Length.")
2049  
2050  
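write() enforces any Content-Length the application declared via start_response: an
over-long body is truncated and then reported with a ValueError. A conforming
application therefore declares a length that matches what it actually yields; a sketch
of such an app (the function name is illustrative, not part of the module)::

    def length_checked_app(environ, start_response):
        body = b'Hello world!'
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]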
2051  class WSGIGateway_10(WSGIGateway):
2052  
2053      """A Gateway class to interface HTTPServer with WSGI 1.0.x."""
2054  
2055      def get_environ(self):
2056          """Return a new environ dict targeting the given wsgi.version"""
2057          req = self.req
2058          env = {
2059              # set a non-standard environ entry so the WSGI app can know what
2060              # the *real* server protocol is (and what features to support).
2061              # See http://www.faqs.org/rfcs/rfc2145.html.
2062              'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
2063              'PATH_INFO': req.path.decode('ISO-8859-1'),
2064              'QUERY_STRING': req.qs.decode('ISO-8859-1'),
2065              'REMOTE_ADDR': req.conn.remote_addr or '',
2066              'REMOTE_PORT': str(req.conn.remote_port or ''),
2067              'REQUEST_METHOD': req.method.decode('ISO-8859-1'),
2068              'REQUEST_URI': req.uri.decode('ISO-8859-1'),
2069              'SCRIPT_NAME': '',
2070              'SERVER_NAME': req.server.server_name,
2071              # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
2072              'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'),
2073              'SERVER_SOFTWARE': req.server.software,
2074              'wsgi.errors': sys.stderr,
2075              'wsgi.input': req.rfile,
2076              'wsgi.multiprocess': False,
2077              'wsgi.multithread': True,
2078              'wsgi.run_once': False,
2079              'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'),
2080              'wsgi.version': (1, 0),
2081          }
2082          if isinstance(req.server.bind_addr, basestring):
2083              # AF_UNIX. This isn't really allowed by WSGI, which doesn't
2084              # address unix domain sockets. But it's better than nothing.
2085              env["SERVER_PORT"] = ""
2086          else:
2087              env["SERVER_PORT"] = str(req.server.bind_addr[1])
2088  
2089          # Request headers
2090          for k, v in req.inheaders.items():
2091              k = k.decode('ISO-8859-1').upper().replace("-", "_")
2092              env["HTTP_" + k] = v.decode('ISO-8859-1')
2093  
2094          # CONTENT_TYPE/CONTENT_LENGTH
2095          ct = env.pop("HTTP_CONTENT_TYPE", None)
2096          if ct is not None:
2097              env["CONTENT_TYPE"] = ct
2098          cl = env.pop("HTTP_CONTENT_LENGTH", None)
2099          if cl is not None:
2100              env["CONTENT_LENGTH"] = cl
2101  
2102          if req.conn.ssl_env:
2103              env.update(req.conn.ssl_env)
2104  
2105          return env
2106  
2107  
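Besides the standard CGI-style keys, this environ carries the non-standard
ACTUAL_SERVER_PROTOCOL entry and, for HTTPS connections, whatever the SSL adapter put in
ssl_env. A sketch of a middleware reading those entries (the name is illustrative; the
.get() call hedges against configurations that supply no such entry)::

    import sys

    def protocol_logging_middleware(app):
        def wrapped(environ, start_response):
            # The request protocol and the server's real protocol may differ,
            # e.g. an HTTP/1.0 request handled by an HTTP/1.1 server.
            sys.stderr.write('%s via %s\n' % (
                environ['SERVER_PROTOCOL'],
                environ.get('ACTUAL_SERVER_PROTOCOL', 'unknown')))
            return app(environ, start_response)
        return wrapped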
2108  class WSGIGateway_u0(WSGIGateway_10):
2109  
2110      """A Gateway class to interface HTTPServer with WSGI u.0.
2111  
2112      WSGI u.0 is an experimental protocol, which uses unicode for keys
2113      and values in both Python 2 and Python 3.
2114      """
2115  
2116      def get_environ(self):
2117          """Return a new environ dict targeting the given wsgi.version"""
2118          req = self.req
2119          env_10 = WSGIGateway_10.get_environ(self)
2120          env = env_10.copy()
2121          env['wsgi.version'] = ('u', 0)
2122  
2123          # Request-URI
2124          env.setdefault('wsgi.url_encoding', 'utf-8')
2125          try:
2126              # SCRIPT_NAME is the empty string, who cares what encoding it is?
2127              env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding'])
2128              env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding'])
2129          except UnicodeDecodeError:
2130              # Fall back to latin 1 so apps can transcode if needed.
2131              env['wsgi.url_encoding'] = 'ISO-8859-1'
2132              env["PATH_INFO"] = env_10["PATH_INFO"]
2133              env["QUERY_STRING"] = env_10["QUERY_STRING"]
2134  
2135          return env
2136  
2137  wsgi_gateways = {
2138      (1, 0): WSGIGateway_10,
2139      ('u', 0): WSGIGateway_u0,
2140  }
2141  
2142  
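Because CherryPyWSGIServer.__init__ selects its gateway from this table via the
class-level wsgi_version attribute, the experimental u.0 gateway can be chosen by
subclassing; a sketch (the subclass name is illustrative only)::

    class CherryPyWSGIServer_u0(CherryPyWSGIServer):
        wsgi_version = ('u', 0)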
2143  class WSGIPathInfoDispatcher(object):
2144  
2145      """A WSGI dispatcher for dispatch based on the PATH_INFO.
2146  
2147      apps: a dict or list of (path_prefix, app) pairs.
2148      """
2149  
2150      def __init__(self, apps):
2151          try:
2152              apps = list(apps.items())
2153          except AttributeError:
2154              pass
2155  
2156          # Sort the apps by len(path), descending
2157          apps.sort()
2158          apps.reverse()
2159  
2160          # The path_prefix strings must start, but not end, with a slash.
2161          # Use "" instead of "/".
2162          self.apps = [(p.rstrip("/"), a) for p, a in apps]
2163
2164      def __call__(self, environ, start_response):
2165          path = environ["PATH_INFO"] or "/"
2166          for p, app in self.apps:
2167              # The apps list should be sorted by length, descending.
2168              if path.startswith(p + "/") or path == p:
2169                  environ = environ.copy()
2170                  environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
2171                  environ["PATH_INFO"] = path[len(p):]
2172                  return app(environ, start_response)
2173  
2174          start_response('404 Not Found', [('Content-Type', 'text/plain'),
2175                                           ('Content-Length', '0')])
2176          return ['']
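When a prefix matches, the dispatcher shifts it from PATH_INFO onto SCRIPT_NAME before
invoking the selected app; paths that match no prefix get an empty 404. A sketch of the
resulting split (``blog_app`` is a placeholder WSGI callable)::

    d = WSGIPathInfoDispatcher([('/blog', blog_app)])
    # A request for '/blog/2014/01' now reaches blog_app with:
    #     environ['SCRIPT_NAME'] == '/blog'
    #     environ['PATH_INFO'] == '/2014/01'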
2177