diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/__init__.py
new file mode 100644
index 00000000..309d698a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/__init__.py
@@ -0,0 +1,15 @@
+# This is the canonical package information.
+__author__ = 'Andrew Dunham'
+__license__ = 'Apache'
+__copyright__ = "Copyright (c) 2012-2013, Andrew Dunham"
+__version__ = "0.0.6"
+
+
+from .multipart import (
+ FormParser,
+ MultipartParser,
+ QuerystringParser,
+ OctetStreamParser,
+ create_form_parser,
+ parse_form,
+)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/decoders.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/decoders.py
new file mode 100644
index 00000000..0d7ab32e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/decoders.py
@@ -0,0 +1,171 @@
+import base64
+import binascii
+
+from .exceptions import DecodeError
+
+
+class Base64Decoder:
+ """This object provides an interface to decode a stream of Base64 data. It
+ is instantiated with an "underlying object", and whenever a write()
+ operation is performed, it will decode the incoming data as Base64, and
+ call write() on the underlying object. This is primarily used for decoding
+ form data encoded as Base64, but can be used for other purposes::
+
+ from multipart.decoders import Base64Decoder
+ fd = open("notb64.txt", "wb")
+ decoder = Base64Decoder(fd)
+ try:
+ decoder.write("Zm9vYmFy") # "foobar" in Base64
+ decoder.finalize()
+ finally:
+ decoder.close()
+
+ # The contents of "notb64.txt" should be "foobar".
+
+ This object will also pass all finalize() and close() calls to the
+ underlying object, if the underlying object supports them.
+
+ Note that this class maintains a cache of base64 chunks, so that a write of
+ arbitrary size can be performed. You must call :meth:`finalize` on this
+ object after all writes are completed to ensure that all data is flushed
+ to the underlying object.
+
+ :param underlying: the underlying object to pass writes to
+ """
+
+ def __init__(self, underlying):
+ self.cache = bytearray()
+ self.underlying = underlying
+
+ def write(self, data):
+ """Takes any input data provided, decodes it as base64, and passes it
+ on to the underlying object. If the data provided is invalid base64
+ data, then this method will raise
+ a :class:`multipart.exceptions.DecodeError`
+
+ :param data: base64 data to decode
+ """
+
+ # Prepend any cache info to our data.
+ if len(self.cache) > 0:
+ data = self.cache + data
+
+ # Slice off a string that's a multiple of 4.
+ decode_len = (len(data) // 4) * 4
+ val = data[:decode_len]
+
+ # Decode and write, if we have any.
+ if len(val) > 0:
+ try:
+ decoded = base64.b64decode(val)
+ except binascii.Error:
+ raise DecodeError('There was an error raised while decoding '
+ 'base64-encoded data.')
+
+ self.underlying.write(decoded)
+
+ # Get the remaining bytes and save in our cache.
+ remaining_len = len(data) % 4
+ if remaining_len > 0:
+ self.cache = data[-remaining_len:]
+ else:
+ self.cache = b''
+
+ # Return the length of the data to indicate no error.
+ return len(data)
+
+ def close(self):
+ """Close this decoder. If the underlying object has a `close()`
+ method, this function will call it.
+ """
+ if hasattr(self.underlying, 'close'):
+ self.underlying.close()
+
+ def finalize(self):
+ """Finalize this object. This should be called when no more data
+ should be written to the stream. This function can raise a
+ :class:`multipart.exceptions.DecodeError` if there is some remaining
+ data in the cache.
+
+ If the underlying object has a `finalize()` method, this function will
+ call it.
+ """
+ if len(self.cache) > 0:
+ raise DecodeError('There are %d bytes remaining in the '
+ 'Base64Decoder cache when finalize() is called'
+ % len(self.cache))
+
+ if hasattr(self.underlying, 'finalize'):
+ self.underlying.finalize()
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(underlying={self.underlying!r})"
+
+
+class QuotedPrintableDecoder:
+ """This object provides an interface to decode a stream of quoted-printable
+ data. It is instantiated with an "underlying object", in the same manner
+ as the :class:`multipart.decoders.Base64Decoder` class. This class behaves
+ in exactly the same way, including maintaining a cache of quoted-printable
+ chunks.
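+
+    For example, a minimal sketch mirroring the Base64Decoder example above
+    (the output file name is purely illustrative)::
+
+        from multipart.decoders import QuotedPrintableDecoder
+        fd = open("notqp.txt", "wb")
+        decoder = QuotedPrintableDecoder(fd)
+        try:
+            decoder.write(b"foo=3Dbar")  # decodes to b"foo=bar"
+            decoder.finalize()
+        finally:
+            decoder.close()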
+
+ :param underlying: the underlying object to pass writes to
+ """
+ def __init__(self, underlying):
+ self.cache = b''
+ self.underlying = underlying
+
+ def write(self, data):
+ """Takes any input data provided, decodes it as quoted-printable, and
+ passes it on to the underlying object.
+
+ :param data: quoted-printable data to decode
+ """
+ # Prepend any cache info to our data.
+ if len(self.cache) > 0:
+ data = self.cache + data
+
+        # If either of the last 2 characters is an '=' sign, then we may have
+        # an incomplete escape sequence, so we save those bytes for the
+        # next decoding step.
+ if data[-2:].find(b'=') != -1:
+ enc, rest = data[:-2], data[-2:]
+ else:
+ enc = data
+ rest = b''
+
+ # Encode and write, if we have data.
+ if len(enc) > 0:
+ self.underlying.write(binascii.a2b_qp(enc))
+
+ # Save remaining in cache.
+ self.cache = rest
+ return len(data)
+
+ def close(self):
+ """Close this decoder. If the underlying object has a `close()`
+ method, this function will call it.
+ """
+ if hasattr(self.underlying, 'close'):
+ self.underlying.close()
+
+ def finalize(self):
+ """Finalize this object. This should be called when no more data
+ should be written to the stream. This function will not raise any
+ exceptions, but it may write more data to the underlying object if
+ there is data remaining in the cache.
+
+ If the underlying object has a `finalize()` method, this function will
+ call it.
+ """
+ # If we have a cache, write and then remove it.
+ if len(self.cache) > 0:
+ self.underlying.write(binascii.a2b_qp(self.cache))
+ self.cache = b''
+
+ # Finalize our underlying stream.
+ if hasattr(self.underlying, 'finalize'):
+ self.underlying.finalize()
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(underlying={self.underlying!r})"
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/exceptions.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/exceptions.py
new file mode 100644
index 00000000..016e7f7c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/exceptions.py
@@ -0,0 +1,46 @@
+class FormParserError(ValueError):
+ """Base error class for our form parser."""
+ pass
+
+
+class ParseError(FormParserError):
+ """This exception (or a subclass) is raised when there is an error while
+ parsing something.
+ """
+
+    #: This is the offset in the input data chunk (*NOT* the overall stream)
+    #: at which the parse error occurred. It will be -1 if not specified.
+ offset = -1
+
+
+class MultipartParseError(ParseError):
+ """This is a specific error that is raised when the MultipartParser detects
+ an error while parsing.
+ """
+ pass
+
+
+class QuerystringParseError(ParseError):
+ """This is a specific error that is raised when the QuerystringParser
+ detects an error while parsing.
+ """
+ pass
+
+
+class DecodeError(ParseError):
+ """This exception is raised when there is a decoding error - for example
+ with the Base64Decoder or QuotedPrintableDecoder.
+ """
+ pass
+
+
+# Since Python 3.3, IOError is an alias of OSError, so we don't want to
+# inherit from both of them. We handle this case below.
+if IOError is not OSError: # pragma: no cover
+ class FileError(FormParserError, IOError, OSError):
+ """Exception class for problems with the File class."""
+ pass
+else: # pragma: no cover
+ class FileError(FormParserError, OSError):
+ """Exception class for problems with the File class."""
+ pass
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/multipart.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/multipart.py
new file mode 100644
index 00000000..a9f1f9f6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/multipart.py
@@ -0,0 +1,1893 @@
+from .decoders import *
+from .exceptions import *
+
+import os
+import re
+import sys
+import shutil
+import logging
+import tempfile
+from io import BytesIO
+from numbers import Number
+
+# Unique missing object.
+_missing = object()
+
+# States for the querystring parser.
+STATE_BEFORE_FIELD = 0
+STATE_FIELD_NAME = 1
+STATE_FIELD_DATA = 2
+
+# States for the multipart parser
+STATE_START = 0
+STATE_START_BOUNDARY = 1
+STATE_HEADER_FIELD_START = 2
+STATE_HEADER_FIELD = 3
+STATE_HEADER_VALUE_START = 4
+STATE_HEADER_VALUE = 5
+STATE_HEADER_VALUE_ALMOST_DONE = 6
+STATE_HEADERS_ALMOST_DONE = 7
+STATE_PART_DATA_START = 8
+STATE_PART_DATA = 9
+STATE_PART_DATA_END = 10
+STATE_END = 11
+
+STATES = [
+ "START",
+ "START_BOUNDARY", "HEADER_FIELD_START", "HEADER_FIELD", "HEADER_VALUE_START", "HEADER_VALUE",
+ "HEADER_VALUE_ALMOST_DONE", "HEADRES_ALMOST_DONE", "PART_DATA_START", "PART_DATA", "PART_DATA_END", "END"
+]
+
+
+# Flags for the multipart parser.
+FLAG_PART_BOUNDARY = 1
+FLAG_LAST_BOUNDARY = 2
+
+# Get constants. Since iterating over a str on Python 2 gives you a 1-length
+# string, but iterating over a bytes object on Python 3 gives you an integer,
+# we need to save these constants.
+CR = b'\r'[0]
+LF = b'\n'[0]
+COLON = b':'[0]
+SPACE = b' '[0]
+HYPHEN = b'-'[0]
+AMPERSAND = b'&'[0]
+SEMICOLON = b';'[0]
+LOWER_A = b'a'[0]
+LOWER_Z = b'z'[0]
+NULL = b'\x00'[0]
+
+# Lower-casing a character is different, because of the difference between
+# str on Py2, and bytes on Py3. Same with getting the ordinal value of a byte,
+# and joining a list of bytes together.
+# These functions abstract that.
+lower_char = lambda c: c | 0x20
+ord_char = lambda c: c
+join_bytes = lambda b: bytes(list(b))
+
+# These are regexes for parsing header values.
+SPECIAL_CHARS = re.escape(b'()<>@,;:\\"/[]?={} \t')
+QUOTED_STR = br'"(?:\\.|[^"])*"'
+VALUE_STR = br'(?:[^' + SPECIAL_CHARS + br']+|' + QUOTED_STR + br')'
+OPTION_RE_STR = (
+ br'(?:;|^)\s*([^' + SPECIAL_CHARS + br']+)\s*=\s*(' + VALUE_STR + br')'
+)
+OPTION_RE = re.compile(OPTION_RE_STR)
+QUOTE = b'"'[0]
+
+
+def parse_options_header(value):
+ """
+ Parses a Content-Type header into a value in the following format:
+ (content_type, {parameters})
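+
+    A minimal sketch of the expected output (the header value shown is just
+    an illustrative example)::
+
+        ctype, opts = parse_options_header(b'text/html; charset="utf-8"')
+        # ctype == b'text/html'
+        # opts == {b'charset': b'utf-8'}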
+ """
+ if not value:
+ return (b'', {})
+
+ # If we are passed a string, we assume that it conforms to WSGI and does
+ # not contain any code point that's not in latin-1.
+ if isinstance(value, str): # pragma: no cover
+ value = value.encode('latin-1')
+
+ # If we have no options, return the string as-is.
+ if b';' not in value:
+ return (value.lower().strip(), {})
+
+ # Split at the first semicolon, to get our value and then options.
+ ctype, rest = value.split(b';', 1)
+ options = {}
+
+ # Parse the options.
+ for match in OPTION_RE.finditer(rest):
+ key = match.group(1).lower()
+ value = match.group(2)
+ if value[0] == QUOTE and value[-1] == QUOTE:
+ # Unquote the value.
+ value = value[1:-1]
+ value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
+
+ # If the value is a filename, we need to fix a bug on IE6 that sends
+ # the full file path instead of the filename.
+ if key == b'filename':
+ if value[1:3] == b':\\' or value[:2] == b'\\\\':
+ value = value.split(b'\\')[-1]
+
+ options[key] = value
+
+ return ctype, options
+
+
+class Field:
+ """A Field object represents a (parsed) form field. It represents a single
+ field with a corresponding name and value.
+
+ The name that a :class:`Field` will be instantiated with is the same name
+ that would be found in the following HTML::
+
+        <input name="name_goes_here" type="text"/>
+
+ This class defines two methods, :meth:`on_data` and :meth:`on_end`, that
+ will be called when data is written to the Field, and when the Field is
+ finalized, respectively.
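+
+    A minimal usage sketch (the field name and value are illustrative)::
+
+        f = Field.from_value(b'foo', b'bar')
+        assert f.field_name == b'foo'
+        assert f.value == b'bar'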
+
+ :param name: the name of the form field
+ """
+ def __init__(self, name):
+ self._name = name
+ self._value = []
+
+ # We cache the joined version of _value for speed.
+ self._cache = _missing
+
+ @classmethod
+ def from_value(klass, name, value):
+ """Create an instance of a :class:`Field`, and set the corresponding
+ value - either None or an actual value. This method will also
+ finalize the Field itself.
+
+ :param name: the name of the form field
+ :param value: the value of the form field - either a bytestring or
+ None
+ """
+
+ f = klass(name)
+ if value is None:
+ f.set_none()
+ else:
+ f.write(value)
+ f.finalize()
+ return f
+
+ def write(self, data):
+ """Write some data into the form field.
+
+ :param data: a bytestring
+ """
+ return self.on_data(data)
+
+ def on_data(self, data):
+ """This method is a callback that will be called whenever data is
+ written to the Field.
+
+ :param data: a bytestring
+ """
+ self._value.append(data)
+ self._cache = _missing
+ return len(data)
+
+ def on_end(self):
+ """This method is called whenever the Field is finalized.
+ """
+ if self._cache is _missing:
+ self._cache = b''.join(self._value)
+
+ def finalize(self):
+ """Finalize the form field.
+ """
+ self.on_end()
+
+ def close(self):
+ """Close the Field object. This will free any underlying cache.
+ """
+ # Free our value array.
+ if self._cache is _missing:
+ self._cache = b''.join(self._value)
+
+ del self._value
+
+ def set_none(self):
+ """Some fields in a querystring can possibly have a value of None - for
+ example, the string "foo&bar=&baz=asdf" will have a field with the
+ name "foo" and value None, one with name "bar" and value "", and one
+ with name "baz" and value "asdf". Since the write() interface doesn't
+ support writing None, this function will set the field value to None.
+ """
+ self._cache = None
+
+ @property
+ def field_name(self):
+ """This property returns the name of the field."""
+ return self._name
+
+ @property
+ def value(self):
+ """This property returns the value of the form field."""
+ if self._cache is _missing:
+ self._cache = b''.join(self._value)
+
+ return self._cache
+
+ def __eq__(self, other):
+ if isinstance(other, Field):
+ return (
+ self.field_name == other.field_name and
+ self.value == other.value
+ )
+ else:
+ return NotImplemented
+
+ def __repr__(self):
+ if len(self.value) > 97:
+ # We get the repr, and then insert three dots before the final
+ # quote.
+ v = repr(self.value[:97])[:-1] + "...'"
+ else:
+ v = repr(self.value)
+
+ return "{}(field_name={!r}, value={})".format(
+ self.__class__.__name__,
+ self.field_name,
+ v
+ )
+
+
+class File:
+ """This class represents an uploaded file. It handles writing file data to
+ either an in-memory file or a temporary file on-disk, if the optional
+ threshold is passed.
+
+ There are some options that can be passed to the File to change behavior
+ of the class. Valid options are as follows:
+
+ .. list-table::
+ :widths: 15 5 5 30
+ :header-rows: 1
+
+ * - Name
+ - Type
+ - Default
+ - Description
+ * - UPLOAD_DIR
+ - `str`
+ - None
+ - The directory to store uploaded files in. If this is None, a
+ temporary file will be created in the system's standard location.
+ * - UPLOAD_DELETE_TMP
+ - `bool`
+ - True
+ - Delete automatically created TMP file
+ * - UPLOAD_KEEP_FILENAME
+ - `bool`
+ - False
+ - Whether or not to keep the filename of the uploaded file. If True,
+ then the filename will be converted to a safe representation (e.g.
+ by removing any invalid path segments), and then saved with the
+           same name. Otherwise, a temporary name will be used.
+ * - UPLOAD_KEEP_EXTENSIONS
+ - `bool`
+ - False
+ - Whether or not to keep the uploaded file's extension. If False, the
+ file will be saved with the default temporary extension (usually
+ ".tmp"). Otherwise, the file's extension will be maintained. Note
+ that this will properly combine with the UPLOAD_KEEP_FILENAME
+ setting.
+ * - MAX_MEMORY_FILE_SIZE
+ - `int`
+ - 1 MiB
+ - The maximum number of bytes of a File to keep in memory. By
+           default, the contents of a File are kept in memory until a certain
+           limit is reached, after which the contents of the File are written
+           to a temporary file. This behavior can be disabled by setting this
+           value to an appropriately large value (or, for example, infinity,
+           such as `float('inf')`).
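+
+    A minimal usage sketch (the file name, field name and config value are
+    illustrative)::
+
+        f = File(b'upload.bin', field_name=b'data',
+                 config={'MAX_MEMORY_FILE_SIZE': 1024})
+        f.write(b'some bytes')  # stays in memory until more than 1024 bytes
+        f.finalize()
+        f.close()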
+
+ :param file_name: The name of the file that this :class:`File` represents
+
+ :param field_name: The field name that uploaded this file. Note that this
+ can be None, if, for example, the file was uploaded
+ with Content-Type application/octet-stream
+
+ :param config: The configuration for this File. See above for valid
+ configuration keys and their corresponding values.
+ """
+ def __init__(self, file_name, field_name=None, config={}):
+ # Save configuration, set other variables default.
+ self.logger = logging.getLogger(__name__)
+ self._config = config
+ self._in_memory = True
+ self._bytes_written = 0
+ self._fileobj = BytesIO()
+
+ # Save the provided field/file name.
+ self._field_name = field_name
+ self._file_name = file_name
+
+ # Our actual file name is None by default, since, depending on our
+ # config, we may not actually use the provided name.
+ self._actual_file_name = None
+
+ # Split the extension from the filename.
+ if file_name is not None:
+ base, ext = os.path.splitext(file_name)
+ self._file_base = base
+ self._ext = ext
+
+ @property
+ def field_name(self):
+ """The form field associated with this file. May be None if there isn't
+ one, for example when we have an application/octet-stream upload.
+ """
+ return self._field_name
+
+ @property
+ def file_name(self):
+ """The file name given in the upload request.
+ """
+ return self._file_name
+
+ @property
+ def actual_file_name(self):
+ """The file name that this file is saved as. Will be None if it's not
+ currently saved on disk.
+ """
+ return self._actual_file_name
+
+ @property
+ def file_object(self):
+ """The file object that we're currently writing to. Note that this
+ will either be an instance of a :class:`io.BytesIO`, or a regular file
+ object.
+ """
+ return self._fileobj
+
+ @property
+ def size(self):
+ """The total size of this file, counted as the number of bytes that
+ currently have been written to the file.
+ """
+ return self._bytes_written
+
+ @property
+ def in_memory(self):
+ """A boolean representing whether or not this file object is currently
+ stored in-memory or on-disk.
+ """
+ return self._in_memory
+
+ def flush_to_disk(self):
+ """If the file is already on-disk, do nothing. Otherwise, copy from
+ the in-memory buffer to a disk file, and then reassign our internal
+ file object to this new disk file.
+
+ Note that if you attempt to flush a file that is already on-disk, a
+ warning will be logged to this module's logger.
+ """
+ if not self._in_memory:
+ self.logger.warning(
+ "Trying to flush to disk when we're not in memory"
+ )
+ return
+
+ # Go back to the start of our file.
+ self._fileobj.seek(0)
+
+ # Open a new file.
+ new_file = self._get_disk_file()
+
+ # Copy the file objects.
+ shutil.copyfileobj(self._fileobj, new_file)
+
+ # Seek to the new position in our new file.
+ new_file.seek(self._bytes_written)
+
+ # Reassign the fileobject.
+ old_fileobj = self._fileobj
+ self._fileobj = new_file
+
+ # We're no longer in memory.
+ self._in_memory = False
+
+ # Close the old file object.
+ old_fileobj.close()
+
+ def _get_disk_file(self):
+ """This function is responsible for getting a file object on-disk for us.
+ """
+ self.logger.info("Opening a file on disk")
+
+ file_dir = self._config.get('UPLOAD_DIR')
+ keep_filename = self._config.get('UPLOAD_KEEP_FILENAME', False)
+ keep_extensions = self._config.get('UPLOAD_KEEP_EXTENSIONS', False)
+ delete_tmp = self._config.get('UPLOAD_DELETE_TMP', True)
+
+ # If we have a directory and are to keep the filename...
+ if file_dir is not None and keep_filename:
+ self.logger.info("Saving with filename in: %r", file_dir)
+
+ # Build our filename.
+ # TODO: what happens if we don't have a filename?
+ fname = self._file_base
+ if keep_extensions:
+ fname = fname + self._ext
+
+ path = os.path.join(file_dir, fname)
+ try:
+ self.logger.info("Opening file: %r", path)
+ tmp_file = open(path, 'w+b')
+ except OSError as e:
+ tmp_file = None
+
+ self.logger.exception("Error opening temporary file")
+ raise FileError("Error opening temporary file: %r" % path)
+ else:
+ # Build options array.
+ # Note that on Python 3, tempfile doesn't support byte names. We
+ # encode our paths using the default filesystem encoding.
+ options = {}
+ if keep_extensions:
+ ext = self._ext
+ if isinstance(ext, bytes):
+ ext = ext.decode(sys.getfilesystemencoding())
+
+ options['suffix'] = ext
+ if file_dir is not None:
+ d = file_dir
+ if isinstance(d, bytes):
+ d = d.decode(sys.getfilesystemencoding())
+
+ options['dir'] = d
+ options['delete'] = delete_tmp
+
+ # Create a temporary (named) file with the appropriate settings.
+ self.logger.info("Creating a temporary file with options: %r",
+ options)
+ try:
+ tmp_file = tempfile.NamedTemporaryFile(**options)
+ except OSError:
+ self.logger.exception("Error creating named temporary file")
+ raise FileError("Error creating named temporary file")
+
+ fname = tmp_file.name
+
+ # Encode filename as bytes.
+ if isinstance(fname, str):
+ fname = fname.encode(sys.getfilesystemencoding())
+
+ self._actual_file_name = fname
+ return tmp_file
+
+ def write(self, data):
+ """Write some data to the File.
+
+ :param data: a bytestring
+ """
+ return self.on_data(data)
+
+ def on_data(self, data):
+ """This method is a callback that will be called whenever data is
+ written to the File.
+
+ :param data: a bytestring
+ """
+ pos = self._fileobj.tell()
+ bwritten = self._fileobj.write(data)
+ # true file objects write returns None
+ if bwritten is None:
+ bwritten = self._fileobj.tell() - pos
+
+ # If the bytes written isn't the same as the length, just return.
+ if bwritten != len(data):
+ self.logger.warning("bwritten != len(data) (%d != %d)", bwritten,
+ len(data))
+ return bwritten
+
+ # Keep track of how many bytes we've written.
+ self._bytes_written += bwritten
+
+ # If we're in-memory and are over our limit, we create a file.
+ if (self._in_memory and
+ self._config.get('MAX_MEMORY_FILE_SIZE') is not None and
+ (self._bytes_written >
+ self._config.get('MAX_MEMORY_FILE_SIZE'))):
+ self.logger.info("Flushing to disk")
+ self.flush_to_disk()
+
+ # Return the number of bytes written.
+ return bwritten
+
+ def on_end(self):
+ """This method is called whenever the Field is finalized.
+ """
+ # Flush the underlying file object
+ self._fileobj.flush()
+
+ def finalize(self):
+ """Finalize the form file. This will not close the underlying file,
+ but simply signal that we are finished writing to the File.
+ """
+ self.on_end()
+
+ def close(self):
+ """Close the File object. This will actually close the underlying
+ file object (whether it's a :class:`io.BytesIO` or an actual file
+ object).
+ """
+ self._fileobj.close()
+
+ def __repr__(self):
+ return "{}(file_name={!r}, field_name={!r})".format(
+ self.__class__.__name__,
+ self.file_name,
+ self.field_name
+ )
+
+
+class BaseParser:
+ """This class is the base class for all parsers. It contains the logic for
+ calling and adding callbacks.
+
+ A callback can be one of two different forms. "Notification callbacks" are
+ callbacks that are called when something happens - for example, when a new
+ part of a multipart message is encountered by the parser. "Data callbacks"
+ are called when we get some sort of data - for example, part of the body of
+ a multipart chunk. Notification callbacks are called with no parameters,
+ whereas data callbacks are called with three, as follows::
+
+ data_callback(data, start, end)
+
+ The "data" parameter is a bytestring (i.e. "foo" on Python 2, or b"foo" on
+ Python 3). "start" and "end" are integer indexes into the "data" string
+ that represent the data of interest. Thus, in a data callback, the slice
+ `data[start:end]` represents the data that the callback is "interested in".
+ The callback is not passed a copy of the data, since copying severely hurts
+ performance.
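+
+    For example, registering a data callback might look like the following
+    sketch, where ``parser`` stands in for any concrete parser instance::
+
+        chunks = []
+
+        def on_data(data, start, end):
+            # Copy the slice if it needs to outlive this call.
+            chunks.append(data[start:end])
+
+        parser.set_callback('data', on_data)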
+ """
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+
+ def callback(self, name, data=None, start=None, end=None):
+ """This function calls a provided callback with some data. If the
+ callback is not set, will do nothing.
+
+ :param name: The name of the callback to call (as a string).
+
+ :param data: Data to pass to the callback. If None, then it is
+ assumed that the callback is a notification callback,
+ and no parameters are given.
+
+        :param start: An integer that is passed to the data callback.
+
+        :param end: An integer that is passed to the data callback.
+ """
+ name = "on_" + name
+ func = self.callbacks.get(name)
+ if func is None:
+ return
+
+ # Depending on whether we're given a buffer...
+ if data is not None:
+ # Don't do anything if we have start == end.
+ if start is not None and start == end:
+ return
+
+ self.logger.debug("Calling %s with data[%d:%d]", name, start, end)
+ func(data, start, end)
+ else:
+ self.logger.debug("Calling %s with no data", name)
+ func()
+
+ def set_callback(self, name, new_func):
+ """Update the function for a callback. Removes from the callbacks dict
+ if new_func is None.
+
+ :param name: The name of the callback to call (as a string).
+
+ :param new_func: The new function for the callback. If None, then the
+ callback will be removed (with no error if it does not
+ exist).
+ """
+ if new_func is None:
+ self.callbacks.pop('on_' + name, None)
+ else:
+ self.callbacks['on_' + name] = new_func
+
+ def close(self):
+ pass # pragma: no cover
+
+ def finalize(self):
+ pass # pragma: no cover
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+
+class OctetStreamParser(BaseParser):
+ """This parser parses an octet-stream request body and calls callbacks when
+ incoming data is received. Callbacks are as follows:
+
+ .. list-table::
+ :widths: 15 10 30
+ :header-rows: 1
+
+ * - Callback Name
+ - Parameters
+ - Description
+ * - on_start
+ - None
+ - Called when the first data is parsed.
+ * - on_data
+ - data, start, end
+ - Called for each data chunk that is parsed.
+ * - on_end
+ - None
+ - Called when the parser is finished parsing all data.
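+
+    A minimal usage sketch (the body written here is illustrative)::
+
+        chunks = []
+        parser = OctetStreamParser(callbacks={
+            'on_data': lambda data, start, end: chunks.append(data[start:end]),
+        })
+        parser.write(b'hello')
+        parser.finalize()
+        # chunks == [b'hello']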
+
+ :param callbacks: A dictionary of callbacks. See the documentation for
+ :class:`BaseParser`.
+
+ :param max_size: The maximum size of body to parse. Defaults to infinity -
+ i.e. unbounded.
+ """
+ def __init__(self, callbacks={}, max_size=float('inf')):
+ super().__init__()
+ self.callbacks = callbacks
+ self._started = False
+
+ if not isinstance(max_size, Number) or max_size < 1:
+ raise ValueError("max_size must be a positive number, not %r" %
+ max_size)
+ self.max_size = max_size
+ self._current_size = 0
+
+ def write(self, data):
+ """Write some data to the parser, which will perform size verification,
+ and then pass the data to the underlying callback.
+
+ :param data: a bytestring
+ """
+ if not self._started:
+ self.callback('start')
+ self._started = True
+
+ # Truncate data length.
+ data_len = len(data)
+ if (self._current_size + data_len) > self.max_size:
+ # We truncate the length of data that we are to process.
+ new_size = int(self.max_size - self._current_size)
+ self.logger.warning("Current size is %d (max %d), so truncating "
+ "data length from %d to %d",
+ self._current_size, self.max_size, data_len,
+ new_size)
+ data_len = new_size
+
+ # Increment size, then callback, in case there's an exception.
+ self._current_size += data_len
+ self.callback('data', data, 0, data_len)
+ return data_len
+
+ def finalize(self):
+ """Finalize this parser, which signals to that we are finished parsing,
+ and sends the on_end callback.
+ """
+ self.callback('end')
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+
+class QuerystringParser(BaseParser):
+ """This is a streaming querystring parser. It will consume data, and call
+ the callbacks given when it has data.
+
+ .. list-table::
+ :widths: 15 10 30
+ :header-rows: 1
+
+ * - Callback Name
+ - Parameters
+ - Description
+ * - on_field_start
+ - None
+ - Called when a new field is encountered.
+ * - on_field_name
+ - data, start, end
+ - Called when a portion of a field's name is encountered.
+ * - on_field_data
+ - data, start, end
+ - Called when a portion of a field's data is encountered.
+ * - on_field_end
+ - None
+ - Called when the end of a field is encountered.
+ * - on_end
+ - None
+ - Called when the parser is finished parsing all data.
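+
+    A minimal usage sketch (the querystring and callback bodies are
+    illustrative)::
+
+        fields = {}
+        name_parts, data_parts = [], []
+
+        def on_field_name(data, start, end):
+            name_parts.append(data[start:end])
+
+        def on_field_data(data, start, end):
+            data_parts.append(data[start:end])
+
+        def on_field_end():
+            fields[b''.join(name_parts)] = b''.join(data_parts)
+            del name_parts[:]
+            del data_parts[:]
+
+        parser = QuerystringParser(callbacks={
+            'on_field_name': on_field_name,
+            'on_field_data': on_field_data,
+            'on_field_end': on_field_end,
+        })
+        parser.write(b'foo=bar&baz=qux')
+        parser.finalize()
+        # fields == {b'foo': b'bar', b'baz': b'qux'}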
+
+ :param callbacks: A dictionary of callbacks. See the documentation for
+ :class:`BaseParser`.
+
+ :param strict_parsing: Whether or not to parse the body strictly. Defaults
+ to False. If this is set to True, then the behavior
+                           of the parser changes as follows: if a field
+ has a value with an equal sign (e.g. "foo=bar", or
+ "foo="), it is always included. If a field has no
+ equals sign (e.g. "...&name&..."), it will be
+ treated as an error if 'strict_parsing' is True,
+ otherwise included. If an error is encountered,
+ then a
+ :class:`multipart.exceptions.QuerystringParseError`
+ will be raised.
+
+ :param max_size: The maximum size of body to parse. Defaults to infinity -
+ i.e. unbounded.
+ """
+ def __init__(self, callbacks={}, strict_parsing=False,
+ max_size=float('inf')):
+ super().__init__()
+ self.state = STATE_BEFORE_FIELD
+ self._found_sep = False
+
+ self.callbacks = callbacks
+
+ # Max-size stuff
+ if not isinstance(max_size, Number) or max_size < 1:
+ raise ValueError("max_size must be a positive number, not %r" %
+ max_size)
+ self.max_size = max_size
+ self._current_size = 0
+
+ # Should parsing be strict?
+ self.strict_parsing = strict_parsing
+
+ def write(self, data):
+ """Write some data to the parser, which will perform size verification,
+ parse into either a field name or value, and then pass the
+ corresponding data to the underlying callback. If an error is
+ encountered while parsing, a QuerystringParseError will be raised. The
+ "offset" attribute of the raised exception will be set to the offset in
+ the input data chunk (NOT the overall stream) that caused the error.
+
+ :param data: a bytestring
+ """
+ # Handle sizing.
+ data_len = len(data)
+ if (self._current_size + data_len) > self.max_size:
+ # We truncate the length of data that we are to process.
+ new_size = int(self.max_size - self._current_size)
+ self.logger.warning("Current size is %d (max %d), so truncating "
+ "data length from %d to %d",
+ self._current_size, self.max_size, data_len,
+ new_size)
+ data_len = new_size
+
+ l = 0
+ try:
+ l = self._internal_write(data, data_len)
+ finally:
+ self._current_size += l
+
+ return l
+
+ def _internal_write(self, data, length):
+ state = self.state
+ strict_parsing = self.strict_parsing
+ found_sep = self._found_sep
+
+ i = 0
+ while i < length:
+ ch = data[i]
+
+ # Depending on our state...
+ if state == STATE_BEFORE_FIELD:
+ # If the 'found_sep' flag is set, we've already encountered
+ # and skipped a single separator. If so, we check our strict
+ # parsing flag and decide what to do. Otherwise, we haven't
+ # yet reached a separator, and thus, if we do, we need to skip
+ # it as it will be the boundary between fields that's supposed
+ # to be there.
+ if ch == AMPERSAND or ch == SEMICOLON:
+ if found_sep:
+ # If we're parsing strictly, we disallow blank chunks.
+ if strict_parsing:
+ e = QuerystringParseError(
+ "Skipping duplicate ampersand/semicolon at "
+ "%d" % i
+ )
+ e.offset = i
+ raise e
+ else:
+ self.logger.debug("Skipping duplicate ampersand/"
+ "semicolon at %d", i)
+ else:
+ # This case is when we're skipping the (first)
+ # separator between fields, so we just set our flag
+ # and continue on.
+ found_sep = True
+ else:
+ # Emit a field-start event, and go to that state. Also,
+ # reset the "found_sep" flag, for the next time we get to
+ # this state.
+ self.callback('field_start')
+ i -= 1
+ state = STATE_FIELD_NAME
+ found_sep = False
+
+ elif state == STATE_FIELD_NAME:
+ # Try and find a separator - we ensure that, if we do, we only
+ # look for the equal sign before it.
+ sep_pos = data.find(b'&', i)
+ if sep_pos == -1:
+ sep_pos = data.find(b';', i)
+
+ # See if we can find an equals sign in the remaining data. If
+ # so, we can immediately emit the field name and jump to the
+ # data state.
+ if sep_pos != -1:
+ equals_pos = data.find(b'=', i, sep_pos)
+ else:
+ equals_pos = data.find(b'=', i)
+
+ if equals_pos != -1:
+ # Emit this name.
+ self.callback('field_name', data, i, equals_pos)
+
+ # Jump i to this position. Note that it will then have 1
+ # added to it below, which means the next iteration of this
+ # loop will inspect the character after the equals sign.
+ i = equals_pos
+ state = STATE_FIELD_DATA
+ else:
+ # No equals sign found.
+ if not strict_parsing:
+ # See also comments in the STATE_FIELD_DATA case below.
+ # If we found the separator, we emit the name and just
+ # end - there's no data callback at all (not even with
+ # a blank value).
+ if sep_pos != -1:
+ self.callback('field_name', data, i, sep_pos)
+ self.callback('field_end')
+
+ i = sep_pos - 1
+ state = STATE_BEFORE_FIELD
+ else:
+ # Otherwise, no separator in this block, so the
+ # rest of this chunk must be a name.
+ self.callback('field_name', data, i, length)
+ i = length
+
+ else:
+ # We're parsing strictly. If we find a separator,
+ # this is an error - we require an equals sign.
+ if sep_pos != -1:
+ e = QuerystringParseError(
+ "When strict_parsing is True, we require an "
+ "equals sign in all field chunks. Did not "
+ "find one in the chunk that starts at %d" %
+ (i,)
+ )
+ e.offset = i
+ raise e
+
+ # No separator in the rest of this chunk, so it's just
+ # a field name.
+ self.callback('field_name', data, i, length)
+ i = length
+
+ elif state == STATE_FIELD_DATA:
+ # Try finding either an ampersand or a semicolon after this
+ # position.
+ sep_pos = data.find(b'&', i)
+ if sep_pos == -1:
+ sep_pos = data.find(b';', i)
+
+ # If we found it, callback this bit as data and then go back
+ # to expecting to find a field.
+ if sep_pos != -1:
+ self.callback('field_data', data, i, sep_pos)
+ self.callback('field_end')
+
+ # Note that we go to the separator, which brings us to the
+ # "before field" state. This allows us to properly emit
+ # "field_start" events only when we actually have data for
+ # a field of some sort.
+ i = sep_pos - 1
+ state = STATE_BEFORE_FIELD
+
+ # Otherwise, emit the rest as data and finish.
+ else:
+ self.callback('field_data', data, i, length)
+ i = length
+
+ else: # pragma: no cover (error case)
+ msg = "Reached an unknown state %d at %d" % (state, i)
+ self.logger.warning(msg)
+ e = QuerystringParseError(msg)
+ e.offset = i
+ raise e
+
+ i += 1
+
+ self.state = state
+ self._found_sep = found_sep
+ return len(data)
+
+ def finalize(self):
+ """Finalize this parser, which signals to that we are finished parsing,
+ if we're still in the middle of a field, an on_field_end callback, and
+ then the on_end callback.
+ """
+ # If we're currently in the middle of a field, we finish it.
+ if self.state == STATE_FIELD_DATA:
+ self.callback('field_end')
+ self.callback('end')
+
+ def __repr__(self):
+ return "{}(strict_parsing={!r}, max_size={!r})".format(
+ self.__class__.__name__,
+ self.strict_parsing, self.max_size
+ )
+
+
+class MultipartParser(BaseParser):
+ """This class is a streaming multipart/form-data parser.
+
+ .. list-table::
+ :widths: 15 10 30
+ :header-rows: 1
+
+ * - Callback Name
+ - Parameters
+ - Description
+ * - on_part_begin
+ - None
+ - Called when a new part of the multipart message is encountered.
+ * - on_part_data
+ - data, start, end
+ - Called when a portion of a part's data is encountered.
+ * - on_part_end
+ - None
+ - Called when the end of a part is reached.
+ * - on_header_begin
+ - None
+ - Called when we've found a new header in a part of a multipart
+ message
+ * - on_header_field
+ - data, start, end
+ - Called each time an additional portion of a header is read (i.e. the
+ part of the header that is before the colon; the "Foo" in
+ "Foo: Bar").
+ * - on_header_value
+ - data, start, end
+ - Called when we get data for a header.
+ * - on_header_end
+ - None
+ - Called when the current header is finished - i.e. we've reached the
+ newline at the end of the header.
+ * - on_headers_finished
+ - None
+ - Called when all headers are finished, and before the part data
+ starts.
+ * - on_end
+ - None
+ - Called when the parser is finished parsing all data.
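+
+    A minimal instantiation sketch (the boundary and the callback are
+    illustrative; ``body_chunks`` stands in for the bytestring chunks of the
+    request body)::
+
+        parts = []
+
+        def on_part_data(data, start, end):
+            parts.append(data[start:end])
+
+        parser = MultipartParser(b'boundary123', callbacks={
+            'on_part_data': on_part_data,
+        })
+        for chunk in body_chunks:
+            parser.write(chunk)
+        parser.finalize()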
+
+
+ :param boundary: The multipart boundary. This is required, and must match
+ what is given in the HTTP request - usually in the
+ Content-Type header.
+
+ :param callbacks: A dictionary of callbacks. See the documentation for
+ :class:`BaseParser`.
+
+ :param max_size: The maximum size of body to parse. Defaults to infinity -
+ i.e. unbounded.
+ """
+
+ def __init__(self, boundary, callbacks={}, max_size=float('inf')):
+ # Initialize parser state.
+ super().__init__()
+ self.state = STATE_START
+ self.index = self.flags = 0
+
+ self.callbacks = callbacks
+
+ if not isinstance(max_size, Number) or max_size < 1:
+ raise ValueError("max_size must be a positive number, not %r" %
+ max_size)
+ self.max_size = max_size
+ self._current_size = 0
+
+ # Setup marks. These are used to track the state of data received.
+ self.marks = {}
+
+ # TODO: Actually use this rather than the dumb version we currently use
+ # # Precompute the skip table for the Boyer-Moore-Horspool algorithm.
+ # skip = [len(boundary) for x in range(256)]
+ # for i in range(len(boundary) - 1):
+ # skip[ord_char(boundary[i])] = len(boundary) - i - 1
+ #
+ # # We use a tuple since it's a constant, and marginally faster.
+ # self.skip = tuple(skip)
+
+ # Save our boundary.
+ if isinstance(boundary, str): # pragma: no cover
+ boundary = boundary.encode('latin-1')
+ self.boundary = b'\r\n--' + boundary
+
+ # Get a set of characters that belong to our boundary.
+ self.boundary_chars = frozenset(self.boundary)
+
+ # We also create a lookbehind list.
+ # Note: the +8 is since we can have, at maximum, "\r\n--" + boundary +
+ # "--\r\n" at the final boundary, and the length of '\r\n--' and
+ # '--\r\n' is 8 bytes.
+ self.lookbehind = [NULL for x in range(len(boundary) + 8)]
+
+ def write(self, data):
+ """Write some data to the parser, which will perform size verification,
+ and then parse the data into the appropriate location (e.g. header,
+ data, etc.), and pass this on to the underlying callback. If an error
+ is encountered, a MultipartParseError will be raised. The "offset"
+ attribute on the raised exception will be set to the offset of the byte
+ in the input chunk that caused the error.
+
+ :param data: a bytestring
+ """
+ # Handle sizing.
+ data_len = len(data)
+ if (self._current_size + data_len) > self.max_size:
+ # We truncate the length of data that we are to process.
+ new_size = int(self.max_size - self._current_size)
+ self.logger.warning("Current size is %d (max %d), so truncating "
+ "data length from %d to %d",
+ self._current_size, self.max_size, data_len,
+ new_size)
+ data_len = new_size
+
+ l = 0
+ try:
+ l = self._internal_write(data, data_len)
+ finally:
+ self._current_size += l
+
+ return l
+
+ def _internal_write(self, data, length):
+ # Get values from locals.
+ boundary = self.boundary
+
+ # Get our state, flags and index. These are persisted between calls to
+ # this function.
+ state = self.state
+ index = self.index
+ flags = self.flags
+
+ # Our index defaults to 0.
+ i = 0
+
+ # Set a mark.
+ def set_mark(name):
+ self.marks[name] = i
+
+ # Remove a mark.
+ def delete_mark(name, reset=False):
+ self.marks.pop(name, None)
+
+ # Helper function that makes calling a callback with data easier. The
+ # 'remaining' parameter will callback from the marked value until the
+ # end of the buffer, and reset the mark, instead of deleting it. This
+ # is used at the end of the function to call our callbacks with any
+ # remaining data in this chunk.
+ def data_callback(name, remaining=False):
+ marked_index = self.marks.get(name)
+ if marked_index is None:
+ return
+
+ # If we're getting remaining data, we ignore the current i value
+ # and just call with the remaining data.
+ if remaining:
+ self.callback(name, data, marked_index, length)
+ self.marks[name] = 0
+
+ # Otherwise, we call it from the mark to the current byte we're
+ # processing.
+ else:
+ self.callback(name, data, marked_index, i)
+ self.marks.pop(name, None)
+
+ # For each byte...
+ while i < length:
+ c = data[i]
+
+ if state == STATE_START:
+ # Skip leading newlines
+ if c == CR or c == LF:
+ i += 1
+ self.logger.debug("Skipping leading CR/LF at %d", i)
+ continue
+
+                # index is used as an index into our boundary. Set it to 0.
+ index = 0
+
+ # Move to the next state, but decrement i so that we re-process
+ # this character.
+ state = STATE_START_BOUNDARY
+ i -= 1
+
+ elif state == STATE_START_BOUNDARY:
+ # Check to ensure that the last 2 characters in our boundary
+ # are CRLF.
+ if index == len(boundary) - 2:
+ if c != CR:
+ # Error!
+ msg = "Did not find CR at end of boundary (%d)" % (i,)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ index += 1
+
+ elif index == len(boundary) - 2 + 1:
+ if c != LF:
+ msg = "Did not find LF at end of boundary (%d)" % (i,)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ # The index is now used for indexing into our boundary.
+ index = 0
+
+ # Callback for the start of a part.
+ self.callback('part_begin')
+
+ # Move to the next character and state.
+ state = STATE_HEADER_FIELD_START
+
+ else:
+ # Check to ensure our boundary matches
+ if c != boundary[index + 2]:
+ msg = "Did not find boundary character %r at index " \
+ "%d" % (c, index + 2)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ # Increment index into boundary and continue.
+ index += 1
+
+ elif state == STATE_HEADER_FIELD_START:
+ # Mark the start of a header field here, reset the index, and
+ # continue parsing our header field.
+ index = 0
+
+ # Set a mark of our header field.
+ set_mark('header_field')
+
+ # Move to parsing header fields.
+ state = STATE_HEADER_FIELD
+ i -= 1
+
+ elif state == STATE_HEADER_FIELD:
+ # If we've reached a CR at the beginning of a header, it means
+ # that we've reached the second of 2 newlines, and so there are
+ # no more headers to parse.
+ if c == CR:
+ delete_mark('header_field')
+ state = STATE_HEADERS_ALMOST_DONE
+ i += 1
+ continue
+
+ # Increment our index in the header.
+ index += 1
+
+ # Do nothing if we encounter a hyphen.
+ if c == HYPHEN:
+ pass
+
+ # If we've reached a colon, we're done with this header.
+ elif c == COLON:
+ # A 0-length header is an error.
+ if index == 1:
+ msg = "Found 0-length header at %d" % (i,)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ # Call our callback with the header field.
+ data_callback('header_field')
+
+ # Move to parsing the header value.
+ state = STATE_HEADER_VALUE_START
+
+ else:
+ # Lower-case this character, and ensure that it is in fact
+ # a valid letter. If not, it's an error.
+ cl = lower_char(c)
+ if cl < LOWER_A or cl > LOWER_Z:
+ msg = "Found non-alphanumeric character %r in " \
+ "header at %d" % (c, i)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ elif state == STATE_HEADER_VALUE_START:
+ # Skip leading spaces.
+ if c == SPACE:
+ i += 1
+ continue
+
+ # Mark the start of the header value.
+ set_mark('header_value')
+
+ # Move to the header-value state, reprocessing this character.
+ state = STATE_HEADER_VALUE
+ i -= 1
+
+ elif state == STATE_HEADER_VALUE:
+                # If we've got a CR, we're nearly done with our headers.
+                # Otherwise, we do nothing and just move past this character.
+ if c == CR:
+ data_callback('header_value')
+ self.callback('header_end')
+ state = STATE_HEADER_VALUE_ALMOST_DONE
+
+ elif state == STATE_HEADER_VALUE_ALMOST_DONE:
+ # The last character should be a LF. If not, it's an error.
+ if c != LF:
+ msg = "Did not find LF character at end of header " \
+ "(found %r)" % (c,)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ # Move back to the start of another header. Note that if that
+ # state detects ANOTHER newline, it'll trigger the end of our
+ # headers.
+ state = STATE_HEADER_FIELD_START
+
+ elif state == STATE_HEADERS_ALMOST_DONE:
+                # We're almost done with our headers. This is reached when
+                # we parse a CR at the beginning of a header, so our next
+                # character should be a LF, or it's an error.
+ if c != LF:
+ msg = f"Did not find LF at end of headers (found {c!r})"
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ self.callback('headers_finished')
+ state = STATE_PART_DATA_START
+
+ elif state == STATE_PART_DATA_START:
+ # Mark the start of our part data.
+ set_mark('part_data')
+
+ # Start processing part data, including this character.
+ state = STATE_PART_DATA
+ i -= 1
+
+ elif state == STATE_PART_DATA:
+ # We're processing our part data right now. During this, we
+ # need to efficiently search for our boundary, since any data
+ # on any number of lines can be a part of the current data.
+ # We use the Boyer-Moore-Horspool algorithm to efficiently
+ # search through the remainder of the buffer looking for our
+ # boundary.
+
+ # Save the current value of our index. We use this in case we
+ # find part of a boundary, but it doesn't match fully.
+ prev_index = index
+
+ # Set up variables.
+ boundary_length = len(boundary)
+ boundary_end = boundary_length - 1
+ data_length = length
+ boundary_chars = self.boundary_chars
+
+ # If our index is 0, we're starting a new part, so start our
+ # search.
+ if index == 0:
+ # Search forward until we either hit the end of our buffer,
+ # or reach a character that's in our boundary.
+ i += boundary_end
+ while i < data_length - 1 and data[i] not in boundary_chars:
+ i += boundary_length
+
+ # Reset i back the length of our boundary, which is the
+ # earliest possible location that could be our match (i.e.
+ # if we've just broken out of our loop since we saw the
+ # last character in our boundary)
+ i -= boundary_end
+ c = data[i]
+
+ # Now, we have a couple of cases here. If our index is before
+ # the end of the boundary...
+ if index < boundary_length:
+ # If the character matches...
+ if boundary[index] == c:
+ # If we found a match for our boundary, we send the
+ # existing data.
+ if index == 0:
+ data_callback('part_data')
+
+ # The current character matches, so continue!
+ index += 1
+ else:
+ index = 0
+
+ # Our index is equal to the length of our boundary!
+ elif index == boundary_length:
+ # First we increment it.
+ index += 1
+
+ # Now, if we've reached a newline, we need to set this as
+ # the potential end of our boundary.
+ if c == CR:
+ flags |= FLAG_PART_BOUNDARY
+
+ # Otherwise, if this is a hyphen, we might be at the last
+ # of all boundaries.
+ elif c == HYPHEN:
+ flags |= FLAG_LAST_BOUNDARY
+
+ # Otherwise, we reset our index, since this isn't either a
+ # newline or a hyphen.
+ else:
+ index = 0
+
+ # Our index is right after the part boundary, which should be
+ # a LF.
+ elif index == boundary_length + 1:
+ # If we're at a part boundary (i.e. we've seen a CR
+ # character already)...
+ if flags & FLAG_PART_BOUNDARY:
+ # We need a LF character next.
+ if c == LF:
+ # Unset the part boundary flag.
+ flags &= (~FLAG_PART_BOUNDARY)
+
+ # Callback indicating that we've reached the end of
+ # a part, and are starting a new one.
+ self.callback('part_end')
+ self.callback('part_begin')
+
+ # Move to parsing new headers.
+ index = 0
+ state = STATE_HEADER_FIELD_START
+ i += 1
+ continue
+
+ # We didn't find an LF character, so no match. Reset
+ # our index and clear our flag.
+ index = 0
+ flags &= (~FLAG_PART_BOUNDARY)
+
+ # Otherwise, if we're at the last boundary (i.e. we've
+ # seen a hyphen already)...
+ elif flags & FLAG_LAST_BOUNDARY:
+ # We need a second hyphen here.
+ if c == HYPHEN:
+ # Callback to end the current part, and then the
+ # message.
+ self.callback('part_end')
+ self.callback('end')
+ state = STATE_END
+ else:
+ # No match, so reset index.
+ index = 0
+
+ # If we have an index, we need to keep this byte for later, in
+ # case we can't match the full boundary.
+ if index > 0:
+ self.lookbehind[index - 1] = c
+
+ # Otherwise, our index is 0. If the previous index is not, it
+ # means we reset something, and we need to take the data we
+ # thought was part of our boundary and send it along as actual
+ # data.
+ elif prev_index > 0:
+ # Callback to write the saved data.
+ lb_data = join_bytes(self.lookbehind)
+ self.callback('part_data', lb_data, 0, prev_index)
+
+ # Overwrite our previous index.
+ prev_index = 0
+
+ # Re-set our mark for part data.
+ set_mark('part_data')
+
+ # Re-consider the current character, since this could be
+ # the start of the boundary itself.
+ i -= 1
+
+ elif state == STATE_END:
+ # Do nothing and just consume a byte in the end state.
+ if c not in (CR, LF):
+ self.logger.warning("Consuming a byte '0x%x' in the end state", c)
+
+ else: # pragma: no cover (error case)
+ # We got into a strange state somehow! Just stop processing.
+ msg = "Reached an unknown state %d at %d" % (state, i)
+ self.logger.warning(msg)
+ e = MultipartParseError(msg)
+ e.offset = i
+ raise e
+
+ # Move to the next byte.
+ i += 1
+
+ # We call our callbacks with any remaining data. Note that we pass
+ # the 'remaining' flag, which sets the mark back to 0 instead of
+ # deleting it, if it's found. This is because, if the mark is found
+ # at this point, we assume that there's data for one of these things
+ # that has been parsed, but not yet emitted. And, as such, it implies
+ # that we haven't yet reached the end of this 'thing'. So, by setting
+ # the mark to 0, we cause any data callbacks that take place in future
+ # calls to this function to start from the beginning of that buffer.
+ data_callback('header_field', True)
+ data_callback('header_value', True)
+ data_callback('part_data', True)
+
+ # Save values to locals.
+ self.state = state
+ self.index = index
+ self.flags = flags
+
+ # Return our data length to indicate no errors, and that we processed
+ # all of it.
+ return length
+
+ def finalize(self):
+ """Finalize this parser, which signals to that we are finished parsing.
+
+ Note: It does not currently, but in the future, it will verify that we
+ are in the final state of the parser (i.e. the end of the multipart
+ message is well-formed), and, if not, throw an error.
+ """
+ # TODO: verify that we're in the state STATE_END, otherwise throw an
+ # error or otherwise state that we're not finished parsing.
+ pass
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(boundary={self.boundary!r})"
+
+
+class FormParser:
+ """This class is the all-in-one form parser. Given all the information
+ necessary to parse a form, it will instantiate the correct parser, create
+ the proper :class:`Field` and :class:`File` classes to store the data that
+ is parsed, and call the two given callbacks with each field and file as
+ they become available.
+
+ :param content_type: The Content-Type of the incoming request. This is
+ used to select the appropriate parser.
+
+ :param on_field: The callback to call when a field has been parsed and is
+ ready for usage. See above for parameters.
+
+ :param on_file: The callback to call when a file has been parsed and is
+ ready for usage. See above for parameters.
+
+ :param on_end: An optional callback to call when all fields and files in a
+                   request have been parsed. Can be None.
+
+ :param boundary: If the request is a multipart/form-data request, this
+ should be the boundary of the request, as given in the
+ Content-Type header, as a bytestring.
+
+ :param file_name: If the request is of type application/octet-stream, then
+ the body of the request will not contain any information
+ about the uploaded file. In such cases, you can provide
+ the file name of the uploaded file manually.
+
+ :param FileClass: The class to use for uploaded files. Defaults to
+ :class:`File`, but you can provide your own class if you
+ wish to customize behaviour. The class will be
+ instantiated as FileClass(file_name, field_name), and it
+ must provide the following functions::
+ file_instance.write(data)
+ file_instance.finalize()
+ file_instance.close()
+
+ :param FieldClass: The class to use for uploaded fields. Defaults to
+ :class:`Field`, but you can provide your own class if
+ you wish to customize behaviour. The class will be
+ instantiated as FieldClass(field_name), and it must
+ provide the following functions::
+ field_instance.write(data)
+ field_instance.finalize()
+ field_instance.close()
+
+ :param config: Configuration to use for this FormParser. The default
+ values are taken from the DEFAULT_CONFIG value, and then
+ any keys present in this dictionary will overwrite the
+ default values.
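+
+    A minimal usage sketch (the content type, body and callbacks are
+    illustrative)::
+
+        def on_field(field):
+            print(field.field_name, field.value)
+
+        def on_file(file):
+            print(file.file_name, file.size)
+
+        parser = FormParser('application/x-www-form-urlencoded',
+                            on_field, on_file)
+        parser.write(b'foo=bar')
+        parser.finalize()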
+
+ """
+ #: This is the default configuration for our form parser.
+ #: Note: all file sizes should be in bytes.
+ DEFAULT_CONFIG = {
+ 'MAX_BODY_SIZE': float('inf'),
+ 'MAX_MEMORY_FILE_SIZE': 1 * 1024 * 1024,
+ 'UPLOAD_DIR': None,
+ 'UPLOAD_KEEP_FILENAME': False,
+ 'UPLOAD_KEEP_EXTENSIONS': False,
+
+ # Error on invalid Content-Transfer-Encoding?
+ 'UPLOAD_ERROR_ON_BAD_CTE': False,
+ }
+
+ def __init__(self, content_type, on_field, on_file, on_end=None,
+ boundary=None, file_name=None, FileClass=File,
+ FieldClass=Field, config={}):
+
+ self.logger = logging.getLogger(__name__)
+
+ # Save variables.
+ self.content_type = content_type
+ self.boundary = boundary
+ self.bytes_received = 0
+ self.parser = None
+
+ # Save callbacks.
+ self.on_field = on_field
+ self.on_file = on_file
+ self.on_end = on_end
+
+ # Save classes.
+        self.FileClass = FileClass
+        self.FieldClass = FieldClass
+
+ # Set configuration options.
+ self.config = self.DEFAULT_CONFIG.copy()
+ self.config.update(config)
+
+ # Depending on the Content-Type, we instantiate the correct parser.
+ if content_type == 'application/octet-stream':
+ # Work around the lack of 'nonlocal' in Py2
+ class vars:
+ f = None
+
+ def on_start():
+ vars.f = FileClass(file_name, None, config=self.config)
+
+ def on_data(data, start, end):
+ vars.f.write(data[start:end])
+
+ def on_end():
+ # Finalize the file itself.
+ vars.f.finalize()
+
+ # Call our callback.
+ on_file(vars.f)
+
+ # Call the on-end callback.
+ if self.on_end is not None:
+ self.on_end()
+
+ callbacks = {
+ 'on_start': on_start,
+ 'on_data': on_data,
+ 'on_end': on_end,
+ }
+
+ # Instantiate an octet-stream parser
+ parser = OctetStreamParser(callbacks,
+ max_size=self.config['MAX_BODY_SIZE'])
+
+ elif (content_type == 'application/x-www-form-urlencoded' or
+ content_type == 'application/x-url-encoded'):
+
+ name_buffer = []
+
+ class vars:
+ f = None
+
+ def on_field_start():
+ pass
+
+ def on_field_name(data, start, end):
+ name_buffer.append(data[start:end])
+
+ def on_field_data(data, start, end):
+ if vars.f is None:
+ vars.f = FieldClass(b''.join(name_buffer))
+ del name_buffer[:]
+ vars.f.write(data[start:end])
+
+ def on_field_end():
+ # Finalize and call callback.
+ if vars.f is None:
+ # If we get here, it's because there was no field data.
+ # We create a field, set it to None, and then continue.
+ vars.f = FieldClass(b''.join(name_buffer))
+ del name_buffer[:]
+ vars.f.set_none()
+
+ vars.f.finalize()
+ on_field(vars.f)
+ vars.f = None
+
+ def on_end():
+ if self.on_end is not None:
+ self.on_end()
+
+ # Setup callbacks.
+ callbacks = {
+ 'on_field_start': on_field_start,
+ 'on_field_name': on_field_name,
+ 'on_field_data': on_field_data,
+ 'on_field_end': on_field_end,
+ 'on_end': on_end,
+ }
+
+ # Instantiate parser.
+ parser = QuerystringParser(
+ callbacks=callbacks,
+ max_size=self.config['MAX_BODY_SIZE']
+ )
+
+ elif content_type == 'multipart/form-data':
+ if boundary is None:
+ self.logger.error("No boundary given")
+ raise FormParserError("No boundary given")
+
+ header_name = []
+ header_value = []
+ headers = {}
+
+ # No 'nonlocal' on Python 2 :-(
+ class vars:
+ f = None
+ writer = None
+ is_file = False
+
+ def on_part_begin():
+ pass
+
+ def on_part_data(data, start, end):
+ bytes_processed = vars.writer.write(data[start:end])
+ # TODO: check for error here.
+ return bytes_processed
+
+ def on_part_end():
+ vars.f.finalize()
+ if vars.is_file:
+ on_file(vars.f)
+ else:
+ on_field(vars.f)
+
+ def on_header_field(data, start, end):
+ header_name.append(data[start:end])
+
+ def on_header_value(data, start, end):
+ header_value.append(data[start:end])
+
+ def on_header_end():
+ headers[b''.join(header_name)] = b''.join(header_value)
+ del header_name[:]
+ del header_value[:]
+
+ def on_headers_finished():
+ # Reset the 'is file' flag.
+ vars.is_file = False
+
+ # Parse the content-disposition header.
+ # TODO: handle mixed case
+ content_disp = headers.get(b'Content-Disposition')
+ disp, options = parse_options_header(content_disp)
+
+ # Get the field and filename.
+ field_name = options.get(b'name')
+ file_name = options.get(b'filename')
+ # TODO: check for errors
+
+ # Create the proper class.
+ if file_name is None:
+ vars.f = FieldClass(field_name)
+ else:
+ vars.f = FileClass(file_name, field_name, config=self.config)
+ vars.is_file = True
+
+ # Parse the given Content-Transfer-Encoding to determine what
+ # we need to do with the incoming data.
+ # TODO: check that we properly handle 8bit / 7bit encoding.
+ transfer_encoding = headers.get(b'Content-Transfer-Encoding',
+ b'7bit')
+
+ if (transfer_encoding == b'binary' or
+ transfer_encoding == b'8bit' or
+ transfer_encoding == b'7bit'):
+ vars.writer = vars.f
+
+ elif transfer_encoding == b'base64':
+ vars.writer = Base64Decoder(vars.f)
+
+ elif transfer_encoding == b'quoted-printable':
+ vars.writer = QuotedPrintableDecoder(vars.f)
+
+ else:
+ self.logger.warning("Unknown Content-Transfer-Encoding: "
+ "%r", transfer_encoding)
+ if self.config['UPLOAD_ERROR_ON_BAD_CTE']:
+ raise FormParserError(
+ 'Unknown Content-Transfer-Encoding "{}"'.format(
+ transfer_encoding
+ )
+ )
+ else:
+ # If we aren't erroring, then we just treat this as an
+ # unencoded Content-Transfer-Encoding.
+ vars.writer = vars.f
+
+ def on_end():
+ vars.writer.finalize()
+ if self.on_end is not None:
+ self.on_end()
+
+ # These are our callbacks for the parser.
+ callbacks = {
+ 'on_part_begin': on_part_begin,
+ 'on_part_data': on_part_data,
+ 'on_part_end': on_part_end,
+ 'on_header_field': on_header_field,
+ 'on_header_value': on_header_value,
+ 'on_header_end': on_header_end,
+ 'on_headers_finished': on_headers_finished,
+ 'on_end': on_end,
+ }
+
+ # Instantiate a multipart parser.
+ parser = MultipartParser(boundary, callbacks,
+ max_size=self.config['MAX_BODY_SIZE'])
+
+ else:
+ self.logger.warning("Unknown Content-Type: %r", content_type)
+ raise FormParserError("Unknown Content-Type: {}".format(
+ content_type
+ ))
+
+ self.parser = parser
+
+ def write(self, data):
+ """Write some data. The parser will forward this to the appropriate
+ underlying parser.
+
+ :param data: a bytestring
+ """
+ self.bytes_received += len(data)
+ # TODO: check the parser's return value for errors?
+ return self.parser.write(data)
+
+ def finalize(self):
+ """Finalize the parser."""
+ if self.parser is not None and hasattr(self.parser, 'finalize'):
+ self.parser.finalize()
+
+ def close(self):
+ """Close the parser."""
+ if self.parser is not None and hasattr(self.parser, 'close'):
+ self.parser.close()
+
+ def __repr__(self):
+ return "{}(content_type={!r}, parser={!r})".format(
+ self.__class__.__name__,
+ self.content_type,
+ self.parser,
+ )
+
+
+def create_form_parser(headers, on_field, on_file, trust_x_headers=False,
+ config={}):
+ """This function is a helper function to aid in creating a FormParser
+ instances. Given a dictionary-like headers object, it will determine
+ the correct information needed, instantiate a FormParser with the
+ appropriate values and given callbacks, and then return the corresponding
+ parser.
+
+ :param headers: A dictionary-like object of HTTP headers. The only
+ required header is Content-Type.
+
+ :param on_field: Callback to call with each parsed field.
+
+ :param on_file: Callback to call with each parsed file.
+
+ :param trust_x_headers: Whether or not to trust information received from
+ certain X-Headers - for example, the file name from
+ X-File-Name.
+
+ :param config: Configuration variables to pass to the FormParser.
+ """
+ content_type = headers.get('Content-Type')
+ if content_type is None:
+ logging.getLogger(__name__).warning("No Content-Type header given")
+ raise ValueError("No Content-Type header given!")
+
+ # Boundaries are optional (the FormParser will raise if one is needed
+ # but not given).
+ content_type, params = parse_options_header(content_type)
+ boundary = params.get(b'boundary')
+
+ # We need content_type to be a string, not a bytes object.
+ content_type = content_type.decode('latin-1')
+
+ # File names are optional.
+ file_name = headers.get('X-File-Name')
+
+ # Instantiate a form parser.
+ form_parser = FormParser(content_type,
+ on_field,
+ on_file,
+ boundary=boundary,
+ file_name=file_name,
+ config=config)
+
+ # Return our parser.
+ return form_parser
+
+
+def parse_form(headers, input_stream, on_field, on_file, chunk_size=1048576,
+ **kwargs):
+ """This function is useful if you just want to parse a request body,
+ without too much work. Pass it a dictionary-like object of the request's
+ headers, and a file-like object for the input stream, along with two
+ callbacks that will get called whenever a field or file is parsed.
+
+ :param headers: A dictionary-like object of HTTP headers. The only
+ required header is Content-Type.
+
+ :param input_stream: A file-like object that represents the request body.
+ The read() method must return bytestrings.
+
+ :param on_field: Callback to call with each parsed field.
+
+ :param on_file: Callback to call with each parsed file.
+
+ :param chunk_size: The maximum size to read from the input stream and write
+ to the parser at one time. Defaults to 1 MiB.
+ """
+
+ # Create our form parser.
+ parser = create_form_parser(headers, on_field, on_file, **kwargs)
+
+ # Read chunks of at most chunk_size bytes and write them to the parser, but
+ # never read more than the given Content-Length, if any.
+ content_length = headers.get('Content-Length')
+ if content_length is not None:
+ content_length = int(content_length)
+ else:
+ content_length = float('inf')
+ bytes_read = 0
+
+ while True:
+ # Read only up to the Content-Length given.
+ max_readable = min(content_length - bytes_read, chunk_size)
+ buff = input_stream.read(max_readable)
+
+ # Write to the parser and update our length.
+ parser.write(buff)
+ bytes_read += len(buff)
+
+ # If we get a buffer that's smaller than the size requested, or if we
+ # have read up to our content length, we're done.
+ if len(buff) != max_readable or bytes_read == content_length:
+ break
+
+ # Tell our parser that we're done writing data.
+ parser.finalize()
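+
+
+# A minimal usage sketch: the sample headers, bodies, boundary, and callbacks
+# below are made-up values for illustration. It exercises the high-level
+# parse_form() helper and a directly constructed FormParser. Run it with
+# `python -m multipart.multipart`.
+if __name__ == "__main__":
+ from io import BytesIO
+
+ def show_field(field):
+ print("Field %r = %r" % (field.field_name, field.value))
+
+ def show_file(file):
+ print("File %r (field %r)" % (file.file_name, file.field_name))
+
+ # 1) parse_form(): pass headers and a body stream, receive callbacks.
+ demo_body = b'foo=bar&baz=asdf'
+ demo_headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Content-Length': str(len(demo_body)),
+ }
+ parse_form(demo_headers, BytesIO(demo_body), show_field, show_file)
+
+ # 2) FormParser used directly with an explicit multipart boundary.
+ multipart_body = (
+ b'--demoBoundary\r\n'
+ b'Content-Disposition: form-data; name="greeting"\r\n'
+ b'\r\n'
+ b'hello\r\n'
+ b'--demoBoundary--\r\n'
+ )
+ fp = FormParser('multipart/form-data', show_field, show_file,
+ boundary='demoBoundary')
+ fp.write(multipart_body)
+ fp.finalize()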
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/compat.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/compat.py
new file mode 100644
index 00000000..897188d3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/compat.py
@@ -0,0 +1,133 @@
+import os
+import re
+import sys
+import types
+import functools
+
+
+def ensure_in_path(path):
+ """
+ Ensure that the given path is on sys.path, exactly once and at the front.
+ """
+ if not os.path.isdir(path):
+ raise RuntimeError('Tried to add a nonexistent path')
+
+ def _samefile(x, y):
+ try:
+ return os.path.samefile(x, y)
+ except OSError:
+ return False
+ except AttributeError:
+ # Probably on Windows.
+ path1 = os.path.abspath(x).lower()
+ path2 = os.path.abspath(y).lower()
+ return path1 == path2
+
+ # Remove existing copies of it.
+ for pth in sys.path:
+ if _samefile(pth, path):
+ sys.path.remove(pth)
+
+ # Add it at the beginning.
+ sys.path.insert(0, path)
+
+
+# Check whether pytest is available. If so, we use it to create the marking
+# decorators. If not, we fall back to decorators that do nothing.
+try:
+ import pytest
+except ImportError:
+ pytest = None
+
+if pytest is not None:
+ slow_test = pytest.mark.slow_test
+ xfail = pytest.mark.xfail
+
+else:
+ slow_test = lambda x: x
+
+ def xfail(*args, **kwargs):
+ if len(args) > 0 and isinstance(args[0], types.FunctionType):
+ return args[0]
+
+ return lambda x: x
+
+
+# We don't use the pytest parametrizing function, since it seems to break
+# with unittest.TestCase subclasses.
+def parametrize(field_names, field_values):
+ # If we're not given a list of field names, we make it.
+ if not isinstance(field_names, (tuple, list)):
+ field_names = (field_names,)
+ field_values = [(val,) for val in field_values]
+
+ # Create a decorator that saves this list of field names and values on the
+ # function for later parametrizing.
+ def decorator(func):
+ func.__dict__['param_names'] = field_names
+ func.__dict__['param_values'] = field_values
+ return func
+
+ return decorator
+
+
+# This is a metaclass that actually performs the parametrization.
+class ParametrizingMetaclass(type):
+ IDENTIFIER_RE = re.compile('[^A-Za-z0-9]')
+
+ def __new__(klass, name, bases, attrs):
+ new_attrs = attrs.copy()
+ for attr_name, attr in attrs.items():
+ # We only care about functions
+ if not isinstance(attr, types.FunctionType):
+ continue
+
+ param_names = attr.__dict__.pop('param_names', None)
+ param_values = attr.__dict__.pop('param_values', None)
+ if param_names is None or param_values is None:
+ continue
+
+ # Create multiple copies of the function.
+ for i, values in enumerate(param_values):
+ assert len(param_names) == len(values)
+
+ # Get a repr of the values, and fix it to be a valid identifier
+ human = '_'.join(
+ [klass.IDENTIFIER_RE.sub('', repr(x)) for x in values]
+ )
+
+ # Create a new name.
+ # new_name = attr.__name__ + "_%d" % i
+ new_name = attr.__name__ + "__" + human
+
+ # Create a replacement function.
+ def create_new_func(func, names, values):
+ # Create a kwargs dictionary.
+ kwargs = dict(zip(names, values))
+
+ @functools.wraps(func)
+ def new_func(self):
+ return func(self, **kwargs)
+
+ # Manually set the name and return the new function.
+ new_func.__name__ = new_name
+ return new_func
+
+ # Actually create the new function.
+ new_func = create_new_func(attr, param_names, values)
+
+ # Save this new function in our attrs dict.
+ new_attrs[new_name] = new_func
+
+ # Remove the old attribute from our new dictionary.
+ del new_attrs[attr_name]
+
+ # We create the class as normal, except we use our new attributes.
+ return type.__new__(klass, name, bases, new_attrs)
+
+
+# This is a class decorator that actually applies the above metaclass.
+def parametrize_class(klass):
+ return ParametrizingMetaclass(klass.__name__,
+ klass.__bases__,
+ klass.__dict__)
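+
+
+# A small usage sketch of parametrize()/parametrize_class(); the example class
+# and values below are made up for illustration. Running
+# `python -m multipart.tests.compat` executes the three generated test methods.
+if __name__ == '__main__':
+ import unittest
+
+ @parametrize_class
+ class ExampleParametrized(unittest.TestCase):
+ # Expands into test_positive__1, test_positive__2 and test_positive__3.
+ @parametrize('num', [1, 2, 3])
+ def test_positive(self, num):
+ self.assertGreater(num, 0)
+
+ unittest.main(verbosity=2)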
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header.http
new file mode 100644
index 00000000..0c81daef
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header.http
@@ -0,0 +1,5 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-
isposition: form-data; name="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header.yaml
new file mode 100644
index 00000000..c9b55f24
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header.yaml
@@ -0,0 +1,3 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ error: 51
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header_value.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header_value.http
new file mode 100644
index 00000000..f3dc8346
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header_value.http
@@ -0,0 +1,5 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; n
me="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header_value.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header_value.yaml
new file mode 100644
index 00000000..a6efa7dd
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/CR_in_header_value.yaml
@@ -0,0 +1,3 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ error: 76
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary.http
new file mode 100644
index 00000000..7d97e51b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary.http
@@ -0,0 +1,13 @@
+----boundary
+Content-Disposition: form-data; name="file"; filename="test.txt"
+Content-Type: text/plain
+
+--boundari
+--boundaryq--boundary
q--boundarq
+--bounaryd--
+--notbound--
+--mismatch
+--mismatch--
+--boundary-Q
+--boundary
Q--boundaryQ
+----boundary--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary.yaml
new file mode 100644
index 00000000..235493e7
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary.yaml
@@ -0,0 +1,8 @@
+boundary: --boundary
+expected:
+ - name: file
+ type: file
+ file_name: test.txt
+ data: !!binary |
+ LS1ib3VuZGFyaQ0KLS1ib3VuZGFyeXEtLWJvdW5kYXJ5DXEtLWJvdW5kYXJxDQotLWJvdW5hcnlkLS0NCi0tbm90Ym91bmQtLQ0KLS1taXNtYXRjaA0KLS1taXNtYXRjaC0tDQotLWJvdW5kYXJ5LVENCi0tYm91bmRhcnkNUS0tYm91bmRhcnlR
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_CR.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_CR.http
new file mode 100644
index 00000000..327cc9b3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_CR.http
@@ -0,0 +1,6 @@
+----boundary
+Content-Disposition: form-data; name="field"
+
+QQQQQQQQQQQQQQQQQQQQ
+----boundaryQQQQQQQQQQQQQQQQQQQQ
+----boundary--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_CR.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_CR.yaml
new file mode 100644
index 00000000..921637f9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_CR.yaml
@@ -0,0 +1,8 @@
+boundary: --boundary
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ UVFRUVFRUVFRUVFRUVFRUVFRUVENCi0tLS1ib3VuZGFyeVFRUVFRUVFRUVFRUVFRUVFRUVFR
+
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_LF.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_LF.http
new file mode 100644
index 00000000..e9a5a6cd
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_LF.http
@@ -0,0 +1,6 @@
+----boundary
+Content-Disposition: form-data; name="field"
+
+QQQQQQQQQQQQQQQQQQQQ
+----boundary
QQQQQQQQQQQQQQQQQQQQ
+----boundary--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_LF.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_LF.yaml
new file mode 100644
index 00000000..7346e032
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_LF.yaml
@@ -0,0 +1,8 @@
+boundary: --boundary
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ UVFRUVFRUVFRUVFRUVFRUVFRUVENCi0tLS1ib3VuZGFyeQ1RUVFRUVFRUVFRUVFRUVFRUVFRUQ==
+
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.http
new file mode 100644
index 00000000..9261f1bf
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.http
@@ -0,0 +1,6 @@
+----boundary
+Content-Disposition: form-data; name="field"
+
+QQQQQQQQQQQQQQQQQQQQ
+----boundary-QQQQQQQQQQQQQQQQQQQQ
+----boundary--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.yaml
new file mode 100644
index 00000000..17133c91
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.yaml
@@ -0,0 +1,8 @@
+boundary: --boundary
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ UVFRUVFRUVFRUVFRUVFRUVFRUVENCi0tLS1ib3VuZGFyeS1RUVFRUVFRUVFRUVFRUVFRUVFRUQ==
+
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_end_of_headers.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_end_of_headers.http
new file mode 100644
index 00000000..de14ae11
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_end_of_headers.http
@@ -0,0 +1,4 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field"
+
QThis is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_end_of_headers.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_end_of_headers.yaml
new file mode 100644
index 00000000..5fc1ec07
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_end_of_headers.yaml
@@ -0,0 +1,3 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ error: 89
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_header_char.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_header_char.http
new file mode 100644
index 00000000..b90d00d6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_header_char.http
@@ -0,0 +1,5 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-999position: form-data; name="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_header_char.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_header_char.yaml
new file mode 100644
index 00000000..9d5f62a6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_header_char.yaml
@@ -0,0 +1,3 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ error: 50
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_initial_boundary.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_initial_boundary.http
new file mode 100644
index 00000000..6aab9dad
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_initial_boundary.http
@@ -0,0 +1,5 @@
+------WebQitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_initial_boundary.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_initial_boundary.yaml
new file mode 100644
index 00000000..ffa4eb78
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/bad_initial_boundary.yaml
@@ -0,0 +1,3 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ error: 9
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/base64_encoding.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/base64_encoding.http
new file mode 100644
index 00000000..3d2980f7
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/base64_encoding.http
@@ -0,0 +1,7 @@
+----boundary
+Content-Disposition: form-data; name="file"; filename="test.txt"
+Content-Type: text/plain
+Content-Transfer-Encoding: base64
+
+VGVzdCAxMjM=
+----boundary--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/base64_encoding.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/base64_encoding.yaml
new file mode 100644
index 00000000..10331505
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/base64_encoding.yaml
@@ -0,0 +1,7 @@
+boundary: --boundary
+expected:
+ - name: file
+ type: file
+ file_name: test.txt
+ data: !!binary |
+ VGVzdCAxMjM=
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/empty_header.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/empty_header.http
new file mode 100644
index 00000000..bd593f4b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/empty_header.http
@@ -0,0 +1,5 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+: form-data; name="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/empty_header.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/empty_header.yaml
new file mode 100644
index 00000000..574ed4c2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/empty_header.yaml
@@ -0,0 +1,3 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ error: 42
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_fields.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_fields.http
new file mode 100644
index 00000000..4f13037b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_fields.http
@@ -0,0 +1,9 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field1"
+
+field1
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field2"
+
+field2
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_fields.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_fields.yaml
new file mode 100644
index 00000000..cb2c2d6a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_fields.yaml
@@ -0,0 +1,10 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ - name: field1
+ type: field
+ data: !!binary |
+ ZmllbGQx
+ - name: field2
+ type: field
+ data: !!binary |
+ ZmllbGQy
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_files.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_files.http
new file mode 100644
index 00000000..fd2e4689
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_files.http
@@ -0,0 +1,11 @@
+------WebKitFormBoundarygbACTUR58IyeurVf
+Content-Disposition: form-data; name="file1"; filename="test1.txt"
+Content-Type: text/plain
+
+Test file #1
+------WebKitFormBoundarygbACTUR58IyeurVf
+Content-Disposition: form-data; name="file2"; filename="test2.txt"
+Content-Type: text/plain
+
+Test file #2
+------WebKitFormBoundarygbACTUR58IyeurVf--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_files.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_files.yaml
new file mode 100644
index 00000000..3bf70e2c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/multiple_files.yaml
@@ -0,0 +1,13 @@
+boundary: ----WebKitFormBoundarygbACTUR58IyeurVf
+expected:
+ - name: file1
+ type: file
+ file_name: test1.txt
+ data: !!binary |
+ VGVzdCBmaWxlICMx
+ - name: file2
+ type: file
+ file_name: test2.txt
+ data: !!binary |
+ VGVzdCBmaWxlICMy
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/quoted_printable_encoding.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/quoted_printable_encoding.http
new file mode 100644
index 00000000..09e555a4
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/quoted_printable_encoding.http
@@ -0,0 +1,7 @@
+----boundary
+Content-Disposition: form-data; name="file"; filename="test.txt"
+Content-Type: text/plain
+Content-Transfer-Encoding: quoted-printable
+
+foo=3Dbar
+----boundary--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/quoted_printable_encoding.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/quoted_printable_encoding.yaml
new file mode 100644
index 00000000..2c6bbfb2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/quoted_printable_encoding.yaml
@@ -0,0 +1,7 @@
+boundary: --boundary
+expected:
+ - name: file
+ type: file
+ file_name: test.txt
+ data: !!binary |
+ Zm9vPWJhcg==
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field.http
new file mode 100644
index 00000000..8b90b738
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field.http
@@ -0,0 +1,5 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field.yaml
new file mode 100644
index 00000000..7690f086
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field.yaml
@@ -0,0 +1,6 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ VGhpcyBpcyBhIHRlc3Qu
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_blocks.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_blocks.http
new file mode 100644
index 00000000..5a61d836
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_blocks.http
@@ -0,0 +1,5 @@
+--boundary
+Content-Disposition: form-data; name="field"
+
+0123456789ABCDEFGHIJ0123456789ABCDEFGHIJ
+--boundary--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_blocks.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_blocks.yaml
new file mode 100644
index 00000000..efb1b327
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_blocks.yaml
@@ -0,0 +1,6 @@
+boundary: --boundary
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ MDEyMzQ1Njc4OUFCQ0RFRkdISUowMTIzNDU2Nzg5QUJDREVGR0hJSg==
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_longer.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_longer.http
new file mode 100644
index 00000000..46bd7e18
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_longer.http
@@ -0,0 +1,5 @@
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field"
+
+qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_longer.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_longer.yaml
new file mode 100644
index 00000000..5a118409
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_longer.yaml
@@ -0,0 +1,6 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ cXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXFxcXE=
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_single_file.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_single_file.http
new file mode 100644
index 00000000..34a822b0
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_single_file.http
@@ -0,0 +1,10 @@
+--boundary
+Content-Disposition: form-data; name="field"
+
+test1
+--boundary
+Content-Disposition: form-data; name="file"; filename="file.txt"
+Content-Type: text/plain
+
+test2
+--boundary--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_single_file.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_single_file.yaml
new file mode 100644
index 00000000..47c8d6e0
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_single_file.yaml
@@ -0,0 +1,13 @@
+boundary: boundary
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ dGVzdDE=
+ - name: file
+ type: file
+ file_name: file.txt
+ data: !!binary |
+ dGVzdDI=
+
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_with_leading_newlines.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_with_leading_newlines.http
new file mode 100644
index 00000000..10ebc2e1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_with_leading_newlines.http
@@ -0,0 +1,7 @@
+
+
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc
+Content-Disposition: form-data; name="field"
+
+This is a test.
+------WebKitFormBoundaryTkr3kCBQlBe1nrhc--
\ No newline at end of file
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_with_leading_newlines.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_with_leading_newlines.yaml
new file mode 100644
index 00000000..7690f086
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_field_with_leading_newlines.yaml
@@ -0,0 +1,6 @@
+boundary: ----WebKitFormBoundaryTkr3kCBQlBe1nrhc
+expected:
+ - name: field
+ type: field
+ data: !!binary |
+ VGhpcyBpcyBhIHRlc3Qu
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_file.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_file.http
new file mode 100644
index 00000000..104bfd08
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_file.http
@@ -0,0 +1,6 @@
+------WebKitFormBoundary5BZGOJCWtXGYC9HW
+Content-Disposition: form-data; name="file"; filename="test.txt"
+Content-Type: text/plain
+
+This is a test file.
+------WebKitFormBoundary5BZGOJCWtXGYC9HW--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_file.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_file.yaml
new file mode 100644
index 00000000..2a8e005d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/single_file.yaml
@@ -0,0 +1,8 @@
+boundary: ----WebKitFormBoundary5BZGOJCWtXGYC9HW
+expected:
+ - name: file
+ type: file
+ file_name: test.txt
+ data: !!binary |
+ VGhpcyBpcyBhIHRlc3QgZmlsZS4=
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/utf8_filename.http b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/utf8_filename.http
new file mode 100644
index 00000000..c26df087
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/utf8_filename.http
@@ -0,0 +1,6 @@
+------WebKitFormBoundaryI9SCEFp2lpx5DR2K
+Content-Disposition: form-data; name="file"; filename="???.txt"
+Content-Type: text/plain
+
+これはテストです。
+------WebKitFormBoundaryI9SCEFp2lpx5DR2K--
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/utf8_filename.yaml b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/utf8_filename.yaml
new file mode 100644
index 00000000..507ba2ce
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_data/http/utf8_filename.yaml
@@ -0,0 +1,8 @@
+boundary: ----WebKitFormBoundaryI9SCEFp2lpx5DR2K
+expected:
+ - name: file
+ type: file
+ file_name: ???.txt
+ data: !!binary |
+ 44GT44KM44Gv44OG44K544OI44Gn44GZ44CC
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_multipart.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_multipart.py
new file mode 100644
index 00000000..089f4518
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/multipart/tests/test_multipart.py
@@ -0,0 +1,1305 @@
+import os
+import sys
+import glob
+import yaml
+import base64
+import random
+import tempfile
+import unittest
+from .compat import (
+ parametrize,
+ parametrize_class,
+ slow_test,
+)
+from io import BytesIO
+from unittest.mock import MagicMock, Mock, patch
+
+from ..multipart import *
+
+
+# Get the current directory for our later test cases.
+curr_dir = os.path.abspath(os.path.dirname(__file__))
+
+
+def force_bytes(val):
+ if isinstance(val, str):
+ val = val.encode(sys.getfilesystemencoding())
+
+ return val
+
+
+class TestField(unittest.TestCase):
+ def setUp(self):
+ self.f = Field('foo')
+
+ def test_name(self):
+ self.assertEqual(self.f.field_name, 'foo')
+
+ def test_data(self):
+ self.f.write(b'test123')
+ self.assertEqual(self.f.value, b'test123')
+
+ def test_cache_expiration(self):
+ self.f.write(b'test')
+ self.assertEqual(self.f.value, b'test')
+ self.f.write(b'123')
+ self.assertEqual(self.f.value, b'test123')
+
+ def test_finalize(self):
+ self.f.write(b'test123')
+ self.f.finalize()
+ self.assertEqual(self.f.value, b'test123')
+
+ def test_close(self):
+ self.f.write(b'test123')
+ self.f.close()
+ self.assertEqual(self.f.value, b'test123')
+
+ def test_from_value(self):
+ f = Field.from_value(b'name', b'value')
+ self.assertEqual(f.field_name, b'name')
+ self.assertEqual(f.value, b'value')
+
+ f2 = Field.from_value(b'name', None)
+ self.assertEqual(f2.value, None)
+
+ def test_equality(self):
+ f1 = Field.from_value(b'name', b'value')
+ f2 = Field.from_value(b'name', b'value')
+
+ self.assertEqual(f1, f2)
+
+ def test_equality_with_other(self):
+ f = Field.from_value(b'foo', b'bar')
+ self.assertFalse(f == b'foo')
+ self.assertFalse(b'foo' == f)
+
+ def test_set_none(self):
+ f = Field(b'foo')
+ self.assertEqual(f.value, b'')
+
+ f.set_none()
+ self.assertEqual(f.value, None)
+
+
+class TestFile(unittest.TestCase):
+ def setUp(self):
+ self.c = {}
+ self.d = force_bytes(tempfile.mkdtemp())
+ self.f = File(b'foo.txt', config=self.c)
+
+ def assert_data(self, data):
+ f = self.f.file_object
+ f.seek(0)
+ self.assertEqual(f.read(), data)
+ f.seek(0)
+ f.truncate()
+
+ def assert_exists(self):
+ full_path = os.path.join(self.d, self.f.actual_file_name)
+ self.assertTrue(os.path.exists(full_path))
+
+ def test_simple(self):
+ self.f.write(b'foobar')
+ self.assert_data(b'foobar')
+
+ def test_invalid_write(self):
+ m = Mock()
+ m.write.return_value = 5
+ self.f._fileobj = m
+ v = self.f.write(b'foobar')
+ self.assertEqual(v, 5)
+
+ def test_file_fallback(self):
+ self.c['MAX_MEMORY_FILE_SIZE'] = 1
+
+ self.f.write(b'1')
+ self.assertTrue(self.f.in_memory)
+ self.assert_data(b'1')
+
+ self.f.write(b'123')
+ self.assertFalse(self.f.in_memory)
+ self.assert_data(b'123')
+
+ # Test flushing too.
+ old_obj = self.f.file_object
+ self.f.flush_to_disk()
+ self.assertFalse(self.f.in_memory)
+ self.assertIs(self.f.file_object, old_obj)
+
+ def test_file_fallback_with_data(self):
+ self.c['MAX_MEMORY_FILE_SIZE'] = 10
+
+ self.f.write(b'1' * 10)
+ self.assertTrue(self.f.in_memory)
+
+ self.f.write(b'2' * 10)
+ self.assertFalse(self.f.in_memory)
+
+ self.assert_data(b'11111111112222222222')
+
+ def test_file_name(self):
+ # Write to this dir.
+ self.c['UPLOAD_DIR'] = self.d
+ self.c['MAX_MEMORY_FILE_SIZE'] = 10
+
+ # Write.
+ self.f.write(b'12345678901')
+ self.assertFalse(self.f.in_memory)
+
+ # Assert that the file exists
+ self.assertIsNotNone(self.f.actual_file_name)
+ self.assert_exists()
+
+ def test_file_full_name(self):
+ # Write to this dir.
+ self.c['UPLOAD_DIR'] = self.d
+ self.c['UPLOAD_KEEP_FILENAME'] = True
+ self.c['MAX_MEMORY_FILE_SIZE'] = 10
+
+ # Write.
+ self.f.write(b'12345678901')
+ self.assertFalse(self.f.in_memory)
+
+ # Assert that the file exists
+ self.assertEqual(self.f.actual_file_name, b'foo')
+ self.assert_exists()
+
+ def test_file_full_name_with_ext(self):
+ self.c['UPLOAD_DIR'] = self.d
+ self.c['UPLOAD_KEEP_FILENAME'] = True
+ self.c['UPLOAD_KEEP_EXTENSIONS'] = True
+ self.c['MAX_MEMORY_FILE_SIZE'] = 10
+
+ # Write.
+ self.f.write(b'12345678901')
+ self.assertFalse(self.f.in_memory)
+
+ # Assert that the file exists
+ self.assertEqual(self.f.actual_file_name, b'foo.txt')
+ self.assert_exists()
+
+ def test_no_dir_with_extension(self):
+ self.c['UPLOAD_KEEP_EXTENSIONS'] = True
+ self.c['MAX_MEMORY_FILE_SIZE'] = 10
+
+ # Write.
+ self.f.write(b'12345678901')
+ self.assertFalse(self.f.in_memory)
+
+ # Assert that the file exists
+ ext = os.path.splitext(self.f.actual_file_name)[1]
+ self.assertEqual(ext, b'.txt')
+ self.assert_exists()
+
+ def test_invalid_dir_with_name(self):
+ # Write to this dir.
+ self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting'))
+ self.c['UPLOAD_KEEP_FILENAME'] = True
+ self.c['MAX_MEMORY_FILE_SIZE'] = 5
+
+ # Write.
+ with self.assertRaises(FileError):
+ self.f.write(b'1234567890')
+
+ def test_invalid_dir_no_name(self):
+ # Write to this dir.
+ self.c['UPLOAD_DIR'] = force_bytes(os.path.join('/', 'tmp', 'notexisting'))
+ self.c['UPLOAD_KEEP_FILENAME'] = False
+ self.c['MAX_MEMORY_FILE_SIZE'] = 5
+
+ # Write.
+ with self.assertRaises(FileError):
+ self.f.write(b'1234567890')
+
+ # TODO: test uploading two files with the same name.
+
+
+class TestParseOptionsHeader(unittest.TestCase):
+ def test_simple(self):
+ t, p = parse_options_header('application/json')
+ self.assertEqual(t, b'application/json')
+ self.assertEqual(p, {})
+
+ def test_blank(self):
+ t, p = parse_options_header('')
+ self.assertEqual(t, b'')
+ self.assertEqual(p, {})
+
+ def test_single_param(self):
+ t, p = parse_options_header('application/json;par=val')
+ self.assertEqual(t, b'application/json')
+ self.assertEqual(p, {b'par': b'val'})
+
+ def test_single_param_with_spaces(self):
+ t, p = parse_options_header(b'application/json; par=val')
+ self.assertEqual(t, b'application/json')
+ self.assertEqual(p, {b'par': b'val'})
+
+ def test_multiple_params(self):
+ t, p = parse_options_header(b'application/json;par=val;asdf=foo')
+ self.assertEqual(t, b'application/json')
+ self.assertEqual(p, {b'par': b'val', b'asdf': b'foo'})
+
+ def test_quoted_param(self):
+ t, p = parse_options_header(b'application/json;param="quoted"')
+ self.assertEqual(t, b'application/json')
+ self.assertEqual(p, {b'param': b'quoted'})
+
+ def test_quoted_param_with_semicolon(self):
+ t, p = parse_options_header(b'application/json;param="quoted;with;semicolons"')
+ self.assertEqual(p[b'param'], b'quoted;with;semicolons')
+
+ def test_quoted_param_with_escapes(self):
+ t, p = parse_options_header(b'application/json;param="This \\" is \\" a \\" quote"')
+ self.assertEqual(p[b'param'], b'This " is " a " quote')
+
+ def test_handles_ie6_bug(self):
+ t, p = parse_options_header(b'text/plain; filename="C:\\this\\is\\a\\path\\file.txt"')
+
+ self.assertEqual(p[b'filename'], b'file.txt')
+
+
+class TestBaseParser(unittest.TestCase):
+ def setUp(self):
+ self.b = BaseParser()
+ self.b.callbacks = {}
+
+ def test_callbacks(self):
+ # The stupid list-ness is to get around lack of nonlocal on py2
+ l = [0]
+ def on_foo():
+ l[0] += 1
+
+ self.b.set_callback('foo', on_foo)
+ self.b.callback('foo')
+ self.assertEqual(l[0], 1)
+
+ self.b.set_callback('foo', None)
+ self.b.callback('foo')
+ self.assertEqual(l[0], 1)
+
+
+class TestQuerystringParser(unittest.TestCase):
+ def assert_fields(self, *args, **kwargs):
+ if kwargs.pop('finalize', True):
+ self.p.finalize()
+
+ self.assertEqual(self.f, list(args))
+ if kwargs.get('reset', True):
+ self.f = []
+
+ def setUp(self):
+ self.reset()
+
+ def reset(self):
+ self.f = []
+
+ name_buffer = []
+ data_buffer = []
+
+ def on_field_name(data, start, end):
+ name_buffer.append(data[start:end])
+
+ def on_field_data(data, start, end):
+ data_buffer.append(data[start:end])
+
+ def on_field_end():
+ self.f.append((
+ b''.join(name_buffer),
+ b''.join(data_buffer)
+ ))
+
+ del name_buffer[:]
+ del data_buffer[:]
+
+ callbacks = {
+ 'on_field_name': on_field_name,
+ 'on_field_data': on_field_data,
+ 'on_field_end': on_field_end
+ }
+
+ self.p = QuerystringParser(callbacks)
+
+ def test_simple_querystring(self):
+ self.p.write(b'foo=bar')
+
+ self.assert_fields((b'foo', b'bar'))
+
+ def test_querystring_blank_beginning(self):
+ self.p.write(b'&foo=bar')
+
+ self.assert_fields((b'foo', b'bar'))
+
+ def test_querystring_blank_end(self):
+ self.p.write(b'foo=bar&')
+
+ self.assert_fields((b'foo', b'bar'))
+
+ def test_multiple_querystring(self):
+ self.p.write(b'foo=bar&asdf=baz')
+
+ self.assert_fields(
+ (b'foo', b'bar'),
+ (b'asdf', b'baz')
+ )
+
+ def test_streaming_simple(self):
+ self.p.write(b'foo=bar&')
+ self.assert_fields(
+ (b'foo', b'bar'),
+ finalize=False
+ )
+
+ self.p.write(b'asdf=baz')
+ self.assert_fields(
+ (b'asdf', b'baz')
+ )
+
+ def test_streaming_break(self):
+ self.p.write(b'foo=one')
+ self.assert_fields(finalize=False)
+
+ self.p.write(b'two')
+ self.assert_fields(finalize=False)
+
+ self.p.write(b'three')
+ self.assert_fields(finalize=False)
+
+ self.p.write(b'&asd')
+ self.assert_fields(
+ (b'foo', b'onetwothree'),
+ finalize=False
+ )
+
+ self.p.write(b'f=baz')
+ self.assert_fields(
+ (b'asdf', b'baz')
+ )
+
+ def test_semicolon_separator(self):
+ self.p.write(b'foo=bar;asdf=baz')
+
+ self.assert_fields(
+ (b'foo', b'bar'),
+ (b'asdf', b'baz')
+ )
+
+ def test_too_large_field(self):
+ self.p.max_size = 15
+
+ # Note: len = 8
+ self.p.write(b"foo=bar&")
+ self.assert_fields((b'foo', b'bar'), finalize=False)
+
+ # Note: len = 8, only 7 bytes processed
+ self.p.write(b'a=123456')
+ self.assert_fields((b'a', b'12345'))
+
+ def test_invalid_max_size(self):
+ with self.assertRaises(ValueError):
+ p = QuerystringParser(max_size=-100)
+
+ def test_strict_parsing_pass(self):
+ data = b'foo=bar&another=asdf'
+ for first, last in split_all(data):
+ self.reset()
+ self.p.strict_parsing = True
+
+ print(f"{first!r} / {last!r}")
+
+ self.p.write(first)
+ self.p.write(last)
+ self.assert_fields((b'foo', b'bar'), (b'another', b'asdf'))
+
+ def test_strict_parsing_fail_double_sep(self):
+ data = b'foo=bar&&another=asdf'
+ for first, last in split_all(data):
+ self.reset()
+ self.p.strict_parsing = True
+
+ cnt = 0
+ with self.assertRaises(QuerystringParseError) as cm:
+ cnt += self.p.write(first)
+ cnt += self.p.write(last)
+ self.p.finalize()
+
+ # The offset should occur at 8 bytes into the data (as a whole),
+ # so we calculate the offset into the chunk.
+ if cm is not None:
+ self.assertEqual(cm.exception.offset, 8 - cnt)
+
+ def test_double_sep(self):
+ data = b'foo=bar&&another=asdf'
+ for first, last in split_all(data):
+ print(f" {first!r} / {last!r} ")
+ self.reset()
+
+ cnt = 0
+ cnt += self.p.write(first)
+ cnt += self.p.write(last)
+
+ self.assert_fields((b'foo', b'bar'), (b'another', b'asdf'))
+
+ def test_strict_parsing_fail_no_value(self):
+ self.p.strict_parsing = True
+ with self.assertRaises(QuerystringParseError) as cm:
+ self.p.write(b'foo=bar&blank&another=asdf')
+
+ if cm is not None:
+ self.assertEqual(cm.exception.offset, 8)
+
+ def test_success_no_value(self):
+ self.p.write(b'foo=bar&blank&another=asdf')
+ self.assert_fields(
+ (b'foo', b'bar'),
+ (b'blank', b''),
+ (b'another', b'asdf')
+ )
+
+ def test_repr(self):
+ # Issue #29; verify we don't assert on repr()
+ _ignored = repr(self.p)
+
+
+class TestOctetStreamParser(unittest.TestCase):
+ def setUp(self):
+ self.d = []
+ self.started = 0
+ self.finished = 0
+
+ def on_start():
+ self.started += 1
+
+ def on_data(data, start, end):
+ self.d.append(data[start:end])
+
+ def on_end():
+ self.finished += 1
+
+ callbacks = {
+ 'on_start': on_start,
+ 'on_data': on_data,
+ 'on_end': on_end
+ }
+
+ self.p = OctetStreamParser(callbacks)
+
+ def assert_data(self, data, finalize=True):
+ self.assertEqual(b''.join(self.d), data)
+ self.d = []
+
+ def assert_started(self, val=True):
+ if val:
+ self.assertEqual(self.started, 1)
+ else:
+ self.assertEqual(self.started, 0)
+
+ def assert_finished(self, val=True):
+ if val:
+ self.assertEqual(self.finished, 1)
+ else:
+ self.assertEqual(self.finished, 0)
+
+ def test_simple(self):
+ # Assert is not started
+ self.assert_started(False)
+
+ # Write something, it should then be started + have data
+ self.p.write(b'foobar')
+ self.assert_started()
+ self.assert_data(b'foobar')
+
+ # Finalize, and check
+ self.assert_finished(False)
+ self.p.finalize()
+ self.assert_finished()
+
+ def test_multiple_chunks(self):
+ self.p.write(b'foo')
+ self.p.write(b'bar')
+ self.p.write(b'baz')
+ self.p.finalize()
+
+ self.assert_data(b'foobarbaz')
+ self.assert_finished()
+
+ def test_max_size(self):
+ self.p.max_size = 5
+
+ self.p.write(b'0123456789')
+ self.p.finalize()
+
+ self.assert_data(b'01234')
+ self.assert_finished()
+
+ def test_invalid_max_size(self):
+ with self.assertRaises(ValueError):
+ q = OctetStreamParser(max_size='foo')
+
+
+class TestBase64Decoder(unittest.TestCase):
+ # Note: base64('foobar') == 'Zm9vYmFy'
+ def setUp(self):
+ self.f = BytesIO()
+ self.d = Base64Decoder(self.f)
+
+ def assert_data(self, data, finalize=True):
+ if finalize:
+ self.d.finalize()
+
+ self.f.seek(0)
+ self.assertEqual(self.f.read(), data)
+ self.f.seek(0)
+ self.f.truncate()
+
+ def test_simple(self):
+ self.d.write(b'Zm9vYmFy')
+ self.assert_data(b'foobar')
+
+ def test_bad(self):
+ with self.assertRaises(DecodeError):
+ self.d.write(b'Zm9v!mFy')
+
+ def test_split_properly(self):
+ self.d.write(b'Zm9v')
+ self.d.write(b'YmFy')
+ self.assert_data(b'foobar')
+
+ def test_bad_split(self):
+ buff = b'Zm9v'
+ for i in range(1, 4):
+ first, second = buff[:i], buff[i:]
+
+ self.setUp()
+ self.d.write(first)
+ self.d.write(second)
+ self.assert_data(b'foo')
+
+ def test_long_bad_split(self):
+ buff = b'Zm9vYmFy'
+ for i in range(5, 8):
+ first, second = buff[:i], buff[i:]
+
+ self.setUp()
+ self.d.write(first)
+ self.d.write(second)
+ self.assert_data(b'foobar')
+
+ def test_close_and_finalize(self):
+ parser = Mock()
+ f = Base64Decoder(parser)
+
+ f.finalize()
+ parser.finalize.assert_called_once_with()
+
+ f.close()
+ parser.close.assert_called_once_with()
+
+ def test_bad_length(self):
+ self.d.write(b'Zm9vYmF') # missing ending 'y'
+
+ with self.assertRaises(DecodeError):
+ self.d.finalize()
+
+
+class TestQuotedPrintableDecoder(unittest.TestCase):
+ def setUp(self):
+ self.f = BytesIO()
+ self.d = QuotedPrintableDecoder(self.f)
+
+ def assert_data(self, data, finalize=True):
+ if finalize:
+ self.d.finalize()
+
+ self.f.seek(0)
+ self.assertEqual(self.f.read(), data)
+ self.f.seek(0)
+ self.f.truncate()
+
+ def test_simple(self):
+ self.d.write(b'foobar')
+ self.assert_data(b'foobar')
+
+ def test_with_escape(self):
+ self.d.write(b'foo=3Dbar')
+ self.assert_data(b'foo=bar')
+
+ def test_with_newline_escape(self):
+ self.d.write(b'foo=\r\nbar')
+ self.assert_data(b'foobar')
+
+ def test_with_only_newline_escape(self):
+ self.d.write(b'foo=\nbar')
+ self.assert_data(b'foobar')
+
+ def test_with_split_escape(self):
+ self.d.write(b'foo=3')
+ self.d.write(b'Dbar')
+ self.assert_data(b'foo=bar')
+
+ def test_with_split_newline_escape_1(self):
+ self.d.write(b'foo=\r')
+ self.d.write(b'\nbar')
+ self.assert_data(b'foobar')
+
+ def test_with_split_newline_escape_2(self):
+ self.d.write(b'foo=')
+ self.d.write(b'\r\nbar')
+ self.assert_data(b'foobar')
+
+ def test_close_and_finalize(self):
+ parser = Mock()
+ f = QuotedPrintableDecoder(parser)
+
+ f.finalize()
+ parser.finalize.assert_called_once_with()
+
+ f.close()
+ parser.close.assert_called_once_with()
+
+ def test_not_aligned(self):
+ """
+ https://github.com/andrew-d/python-multipart/issues/6
+ """
+ self.d.write(b'=3AX')
+ self.assert_data(b':X')
+
+ # Additional offset tests
+ self.d.write(b'=3')
+ self.d.write(b'AX')
+ self.assert_data(b':X')
+
+ self.d.write(b'q=3AX')
+ self.assert_data(b'q:X')
+
+
+# Load our list of HTTP test cases.
+http_tests_dir = os.path.join(curr_dir, 'test_data', 'http')
+
+# Read in all test cases and load them.
+NON_PARAMETRIZED_TESTS = {'single_field_blocks'}
+http_tests = []
+for f in os.listdir(http_tests_dir):
+ # Only load the HTTP test cases.
+ fname, ext = os.path.splitext(f)
+ if fname in NON_PARAMETRIZED_TESTS:
+ continue
+
+ if ext == '.http':
+ # Get the YAML file and load it too.
+ yaml_file = os.path.join(http_tests_dir, fname + '.yaml')
+
+ # Load both.
+ with open(os.path.join(http_tests_dir, f), 'rb') as f:
+ test_data = f.read()
+
+ with open(yaml_file, 'rb') as f:
+ yaml_data = yaml.safe_load(f)
+
+ http_tests.append({
+ 'name': fname,
+ 'test': test_data,
+ 'result': yaml_data
+ })
+
+
+def split_all(val):
+ """
+ This function splits a sequence into two non-empty parts at every position
+ except the last one. For example:
+ split_all([1,2,3,4])
+ will give:
+ ([1], [2,3,4]), ([1,2], [3,4])
+ """
+ for i in range(1, len(val) - 1):
+ yield (val[:i], val[i:])
+
+
+@parametrize_class
+class TestFormParser(unittest.TestCase):
+ def make(self, boundary, config={}):
+ self.ended = False
+ self.files = []
+ self.fields = []
+
+ def on_field(f):
+ self.fields.append(f)
+
+ def on_file(f):
+ self.files.append(f)
+
+ def on_end():
+ self.ended = True
+
+ # Get a form-parser instance.
+ self.f = FormParser('multipart/form-data', on_field, on_file, on_end,
+ boundary=boundary, config=config)
+
+ def assert_file_data(self, f, data):
+ o = f.file_object
+ o.seek(0)
+ file_data = o.read()
+ self.assertEqual(file_data, data)
+
+ def assert_file(self, field_name, file_name, data):
+ # Find this file.
+ found = None
+ for f in self.files:
+ if f.field_name == field_name:
+ found = f
+ break
+
+ # Assert that we found it.
+ self.assertIsNotNone(found)
+
+ try:
+ # Assert about this file.
+ self.assert_file_data(found, data)
+ self.assertEqual(found.file_name, file_name)
+
+ # Remove it from our list.
+ self.files.remove(found)
+ finally:
+ # Close our file
+ found.close()
+
+ def assert_field(self, name, value):
+ # Find this field in our fields list.
+ found = None
+ for f in self.fields:
+ if f.field_name == name:
+ found = f
+ break
+
+ # Assert that it exists and matches.
+ self.assertIsNotNone(found)
+ self.assertEqual(value, found.value)
+
+ # Remove it for future iterations.
+ self.fields.remove(found)
+
+ @parametrize('param', http_tests)
+ def test_http(self, param):
+ # Firstly, create our parser with the given boundary.
+ boundary = param['result']['boundary']
+ if isinstance(boundary, str):
+ boundary = boundary.encode('latin-1')
+ self.make(boundary)
+
+ # Now, we feed the parser with data.
+ exc = None
+ try:
+ processed = self.f.write(param['test'])
+ self.f.finalize()
+ except MultipartParseError as e:
+ processed = 0
+ exc = e
+
+ # print(repr(param))
+ # print("")
+ # print(repr(self.fields))
+ # print(repr(self.files))
+
+ # Do we expect an error?
+ if 'error' in param['result']['expected']:
+ self.assertIsNotNone(exc)
+ self.assertEqual(param['result']['expected']['error'], exc.offset)
+ return
+
+ # No error!
+ self.assertEqual(processed, len(param['test']))
+
+ # Assert that the parser gave us the appropriate fields/files.
+ for e in param['result']['expected']:
+ # Get our type and name.
+ type = e['type']
+ name = e['name'].encode('latin-1')
+
+ if type == 'field':
+ self.assert_field(name, e['data'])
+
+ elif type == 'file':
+ self.assert_file(
+ name,
+ e['file_name'].encode('latin-1'),
+ e['data']
+ )
+
+ else:
+ assert False
+
+ def test_random_splitting(self):
+ """
+ This test runs a simple multipart body with one field and one file
+ through every possible split.
+ """
+ # Load test data.
+ test_file = 'single_field_single_file.http'
+ with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
+ test_data = f.read()
+
+ # We split the file through all cases.
+ for first, last in split_all(test_data):
+ # Create form parser.
+ self.make('boundary')
+
+ # Feed with data in 2 chunks.
+ i = 0
+ i += self.f.write(first)
+ i += self.f.write(last)
+ self.f.finalize()
+
+ # Assert we processed everything.
+ self.assertEqual(i, len(test_data))
+
+ # Assert that our file and field are here.
+ self.assert_field(b'field', b'test1')
+ self.assert_file(b'file', b'file.txt', b'test2')
+
+ def test_feed_single_bytes(self):
+ """
+ This test parses a simple multipart body 1 byte at a time.
+ """
+ # Load test data.
+ test_file = 'single_field_single_file.http'
+ with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
+ test_data = f.read()
+
+ # Create form parser.
+ self.make('boundary')
+
+ # Write all bytes.
+ # NOTE: Can't simply do `for b in test_data`, since that gives
+ # an integer when iterating over a bytes object on Python 3.
+ i = 0
+ for x in range(len(test_data)):
+ b = test_data[x:x + 1]
+ i += self.f.write(b)
+
+ self.f.finalize()
+
+ # Assert we processed everything.
+ self.assertEqual(i, len(test_data))
+
+ # Assert that our file and field are here.
+ self.assert_field(b'field', b'test1')
+ self.assert_file(b'file', b'file.txt', b'test2')
+
+ def test_feed_blocks(self):
+ """
+ This test parses a simple multipart body in fixed-size chunks,
+ varying both the chunk size and the initial offset.
+ """
+ # Load test data.
+ test_file = 'single_field_blocks.http'
+ with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
+ test_data = f.read()
+
+ for c in range(1, len(test_data) + 1):
+ # For each chunk size `c`, skip the first `d` bytes (fed as one
+ # prefix chunk), then feed the remainder in chunks of size `c`.
+ for d in range(c):
+
+ # Create form parser.
+ self.make('boundary')
+ # Write the skipped prefix.
+ i = 0
+ self.f.write(test_data[:d])
+ i += d
+ for x in range(d, len(test_data), c):
+ # Write a chunk to achieve condition
+ # `i == data_length - 1`
+ # in boundary search loop (multipart.py:1302)
+ b = test_data[x:x + c]
+ i += self.f.write(b)
+
+ self.f.finalize()
+
+ # Assert we processed everything.
+ self.assertEqual(i, len(test_data))
+
+ # Assert that our field is here.
+ self.assert_field(b'field',
+ b'0123456789ABCDEFGHIJ0123456789ABCDEFGHIJ')
+
+ @slow_test
+ def test_request_body_fuzz(self):
+ """
+ This test randomly fuzzes the request body to ensure that no unexpected
+ exceptions are raised and the parser does not end up in an inconsistent
+ state. Each iteration randomly does one of the following:
+ - Adding a random byte at a random offset
+ - Randomly deleting a single byte
+ - Randomly swapping two bytes
+ """
+ # Load test data.
+ test_file = 'single_field_single_file.http'
+ with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
+ test_data = f.read()
+
+ iterations = 1000
+ successes = 0
+ failures = 0
+ exceptions = 0
+
+ print("Running %d iterations of fuzz testing:" % (iterations,))
+ for i in range(iterations):
+ # Create a bytearray to mutate.
+ fuzz_data = bytearray(test_data)
+
+ # Pick what we're supposed to do.
+ choice = random.choice([1, 2, 3])
+ if choice == 1:
+ # Add a random byte.
+ i = random.randrange(len(test_data))
+ b = random.randrange(256)
+
+ fuzz_data.insert(i, b)
+ msg = "Inserting byte %r at offset %d" % (b, i)
+
+ elif choice == 2:
+ # Remove a random byte.
+ i = random.randrange(len(test_data))
+ del fuzz_data[i]
+
+ msg = "Deleting byte at offset %d" % (i,)
+
+ elif choice == 3:
+ # Swap two bytes.
+ i = random.randrange(len(test_data) - 1)
+ fuzz_data[i], fuzz_data[i + 1] = fuzz_data[i + 1], fuzz_data[i]
+
+ msg = "Swapping bytes %d and %d" % (i, i + 1)
+
+ # Print message, so if this crashes, we can inspect the output.
+ print(" " + msg)
+
+ # Create form parser.
+ self.make('boundary')
+
+ # Feed with data, and ignore form parser exceptions.
+ i = 0
+ try:
+ i = self.f.write(bytes(fuzz_data))
+ self.f.finalize()
+ except FormParserError:
+ exceptions += 1
+ else:
+ if i == len(fuzz_data):
+ successes += 1
+ else:
+ failures += 1
+
+ print("--------------------------------------------------")
+ print("Successes: %d" % (successes,))
+ print("Failures: %d" % (failures,))
+ print("Exceptions: %d" % (exceptions,))
+
+ @slow_test
+ def test_request_body_fuzz_random_data(self):
+ """
+ This test fuzzes the multipart parser with a number of iterations of
+ randomly-generated request bodies.
+ """
+ iterations = 1000
+ successes = 0
+ failures = 0
+ exceptions = 0
+
+ print("Running %d iterations of fuzz testing:" % (iterations,))
+ for i in range(iterations):
+ data_size = random.randrange(100, 4096)
+ data = os.urandom(data_size)
+ print(" Testing with %d random bytes..." % (data_size,))
+
+ # Create form parser.
+ self.make('boundary')
+
+ # Feed with data, and ignore form parser exceptions.
+ i = 0
+ try:
+ i = self.f.write(bytes(data))
+ self.f.finalize()
+ except FormParserError:
+ exceptions += 1
+ else:
+ if i == len(data):
+ successes += 1
+ else:
+ failures += 1
+
+ print("--------------------------------------------------")
+ print("Successes: %d" % (successes,))
+ print("Failures: %d" % (failures,))
+ print("Exceptions: %d" % (exceptions,))
+
+ def test_bad_start_boundary(self):
+ self.make('boundary')
+ data = b'--boundary\rfoobar'
+ with self.assertRaises(MultipartParseError):
+ self.f.write(data)
+
+ self.make('boundary')
+ data = b'--boundaryfoobar'
+ with self.assertRaises(MultipartParseError):
+ self.f.write(data)
+
+ def test_octet_stream(self):
+ files = []
+ def on_file(f):
+ files.append(f)
+ on_field = Mock()
+ on_end = Mock()
+
+ f = FormParser('application/octet-stream', on_field, on_file, on_end=on_end, file_name=b'foo.txt')
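+ # An octet-stream body carries no part headers, so the file name is supplied up front.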
+ self.assertTrue(isinstance(f.parser, OctetStreamParser))
+
+ f.write(b'test')
+ f.write(b'1234')
+ f.finalize()
+
+ # Assert that we only received a single file, with the right data, and that we're done.
+ self.assertFalse(on_field.called)
+ self.assertEqual(len(files), 1)
+ self.assert_file_data(files[0], b'test1234')
+ self.assertTrue(on_end.called)
+
+ def test_querystring(self):
+ fields = []
+ def on_field(f):
+ fields.append(f)
+ on_file = Mock()
+ on_end = Mock()
+
+ def simple_test(f):
+ # Reset tracking.
+ del fields[:]
+ on_file.reset_mock()
+ on_end.reset_mock()
+
+ # Write test data.
+ f.write(b'foo=bar')
+ f.write(b'&test=asdf')
+ f.finalize()
+
+ # Assert we only received 2 fields...
+ self.assertFalse(on_file.called)
+ self.assertEqual(len(fields), 2)
+
+ # ...assert that we have the correct data...
+ self.assertEqual(fields[0].field_name, b'foo')
+ self.assertEqual(fields[0].value, b'bar')
+
+ self.assertEqual(fields[1].field_name, b'test')
+ self.assertEqual(fields[1].value, b'asdf')
+
+ # ... and assert that we've finished.
+ self.assertTrue(on_end.called)
+
+ f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end)
+ self.assertTrue(isinstance(f.parser, QuerystringParser))
+ simple_test(f)
+
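+ # 'application/x-url-encoded' is accepted as an alias and handled the same way.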
+ f = FormParser('application/x-url-encoded', on_field, on_file, on_end=on_end)
+ self.assertTrue(isinstance(f.parser, QuerystringParser))
+ simple_test(f)
+
+ def test_close_methods(self):
+ parser = Mock()
+ f = FormParser('application/x-url-encoded', None, None)
+ f.parser = parser
+
+ f.finalize()
+ parser.finalize.assert_called_once_with()
+
+ f.close()
+ parser.close.assert_called_once_with()
+
+ def test_bad_content_type(self):
+ # We should raise a ValueError for a bad Content-Type
+ with self.assertRaises(ValueError):
+ f = FormParser('application/bad', None, None)
+
+ def test_no_boundary_given(self):
+ # We should raise a FormParserError when parsing a multipart message
+ # without a boundary.
+ with self.assertRaises(FormParserError):
+ f = FormParser('multipart/form-data', None, None)
+
+ def test_bad_content_transfer_encoding(self):
+ data = b'----boundary\r\nContent-Disposition: form-data; name="file"; filename="test.txt"\r\nContent-Type: text/plain\r\nContent-Transfer-Encoding: badstuff\r\n\r\nTest\r\n----boundary--\r\n'
+
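+ # UPLOAD_ERROR_ON_BAD_CTE decides whether an unknown Content-Transfer-Encoding
+ # raises a FormParserError or is simply passed through undecoded.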
+ files = []
+ def on_file(f):
+ files.append(f)
+ on_field = Mock()
+ on_end = Mock()
+
+ # Test with erroring.
+ config = {'UPLOAD_ERROR_ON_BAD_CTE': True}
+ f = FormParser('multipart/form-data', on_field, on_file,
+ on_end=on_end, boundary='--boundary', config=config)
+
+ with self.assertRaises(FormParserError):
+ f.write(data)
+ f.finalize()
+
+ # Test without erroring.
+ config = {'UPLOAD_ERROR_ON_BAD_CTE': False}
+ f = FormParser('multipart/form-data', on_field, on_file,
+ on_end=on_end, boundary='--boundary', config=config)
+
+ f.write(data)
+ f.finalize()
+ self.assert_file_data(files[0], b'Test')
+
+ def test_handles_None_fields(self):
+ fields = []
+ def on_field(f):
+ fields.append(f)
+ on_file = Mock()
+ on_end = Mock()
+
+ f = FormParser('application/x-www-form-urlencoded', on_field, on_file, on_end=on_end)
+ f.write(b'foo=bar&another&baz=asdf')
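+ # 'another' has no '=value', so its parsed value is expected to be None.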
+ f.finalize()
+
+ self.assertEqual(fields[0].field_name, b'foo')
+ self.assertEqual(fields[0].value, b'bar')
+
+ self.assertEqual(fields[1].field_name, b'another')
+ self.assertEqual(fields[1].value, None)
+
+ self.assertEqual(fields[2].field_name, b'baz')
+ self.assertEqual(fields[2].value, b'asdf')
+
+ def test_max_size_multipart(self):
+ # Load test data.
+ test_file = 'single_field_single_file.http'
+ with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
+ test_data = f.read()
+
+ # Create form parser.
+ self.make('boundary')
+
+ # Set the maximum length that we can process to be halfway through the
+ # given data.
+ self.f.parser.max_size = len(test_data) // 2
+
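+ # write() should stop consuming once max_size bytes have been processed.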
+ i = self.f.write(test_data)
+ self.f.finalize()
+
+ # Assert we processed the correct amount.
+ self.assertEqual(i, len(test_data) // 2)
+
+ def test_max_size_form_parser(self):
+ # Load test data.
+ test_file = 'single_field_single_file.http'
+ with open(os.path.join(http_tests_dir, test_file), 'rb') as f:
+ test_data = f.read()
+
+ # Create form parser setting the maximum length that we can process to
+ # be halfway through the given data.
+ size = len(test_data) // 2
+ self.make('boundary', config={'MAX_BODY_SIZE': size})
+
+ i = self.f.write(test_data)
+ self.f.finalize()
+
+ # Assert we processed the correct amount.
+ self.assertEqual(i, len(test_data) // 2)
+
+ def test_octet_stream_max_size(self):
+ files = []
+ def on_file(f):
+ files.append(f)
+ on_field = Mock()
+ on_end = Mock()
+
+ f = FormParser('application/octet-stream', on_field, on_file,
+ on_end=on_end, file_name=b'foo.txt',
+ config={'MAX_BODY_SIZE': 10})
+
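+ # Only the first MAX_BODY_SIZE (10) bytes should make it into the file.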
+ f.write(b'0123456789012345689')
+ f.finalize()
+
+ self.assert_file_data(files[0], b'0123456789')
+
+ def test_invalid_max_size_multipart(self):
+ with self.assertRaises(ValueError):
+ q = MultipartParser(b'bound', max_size='foo')
+
+
+class TestHelperFunctions(unittest.TestCase):
+ def test_create_form_parser(self):
+ r = create_form_parser({'Content-Type': 'application/octet-stream'},
+ None, None)
+ self.assertTrue(isinstance(r, FormParser))
+
+ def test_create_form_parser_error(self):
+ headers = {}
+ with self.assertRaises(ValueError):
+ create_form_parser(headers, None, None)
+
+ def test_parse_form(self):
+ on_field = Mock()
+ on_file = Mock()
+
+ parse_form(
+ {'Content-Type': 'application/octet-stream',
+ },
+ BytesIO(b'123456789012345'),
+ on_field,
+ on_file
+ )
+
+ assert on_file.call_count == 1
+
+ # Assert that the first argument of the call (a File object) has size
+ # 15 - i.e. all data is written.
+ self.assertEqual(on_file.call_args[0][0].size, 15)
+
+ def test_parse_form_content_length(self):
+ files = []
+ def on_file(file):
+ files.append(file)
+
+ parse_form(
+ {'Content-Type': 'application/octet-stream',
+ 'Content-Length': '10'
+ },
+ BytesIO(b'123456789012345'),
+ None,
+ on_file
+ )
+
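+ # With Content-Length set to 10, only 10 of the 15 available bytes should be read.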
+ self.assertEqual(len(files), 1)
+ self.assertEqual(files[0].size, 10)
+
+
+
+def suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFile))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestParseOptionsHeader))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestBaseParser))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestQuerystringParser))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestOctetStreamParser))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestBase64Decoder))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestQuotedPrintableDecoder))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFormParser))
+ suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestHelperFunctions))
+
+ return suite
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/INSTALLER b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/METADATA b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/METADATA
new file mode 100644
index 00000000..916367c7
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/METADATA
@@ -0,0 +1,69 @@
+Metadata-Version: 2.1
+Name: python-multipart
+Version: 0.0.6
+Summary: A streaming multipart parser for Python
+Project-URL: Homepage, https://github.com/andrew-d/python-multipart
+Project-URL: Documentation, https://andrew-d.github.io/python-multipart/
+Project-URL: Changelog, https://github.com/andrew-d/python-multipart/tags
+Project-URL: Source, https://github.com/andrew-d/python-multipart
+Author-email: Andrew Dunham
+License-Expression: Apache-2.0
+License-File: LICENSE.txt
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.7
+Provides-Extra: dev
+Requires-Dist: atomicwrites==1.2.1; extra == 'dev'
+Requires-Dist: attrs==19.2.0; extra == 'dev'
+Requires-Dist: coverage==6.5.0; extra == 'dev'
+Requires-Dist: hatch; extra == 'dev'
+Requires-Dist: invoke==1.7.3; extra == 'dev'
+Requires-Dist: more-itertools==4.3.0; extra == 'dev'
+Requires-Dist: pbr==4.3.0; extra == 'dev'
+Requires-Dist: pluggy==1.0.0; extra == 'dev'
+Requires-Dist: py==1.11.0; extra == 'dev'
+Requires-Dist: pytest-cov==4.0.0; extra == 'dev'
+Requires-Dist: pytest-timeout==2.1.0; extra == 'dev'
+Requires-Dist: pytest==7.2.0; extra == 'dev'
+Requires-Dist: pyyaml==5.1; extra == 'dev'
+Description-Content-Type: text/x-rst
+
+==================
+ Python-Multipart
+==================
+
+.. image:: https://github.com/andrew-d/python-multipart/actions/workflows/test.yaml/badge.svg
+ :target: https://github.com/andrew-d/python-multipart/actions
+
+
+python-multipart is an Apache2 licensed streaming multipart parser for Python.
+Test coverage is currently 100%.
+Documentation is available `here`_.
+
+.. _here: https://andrew-d.github.io/python-multipart/
+
+Why?
+----
+
+Because streaming uploads are awesome for large files.
+
+How to Test
+-----------
+
+If you want to test:
+
+.. code-block:: bash
+
+ $ pip install .[dev]
+ $ inv test
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/RECORD b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/RECORD
new file mode 100644
index 00000000..b414492d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/RECORD
@@ -0,0 +1,62 @@
+multipart/__init__.py,sha256=EaZd7hXXXNz5RWfzZ4lr-wKWXC4anMNWE7u4tPXtWr0,335
+multipart/__pycache__/__init__.cpython-37.pyc,,
+multipart/__pycache__/decoders.cpython-37.pyc,,
+multipart/__pycache__/exceptions.cpython-37.pyc,,
+multipart/__pycache__/multipart.cpython-37.pyc,,
+multipart/decoders.py,sha256=6LeCVARmDrQgmMsaul1WUIf79Q-mLE9swhGxumQe_98,6107
+multipart/exceptions.py,sha256=yDZ9pqq3Y9ZMCvj2TkAvOcNdMjFHjLnHl4luFnzt750,1410
+multipart/multipart.py,sha256=ZRc1beZCgCIXkYe0Xwxh_g4nFdrp3eEid4XODYIfqgQ,71230
+multipart/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+multipart/tests/__pycache__/__init__.cpython-37.pyc,,
+multipart/tests/__pycache__/compat.cpython-37.pyc,,
+multipart/tests/__pycache__/test_multipart.cpython-37.pyc,,
+multipart/tests/compat.py,sha256=3aowcimO1SYU6WqS3GlUJ3jmkgLH63e8AsUPjlta1xU,4266
+multipart/tests/test_data/http/CR_in_header.http,sha256=XEimN_BgEqQXCqK463bMgD9PKIQeLrQhWt2M3vNr9cE,149
+multipart/tests/test_data/http/CR_in_header.yaml,sha256=OEzE2PqK78fi9kjM23YOu4xM0zQ_LRwSiwqFNAmku50,73
+multipart/tests/test_data/http/CR_in_header_value.http,sha256=pf4sP-l4_hzZ8Kr51gUE6CFcCifuWSZ10-vnx6mtXDg,149
+multipart/tests/test_data/http/CR_in_header_value.yaml,sha256=WjqJNYL-cUH2n9k-Xdy1YDvSfDqqXxsiinBDn3HTUu4,73
+multipart/tests/test_data/http/almost_match_boundary.http,sha256=jIsp1M6BHQIHF9o965z3Pt8TFncVvaBj5N43hprRpBM,264
+multipart/tests/test_data/http/almost_match_boundary.yaml,sha256=Hr7WZBwZrbf4vjurjRzGGeY9tFVJLRRmV1rEFXop-6s,300
+multipart/tests/test_data/http/almost_match_boundary_without_CR.http,sha256=KviMqo_FUy1N1-b-YUfyWhs5PmN6_fU7qhMYFTGnUhI,132
+multipart/tests/test_data/http/almost_match_boundary_without_CR.yaml,sha256=HjlUni-nuX3bG2-3FILo4GLBpLD4DImQ48VPlfnfIWY,167
+multipart/tests/test_data/http/almost_match_boundary_without_LF.http,sha256=KylmJ0O-RfnUnXbjVhwJpzHsWqNTPJn29_wfsvrG7AM,133
+multipart/tests/test_data/http/almost_match_boundary_without_LF.yaml,sha256=tkzz_kOFZtkarmMnTen355nm8McPwbmPmWGMxUUBSzU,171
+multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.http,sha256=L6bzRistD4X5TTd1zBtfR6gM4EQL77_iBI_Pgaw4ufw,133
+multipart/tests/test_data/http/almost_match_boundary_without_final_hyphen.yaml,sha256=cFKxwFMYTo9PKRb04Iai__mY9KG29IPkSm3p80DgEZw,171
+multipart/tests/test_data/http/bad_end_of_headers.http,sha256=ucEDylTCg1_hdEVkIc-1k8ZQ-CBIf5uXfDKbSBsSaF0,149
+multipart/tests/test_data/http/bad_end_of_headers.yaml,sha256=1UHERY2D7tp0HEUl5xD4SiotP2skETmBOF5EjcG2HTw,73
+multipart/tests/test_data/http/bad_header_char.http,sha256=zTqXFNQ9yrbc82vubPg95T4edg1Ueh2xadlVD2lO51A,149
+multipart/tests/test_data/http/bad_header_char.yaml,sha256=9ykVsASnvYvX51qtkCJqhgegeN-hoSU40MsYQvqeVNo,73
+multipart/tests/test_data/http/bad_initial_boundary.http,sha256=IGFSkpmw21XfAXr0xOHwj0vnhxyj-uCWVjcljo68LLo,149
+multipart/tests/test_data/http/bad_initial_boundary.yaml,sha256=eBSbue0BYDYhYtKdBCnm1LGq0O_fOMwV6ZoLpZFDFM4,72
+multipart/tests/test_data/http/base64_encoding.http,sha256=fDbr4BgLdNS8kYiTO7g4HxB81hvmiD2sRUCAoijfRx0,173
+multipart/tests/test_data/http/base64_encoding.yaml,sha256=cz2KxZxoi81MiXRh7DmJQOWcdqQH5ahkrJydGYv4hpU,125
+multipart/tests/test_data/http/empty_header.http,sha256=-wSHHSLu1D2wfdC8Zcaw5TX_USTvWz56CANpsceOZYQ,130
+multipart/tests/test_data/http/empty_header.yaml,sha256=4xdVCYJ-l88HMXkMLNkSQoLNgURoGcKzR1AclPLpkOc,73
+multipart/tests/test_data/http/multiple_fields.http,sha256=6p93ls_B7bk8mXPYhsrFwvktSX8CuRdUH4vn-EZBaRM,242
+multipart/tests/test_data/http/multiple_fields.yaml,sha256=mePM5DVfAzty7QNEEyMu2qrFI28TbG9yWRvWFpWj7Jo,197
+multipart/tests/test_data/http/multiple_files.http,sha256=EtmagVBVpsFGnCqlwfKgswQfU8lGa3QNkP6GVJBa5A0,348
+multipart/tests/test_data/http/multiple_files.yaml,sha256=QO9JMgTvkL2EmIWAl8LcbDrkfNmDk0eA5SOk3gFuFWE,260
+multipart/tests/test_data/http/quoted_printable_encoding.http,sha256=--yYceg17SmqIJsazw-SFChdxeTAq8zV4lzPVM_QMrM,180
+multipart/tests/test_data/http/quoted_printable_encoding.yaml,sha256=G_L6lnP-e4uHfGpYQFopxDdpbd_EbxL2oY8N910BTOI,127
+multipart/tests/test_data/http/single_field.http,sha256=JjdSwFiM0mG07HYzBCcjzeqgqAA9glx-VcRUjkOh8cA,149
+multipart/tests/test_data/http/single_field.yaml,sha256=HMXd14-m9sKBvTsnzWOaG12_3wve5SoXeUISF93wlRc,139
+multipart/tests/test_data/http/single_field_blocks.http,sha256=4laZAIbFmxERZtgPWzuOihvEhLWD1NGTSdqZ6Ra58Ns,115
+multipart/tests/test_data/http/single_field_blocks.yaml,sha256=6mKvHtmiXh6OxoibJsx5pUreIMyQyPb_DWy7GEG9BX8,147
+multipart/tests/test_data/http/single_field_longer.http,sha256=BTBt1MsUaxuHauu-mljb3lU-8Z2dpjRN_lkZW4pkDXA,262
+multipart/tests/test_data/http/single_field_longer.yaml,sha256=aENhQPtHaTPIvgJbdiDHvcOtcthEEUHCQIEfLj0aalY,293
+multipart/tests/test_data/http/single_field_single_file.http,sha256=G4dV0iCSjvEk5DSJ1VXWy6R8Hon3-WOExep41nPWVeQ,192
+multipart/tests/test_data/http/single_field_single_file.yaml,sha256=QO9gqdXQsoizLji9r8kdlPWHJB5vO7wszqP1fHvsNV8,189
+multipart/tests/test_data/http/single_field_with_leading_newlines.http,sha256=YfNEUdZxbi4bBGTU4T4WSQZ6QJDJlcLZUczYzGU5Jaw,153
+multipart/tests/test_data/http/single_field_with_leading_newlines.yaml,sha256=HMXd14-m9sKBvTsnzWOaG12_3wve5SoXeUISF93wlRc,139
+multipart/tests/test_data/http/single_file.http,sha256=axRB0Keb4uhAfHxt7Na1x9-PQHCiiKK8s38a2GG860E,202
+multipart/tests/test_data/http/single_file.yaml,sha256=eUKyGkNTDrXdGni4EyEDbxDBTfAKsstVQ5O5SWghYTc,170
+multipart/tests/test_data/http/utf8_filename.http,sha256=w_Ryf4hC_KJo7v-a18dJFECqm21nzA5Z18dsGyu6zjA,208
+multipart/tests/test_data/http/utf8_filename.yaml,sha256=KpDc4e-yYp_JUXa-S5lp591tzoEybgywtGian0kQFPc,177
+multipart/tests/test_multipart.py,sha256=VrxoOtXO4NWpT1OJqo7FWWIybnxGReumIWCR-FDIHCk,38988
+python_multipart-0.0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+python_multipart-0.0.6.dist-info/METADATA,sha256=J4WQf99XHSSg_EDG7fGgJGotS_Hp7ViCtpY4rQ2OgyM,2459
+python_multipart-0.0.6.dist-info/RECORD,,
+python_multipart-0.0.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+python_multipart-0.0.6.dist-info/WHEEL,sha256=Fd6mP6ydyRguakwUJ05oBE7fh2IPxgtDN9IwHJ9OqJQ,87
+python_multipart-0.0.6.dist-info/licenses/LICENSE.txt,sha256=qOgzF2zWF9rwC51tOfoVyo7evG0WQwec0vSJPAwom-I,556
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/REQUESTED b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/REQUESTED
new file mode 100644
index 00000000..e69de29b
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/WHEEL b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/WHEEL
new file mode 100644
index 00000000..9d727675
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.13.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/licenses/LICENSE.txt b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/licenses/LICENSE.txt
new file mode 100644
index 00000000..303a1bf5
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/site-packages/python_multipart-0.0.6.dist-info/licenses/LICENSE.txt
@@ -0,0 +1,14 @@
+Copyright 2012, Andrew Dunham
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
diff --git a/Sources/setup.py b/Sources/setup.py
index 66b1d6cc..fca6d516 100755
--- a/Sources/setup.py
+++ b/Sources/setup.py
@@ -67,7 +67,8 @@ setup(name='pyOpenRPA',
'Jinja2>=2.2.11.2',
'selenium>=3.141.0',
'fastapi>=0.81.0',
- 'uvicorn>=0.18.3'
+ 'uvicorn>=0.18.3',
+ 'python-multipart>=0.0.6'
],
extras_require={
':sys_platform == "win32"': [