.
+ if isinstance(days, float):
+ dayfrac, days = _math.modf(days)
+ daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
+ assert daysecondswhole == int(daysecondswhole) # can't overflow
+ s = int(daysecondswhole)
+ assert days == int(days)
+ d = int(days)
+ else:
+ daysecondsfrac = 0.0
+ d = days
+ assert isinstance(daysecondsfrac, float)
+ assert abs(daysecondsfrac) <= 1.0
+ assert isinstance(d, int)
+ assert abs(s) <= 24 * 3600
+ # days isn't referenced again before redefinition
+
+ if isinstance(seconds, float):
+ secondsfrac, seconds = _math.modf(seconds)
+ assert seconds == int(seconds)
+ seconds = int(seconds)
+ secondsfrac += daysecondsfrac
+ assert abs(secondsfrac) <= 2.0
+ else:
+ secondsfrac = daysecondsfrac
+ # daysecondsfrac isn't referenced again
+ assert isinstance(secondsfrac, float)
+ assert abs(secondsfrac) <= 2.0
+
+ assert isinstance(seconds, int)
+ days, seconds = divmod(seconds, 24*3600)
+ d += days
+ s += int(seconds) # can't overflow
+ assert isinstance(s, int)
+ assert abs(s) <= 2 * 24 * 3600
+ # seconds isn't referenced again before redefinition
+
+ usdouble = secondsfrac * 1e6
+ assert abs(usdouble) < 2.1e6 # exact value not critical
+ # secondsfrac isn't referenced again
+
+ if isinstance(microseconds, float):
+ microseconds = round(microseconds + usdouble)
+ seconds, microseconds = divmod(microseconds, 1000000)
+ days, seconds = divmod(seconds, 24*3600)
+ d += days
+ s += seconds
+ else:
+ microseconds = int(microseconds)
+ seconds, microseconds = divmod(microseconds, 1000000)
+ days, seconds = divmod(seconds, 24*3600)
+ d += days
+ s += seconds
+ microseconds = round(microseconds + usdouble)
+ assert isinstance(s, int)
+ assert isinstance(microseconds, int)
+ assert abs(s) <= 3 * 24 * 3600
+ assert abs(microseconds) < 3.1e6
+
+ # Just a little bit of carrying possible for microseconds and seconds.
+ seconds, us = divmod(microseconds, 1000000)
+ s += seconds
+ days, s = divmod(s, 24*3600)
+ d += days
+
+ assert isinstance(d, int)
+ assert isinstance(s, int) and 0 <= s < 24*3600
+ assert isinstance(us, int) and 0 <= us < 1000000
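+ # Illustrative note (not from the original source): the normalization above
+ # always pushes the sign into days, leaving 0 <= seconds < 24*3600 and
+ # 0 <= microseconds < 1000000.  For example:
+ #
+ #   timedelta(seconds=-1)           -> days=-1, seconds=86399, microseconds=0
+ #   timedelta(days=0.5, seconds=30) -> days=0,  seconds=43230, microseconds=0
+ #   timedelta(microseconds=1.5)     -> days=0,  seconds=0,     microseconds=2
+ #
+ # (the net fractional part of any float arguments is rounded to a whole
+ # microsecond, ties going to the even value).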
+
+ if abs(d) > 999999999:
+ raise OverflowError("timedelta # of days is too large: %d" % d)
+
+ self = object.__new__(cls)
+ self._days = d
+ self._seconds = s
+ self._microseconds = us
+ self._hashcode = -1
+ return self
+
+ def __repr__(self):
+ args = []
+ if self._days:
+ args.append("days=%d" % self._days)
+ if self._seconds:
+ args.append("seconds=%d" % self._seconds)
+ if self._microseconds:
+ args.append("microseconds=%d" % self._microseconds)
+ if not args:
+ args.append('0')
+ return "%s.%s(%s)" % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ ', '.join(args))
+
+ def __str__(self):
+ mm, ss = divmod(self._seconds, 60)
+ hh, mm = divmod(mm, 60)
+ s = "%d:%02d:%02d" % (hh, mm, ss)
+ if self._days:
+ def plural(n):
+ return n, abs(n) != 1 and "s" or ""
+ s = ("%d day%s, " % plural(self._days)) + s
+ if self._microseconds:
+ s = s + ".%06d" % self._microseconds
+ return s
+
+ def total_seconds(self):
+ """Total seconds in the duration."""
+ return ((self.days * 86400 + self.seconds) * 10**6 +
+ self.microseconds) / 10**6
+
+ # Read-only field accessors
+ @property
+ def days(self):
+ """days"""
+ return self._days
+
+ @property
+ def seconds(self):
+ """seconds"""
+ return self._seconds
+
+ @property
+ def microseconds(self):
+ """microseconds"""
+ return self._microseconds
+
+ def __add__(self, other):
+ if isinstance(other, timedelta):
+ # for CPython compatibility, we cannot use
+ # our __class__ here, but need a real timedelta
+ return timedelta(self._days + other._days,
+ self._seconds + other._seconds,
+ self._microseconds + other._microseconds)
+ return NotImplemented
+
+ __radd__ = __add__
+
+ def __sub__(self, other):
+ if isinstance(other, timedelta):
+ # for CPython compatibility, we cannot use
+ # our __class__ here, but need a real timedelta
+ return timedelta(self._days - other._days,
+ self._seconds - other._seconds,
+ self._microseconds - other._microseconds)
+ return NotImplemented
+
+ def __rsub__(self, other):
+ if isinstance(other, timedelta):
+ return -self + other
+ return NotImplemented
+
+ def __neg__(self):
+ # for CPython compatibility, we cannot use
+ # our __class__ here, but need a real timedelta
+ return timedelta(-self._days,
+ -self._seconds,
+ -self._microseconds)
+
+ def __pos__(self):
+ return self
+
+ def __abs__(self):
+ if self._days < 0:
+ return -self
+ else:
+ return self
+
+ def __mul__(self, other):
+ if isinstance(other, int):
+ # for CPython compatibility, we cannot use
+ # our __class__ here, but need a real timedelta
+ return timedelta(self._days * other,
+ self._seconds * other,
+ self._microseconds * other)
+ if isinstance(other, float):
+ usec = self._to_microseconds()
+ a, b = other.as_integer_ratio()
+ return timedelta(0, 0, _divide_and_round(usec * a, b))
+ return NotImplemented
+
+ __rmul__ = __mul__
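+
+ # Illustrative note (not in the original): float multiplication goes through
+ # exact integer arithmetic on the total microsecond count via as_integer_ratio(),
+ # with _divide_and_round() breaking ties to the even quotient.  For example:
+ #
+ #   timedelta(minutes=1) * 1.5       -> timedelta(seconds=90)   (exact)
+ #   timedelta(microseconds=1) * 0.5  -> timedelta(0)            (tie rounds to even)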
+
+ def _to_microseconds(self):
+ return ((self._days * (24*3600) + self._seconds) * 1000000 +
+ self._microseconds)
+
+ def __floordiv__(self, other):
+ if not isinstance(other, (int, timedelta)):
+ return NotImplemented
+ usec = self._to_microseconds()
+ if isinstance(other, timedelta):
+ return usec // other._to_microseconds()
+ if isinstance(other, int):
+ return timedelta(0, 0, usec // other)
+
+ def __truediv__(self, other):
+ if not isinstance(other, (int, float, timedelta)):
+ return NotImplemented
+ usec = self._to_microseconds()
+ if isinstance(other, timedelta):
+ return usec / other._to_microseconds()
+ if isinstance(other, int):
+ return timedelta(0, 0, _divide_and_round(usec, other))
+ if isinstance(other, float):
+ a, b = other.as_integer_ratio()
+ return timedelta(0, 0, _divide_and_round(b * usec, a))
+
+ def __mod__(self, other):
+ if isinstance(other, timedelta):
+ r = self._to_microseconds() % other._to_microseconds()
+ return timedelta(0, 0, r)
+ return NotImplemented
+
+ def __divmod__(self, other):
+ if isinstance(other, timedelta):
+ q, r = divmod(self._to_microseconds(),
+ other._to_microseconds())
+ return q, timedelta(0, 0, r)
+ return NotImplemented
+
+ # Comparisons of timedelta objects with other.
+
+ def __eq__(self, other):
+ if isinstance(other, timedelta):
+ return self._cmp(other) == 0
+ else:
+ return False
+
+ def __le__(self, other):
+ if isinstance(other, timedelta):
+ return self._cmp(other) <= 0
+ else:
+ _cmperror(self, other)
+
+ def __lt__(self, other):
+ if isinstance(other, timedelta):
+ return self._cmp(other) < 0
+ else:
+ _cmperror(self, other)
+
+ def __ge__(self, other):
+ if isinstance(other, timedelta):
+ return self._cmp(other) >= 0
+ else:
+ _cmperror(self, other)
+
+ def __gt__(self, other):
+ if isinstance(other, timedelta):
+ return self._cmp(other) > 0
+ else:
+ _cmperror(self, other)
+
+ def _cmp(self, other):
+ assert isinstance(other, timedelta)
+ return _cmp(self._getstate(), other._getstate())
+
+ def __hash__(self):
+ if self._hashcode == -1:
+ self._hashcode = hash(self._getstate())
+ return self._hashcode
+
+ def __bool__(self):
+ return (self._days != 0 or
+ self._seconds != 0 or
+ self._microseconds != 0)
+
+ # Pickle support.
+
+ def _getstate(self):
+ return (self._days, self._seconds, self._microseconds)
+
+ def __reduce__(self):
+ return (self.__class__, self._getstate())
+
+timedelta.min = timedelta(-999999999)
+timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
+ microseconds=999999)
+timedelta.resolution = timedelta(microseconds=1)
+
+class date:
+ """Concrete date type.
+
+ Constructors:
+
+ __new__()
+ fromtimestamp()
+ today()
+ fromordinal()
+
+ Operators:
+
+ __repr__, __str__
+ __eq__, __le__, __lt__, __ge__, __gt__, __hash__
+ __add__, __radd__, __sub__ (add/radd only with timedelta arg)
+
+ Methods:
+
+ timetuple()
+ toordinal()
+ weekday()
+ isoweekday(), isocalendar(), isoformat()
+ ctime()
+ strftime()
+
+ Properties (readonly):
+ year, month, day
+ """
+ __slots__ = '_year', '_month', '_day', '_hashcode'
+
+ def __new__(cls, year, month=None, day=None):
+ """Constructor.
+
+ Arguments:
+
+ year, month, day (required, base 1)
+ """
+ if (month is None and
+ isinstance(year, (bytes, str)) and len(year) == 4 and
+ 1 <= ord(year[2:3]) <= 12):
+ # Pickle support
+ if isinstance(year, str):
+ try:
+ year = year.encode('latin1')
+ except UnicodeEncodeError:
+ # More informative error message.
+ raise ValueError(
+ "Failed to encode latin1 string when unpickling "
+ "a date object. "
+ "pickle.load(data, encoding='latin1') is assumed.")
+ self = object.__new__(cls)
+ self.__setstate(year)
+ self._hashcode = -1
+ return self
+ year, month, day = _check_date_fields(year, month, day)
+ self = object.__new__(cls)
+ self._year = year
+ self._month = month
+ self._day = day
+ self._hashcode = -1
+ return self
+
+ # Additional constructors
+
+ @classmethod
+ def fromtimestamp(cls, t):
+ "Construct a date from a POSIX timestamp (like time.time())."
+ y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
+ return cls(y, m, d)
+
+ @classmethod
+ def today(cls):
+ "Construct a date from time.time()."
+ t = _time.time()
+ return cls.fromtimestamp(t)
+
+ @classmethod
+ def fromordinal(cls, n):
+ """Construct a date from a proleptic Gregorian ordinal.
+
+ January 1 of year 1 is day 1. Only the year, month and day are
+ non-zero in the result.
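+
+ For example (round-tripping through toordinal()):
+
+ >>> date.fromordinal(1)
+ datetime.date(1, 1, 1)
+ >>> date.fromordinal(date(2002, 3, 11).toordinal())
+ datetime.date(2002, 3, 11)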
+ """
+ y, m, d = _ord2ymd(n)
+ return cls(y, m, d)
+
+ @classmethod
+ def fromisoformat(cls, date_string):
+ """Construct a date from the output of date.isoformat()."""
+ if not isinstance(date_string, str):
+ raise TypeError('fromisoformat: argument must be str')
+
+ try:
+ assert len(date_string) == 10
+ return cls(*_parse_isoformat_date(date_string))
+ except Exception:
+ raise ValueError(f'Invalid isoformat string: {date_string!r}')
+
+
+ # Conversions to string
+
+ def __repr__(self):
+ """Convert to formal string, for repr().
+
+ >>> dt = datetime(2010, 1, 1)
+ >>> repr(dt)
+ 'datetime.datetime(2010, 1, 1, 0, 0)'
+
+ >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
+ >>> repr(dt)
+ 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
+ """
+ return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ self._year,
+ self._month,
+ self._day)
+ # XXX These shouldn't depend on time.localtime(), because that
+ # clips the usable dates to [1970 .. 2038). At least ctime() is
+ # easily done without using strftime() -- that's better too because
+ # strftime("%c", ...) is locale specific.
+
+
+ def ctime(self):
+ "Return ctime() style string."
+ weekday = self.toordinal() % 7 or 7
+ return "%s %s %2d 00:00:00 %04d" % (
+ _DAYNAMES[weekday],
+ _MONTHNAMES[self._month],
+ self._day, self._year)
+
+ def strftime(self, fmt):
+ "Format using strftime()."
+ return _wrap_strftime(self, fmt, self.timetuple())
+
+ def __format__(self, fmt):
+ if not isinstance(fmt, str):
+ raise TypeError("must be str, not %s" % type(fmt).__name__)
+ if len(fmt) != 0:
+ return self.strftime(fmt)
+ return str(self)
+
+ def isoformat(self):
+ """Return the date formatted according to ISO.
+
+ This is 'YYYY-MM-DD'.
+
+ References:
+ - http://www.w3.org/TR/NOTE-datetime
+ - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
+ """
+ return "%04d-%02d-%02d" % (self._year, self._month, self._day)
+
+ __str__ = isoformat
+
+ # Read-only field accessors
+ @property
+ def year(self):
+ """year (1-9999)"""
+ return self._year
+
+ @property
+ def month(self):
+ """month (1-12)"""
+ return self._month
+
+ @property
+ def day(self):
+ """day (1-31)"""
+ return self._day
+
+ # Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
+ # __hash__ (and helpers)
+
+ def timetuple(self):
+ "Return local time tuple compatible with time.localtime()."
+ return _build_struct_time(self._year, self._month, self._day,
+ 0, 0, 0, -1)
+
+ def toordinal(self):
+ """Return proleptic Gregorian ordinal for the year, month and day.
+
+ January 1 of year 1 is day 1. Only the year, month and day values
+ contribute to the result.
+ """
+ return _ymd2ord(self._year, self._month, self._day)
+
+ def replace(self, year=None, month=None, day=None):
+ """Return a new date with new values for the specified fields."""
+ if year is None:
+ year = self._year
+ if month is None:
+ month = self._month
+ if day is None:
+ day = self._day
+ return type(self)(year, month, day)
+
+ # Comparisons of date objects with other.
+
+ def __eq__(self, other):
+ if isinstance(other, date):
+ return self._cmp(other) == 0
+ return NotImplemented
+
+ def __le__(self, other):
+ if isinstance(other, date):
+ return self._cmp(other) <= 0
+ return NotImplemented
+
+ def __lt__(self, other):
+ if isinstance(other, date):
+ return self._cmp(other) < 0
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, date):
+ return self._cmp(other) >= 0
+ return NotImplemented
+
+ def __gt__(self, other):
+ if isinstance(other, date):
+ return self._cmp(other) > 0
+ return NotImplemented
+
+ def _cmp(self, other):
+ assert isinstance(other, date)
+ y, m, d = self._year, self._month, self._day
+ y2, m2, d2 = other._year, other._month, other._day
+ return _cmp((y, m, d), (y2, m2, d2))
+
+ def __hash__(self):
+ "Hash."
+ if self._hashcode == -1:
+ self._hashcode = hash(self._getstate())
+ return self._hashcode
+
+ # Computations
+
+ def __add__(self, other):
+ "Add a date to a timedelta."
+ if isinstance(other, timedelta):
+ o = self.toordinal() + other.days
+ if 0 < o <= _MAXORDINAL:
+ return date.fromordinal(o)
+ raise OverflowError("result out of range")
+ return NotImplemented
+
+ __radd__ = __add__
+
+ def __sub__(self, other):
+ """Subtract two dates, or a date and a timedelta."""
+ if isinstance(other, timedelta):
+ return self + timedelta(-other.days)
+ if isinstance(other, date):
+ days1 = self.toordinal()
+ days2 = other.toordinal()
+ return timedelta(days1 - days2)
+ return NotImplemented
+
+ def weekday(self):
+ "Return day of the week, where Monday == 0 ... Sunday == 6."
+ return (self.toordinal() + 6) % 7
+
+ # Day-of-the-week and week-of-the-year, according to ISO
+
+ def isoweekday(self):
+ "Return day of the week, where Monday == 1 ... Sunday == 7."
+ # 1-Jan-0001 is a Monday
+ return self.toordinal() % 7 or 7
+
+ def isocalendar(self):
+ """Return a 3-tuple containing ISO year, week number, and weekday.
+
+ The first ISO week of the year is the (Mon-Sun) week
+ containing the year's first Thursday; everything else derives
+ from that.
+
+ The first week is 1; Monday is 1 ... Sunday is 7.
+
+ ISO calendar algorithm taken from
+ http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
+ (used with permission)
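+
+ For example, 2004 begins on a Thursday, so the first ISO week of 2004
+ runs from Monday 29 Dec 2003 through Sunday 4 Jan 2004:
+
+ >>> date(2003, 12, 29).isocalendar()
+ (2004, 1, 1)
+ >>> date(2004, 1, 4).isocalendar()
+ (2004, 1, 7)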
+ """
+ year = self._year
+ week1monday = _isoweek1monday(year)
+ today = _ymd2ord(self._year, self._month, self._day)
+ # Internally, week and day have origin 0
+ week, day = divmod(today - week1monday, 7)
+ if week < 0:
+ year -= 1
+ week1monday = _isoweek1monday(year)
+ week, day = divmod(today - week1monday, 7)
+ elif week >= 52:
+ if today >= _isoweek1monday(year+1):
+ year += 1
+ week = 0
+ return year, week+1, day+1
+
+ # Pickle support.
+
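+ # Illustrative note (not in the original): the pickled state is a single
+ # 4-byte string laid out as [year_hi, year_lo, month, day].  For example:
+ #
+ #   >>> date(2019, 5, 17)._getstate()
+ #   (b'\x07\xe3\x05\x11',)          # 2019 == 7*256 + 227
+ #
+ # Because the month byte is always 1..12, __new__ above can recognize such
+ # a payload by its length and the value of its third byte.
+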
+ def _getstate(self):
+ yhi, ylo = divmod(self._year, 256)
+ return bytes([yhi, ylo, self._month, self._day]),
+
+ def __setstate(self, string):
+ yhi, ylo, self._month, self._day = string
+ self._year = yhi * 256 + ylo
+
+ def __reduce__(self):
+ return (self.__class__, self._getstate())
+
+_date_class = date # so functions w/ args named "date" can get at the class
+
+date.min = date(1, 1, 1)
+date.max = date(9999, 12, 31)
+date.resolution = timedelta(days=1)
+
+
+class tzinfo:
+ """Abstract base class for time zone info classes.
+
+ Subclasses must override the name(), utcoffset() and dst() methods.
+ """
+ __slots__ = ()
+
+ def tzname(self, dt):
+ "datetime -> string name of time zone."
+ raise NotImplementedError("tzinfo subclass must override tzname()")
+
+ def utcoffset(self, dt):
+ "datetime -> timedelta, positive for east of UTC, negative for west of UTC"
+ raise NotImplementedError("tzinfo subclass must override utcoffset()")
+
+ def dst(self, dt):
+ """datetime -> DST offset as timedelta, positive for east of UTC.
+
+ Return 0 if DST not in effect. utcoffset() must include the DST
+ offset.
+ """
+ raise NotImplementedError("tzinfo subclass must override dst()")
+
+ def fromutc(self, dt):
+ "datetime in UTC -> datetime in local time."
+
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ dtoff = dt.utcoffset()
+ if dtoff is None:
+ raise ValueError("fromutc() requires a non-None utcoffset() "
+ "result")
+
+ # See the long comment block at the end of this file for an
+ # explanation of this algorithm.
+ dtdst = dt.dst()
+ if dtdst is None:
+ raise ValueError("fromutc() requires a non-None dst() result")
+ delta = dtoff - dtdst
+ if delta:
+ dt += delta
+ dtdst = dt.dst()
+ if dtdst is None:
+ raise ValueError("fromutc(): dt.dst gave inconsistent "
+ "results; cannot convert")
+ return dt + dtdst
+
+ # Pickle support.
+
+ def __reduce__(self):
+ getinitargs = getattr(self, "__getinitargs__", None)
+ if getinitargs:
+ args = getinitargs()
+ else:
+ args = ()
+ getstate = getattr(self, "__getstate__", None)
+ if getstate:
+ state = getstate()
+ else:
+ state = getattr(self, "__dict__", None) or None
+ if state is None:
+ return (self.__class__, args)
+ else:
+ return (self.__class__, args, state)
+
+_tzinfo_class = tzinfo
+
+class time:
+ """Time with time zone.
+
+ Constructors:
+
+ __new__()
+
+ Operators:
+
+ __repr__, __str__
+ __eq__, __le__, __lt__, __ge__, __gt__, __hash__
+
+ Methods:
+
+ strftime()
+ isoformat()
+ utcoffset()
+ tzname()
+ dst()
+
+ Properties (readonly):
+ hour, minute, second, microsecond, tzinfo, fold
+ """
+ __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
+
+ def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
+ """Constructor.
+
+ Arguments:
+
+ hour, minute, second, microsecond (all default to zero)
+ tzinfo (default to None)
+ fold (keyword only, default to zero)
+ """
+ if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
+ ord(hour[0:1])&0x7F < 24):
+ # Pickle support
+ if isinstance(hour, str):
+ try:
+ hour = hour.encode('latin1')
+ except UnicodeEncodeError:
+ # More informative error message.
+ raise ValueError(
+ "Failed to encode latin1 string when unpickling "
+ "a time object. "
+ "pickle.load(data, encoding='latin1') is assumed.")
+ self = object.__new__(cls)
+ self.__setstate(hour, minute or None)
+ self._hashcode = -1
+ return self
+ hour, minute, second, microsecond, fold = _check_time_fields(
+ hour, minute, second, microsecond, fold)
+ _check_tzinfo_arg(tzinfo)
+ self = object.__new__(cls)
+ self._hour = hour
+ self._minute = minute
+ self._second = second
+ self._microsecond = microsecond
+ self._tzinfo = tzinfo
+ self._hashcode = -1
+ self._fold = fold
+ return self
+
+ # Read-only field accessors
+ @property
+ def hour(self):
+ """hour (0-23)"""
+ return self._hour
+
+ @property
+ def minute(self):
+ """minute (0-59)"""
+ return self._minute
+
+ @property
+ def second(self):
+ """second (0-59)"""
+ return self._second
+
+ @property
+ def microsecond(self):
+ """microsecond (0-999999)"""
+ return self._microsecond
+
+ @property
+ def tzinfo(self):
+ """timezone info object"""
+ return self._tzinfo
+
+ @property
+ def fold(self):
+ return self._fold
+
+ # Standard conversions, __hash__ (and helpers)
+
+ # Comparisons of time objects with other.
+
+ def __eq__(self, other):
+ if isinstance(other, time):
+ return self._cmp(other, allow_mixed=True) == 0
+ else:
+ return False
+
+ def __le__(self, other):
+ if isinstance(other, time):
+ return self._cmp(other) <= 0
+ else:
+ _cmperror(self, other)
+
+ def __lt__(self, other):
+ if isinstance(other, time):
+ return self._cmp(other) < 0
+ else:
+ _cmperror(self, other)
+
+ def __ge__(self, other):
+ if isinstance(other, time):
+ return self._cmp(other) >= 0
+ else:
+ _cmperror(self, other)
+
+ def __gt__(self, other):
+ if isinstance(other, time):
+ return self._cmp(other) > 0
+ else:
+ _cmperror(self, other)
+
+ def _cmp(self, other, allow_mixed=False):
+ assert isinstance(other, time)
+ mytz = self._tzinfo
+ ottz = other._tzinfo
+ myoff = otoff = None
+
+ if mytz is ottz:
+ base_compare = True
+ else:
+ myoff = self.utcoffset()
+ otoff = other.utcoffset()
+ base_compare = myoff == otoff
+
+ if base_compare:
+ return _cmp((self._hour, self._minute, self._second,
+ self._microsecond),
+ (other._hour, other._minute, other._second,
+ other._microsecond))
+ if myoff is None or otoff is None:
+ if allow_mixed:
+ return 2 # arbitrary non-zero value
+ else:
+ raise TypeError("cannot compare naive and aware times")
+ myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
+ othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
+ return _cmp((myhhmm, self._second, self._microsecond),
+ (othhmm, other._second, other._microsecond))
+
+ def __hash__(self):
+ """Hash."""
+ if self._hashcode == -1:
+ if self.fold:
+ t = self.replace(fold=0)
+ else:
+ t = self
+ tzoff = t.utcoffset()
+ if not tzoff: # zero or None
+ self._hashcode = hash(t._getstate()[0])
+ else:
+ h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
+ timedelta(hours=1))
+ assert not m % timedelta(minutes=1), "whole minute"
+ m //= timedelta(minutes=1)
+ if 0 <= h < 24:
+ self._hashcode = hash(time(h, m, self.second, self.microsecond))
+ else:
+ self._hashcode = hash((h, m, self.second, self.microsecond))
+ return self._hashcode
+
+ # Conversion to string
+
+ def _tzstr(self):
+ """Return formatted timezone offset (+xx:xx) or an empty string."""
+ off = self.utcoffset()
+ return _format_offset(off)
+
+ def __repr__(self):
+ """Convert to formal string, for repr()."""
+ if self._microsecond != 0:
+ s = ", %d, %d" % (self._second, self._microsecond)
+ elif self._second != 0:
+ s = ", %d" % self._second
+ else:
+ s = ""
+ s= "%s.%s(%d, %d%s)" % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ self._hour, self._minute, s)
+ if self._tzinfo is not None:
+ assert s[-1:] == ")"
+ s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
+ if self._fold:
+ assert s[-1:] == ")"
+ s = s[:-1] + ", fold=1)"
+ return s
+
+ def isoformat(self, timespec='auto'):
+ """Return the time formatted according to ISO.
+
+ The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
+ part is omitted if self.microsecond == 0.
+
+ The optional argument timespec specifies the number of additional
+ terms of the time to include.
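+
+ For example:
+
+ >>> time(12, 34, 56, 789).isoformat()
+ '12:34:56.000789'
+ >>> time(12, 34, 56, 789).isoformat(timespec='milliseconds')
+ '12:34:56.000'
+ >>> time(12, 34).isoformat(timespec='hours')
+ '12'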
+ """
+ s = _format_time(self._hour, self._minute, self._second,
+ self._microsecond, timespec)
+ tz = self._tzstr()
+ if tz:
+ s += tz
+ return s
+
+ __str__ = isoformat
+
+ @classmethod
+ def fromisoformat(cls, time_string):
+ """Construct a time from the output of isoformat()."""
+ if not isinstance(time_string, str):
+ raise TypeError('fromisoformat: argument must be str')
+
+ try:
+ return cls(*_parse_isoformat_time(time_string))
+ except Exception:
+ raise ValueError(f'Invalid isoformat string: {time_string!r}')
+
+
+ def strftime(self, fmt):
+ """Format using strftime(). The date part of the timestamp passed
+ to underlying strftime should not be used.
+ """
+ # The year must be >= 1000 else Python's strftime implementation
+ # can raise a bogus exception.
+ timetuple = (1900, 1, 1,
+ self._hour, self._minute, self._second,
+ 0, 1, -1)
+ return _wrap_strftime(self, fmt, timetuple)
+
+ def __format__(self, fmt):
+ if not isinstance(fmt, str):
+ raise TypeError("must be str, not %s" % type(fmt).__name__)
+ if len(fmt) != 0:
+ return self.strftime(fmt)
+ return str(self)
+
+ # Timezone functions
+
+ def utcoffset(self):
+ """Return the timezone offset as timedelta, positive east of UTC
+ (negative west of UTC)."""
+ if self._tzinfo is None:
+ return None
+ offset = self._tzinfo.utcoffset(None)
+ _check_utc_offset("utcoffset", offset)
+ return offset
+
+ def tzname(self):
+ """Return the timezone name.
+
+ Note that the name is 100% informational -- there's no requirement that
+ it mean anything in particular. For example, "GMT", "UTC", "-500",
+ "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
+ """
+ if self._tzinfo is None:
+ return None
+ name = self._tzinfo.tzname(None)
+ _check_tzname(name)
+ return name
+
+ def dst(self):
+ """Return 0 if DST is not in effect, or the DST offset (as timedelta
+ positive eastward) if DST is in effect.
+
+ This is purely informational; the DST offset has already been added to
+ the UTC offset returned by utcoffset() if applicable, so there's no
+ need to consult dst() unless you're interested in displaying the DST
+ info.
+ """
+ if self._tzinfo is None:
+ return None
+ offset = self._tzinfo.dst(None)
+ _check_utc_offset("dst", offset)
+ return offset
+
+ def replace(self, hour=None, minute=None, second=None, microsecond=None,
+ tzinfo=True, *, fold=None):
+ """Return a new time with new values for the specified fields."""
+ if hour is None:
+ hour = self.hour
+ if minute is None:
+ minute = self.minute
+ if second is None:
+ second = self.second
+ if microsecond is None:
+ microsecond = self.microsecond
+ if tzinfo is True:
+ tzinfo = self.tzinfo
+ if fold is None:
+ fold = self._fold
+ return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
+
+ # Pickle support.
+
+ def _getstate(self, protocol=3):
+ us2, us3 = divmod(self._microsecond, 256)
+ us1, us2 = divmod(us2, 256)
+ h = self._hour
+ if self._fold and protocol > 3:
+ h += 128
+ basestate = bytes([h, self._minute, self._second,
+ us1, us2, us3])
+ if self._tzinfo is None:
+ return (basestate,)
+ else:
+ return (basestate, self._tzinfo)
+
+ def __setstate(self, string, tzinfo):
+ if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
+ raise TypeError("bad tzinfo state arg")
+ h, self._minute, self._second, us1, us2, us3 = string
+ if h > 127:
+ self._fold = 1
+ self._hour = h - 128
+ else:
+ self._fold = 0
+ self._hour = h
+ self._microsecond = (((us1 << 8) | us2) << 8) | us3
+ self._tzinfo = tzinfo
+
+ def __reduce_ex__(self, protocol):
+ return (time, self._getstate(protocol))
+
+ def __reduce__(self):
+ return self.__reduce_ex__(2)
+
+_time_class = time # so functions w/ args named "time" can get at the class
+
+time.min = time(0, 0, 0)
+time.max = time(23, 59, 59, 999999)
+time.resolution = timedelta(microseconds=1)
+
+class datetime(date):
+ """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
+
+ The year, month and day arguments are required. tzinfo may be None, or an
+ instance of a tzinfo subclass. The remaining arguments may be ints.
+ """
+ __slots__ = date.__slots__ + time.__slots__
+
+ def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
+ microsecond=0, tzinfo=None, *, fold=0):
+ if (isinstance(year, (bytes, str)) and len(year) == 10 and
+ 1 <= ord(year[2:3])&0x7F <= 12):
+ # Pickle support
+ if isinstance(year, str):
+ try:
+ year = bytes(year, 'latin1')
+ except UnicodeEncodeError:
+ # More informative error message.
+ raise ValueError(
+ "Failed to encode latin1 string when unpickling "
+ "a datetime object. "
+ "pickle.load(data, encoding='latin1') is assumed.")
+ self = object.__new__(cls)
+ self.__setstate(year, month)
+ self._hashcode = -1
+ return self
+ year, month, day = _check_date_fields(year, month, day)
+ hour, minute, second, microsecond, fold = _check_time_fields(
+ hour, minute, second, microsecond, fold)
+ _check_tzinfo_arg(tzinfo)
+ self = object.__new__(cls)
+ self._year = year
+ self._month = month
+ self._day = day
+ self._hour = hour
+ self._minute = minute
+ self._second = second
+ self._microsecond = microsecond
+ self._tzinfo = tzinfo
+ self._hashcode = -1
+ self._fold = fold
+ return self
+
+ # Read-only field accessors
+ @property
+ def hour(self):
+ """hour (0-23)"""
+ return self._hour
+
+ @property
+ def minute(self):
+ """minute (0-59)"""
+ return self._minute
+
+ @property
+ def second(self):
+ """second (0-59)"""
+ return self._second
+
+ @property
+ def microsecond(self):
+ """microsecond (0-999999)"""
+ return self._microsecond
+
+ @property
+ def tzinfo(self):
+ """timezone info object"""
+ return self._tzinfo
+
+ @property
+ def fold(self):
+ return self._fold
+
+ @classmethod
+ def _fromtimestamp(cls, t, utc, tz):
+ """Construct a datetime from a POSIX timestamp (like time.time()).
+
+ A timezone info object may be passed in as well.
+ """
+ frac, t = _math.modf(t)
+ us = round(frac * 1e6)
+ if us >= 1000000:
+ t += 1
+ us -= 1000000
+ elif us < 0:
+ t -= 1
+ us += 1000000
+
+ converter = _time.gmtime if utc else _time.localtime
+ y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
+ ss = min(ss, 59) # clamp out leap seconds if the platform has them
+ result = cls(y, m, d, hh, mm, ss, us, tz)
+ if tz is None:
+ # As of version 2015f max fold in IANA database is
+ # 23 hours at 1969-09-30 13:00:00 in Kwajalein.
+ # Let's probe 24 hours in the past to detect a transition:
+ max_fold_seconds = 24 * 3600
+
+ # On Windows localtime_s throws an OSError for negative values,
+ # thus we can't perform fold detection for values of time less
+ # than the max time fold. See comments in _datetimemodule's
+ # version of this method for more details.
+ if t < max_fold_seconds and sys.platform.startswith("win"):
+ return result
+
+ y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
+ probe1 = cls(y, m, d, hh, mm, ss, us, tz)
+ trans = result - probe1 - timedelta(0, max_fold_seconds)
+ if trans.days < 0:
+ y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
+ probe2 = cls(y, m, d, hh, mm, ss, us, tz)
+ if probe2 == result:
+ result._fold = 1
+ else:
+ result = tz.fromutc(result)
+ return result
+
+ @classmethod
+ def fromtimestamp(cls, t, tz=None):
+ """Construct a datetime from a POSIX timestamp (like time.time()).
+
+ A timezone info object may be passed in as well.
+ """
+ _check_tzinfo_arg(tz)
+
+ return cls._fromtimestamp(t, tz is not None, tz)
+
+ @classmethod
+ def utcfromtimestamp(cls, t):
+ """Construct a naive UTC datetime from a POSIX timestamp."""
+ return cls._fromtimestamp(t, True, None)
+
+ @classmethod
+ def now(cls, tz=None):
+ "Construct a datetime from time.time() and optional time zone info."
+ t = _time.time()
+ return cls.fromtimestamp(t, tz)
+
+ @classmethod
+ def utcnow(cls):
+ "Construct a UTC datetime from time.time()."
+ t = _time.time()
+ return cls.utcfromtimestamp(t)
+
+ @classmethod
+ def combine(cls, date, time, tzinfo=True):
+ "Construct a datetime from a given date and a given time."
+ if not isinstance(date, _date_class):
+ raise TypeError("date argument must be a date instance")
+ if not isinstance(time, _time_class):
+ raise TypeError("time argument must be a time instance")
+ if tzinfo is True:
+ tzinfo = time.tzinfo
+ return cls(date.year, date.month, date.day,
+ time.hour, time.minute, time.second, time.microsecond,
+ tzinfo, fold=time.fold)
+
+ @classmethod
+ def fromisoformat(cls, date_string):
+ """Construct a datetime from the output of datetime.isoformat()."""
+ if not isinstance(date_string, str):
+ raise TypeError('fromisoformat: argument must be str')
+
+ # Split this at the separator
+ dstr = date_string[0:10]
+ tstr = date_string[11:]
+
+ try:
+ date_components = _parse_isoformat_date(dstr)
+ except ValueError:
+ raise ValueError(f'Invalid isoformat string: {date_string!r}')
+
+ if tstr:
+ try:
+ time_components = _parse_isoformat_time(tstr)
+ except ValueError:
+ raise ValueError(f'Invalid isoformat string: {date_string!r}')
+ else:
+ time_components = [0, 0, 0, 0, None]
+
+ return cls(*(date_components + time_components))
+
+ def timetuple(self):
+ "Return local time tuple compatible with time.localtime()."
+ dst = self.dst()
+ if dst is None:
+ dst = -1
+ elif dst:
+ dst = 1
+ else:
+ dst = 0
+ return _build_struct_time(self.year, self.month, self.day,
+ self.hour, self.minute, self.second,
+ dst)
+
+ def _mktime(self):
+ """Return integer POSIX timestamp."""
+ epoch = datetime(1970, 1, 1)
+ max_fold_seconds = 24 * 3600
+ t = (self - epoch) // timedelta(0, 1)
+ def local(u):
+ y, m, d, hh, mm, ss = _time.localtime(u)[:6]
+ return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
+
+ # Our goal is to solve t = local(u) for u.
+ a = local(t) - t
+ u1 = t - a
+ t1 = local(u1)
+ if t1 == t:
+ # We found one solution, but it may not be the one we need.
+ # Look for an earlier solution (if `fold` is 0), or a
+ # later one (if `fold` is 1).
+ u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
+ b = local(u2) - u2
+ if a == b:
+ return u1
+ else:
+ b = t1 - u1
+ assert a != b
+ u2 = t - b
+ t2 = local(u2)
+ if t2 == t:
+ return u2
+ if t1 == t:
+ return u1
+ # We have found both offsets a and b, but neither t - a nor t - b is
+ # a solution. This means t is in the gap.
+ return (max, min)[self.fold](u1, u2)
+
+
+ def timestamp(self):
+ "Return POSIX timestamp as float"
+ if self._tzinfo is None:
+ s = self._mktime()
+ return s + self.microsecond / 1e6
+ else:
+ return (self - _EPOCH).total_seconds()
+
+ def utctimetuple(self):
+ "Return UTC time tuple compatible with time.gmtime()."
+ offset = self.utcoffset()
+ if offset:
+ self -= offset
+ y, m, d = self.year, self.month, self.day
+ hh, mm, ss = self.hour, self.minute, self.second
+ return _build_struct_time(y, m, d, hh, mm, ss, 0)
+
+ def date(self):
+ "Return the date part."
+ return date(self._year, self._month, self._day)
+
+ def time(self):
+ "Return the time part, with tzinfo None."
+ return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
+
+ def timetz(self):
+ "Return the time part, with same tzinfo."
+ return time(self.hour, self.minute, self.second, self.microsecond,
+ self._tzinfo, fold=self.fold)
+
+ def replace(self, year=None, month=None, day=None, hour=None,
+ minute=None, second=None, microsecond=None, tzinfo=True,
+ *, fold=None):
+ """Return a new datetime with new values for the specified fields."""
+ if year is None:
+ year = self.year
+ if month is None:
+ month = self.month
+ if day is None:
+ day = self.day
+ if hour is None:
+ hour = self.hour
+ if minute is None:
+ minute = self.minute
+ if second is None:
+ second = self.second
+ if microsecond is None:
+ microsecond = self.microsecond
+ if tzinfo is True:
+ tzinfo = self.tzinfo
+ if fold is None:
+ fold = self.fold
+ return type(self)(year, month, day, hour, minute, second,
+ microsecond, tzinfo, fold=fold)
+
+ def _local_timezone(self):
+ if self.tzinfo is None:
+ ts = self._mktime()
+ else:
+ ts = (self - _EPOCH) // timedelta(seconds=1)
+ localtm = _time.localtime(ts)
+ local = datetime(*localtm[:6])
+ try:
+ # Extract TZ data if available
+ gmtoff = localtm.tm_gmtoff
+ zone = localtm.tm_zone
+ except AttributeError:
+ delta = local - datetime(*_time.gmtime(ts)[:6])
+ zone = _time.strftime('%Z', localtm)
+ tz = timezone(delta, zone)
+ else:
+ tz = timezone(timedelta(seconds=gmtoff), zone)
+ return tz
+
+ def astimezone(self, tz=None):
+ if tz is None:
+ tz = self._local_timezone()
+ elif not isinstance(tz, tzinfo):
+ raise TypeError("tz argument must be an instance of tzinfo")
+
+ mytz = self.tzinfo
+ if mytz is None:
+ mytz = self._local_timezone()
+ myoffset = mytz.utcoffset(self)
+ else:
+ myoffset = mytz.utcoffset(self)
+ if myoffset is None:
+ mytz = self.replace(tzinfo=None)._local_timezone()
+ myoffset = mytz.utcoffset(self)
+
+ if tz is mytz:
+ return self
+
+ # Convert self to UTC, and attach the new time zone object.
+ utc = (self - myoffset).replace(tzinfo=tz)
+
+ # Convert from UTC to tz's local time.
+ return tz.fromutc(utc)
+
+ # Ways to produce a string.
+
+ def ctime(self):
+ "Return ctime() style string."
+ weekday = self.toordinal() % 7 or 7
+ return "%s %s %2d %02d:%02d:%02d %04d" % (
+ _DAYNAMES[weekday],
+ _MONTHNAMES[self._month],
+ self._day,
+ self._hour, self._minute, self._second,
+ self._year)
+
+ def isoformat(self, sep='T', timespec='auto'):
+ """Return the time formatted according to ISO.
+
+ The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
+ By default, the fractional part is omitted if self.microsecond == 0.
+
+ If self.tzinfo is not None, the UTC offset is also attached, giving a
+ full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
+
+ Optional argument sep specifies the separator between date and
+ time, default 'T'.
+
+ The optional argument timespec specifies the number of additional
+ terms of the time to include.
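+
+ For example:
+
+ >>> datetime(2019, 5, 18, 15, 17, 8, 132263).isoformat()
+ '2019-05-18T15:17:08.132263'
+ >>> datetime(2019, 5, 18, 15, 17).isoformat(sep=' ', timespec='minutes')
+ '2019-05-18 15:17'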
+ """
+ s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
+ _format_time(self._hour, self._minute, self._second,
+ self._microsecond, timespec))
+
+ off = self.utcoffset()
+ tz = _format_offset(off)
+ if tz:
+ s += tz
+
+ return s
+
+ def __repr__(self):
+ """Convert to formal string, for repr()."""
+ L = [self._year, self._month, self._day, # These are never zero
+ self._hour, self._minute, self._second, self._microsecond]
+ if L[-1] == 0:
+ del L[-1]
+ if L[-1] == 0:
+ del L[-1]
+ s = "%s.%s(%s)" % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ ", ".join(map(str, L)))
+ if self._tzinfo is not None:
+ assert s[-1:] == ")"
+ s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
+ if self._fold:
+ assert s[-1:] == ")"
+ s = s[:-1] + ", fold=1)"
+ return s
+
+ def __str__(self):
+ "Convert to string, for str()."
+ return self.isoformat(sep=' ')
+
+ @classmethod
+ def strptime(cls, date_string, format):
+ 'string, format -> new datetime parsed from a string (like time.strptime()).'
+ import _strptime
+ return _strptime._strptime_datetime(cls, date_string, format)
+
+ def utcoffset(self):
+ """Return the timezone offset as timedelta positive east of UTC (negative west of
+ UTC)."""
+ if self._tzinfo is None:
+ return None
+ offset = self._tzinfo.utcoffset(self)
+ _check_utc_offset("utcoffset", offset)
+ return offset
+
+ def tzname(self):
+ """Return the timezone name.
+
+ Note that the name is 100% informational -- there's no requirement that
+ it mean anything in particular. For example, "GMT", "UTC", "-500",
+ "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
+ """
+ if self._tzinfo is None:
+ return None
+ name = self._tzinfo.tzname(self)
+ _check_tzname(name)
+ return name
+
+ def dst(self):
+ """Return 0 if DST is not in effect, or the DST offset (as timedelta
+ positive eastward) if DST is in effect.
+
+ This is purely informational; the DST offset has already been added to
+ the UTC offset returned by utcoffset() if applicable, so there's no
+ need to consult dst() unless you're interested in displaying the DST
+ info.
+ """
+ if self._tzinfo is None:
+ return None
+ offset = self._tzinfo.dst(self)
+ _check_utc_offset("dst", offset)
+ return offset
+
+ # Comparisons of datetime objects with other.
+
+ def __eq__(self, other):
+ if isinstance(other, datetime):
+ return self._cmp(other, allow_mixed=True) == 0
+ elif not isinstance(other, date):
+ return NotImplemented
+ else:
+ return False
+
+ def __le__(self, other):
+ if isinstance(other, datetime):
+ return self._cmp(other) <= 0
+ elif not isinstance(other, date):
+ return NotImplemented
+ else:
+ _cmperror(self, other)
+
+ def __lt__(self, other):
+ if isinstance(other, datetime):
+ return self._cmp(other) < 0
+ elif not isinstance(other, date):
+ return NotImplemented
+ else:
+ _cmperror(self, other)
+
+ def __ge__(self, other):
+ if isinstance(other, datetime):
+ return self._cmp(other) >= 0
+ elif not isinstance(other, date):
+ return NotImplemented
+ else:
+ _cmperror(self, other)
+
+ def __gt__(self, other):
+ if isinstance(other, datetime):
+ return self._cmp(other) > 0
+ elif not isinstance(other, date):
+ return NotImplemented
+ else:
+ _cmperror(self, other)
+
+ def _cmp(self, other, allow_mixed=False):
+ assert isinstance(other, datetime)
+ mytz = self._tzinfo
+ ottz = other._tzinfo
+ myoff = otoff = None
+
+ if mytz is ottz:
+ base_compare = True
+ else:
+ myoff = self.utcoffset()
+ otoff = other.utcoffset()
+ # Assume that allow_mixed means that we are called from __eq__
+ if allow_mixed:
+ if myoff != self.replace(fold=not self.fold).utcoffset():
+ return 2
+ if otoff != other.replace(fold=not other.fold).utcoffset():
+ return 2
+ base_compare = myoff == otoff
+
+ if base_compare:
+ return _cmp((self._year, self._month, self._day,
+ self._hour, self._minute, self._second,
+ self._microsecond),
+ (other._year, other._month, other._day,
+ other._hour, other._minute, other._second,
+ other._microsecond))
+ if myoff is None or otoff is None:
+ if allow_mixed:
+ return 2 # arbitrary non-zero value
+ else:
+ raise TypeError("cannot compare naive and aware datetimes")
+ # XXX What follows could be done more efficiently...
+ diff = self - other # this will take offsets into account
+ if diff.days < 0:
+ return -1
+ return diff and 1 or 0
+
+ def __add__(self, other):
+ "Add a datetime and a timedelta."
+ if not isinstance(other, timedelta):
+ return NotImplemented
+ delta = timedelta(self.toordinal(),
+ hours=self._hour,
+ minutes=self._minute,
+ seconds=self._second,
+ microseconds=self._microsecond)
+ delta += other
+ hour, rem = divmod(delta.seconds, 3600)
+ minute, second = divmod(rem, 60)
+ if 0 < delta.days <= _MAXORDINAL:
+ return datetime.combine(date.fromordinal(delta.days),
+ time(hour, minute, second,
+ delta.microseconds,
+ tzinfo=self._tzinfo))
+ raise OverflowError("result out of range")
+
+ __radd__ = __add__
+
+ def __sub__(self, other):
+ "Subtract two datetimes, or a datetime and a timedelta."
+ if not isinstance(other, datetime):
+ if isinstance(other, timedelta):
+ return self + -other
+ return NotImplemented
+
+ days1 = self.toordinal()
+ days2 = other.toordinal()
+ secs1 = self._second + self._minute * 60 + self._hour * 3600
+ secs2 = other._second + other._minute * 60 + other._hour * 3600
+ base = timedelta(days1 - days2,
+ secs1 - secs2,
+ self._microsecond - other._microsecond)
+ if self._tzinfo is other._tzinfo:
+ return base
+ myoff = self.utcoffset()
+ otoff = other.utcoffset()
+ if myoff == otoff:
+ return base
+ if myoff is None or otoff is None:
+ raise TypeError("cannot mix naive and timezone-aware time")
+ return base + otoff - myoff
+
+ def __hash__(self):
+ if self._hashcode == -1:
+ if self.fold:
+ t = self.replace(fold=0)
+ else:
+ t = self
+ tzoff = t.utcoffset()
+ if tzoff is None:
+ self._hashcode = hash(t._getstate()[0])
+ else:
+ days = _ymd2ord(self.year, self.month, self.day)
+ seconds = self.hour * 3600 + self.minute * 60 + self.second
+ self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
+ return self._hashcode
+
+ # Pickle support.
+
+ def _getstate(self, protocol=3):
+ yhi, ylo = divmod(self._year, 256)
+ us2, us3 = divmod(self._microsecond, 256)
+ us1, us2 = divmod(us2, 256)
+ m = self._month
+ if self._fold and protocol > 3:
+ m += 128
+ basestate = bytes([yhi, ylo, m, self._day,
+ self._hour, self._minute, self._second,
+ us1, us2, us3])
+ if self._tzinfo is None:
+ return (basestate,)
+ else:
+ return (basestate, self._tzinfo)
+
+ def __setstate(self, string, tzinfo):
+ if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
+ raise TypeError("bad tzinfo state arg")
+ (yhi, ylo, m, self._day, self._hour,
+ self._minute, self._second, us1, us2, us3) = string
+ if m > 127:
+ self._fold = 1
+ self._month = m - 128
+ else:
+ self._fold = 0
+ self._month = m
+ self._year = yhi * 256 + ylo
+ self._microsecond = (((us1 << 8) | us2) << 8) | us3
+ self._tzinfo = tzinfo
+
+ def __reduce_ex__(self, protocol):
+ return (self.__class__, self._getstate(protocol))
+
+ def __reduce__(self):
+ return self.__reduce_ex__(2)
+
+
+datetime.min = datetime(1, 1, 1)
+datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
+datetime.resolution = timedelta(microseconds=1)
+
+
+def _isoweek1monday(year):
+ # Helper to calculate the day number of the Monday starting week 1
+ # XXX This could be done more efficiently
+ THURSDAY = 3
+ firstday = _ymd2ord(year, 1, 1)
+ firstweekday = (firstday + 6) % 7 # See weekday() above
+ week1monday = firstday - firstweekday
+ if firstweekday > THURSDAY:
+ week1monday += 7
+ return week1monday
+
+class timezone(tzinfo):
+ __slots__ = '_offset', '_name'
+
+ # Sentinel value to disallow None
+ _Omitted = object()
+ def __new__(cls, offset, name=_Omitted):
+ if not isinstance(offset, timedelta):
+ raise TypeError("offset must be a timedelta")
+ if name is cls._Omitted:
+ if not offset:
+ return cls.utc
+ name = None
+ elif not isinstance(name, str):
+ raise TypeError("name must be a string")
+ if not cls._minoffset <= offset <= cls._maxoffset:
+ raise ValueError("offset must be a timedelta "
+ "strictly between -timedelta(hours=24) and "
+ "timedelta(hours=24).")
+ return cls._create(offset, name)
+
+ @classmethod
+ def _create(cls, offset, name=None):
+ self = tzinfo.__new__(cls)
+ self._offset = offset
+ self._name = name
+ return self
+
+ def __getinitargs__(self):
+ """pickle support"""
+ if self._name is None:
+ return (self._offset,)
+ return (self._offset, self._name)
+
+ def __eq__(self, other):
+ if type(other) != timezone:
+ return False
+ return self._offset == other._offset
+
+ def __hash__(self):
+ return hash(self._offset)
+
+ def __repr__(self):
+ """Convert to formal string, for repr().
+
+ >>> tz = timezone.utc
+ >>> repr(tz)
+ 'datetime.timezone.utc'
+ >>> tz = timezone(timedelta(hours=-5), 'EST')
+ >>> repr(tz)
+ "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
+ """
+ if self is self.utc:
+ return 'datetime.timezone.utc'
+ if self._name is None:
+ return "%s.%s(%r)" % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ self._offset)
+ return "%s.%s(%r, %r)" % (self.__class__.__module__,
+ self.__class__.__qualname__,
+ self._offset, self._name)
+
+ def __str__(self):
+ return self.tzname(None)
+
+ def utcoffset(self, dt):
+ if isinstance(dt, datetime) or dt is None:
+ return self._offset
+ raise TypeError("utcoffset() argument must be a datetime instance"
+ " or None")
+
+ def tzname(self, dt):
+ if isinstance(dt, datetime) or dt is None:
+ if self._name is None:
+ return self._name_from_offset(self._offset)
+ return self._name
+ raise TypeError("tzname() argument must be a datetime instance"
+ " or None")
+
+ def dst(self, dt):
+ if isinstance(dt, datetime) or dt is None:
+ return None
+ raise TypeError("dst() argument must be a datetime instance"
+ " or None")
+
+ def fromutc(self, dt):
+ if isinstance(dt, datetime):
+ if dt.tzinfo is not self:
+ raise ValueError("fromutc: dt.tzinfo "
+ "is not self")
+ return dt + self._offset
+ raise TypeError("fromutc() argument must be a datetime instance"
+ " or None")
+
+ _maxoffset = timedelta(hours=23, minutes=59)
+ _minoffset = -_maxoffset
+
+ @staticmethod
+ def _name_from_offset(delta):
+ if not delta:
+ return 'UTC'
+ if delta < timedelta(0):
+ sign = '-'
+ delta = -delta
+ else:
+ sign = '+'
+ hours, rest = divmod(delta, timedelta(hours=1))
+ minutes, rest = divmod(rest, timedelta(minutes=1))
+ seconds = rest.seconds
+ microseconds = rest.microseconds
+ if microseconds:
+ return (f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
+ f'.{microseconds:06d}')
+ if seconds:
+ return f'UTC{sign}{hours:02d}:{minutes:02d}:{seconds:02d}'
+ return f'UTC{sign}{hours:02d}:{minutes:02d}'
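+
+ # Illustrative examples (not in the original):
+ #
+ #   _name_from_offset(timedelta(0))                     -> 'UTC'
+ #   _name_from_offset(timedelta(hours=-5))              -> 'UTC-05:00'
+ #   _name_from_offset(timedelta(hours=5, minutes=30))   -> 'UTC+05:30'
+ #
+ # str(timezone(offset)) and tzname(None) return these same strings when no
+ # explicit name was given.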
+
+timezone.utc = timezone._create(timedelta(0))
+timezone.min = timezone._create(timezone._minoffset)
+timezone.max = timezone._create(timezone._maxoffset)
+_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
+
+# Some time zone algebra. For a datetime x, let
+# x.n = x stripped of its timezone -- its naive time.
+# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
+# return None
+# x.d = x.dst(), and assuming that doesn't raise an exception or
+# return None
+# x.s = x's standard offset, x.o - x.d
+#
+# Now some derived rules, where k is a duration (timedelta).
+#
+# 1. x.o = x.s + x.d
+# This follows from the definition of x.s.
+#
+# 2. If x and y have the same tzinfo member, x.s = y.s.
+# This is actually a requirement, an assumption we need to make about
+# sane tzinfo classes.
+#
+# 3. The naive UTC time corresponding to x is x.n - x.o.
+# This is again a requirement for a sane tzinfo class.
+#
+# 4. (x+k).s = x.s
+# This follows from #2, and from the fact that datetime+timedelta preserves tzinfo.
+#
+# 5. (x+k).n = x.n + k
+# Again follows from how arithmetic is defined.
+#
+# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
+# (meaning that the various tzinfo methods exist, and don't blow up or return
+# None when called).
+#
+# The function wants to return a datetime y with timezone tz, equivalent to x.
+# x is already in UTC.
+#
+# By #3, we want
+#
+# y.n - y.o = x.n [1]
+#
+# The algorithm starts by attaching tz to x.n, and calling that y. So
+# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
+# becomes true; in effect, we want to solve [2] for k:
+#
+# (y+k).n - (y+k).o = x.n [2]
+#
+# By #1, this is the same as
+#
+# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
+#
+# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
+# Substituting that into [3],
+#
+# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
+# k - (y+k).s - (y+k).d = 0; rearranging,
+# k = (y+k).s + (y+k).d; by #4, (y+k).s == y.s, so
+# k = y.s + (y+k).d
+#
+# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
+# approximate k by ignoring the (y+k).d term at first. Note that k can't be
+# very large, since all offset-returning methods return a duration of magnitude
+# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
+# be 0, so ignoring it has no consequence then.
+#
+# In any case, the new value is
+#
+# z = y + y.s [4]
+#
+# It's helpful to step back and look at [4] from a higher level: it's simply
+# mapping from UTC to tz's standard time.
+#
+# At this point, if
+#
+# z.n - z.o = x.n [5]
+#
+# we have an equivalent time, and are almost done. The insecurity here is
+# at the start of daylight time. Picture US Eastern for concreteness. The wall
+# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
+# sense then. The docs ask that an Eastern tzinfo class consider such a time to
+# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
+# on the day DST starts. We want to return the 1:MM EST spelling because that's
+# the only spelling that makes sense on the local wall clock.
+#
+# In fact, if [5] holds at this point, we do have the standard-time spelling,
+# but that takes a bit of proof. We first prove a stronger result. What's the
+# difference between the LHS and RHS of [5]? Let
+#
+# diff = x.n - (z.n - z.o) [6]
+#
+# Now
+# z.n = by [4]
+# (y + y.s).n = by #5
+# y.n + y.s = since y.n = x.n
+# x.n + y.s = since z and y have the same tzinfo member,
+# y.s = z.s by #2
+# x.n + z.s
+#
+# Plugging that back into [6] gives
+#
+# diff =
+# x.n - ((x.n + z.s) - z.o) = expanding
+# x.n - x.n - z.s + z.o = cancelling
+# - z.s + z.o = by #1
+# z.d
+#
+# So diff = z.d.
+#
+# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
+# spelling we wanted in the endcase described above. We're done. Conversely,
+# if z.d = 0, then we have a UTC equivalent, and are also done.
+#
+# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
+# add to z (in effect, z is in tz's standard time, and we need to shift the
+# local clock into tz's daylight time).
+#
+# Let
+#
+# z' = z + z.d = z + diff [7]
+#
+# and we can again ask whether
+#
+# z'.n - z'.o = x.n [8]
+#
+# If so, we're done. If not, the tzinfo class is insane, according to the
+# assumptions we've made. This also requires a bit of proof. As before, let's
+# compute the difference between the LHS and RHS of [8] (and skipping some of
+# the justifications for the kinds of substitutions we've done several times
+# already):
+#
+# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
+# x.n - (z.n + diff - z'.o) = replacing diff via [6]
+# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
+# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
+# - z.n + z.n - z.o + z'.o = cancel z.n
+# - z.o + z'.o = #1 twice
+# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
+# z'.d - z.d
+#
+# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
+# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
+# return z', not bothering to compute z'.d.
+#
+# How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
+# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
+# would have to change the result dst() returns: we start in DST, and moving
+# a little further into it takes us out of DST.
+#
+# There isn't a sane case where this can happen. The closest it gets is at
+# the end of DST, where there's an hour in UTC with no spelling in a hybrid
+# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
+# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
+# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
+# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
+# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
+# standard time. Since that's what the local clock *does*, we want to map both
+# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
+# in local time, but so it goes -- it's the way the local clock works.
+#
+# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
+# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
+# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
+# (correctly) concludes that z' is not UTC-equivalent to x.
+#
+# Because we know z.d said z was in daylight time (else [5] would have held and
+# we would have stopped then), and we know z.d != z'.d (else [8] would have held
+# and we have stopped then), and there are only 2 possible values dst() can
+# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
+# but the reasoning doesn't depend on the example -- it depends on there being
+# two possible dst() outcomes, one zero and the other non-zero). Therefore
+# z' must be in standard time, and is the spelling we want in this case.
+#
+# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
+# concerned (because it takes z' as being in standard time rather than the
+# daylight time we intend here), but returning it gives the real-life "local
+# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
+# tz.
+#
+# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
+# the 1:MM standard time spelling we want.
+#
+# So how can this break? One of the assumptions must be violated. Two
+# possibilities:
+#
+# 1) [2] effectively says that y.s is invariant across all y belonging to a given
+# time zone. This isn't true if, for political reasons or continental drift,
+# a region decides to change its base offset from UTC.
+#
+# 2) There may be versions of "double daylight" time where the tail end of
+# the analysis gives up a step too early. I haven't thought about that
+# enough to say.
+#
+# In any case, it's clear that the default fromutc() is strong enough to handle
+# "almost all" time zones: so long as the standard offset is invariant, it
+# doesn't matter if daylight time transition points change from year to year, or
+# if daylight time is skipped in some years; it doesn't matter how large or
+# small dst() may get within its bounds; and it doesn't even matter if some
+# perverse time zone returns a negative dst(). So a breaking case must be
+# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
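+#
+# A worked sketch of the above (not part of the original module): a toy
+# "Eastern"-style hybrid tzinfo with hard-coded, hypothetical 2002 transition
+# dates, just to make the 5:MM / 6:MM example concrete.  The class name and
+# the dates are invented for illustration; real US rules compute the
+# transition Sundays per year.
+#
+#     HOUR, ZERO = timedelta(hours=1), timedelta(0)
+#
+#     class ToyEastern(tzinfo):
+#         # DST runs from 2:00 standard time on Apr 7 to 1:00 standard time
+#         # (i.e. 2:00 daylight time) on Oct 27, 2002.
+#         dst_start = datetime(2002, 4, 7, 2)
+#         dst_end = datetime(2002, 10, 27, 1)
+#         def utcoffset(self, dt):
+#             return timedelta(hours=-5) + self.dst(dt)    # x.o = x.s + x.d
+#         def dst(self, dt):
+#             wall = dt.replace(tzinfo=None)
+#             return HOUR if self.dst_start <= wall < self.dst_end else ZERO
+#         def tzname(self, dt):
+#             return "EDT" if self.dst(dt) else "EST"
+#
+#     eastern = ToyEastern()
+#     for hh in (5, 6):     # the "unspellable" and the repeated UTC hours
+#         u = datetime(2002, 10, 27, hh, 30, tzinfo=timezone.utc)
+#         print(u.astimezone(eastern))   # both print 2002-10-27 01:30:00-05:00
+#
+# which is exactly the "local clock repeats an hour" behavior derived above,
+# with the inherited default fromutc() doing the work.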
+
+try:
+ from _datetime import *
+except ImportError:
+ pass
+else:
+ # Clean up unused names
+ del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
+ _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
+ _check_date_fields, _check_int_field, _check_time_fields,
+ _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
+ _date_class, _days_before_month, _days_before_year, _days_in_month,
+ _format_time, _format_offset, _is_leap, _isoweek1monday, _math,
+ _ord2ymd, _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
+ _divide_and_round, _parse_isoformat_date, _parse_isoformat_time,
+ _parse_hh_mm_ss_ff)
+ # XXX Since import * above excludes names that start with _,
+ # docstring does not get overwritten. In the future, it may be
+ # appropriate to maintain a single module level docstring and
+ # remove the following line.
+ from _datetime import __doc__
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/__init__.py
new file mode 100644
index 00000000..6831a844
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/__init__.py
@@ -0,0 +1,188 @@
+"""Generic interface to all dbm clones.
+
+Use
+
+ import dbm
+ d = dbm.open(file, 'w', 0o666)
+
+The returned object is a dbm.gnu, dbm.ndbm or dbm.dumb object, dependent on the
+type of database being opened (determined by the whichdb function) in the case
+of an existing dbm. If the dbm does not exist and the create or new flag ('c'
+or 'n') was specified, the dbm type will be determined by the availability of
+the modules (tested in the above order).
+
+It has the following interface (key and data are strings):
+
+ d[key] = data # store data at key (may override data at
+ # existing key)
+ data = d[key] # retrieve data at key (raise KeyError if no
+ # such key)
+ del d[key] # delete data stored at key (raises KeyError
+ # if no such key)
+ flag = key in d # true if the key exists
+ list = d.keys() # return a list of all existing keys (slow!)
+
+Future versions may change the order in which implementations are
+tested for existence, and add interfaces to other dbm-like
+implementations.
+"""
+
+__all__ = ['open', 'whichdb', 'error']
+
+import io
+import os
+import struct
+import sys
+
+
+class error(Exception):
+ pass
+
+_names = ['dbm.gnu', 'dbm.ndbm', 'dbm.dumb']
+_defaultmod = None
+_modules = {}
+
+error = (error, OSError)
+
+try:
+ from dbm import ndbm
+except ImportError:
+ ndbm = None
+
+
+def open(file, flag='r', mode=0o666):
+ """Open or create database at path given by *file*.
+
+ Optional argument *flag* can be 'r' (default) for read-only access, 'w'
+ for read-write access of an existing database, 'c' for read-write access
+ to a new or existing database, and 'n' for read-write access to a new
+ database.
+
+ Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
+ only if it doesn't exist; and 'n' always creates a new database.
+ """
+ global _defaultmod
+ if _defaultmod is None:
+ for name in _names:
+ try:
+ mod = __import__(name, fromlist=['open'])
+ except ImportError:
+ continue
+ if not _defaultmod:
+ _defaultmod = mod
+ _modules[name] = mod
+ if not _defaultmod:
+ raise ImportError("no dbm clone found; tried %s" % _names)
+
+ # guess the type of an existing database, if not creating a new one
+ result = whichdb(file) if 'n' not in flag else None
+ if result is None:
+ # db doesn't exist or 'n' flag was specified to create a new db
+ if 'c' in flag or 'n' in flag:
+ # file doesn't exist and the new flag was used so use default type
+ mod = _defaultmod
+ else:
+ raise error[0]("need 'c' or 'n' flag to open new db")
+ elif result == "":
+ # db type cannot be determined
+ raise error[0]("db type could not be determined")
+ elif result not in _modules:
+ raise error[0]("db type is {0}, but the module is not "
+ "available".format(result))
+ else:
+ mod = _modules[result]
+ return mod.open(file, flag, mode)
+
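+# A small usage sketch (not part of this module), assuming only a writable
+# temporary directory: 'c' creates the database if it doesn't exist, str keys
+# and values are encoded to bytes by the chosen backend, and whichdb() reports
+# which backend open() picked:
+#
+#     >>> import dbm, os, tempfile
+#     >>> path = os.path.join(tempfile.mkdtemp(), "demo")
+#     >>> db = dbm.open(path, 'c')            # create if missing, read-write
+#     >>> db['spam'] = 'eggs'
+#     >>> db[b'spam']
+#     b'eggs'
+#     >>> db.close()
+#     >>> dbm.whichdb(path) in ('dbm.gnu', 'dbm.ndbm', 'dbm.dumb')
+#     True
+#     >>> dbm.open(path, 'r')[b'spam']        # 'r' requires an existing db
+#     b'eggs'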
+
+def whichdb(filename):
+ """Guess which db package to use to open a db file.
+
+ Return values:
+
+ - None if the database file can't be read;
+ - empty string if the file can be read but can't be recognized
+ - the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.
+
+ Importing the given module may still fail, and opening the
+ database using that module may still fail.
+ """
+
+ # Check for ndbm first -- this has a .pag and a .dir file
+ try:
+ f = io.open(filename + ".pag", "rb")
+ f.close()
+ f = io.open(filename + ".dir", "rb")
+ f.close()
+ return "dbm.ndbm"
+ except OSError:
+ # some dbm emulations based on Berkeley DB generate a .db file
+ # some do not, but they should be caught by the bsd checks
+ try:
+ f = io.open(filename + ".db", "rb")
+ f.close()
+ # guarantee we can actually open the file using dbm
+ # kind of overkill, but since we are dealing with emulations
+ # it seems like a prudent step
+ if ndbm is not None:
+ d = ndbm.open(filename)
+ d.close()
+ return "dbm.ndbm"
+ except OSError:
+ pass
+
+ # Check for dumbdbm next -- this has a .dir and a .dat file
+ try:
+ # First check for presence of files
+ os.stat(filename + ".dat")
+ size = os.stat(filename + ".dir").st_size
+ # dumbdbm files with no keys are empty
+ if size == 0:
+ return "dbm.dumb"
+ f = io.open(filename + ".dir", "rb")
+ try:
+ if f.read(1) in (b"'", b'"'):
+ return "dbm.dumb"
+ finally:
+ f.close()
+ except OSError:
+ pass
+
+ # See if the file exists, return None if not
+ try:
+ f = io.open(filename, "rb")
+ except OSError:
+ return None
+
+ with f:
+ # Read the start of the file -- the magic number
+ s16 = f.read(16)
+ s = s16[0:4]
+
+ # Return "" if not at least 4 bytes
+ if len(s) != 4:
+ return ""
+
+ # Convert to 4-byte int in native byte order -- return "" if impossible
+ try:
+ (magic,) = struct.unpack("=l", s)
+ except struct.error:
+ return ""
+
+ # Check for GNU dbm
+ if magic in (0x13579ace, 0x13579acd, 0x13579acf):
+ return "dbm.gnu"
+
+ # Later versions of Berkeley db hash file have a 12-byte pad in
+ # front of the file type
+ try:
+ (magic,) = struct.unpack("=l", s16[-4:])
+ except struct.error:
+ return ""
+
+ # Unknown
+ return ""
+
+
+if __name__ == "__main__":
+ for filename in sys.argv[1:]:
+ print(whichdb(filename) or "UNKNOWN", filename)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/dumb.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/dumb.py
new file mode 100644
index 00000000..5064668c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/dumb.py
@@ -0,0 +1,329 @@
+"""A dumb and slow but simple dbm clone.
+
+For database spam, spam.dir contains the index (a text file),
+spam.bak *may* contain a backup of the index (also a text file),
+while spam.dat contains the data (a binary file).
+
+XXX TO DO:
+
+- seems to contain a bug when updating...
+
+- reclaim free space (currently, space once occupied by deleted or expanded
+items is never reused)
+
+- support concurrent access (currently, if two processes take turns making
+updates, they can mess up the index)
+
+- support efficient access to large databases (currently, the whole index
+is read when the database is opened, and some updates rewrite the whole index)
+
+- support opening for read-only (flag = 'm')
+
+"""
+
+import ast as _ast
+import io as _io
+import os as _os
+import collections.abc
+
+__all__ = ["error", "open"]
+
+_BLOCKSIZE = 512
+
+error = OSError
+
+class _Database(collections.abc.MutableMapping):
+
+ # The on-disk directory and data files can remain in mutually
+ # inconsistent states for an arbitrarily long time (see comments
+ # at the end of __setitem__). This is only repaired when _commit()
+ # gets called. One place _commit() gets called is from __del__(),
+ # and if that occurs at program shutdown time, module globals may
+ # already have gotten rebound to None. Since it's crucial that
+ # _commit() finish successfully, we can't ignore shutdown races
+ # here, and _commit() must not reference any globals.
+ _os = _os # for _commit()
+ _io = _io # for _commit()
+
+ def __init__(self, filebasename, mode, flag='c'):
+ self._mode = mode
+ self._readonly = (flag == 'r')
+
+ # The directory file is a text file. Each line looks like
+ # "%r, (%d, %d)\n" % (key, pos, siz)
+ # where key is the string key, pos is the offset into the dat
+ # file of the associated value's first byte, and siz is the number
+ # of bytes in the associated value.
+ self._dirfile = filebasename + '.dir'
+
+ # The data file is a binary file pointed into by the directory
+ # file, and holds the values associated with keys. Each value
+ # begins at a _BLOCKSIZE-aligned byte offset, and is a raw
+ # binary 8-bit string value.
+ self._datfile = filebasename + '.dat'
+ self._bakfile = filebasename + '.bak'
+
+ # The index is an in-memory dict, mirroring the directory file.
+ self._index = None # maps keys to (pos, siz) pairs
+
+ # Handle the creation
+ self._create(flag)
+ self._update(flag)
+
+ def _create(self, flag):
+ if flag == 'n':
+ for filename in (self._datfile, self._bakfile, self._dirfile):
+ try:
+ _os.remove(filename)
+ except OSError:
+ pass
+ # Mod by Jack: create data file if needed
+ try:
+ f = _io.open(self._datfile, 'r', encoding="Latin-1")
+ except OSError:
+ if flag not in ('c', 'n'):
+ import warnings
+ warnings.warn("The database file is missing, the "
+ "semantics of the 'c' flag will be used.",
+ DeprecationWarning, stacklevel=4)
+ with _io.open(self._datfile, 'w', encoding="Latin-1") as f:
+ self._chmod(self._datfile)
+ else:
+ f.close()
+
+ # Read directory file into the in-memory index dict.
+ def _update(self, flag):
+ self._index = {}
+ try:
+ f = _io.open(self._dirfile, 'r', encoding="Latin-1")
+ except OSError:
+ self._modified = not self._readonly
+ if flag not in ('c', 'n'):
+ import warnings
+ warnings.warn("The index file is missing, the "
+ "semantics of the 'c' flag will be used.",
+ DeprecationWarning, stacklevel=4)
+ else:
+ self._modified = False
+ with f:
+ for line in f:
+ line = line.rstrip()
+ key, pos_and_siz_pair = _ast.literal_eval(line)
+ key = key.encode('Latin-1')
+ self._index[key] = pos_and_siz_pair
+
+ # Write the index dict to the directory file. The original directory
+ # file (if any) is renamed with a .bak extension first. If a .bak
+ # file currently exists, it's deleted.
+ def _commit(self):
+ # CAUTION: It's vital that _commit() succeed, and _commit() can
+ # be called from __del__(). Therefore we must never reference a
+ # global in this routine.
+ if self._index is None or not self._modified:
+ return # nothing to do
+
+ try:
+ self._os.unlink(self._bakfile)
+ except OSError:
+ pass
+
+ try:
+ self._os.rename(self._dirfile, self._bakfile)
+ except OSError:
+ pass
+
+ with self._io.open(self._dirfile, 'w', encoding="Latin-1") as f:
+ self._chmod(self._dirfile)
+ for key, pos_and_siz_pair in self._index.items():
+ # Use Latin-1 since it has no qualms with any value in any
+ # position; UTF-8, though, does care sometimes.
+ entry = "%r, %r\n" % (key.decode('Latin-1'), pos_and_siz_pair)
+ f.write(entry)
+
+ sync = _commit
+
+ def _verify_open(self):
+ if self._index is None:
+ raise error('DBM object has already been closed')
+
+ def __getitem__(self, key):
+ if isinstance(key, str):
+ key = key.encode('utf-8')
+ self._verify_open()
+ pos, siz = self._index[key] # may raise KeyError
+ with _io.open(self._datfile, 'rb') as f:
+ f.seek(pos)
+ dat = f.read(siz)
+ return dat
+
+ # Append val to the data file, starting at a _BLOCKSIZE-aligned
+ # offset. The data file is first padded with NUL bytes (if needed)
+ # to get to an aligned offset. Return pair
+ # (starting offset of val, len(val))
+ def _addval(self, val):
+ with _io.open(self._datfile, 'rb+') as f:
+ f.seek(0, 2)
+ pos = int(f.tell())
+ npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
+ f.write(b'\0'*(npos-pos))
+ pos = npos
+ f.write(val)
+ return (pos, len(val))
+
+ # Write val to the data file, starting at offset pos. The caller
+ # is responsible for ensuring that there's enough room starting at
+ # pos to hold val, without overwriting some other value. Return
+ # pair (pos, len(val)).
+ def _setval(self, pos, val):
+ with _io.open(self._datfile, 'rb+') as f:
+ f.seek(pos)
+ f.write(val)
+ return (pos, len(val))
+
+ # key is a new key whose associated value starts in the data file
+ # at offset pos and with length siz. Add an index record to
+ # the in-memory index dict, and append one to the directory file.
+ def _addkey(self, key, pos_and_siz_pair):
+ self._index[key] = pos_and_siz_pair
+ with _io.open(self._dirfile, 'a', encoding="Latin-1") as f:
+ self._chmod(self._dirfile)
+ f.write("%r, %r\n" % (key.decode("Latin-1"), pos_and_siz_pair))
+
+ def __setitem__(self, key, val):
+ if self._readonly:
+ import warnings
+ warnings.warn('The database is opened for reading only',
+ DeprecationWarning, stacklevel=2)
+ if isinstance(key, str):
+ key = key.encode('utf-8')
+ elif not isinstance(key, (bytes, bytearray)):
+ raise TypeError("keys must be bytes or strings")
+ if isinstance(val, str):
+ val = val.encode('utf-8')
+ elif not isinstance(val, (bytes, bytearray)):
+ raise TypeError("values must be bytes or strings")
+ self._verify_open()
+ self._modified = True
+ if key not in self._index:
+ self._addkey(key, self._addval(val))
+ else:
+ # See whether the new value is small enough to fit in the
+ # (padded) space currently occupied by the old value.
+ pos, siz = self._index[key]
+ oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
+ newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
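+ # e.g. with _BLOCKSIZE == 512, a 700-byte old value occupies two
+ # blocks (1024 padded bytes), so any new value of up to 1024 bytes
+ # is rewritten in place at the same offset by _setval() below.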
+ if newblocks <= oldblocks:
+ self._index[key] = self._setval(pos, val)
+ else:
+ # The new value doesn't fit in the (padded) space used
+ # by the old value. The blocks used by the old value are
+ # forever lost.
+ self._index[key] = self._addval(val)
+
+ # Note that _index may be out of synch with the directory
+ # file now: _setval() and _addval() don't update the directory
+ # file. This also means that the on-disk directory and data
+ # files are in a mutually inconsistent state, and they'll
+ # remain that way until _commit() is called. Note that this
+ # is a disaster (for the database) if the program crashes
+ # (so that _commit() never gets called).
+
+ def __delitem__(self, key):
+ if self._readonly:
+ import warnings
+ warnings.warn('The database is opened for reading only',
+ DeprecationWarning, stacklevel=2)
+ if isinstance(key, str):
+ key = key.encode('utf-8')
+ self._verify_open()
+ self._modified = True
+ # The blocks used by the associated value are lost.
+ del self._index[key]
+ # XXX It's unclear why we do a _commit() here (the code always
+ # XXX has, so I'm not changing it). __setitem__ doesn't try to
+ # XXX keep the directory file in synch. Why should we? Or
+ # XXX why shouldn't __setitem__?
+ self._commit()
+
+ def keys(self):
+ try:
+ return list(self._index)
+ except TypeError:
+ raise error('DBM object has already been closed') from None
+
+ def items(self):
+ self._verify_open()
+ return [(key, self[key]) for key in self._index.keys()]
+
+ def __contains__(self, key):
+ if isinstance(key, str):
+ key = key.encode('utf-8')
+ try:
+ return key in self._index
+ except TypeError:
+ if self._index is None:
+ raise error('DBM object has already been closed') from None
+ else:
+ raise
+
+ def iterkeys(self):
+ try:
+ return iter(self._index)
+ except TypeError:
+ raise error('DBM object has already been closed') from None
+ __iter__ = iterkeys
+
+ def __len__(self):
+ try:
+ return len(self._index)
+ except TypeError:
+ raise error('DBM object has already been closed') from None
+
+ def close(self):
+ try:
+ self._commit()
+ finally:
+ self._index = self._datfile = self._dirfile = self._bakfile = None
+
+ __del__ = close
+
+ def _chmod(self, file):
+ if hasattr(self._os, 'chmod'):
+ self._os.chmod(file, self._mode)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+def open(file, flag='c', mode=0o666):
+ """Open the database file, filename, and return corresponding object.
+
+ The flag argument, used to control how the database is opened in the
+ other DBM implementations, supports only the semantics of 'c' and 'n'
+ values. Other values will default to the semantics of 'c' value:
+ the database will always opened for update and will be created if it
+ does not exist.
+
+ The optional mode argument is the UNIX mode of the file, used only when
+ the database has to be created. It defaults to octal code 0o666 (and
+ will be modified by the prevailing umask).
+
+ """
+
+ # Modify mode depending on the umask
+ try:
+ um = _os.umask(0)
+ _os.umask(um)
+ except AttributeError:
+ pass
+ else:
+ # Turn off any bits that are set in the umask
+ mode = mode & (~um)
+ if flag not in ('r', 'w', 'c', 'n'):
+ import warnings
+ warnings.warn("Flag must be one of 'r', 'w', 'c', or 'n'",
+ DeprecationWarning, stacklevel=2)
+ return _Database(file, mode, flag=flag)
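+
+# A small sketch (not part of this module) of the on-disk layout described in
+# _Database.__init__ above: after a single store, the .dir index file holds one
+# "%r, (pos, siz)" line, and the value starts at a _BLOCKSIZE-aligned offset in
+# the .dat file:
+#
+#     >>> import dbm.dumb, os, tempfile
+#     >>> base = os.path.join(tempfile.mkdtemp(), "spam")
+#     >>> db = dbm.dumb.open(base, 'c')
+#     >>> db[b'answer'] = b'forty-two'
+#     >>> db.close()                          # _commit() rewrites the index
+#     >>> f = open(base + '.dir', encoding='Latin-1')
+#     >>> f.read()
+#     "'answer', (0, 9)\n"
+#     >>> f.close()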
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/gnu.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/gnu.py
new file mode 100644
index 00000000..b07a1def
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/gnu.py
@@ -0,0 +1,3 @@
+"""Provide the _gdbm module as a dbm submodule."""
+
+from _gdbm import *
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/ndbm.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/ndbm.py
new file mode 100644
index 00000000..23056a29
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dbm/ndbm.py
@@ -0,0 +1,3 @@
+"""Provide the _dbm module as a dbm submodule."""
+
+from _dbm import *
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/decimal.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/decimal.py
new file mode 100644
index 00000000..7746ea26
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/decimal.py
@@ -0,0 +1,11 @@
+
+try:
+ from _decimal import *
+ from _decimal import __doc__
+ from _decimal import __version__
+ from _decimal import __libmpdec_version__
+except ImportError:
+ from _pydecimal import *
+ from _pydecimal import __doc__
+ from _pydecimal import __version__
+ from _pydecimal import __libmpdec_version__
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/difflib.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/difflib.py
new file mode 100644
index 00000000..887c3c26
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/difflib.py
@@ -0,0 +1,2097 @@
+"""
+Module difflib -- helpers for computing deltas between objects.
+
+Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
+ Use SequenceMatcher to return list of the best "good enough" matches.
+
+Function context_diff(a, b):
+ For two lists of strings, return a delta in context diff format.
+
+Function ndiff(a, b):
+ Return a delta: the difference between `a` and `b` (lists of strings).
+
+Function restore(delta, which):
+ Return one of the two sequences that generated an ndiff delta.
+
+Function unified_diff(a, b):
+ For two lists of strings, return a delta in unified diff format.
+
+Class SequenceMatcher:
+ A flexible class for comparing pairs of sequences of any type.
+
+Class Differ:
+ For producing human-readable deltas from sequences of lines of text.
+
+Class HtmlDiff:
+ For producing HTML side by side comparison with change highlights.
+"""
+
+__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
+ 'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
+ 'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match']
+
+from heapq import nlargest as _nlargest
+from collections import namedtuple as _namedtuple
+
+Match = _namedtuple('Match', 'a b size')
+
+def _calculate_ratio(matches, length):
+ if length:
+ return 2.0 * matches / length
+ return 1.0
+
+class SequenceMatcher:
+
+ """
+ SequenceMatcher is a flexible class for comparing pairs of sequences of
+ any type, so long as the sequence elements are hashable. The basic
+ algorithm predates, and is a little fancier than, an algorithm
+ published in the late 1980's by Ratcliff and Obershelp under the
+ hyperbolic name "gestalt pattern matching". The basic idea is to find
+ the longest contiguous matching subsequence that contains no "junk"
+ elements (R-O doesn't address junk). The same idea is then applied
+ recursively to the pieces of the sequences to the left and to the right
+ of the matching subsequence. This does not yield minimal edit
+ sequences, but does tend to yield matches that "look right" to people.
+
+ SequenceMatcher tries to compute a "human-friendly diff" between two
+ sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+ longest *contiguous* & junk-free matching subsequence. That's what
+ catches peoples' eyes. The Windows(tm) windiff has another interesting
+ notion, pairing up elements that appear uniquely in each sequence.
+ That, and the method here, appear to yield more intuitive difference
+ reports than does diff. This method appears to be the least vulnerable
+ to synching up on blocks of "junk lines", though (like blank lines in
+ ordinary text files, or maybe "<P>" lines in HTML files). That may be
+ because this is the only method of the 3 that has a *concept* of
+ "junk" .
+
+ Example, comparing two strings, and considering blanks to be "junk":
+
+ >>> s = SequenceMatcher(lambda x: x == " ",
+ ... "private Thread currentThread;",
+ ... "private volatile Thread currentThread;")
+ >>>
+
+ .ratio() returns a float in [0, 1], measuring the "similarity" of the
+ sequences. As a rule of thumb, a .ratio() value over 0.6 means the
+ sequences are close matches:
+
+ >>> print(round(s.ratio(), 3))
+ 0.866
+ >>>
+
+ If you're only interested in where the sequences match,
+ .get_matching_blocks() is handy:
+
+ >>> for block in s.get_matching_blocks():
+ ... print("a[%d] and b[%d] match for %d elements" % block)
+ a[0] and b[0] match for 8 elements
+ a[8] and b[17] match for 21 elements
+ a[29] and b[38] match for 0 elements
+
+ Note that the last tuple returned by .get_matching_blocks() is always a
+ dummy, (len(a), len(b), 0), and this is the only case in which the last
+ tuple element (number of elements matched) is 0.
+
+ If you want to know how to change the first sequence into the second,
+ use .get_opcodes():
+
+ >>> for opcode in s.get_opcodes():
+ ... print("%6s a[%d:%d] b[%d:%d]" % opcode)
+ equal a[0:8] b[0:8]
+ insert a[8:8] b[8:17]
+ equal a[8:29] b[17:38]
+
+ See the Differ class for a fancy human-friendly file differencer, which
+ uses SequenceMatcher both to compare sequences of lines, and to compare
+ sequences of characters within similar (near-matching) lines.
+
+ See also function get_close_matches() in this module, which shows how
+ simple code building on SequenceMatcher can be used to do useful work.
+
+ Timing: Basic R-O is cubic time worst case and quadratic time expected
+ case. SequenceMatcher is quadratic time for the worst case and has
+ expected-case behavior dependent in a complicated way on how many
+ elements the sequences have in common; best case time is linear.
+
+ Methods:
+
+ __init__(isjunk=None, a='', b='')
+ Construct a SequenceMatcher.
+
+ set_seqs(a, b)
+ Set the two sequences to be compared.
+
+ set_seq1(a)
+ Set the first sequence to be compared.
+
+ set_seq2(b)
+ Set the second sequence to be compared.
+
+ find_longest_match(alo, ahi, blo, bhi)
+ Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
+ get_matching_blocks()
+ Return list of triples describing matching subsequences.
+
+ get_opcodes()
+ Return list of 5-tuples describing how to turn a into b.
+
+ ratio()
+ Return a measure of the sequences' similarity (float in [0,1]).
+
+ quick_ratio()
+ Return an upper bound on .ratio() relatively quickly.
+
+ real_quick_ratio()
+ Return an upper bound on ratio() very quickly.
+ """
+
+ def __init__(self, isjunk=None, a='', b='', autojunk=True):
+ """Construct a SequenceMatcher.
+
+ Optional arg isjunk is None (the default), or a one-argument
+ function that takes a sequence element and returns true iff the
+ element is junk. None is equivalent to passing "lambda x: 0", i.e.
+ no elements are considered to be junk. For example, pass
+ lambda x: x in " \\t"
+ if you're comparing lines as sequences of characters, and don't
+ want to synch up on blanks or hard tabs.
+
+ Optional arg a is the first of two sequences to be compared. By
+ default, an empty string. The elements of a must be hashable. See
+ also .set_seqs() and .set_seq1().
+
+ Optional arg b is the second of two sequences to be compared. By
+ default, an empty string. The elements of b must be hashable. See
+ also .set_seqs() and .set_seq2().
+
+ Optional arg autojunk should be set to False to disable the
+ "automatic junk heuristic" that treats popular elements as junk
+ (see module documentation for more information).
+ """
+
+ # Members:
+ # a
+ # first sequence
+ # b
+ # second sequence; differences are computed as "what do
+ # we need to do to 'a' to change it into 'b'?"
+ # b2j
+ # for x in b, b2j[x] is a list of the indices (into b)
+ # at which x appears; junk and popular elements do not appear
+ # fullbcount
+ # for x in b, fullbcount[x] == the number of times x
+ # appears in b; only materialized if really needed (used
+ # only for computing quick_ratio())
+ # matching_blocks
+ # a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
+ # ascending & non-overlapping in i and in j; terminated by
+ # a dummy (len(a), len(b), 0) sentinel
+ # opcodes
+ # a list of (tag, i1, i2, j1, j2) tuples, where tag is
+ # one of
+ # 'replace' a[i1:i2] should be replaced by b[j1:j2]
+ # 'delete' a[i1:i2] should be deleted
+ # 'insert' b[j1:j2] should be inserted
+ # 'equal' a[i1:i2] == b[j1:j2]
+ # isjunk
+ # a user-supplied function taking a sequence element and
+ # returning true iff the element is "junk" -- this has
+ # subtle but helpful effects on the algorithm, which I'll
+ # get around to writing up someday <0.9 wink>.
+ # DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
+ # bjunk
+ # the items in b for which isjunk is True.
+ # bpopular
+ # nonjunk items in b treated as junk by the heuristic (if used).
+
+ self.isjunk = isjunk
+ self.a = self.b = None
+ self.autojunk = autojunk
+ self.set_seqs(a, b)
+
+ def set_seqs(self, a, b):
+ """Set the two sequences to be compared.
+
+ >>> s = SequenceMatcher()
+ >>> s.set_seqs("abcd", "bcde")
+ >>> s.ratio()
+ 0.75
+ """
+
+ self.set_seq1(a)
+ self.set_seq2(b)
+
+ def set_seq1(self, a):
+ """Set the first sequence to be compared.
+
+ The second sequence to be compared is not changed.
+
+ >>> s = SequenceMatcher(None, "abcd", "bcde")
+ >>> s.ratio()
+ 0.75
+ >>> s.set_seq1("bcde")
+ >>> s.ratio()
+ 1.0
+ >>>
+
+ SequenceMatcher computes and caches detailed information about the
+ second sequence, so if you want to compare one sequence S against
+ many sequences, use .set_seq2(S) once and call .set_seq1(x)
+ repeatedly for each of the other sequences.
+
+ See also set_seqs() and set_seq2().
+ """
+
+ if a is self.a:
+ return
+ self.a = a
+ self.matching_blocks = self.opcodes = None
+
+ def set_seq2(self, b):
+ """Set the second sequence to be compared.
+
+ The first sequence to be compared is not changed.
+
+ >>> s = SequenceMatcher(None, "abcd", "bcde")
+ >>> s.ratio()
+ 0.75
+ >>> s.set_seq2("abcd")
+ >>> s.ratio()
+ 1.0
+ >>>
+
+ SequenceMatcher computes and caches detailed information about the
+ second sequence, so if you want to compare one sequence S against
+ many sequences, use .set_seq2(S) once and call .set_seq1(x)
+ repeatedly for each of the other sequences.
+
+ See also set_seqs() and set_seq1().
+ """
+
+ if b is self.b:
+ return
+ self.b = b
+ self.matching_blocks = self.opcodes = None
+ self.fullbcount = None
+ self.__chain_b()
+
+ # For each element x in b, set b2j[x] to a list of the indices in
+ # b where x appears; the indices are in increasing order; note that
+ # the number of times x appears in b is len(b2j[x]) ...
+ # when self.isjunk is defined, junk elements don't show up in this
+ # map at all, which stops the central find_longest_match method
+ # from starting any matching block at a junk element ...
+ # b2j also does not contain entries for "popular" elements, meaning
+ # elements that account for more than 1 + 1% of the total elements, and
+ # when the sequence is reasonably large (>= 200 elements); this can
+ # be viewed as an adaptive notion of semi-junk, and yields an enormous
+ # speedup when, e.g., comparing program files with hundreds of
+ # instances of "return NULL;" ...
+ # note that this is only called when b changes; so for cross-product
+ # kinds of matches, it's best to call set_seq2 once, then set_seq1
+ # repeatedly
+
+ def __chain_b(self):
+ # Because isjunk is a user-defined (not C) function, and we test
+ # for junk a LOT, it's important to minimize the number of calls.
+ # Before the tricks described here, __chain_b was by far the most
+ # time-consuming routine in the whole module! If anyone sees
+ # Jim Roskind, thank him again for profile.py -- I never would
+ # have guessed that.
+ # The first trick is to build b2j ignoring the possibility
+ # of junk. I.e., we don't call isjunk at all yet. Throwing
+ # out the junk later is much cheaper than building b2j "right"
+ # from the start.
+ b = self.b
+ self.b2j = b2j = {}
+
+ for i, elt in enumerate(b):
+ indices = b2j.setdefault(elt, [])
+ indices.append(i)
+
+ # Purge junk elements
+ self.bjunk = junk = set()
+ isjunk = self.isjunk
+ if isjunk:
+ for elt in b2j.keys():
+ if isjunk(elt):
+ junk.add(elt)
+ for elt in junk: # separate loop avoids separate list of keys
+ del b2j[elt]
+
+ # Purge popular elements that are not junk
+ self.bpopular = popular = set()
+ n = len(b)
+ if self.autojunk and n >= 200:
+ ntest = n // 100 + 1
+ for elt, idxs in b2j.items():
+ if len(idxs) > ntest:
+ popular.add(elt)
+ for elt in popular: # ditto; as fast for 1% deletion
+ del b2j[elt]
+
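+ # A doctest-style sketch (not part of this class) of the autojunk
+ # heuristic described above: in a sequence of 200+ elements, any
+ # element occurring more than (1% + 1) times is dropped from b2j and
+ # recorded in bpopular instead:
+ #
+ #     >>> s = SequenceMatcher(None, "", "x" * 198 + "ab")
+ #     >>> sorted(s.bpopular)
+ #     ['x']
+ #     >>> "x" in s.b2j
+ #     False
+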
+ def find_longest_match(self, alo, ahi, blo, bhi):
+ """Find longest matching block in a[alo:ahi] and b[blo:bhi].
+
+ If isjunk is not defined:
+
+ Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+ alo <= i <= i+k <= ahi
+ blo <= j <= j+k <= bhi
+ and for all (i',j',k') meeting those conditions,
+ k >= k'
+ i <= i'
+ and if i == i', j <= j'
+
+ In other words, of all maximal matching blocks, return one that
+ starts earliest in a, and of all those maximal matching blocks that
+ start earliest in a, return the one that starts earliest in b.
+
+ >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
+ >>> s.find_longest_match(0, 5, 0, 9)
+ Match(a=0, b=4, size=5)
+
+ If isjunk is defined, first the longest matching block is
+ determined as above, but with the additional restriction that no
+ junk element appears in the block. Then that block is extended as
+ far as possible by matching (only) junk elements on both sides. So
+ the resulting block never matches on junk except as identical junk
+ happens to be adjacent to an "interesting" match.
+
+ Here's the same example as before, but considering blanks to be
+ junk. That prevents " abcd" from matching the " abcd" at the tail
+ end of the second sequence directly. Instead only the "abcd" can
+ match, and matches the leftmost "abcd" in the second sequence:
+
+ >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
+ >>> s.find_longest_match(0, 5, 0, 9)
+ Match(a=1, b=0, size=4)
+
+ If no blocks match, return (alo, blo, 0).
+
+ >>> s = SequenceMatcher(None, "ab", "c")
+ >>> s.find_longest_match(0, 2, 0, 1)
+ Match(a=0, b=0, size=0)
+ """
+
+ # CAUTION: stripping common prefix or suffix would be incorrect.
+ # E.g.,
+ # ab
+ # acab
+ # Longest matching block is "ab", but if common prefix is
+ # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ # strip, so ends up claiming that ab is changed to acab by
+ # inserting "ca" in the middle. That's minimal but unintuitive:
+ # "it's obvious" that someone inserted "ac" at the front.
+ # Windiff ends up at the same place as diff, but by pairing up
+ # the unique 'b's and then matching the first two 'a's.
+
+ a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
+ besti, bestj, bestsize = alo, blo, 0
+ # find longest junk-free match
+ # during an iteration of the loop, j2len[j] = length of longest
+ # junk-free match ending with a[i-1] and b[j]
+ j2len = {}
+ nothing = []
+ for i in range(alo, ahi):
+ # look at all instances of a[i] in b; note that because
+ # b2j has no junk keys, the loop is skipped if a[i] is junk
+ j2lenget = j2len.get
+ newj2len = {}
+ for j in b2j.get(a[i], nothing):
+ # a[i] matches b[j]
+ if j < blo:
+ continue
+ if j >= bhi:
+ break
+ k = newj2len[j] = j2lenget(j-1, 0) + 1
+ if k > bestsize:
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ j2len = newj2len
+
+ # Extend the best by non-junk elements on each end. In particular,
+ # "popular" non-junk elements aren't in b2j, which greatly speeds
+ # the inner loop above, but also means "the best" match so far
+ # doesn't contain any junk *or* popular non-junk elements.
+ while besti > alo and bestj > blo and \
+ not isbjunk(b[bestj-1]) and \
+ a[besti-1] == b[bestj-1]:
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ while besti+bestsize < ahi and bestj+bestsize < bhi and \
+ not isbjunk(b[bestj+bestsize]) and \
+ a[besti+bestsize] == b[bestj+bestsize]:
+ bestsize += 1
+
+ # Now that we have a wholly interesting match (albeit possibly
+ # empty!), we may as well suck up the matching junk on each
+ # side of it too. Can't think of a good reason not to, and it
+ # saves post-processing the (possibly considerable) expense of
+ # figuring out what to do with it. In the case of an empty
+ # interesting match, this is clearly the right thing to do,
+ # because no other kind of match is possible in the regions.
+ while besti > alo and bestj > blo and \
+ isbjunk(b[bestj-1]) and \
+ a[besti-1] == b[bestj-1]:
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ while besti+bestsize < ahi and bestj+bestsize < bhi and \
+ isbjunk(b[bestj+bestsize]) and \
+ a[besti+bestsize] == b[bestj+bestsize]:
+ bestsize = bestsize + 1
+
+ return Match(besti, bestj, bestsize)
+
+ def get_matching_blocks(self):
+ """Return list of triples describing matching subsequences.
+
+ Each triple is of the form (i, j, n), and means that
+ a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+ i and in j. New in Python 2.5, it's also guaranteed that if
+ (i, j, n) and (i', j', n') are adjacent triples in the list, and
+ the second is not the last triple in the list, then i+n != i' or
+ j+n != j'. IOW, adjacent triples never describe adjacent equal
+ blocks.
+
+ The last triple is a dummy, (len(a), len(b), 0), and is the only
+ triple with n==0.
+
+ >>> s = SequenceMatcher(None, "abxcd", "abcd")
+ >>> list(s.get_matching_blocks())
+ [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
+ """
+
+ if self.matching_blocks is not None:
+ return self.matching_blocks
+ la, lb = len(self.a), len(self.b)
+
+ # This is most naturally expressed as a recursive algorithm, but
+ # at least one user bumped into extreme use cases that exceeded
+ # the recursion limit on their box. So, now we maintain a list
+ # (`queue`) of blocks we still need to look at, and append partial
+ # results to `matching_blocks` in a loop; the matches are sorted
+ # at the end.
+ queue = [(0, la, 0, lb)]
+ matching_blocks = []
+ while queue:
+ alo, ahi, blo, bhi = queue.pop()
+ i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
+ # a[alo:i] vs b[blo:j] unknown
+ # a[i:i+k] same as b[j:j+k]
+ # a[i+k:ahi] vs b[j+k:bhi] unknown
+ if k: # if k is 0, there was no matching block
+ matching_blocks.append(x)
+ if alo < i and blo < j:
+ queue.append((alo, i, blo, j))
+ if i+k < ahi and j+k < bhi:
+ queue.append((i+k, ahi, j+k, bhi))
+ matching_blocks.sort()
+
+ # It's possible that we have adjacent equal blocks in the
+ # matching_blocks list now. Starting with 2.5, this code was added
+ # to collapse them.
+ i1 = j1 = k1 = 0
+ non_adjacent = []
+ for i2, j2, k2 in matching_blocks:
+ # Is this block adjacent to i1, j1, k1?
+ if i1 + k1 == i2 and j1 + k1 == j2:
+ # Yes, so collapse them -- this just increases the length of
+ # the first block by the length of the second, and the first
+ # block so lengthened remains the block to compare against.
+ k1 += k2
+ else:
+ # Not adjacent. Remember the first block (k1==0 means it's
+ # the dummy we started with), and make the second block the
+ # new block to compare against.
+ if k1:
+ non_adjacent.append((i1, j1, k1))
+ i1, j1, k1 = i2, j2, k2
+ if k1:
+ non_adjacent.append((i1, j1, k1))
+
+ non_adjacent.append( (la, lb, 0) )
+ self.matching_blocks = list(map(Match._make, non_adjacent))
+ return self.matching_blocks
+
+ def get_opcodes(self):
+ """Return list of 5-tuples describing how to turn a into b.
+
+ Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+ has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+ tuple preceding it, and likewise for j1 == the previous j2.
+
+ The tags are strings, with these meanings:
+
+ 'replace': a[i1:i2] should be replaced by b[j1:j2]
+ 'delete': a[i1:i2] should be deleted.
+ Note that j1==j2 in this case.
+ 'insert': b[j1:j2] should be inserted at a[i1:i1].
+ Note that i1==i2 in this case.
+ 'equal': a[i1:i2] == b[j1:j2]
+
+ >>> a = "qabxcd"
+ >>> b = "abycdf"
+ >>> s = SequenceMatcher(None, a, b)
+ >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
+ ... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
+ ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
+ delete a[0:1] (q) b[0:0] ()
+ equal a[1:3] (ab) b[0:2] (ab)
+ replace a[3:4] (x) b[2:3] (y)
+ equal a[4:6] (cd) b[3:5] (cd)
+ insert a[6:6] () b[5:6] (f)
+ """
+
+ if self.opcodes is not None:
+ return self.opcodes
+ i = j = 0
+ self.opcodes = answer = []
+ for ai, bj, size in self.get_matching_blocks():
+ # invariant: we've pumped out correct diffs to change
+ # a[:i] into b[:j], and the next matching block is
+ # a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ # out a diff to change a[i:ai] into b[j:bj], pump out
+ # the matching block, and move (i,j) beyond the match
+ tag = ''
+ if i < ai and j < bj:
+ tag = 'replace'
+ elif i < ai:
+ tag = 'delete'
+ elif j < bj:
+ tag = 'insert'
+ if tag:
+ answer.append( (tag, i, ai, j, bj) )
+ i, j = ai+size, bj+size
+ # the list of matching blocks is terminated by a
+ # sentinel with size 0
+ if size:
+ answer.append( ('equal', ai, i, bj, j) )
+ return answer
+
+ def get_grouped_opcodes(self, n=3):
+ """ Isolate change clusters by eliminating ranges with no changes.
+
+ Return a generator of groups with up to n lines of context.
+ Each group is in the same format as returned by get_opcodes().
+
+ >>> from pprint import pprint
+ >>> a = list(map(str, range(1,40)))
+ >>> b = a[:]
+ >>> b[8:8] = ['i'] # Make an insertion
+ >>> b[20] += 'x' # Make a replacement
+ >>> b[23:28] = [] # Make a deletion
+ >>> b[30] += 'y' # Make another replacement
+ >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
+ [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
+ [('equal', 16, 19, 17, 20),
+ ('replace', 19, 20, 20, 21),
+ ('equal', 20, 22, 21, 23),
+ ('delete', 22, 27, 23, 23),
+ ('equal', 27, 30, 23, 26)],
+ [('equal', 31, 34, 27, 30),
+ ('replace', 34, 35, 30, 31),
+ ('equal', 35, 38, 31, 34)]]
+ """
+
+ codes = self.get_opcodes()
+ if not codes:
+ codes = [("equal", 0, 1, 0, 1)]
+ # Fixup leading and trailing groups if they show no changes.
+ if codes[0][0] == 'equal':
+ tag, i1, i2, j1, j2 = codes[0]
+ codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
+ if codes[-1][0] == 'equal':
+ tag, i1, i2, j1, j2 = codes[-1]
+ codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
+
+ nn = n + n
+ group = []
+ for tag, i1, i2, j1, j2 in codes:
+ # End the current group and start a new one whenever
+ # there is a large range with no changes.
+ if tag == 'equal' and i2-i1 > nn:
+ group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
+ yield group
+ group = []
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ group.append((tag, i1, i2, j1, j2))
+ if group and not (len(group)==1 and group[0][0] == 'equal'):
+ yield group
+
+ def ratio(self):
+ """Return a measure of the sequences' similarity (float in [0,1]).
+
+ Where T is the total number of elements in both sequences, and
+ M is the number of matches, this is 2.0*M / T.
+ Note that this is 1 if the sequences are identical, and 0 if
+ they have nothing in common.
+
+ .ratio() is expensive to compute if you haven't already computed
+ .get_matching_blocks() or .get_opcodes(), in which case you may
+ want to try .quick_ratio() or .real_quick_ratio() first to get an
+ upper bound.
+
+ >>> s = SequenceMatcher(None, "abcd", "bcde")
+ >>> s.ratio()
+ 0.75
+ >>> s.quick_ratio()
+ 0.75
+ >>> s.real_quick_ratio()
+ 1.0
+ """
+
+ matches = sum(triple[-1] for triple in self.get_matching_blocks())
+ return _calculate_ratio(matches, len(self.a) + len(self.b))
+
+ def quick_ratio(self):
+ """Return an upper bound on ratio() relatively quickly.
+
+ This isn't defined beyond that it is an upper bound on .ratio(), and
+ is faster to compute.
+ """
+
+ # viewing a and b as multisets, set matches to the cardinality
+ # of their intersection; this counts the number of matches
+ # without regard to order, so is clearly an upper bound
+ if self.fullbcount is None:
+ self.fullbcount = fullbcount = {}
+ for elt in self.b:
+ fullbcount[elt] = fullbcount.get(elt, 0) + 1
+ fullbcount = self.fullbcount
+ # avail[x] is the number of times x appears in 'b' less the
+ # number of times we've seen it in 'a' so far ... kinda
+ avail = {}
+ availhas, matches = avail.__contains__, 0
+ for elt in self.a:
+ if availhas(elt):
+ numb = avail[elt]
+ else:
+ numb = fullbcount.get(elt, 0)
+ avail[elt] = numb - 1
+ if numb > 0:
+ matches = matches + 1
+ return _calculate_ratio(matches, len(self.a) + len(self.b))
+
+ def real_quick_ratio(self):
+ """Return an upper bound on ratio() very quickly.
+
+ This isn't defined beyond that it is an upper bound on .ratio(), and
+ is faster to compute than either .ratio() or .quick_ratio().
+ """
+
+ la, lb = len(self.a), len(self.b)
+ # can't have more matches than the number of elements in the
+ # shorter sequence
+ return _calculate_ratio(min(la, lb), la + lb)
+
+def get_close_matches(word, possibilities, n=3, cutoff=0.6):
+ """Use SequenceMatcher to return list of the best "good enough" matches.
+
+ word is a sequence for which close matches are desired (typically a
+ string).
+
+ possibilities is a list of sequences against which to match word
+ (typically a list of strings).
+
+ Optional arg n (default 3) is the maximum number of close matches to
+ return. n must be > 0.
+
+ Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
+ that don't score at least that similar to word are ignored.
+
+ The best (no more than n) matches among the possibilities are returned
+ in a list, sorted by similarity score, most similar first.
+
+ >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
+ ['apple', 'ape']
+ >>> import keyword as _keyword
+ >>> get_close_matches("wheel", _keyword.kwlist)
+ ['while']
+ >>> get_close_matches("Apple", _keyword.kwlist)
+ []
+ >>> get_close_matches("accept", _keyword.kwlist)
+ ['except']
+ """
+
+ if not n > 0:
+ raise ValueError("n must be > 0: %r" % (n,))
+ if not 0.0 <= cutoff <= 1.0:
+ raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
+ result = []
+ s = SequenceMatcher()
+ s.set_seq2(word)
+ for x in possibilities:
+ s.set_seq1(x)
+ if s.real_quick_ratio() >= cutoff and \
+ s.quick_ratio() >= cutoff and \
+ s.ratio() >= cutoff:
+ result.append((s.ratio(), x))
+
+ # Move the best scorers to head of list
+ result = _nlargest(n, result)
+ # Strip scores for the best n matches
+ return [x for score, x in result]
+
+def _count_leading(line, ch):
+ """
+ Return number of `ch` characters at the start of `line`.
+
+ Example:
+
+ >>> _count_leading(' abc', ' ')
+ 3
+ """
+
+ i, n = 0, len(line)
+ while i < n and line[i] == ch:
+ i += 1
+ return i
+
+class Differ:
+ r"""
+ Differ is a class for comparing sequences of lines of text, and
+ producing human-readable differences or deltas. Differ uses
+ SequenceMatcher both to compare sequences of lines, and to compare
+ sequences of characters within similar (near-matching) lines.
+
+ Each line of a Differ delta begins with a two-letter code:
+
+ '- ' line unique to sequence 1
+ '+ ' line unique to sequence 2
+ ' ' line common to both sequences
+ '? ' line not present in either input sequence
+
+ Lines beginning with '? ' attempt to guide the eye to intraline
+ differences, and were not present in either input sequence. These lines
+ can be confusing if the sequences contain tab characters.
+
+ Note that Differ makes no claim to produce a *minimal* diff. To the
+ contrary, minimal diffs are often counter-intuitive, because they synch
+ up anywhere possible, sometimes on accidental matches 100 pages apart.
+ Restricting synch points to contiguous matches preserves some notion of
+ locality, at the occasional cost of producing a longer diff.
+
+ Example: Comparing two texts.
+
+ First we set up the texts, sequences of individual single-line strings
+ ending with newlines (such sequences can also be obtained from the
+ `readlines()` method of file-like objects):
+
+ >>> text1 = ''' 1. Beautiful is better than ugly.
+ ... 2. Explicit is better than implicit.
+ ... 3. Simple is better than complex.
+ ... 4. Complex is better than complicated.
+ ... '''.splitlines(keepends=True)
+ >>> len(text1)
+ 4
+ >>> text1[0][-1]
+ '\n'
+ >>> text2 = ''' 1. Beautiful is better than ugly.
+ ... 3. Simple is better than complex.
+ ... 4. Complicated is better than complex.
+ ... 5. Flat is better than nested.
+ ... '''.splitlines(keepends=True)
+
+ Next we instantiate a Differ object:
+
+ >>> d = Differ()
+
+ Note that when instantiating a Differ object we may pass functions to
+ filter out line and character 'junk'. See Differ.__init__ for details.
+
+ Finally, we compare the two:
+
+ >>> result = list(d.compare(text1, text2))
+
+ 'result' is a list of strings, so let's pretty-print it:
+
+ >>> from pprint import pprint as _pprint
+ >>> _pprint(result)
+ [' 1. Beautiful is better than ugly.\n',
+ '- 2. Explicit is better than implicit.\n',
+ '- 3. Simple is better than complex.\n',
+ '+ 3. Simple is better than complex.\n',
+ '? ++\n',
+ '- 4. Complex is better than complicated.\n',
+ '? ^ ---- ^\n',
+ '+ 4. Complicated is better than complex.\n',
+ '? ++++ ^ ^\n',
+ '+ 5. Flat is better than nested.\n']
+
+ As a single multi-line string it looks like this:
+
+ >>> print(''.join(result), end="")
+ 1. Beautiful is better than ugly.
+ - 2. Explicit is better than implicit.
+ - 3. Simple is better than complex.
+ + 3. Simple is better than complex.
+ ? ++
+ - 4. Complex is better than complicated.
+ ? ^ ---- ^
+ + 4. Complicated is better than complex.
+ ? ++++ ^ ^
+ + 5. Flat is better than nested.
+
+ Methods:
+
+ __init__(linejunk=None, charjunk=None)
+ Construct a text differencer, with optional filters.
+
+ compare(a, b)
+ Compare two sequences of lines; generate the resulting delta.
+ """
+
+ def __init__(self, linejunk=None, charjunk=None):
+ """
+ Construct a text differencer, with optional filters.
+
+ The two optional keyword parameters are for filter functions:
+
+ - `linejunk`: A function that should accept a single string argument,
+ and return true iff the string is junk. The module-level function
+ `IS_LINE_JUNK` may be used to filter out lines without visible
+ characters, except for at most one splat ('#'). It is recommended
+ to leave linejunk None; the underlying SequenceMatcher class has
+ an adaptive notion of "noise" lines that's better than any static
+ definition the author has ever been able to craft.
+
+ - `charjunk`: A function that should accept a string of length 1. The
+ module-level function `IS_CHARACTER_JUNK` may be used to filter out
+ whitespace characters (a blank or tab; **note**: bad idea to include
+ newline in this!). Use of IS_CHARACTER_JUNK is recommended.
+ """
+
+ self.linejunk = linejunk
+ self.charjunk = charjunk
+
+ def compare(self, a, b):
+ r"""
+ Compare two sequences of lines; generate the resulting delta.
+
+ Each sequence must contain individual single-line strings ending with
+ newlines. Such sequences can be obtained from the `readlines()` method
+ of file-like objects. The delta generated also consists of newline-
+ terminated strings, ready to be printed as-is via the writelines()
+ method of a file-like object.
+
+ Example:
+
+ >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
+ ... 'ore\ntree\nemu\n'.splitlines(True))),
+ ... end="")
+ - one
+ ? ^
+ + ore
+ ? ^
+ - two
+ - three
+ ? -
+ + tree
+ + emu
+ """
+
+ cruncher = SequenceMatcher(self.linejunk, a, b)
+ for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
+ if tag == 'replace':
+ g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
+ elif tag == 'delete':
+ g = self._dump('-', a, alo, ahi)
+ elif tag == 'insert':
+ g = self._dump('+', b, blo, bhi)
+ elif tag == 'equal':
+ g = self._dump(' ', a, alo, ahi)
+ else:
+ raise ValueError('unknown tag %r' % (tag,))
+
+ yield from g
+
+ def _dump(self, tag, x, lo, hi):
+ """Generate comparison results for a same-tagged range."""
+ for i in range(lo, hi):
+ yield '%s %s' % (tag, x[i])
+
+ def _plain_replace(self, a, alo, ahi, b, blo, bhi):
+ assert alo < ahi and blo < bhi
+ # dump the shorter block first -- reduces the burden on short-term
+ # memory if the blocks are of very different sizes
+ if bhi - blo < ahi - alo:
+ first = self._dump('+', b, blo, bhi)
+ second = self._dump('-', a, alo, ahi)
+ else:
+ first = self._dump('-', a, alo, ahi)
+ second = self._dump('+', b, blo, bhi)
+
+ for g in first, second:
+ yield from g
+
+ def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
+ r"""
+ When replacing one block of lines with another, search the blocks
+ for *similar* lines; the best-matching pair (if any) is used as a
+ synch point, and intraline difference marking is done on the
+ similar pair. Lots of work, but often worth it.
+
+ Example:
+
+ >>> d = Differ()
+ >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
+ ... ['abcdefGhijkl\n'], 0, 1)
+ >>> print(''.join(results), end="")
+ - abcDefghiJkl
+ ? ^ ^ ^
+ + abcdefGhijkl
+ ? ^ ^ ^
+ """
+
+ # don't synch up unless the lines have a similarity score of at
+ # least cutoff; best_ratio tracks the best score seen so far
+ best_ratio, cutoff = 0.74, 0.75
+ cruncher = SequenceMatcher(self.charjunk)
+ eqi, eqj = None, None # 1st indices of equal lines (if any)
+
+ # search for the pair that matches best without being identical
+ # (identical lines must be junk lines, & we don't want to synch up
+ # on junk -- unless we have to)
+ for j in range(blo, bhi):
+ bj = b[j]
+ cruncher.set_seq2(bj)
+ for i in range(alo, ahi):
+ ai = a[i]
+ if ai == bj:
+ if eqi is None:
+ eqi, eqj = i, j
+ continue
+ cruncher.set_seq1(ai)
+ # computing similarity is expensive, so use the quick
+ # upper bounds first -- have seen this speed up messy
+ # compares by a factor of 3.
+ # note that ratio() is only expensive to compute the first
+ # time it's called on a sequence pair; the expensive part
+ # of the computation is cached by cruncher
+ if cruncher.real_quick_ratio() > best_ratio and \
+ cruncher.quick_ratio() > best_ratio and \
+ cruncher.ratio() > best_ratio:
+ best_ratio, best_i, best_j = cruncher.ratio(), i, j
+ if best_ratio < cutoff:
+ # no non-identical "pretty close" pair
+ if eqi is None:
+ # no identical pair either -- treat it as a straight replace
+ yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
+ return
+ # no close pair, but an identical pair -- synch up on that
+ best_i, best_j, best_ratio = eqi, eqj, 1.0
+ else:
+ # there's a close pair, so forget the identical pair (if any)
+ eqi = None
+
+ # a[best_i] very similar to b[best_j]; eqi is None iff they're not
+ # identical
+
+ # pump out diffs from before the synch point
+ yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
+
+ # do intraline marking on the synch pair
+ aelt, belt = a[best_i], b[best_j]
+ if eqi is None:
+ # pump out a '-', '?', '+', '?' quad for the synched lines
+ atags = btags = ""
+ cruncher.set_seqs(aelt, belt)
+ for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
+ la, lb = ai2 - ai1, bj2 - bj1
+ if tag == 'replace':
+ atags += '^' * la
+ btags += '^' * lb
+ elif tag == 'delete':
+ atags += '-' * la
+ elif tag == 'insert':
+ btags += '+' * lb
+ elif tag == 'equal':
+ atags += ' ' * la
+ btags += ' ' * lb
+ else:
+ raise ValueError('unknown tag %r' % (tag,))
+ yield from self._qformat(aelt, belt, atags, btags)
+ else:
+ # the synch pair is identical
+ yield ' ' + aelt
+
+ # pump out diffs from after the synch point
+ yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
+
+ def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
+ g = []
+ if alo < ahi:
+ if blo < bhi:
+ g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
+ else:
+ g = self._dump('-', a, alo, ahi)
+ elif blo < bhi:
+ g = self._dump('+', b, blo, bhi)
+
+ yield from g
+
+ def _qformat(self, aline, bline, atags, btags):
+ r"""
+ Format "?" output and deal with leading tabs.
+
+ Example:
+
+ >>> d = Differ()
+ >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
+ ... ' ^ ^ ^ ', ' ^ ^ ^ ')
+ >>> for line in results: print(repr(line))
+ ...
+ '- \tabcDefghiJkl\n'
+ '? \t ^ ^ ^\n'
+ '+ \tabcdefGhijkl\n'
+ '? \t ^ ^ ^\n'
+ """
+
+ # Can hurt, but will probably help most of the time.
+ common = min(_count_leading(aline, "\t"),
+ _count_leading(bline, "\t"))
+ common = min(common, _count_leading(atags[:common], " "))
+ common = min(common, _count_leading(btags[:common], " "))
+ atags = atags[common:].rstrip()
+ btags = btags[common:].rstrip()
+
+ yield "- " + aline
+ if atags:
+ yield "? %s%s\n" % ("\t" * common, atags)
+
+ yield "+ " + bline
+ if btags:
+ yield "? %s%s\n" % ("\t" * common, btags)
+
+# With respect to junk, an earlier version of ndiff simply refused to
+# *start* a match with a junk element. The result was cases like this:
+# before: private Thread currentThread;
+# after: private volatile Thread currentThread;
+# If you consider whitespace to be junk, the longest contiguous match
+# not starting with junk is "e Thread currentThread". So ndiff reported
+# that "e volatil" was inserted between the 't' and the 'e' in "private".
+# While an accurate view, to people that's absurd. The current version
+# looks for matching blocks that are entirely junk-free, then extends the
+# longest one of those as far as possible but only with matching junk.
+# So now "currentThread" is matched, then extended to suck up the
+# preceding blank; then "private" is matched, and extended to suck up the
+# following blank; then "Thread" is matched; and finally ndiff reports
+# that "volatile " was inserted before "Thread". The only quibble
+# remaining is that perhaps it was really the case that " volatile"
+ # was inserted after "private". I can live with that.
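+#
+# A doctest-style sketch of that behavior (not from the module's own tests),
+# driving SequenceMatcher directly with blanks treated as junk: the junk-free
+# core "currentThread;" is found first and then extended to absorb the
+# preceding blank:
+#
+#     >>> s = SequenceMatcher(lambda x: x == " ",
+#     ...                     "private Thread currentThread;",
+#     ...                     "private volatile Thread currentThread;")
+#     >>> s.find_longest_match(0, 29, 0, 38)
+#     Match(a=14, b=23, size=15)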
+
+import re
+
+def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match):
+ r"""
+ Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
+
+ Examples:
+
+ >>> IS_LINE_JUNK('\n')
+ True
+ >>> IS_LINE_JUNK(' # \n')
+ True
+ >>> IS_LINE_JUNK('hello\n')
+ False
+ """
+
+ return pat(line) is not None
+
+def IS_CHARACTER_JUNK(ch, ws=" \t"):
+ r"""
+ Return 1 for ignorable character: iff `ch` is a space or tab.
+
+ Examples:
+
+ >>> IS_CHARACTER_JUNK(' ')
+ True
+ >>> IS_CHARACTER_JUNK('\t')
+ True
+ >>> IS_CHARACTER_JUNK('\n')
+ False
+ >>> IS_CHARACTER_JUNK('x')
+ False
+ """
+
+ return ch in ws
+
+
+########################################################################
+### Unified Diff
+########################################################################
+
+def _format_range_unified(start, stop):
+ 'Convert range to the "ed" format'
+ # Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning = start + 1 # lines start numbering with one
+ length = stop - start
+ if length == 1:
+ return '{}'.format(beginning)
+ if not length:
+ beginning -= 1 # empty ranges begin at line just before the range
+ return '{},{}'.format(beginning, length)
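+
+# For instance (a sketch, not from the module's tests): a three-line range
+# starting at index 3 renders as '4,3', a one-line range as just the line
+# number, and an empty range points at the line just before it:
+#
+#     >>> _format_range_unified(3, 6)
+#     '4,3'
+#     >>> _format_range_unified(3, 4)
+#     '4'
+#     >>> _format_range_unified(3, 3)
+#     '3,0'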
+
+def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
+ tofiledate='', n=3, lineterm='\n'):
+ r"""
+ Compare two sequences of lines; generate the delta as a unified diff.
+
+ Unified diffs are a compact way of showing line changes and a few
+ lines of context. The number of context lines is set by 'n' which
+ defaults to three.
+
+ By default, the diff control lines (those with ---, +++, or @@) are
+ created with a trailing newline. This is helpful so that inputs
+ created from file.readlines() result in diffs that are suitable for
+ file.writelines() since both the inputs and outputs have trailing
+ newlines.
+
+ For inputs that do not have trailing newlines, set the lineterm
+ argument to "" so that the output will be uniformly newline free.
+
+ The unidiff format normally has a header for filenames and modification
+ times. Any or all of these may be specified using strings for
+ 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+ The modification times are normally expressed in the ISO 8601 format.
+
+ Example:
+
+ >>> for line in unified_diff('one two three four'.split(),
+ ... 'zero one tree four'.split(), 'Original', 'Current',
+ ... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
+ ... lineterm=''):
+ ... print(line) # doctest: +NORMALIZE_WHITESPACE
+ --- Original 2005-01-26 23:30:50
+ +++ Current 2010-04-02 10:20:52
+ @@ -1,4 +1,4 @@
+ +zero
+ one
+ -two
+ -three
+ +tree
+ four
+ """
+
+ _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
+ started = False
+ for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
+ if not started:
+ started = True
+ fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
+ todate = '\t{}'.format(tofiledate) if tofiledate else ''
+ yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
+ yield '+++ {}{}{}'.format(tofile, todate, lineterm)
+
+ first, last = group[0], group[-1]
+ file1_range = _format_range_unified(first[1], last[2])
+ file2_range = _format_range_unified(first[3], last[4])
+ yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
+
+ for tag, i1, i2, j1, j2 in group:
+ if tag == 'equal':
+ for line in a[i1:i2]:
+ yield ' ' + line
+ continue
+ if tag in {'replace', 'delete'}:
+ for line in a[i1:i2]:
+ yield '-' + line
+ if tag in {'replace', 'insert'}:
+ for line in b[j1:j2]:
+ yield '+' + line
+
+
+########################################################################
+### Context Diff
+########################################################################
+
+def _format_range_context(start, stop):
+ 'Convert range to the "ed" format'
+ # Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning = start + 1 # lines start numbering with one
+ length = stop - start
+ if not length:
+ beginning -= 1 # empty ranges begin at line just before the range
+ if length <= 1:
+ return '{}'.format(beginning)
+ return '{},{}'.format(beginning, beginning + length - 1)
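+
+# For example, _format_range_context(3, 4) yields '4' while
+# _format_range_context(3, 7) yields '4,7': the context format reports
+# start,end line numbers rather than start,count.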
+
+# See http://www.unix.org/single_unix_specification/
+def context_diff(a, b, fromfile='', tofile='',
+ fromfiledate='', tofiledate='', n=3, lineterm='\n'):
+ r"""
+ Compare two sequences of lines; generate the delta as a context diff.
+
+ Context diffs are a compact way of showing line changes and a few
+ lines of context. The number of context lines is set by 'n' which
+ defaults to three.
+
+ By default, the diff control lines (those with *** or ---) are
+ created with a trailing newline. This is helpful so that inputs
+ created from file.readlines() result in diffs that are suitable for
+ file.writelines() since both the inputs and outputs have trailing
+ newlines.
+
+ For inputs that do not have trailing newlines, set the lineterm
+ argument to "" so that the output will be uniformly newline free.
+
+ The context diff format normally has a header for filenames and
+ modification times. Any or all of these may be specified using
+ strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+ The modification times are normally expressed in the ISO 8601 format.
+ If not specified, the strings default to blanks.
+
+ Example:
+
+ >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
+ ... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
+ ... end="")
+ *** Original
+ --- Current
+ ***************
+ *** 1,4 ****
+ one
+ ! two
+ ! three
+ four
+ --- 1,4 ----
+ + zero
+ one
+ ! tree
+ four
+ """
+
+ _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
+ prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
+ started = False
+ for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
+ if not started:
+ started = True
+ fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
+ todate = '\t{}'.format(tofiledate) if tofiledate else ''
+ yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
+ yield '--- {}{}{}'.format(tofile, todate, lineterm)
+
+ first, last = group[0], group[-1]
+ yield '***************' + lineterm
+
+ file1_range = _format_range_context(first[1], last[2])
+ yield '*** {} ****{}'.format(file1_range, lineterm)
+
+ if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
+ for tag, i1, i2, _, _ in group:
+ if tag != 'insert':
+ for line in a[i1:i2]:
+ yield prefix[tag] + line
+
+ file2_range = _format_range_context(first[3], last[4])
+ yield '--- {} ----{}'.format(file2_range, lineterm)
+
+ if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
+ for tag, _, _, j1, j2 in group:
+ if tag != 'delete':
+ for line in b[j1:j2]:
+ yield prefix[tag] + line
+
+def _check_types(a, b, *args):
+ # Checking types is weird, but the alternative is garbled output when
+ # someone passes mixed bytes and str to {unified,context}_diff(). E.g.
+ # without this check, passing filenames as bytes results in output like
+ # --- b'oldfile.txt'
+ # +++ b'newfile.txt'
+ # because of how str.format() incorporates bytes objects.
+ if a and not isinstance(a[0], str):
+ raise TypeError('lines to compare must be str, not %s (%r)' %
+ (type(a[0]).__name__, a[0]))
+ if b and not isinstance(b[0], str):
+ raise TypeError('lines to compare must be str, not %s (%r)' %
+ (type(b[0]).__name__, b[0]))
+ for arg in args:
+ if not isinstance(arg, str):
+ raise TypeError('all arguments must be str, not: %r' % (arg,))
+
+def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'',
+ fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'):
+ r"""
+ Compare `a` and `b`, two sequences of lines represented as bytes rather
+ than str. This is a wrapper for `dfunc`, which is typically either
+ unified_diff() or context_diff(). Inputs are losslessly converted to
+ strings so that `dfunc` only has to worry about strings, and encoded
+ back to bytes on return. This is necessary to compare files with
+ unknown or inconsistent encoding. All other inputs (except `n`) must be
+ bytes rather than str.
+ """
+ def decode(s):
+ try:
+ return s.decode('ascii', 'surrogateescape')
+ except AttributeError as err:
+ msg = ('all arguments must be bytes, not %s (%r)' %
+ (type(s).__name__, s))
+ raise TypeError(msg) from err
+ a = list(map(decode, a))
+ b = list(map(decode, b))
+ fromfile = decode(fromfile)
+ tofile = decode(tofile)
+ fromfiledate = decode(fromfiledate)
+ tofiledate = decode(tofiledate)
+ lineterm = decode(lineterm)
+
+ lines = dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm)
+ for line in lines:
+ yield line.encode('ascii', 'surrogateescape')
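+
+# Minimal usage sketch (illustrative only); `out` stands in for any writable
+# binary stream supplied by the caller:
+#
+#     a = [b'hello\n', b'world\n']
+#     b = [b'hello\n', b'there\n']
+#     for line in diff_bytes(unified_diff, a, b, b'old', b'new'):
+#         out.write(line)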
+
+def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
+ r"""
+ Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
+
+ Optional keyword parameters `linejunk` and `charjunk` are for filter
+ functions, or can be None:
+
+ - linejunk: A function that should accept a single string argument and
+ return true iff the string is junk. The default is None, and is
+ recommended; the underlying SequenceMatcher class has an adaptive
+ notion of "noise" lines.
+
+ - charjunk: A function that accepts a character (string of length
+ 1), and returns true iff the character is junk. The default is
+ the module-level function IS_CHARACTER_JUNK, which filters out
+ whitespace characters (a blank or tab; note: it's a bad idea to
+ include newline in this!).
+
+ Tools/scripts/ndiff.py is a command-line front-end to this function.
+
+ Example:
+
+ >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
+ ... 'ore\ntree\nemu\n'.splitlines(keepends=True))
+ >>> print(''.join(diff), end="")
+ - one
+ ? ^
+ + ore
+ ? ^
+ - two
+ - three
+ ? -
+ + tree
+ + emu
+ """
+ return Differ(linejunk, charjunk).compare(a, b)
+
+def _mdiff(fromlines, tolines, context=None, linejunk=None,
+ charjunk=IS_CHARACTER_JUNK):
+ r"""Returns generator yielding marked up from/to side by side differences.
+
+ Arguments:
+    fromlines -- list of text lines to be compared to tolines
+ tolines -- list of text lines to be compared to fromlines
+ context -- number of context lines to display on each side of difference,
+ if None, all from/to text lines will be generated.
+ linejunk -- passed on to ndiff (see ndiff documentation)
+ charjunk -- passed on to ndiff (see ndiff documentation)
+
+ This function returns an iterator which returns a tuple:
+ (from line tuple, to line tuple, boolean flag)
+
+ from/to line tuple -- (line num, line text)
+ line num -- integer or None (to indicate a context separation)
+ line text -- original line text with following markers inserted:
+ '\0+' -- marks start of added text
+ '\0-' -- marks start of deleted text
+ '\0^' -- marks start of changed text
+ '\1' -- marks end of added/deleted/changed text
+
+ boolean flag -- None indicates context separation, True indicates
+ either "from" or "to" line contains a change, otherwise False.
+
+ This function/iterator was originally developed to generate side by side
+ file difference for making HTML pages (see HtmlDiff class for example
+ usage).
+
+ Note, this function utilizes the ndiff function to generate the side by
+ side difference markup. Optional ndiff arguments may be passed to this
+ function and they in turn will be passed to ndiff.
+ """
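+    # Consumption sketch (illustrative): treat a flag of None as a context
+    # separator and translate the '\0+', '\0-', '\0^' and '\1' markers into
+    # whatever markup is needed, e.g.:
+    #
+    #     for fromdata, todata, changed in _mdiff(fromlines, tolines):
+    #         if changed is None:
+    #             ...   # context separator row
+    #         else:
+    #             ...   # fromdata/todata are (line_num, marked_up_text)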
+ import re
+
+ # regular expression for finding intraline change indices
+ change_re = re.compile(r'(\++|\-+|\^+)')
+
+ # create the difference iterator to generate the differences
+ diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
+
+ def _make_line(lines, format_key, side, num_lines=[0,0]):
+ """Returns line of text with user's change markup and line formatting.
+
+ lines -- list of lines from the ndiff generator to produce a line of
+ text from. When producing the line of text to return, the
+ lines used are removed from this list.
+ format_key -- '+' return first line in list with "add" markup around
+ the entire line.
+ '-' return first line in list with "delete" markup around
+ the entire line.
+ '?' return first line in list with add/delete/change
+ intraline markup (indices obtained from second line)
+ None return first line in list with no markup
+        side -- index into the num_lines list (0=from,1=to)
+ num_lines -- from/to current line number. This is NOT intended to be a
+ passed parameter. It is present as a keyword argument to
+ maintain memory of the current line numbers between calls
+ of this function.
+
+ Note, this function is purposefully not defined at the module scope so
+ that data it needs from its parent function (within whose context it
+ is defined) does not need to be of module scope.
+ """
+ num_lines[side] += 1
+ # Handle case where no user markup is to be added, just return line of
+ # text with user's line format to allow for usage of the line number.
+ if format_key is None:
+ return (num_lines[side],lines.pop(0)[2:])
+ # Handle case of intraline changes
+ if format_key == '?':
+ text, markers = lines.pop(0), lines.pop(0)
+ # find intraline changes (store change type and indices in tuples)
+ sub_info = []
+ def record_sub_info(match_object,sub_info=sub_info):
+ sub_info.append([match_object.group(1)[0],match_object.span()])
+ return match_object.group(1)
+ change_re.sub(record_sub_info,markers)
+ # process each tuple inserting our special marks that won't be
+ # noticed by an xml/html escaper.
+ for key,(begin,end) in reversed(sub_info):
+ text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
+ text = text[2:]
+ # Handle case of add/delete entire line
+ else:
+ text = lines.pop(0)[2:]
+ # if line of text is just a newline, insert a space so there is
+ # something for the user to highlight and see.
+ if not text:
+ text = ' '
+ # insert marks that won't be noticed by an xml/html escaper.
+ text = '\0' + format_key + text + '\1'
+ # Return line of text, first allow user's line formatter to do its
+ # thing (such as adding the line number) then replace the special
+        # marks with the user's change markup.
+ return (num_lines[side],text)
+
+ def _line_iterator():
+ """Yields from/to lines of text with a change indication.
+
+ This function is an iterator. It itself pulls lines from a
+ differencing iterator, processes them and yields them. When it can
+ it yields both a "from" and a "to" line, otherwise it will yield one
+ or the other. In addition to yielding the lines of from/to text, a
+ boolean flag is yielded to indicate if the text line(s) have
+ differences in them.
+
+ Note, this function is purposefully not defined at the module scope so
+ that data it needs from its parent function (within whose context it
+ is defined) does not need to be of module scope.
+ """
+ lines = []
+ num_blanks_pending, num_blanks_to_yield = 0, 0
+ while True:
+ # Load up next 4 lines so we can look ahead, create strings which
+ # are a concatenation of the first character of each of the 4 lines
+ # so we can do some very readable comparisons.
+ while len(lines) < 4:
+ lines.append(next(diff_lines_iterator, 'X'))
+ s = ''.join([line[0] for line in lines])
+ if s.startswith('X'):
+ # When no more lines, pump out any remaining blank lines so the
+ # corresponding add/delete lines get a matching blank line so
+ # all line pairs get yielded at the next level.
+ num_blanks_to_yield = num_blanks_pending
+ elif s.startswith('-?+?'):
+ # simple intraline change
+ yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
+ continue
+ elif s.startswith('--++'):
+ # in delete block, add block coming: we do NOT want to get
+ # caught up on blank lines yet, just process the delete line
+ num_blanks_pending -= 1
+ yield _make_line(lines,'-',0), None, True
+ continue
+ elif s.startswith(('--?+', '--+', '- ')):
+ # in delete block and see an intraline change or unchanged line
+ # coming: yield the delete line and then blanks
+ from_line,to_line = _make_line(lines,'-',0), None
+ num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
+ elif s.startswith('-+?'):
+ # intraline change
+ yield _make_line(lines,None,0), _make_line(lines,'?',1), True
+ continue
+ elif s.startswith('-?+'):
+ # intraline change
+ yield _make_line(lines,'?',0), _make_line(lines,None,1), True
+ continue
+ elif s.startswith('-'):
+ # delete FROM line
+ num_blanks_pending -= 1
+ yield _make_line(lines,'-',0), None, True
+ continue
+ elif s.startswith('+--'):
+ # in add block, delete block coming: we do NOT want to get
+ # caught up on blank lines yet, just process the add line
+ num_blanks_pending += 1
+ yield None, _make_line(lines,'+',1), True
+ continue
+ elif s.startswith(('+ ', '+-')):
+ # will be leaving an add block: yield blanks then add line
+ from_line, to_line = None, _make_line(lines,'+',1)
+ num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
+ elif s.startswith('+'):
+ # inside an add block, yield the add line
+ num_blanks_pending += 1
+ yield None, _make_line(lines,'+',1), True
+ continue
+ elif s.startswith(' '):
+ # unchanged text, yield it to both sides
+ yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
+ continue
+ # Catch up on the blank lines so when we yield the next from/to
+ # pair, they are lined up.
+ while(num_blanks_to_yield < 0):
+ num_blanks_to_yield += 1
+ yield None,('','\n'),True
+ while(num_blanks_to_yield > 0):
+ num_blanks_to_yield -= 1
+ yield ('','\n'),None,True
+ if s.startswith('X'):
+ return
+ else:
+ yield from_line,to_line,True
+
+ def _line_pair_iterator():
+ """Yields from/to lines of text with a change indication.
+
+ This function is an iterator. It itself pulls lines from the line
+ iterator. Its difference from that iterator is that this function
+ always yields a pair of from/to text lines (with the change
+ indication). If necessary it will collect single from/to lines
+        until it has a matching from/to pair to yield.
+
+ Note, this function is purposefully not defined at the module scope so
+ that data it needs from its parent function (within whose context it
+ is defined) does not need to be of module scope.
+ """
+ line_iterator = _line_iterator()
+ fromlines,tolines=[],[]
+ while True:
+ # Collecting lines of text until we have a from/to pair
+ while (len(fromlines)==0 or len(tolines)==0):
+ try:
+ from_line, to_line, found_diff = next(line_iterator)
+ except StopIteration:
+ return
+ if from_line is not None:
+ fromlines.append((from_line,found_diff))
+ if to_line is not None:
+ tolines.append((to_line,found_diff))
+ # Once we have a pair, remove them from the collection and yield it
+ from_line, fromDiff = fromlines.pop(0)
+ to_line, to_diff = tolines.pop(0)
+ yield (from_line,to_line,fromDiff or to_diff)
+
+ # Handle case where user does not want context differencing, just yield
+ # them up without doing anything else with them.
+ line_pair_iterator = _line_pair_iterator()
+ if context is None:
+ yield from line_pair_iterator
+ # Handle case where user wants context differencing. We must do some
+ # storage of lines until we know for sure that they are to be yielded.
+ else:
+ context += 1
+ lines_to_write = 0
+ while True:
+ # Store lines up until we find a difference, note use of a
+ # circular queue because we only need to keep around what
+ # we need for context.
+ index, contextLines = 0, [None]*(context)
+ found_diff = False
+ while(found_diff is False):
+ try:
+ from_line, to_line, found_diff = next(line_pair_iterator)
+ except StopIteration:
+ return
+ i = index % context
+ contextLines[i] = (from_line, to_line, found_diff)
+ index += 1
+ # Yield lines that we have collected so far, but first yield
+ # the user's separator.
+ if index > context:
+ yield None, None, None
+ lines_to_write = context
+ else:
+ lines_to_write = index
+ index = 0
+ while(lines_to_write):
+ i = index % context
+ index += 1
+ yield contextLines[i]
+ lines_to_write -= 1
+ # Now yield the context lines after the change
+ lines_to_write = context-1
+ try:
+ while(lines_to_write):
+ from_line, to_line, found_diff = next(line_pair_iterator)
+ # If another change within the context, extend the context
+ if found_diff:
+ lines_to_write = context-1
+ else:
+ lines_to_write -= 1
+ yield from_line, to_line, found_diff
+ except StopIteration:
+ # Catch exception from next() and return normally
+ return
+
+
+_file_template = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+          "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+<html>
+
+<head>
+    <meta http-equiv="Content-Type"
+          content="text/html; charset=%(charset)s" />
+    <title></title>
+    <style type="text/css">%(styles)s
+    </style>
+</head>
+
+<body>
+    %(table)s%(legend)s
+</body>
+
+</html>"""
+
+_styles = """
+ table.diff {font-family:Courier; border:medium;}
+ .diff_header {background-color:#e0e0e0}
+ td.diff_header {text-align:right}
+ .diff_next {background-color:#c0c0c0}
+ .diff_add {background-color:#aaffaa}
+ .diff_chg {background-color:#ffff77}
+ .diff_sub {background-color:#ffaaaa}"""
+
+_table_template = """
+    <table class="diff" id="difflib_chg_%(prefix)s_top"
+           cellspacing="0" cellpadding="0" rules="groups" >
+        <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
+        <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
+        %(header_row)s
+        <tbody>
+%(data_rows)s        </tbody>
+    </table>"""
+
+_legend = """
+    <table class="diff" summary="Legends">
+        <tr> <th colspan="2"> Legends </th> </tr>
+        <tr> <td> <table border="" summary="Colors">
+                      <tr><th> Colors </th> </tr>
+                      <tr><td class="diff_add">&nbsp;Added&nbsp;</td></tr>
+                      <tr><td class="diff_chg">Changed</td> </tr>
+                      <tr><td class="diff_sub">Deleted</td> </tr>
+                  </table></td>
+             <td> <table border="" summary="Links">
+                      <tr><th colspan="2"> Links </th> </tr>
+                      <tr><td>(f)irst change</td> </tr>
+                      <tr><td>(n)ext change</td> </tr>
+                      <tr><td>(t)op</td> </tr>
+                  </table></td> </tr>
+    </table>"""
+
+class HtmlDiff(object):
+ """For producing HTML side by side comparison with change highlights.
+
+ This class can be used to create an HTML table (or a complete HTML file
+ containing the table) showing a side by side, line by line comparison
+ of text with inter-line and intra-line change highlights. The table can
+ be generated in either full or contextual difference mode.
+
+ The following methods are provided for HTML generation:
+
+ make_table -- generates HTML for a single side by side table
+ make_file -- generates complete HTML file with a single side by side table
+
+ See tools/scripts/diff.py for an example usage of this class.
+ """
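+    # Typical use (illustrative sketch):
+    #
+    #     d = HtmlDiff(wrapcolumn=72)
+    #     html = d.make_file('one\ntwo\n'.splitlines(True),
+    #                        'one\nthree\n'.splitlines(True),
+    #                        'before', 'after', context=True, numlines=2)
+    #
+    # make_table() returns just the table fragment for embedding in an
+    # existing page.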
+
+ _file_template = _file_template
+ _styles = _styles
+ _table_template = _table_template
+ _legend = _legend
+ _default_prefix = 0
+
+ def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
+ charjunk=IS_CHARACTER_JUNK):
+ """HtmlDiff instance initializer
+
+ Arguments:
+ tabsize -- tab stop spacing, defaults to 8.
+ wrapcolumn -- column number where lines are broken and wrapped,
+ defaults to None where lines are not wrapped.
+ linejunk,charjunk -- keyword arguments passed into ndiff() (used by
+ HtmlDiff() to generate the side by side HTML differences). See
+ ndiff() documentation for argument default values and descriptions.
+ """
+ self._tabsize = tabsize
+ self._wrapcolumn = wrapcolumn
+ self._linejunk = linejunk
+ self._charjunk = charjunk
+
+ def make_file(self, fromlines, tolines, fromdesc='', todesc='',
+ context=False, numlines=5, *, charset='utf-8'):
+ """Returns HTML file of side by side comparison with change highlights
+
+ Arguments:
+ fromlines -- list of "from" lines
+ tolines -- list of "to" lines
+ fromdesc -- "from" file column header string
+ todesc -- "to" file column header string
+ context -- set to True for contextual differences (defaults to False
+ which shows full differences).
+ numlines -- number of context lines. When context is set True,
+ controls number of lines displayed before and after the change.
+ When context is False, controls the number of lines to place
+ the "next" link anchors before the next change (so click of
+ "next" link jumps to just before the change).
+ charset -- charset of the HTML document
+ """
+
+ return (self._file_template % dict(
+ styles=self._styles,
+ legend=self._legend,
+ table=self.make_table(fromlines, tolines, fromdesc, todesc,
+ context=context, numlines=numlines),
+ charset=charset
+ )).encode(charset, 'xmlcharrefreplace').decode(charset)
+
+ def _tab_newline_replace(self,fromlines,tolines):
+ """Returns from/to line lists with tabs expanded and newlines removed.
+
+ Instead of tab characters being replaced by the number of spaces
+ needed to fill in to the next tab stop, this function will fill
+ the space with tab characters. This is done so that the difference
+ algorithms can identify changes in a file when tabs are replaced by
+ spaces and vice versa. At the end of the HTML generation, the tab
+ characters will be replaced with a nonbreakable space.
+ """
+ def expand_tabs(line):
+ # hide real spaces
+ line = line.replace(' ','\0')
+ # expand tabs into spaces
+ line = line.expandtabs(self._tabsize)
+ # replace spaces from expanded tabs back into tab characters
+ # (we'll replace them with markup after we do differencing)
+ line = line.replace(' ','\t')
+ return line.replace('\0',' ').rstrip('\n')
+ fromlines = [expand_tabs(line) for line in fromlines]
+ tolines = [expand_tabs(line) for line in tolines]
+ return fromlines,tolines
+
+ def _split_line(self,data_list,line_num,text):
+ """Builds list of text lines by splitting text lines at wrap point
+
+ This function will determine if the input text line needs to be
+ wrapped (split) into separate lines. If so, the first wrap point
+ will be determined and the first line appended to the output
+ text line list. This function is used recursively to handle
+ the second part of the split line to further split it.
+ """
+ # if blank line or context separator, just add it to the output list
+ if not line_num:
+ data_list.append((line_num,text))
+ return
+
+ # if line text doesn't need wrapping, just add it to the output list
+ size = len(text)
+ max = self._wrapcolumn
+ if (size <= max) or ((size -(text.count('\0')*3)) <= max):
+ data_list.append((line_num,text))
+ return
+
+ # scan text looking for the wrap point, keeping track if the wrap
+ # point is inside markers
+ i = 0
+ n = 0
+ mark = ''
+ while n < max and i < size:
+ if text[i] == '\0':
+ i += 1
+ mark = text[i]
+ i += 1
+ elif text[i] == '\1':
+ i += 1
+ mark = ''
+ else:
+ i += 1
+ n += 1
+
+ # wrap point is inside text, break it up into separate lines
+ line1 = text[:i]
+ line2 = text[i:]
+
+ # if wrap point is inside markers, place end marker at end of first
+ # line and start marker at beginning of second line because each
+ # line will have its own table tag markup around it.
+ if mark:
+ line1 = line1 + '\1'
+ line2 = '\0' + mark + line2
+
+ # tack on first line onto the output list
+ data_list.append((line_num,line1))
+
+ # use this routine again to wrap the remaining text
+ self._split_line(data_list,'>',line2)
+
+ def _line_wrapper(self,diffs):
+ """Returns iterator that splits (wraps) mdiff text lines"""
+
+ # pull from/to data and flags from mdiff iterator
+ for fromdata,todata,flag in diffs:
+ # check for context separators and pass them through
+ if flag is None:
+ yield fromdata,todata,flag
+ continue
+ (fromline,fromtext),(toline,totext) = fromdata,todata
+ # for each from/to line split it at the wrap column to form
+ # list of text lines.
+ fromlist,tolist = [],[]
+ self._split_line(fromlist,fromline,fromtext)
+ self._split_line(tolist,toline,totext)
+ # yield from/to line in pairs inserting blank lines as
+ # necessary when one side has more wrapped lines
+ while fromlist or tolist:
+ if fromlist:
+ fromdata = fromlist.pop(0)
+ else:
+ fromdata = ('',' ')
+ if tolist:
+ todata = tolist.pop(0)
+ else:
+ todata = ('',' ')
+ yield fromdata,todata,flag
+
+ def _collect_lines(self,diffs):
+ """Collects mdiff output into separate lists
+
+ Before storing the mdiff from/to data into a list, it is converted
+ into a single line of text with HTML markup.
+ """
+
+ fromlist,tolist,flaglist = [],[],[]
+ # pull from/to data and flags from mdiff style iterator
+ for fromdata,todata,flag in diffs:
+ try:
+ # store HTML markup of the lines into the lists
+ fromlist.append(self._format_line(0,flag,*fromdata))
+ tolist.append(self._format_line(1,flag,*todata))
+ except TypeError:
+ # exceptions occur for lines where context separators go
+ fromlist.append(None)
+ tolist.append(None)
+ flaglist.append(flag)
+ return fromlist,tolist,flaglist
+
+ def _format_line(self,side,flag,linenum,text):
+ """Returns HTML markup of "from" / "to" text lines
+
+ side -- 0 or 1 indicating "from" or "to" text
+ flag -- indicates if difference on line
+ linenum -- line number (used for line number column)
+ text -- line text to be marked up
+ """
+ try:
+ linenum = '%d' % linenum
+ id = ' id="%s%s"' % (self._prefix[side],linenum)
+ except TypeError:
+ # handle blank lines where linenum is '>' or ''
+ id = ''
+ # replace those things that would get confused with HTML symbols
+        text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
+
+        # make spaces non-breakable so they don't get compressed or line wrapped
+        text = text.replace(' ','&nbsp;').rstrip()
+
+        return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
+               % (id,linenum,text)
+
+ def _make_prefix(self):
+ """Create unique anchor prefixes"""
+
+ # Generate a unique anchor prefix so multiple tables
+ # can exist on the same HTML page without conflicts.
+ fromprefix = "from%d_" % HtmlDiff._default_prefix
+ toprefix = "to%d_" % HtmlDiff._default_prefix
+ HtmlDiff._default_prefix += 1
+ # store prefixes so line format method has access
+ self._prefix = [fromprefix,toprefix]
+
+ def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
+ """Makes list of "next" links"""
+
+ # all anchor names will be generated using the unique "to" prefix
+ toprefix = self._prefix[1]
+
+ # process change flags, generating middle column of next anchors/links
+ next_id = ['']*len(flaglist)
+ next_href = ['']*len(flaglist)
+ num_chg, in_change = 0, False
+ last = 0
+ for i,flag in enumerate(flaglist):
+ if flag:
+ if not in_change:
+ in_change = True
+ last = i
+ # at the beginning of a change, drop an anchor a few lines
+ # (the context lines) before the change for the previous
+ # link
+ i = max([0,i-numlines])
+ next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
+ # at the beginning of a change, drop a link to the next
+ # change
+ num_chg += 1
+                    next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
+                         toprefix,num_chg)
+ else:
+ in_change = False
+ # check for cases where there is no content to avoid exceptions
+ if not flaglist:
+ flaglist = [False]
+ next_id = ['']
+ next_href = ['']
+ last = 0
+ if context:
+                fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
+                tolist = fromlist
+            else:
+                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
+ # if not a change on first line, drop a link
+ if not flaglist[0]:
+            next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
+        # redo the last link to link to the top
+        next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
+
+ return fromlist,tolist,flaglist,next_href,next_id
+
+ def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
+ numlines=5):
+ """Returns HTML table of side by side comparison with change highlights
+
+ Arguments:
+ fromlines -- list of "from" lines
+ tolines -- list of "to" lines
+ fromdesc -- "from" file column header string
+ todesc -- "to" file column header string
+ context -- set to True for contextual differences (defaults to False
+ which shows full differences).
+ numlines -- number of context lines. When context is set True,
+ controls number of lines displayed before and after the change.
+ When context is False, controls the number of lines to place
+ the "next" link anchors before the next change (so click of
+ "next" link jumps to just before the change).
+ """
+
+ # make unique anchor prefixes so that multiple tables may exist
+ # on the same page without conflict.
+ self._make_prefix()
+
+ # change tabs to spaces before it gets more difficult after we insert
+ # markup
+ fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
+
+ # create diffs iterator which generates side by side from/to data
+ if context:
+ context_lines = numlines
+ else:
+ context_lines = None
+ diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
+ charjunk=self._charjunk)
+
+ # set up iterator to wrap lines that exceed desired width
+ if self._wrapcolumn:
+ diffs = self._line_wrapper(diffs)
+
+ # collect up from/to lines and flags into lists (also format the lines)
+ fromlist,tolist,flaglist = self._collect_lines(diffs)
+
+ # process change flags, generating middle column of next anchors/links
+ fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
+ fromlist,tolist,flaglist,context,numlines)
+
+ s = []
+        fmt = '            <tr><td class="diff_next"%s>%s</td>%s' + \
+              '<td class="diff_next">%s</td>%s</tr>\n'
+ for i in range(len(flaglist)):
+ if flaglist[i] is None:
+                # mdiff yields None on separator lines; skip the bogus ones
+                # generated for the first line
+                if i > 0:
+                    s.append('        </tbody>        \n        <tbody>\n')
+ else:
+ s.append( fmt % (next_id[i],next_href[i],fromlist[i],
+ next_href[i],tolist[i]))
+ if fromdesc or todesc:
+            header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
+                '<th class="diff_next"><br /></th>',
+                '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
+                '<th class="diff_next"><br /></th>',
+                '<th colspan="2" class="diff_header">%s</th>' % todesc)
+ else:
+ header_row = ''
+
+ table = self._table_template % dict(
+ data_rows=''.join(s),
+ header_row=header_row,
+ prefix=self._prefix[1])
+
+        return table.replace('\0+','<span class="diff_add">'). \
+                     replace('\0-','<span class="diff_sub">'). \
+                     replace('\0^','<span class="diff_chg">'). \
+                     replace('\1','</span>'). \
+                     replace('\t','&nbsp;')
+
+del re
+
+def restore(delta, which):
+ r"""
+ Generate one of the two sequences that generated a delta.
+
+ Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
+ lines originating from file 1 or 2 (parameter `which`), stripping off line
+ prefixes.
+
+ Examples:
+
+ >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
+ ... 'ore\ntree\nemu\n'.splitlines(keepends=True))
+ >>> diff = list(diff)
+ >>> print(''.join(restore(diff, 1)), end="")
+ one
+ two
+ three
+ >>> print(''.join(restore(diff, 2)), end="")
+ ore
+ tree
+ emu
+ """
+ try:
+ tag = {1: "- ", 2: "+ "}[int(which)]
+ except KeyError:
+ raise ValueError('unknown delta choice (must be 1 or 2): %r'
+ % which) from None
+ prefixes = (" ", tag)
+ for line in delta:
+ if line[:2] in prefixes:
+ yield line[2:]
+
+def _test():
+ import doctest, difflib
+ return doctest.testmod(difflib)
+
+if __name__ == "__main__":
+ _test()
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dis.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dis.py
new file mode 100644
index 00000000..90ddf4f3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dis.py
@@ -0,0 +1,535 @@
+"""Disassembler of Python byte code into mnemonics."""
+
+import sys
+import types
+import collections
+import io
+
+from opcode import *
+from opcode import __all__ as _opcodes_all
+
+__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
+ "findlinestarts", "findlabels", "show_code",
+ "get_instructions", "Instruction", "Bytecode"] + _opcodes_all
+del _opcodes_all
+
+_have_code = (types.MethodType, types.FunctionType, types.CodeType,
+ classmethod, staticmethod, type)
+
+FORMAT_VALUE = opmap['FORMAT_VALUE']
+
+def _try_compile(source, name):
+ """Attempts to compile the given source, first as an expression and
+ then as a statement if the first approach fails.
+
+ Utility function to accept strings in functions that otherwise
+ expect code objects
+ """
+ try:
+ c = compile(source, name, 'eval')
+ except SyntaxError:
+ c = compile(source, name, 'exec')
+ return c
+
+def dis(x=None, *, file=None, depth=None):
+ """Disassemble classes, methods, functions, and other compiled objects.
+
+ With no argument, disassemble the last traceback.
+
+ Compiled objects currently include generator objects, async generator
+ objects, and coroutine objects, all of which store their code object
+ in a special attribute.
+ """
+ if x is None:
+ distb(file=file)
+ return
+ # Extract functions from methods.
+ if hasattr(x, '__func__'):
+ x = x.__func__
+ # Extract compiled code objects from...
+ if hasattr(x, '__code__'): # ...a function, or
+ x = x.__code__
+ elif hasattr(x, 'gi_code'): #...a generator object, or
+ x = x.gi_code
+ elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or
+ x = x.ag_code
+ elif hasattr(x, 'cr_code'): #...a coroutine.
+ x = x.cr_code
+ # Perform the disassembly.
+ if hasattr(x, '__dict__'): # Class or module
+ items = sorted(x.__dict__.items())
+ for name, x1 in items:
+ if isinstance(x1, _have_code):
+ print("Disassembly of %s:" % name, file=file)
+ try:
+ dis(x1, file=file, depth=depth)
+ except TypeError as msg:
+ print("Sorry:", msg, file=file)
+ print(file=file)
+ elif hasattr(x, 'co_code'): # Code object
+ _disassemble_recursive(x, file=file, depth=depth)
+ elif isinstance(x, (bytes, bytearray)): # Raw bytecode
+ _disassemble_bytes(x, file=file)
+ elif isinstance(x, str): # Source code
+ _disassemble_str(x, file=file, depth=depth)
+ else:
+ raise TypeError("don't know how to disassemble %s objects" %
+ type(x).__name__)
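+
+# Quick example (illustrative):
+#
+#     >>> import dis
+#     >>> dis.dis(compile("a + 1", "<example>", "eval"))
+#
+# prints one row per instruction (LOAD_NAME, LOAD_CONST, BINARY_ADD,
+# RETURN_VALUE for this expression) using the column layout produced by
+# Instruction._disassemble() below.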
+
+def distb(tb=None, *, file=None):
+ """Disassemble a traceback (default: last traceback)."""
+ if tb is None:
+ try:
+ tb = sys.last_traceback
+ except AttributeError:
+ raise RuntimeError("no last traceback to disassemble") from None
+ while tb.tb_next: tb = tb.tb_next
+ disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
+
+# The inspect module interrogates this dictionary to build its
+# list of CO_* constants. It is also used by pretty_flags to
+# turn the co_flags field into a human readable list.
+COMPILER_FLAG_NAMES = {
+ 1: "OPTIMIZED",
+ 2: "NEWLOCALS",
+ 4: "VARARGS",
+ 8: "VARKEYWORDS",
+ 16: "NESTED",
+ 32: "GENERATOR",
+ 64: "NOFREE",
+ 128: "COROUTINE",
+ 256: "ITERABLE_COROUTINE",
+ 512: "ASYNC_GENERATOR",
+}
+
+def pretty_flags(flags):
+ """Return pretty representation of code flags."""
+ names = []
+ for i in range(32):
+        flag = 1<<i
+        if flags & flag:
+            names.append(COMPILER_FLAG_NAMES.get(flag, hex(flag)))
+            flags ^= flag
+            if not flags:
+                break
+    else:
+        names.append(hex(flags))
+    return ", ".join(names)
+
+def _get_code_object(x):
+    """Helper to handle methods, compiled or raw code objects, and strings."""
+    # Extract functions from methods.
+    if hasattr(x, '__func__'):
+        x = x.__func__
+    # Extract compiled code objects from...
+    if hasattr(x, '__code__'):  # ...a function, or
+        x = x.__code__
+    elif hasattr(x, 'gi_code'):  #...a generator object, or
+        x = x.gi_code
+    elif hasattr(x, 'ag_code'):  #...an asynchronous generator object, or
+        x = x.ag_code
+    elif hasattr(x, 'cr_code'):  #...a coroutine.
+        x = x.cr_code
+    # Handle source code.
+    if isinstance(x, str):
+        x = _try_compile(x, "<disassembly>")
+    # By now, if we don't have a code object, we can't disassemble x.
+    if hasattr(x, 'co_code'):
+        return x
+    raise TypeError("don't know how to disassemble %s objects" %
+                    type(x).__name__)
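+
+# For example, pretty_flags(0x43) returns 'OPTIMIZED, NEWLOCALS, NOFREE';
+# bits without a symbolic name are rendered in hex.  code_info() below
+# accepts anything _get_code_object() can normalize: a function, a method,
+# a string of source code, or a raw code object.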
+
+def code_info(x):
+ """Formatted details of methods, functions, or code."""
+ return _format_code_info(_get_code_object(x))
+
+def _format_code_info(co):
+ lines = []
+ lines.append("Name: %s" % co.co_name)
+ lines.append("Filename: %s" % co.co_filename)
+ lines.append("Argument count: %s" % co.co_argcount)
+ lines.append("Kw-only arguments: %s" % co.co_kwonlyargcount)
+ lines.append("Number of locals: %s" % co.co_nlocals)
+ lines.append("Stack size: %s" % co.co_stacksize)
+ lines.append("Flags: %s" % pretty_flags(co.co_flags))
+ if co.co_consts:
+ lines.append("Constants:")
+ for i_c in enumerate(co.co_consts):
+ lines.append("%4d: %r" % i_c)
+ if co.co_names:
+ lines.append("Names:")
+ for i_n in enumerate(co.co_names):
+ lines.append("%4d: %s" % i_n)
+ if co.co_varnames:
+ lines.append("Variable names:")
+ for i_n in enumerate(co.co_varnames):
+ lines.append("%4d: %s" % i_n)
+ if co.co_freevars:
+ lines.append("Free variables:")
+ for i_n in enumerate(co.co_freevars):
+ lines.append("%4d: %s" % i_n)
+ if co.co_cellvars:
+ lines.append("Cell variables:")
+ for i_n in enumerate(co.co_cellvars):
+ lines.append("%4d: %s" % i_n)
+ return "\n".join(lines)
+
+def show_code(co, *, file=None):
+ """Print details of methods, functions, or code to *file*.
+
+ If *file* is not provided, the output is printed on stdout.
+ """
+ print(code_info(co), file=file)
+
+_Instruction = collections.namedtuple("_Instruction",
+ "opname opcode arg argval argrepr offset starts_line is_jump_target")
+
+_Instruction.opname.__doc__ = "Human readable name for operation"
+_Instruction.opcode.__doc__ = "Numeric code for operation"
+_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None"
+_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg"
+_Instruction.argrepr.__doc__ = "Human readable description of operation argument"
+_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence"
+_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None"
+_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False"
+
+_OPNAME_WIDTH = 20
+_OPARG_WIDTH = 5
+
+class Instruction(_Instruction):
+ """Details for a bytecode operation
+
+ Defined fields:
+ opname - human readable name for operation
+ opcode - numeric code for operation
+ arg - numeric argument to operation (if any), otherwise None
+ argval - resolved arg value (if known), otherwise same as arg
+ argrepr - human readable description of operation argument
+ offset - start index of operation within bytecode sequence
+ starts_line - line started by this opcode (if any), otherwise None
+ is_jump_target - True if other code jumps to here, otherwise False
+ """
+
+ def _disassemble(self, lineno_width=3, mark_as_current=False, offset_width=4):
+ """Format instruction details for inclusion in disassembly output
+
+ *lineno_width* sets the width of the line number field (0 omits it)
+ *mark_as_current* inserts a '-->' marker arrow as part of the line
+ *offset_width* sets the width of the instruction offset field
+ """
+ fields = []
+ # Column: Source code line number
+ if lineno_width:
+ if self.starts_line is not None:
+ lineno_fmt = "%%%dd" % lineno_width
+ fields.append(lineno_fmt % self.starts_line)
+ else:
+ fields.append(' ' * lineno_width)
+ # Column: Current instruction indicator
+ if mark_as_current:
+ fields.append('-->')
+ else:
+ fields.append(' ')
+ # Column: Jump target marker
+ if self.is_jump_target:
+ fields.append('>>')
+ else:
+ fields.append(' ')
+ # Column: Instruction offset from start of code sequence
+ fields.append(repr(self.offset).rjust(offset_width))
+ # Column: Opcode name
+ fields.append(self.opname.ljust(_OPNAME_WIDTH))
+ # Column: Opcode argument
+ if self.arg is not None:
+ fields.append(repr(self.arg).rjust(_OPARG_WIDTH))
+ # Column: Opcode argument details
+ if self.argrepr:
+ fields.append('(' + self.argrepr + ')')
+ return ' '.join(fields).rstrip()
+
+
+def get_instructions(x, *, first_line=None):
+ """Iterator for the opcodes in methods, functions or code
+
+ Generates a series of Instruction named tuples giving the details of
+    each operation in the supplied code.
+
+ If *first_line* is not None, it indicates the line number that should
+ be reported for the first source line in the disassembled code.
+ Otherwise, the source line information (if any) is taken directly from
+ the disassembled code object.
+ """
+ co = _get_code_object(x)
+ cell_names = co.co_cellvars + co.co_freevars
+ linestarts = dict(findlinestarts(co))
+ if first_line is not None:
+ line_offset = first_line - co.co_firstlineno
+ else:
+ line_offset = 0
+ return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
+ co.co_consts, cell_names, linestarts,
+ line_offset)
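+
+# Usage sketch (illustrative):
+#
+#     >>> for instr in get_instructions(compile("x = 1", "<s>", "exec")):
+#     ...     print(instr.opname, instr.argrepr)
+#
+# prints LOAD_CONST / STORE_NAME / LOAD_CONST / RETURN_VALUE rows for this
+# snippet.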
+
+def _get_const_info(const_index, const_list):
+ """Helper to get optional details about const references
+
+ Returns the dereferenced constant and its repr if the constant
+ list is defined.
+ Otherwise returns the constant index and its repr().
+ """
+ argval = const_index
+ if const_list is not None:
+ argval = const_list[const_index]
+ return argval, repr(argval)
+
+def _get_name_info(name_index, name_list):
+ """Helper to get optional details about named references
+
+ Returns the dereferenced name as both value and repr if the name
+ list is defined.
+ Otherwise returns the name index and its repr().
+ """
+ argval = name_index
+ if name_list is not None:
+ argval = name_list[name_index]
+ argrepr = argval
+ else:
+ argrepr = repr(argval)
+ return argval, argrepr
+
+
+def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
+ cells=None, linestarts=None, line_offset=0):
+ """Iterate over the instructions in a bytecode string.
+
+ Generates a sequence of Instruction namedtuples giving the details of each
+ opcode. Additional information about the code's runtime environment
+ (e.g. variable names, constants) can be specified using optional
+ arguments.
+
+ """
+ labels = findlabels(code)
+ starts_line = None
+ for offset, op, arg in _unpack_opargs(code):
+ if linestarts is not None:
+ starts_line = linestarts.get(offset, None)
+ if starts_line is not None:
+ starts_line += line_offset
+ is_jump_target = offset in labels
+ argval = None
+ argrepr = ''
+ if arg is not None:
+ # Set argval to the dereferenced value of the argument when
+ # available, and argrepr to the string representation of argval.
+ # _disassemble_bytes needs the string repr of the
+ # raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
+ argval = arg
+ if op in hasconst:
+ argval, argrepr = _get_const_info(arg, constants)
+ elif op in hasname:
+ argval, argrepr = _get_name_info(arg, names)
+ elif op in hasjrel:
+ argval = offset + 2 + arg
+ argrepr = "to " + repr(argval)
+ elif op in haslocal:
+ argval, argrepr = _get_name_info(arg, varnames)
+ elif op in hascompare:
+ argval = cmp_op[arg]
+ argrepr = argval
+ elif op in hasfree:
+ argval, argrepr = _get_name_info(arg, cells)
+ elif op == FORMAT_VALUE:
+ argval = ((None, str, repr, ascii)[arg & 0x3], bool(arg & 0x4))
+ argrepr = ('', 'str', 'repr', 'ascii')[arg & 0x3]
+ if argval[1]:
+ if argrepr:
+ argrepr += ', '
+ argrepr += 'with format'
+ yield Instruction(opname[op], op,
+ arg, argval, argrepr,
+ offset, starts_line, is_jump_target)
+
+def disassemble(co, lasti=-1, *, file=None):
+ """Disassemble a code object."""
+ cell_names = co.co_cellvars + co.co_freevars
+ linestarts = dict(findlinestarts(co))
+ _disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
+ co.co_consts, cell_names, linestarts, file=file)
+
+def _disassemble_recursive(co, *, file=None, depth=None):
+ disassemble(co, file=file)
+ if depth is None or depth > 0:
+ if depth is not None:
+ depth = depth - 1
+ for x in co.co_consts:
+ if hasattr(x, 'co_code'):
+ print(file=file)
+ print("Disassembly of %r:" % (x,), file=file)
+ _disassemble_recursive(x, file=file, depth=depth)
+
+def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
+ constants=None, cells=None, linestarts=None,
+ *, file=None, line_offset=0):
+ # Omit the line number column entirely if we have no line number info
+ show_lineno = linestarts is not None
+ if show_lineno:
+ maxlineno = max(linestarts.values()) + line_offset
+ if maxlineno >= 1000:
+ lineno_width = len(str(maxlineno))
+ else:
+ lineno_width = 3
+ else:
+ lineno_width = 0
+ maxoffset = len(code) - 2
+ if maxoffset >= 10000:
+ offset_width = len(str(maxoffset))
+ else:
+ offset_width = 4
+ for instr in _get_instructions_bytes(code, varnames, names,
+ constants, cells, linestarts,
+ line_offset=line_offset):
+ new_source_line = (show_lineno and
+ instr.starts_line is not None and
+ instr.offset > 0)
+ if new_source_line:
+ print(file=file)
+ is_current_instr = instr.offset == lasti
+ print(instr._disassemble(lineno_width, is_current_instr, offset_width),
+ file=file)
+
+def _disassemble_str(source, **kwargs):
+ """Compile the source string, then disassemble the code object."""
+    _disassemble_recursive(_try_compile(source, '<dis>'), **kwargs)
+
+disco = disassemble # XXX For backwards compatibility
+
+def _unpack_opargs(code):
+ extended_arg = 0
+ for i in range(0, len(code), 2):
+ op = code[i]
+ if op >= HAVE_ARGUMENT:
+ arg = code[i+1] | extended_arg
+ extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
+ else:
+ arg = None
+ yield (i, op, arg)
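+
+# For example, with the standard opcode table,
+# list(_unpack_opargs(bytes([EXTENDED_ARG, 1, opmap['LOAD_CONST'], 2])))
+# is [(0, EXTENDED_ARG, 1), (2, opmap['LOAD_CONST'], 258)]: the EXTENDED_ARG
+# prefix contributes the high byte (1 << 8) to the following instruction.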
+
+def findlabels(code):
+ """Detect all offsets in a byte code which are jump targets.
+
+ Return the list of offsets.
+
+ """
+ labels = []
+ for offset, op, arg in _unpack_opargs(code):
+ if arg is not None:
+ if op in hasjrel:
+ label = offset + 2 + arg
+ elif op in hasjabs:
+ label = arg
+ else:
+ continue
+ if label not in labels:
+ labels.append(label)
+ return labels
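+
+# For example, findlabels(bytes([opmap['JUMP_ABSOLUTE'], 10])) returns [10],
+# since an absolute jump's argument is itself the target offset, while
+# relative jumps resolve to offset + 2 + arg.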
+
+def findlinestarts(code):
+ """Find the offsets in a byte code which are start of lines in the source.
+
+ Generate pairs (offset, lineno) as described in Python/compile.c.
+
+ """
+ byte_increments = code.co_lnotab[0::2]
+ line_increments = code.co_lnotab[1::2]
+
+ lastlineno = None
+ lineno = code.co_firstlineno
+ addr = 0
+ for byte_incr, line_incr in zip(byte_increments, line_increments):
+ if byte_incr:
+ if lineno != lastlineno:
+ yield (addr, lineno)
+ lastlineno = lineno
+ addr += byte_incr
+ if line_incr >= 0x80:
+ # line_increments is an array of 8-bit signed integers
+ line_incr -= 0x100
+ lineno += line_incr
+ if lineno != lastlineno:
+ yield (addr, lineno)
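+
+# For example, a code object with co_firstlineno == 1 and
+# co_lnotab == b'\x06\x01' yields (0, 1) and (6, 2): line 1 starts at
+# offset 0 and line 2 starts at offset 6.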
+
+class Bytecode:
+ """The bytecode operations of a piece of code
+
+ Instantiate this with a function, method, other compiled object, string of
+ code, or a code object (as returned by compile()).
+
+ Iterating over this yields the bytecode operations as Instruction instances.
+ """
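+    # Usage sketch (illustrative):
+    #
+    #     >>> bc = Bytecode("x = 1")
+    #     >>> [instr.opname for instr in bc]
+    #     ['LOAD_CONST', 'STORE_NAME', 'LOAD_CONST', 'RETURN_VALUE']
+    #
+    # bc.dis() returns the same information formatted like dis.dis() output.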
+ def __init__(self, x, *, first_line=None, current_offset=None):
+ self.codeobj = co = _get_code_object(x)
+ if first_line is None:
+ self.first_line = co.co_firstlineno
+ self._line_offset = 0
+ else:
+ self.first_line = first_line
+ self._line_offset = first_line - co.co_firstlineno
+ self._cell_names = co.co_cellvars + co.co_freevars
+ self._linestarts = dict(findlinestarts(co))
+ self._original_object = x
+ self.current_offset = current_offset
+
+ def __iter__(self):
+ co = self.codeobj
+ return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
+ co.co_consts, self._cell_names,
+ self._linestarts,
+ line_offset=self._line_offset)
+
+ def __repr__(self):
+ return "{}({!r})".format(self.__class__.__name__,
+ self._original_object)
+
+ @classmethod
+ def from_traceback(cls, tb):
+ """ Construct a Bytecode from the given traceback """
+ while tb.tb_next:
+ tb = tb.tb_next
+ return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
+
+ def info(self):
+ """Return formatted information about the code object."""
+ return _format_code_info(self.codeobj)
+
+ def dis(self):
+ """Return a formatted view of the bytecode operations."""
+ co = self.codeobj
+ if self.current_offset is not None:
+ offset = self.current_offset
+ else:
+ offset = -1
+ with io.StringIO() as output:
+ _disassemble_bytes(co.co_code, varnames=co.co_varnames,
+ names=co.co_names, constants=co.co_consts,
+ cells=self._cell_names,
+ linestarts=self._linestarts,
+ line_offset=self._line_offset,
+ file=output,
+ lasti=offset)
+ return output.getvalue()
+
+
+def _test():
+ """Simple test program to disassemble a file."""
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
+ args = parser.parse_args()
+ with args.infile as infile:
+ source = infile.read()
+ code = compile(source, args.infile.name, "exec")
+ dis(code)
+
+if __name__ == "__main__":
+ _test()
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/README b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/README
new file mode 100644
index 00000000..408a203b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/README
@@ -0,0 +1,13 @@
+This directory contains the Distutils package.
+
+Full documentation is available at:
+
+ http://docs.python.org/distutils/
+
+The Distutils-SIG web page is also a good starting point:
+
+ http://www.python.org/sigs/distutils-sig/
+
+WARNING: Distutils must remain compatible with 2.3
+
+$Id$
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__init__.py
new file mode 100644
index 00000000..d823d040
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__init__.py
@@ -0,0 +1,13 @@
+"""distutils
+
+The main package for the Python Module Distribution Utilities. Normally
+used from a setup script as
+
+ from distutils.core import setup
+
+ setup (...)
+"""
+
+import sys
+
+__version__ = sys.version[:sys.version.index(' ')]
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/__init__.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..ff2d6648
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/_msvccompiler.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/_msvccompiler.cpython-37.pyc
new file mode 100644
index 00000000..e162e716
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/_msvccompiler.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/archive_util.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/archive_util.cpython-37.pyc
new file mode 100644
index 00000000..e3fd4f33
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/archive_util.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/ccompiler.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/ccompiler.cpython-37.pyc
new file mode 100644
index 00000000..8b57fc81
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/ccompiler.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/cmd.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/cmd.cpython-37.pyc
new file mode 100644
index 00000000..a4c7c4eb
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/cmd.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/config.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/config.cpython-37.pyc
new file mode 100644
index 00000000..dbf530de
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/config.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/core.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/core.cpython-37.pyc
new file mode 100644
index 00000000..f841c35a
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/core.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/debug.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/debug.cpython-37.pyc
new file mode 100644
index 00000000..13e89f0c
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/debug.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dep_util.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dep_util.cpython-37.pyc
new file mode 100644
index 00000000..8f0b555e
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dep_util.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dir_util.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dir_util.cpython-37.pyc
new file mode 100644
index 00000000..a883c507
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dir_util.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dist.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dist.cpython-37.pyc
new file mode 100644
index 00000000..e3159f14
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/dist.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/errors.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/errors.cpython-37.pyc
new file mode 100644
index 00000000..86a76579
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/errors.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/extension.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/extension.cpython-37.pyc
new file mode 100644
index 00000000..db67bf3f
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/extension.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/fancy_getopt.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/fancy_getopt.cpython-37.pyc
new file mode 100644
index 00000000..fafc5237
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/fancy_getopt.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/file_util.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/file_util.cpython-37.pyc
new file mode 100644
index 00000000..e7d9bc6b
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/file_util.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/filelist.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/filelist.cpython-37.pyc
new file mode 100644
index 00000000..8008dddb
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/filelist.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/log.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/log.cpython-37.pyc
new file mode 100644
index 00000000..6dd642af
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/log.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/msvc9compiler.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/msvc9compiler.cpython-37.pyc
new file mode 100644
index 00000000..b2945dfc
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/msvc9compiler.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/spawn.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/spawn.cpython-37.pyc
new file mode 100644
index 00000000..5604c265
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/spawn.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/sysconfig.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/sysconfig.cpython-37.pyc
new file mode 100644
index 00000000..2bc17736
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/sysconfig.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/text_file.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/text_file.cpython-37.pyc
new file mode 100644
index 00000000..bc493db3
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/text_file.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/util.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/util.cpython-37.pyc
new file mode 100644
index 00000000..7d2a241c
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/util.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/version.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/version.cpython-37.pyc
new file mode 100644
index 00000000..eecd1689
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/__pycache__/version.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/_msvccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/_msvccompiler.py
new file mode 100644
index 00000000..84b4ef59
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/_msvccompiler.py
@@ -0,0 +1,574 @@
+"""distutils._msvccompiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for Microsoft Visual Studio 2015.
+
+The module is compatible with VS 2015 and later. You can find legacy support
+for older versions in distutils.msvc9compiler and distutils.msvccompiler.
+"""
+
+# Written by Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+# finding DevStudio (through the registry)
+# ported to VS 2005 and VS 2008 by Christian Heimes
+# ported to VS 2015 by Steve Dower
+
+import os
+import shutil
+import stat
+import subprocess
+import winreg
+
+from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError
+from distutils.ccompiler import CCompiler, gen_lib_options
+from distutils import log
+from distutils.util import get_platform
+
+from itertools import count
+
+def _find_vc2015():
+ try:
+ key = winreg.OpenKeyEx(
+ winreg.HKEY_LOCAL_MACHINE,
+ r"Software\Microsoft\VisualStudio\SxS\VC7",
+ access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
+ )
+ except OSError:
+ log.debug("Visual C++ is not registered")
+ return None, None
+
+ best_version = 0
+ best_dir = None
+ with key:
+ for i in count():
+ try:
+ v, vc_dir, vt = winreg.EnumValue(key, i)
+ except OSError:
+ break
+ if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
+ try:
+ version = int(float(v))
+ except (ValueError, TypeError):
+ continue
+ if version >= 14 and version > best_version:
+ best_version, best_dir = version, vc_dir
+ return best_version, best_dir
+
+def _find_vc2017():
+ """Returns "15, path" based on the result of invoking vswhere.exe
+ If no install is found, returns "None, None"
+
+ The version is returned to avoid unnecessarily changing the function
+ result. It may be ignored when the path is not None.
+
+ If vswhere.exe is not available, by definition, VS 2017 is not
+ installed.
+ """
+ import json
+
+ root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
+ if not root:
+ return None, None
+
+ try:
+ path = subprocess.check_output([
+ os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
+ "-latest",
+ "-prerelease",
+ "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+ "-property", "installationPath",
+ ], encoding="mbcs", errors="strict").strip()
+ except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
+ return None, None
+
+ path = os.path.join(path, "VC", "Auxiliary", "Build")
+ if os.path.isdir(path):
+ return 15, path
+
+ return None, None
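+# Illustrative note (not part of the original module): _find_vc2017() yields a
+# (version, path) pair, for example (values hypothetical)
+#
+#     >>> _find_vc2017()      # VS 2017+ with the C++ workload installed
+#     (15, 'C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build')
+#     >>> _find_vc2017()      # no vswhere.exe or no suitable install
+#     (None, None)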
+
+def _find_vcvarsall(plat_spec):
+ _, best_dir = _find_vc2017()
+ vcruntime = None
+ vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
+ if best_dir:
+ vcredist = os.path.join(best_dir, "..", "..", "redist", "MSVC", "**",
+ "Microsoft.VC141.CRT", "vcruntime140.dll")
+ try:
+ import glob
+ vcruntime = glob.glob(vcredist, recursive=True)[-1]
+ except (ImportError, OSError, LookupError):
+ vcruntime = None
+
+ if not best_dir:
+ best_version, best_dir = _find_vc2015()
+ if best_version:
+ vcruntime = os.path.join(best_dir, 'redist', vcruntime_plat,
+ "Microsoft.VC140.CRT", "vcruntime140.dll")
+
+ if not best_dir:
+ log.debug("No suitable Visual C++ version found")
+ return None, None
+
+ vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
+ if not os.path.isfile(vcvarsall):
+ log.debug("%s cannot be found", vcvarsall)
+ return None, None
+
+ if not vcruntime or not os.path.isfile(vcruntime):
+ log.debug("%s cannot be found", vcruntime)
+ vcruntime = None
+
+ return vcvarsall, vcruntime
+
+def _get_vc_env(plat_spec):
+ if os.getenv("DISTUTILS_USE_SDK"):
+ return {
+ key.lower(): value
+ for key, value in os.environ.items()
+ }
+
+ vcvarsall, vcruntime = _find_vcvarsall(plat_spec)
+ if not vcvarsall:
+ raise DistutilsPlatformError("Unable to find vcvarsall.bat")
+
+ try:
+ out = subprocess.check_output(
+ 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
+ stderr=subprocess.STDOUT,
+ ).decode('utf-16le', errors='replace')
+ except subprocess.CalledProcessError as exc:
+ log.error(exc.output)
+ raise DistutilsPlatformError("Error executing {}"
+ .format(exc.cmd))
+
+ env = {
+ key.lower(): value
+ for key, _, value in
+ (line.partition('=') for line in out.splitlines())
+ if key and value
+ }
+
+ if vcruntime:
+ env['py_vcruntime_redist'] = vcruntime
+ return env
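+# Illustrative note (not part of the original module): _get_vc_env() returns a
+# dict of the environment variables set by vcvarsall.bat, keyed in lower case
+# (e.g. 'path', 'include', 'lib'), plus 'py_vcruntime_redist' when a
+# redistributable vcruntime140.dll was located:
+#
+#     >>> env = _get_vc_env('x86_amd64')               # hypothetical call
+#     >>> 'include' in env and 'lib' in env
+#     True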
+
+def _find_exe(exe, paths=None):
+ """Return path to an MSVC executable program.
+
+ Tries to find the program in several places: first, one of the
+ MSVC program search paths from the registry; next, the directories
+ in the PATH environment variable. If any of those work, return an
+ absolute path that is known to exist. If none of them work, just
+ return the original program name, 'exe'.
+ """
+ if not paths:
+ paths = os.getenv('path').split(os.pathsep)
+ for p in paths:
+ fn = os.path.join(os.path.abspath(p), exe)
+ if os.path.isfile(fn):
+ return fn
+ return exe
+
+# A map keyed by get_platform() return values to values accepted by
+# 'vcvarsall.bat'. Always cross-compile from x86 to work with the
+# lighter-weight MSVC installs that do not include native 64-bit tools.
+PLAT_TO_VCVARS = {
+ 'win32' : 'x86',
+ 'win-amd64' : 'x86_amd64',
+}
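+# Illustrative example (not part of the original module): initialize() looks up
+# the vcvarsall.bat argument for the current distutils platform name, e.g.
+#
+#     >>> PLAT_TO_VCVARS['win-amd64']
+#     'x86_amd64'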
+
+# A set containing the DLLs that are guaranteed to be available for
+# all micro versions of this Python version. Known extension
+# dependencies that are not in this set will be copied to the output
+# path.
+_BUNDLED_DLLS = frozenset(['vcruntime140.dll'])
+
+class MSVCCompiler(CCompiler) :
+ """Concrete class that implements an interface to Microsoft Visual C++,
+ as defined by the CCompiler abstract class."""
+
+ compiler_type = 'msvc'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+ _rc_extensions = ['.rc']
+ _mc_extensions = ['.mc']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = (_c_extensions + _cpp_extensions +
+ _rc_extensions + _mc_extensions)
+ res_extension = '.res'
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ CCompiler.__init__ (self, verbose, dry_run, force)
+ # target platform (.plat_name is consistent with 'bdist')
+ self.plat_name = None
+ self.initialized = False
+
+ def initialize(self, plat_name=None):
+ # multi-init means we would need to check platform same each time...
+ assert not self.initialized, "don't init multiple times"
+ if plat_name is None:
+ plat_name = get_platform()
+ # sanity check for platforms to prevent obscure errors later.
+ if plat_name not in PLAT_TO_VCVARS:
+ raise DistutilsPlatformError("--plat-name must be one of {}"
+ .format(tuple(PLAT_TO_VCVARS)))
+
+ # Get the vcvarsall.bat spec for the requested platform.
+ plat_spec = PLAT_TO_VCVARS[plat_name]
+
+ vc_env = _get_vc_env(plat_spec)
+ if not vc_env:
+ raise DistutilsPlatformError("Unable to find a compatible "
+ "Visual Studio installation.")
+
+ self._paths = vc_env.get('path', '')
+ paths = self._paths.split(os.pathsep)
+ self.cc = _find_exe("cl.exe", paths)
+ self.linker = _find_exe("link.exe", paths)
+ self.lib = _find_exe("lib.exe", paths)
+ self.rc = _find_exe("rc.exe", paths) # resource compiler
+ self.mc = _find_exe("mc.exe", paths) # message compiler
+        self.mt = _find_exe("mt.exe", paths) # manifest tool
+ self._vcruntime_redist = vc_env.get('py_vcruntime_redist', '')
+
+ for dir in vc_env.get('include', '').split(os.pathsep):
+ if dir:
+ self.add_include_dir(dir.rstrip(os.sep))
+
+ for dir in vc_env.get('lib', '').split(os.pathsep):
+ if dir:
+ self.add_library_dir(dir.rstrip(os.sep))
+
+ self.preprocess_options = None
+ # If vcruntime_redist is available, link against it dynamically. Otherwise,
+ # use /MT[d] to build statically, then switch from libucrt[d].lib to ucrt[d].lib
+ # later to dynamically link to ucrtbase but not vcruntime.
+ self.compile_options = [
+ '/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG'
+ ]
+ self.compile_options.append('/MD' if self._vcruntime_redist else '/MT')
+
+ self.compile_options_debug = [
+ '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
+ ]
+
+ ldflags = [
+ '/nologo', '/INCREMENTAL:NO', '/LTCG'
+ ]
+ if not self._vcruntime_redist:
+ ldflags.extend(('/nodefaultlib:libucrt.lib', 'ucrt.lib'))
+
+ ldflags_debug = [
+ '/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
+ ]
+
+ self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
+ self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
+ self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
+ self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
+ self.ldflags_static = [*ldflags]
+ self.ldflags_static_debug = [*ldflags_debug]
+
+ self._ldflags = {
+ (CCompiler.EXECUTABLE, None): self.ldflags_exe,
+ (CCompiler.EXECUTABLE, False): self.ldflags_exe,
+ (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
+ (CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
+ (CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
+ (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
+ (CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
+ (CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
+ (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
+ }
+
+ self.initialized = True
+
+ # -- Worker methods ------------------------------------------------
+
+ def object_filenames(self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ ext_map = {
+ **{ext: self.obj_extension for ext in self.src_extensions},
+ **{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
+ }
+
+ output_dir = output_dir or ''
+
+ def make_out_path(p):
+ base, ext = os.path.splitext(p)
+ if strip_dir:
+ base = os.path.basename(base)
+ else:
+ _, base = os.path.splitdrive(base)
+ if base.startswith((os.path.sep, os.path.altsep)):
+ base = base[1:]
+ try:
+ # XXX: This may produce absurdly long paths. We should check
+ # the length of the result and trim base until we fit within
+ # 260 characters.
+ return os.path.join(output_dir, base + ext_map[ext])
+ except LookupError:
+ # Better to raise an exception instead of silently continuing
+ # and later complain about sources and targets having
+ # different lengths
+ raise CompileError("Don't know how to compile {}".format(p))
+
+ return list(map(make_out_path, source_filenames))
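+    # Illustrative example (not part of the original module): sources map to
+    # .obj/.res outputs under 'output_dir', keeping their path components,
+    # roughly (path separators are platform-dependent):
+    #
+    #     >>> MSVCCompiler().object_filenames(['bar.c', 'app.rc'],
+    #     ...                                 output_dir='build')
+    #     ['build\\bar.obj', 'build\\app.res']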
+
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ if not self.initialized:
+ self.initialize()
+ compile_info = self._setup_compile(output_dir, macros, include_dirs,
+ sources, depends, extra_postargs)
+ macros, objects, extra_postargs, pp_opts, build = compile_info
+
+ compile_opts = extra_preargs or []
+ compile_opts.append('/c')
+ if debug:
+ compile_opts.extend(self.compile_options_debug)
+ else:
+ compile_opts.extend(self.compile_options)
+
+
+ add_cpp_opts = False
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ if debug:
+ # pass the full pathname to MSVC in debug mode,
+ # this allows the debugger to find the source file
+ # without asking the user to browse for it
+ src = os.path.abspath(src)
+
+ if ext in self._c_extensions:
+ input_opt = "/Tc" + src
+ elif ext in self._cpp_extensions:
+ input_opt = "/Tp" + src
+ add_cpp_opts = True
+ elif ext in self._rc_extensions:
+ # compile .RC to .RES file
+ input_opt = src
+ output_opt = "/fo" + obj
+ try:
+ self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ elif ext in self._mc_extensions:
+ # Compile .MC to .RC file to .RES file.
+ # * '-h dir' specifies the directory for the
+ # generated include file
+ # * '-r dir' specifies the target directory of the
+ # generated RC file and the binary message resource
+ # it includes
+ #
+ # For now (since there are no options to change this),
+ # we use the source-directory for the include file and
+ # the build directory for the RC file and message
+ # resources. This works at least for win32all.
+ h_dir = os.path.dirname(src)
+ rc_dir = os.path.dirname(obj)
+ try:
+ # first compile .MC to .RC and .H file
+ self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
+ base, _ = os.path.splitext(os.path.basename (src))
+ rc_file = os.path.join(rc_dir, base + '.rc')
+ # then compile .RC to .RES file
+ self.spawn([self.rc, "/fo" + obj, rc_file])
+
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ else:
+ # how to handle this file?
+ raise CompileError("Don't know how to compile {} to {}"
+ .format(src, obj))
+
+ args = [self.cc] + compile_opts + pp_opts
+ if add_cpp_opts:
+ args.append('/EHsc')
+ args.append(input_opt)
+ args.append("/Fo" + obj)
+ args.extend(extra_postargs)
+
+ try:
+ self.spawn(args)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+
+ def create_static_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ output_filename = self.library_filename(output_libname,
+ output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ lib_args = objects + ['/OUT:' + output_filename]
+ if debug:
+ pass # XXX what goes here?
+ try:
+ log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
+ self.spawn([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ libraries, library_dirs, runtime_library_dirs = fixed_args
+
+ if runtime_library_dirs:
+ self.warn("I don't know what to do with 'runtime_library_dirs': "
+ + str(runtime_library_dirs))
+
+ lib_opts = gen_lib_options(self,
+ library_dirs, runtime_library_dirs,
+ libraries)
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ ldflags = self._ldflags[target_desc, debug]
+
+ export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
+
+ ld_args = (ldflags + lib_opts + export_opts +
+ objects + ['/OUT:' + output_filename])
+
+ # The MSVC linker generates .lib and .exp files, which cannot be
+ # suppressed by any linker switches. The .lib files may even be
+ # needed! Make sure they are generated in the temporary build
+ # directory. Since they have different names for debug and release
+ # builds, they can go into the same directory.
+ build_temp = os.path.dirname(objects[0])
+ if export_symbols is not None:
+ (dll_name, dll_ext) = os.path.splitext(
+ os.path.basename(output_filename))
+ implib_file = os.path.join(
+ build_temp,
+ self.library_filename(dll_name))
+ ld_args.append ('/IMPLIB:' + implib_file)
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ output_dir = os.path.dirname(os.path.abspath(output_filename))
+ self.mkpath(output_dir)
+ try:
+ log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
+ self.spawn([self.linker] + ld_args)
+ self._copy_vcruntime(output_dir)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def _copy_vcruntime(self, output_dir):
+ vcruntime = self._vcruntime_redist
+ if not vcruntime or not os.path.isfile(vcruntime):
+ return
+
+ if os.path.basename(vcruntime).lower() in _BUNDLED_DLLS:
+ return
+
+ log.debug('Copying "%s"', vcruntime)
+ vcruntime = shutil.copy(vcruntime, output_dir)
+ os.chmod(vcruntime, stat.S_IWRITE)
+
+ def spawn(self, cmd):
+ old_path = os.getenv('path')
+ try:
+ os.environ['path'] = self._paths
+ return super().spawn(cmd)
+ finally:
+ os.environ['path'] = old_path
+
+ # -- Miscellaneous methods -----------------------------------------
+    # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "/LIBPATH:" + dir
+
+ def runtime_library_dir_option(self, dir):
+ raise DistutilsPlatformError(
+ "don't know how to set runtime library search path for MSVC")
+
+ def library_option(self, lib):
+ return self.library_filename(lib)
+
+ def find_library_file(self, dirs, lib, debug=0):
+ # Prefer a debugging library if found (and requested), but deal
+ # with it if we don't have one.
+ if debug:
+ try_names = [lib + "_d", lib]
+ else:
+ try_names = [lib]
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename(name))
+ if os.path.isfile(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
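+    # Illustrative note (not part of the original module): with debug=1 the
+    # lookup prefers a '_d' suffixed library but falls back to the release
+    # name, e.g. 'zlib' is tried as 'zlib_d.lib' and then 'zlib.lib' in each
+    # directory of 'dirs' before giving up and returning None:
+    #
+    #     >>> MSVCCompiler().find_library_file([r'C:\libs'], 'zlib', debug=1)
+    #     'C:\\libs\\zlib_d.lib'               # hypothetical, if that file exists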
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/archive_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/archive_util.py
new file mode 100644
index 00000000..b002dc3b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/archive_util.py
@@ -0,0 +1,256 @@
+"""distutils.archive_util
+
+Utility functions for creating archive files (tarballs, zip files,
+that sort of thing)."""
+
+import os
+from warnings import warn
+import sys
+
+try:
+ import zipfile
+except ImportError:
+ zipfile = None
+
+
+from distutils.errors import DistutilsExecError
+from distutils.spawn import spawn
+from distutils.dir_util import mkpath
+from distutils import log
+
+try:
+ from pwd import getpwnam
+except ImportError:
+ getpwnam = None
+
+try:
+ from grp import getgrnam
+except ImportError:
+ getgrnam = None
+
+def _get_gid(name):
+ """Returns a gid, given a group name."""
+ if getgrnam is None or name is None:
+ return None
+ try:
+ result = getgrnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+def _get_uid(name):
+ """Returns an uid, given a user name."""
+ if getpwnam is None or name is None:
+ return None
+ try:
+ result = getpwnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
+ owner=None, group=None):
+ """Create a (possibly compressed) tar file from all the files under
+ 'base_dir'.
+
+ 'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or
+ None. ("compress" will be deprecated in Python 3.2)
+
+ 'owner' and 'group' can be used to define an owner and a group for the
+ archive that is being built. If not provided, the current owner and group
+ will be used.
+
+ The output tar file will be named 'base_dir' + ".tar", possibly plus
+ the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z").
+
+ Returns the output filename.
+ """
+ tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '',
+ 'compress': ''}
+ compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz',
+ 'compress': '.Z'}
+
+ # flags for compression program, each element of list will be an argument
+ if compress is not None and compress not in compress_ext.keys():
+ raise ValueError(
+ "bad value for 'compress': must be None, 'gzip', 'bzip2', "
+ "'xz' or 'compress'")
+
+ archive_name = base_name + '.tar'
+ if compress != 'compress':
+ archive_name += compress_ext.get(compress, '')
+
+ mkpath(os.path.dirname(archive_name), dry_run=dry_run)
+
+ # creating the tarball
+ import tarfile # late import so Python build itself doesn't break
+
+ log.info('Creating tar archive')
+
+ uid = _get_uid(owner)
+ gid = _get_gid(group)
+
+ def _set_uid_gid(tarinfo):
+ if gid is not None:
+ tarinfo.gid = gid
+ tarinfo.gname = group
+ if uid is not None:
+ tarinfo.uid = uid
+ tarinfo.uname = owner
+ return tarinfo
+
+ if not dry_run:
+ tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
+ try:
+ tar.add(base_dir, filter=_set_uid_gid)
+ finally:
+ tar.close()
+
+ # compression using `compress`
+ if compress == 'compress':
+ warn("'compress' will be deprecated.", PendingDeprecationWarning)
+ # the option varies depending on the platform
+ compressed_name = archive_name + compress_ext[compress]
+ if sys.platform == 'win32':
+ cmd = [compress, archive_name, compressed_name]
+ else:
+ cmd = [compress, '-f', archive_name]
+ spawn(cmd, dry_run=dry_run)
+ return compressed_name
+
+ return archive_name
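+# Illustrative usage (not part of the original module), assuming a ./build
+# tree exists next to a ./dist directory:
+#
+#     >>> make_tarball('dist/app-1.0', 'build', compress='gzip')
+#     'dist/app-1.0.tar.gz'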
+
+def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
+ """Create a zip file from all the files under 'base_dir'.
+
+ The output zip file will be named 'base_name' + ".zip". Uses either the
+ "zipfile" Python module (if available) or the InfoZIP "zip" utility
+ (if installed and found on the default search path). If neither tool is
+ available, raises DistutilsExecError. Returns the name of the output zip
+ file.
+ """
+ zip_filename = base_name + ".zip"
+ mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+
+ # If zipfile module is not available, try spawning an external
+ # 'zip' command.
+ if zipfile is None:
+ if verbose:
+ zipoptions = "-r"
+ else:
+ zipoptions = "-rq"
+
+ try:
+ spawn(["zip", zipoptions, zip_filename, base_dir],
+ dry_run=dry_run)
+ except DistutilsExecError:
+ # XXX really should distinguish between "couldn't find
+ # external 'zip' command" and "zip failed".
+ raise DistutilsExecError(("unable to create zip file '%s': "
+ "could neither import the 'zipfile' module nor "
+ "find a standalone zip utility") % zip_filename)
+
+ else:
+ log.info("creating '%s' and adding '%s' to it",
+ zip_filename, base_dir)
+
+ if not dry_run:
+ try:
+ zip = zipfile.ZipFile(zip_filename, "w",
+ compression=zipfile.ZIP_DEFLATED)
+ except RuntimeError:
+ zip = zipfile.ZipFile(zip_filename, "w",
+ compression=zipfile.ZIP_STORED)
+
+ if base_dir != os.curdir:
+ path = os.path.normpath(os.path.join(base_dir, ''))
+ zip.write(path, path)
+ log.info("adding '%s'", path)
+ for dirpath, dirnames, filenames in os.walk(base_dir):
+ for name in dirnames:
+ path = os.path.normpath(os.path.join(dirpath, name, ''))
+ zip.write(path, path)
+ log.info("adding '%s'", path)
+ for name in filenames:
+ path = os.path.normpath(os.path.join(dirpath, name))
+ if os.path.isfile(path):
+ zip.write(path, path)
+ log.info("adding '%s'", path)
+ zip.close()
+
+ return zip_filename
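+# Illustrative usage (not part of the original module): zip the ./build tree,
+# preferring the zipfile module and shelling out to InfoZIP only if it is
+# missing:
+#
+#     >>> make_zipfile('dist/app-1.0', 'build')
+#     'dist/app-1.0.zip'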
+
+ARCHIVE_FORMATS = {
+ 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
+ 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
+ 'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
+ 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
+ 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
+ 'zip': (make_zipfile, [],"ZIP file")
+ }
+
+def check_archive_formats(formats):
+ """Returns the first format from the 'format' list that is unknown.
+
+ If all formats are known, returns None
+ """
+ for format in formats:
+ if format not in ARCHIVE_FORMATS:
+ return format
+ return None
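+# Illustrative example (not part of the original module):
+#
+#     >>> check_archive_formats(['zip', 'gztar'])    # all known -> None
+#     >>> check_archive_formats(['zip', 'rar'])      # first unknown name
+#     'rar'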
+
+def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
+ dry_run=0, owner=None, group=None):
+ """Create an archive file (eg. zip or tar).
+
+ 'base_name' is the name of the file to create, minus any format-specific
+ extension; 'format' is the archive format: one of "zip", "tar", "gztar",
+ "bztar", "xztar", or "ztar".
+
+ 'root_dir' is a directory that will be the root directory of the
+ archive; ie. we typically chdir into 'root_dir' before creating the
+ archive. 'base_dir' is the directory where we start archiving from;
+ ie. 'base_dir' will be the common prefix of all files and
+ directories in the archive. 'root_dir' and 'base_dir' both default
+ to the current directory. Returns the name of the archive file.
+
+ 'owner' and 'group' are used when creating a tar archive. By default,
+ uses the current owner and group.
+ """
+ save_cwd = os.getcwd()
+ if root_dir is not None:
+ log.debug("changing into '%s'", root_dir)
+ base_name = os.path.abspath(base_name)
+ if not dry_run:
+ os.chdir(root_dir)
+
+ if base_dir is None:
+ base_dir = os.curdir
+
+ kwargs = {'dry_run': dry_run}
+
+ try:
+ format_info = ARCHIVE_FORMATS[format]
+ except KeyError:
+ raise ValueError("unknown archive format '%s'" % format)
+
+ func = format_info[0]
+ for arg, val in format_info[1]:
+ kwargs[arg] = val
+
+ if format != 'zip':
+ kwargs['owner'] = owner
+ kwargs['group'] = group
+
+ try:
+ filename = func(base_name, base_dir, **kwargs)
+ finally:
+ if root_dir is not None:
+ log.debug("changing back to '%s'", save_cwd)
+ os.chdir(save_cwd)
+
+ return filename
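+# Illustrative usage (not part of the original module): create
+# dist/app-1.0.tar.gz containing everything under ./build (no chdir, since
+# root_dir is omitted):
+#
+#     >>> make_archive('dist/app-1.0', 'gztar', base_dir='build')
+#     'dist/app-1.0.tar.gz'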
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/bcppcompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/bcppcompiler.py
new file mode 100644
index 00000000..9f4c432d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/bcppcompiler.py
@@ -0,0 +1,393 @@
+"""distutils.bcppcompiler
+
+Contains BorlandCCompiler, an implementation of the abstract CCompiler class
+for the Borland C++ compiler.
+"""
+
+# This implementation by Lyle Johnson, based on the original msvccompiler.py
+# module and using the directions originally published by Gordon Williams.
+
+# XXX looks like there's a LOT of overlap between these two classes:
+# someone should sit down and factor out the common code as
+# WindowsCCompiler! --GPW
+
+
+import os
+from distutils.errors import \
+ DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError, UnknownFileError
+from distutils.ccompiler import \
+ CCompiler, gen_preprocess_options, gen_lib_options
+from distutils.file_util import write_file
+from distutils.dep_util import newer
+from distutils import log
+
+class BCPPCompiler(CCompiler) :
+ """Concrete class that implements an interface to the Borland C/C++
+ compiler, as defined by the CCompiler abstract class.
+ """
+
+ compiler_type = 'bcpp'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = _c_extensions + _cpp_extensions
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+
+ def __init__ (self,
+ verbose=0,
+ dry_run=0,
+ force=0):
+
+ CCompiler.__init__ (self, verbose, dry_run, force)
+
+ # These executables are assumed to all be in the path.
+ # Borland doesn't seem to use any special registry settings to
+ # indicate their installation locations.
+
+ self.cc = "bcc32.exe"
+ self.linker = "ilink32.exe"
+ self.lib = "tlib.exe"
+
+ self.preprocess_options = None
+ self.compile_options = ['/tWM', '/O2', '/q', '/g0']
+ self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
+
+ self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
+ self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
+ self.ldflags_static = []
+ self.ldflags_exe = ['/Gn', '/q', '/x']
+ self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
+
+
+ # -- Worker methods ------------------------------------------------
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ compile_opts = extra_preargs or []
+ compile_opts.append ('-c')
+ if debug:
+ compile_opts.extend (self.compile_options_debug)
+ else:
+ compile_opts.extend (self.compile_options)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ # XXX why do the normpath here?
+ src = os.path.normpath(src)
+ obj = os.path.normpath(obj)
+ # XXX _setup_compile() did a mkpath() too but before the normpath.
+ # Is it possible to skip the normpath?
+ self.mkpath(os.path.dirname(obj))
+
+ if ext == '.res':
+ # This is already a binary file -- skip it.
+ continue # the 'for' loop
+ if ext == '.rc':
+ # This needs to be compiled to a .res file -- do it now.
+ try:
+ self.spawn (["brcc32", "-fo", obj, src])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue # the 'for' loop
+
+ # The next two are both for the real compiler.
+ if ext in self._c_extensions:
+ input_opt = ""
+ elif ext in self._cpp_extensions:
+ input_opt = "-P"
+ else:
+ # Unknown file type -- no extra options. The compiler
+ # will probably fail, but let it just in case this is a
+ # file the compiler recognizes even if we don't.
+ input_opt = ""
+
+ output_opt = "-o" + obj
+
+ # Compiler command line syntax is: "bcc32 [options] file(s)".
+ # Note that the source file names must appear at the end of
+ # the command line.
+ try:
+ self.spawn ([self.cc] + compile_opts + pp_opts +
+ [input_opt, output_opt] +
+ extra_postargs + [src])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+ # compile ()
+
+
+ def create_static_lib (self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ (objects, output_dir) = self._fix_object_args (objects, output_dir)
+ output_filename = \
+ self.library_filename (output_libname, output_dir=output_dir)
+
+ if self._need_link (objects, output_filename):
+ lib_args = [output_filename, '/u'] + objects
+ if debug:
+ pass # XXX what goes here?
+ try:
+ self.spawn ([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ # create_static_lib ()
+
+
+ def link (self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ # XXX this ignores 'build_temp'! should follow the lead of
+ # msvccompiler.py
+
+ (objects, output_dir) = self._fix_object_args (objects, output_dir)
+ (libraries, library_dirs, runtime_library_dirs) = \
+ self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
+
+ if runtime_library_dirs:
+ log.warn("I don't know what to do with 'runtime_library_dirs': %s",
+ str(runtime_library_dirs))
+
+ if output_dir is not None:
+ output_filename = os.path.join (output_dir, output_filename)
+
+ if self._need_link (objects, output_filename):
+
+ # Figure out linker args based on type of target.
+ if target_desc == CCompiler.EXECUTABLE:
+ startup_obj = 'c0w32'
+ if debug:
+ ld_args = self.ldflags_exe_debug[:]
+ else:
+ ld_args = self.ldflags_exe[:]
+ else:
+ startup_obj = 'c0d32'
+ if debug:
+ ld_args = self.ldflags_shared_debug[:]
+ else:
+ ld_args = self.ldflags_shared[:]
+
+
+ # Create a temporary exports file for use by the linker
+ if export_symbols is None:
+ def_file = ''
+ else:
+ head, tail = os.path.split (output_filename)
+ modname, ext = os.path.splitext (tail)
+ temp_dir = os.path.dirname(objects[0]) # preserve tree structure
+ def_file = os.path.join (temp_dir, '%s.def' % modname)
+ contents = ['EXPORTS']
+ for sym in (export_symbols or []):
+ contents.append(' %s=_%s' % (sym, sym))
+ self.execute(write_file, (def_file, contents),
+ "writing %s" % def_file)
+
+ # Borland C++ has problems with '/' in paths
+ objects2 = map(os.path.normpath, objects)
+ # split objects in .obj and .res files
+ # Borland C++ needs them at different positions in the command line
+ objects = [startup_obj]
+ resources = []
+ for file in objects2:
+ (base, ext) = os.path.splitext(os.path.normcase(file))
+ if ext == '.res':
+ resources.append(file)
+ else:
+ objects.append(file)
+
+
+ for l in library_dirs:
+ ld_args.append("/L%s" % os.path.normpath(l))
+ ld_args.append("/L.") # we sometimes use relative paths
+
+ # list of object files
+ ld_args.extend(objects)
+
+ # XXX the command-line syntax for Borland C++ is a bit wonky;
+ # certain filenames are jammed together in one big string, but
+ # comma-delimited. This doesn't mesh too well with the
+ # Unix-centric attitude (with a DOS/Windows quoting hack) of
+ # 'spawn()', so constructing the argument list is a bit
+ # awkward. Note that doing the obvious thing and jamming all
+ # the filenames and commas into one argument would be wrong,
+ # because 'spawn()' would quote any filenames with spaces in
+ # them. Arghghh!. Apparently it works fine as coded...
+
+ # name of dll/exe file
+ ld_args.extend([',',output_filename])
+ # no map file and start libraries
+ ld_args.append(',,')
+
+ for lib in libraries:
+ # see if we find it and if there is a bcpp specific lib
+ # (xxx_bcpp.lib)
+ libfile = self.find_library_file(library_dirs, lib, debug)
+ if libfile is None:
+ ld_args.append(lib)
+ # probably a BCPP internal library -- don't warn
+ else:
+ # full name which prefers bcpp_xxx.lib over xxx.lib
+ ld_args.append(libfile)
+
+ # some default libraries
+ ld_args.append ('import32')
+ ld_args.append ('cw32mt')
+
+ # def file for export symbols
+ ld_args.extend([',',def_file])
+ # add resource files
+ ld_args.append(',')
+ ld_args.extend(resources)
+
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ self.mkpath (os.path.dirname (output_filename))
+ try:
+ self.spawn ([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ # link ()
+
+ # -- Miscellaneous methods -----------------------------------------
+
+
+ def find_library_file (self, dirs, lib, debug=0):
+ # List of effective library names to try, in order of preference:
+ # xxx_bcpp.lib is better than xxx.lib
+ # and xxx_d.lib is better than xxx.lib if debug is set
+ #
+ # The "_bcpp" suffix is to handle a Python installation for people
+ # with multiple compilers (primarily Distutils hackers, I suspect
+ # ;-). The idea is they'd have one static library for each
+ # compiler they care about, since (almost?) every Windows compiler
+ # seems to have a different format for static libraries.
+ if debug:
+ dlib = (lib + "_d")
+ try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
+ else:
+ try_names = (lib + "_bcpp", lib)
+
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename(name))
+ if os.path.exists(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
+
+ # overwrite the one from CCompiler to support rc and res-files
+ def object_filenames (self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+ (base, ext) = os.path.splitext (os.path.normcase(src_name))
+ if ext not in (self.src_extensions + ['.rc','.res']):
+ raise UnknownFileError("unknown file type '%s' (from '%s')" % \
+ (ext, src_name))
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext == '.res':
+ # these can go unchanged
+ obj_names.append (os.path.join (output_dir, base + ext))
+ elif ext == '.rc':
+ # these need to be compiled to .res-files
+ obj_names.append (os.path.join (output_dir, base + '.res'))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+ # object_filenames ()
+
+ def preprocess (self,
+ source,
+ output_file=None,
+ macros=None,
+ include_dirs=None,
+ extra_preargs=None,
+ extra_postargs=None):
+
+ (_, macros, include_dirs) = \
+ self._fix_compile_args(None, macros, include_dirs)
+ pp_opts = gen_preprocess_options(macros, include_dirs)
+ pp_args = ['cpp32.exe'] + pp_opts
+ if output_file is not None:
+ pp_args.append('-o' + output_file)
+ if extra_preargs:
+ pp_args[:0] = extra_preargs
+ if extra_postargs:
+ pp_args.extend(extra_postargs)
+ pp_args.append(source)
+
+ # We need to preprocess: either we're being forced to, or the
+ # source file is newer than the target (or the target doesn't
+ # exist).
+ if self.force or output_file is None or newer(source, output_file):
+ if output_file:
+ self.mkpath(os.path.dirname(output_file))
+ try:
+ self.spawn(pp_args)
+ except DistutilsExecError as msg:
+ print(msg)
+ raise CompileError(msg)
+
+ # preprocess()
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/ccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/ccompiler.py
new file mode 100644
index 00000000..b71d1d39
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/ccompiler.py
@@ -0,0 +1,1115 @@
+"""distutils.ccompiler
+
+Contains CCompiler, an abstract base class that defines the interface
+for the Distutils compiler abstraction model."""
+
+import sys, os, re
+from distutils.errors import *
+from distutils.spawn import spawn
+from distutils.file_util import move_file
+from distutils.dir_util import mkpath
+from distutils.dep_util import newer_pairwise, newer_group
+from distutils.util import split_quoted, execute
+from distutils import log
+
+class CCompiler:
+ """Abstract base class to define the interface that must be implemented
+ by real compiler classes. Also has some utility methods used by
+ several compiler classes.
+
+ The basic idea behind a compiler abstraction class is that each
+ instance can be used for all the compile/link steps in building a
+ single project. Thus, attributes common to all of those compile and
+ link steps -- include directories, macros to define, libraries to link
+ against, etc. -- are attributes of the compiler instance. To allow for
+ variability in how individual files are treated, most of those
+ attributes may be varied on a per-compilation or per-link basis.
+ """
+
+ # 'compiler_type' is a class attribute that identifies this class. It
+ # keeps code that wants to know what kind of compiler it's dealing with
+ # from having to import all possible compiler classes just to do an
+ # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
+ # should really, really be one of the keys of the 'compiler_class'
+ # dictionary (see below -- used by the 'new_compiler()' factory
+ # function) -- authors of new compiler interface classes are
+ # responsible for updating 'compiler_class'!
+ compiler_type = None
+
+ # XXX things not handled by this compiler abstraction model:
+ # * client can't provide additional options for a compiler,
+ # e.g. warning, optimization, debugging flags. Perhaps this
+ # should be the domain of concrete compiler abstraction classes
+ # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
+ # class should have methods for the common ones.
+    #   * can't completely override the include or library search
+ # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
+ # I'm not sure how widely supported this is even by Unix
+ # compilers, much less on other platforms. And I'm even less
+ # sure how useful it is; maybe for cross-compiling, but
+ # support for that is a ways off. (And anyways, cross
+ # compilers probably have a dedicated binary with the
+ # right paths compiled in. I hope.)
+ # * can't do really freaky things with the library list/library
+ # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
+ # different versions of libfoo.a in different locations. I
+ # think this is useless without the ability to null out the
+ # library search path anyways.
+
+
+ # Subclasses that rely on the standard filename generation methods
+ # implemented below should override these; see the comment near
+ # those methods ('object_filenames()' et. al.) for details:
+ src_extensions = None # list of strings
+ obj_extension = None # string
+ static_lib_extension = None
+ shared_lib_extension = None # string
+ static_lib_format = None # format string
+ shared_lib_format = None # prob. same as static_lib_format
+ exe_extension = None # string
+
+ # Default language settings. language_map is used to detect a source
+ # file or Extension target language, checking source filenames.
+ # language_order is used to detect the language precedence, when deciding
+ # what language to use when mixing source types. For example, if some
+ # extension has two files with ".c" extension, and one with ".cpp", it
+ # is still linked as c++.
+ language_map = {".c" : "c",
+ ".cc" : "c++",
+ ".cpp" : "c++",
+ ".cxx" : "c++",
+ ".m" : "objc",
+ }
+ language_order = ["c++", "objc", "c"]
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ self.dry_run = dry_run
+ self.force = force
+ self.verbose = verbose
+
+ # 'output_dir': a common output directory for object, library,
+ # shared object, and shared library files
+ self.output_dir = None
+
+ # 'macros': a list of macro definitions (or undefinitions). A
+ # macro definition is a 2-tuple (name, value), where the value is
+ # either a string or None (no explicit value). A macro
+ # undefinition is a 1-tuple (name,).
+ self.macros = []
+
+ # 'include_dirs': a list of directories to search for include files
+ self.include_dirs = []
+
+ # 'libraries': a list of libraries to include in any link
+ # (library names, not filenames: eg. "foo" not "libfoo.a")
+ self.libraries = []
+
+ # 'library_dirs': a list of directories to search for libraries
+ self.library_dirs = []
+
+ # 'runtime_library_dirs': a list of directories to search for
+ # shared libraries/objects at runtime
+ self.runtime_library_dirs = []
+
+ # 'objects': a list of object files (or similar, such as explicitly
+ # named library files) to include on any link
+ self.objects = []
+
+ for key in self.executables.keys():
+ self.set_executable(key, self.executables[key])
+
+ def set_executables(self, **kwargs):
+ """Define the executables (and options for them) that will be run
+ to perform the various stages of compilation. The exact set of
+ executables that may be specified here depends on the compiler
+ class (via the 'executables' class attribute), but most will have:
+          compiler      the C/C++ compiler
+          linker_so     linker used to create shared objects and libraries
+          linker_exe    linker used to create binary executables
+          archiver      static library creator
+
+ On platforms with a command-line (Unix, DOS/Windows), each of these
+ is a string that will be split into executable name and (optional)
+ list of arguments. (Splitting the string is done similarly to how
+ Unix shells operate: words are delimited by spaces, but quotes and
+ backslashes can override this. See
+ 'distutils.util.split_quoted()'.)
+ """
+
+ # Note that some CCompiler implementation classes will define class
+ # attributes 'cpp', 'cc', etc. with hard-coded executable names;
+ # this is appropriate when a compiler class is for exactly one
+ # compiler/OS combination (eg. MSVCCompiler). Other compiler
+ # classes (UnixCCompiler, in particular) are driven by information
+ # discovered at run-time, since there are many different ways to do
+ # basically the same things with Unix C compilers.
+
+ for key in kwargs:
+ if key not in self.executables:
+ raise ValueError("unknown executable '%s' for class %s" %
+ (key, self.__class__.__name__))
+ self.set_executable(key, kwargs[key])
+
+ def set_executable(self, key, value):
+ if isinstance(value, str):
+ setattr(self, key, split_quoted(value))
+ else:
+ setattr(self, key, value)
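+    # Illustrative example (not part of the original module): string values are
+    # split shell-style into argument lists, so a hypothetical concrete
+    # Unix-flavoured subclass instance 'cc' could be configured as
+    #
+    #     >>> cc.set_executables(compiler='gcc -O2 -Wall',
+    #     ...                    linker_so='gcc -shared')
+    #     >>> cc.compiler
+    #     ['gcc', '-O2', '-Wall']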
+
+ def _find_macro(self, name):
+ i = 0
+ for defn in self.macros:
+ if defn[0] == name:
+ return i
+ i += 1
+ return None
+
+ def _check_macro_definitions(self, definitions):
+ """Ensures that every element of 'definitions' is a valid macro
+        definition, ie. either a (name, value) 2-tuple or a (name,) tuple. Do
+ nothing if all definitions are OK, raise TypeError otherwise.
+ """
+ for defn in definitions:
+ if not (isinstance(defn, tuple) and
+ (len(defn) in (1, 2) and
+ (isinstance (defn[1], str) or defn[1] is None)) and
+ isinstance (defn[0], str)):
+ raise TypeError(("invalid macro definition '%s': " % defn) + \
+ "must be tuple (string,), (string, string), or " + \
+ "(string, None)")
+
+
+ # -- Bookkeeping methods -------------------------------------------
+
+ def define_macro(self, name, value=None):
+ """Define a preprocessor macro for all compilations driven by this
+ compiler object. The optional parameter 'value' should be a
+ string; if it is not supplied, then the macro will be defined
+ without an explicit value and the exact outcome depends on the
+ compiler used (XXX true? does ANSI say anything about this?)
+ """
+ # Delete from the list of macro definitions/undefinitions if
+ # already there (so that this one will take precedence).
+ i = self._find_macro (name)
+ if i is not None:
+ del self.macros[i]
+
+ self.macros.append((name, value))
+
+ def undefine_macro(self, name):
+ """Undefine a preprocessor macro for all compilations driven by
+ this compiler object. If the same macro is defined by
+ 'define_macro()' and undefined by 'undefine_macro()' the last call
+ takes precedence (including multiple redefinitions or
+ undefinitions). If the macro is redefined/undefined on a
+ per-compilation basis (ie. in the call to 'compile()'), then that
+ takes precedence.
+ """
+ # Delete from the list of macro definitions/undefinitions if
+ # already there (so that this one will take precedence).
+ i = self._find_macro (name)
+ if i is not None:
+ del self.macros[i]
+
+ undefn = (name,)
+ self.macros.append(undefn)
+
+ def add_include_dir(self, dir):
+ """Add 'dir' to the list of directories that will be searched for
+ header files. The compiler is instructed to search directories in
+ the order in which they are supplied by successive calls to
+ 'add_include_dir()'.
+ """
+ self.include_dirs.append(dir)
+
+ def set_include_dirs(self, dirs):
+ """Set the list of directories that will be searched to 'dirs' (a
+ list of strings). Overrides any preceding calls to
+        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
+ to the list passed to 'set_include_dirs()'. This does not affect
+ any list of standard include directories that the compiler may
+ search by default.
+ """
+ self.include_dirs = dirs[:]
+
+ def add_library(self, libname):
+ """Add 'libname' to the list of libraries that will be included in
+ all links driven by this compiler object. Note that 'libname'
+ should *not* be the name of a file containing a library, but the
+ name of the library itself: the actual filename will be inferred by
+ the linker, the compiler, or the compiler class (depending on the
+ platform).
+
+ The linker will be instructed to link against libraries in the
+ order they were supplied to 'add_library()' and/or
+ 'set_libraries()'. It is perfectly valid to duplicate library
+ names; the linker will be instructed to link against libraries as
+ many times as they are mentioned.
+ """
+ self.libraries.append(libname)
+
+ def set_libraries(self, libnames):
+ """Set the list of libraries to be included in all links driven by
+ this compiler object to 'libnames' (a list of strings). This does
+ not affect any standard system libraries that the linker may
+ include by default.
+ """
+ self.libraries = libnames[:]
+
+ def add_library_dir(self, dir):
+ """Add 'dir' to the list of directories that will be searched for
+ libraries specified to 'add_library()' and 'set_libraries()'. The
+ linker will be instructed to search for libraries in the order they
+ are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
+ """
+ self.library_dirs.append(dir)
+
+ def set_library_dirs(self, dirs):
+ """Set the list of library search directories to 'dirs' (a list of
+ strings). This does not affect any standard library search path
+ that the linker may search by default.
+ """
+ self.library_dirs = dirs[:]
+
+ def add_runtime_library_dir(self, dir):
+ """Add 'dir' to the list of directories that will be searched for
+ shared libraries at runtime.
+ """
+ self.runtime_library_dirs.append(dir)
+
+ def set_runtime_library_dirs(self, dirs):
+ """Set the list of directories to search for shared libraries at
+ runtime to 'dirs' (a list of strings). This does not affect any
+ standard search path that the runtime linker may search by
+ default.
+ """
+ self.runtime_library_dirs = dirs[:]
+
+ def add_link_object(self, object):
+ """Add 'object' to the list of object files (or analogues, such as
+ explicitly named library files or the output of "resource
+ compilers") to be included in every link driven by this compiler
+ object.
+ """
+ self.objects.append(object)
+
+ def set_link_objects(self, objects):
+ """Set the list of object files (or analogues) to be included in
+ every link to 'objects'. This does not affect any standard object
+ files that the linker may include by default (such as system
+ libraries).
+ """
+ self.objects = objects[:]
+
+
+ # -- Private utility methods --------------------------------------
+ # (here for the convenience of subclasses)
+
+ # Helper method to prep compiler in subclass compile() methods
+
+ def _setup_compile(self, outdir, macros, incdirs, sources, depends,
+ extra):
+ """Process arguments and decide which source files to compile."""
+ if outdir is None:
+ outdir = self.output_dir
+ elif not isinstance(outdir, str):
+ raise TypeError("'output_dir' must be a string or None")
+
+ if macros is None:
+ macros = self.macros
+ elif isinstance(macros, list):
+ macros = macros + (self.macros or [])
+ else:
+ raise TypeError("'macros' (if supplied) must be a list of tuples")
+
+ if incdirs is None:
+ incdirs = self.include_dirs
+ elif isinstance(incdirs, (list, tuple)):
+ incdirs = list(incdirs) + (self.include_dirs or [])
+ else:
+ raise TypeError(
+ "'include_dirs' (if supplied) must be a list of strings")
+
+ if extra is None:
+ extra = []
+
+ # Get the list of expected output (object) files
+ objects = self.object_filenames(sources, strip_dir=0,
+ output_dir=outdir)
+ assert len(objects) == len(sources)
+
+ pp_opts = gen_preprocess_options(macros, incdirs)
+
+ build = {}
+ for i in range(len(sources)):
+ src = sources[i]
+ obj = objects[i]
+ ext = os.path.splitext(src)[1]
+ self.mkpath(os.path.dirname(obj))
+ build[obj] = (src, ext)
+
+ return macros, objects, extra, pp_opts, build
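+    # Illustrative note (not part of the original module): the returned 'build'
+    # dict maps each expected object file to its (source, extension) pair,
+    # roughly
+    #
+    #     {'build/foo.o': ('foo.c', '.c'), 'build/bar.o': ('bar.cpp', '.cpp')}
+    #
+    # which the concrete compile() implementations then iterate over.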
+
+ def _get_cc_args(self, pp_opts, debug, before):
+ # works for unixccompiler, cygwinccompiler
+ cc_args = pp_opts + ['-c']
+ if debug:
+ cc_args[:0] = ['-g']
+ if before:
+ cc_args[:0] = before
+ return cc_args
+
+ def _fix_compile_args(self, output_dir, macros, include_dirs):
+ """Typecheck and fix-up some of the arguments to the 'compile()'
+ method, and return fixed-up values. Specifically: if 'output_dir'
+ is None, replaces it with 'self.output_dir'; ensures that 'macros'
+ is a list, and augments it with 'self.macros'; ensures that
+ 'include_dirs' is a list, and augments it with 'self.include_dirs'.
+ Guarantees that the returned values are of the correct type,
+ i.e. for 'output_dir' either string or None, and for 'macros' and
+ 'include_dirs' either list or None.
+ """
+ if output_dir is None:
+ output_dir = self.output_dir
+ elif not isinstance(output_dir, str):
+ raise TypeError("'output_dir' must be a string or None")
+
+ if macros is None:
+ macros = self.macros
+ elif isinstance(macros, list):
+ macros = macros + (self.macros or [])
+ else:
+ raise TypeError("'macros' (if supplied) must be a list of tuples")
+
+ if include_dirs is None:
+ include_dirs = self.include_dirs
+ elif isinstance(include_dirs, (list, tuple)):
+ include_dirs = list(include_dirs) + (self.include_dirs or [])
+ else:
+ raise TypeError(
+ "'include_dirs' (if supplied) must be a list of strings")
+
+ return output_dir, macros, include_dirs
+
+ def _prep_compile(self, sources, output_dir, depends=None):
+ """Decide which souce files must be recompiled.
+
+ Determine the list of object files corresponding to 'sources',
+ and figure out which ones really need to be recompiled.
+ Return a list of all object files and a dictionary telling
+ which source files can be skipped.
+ """
+ # Get the list of expected output (object) files
+ objects = self.object_filenames(sources, output_dir=output_dir)
+ assert len(objects) == len(sources)
+
+ # Return an empty dict for the "which source files can be skipped"
+ # return value to preserve API compatibility.
+ return objects, {}
+
+ def _fix_object_args(self, objects, output_dir):
+ """Typecheck and fix up some arguments supplied to various methods.
+ Specifically: ensure that 'objects' is a list; if output_dir is
+ None, replace with self.output_dir. Return fixed versions of
+ 'objects' and 'output_dir'.
+ """
+ if not isinstance(objects, (list, tuple)):
+ raise TypeError("'objects' must be a list or tuple of strings")
+ objects = list(objects)
+
+ if output_dir is None:
+ output_dir = self.output_dir
+ elif not isinstance(output_dir, str):
+ raise TypeError("'output_dir' must be a string or None")
+
+ return (objects, output_dir)
+
+ def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
+ """Typecheck and fix up some of the arguments supplied to the
+ 'link_*' methods. Specifically: ensure that all arguments are
+ lists, and augment them with their permanent versions
+ (eg. 'self.libraries' augments 'libraries'). Return a tuple with
+ fixed versions of all arguments.
+ """
+ if libraries is None:
+ libraries = self.libraries
+ elif isinstance(libraries, (list, tuple)):
+ libraries = list (libraries) + (self.libraries or [])
+ else:
+ raise TypeError(
+ "'libraries' (if supplied) must be a list of strings")
+
+ if library_dirs is None:
+ library_dirs = self.library_dirs
+ elif isinstance(library_dirs, (list, tuple)):
+ library_dirs = list (library_dirs) + (self.library_dirs or [])
+ else:
+ raise TypeError(
+ "'library_dirs' (if supplied) must be a list of strings")
+
+ if runtime_library_dirs is None:
+ runtime_library_dirs = self.runtime_library_dirs
+ elif isinstance(runtime_library_dirs, (list, tuple)):
+ runtime_library_dirs = (list(runtime_library_dirs) +
+ (self.runtime_library_dirs or []))
+ else:
+ raise TypeError("'runtime_library_dirs' (if supplied) "
+ "must be a list of strings")
+
+ return (libraries, library_dirs, runtime_library_dirs)
+
+ def _need_link(self, objects, output_file):
+ """Return true if we need to relink the files listed in 'objects'
+ to recreate 'output_file'.
+ """
+ if self.force:
+ return True
+ else:
+ if self.dry_run:
+ newer = newer_group (objects, output_file, missing='newer')
+ else:
+ newer = newer_group (objects, output_file)
+ return newer
+
+ def detect_language(self, sources):
+ """Detect the language of a given file, or list of files. Uses
+        language_map and language_order to do the job.
+ """
+ if not isinstance(sources, list):
+ sources = [sources]
+ lang = None
+ index = len(self.language_order)
+ for source in sources:
+ base, ext = os.path.splitext(source)
+ extlang = self.language_map.get(ext)
+ try:
+ extindex = self.language_order.index(extlang)
+ if extindex < index:
+ lang = extlang
+ index = extindex
+ except ValueError:
+ pass
+ return lang
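+    # Illustrative example (not part of the original module): with the default
+    # language_order a single C++ source promotes the whole link to C++, e.g.
+    # for a concrete subclass instance 'cc':
+    #
+    #     >>> cc.detect_language(['a.c', 'b.c', 'impl.cpp'])
+    #     'c++'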
+
+
+ # -- Worker methods ------------------------------------------------
+ # (must be implemented by subclasses)
+
+ def preprocess(self, source, output_file=None, macros=None,
+ include_dirs=None, extra_preargs=None, extra_postargs=None):
+ """Preprocess a single C/C++ source file, named in 'source'.
+ Output will be written to file named 'output_file', or stdout if
+ 'output_file' not supplied. 'macros' is a list of macro
+ definitions as for 'compile()', which will augment the macros set
+ with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
+ list of directory names that will be added to the default list.
+
+ Raises PreprocessError on failure.
+ """
+ pass
+
+ def compile(self, sources, output_dir=None, macros=None,
+ include_dirs=None, debug=0, extra_preargs=None,
+ extra_postargs=None, depends=None):
+ """Compile one or more source files.
+
+ 'sources' must be a list of filenames, most likely C/C++
+ files, but in reality anything that can be handled by a
+ particular compiler and compiler class (eg. MSVCCompiler can
+ handle resource files in 'sources'). Return a list of object
+ filenames, one per source filename in 'sources'. Depending on
+ the implementation, not all source files will necessarily be
+ compiled, but all corresponding object filenames will be
+ returned.
+
+ If 'output_dir' is given, object files will be put under it, while
+ retaining their original path component. That is, "foo/bar.c"
+ normally compiles to "foo/bar.o" (for a Unix implementation); if
+ 'output_dir' is "build", then it would compile to
+ "build/foo/bar.o".
+
+ 'macros', if given, must be a list of macro definitions. A macro
+ definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
+ The former defines a macro; if the value is None, the macro is
+ defined without an explicit value. The 1-tuple case undefines a
+ macro. Later definitions/redefinitions/undefinitions take
+ precedence.
+
+ 'include_dirs', if given, must be a list of strings, the
+ directories to add to the default include file search path for this
+ compilation only.
+
+ 'debug' is a boolean; if true, the compiler will be instructed to
+ output debug symbols in (or alongside) the object file(s).
+
+ 'extra_preargs' and 'extra_postargs' are implementation-dependent.
+ On platforms that have the notion of a command-line (e.g. Unix,
+ DOS/Windows), they are most likely lists of strings: extra
+ command-line arguments to prepend/append to the compiler command
+ line. On other platforms, consult the implementation class
+ documentation. In any event, they are intended as an escape hatch
+ for those occasions when the abstract compiler framework doesn't
+ cut the mustard.
+
+ 'depends', if given, is a list of filenames that all targets
+ depend on. If a source file is older than any file in
+ depends, then the source file will be recompiled. This
+ supports dependency tracking, but only at a coarse
+ granularity.
+
+ Raises CompileError on failure.
+ """
+ # A concrete compiler class can either override this method
+ # entirely or implement _compile().
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
+ # Return *all* object filenames, not just the ones we just built.
+ return objects
+
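+ # Hedged usage sketch for compile() on a concrete compiler obtained
+ # from new_compiler(); the paths and macro names below are illustrative
+ # and assume a working toolchain:
+ #
+ #     cc = new_compiler()
+ #     objs = cc.compile(["src/spam.c"],
+ #                       output_dir="build",
+ #                       macros=[("NDEBUG", None),       # -DNDEBUG
+ #                               ("VERSION", '"1.0"'),   # -DVERSION="1.0"
+ #                               ("OLD_API",)],          # -UOLD_API
+ #                       include_dirs=["include"],
+ #                       debug=1)
+ #     # On a Unix-style compiler, objs would typically be
+ #     # ["build/src/spam.o"].
+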
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compile 'src' to product 'obj'."""
+ # A concrete compiler class that does not override compile()
+ # should implement _compile().
+ pass
+
+ def create_static_lib(self, objects, output_libname, output_dir=None,
+ debug=0, target_lang=None):
+ """Link a bunch of stuff together to create a static library file.
+ The "bunch of stuff" consists of the list of object files supplied
+ as 'objects', the extra object files supplied to
+ 'add_link_object()' and/or 'set_link_objects()', the libraries
+ supplied to 'add_library()' and/or 'set_libraries()', and the
+ libraries supplied as 'libraries' (if any).
+
+ 'output_libname' should be a library name, not a filename; the
+ filename will be inferred from the library name. 'output_dir' is
+ the directory where the library file will be put.
+
+ 'debug' is a boolean; if true, debugging information will be
+ included in the library (note that on most platforms, it is the
+ compile step where this matters: the 'debug' flag is included here
+ just for consistency).
+
+ 'target_lang' is the target language for which the given objects
+ are being compiled. This allows specific linkage time treatment of
+ certain languages.
+
+ Raises LibError on failure.
+ """
+ pass
+
+
+ # values for target_desc parameter in link()
+ SHARED_OBJECT = "shared_object"
+ SHARED_LIBRARY = "shared_library"
+ EXECUTABLE = "executable"
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ """Link a bunch of stuff together to create an executable or
+ shared library file.
+
+ The "bunch of stuff" consists of the list of object files supplied
+ as 'objects'. 'output_filename' should be a filename. If
+ 'output_dir' is supplied, 'output_filename' is relative to it
+ (i.e. 'output_filename' can provide directory components if
+ needed).
+
+ 'libraries' is a list of libraries to link against. These are
+ library names, not filenames, since they're translated into
+ filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
+ on Unix and "foo.lib" on DOS/Windows). However, they can include a
+ directory component, which means the linker will look in that
+ specific directory rather than searching all the normal locations.
+
+ 'library_dirs', if supplied, should be a list of directories to
+ search for libraries that were specified as bare library names
+ (ie. no directory component). These are on top of the system
+ default and those supplied to 'add_library_dir()' and/or
+ 'set_library_dirs()'. 'runtime_library_dirs' is a list of
+ directories that will be embedded into the shared library and used
+ to search for other shared libraries that *it* depends on at
+ run-time. (This may only be relevant on Unix.)
+
+ 'export_symbols' is a list of symbols that the shared library will
+ export. (This appears to be relevant only on Windows.)
+
+ 'debug' is as for 'compile()' and 'create_static_lib()', with the
+ slight distinction that it actually matters on most platforms (as
+ opposed to 'create_static_lib()', which includes a 'debug' flag
+ mostly for form's sake).
+
+ 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
+ of course that they supply command-line arguments for the
+ particular linker being used).
+
+ 'target_lang' is the target language for which the given objects
+ are being compiled. This allows specific linkage time treatment of
+ certain languages.
+
+ Raises LinkError on failure.
+ """
+ raise NotImplementedError
+
+
+ # Old 'link_*()' methods, rewritten to use the new 'link()' method.
+
+ def link_shared_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ self.link(CCompiler.SHARED_LIBRARY, objects,
+ self.library_filename(output_libname, lib_type='shared'),
+ output_dir,
+ libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug,
+ extra_preargs, extra_postargs, build_temp, target_lang)
+
+
+ def link_shared_object(self,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ self.link(CCompiler.SHARED_OBJECT, objects,
+ output_filename, output_dir,
+ libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug,
+ extra_preargs, extra_postargs, build_temp, target_lang)
+
+
+ def link_executable(self,
+ objects,
+ output_progname,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ target_lang=None):
+ self.link(CCompiler.EXECUTABLE, objects,
+ self.executable_filename(output_progname), output_dir,
+ libraries, library_dirs, runtime_library_dirs, None,
+ debug, extra_preargs, extra_postargs, None, target_lang)
+
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function; there is
+ # no appropriate default implementation so subclasses should
+ # implement all of these.
+
+ def library_dir_option(self, dir):
+ """Return the compiler option to add 'dir' to the list of
+ directories searched for libraries.
+ """
+ raise NotImplementedError
+
+ def runtime_library_dir_option(self, dir):
+ """Return the compiler option to add 'dir' to the list of
+ directories searched for runtime libraries.
+ """
+ raise NotImplementedError
+
+ def library_option(self, lib):
+ """Return the compiler option to add 'lib' to the list of libraries
+ linked into the shared library or executable.
+ """
+ raise NotImplementedError
+
+ def has_function(self, funcname, includes=None, include_dirs=None,
+ libraries=None, library_dirs=None):
+ """Return a boolean indicating whether funcname is supported on
+ the current platform. The optional arguments can be used to
+ augment the compilation environment.
+ """
+ # this import can't be done at module scope because tempfile might
+ # not be available at that point - maybe the necessary logic
+ # should just be inlined?
+ import tempfile
+ if includes is None:
+ includes = []
+ if include_dirs is None:
+ include_dirs = []
+ if libraries is None:
+ libraries = []
+ if library_dirs is None:
+ library_dirs = []
+ fd, fname = tempfile.mkstemp(".c", funcname, text=True)
+ f = os.fdopen(fd, "w")
+ try:
+ for incl in includes:
+ f.write("""#include "%s"\n""" % incl)
+ f.write("""\
+int main (int argc, char **argv) {
+ %s();
+}
+""" % funcname)
+ finally:
+ f.close()
+ try:
+ objects = self.compile([fname], include_dirs=include_dirs)
+ except CompileError:
+ return False
+
+ try:
+ self.link_executable(objects, "a.out",
+ libraries=libraries,
+ library_dirs=library_dirs)
+ except (LinkError, TypeError):
+ return False
+ return True
+
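+ # Hedged usage sketch: probing the toolchain for a function before
+ # enabling an optional feature (the outcome depends entirely on the
+ # local compiler and libraries; names are illustrative):
+ #
+ #     cc = new_compiler()
+ #     if cc.has_function("strlcpy"):
+ #         define_macros.append(("HAVE_STRLCPY", "1"))
+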
+ def find_library_file(self, dirs, lib, debug=0):
+ """Search the specified list of directories for a static or shared
+ library file 'lib' and return the full path to that file. If
+ 'debug' true, look for a debugging version (if that makes sense on
+ the current platform). Return None if 'lib' wasn't found in any of
+ the specified directories.
+ """
+ raise NotImplementedError
+
+ # -- Filename generation methods -----------------------------------
+
+ # The default implementations of the filename-generating methods are
+ # prejudiced towards the Unix/DOS/Windows view of the world:
+ # * object files are named by replacing the source file extension
+ # (eg. .c/.cpp -> .o/.obj)
+ # * library files (shared or static) are named by plugging the
+ # library name and extension into a format string, eg.
+ # "lib%s.%s" % (lib_name, ".a") for Unix static libraries
+ # * executables are named by appending an extension (possibly
+ # empty) to the program name: eg. progname + ".exe" for
+ # Windows
+ #
+ # To reduce redundant code, these methods expect to find
+ # several attributes in the current object (presumably defined
+ # as class attributes):
+ # * src_extensions -
+ # list of C/C++ source file extensions, eg. ['.c', '.cpp']
+ # * obj_extension -
+ # object file extension, eg. '.o' or '.obj'
+ # * static_lib_extension -
+ # extension for static library files, eg. '.a' or '.lib'
+ # * shared_lib_extension -
+ # extension for shared library/object files, eg. '.so', '.dll'
+ # * static_lib_format -
+ # format string for generating static library filenames,
+ # eg. 'lib%s.%s' or '%s.%s'
+ # * shared_lib_format
+ # format string for generating shared library filenames
+ # (probably same as static_lib_format, since the extension
+ # is one of the intended parameters to the format string)
+ # * exe_extension -
+ # extension for executable files, eg. '' or '.exe'
+
+ def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ base, ext = os.path.splitext(src_name)
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if ext not in self.src_extensions:
+ raise UnknownFileError(
+ "unknown file type '%s' (from '%s')" % (ext, src_name))
+ if strip_dir:
+ base = os.path.basename(base)
+ obj_names.append(os.path.join(output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+ def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
+ assert output_dir is not None
+ if strip_dir:
+ basename = os.path.basename(basename)
+ return os.path.join(output_dir, basename + self.shared_lib_extension)
+
+ def executable_filename(self, basename, strip_dir=0, output_dir=''):
+ assert output_dir is not None
+ if strip_dir:
+ basename = os.path.basename(basename)
+ return os.path.join(output_dir, basename + (self.exe_extension or ''))
+
+ def library_filename(self, libname, lib_type='static', # or 'shared'
+ strip_dir=0, output_dir=''):
+ assert output_dir is not None
+ if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
+ raise ValueError(
+ "'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
+ fmt = getattr(self, lib_type + "_lib_format")
+ ext = getattr(self, lib_type + "_lib_extension")
+
+ dir, base = os.path.split(libname)
+ filename = fmt % (base, ext)
+ if strip_dir:
+ dir = ''
+
+ return os.path.join(output_dir, dir, filename)
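+
+ # Hedged examples of the resulting names on a Unix-style compiler
+ # (the exact strings depend on the subclass's extensions/formats):
+ #
+ #     cc.object_filenames(["src/spam.c"], output_dir="build")
+ #         -> ["build/src/spam.o"]            (typically)
+ #     cc.library_filename("ssl")             -> "libssl.a" (typically)
+ #     cc.library_filename("ssl", lib_type="shared")
+ #         -> "libssl.so" or similar, per shared_lib_extension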
+
+
+ # -- Utility methods -----------------------------------------------
+
+ def announce(self, msg, level=1):
+ log.debug(msg)
+
+ def debug_print(self, msg):
+ from distutils.debug import DEBUG
+ if DEBUG:
+ print(msg)
+
+ def warn(self, msg):
+ sys.stderr.write("warning: %s\n" % msg)
+
+ def execute(self, func, args, msg=None, level=1):
+ execute(func, args, msg, self.dry_run)
+
+ def spawn(self, cmd):
+ spawn(cmd, dry_run=self.dry_run)
+
+ def move_file(self, src, dst):
+ return move_file(src, dst, dry_run=self.dry_run)
+
+ def mkpath(self, name, mode=0o777):
+ mkpath(name, mode, dry_run=self.dry_run)
+
+
+# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
+# type for that platform. Keys are interpreted as re match
+# patterns. Order is important; platform mappings are preferred over
+# OS names.
+_default_compilers = (
+
+ # Platform string mappings
+
+ # on a cygwin built python we can use gcc like an ordinary UNIXish
+ # compiler
+ ('cygwin.*', 'unix'),
+
+ # OS name mappings
+ ('posix', 'unix'),
+ ('nt', 'msvc'),
+
+ )
+
+def get_default_compiler(osname=None, platform=None):
+ """Determine the default compiler to use for the given platform.
+
+ osname should be one of the standard Python OS names (i.e. the
+ ones returned by os.name) and platform the common value
+ returned by sys.platform for the platform in question.
+
+ The default values are os.name and sys.platform in case the
+ parameters are not given.
+ """
+ if osname is None:
+ osname = os.name
+ if platform is None:
+ platform = sys.platform
+ for pattern, compiler in _default_compilers:
+ if re.match(pattern, platform) is not None or \
+ re.match(pattern, osname) is not None:
+ return compiler
+ # Default to Unix compiler
+ return 'unix'
+
+# Map compiler types to (module_name, class_name) pairs -- ie. where to
+# find the code that implements an interface to this compiler. (The module
+# is assumed to be in the 'distutils' package.)
+compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
+ "standard UNIX-style compiler"),
+ 'msvc': ('_msvccompiler', 'MSVCCompiler',
+ "Microsoft Visual C++"),
+ 'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
+ "Cygwin port of GNU C Compiler for Win32"),
+ 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
+ "Mingw32 port of GNU C Compiler for Win32"),
+ 'bcpp': ('bcppcompiler', 'BCPPCompiler',
+ "Borland C++ Compiler"),
+ }
+
+def show_compilers():
+ """Print list of available compilers (used by the "--help-compiler"
+ options to "build", "build_ext", "build_clib").
+ """
+ # XXX this "knows" that the compiler option it's describing is
+ # "--compiler", which just happens to be the case for the three
+ # commands that use it.
+ from distutils.fancy_getopt import FancyGetopt
+ compilers = []
+ for compiler in compiler_class.keys():
+ compilers.append(("compiler="+compiler, None,
+ compiler_class[compiler][2]))
+ compilers.sort()
+ pretty_printer = FancyGetopt(compilers)
+ pretty_printer.print_help("List of available compilers:")
+
+
+def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
+ """Generate an instance of some CCompiler subclass for the supplied
+ platform/compiler combination. 'plat' defaults to 'os.name'
+ (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
+ for that platform. Currently only 'posix' and 'nt' are supported, and
+ the default compilers are "traditional Unix interface" (UnixCCompiler
+ class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
+ possible to ask for a Unix compiler object under Windows, and a
+ Microsoft compiler object under Unix -- if you supply a value for
+ 'compiler', 'plat' is ignored.
+ """
+ if plat is None:
+ plat = os.name
+
+ try:
+ if compiler is None:
+ compiler = get_default_compiler(plat)
+
+ (module_name, class_name, long_description) = compiler_class[compiler]
+ except KeyError:
+ msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler" % compiler
+ raise DistutilsPlatformError(msg)
+
+ try:
+ module_name = "distutils." + module_name
+ __import__(module_name)
+ module = sys.modules[module_name]
+ klass = vars(module)[class_name]
+ except ImportError:
+ raise DistutilsModuleError(
+ "can't compile C/C++ code: unable to load module '%s'" % \
+ module_name)
+ except KeyError:
+ raise DistutilsModuleError(
+ "can't compile C/C++ code: unable to find class '%s' "
+ "in module '%s'" % (class_name, module_name))
+
+ # XXX The None is necessary to preserve backwards compatibility
+ # with classes that expect verbose to be the first positional
+ # argument.
+ return klass(None, dry_run, force)
+
+
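+# Hedged, end-to-end sketch of typical usage (illustrative only; not used
+# anywhere in distutils).  It assumes a hypothetical "hello.c" and a working
+# toolchain when actually called; merely defining it has no effect.
+def _example_build_shared(dry_run=1):
+    # Default compiler for the current platform; dry_run=1 only logs the
+    # commands instead of spawning them.
+    cc = new_compiler(dry_run=dry_run)
+    objs = cc.compile(["hello.c"], output_dir="build",
+                      macros=[("NDEBUG", None)], include_dirs=["include"])
+    cc.link_shared_object(objs, "build/hello.so", libraries=["m"])
+    return objs
+
+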
+def gen_preprocess_options(macros, include_dirs):
+ """Generate C pre-processor options (-D, -U, -I) as used by at least
+ two types of compilers: the typical Unix compiler and Visual C++.
+ 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
+ means undefine (-U) macro 'name', and (name,value) means define (-D)
+ macro 'name' to 'value'. 'include_dirs' is just a list of directory
+ names to be added to the header file search path (-I). Returns a list
+ of command-line options suitable for either Unix compilers or Visual
+ C++.
+ """
+ # XXX it would be nice (mainly aesthetic, and so we don't generate
+ # stupid-looking command lines) to go over 'macros' and eliminate
+ # redundant definitions/undefinitions (ie. ensure that only the
+ # latest mention of a particular macro winds up on the command
+ # line). I don't think it's essential, though, since most (all?)
+ # Unix C compilers only pay attention to the latest -D or -U
+ # mention of a macro on their command line. Similar situation for
+ # 'include_dirs'. I'm punting on both for now. Anyways, weeding out
+ # redundancies like this should probably be the province of
+ # CCompiler, since the data structures used are inherited from it
+ # and therefore common to all CCompiler classes.
+ pp_opts = []
+ for macro in macros:
+ if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
+ raise TypeError(
+ "bad macro definition '%s': "
+ "each element of 'macros' list must be a 1- or 2-tuple"
+ % macro)
+
+ if len(macro) == 1: # undefine this macro
+ pp_opts.append("-U%s" % macro[0])
+ elif len(macro) == 2:
+ if macro[1] is None: # define with no explicit value
+ pp_opts.append("-D%s" % macro[0])
+ else:
+ # XXX *don't* need to be clever about quoting the
+ # macro value here, because we're going to avoid the
+ # shell at all costs when we spawn the command!
+ pp_opts.append("-D%s=%s" % macro)
+
+ for dir in include_dirs:
+ pp_opts.append("-I%s" % dir)
+ return pp_opts
+
+
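+# Hedged sketch of the option format this helper produces (hypothetical
+# macro/dir values; the -D/-U options follow the order of 'macros', then the
+# -I options follow the order of 'include_dirs'):
+def _example_pp_opts():
+    return gen_preprocess_options(
+        [("NDEBUG", None), ("VERSION", '"1.0"'), ("OLD_API",)],
+        ["include", "/usr/local/include"])
+    # -> ['-DNDEBUG', '-DVERSION="1.0"', '-UOLD_API',
+    #     '-Iinclude', '-I/usr/local/include']
+
+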
+def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
+ """Generate linker options for searching library directories and
+ linking with specific libraries. 'libraries' and 'library_dirs' are,
+ respectively, lists of library names (not filenames!) and search
+ directories. Returns a list of command-line options suitable for use
+ with some compiler (depending on the two format strings passed in).
+ """
+ lib_opts = []
+
+ for dir in library_dirs:
+ lib_opts.append(compiler.library_dir_option(dir))
+
+ for dir in runtime_library_dirs:
+ opt = compiler.runtime_library_dir_option(dir)
+ if isinstance(opt, list):
+ lib_opts = lib_opts + opt
+ else:
+ lib_opts.append(opt)
+
+ # XXX it's important that we *not* remove redundant library mentions!
+ # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
+ # resolve all symbols. I just hope we never have to say "-lfoo obj.o
+ # -lbar" to get things to work -- that's certainly a possibility, but a
+ # pretty nasty way to arrange your C code.
+
+ for lib in libraries:
+ (lib_dir, lib_name) = os.path.split(lib)
+ if lib_dir:
+ lib_file = compiler.find_library_file([lib_dir], lib_name)
+ if lib_file:
+ lib_opts.append(lib_file)
+ else:
+ compiler.warn("no library file corresponding to "
+ "'%s' found (skipping)" % lib)
+ else:
+ lib_opts.append(compiler.library_option(lib))
+ return lib_opts
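+
+# Hedged example of the kind of list gen_lib_options() returns for a
+# Unix-style compiler (the runtime-dir option in particular varies by
+# platform and compiler; values here are illustrative):
+#
+#     gen_lib_options(cc, ["/opt/ssl/lib"], ["/opt/ssl/lib"],
+#                     ["ssl", "crypto"])
+#     -> ['-L/opt/ssl/lib', '-Wl,-R/opt/ssl/lib' (or similar),
+#         '-lssl', '-lcrypto']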
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/cmd.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/cmd.py
new file mode 100644
index 00000000..dba3191e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/cmd.py
@@ -0,0 +1,403 @@
+"""distutils.cmd
+
+Provides the Command class, the base class for the command classes
+in the distutils.command package.
+"""
+
+import sys, os, re
+from distutils.errors import DistutilsOptionError
+from distutils import util, dir_util, file_util, archive_util, dep_util
+from distutils import log
+
+class Command:
+ """Abstract base class for defining command classes, the "worker bees"
+ of the Distutils. A useful analogy for command classes is to think of
+ them as subroutines with local variables called "options". The options
+ are "declared" in 'initialize_options()' and "defined" (given their
+ final values, aka "finalized") in 'finalize_options()', both of which
+ must be defined by every command class. The distinction between the
+ two is necessary because option values might come from the outside
+ world (command line, config file, ...), and any options dependent on
+ other options must be computed *after* these outside influences have
+ been processed -- hence 'finalize_options()'. The "body" of the
+ subroutine, where it does all its work based on the values of its
+ options, is the 'run()' method, which must also be implemented by every
+ command class.
+ """
+
+ # 'sub_commands' formalizes the notion of a "family" of commands,
+ # eg. "install" as the parent with sub-commands "install_lib",
+ # "install_headers", etc. The parent of a family of commands
+ # defines 'sub_commands' as a class attribute; it's a list of
+ # (command_name : string, predicate : unbound_method | string | None)
+ # tuples, where 'predicate' is a method of the parent command that
+ # determines whether the corresponding command is applicable in the
+ # current situation. (Eg. "install_headers" is only applicable if
+ # we have any C header files to install.) If 'predicate' is None,
+ # that command is always applicable.
+ #
+ # 'sub_commands' is usually defined at the *end* of a class, because
+ # predicates can be unbound methods, so they must already have been
+ # defined. The canonical example is the "install" command.
+ sub_commands = []
+
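+ # Hedged illustration of the shape a parent command gives this list
+ # (names are made up here; the real "install" command defines
+ # something along these lines):
+ #
+ #     sub_commands = [('install_lib',     has_lib),
+ #                     ('install_headers', has_headers),
+ #                     ('install_scripts', None)]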
+
+ # -- Creation/initialization methods -------------------------------
+
+ def __init__(self, dist):
+ """Create and initialize a new Command object. Most importantly,
+ invokes the 'initialize_options()' method, which is the real
+ initializer and depends on the actual command being
+ instantiated.
+ """
+ # late import because of mutual dependence between these classes
+ from distutils.dist import Distribution
+
+ if not isinstance(dist, Distribution):
+ raise TypeError("dist must be a Distribution instance")
+ if self.__class__ is Command:
+ raise RuntimeError("Command is an abstract class")
+
+ self.distribution = dist
+ self.initialize_options()
+
+ # Per-command versions of the global flags, so that the user can
+ # customize Distutils' behaviour command-by-command and let some
+ # commands fall back on the Distribution's behaviour. None means
+ # "not defined, check self.distribution's copy", while 0 or 1 mean
+ # false and true (duh). Note that this means figuring out the real
+ # value of each flag is a touch complicated -- hence "self._dry_run"
+ # will be handled by __getattr__, below.
+ # XXX This needs to be fixed.
+ self._dry_run = None
+
+ # verbose is largely ignored, but needs to be set for
+ # backwards compatibility (I think)?
+ self.verbose = dist.verbose
+
+ # Some commands define a 'self.force' option to ignore file
+ # timestamps, but methods defined *here* assume that
+ # 'self.force' exists for all commands. So define it here
+ # just to be safe.
+ self.force = None
+
+ # The 'help' flag is just used for command-line parsing, so
+ # none of that complicated bureaucracy is needed.
+ self.help = 0
+
+ # 'finalized' records whether or not 'finalize_options()' has been
+ # called. 'finalize_options()' itself should not pay attention to
+ # this flag: it is the business of 'ensure_finalized()', which
+ # always calls 'finalize_options()', to respect/update it.
+ self.finalized = 0
+
+ # XXX A more explicit way to customize dry_run would be better.
+ def __getattr__(self, attr):
+ if attr == 'dry_run':
+ myval = getattr(self, "_" + attr)
+ if myval is None:
+ return getattr(self.distribution, attr)
+ else:
+ return myval
+ else:
+ raise AttributeError(attr)
+
+ def ensure_finalized(self):
+ if not self.finalized:
+ self.finalize_options()
+ self.finalized = 1
+
+ # Subclasses must define:
+ # initialize_options()
+ # provide default values for all options; may be customized by
+ # setup script, by options from config file(s), or by command-line
+ # options
+ # finalize_options()
+ # decide on the final values for all options; this is called
+ # after all possible intervention from the outside world
+ # (command-line, option file, etc.) has been processed
+ # run()
+ # run the command: do whatever it is we're here to do,
+ # controlled by the command's various option values
+
+ def initialize_options(self):
+ """Set default values for all the options that this command
+ supports. Note that these defaults may be overridden by other
+ commands, by the setup script, by config files, or by the
+ command-line. Thus, this is not the place to code dependencies
+ between options; generally, 'initialize_options()' implementations
+ are just a bunch of "self.foo = None" assignments.
+
+ This method must be implemented by all command classes.
+ """
+ raise RuntimeError("abstract method -- subclass %s must override"
+ % self.__class__)
+
+ def finalize_options(self):
+ """Set final values for all the options that this command supports.
+ This is always called as late as possible, ie. after any option
+ assignments from the command-line or from other commands have been
+ done. Thus, this is the place to code option dependencies: if
+ 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
+ long as 'foo' still has the same value it was assigned in
+ 'initialize_options()'.
+
+ This method must be implemented by all command classes.
+ """
+ raise RuntimeError("abstract method -- subclass %s must override"
+ % self.__class__)
+
+
+ def dump_options(self, header=None, indent=""):
+ from distutils.fancy_getopt import longopt_xlate
+ if header is None:
+ header = "command options for '%s':" % self.get_command_name()
+ self.announce(indent + header, level=log.INFO)
+ indent = indent + " "
+ for (option, _, _) in self.user_options:
+ option = option.translate(longopt_xlate)
+ if option[-1] == "=":
+ option = option[:-1]
+ value = getattr(self, option)
+ self.announce(indent + "%s = %s" % (option, value),
+ level=log.INFO)
+
+ def run(self):
+ """A command's raison d'etre: carry out the action it exists to
+ perform, controlled by the options initialized in
+ 'initialize_options()', customized by other commands, the setup
+ script, the command-line, and config files, and finalized in
+ 'finalize_options()'. All terminal output and filesystem
+ interaction should be done by 'run()'.
+
+ This method must be implemented by all command classes.
+ """
+ raise RuntimeError("abstract method -- subclass %s must override"
+ % self.__class__)
+
+ def announce(self, msg, level=1):
+ """If the current verbosity level is of greater than or equal to
+ 'level' print 'msg' to stdout.
+ """
+ log.log(level, msg)
+
+ def debug_print(self, msg):
+ """Print 'msg' to stdout if the global DEBUG (taken from the
+ DISTUTILS_DEBUG environment variable) flag is true.
+ """
+ from distutils.debug import DEBUG
+ if DEBUG:
+ print(msg)
+ sys.stdout.flush()
+
+
+ # -- Option validation methods -------------------------------------
+ # (these are very handy in writing the 'finalize_options()' method)
+ #
+ # NB. the general philosophy here is to ensure that a particular option
+ # value meets certain type and value constraints. If not, we try to
+ # force it into conformance (eg. if we expect a list but have a string,
+ # split the string on comma and/or whitespace). If we can't force the
+ # option into conformance, raise DistutilsOptionError. Thus, command
+ # classes need do nothing more than (eg.)
+ # self.ensure_string_list('foo')
+ # and they can be guaranteed that thereafter, self.foo will be
+ # a list of strings.
+
+ def _ensure_stringlike(self, option, what, default=None):
+ val = getattr(self, option)
+ if val is None:
+ setattr(self, option, default)
+ return default
+ elif not isinstance(val, str):
+ raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
+ % (option, what, val))
+ return val
+
+ def ensure_string(self, option, default=None):
+ """Ensure that 'option' is a string; if not defined, set it to
+ 'default'.
+ """
+ self._ensure_stringlike(option, "string", default)
+
+ def ensure_string_list(self, option):
+ r"""Ensure that 'option' is a list of strings. If 'option' is
+ currently a string, we split it either on /,\s*/ or /\s+/, so
+ "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
+ ["foo", "bar", "baz"].
+ """
+ val = getattr(self, option)
+ if val is None:
+ return
+ elif isinstance(val, str):
+ setattr(self, option, re.split(r',\s*|\s+', val))
+ else:
+ if isinstance(val, list):
+ ok = all(isinstance(v, str) for v in val)
+ else:
+ ok = False
+ if not ok:
+ raise DistutilsOptionError(
+ "'%s' must be a list of strings (got %r)"
+ % (option, val))
+
+ def _ensure_tested_string(self, option, tester, what, error_fmt,
+ default=None):
+ val = self._ensure_stringlike(option, what, default)
+ if val is not None and not tester(val):
+ raise DistutilsOptionError(("error in '%s' option: " + error_fmt)
+ % (option, val))
+
+ def ensure_filename(self, option):
+ """Ensure that 'option' is the name of an existing file."""
+ self._ensure_tested_string(option, os.path.isfile,
+ "filename",
+ "'%s' does not exist or is not a file")
+
+ def ensure_dirname(self, option):
+ self._ensure_tested_string(option, os.path.isdir,
+ "directory name",
+ "'%s' does not exist or is not a directory")
+
+
+ # -- Convenience methods for commands ------------------------------
+
+ def get_command_name(self):
+ if hasattr(self, 'command_name'):
+ return self.command_name
+ else:
+ return self.__class__.__name__
+
+ def set_undefined_options(self, src_cmd, *option_pairs):
+ """Set the values of any "undefined" options from corresponding
+ option values in some other command object. "Undefined" here means
+ "is None", which is the convention used to indicate that an option
+ has not been changed between 'initialize_options()' and
+ 'finalize_options()'. Usually called from 'finalize_options()' for
+ options that depend on some other command rather than another
+ option of the same command. 'src_cmd' is the other command from
+ which option values will be taken (a command object will be created
+ for it if necessary); the remaining arguments are
+ '(src_option,dst_option)' tuples which mean "take the value of
+ 'src_option' in the 'src_cmd' command object, and copy it to
+ 'dst_option' in the current command object".
+ """
+ # Option_pairs: list of (src_option, dst_option) tuples
+ src_cmd_obj = self.distribution.get_command_obj(src_cmd)
+ src_cmd_obj.ensure_finalized()
+ for (src_option, dst_option) in option_pairs:
+ if getattr(self, dst_option) is None:
+ setattr(self, dst_option, getattr(src_cmd_obj, src_option))
+
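+ # Hedged example, as a bdist_*-style command might use it from its own
+ # finalize_options() (the option names are illustrative):
+ #
+ #     self.set_undefined_options('bdist',
+ #                                ('dist_dir', 'dist_dir'),
+ #                                ('plat_name', 'plat_name'))
+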
+ def get_finalized_command(self, command, create=1):
+ """Wrapper around Distribution's 'get_command_obj()' method: find
+ (create if necessary and 'create' is true) the command object for
+ 'command', call its 'ensure_finalized()' method, and return the
+ finalized command object.
+ """
+ cmd_obj = self.distribution.get_command_obj(command, create)
+ cmd_obj.ensure_finalized()
+ return cmd_obj
+
+ # XXX rename to 'get_reinitialized_command()'? (should do the
+ # same in dist.py, if so)
+ def reinitialize_command(self, command, reinit_subcommands=0):
+ return self.distribution.reinitialize_command(command,
+ reinit_subcommands)
+
+ def run_command(self, command):
+ """Run some other command: uses the 'run_command()' method of
+ Distribution, which creates and finalizes the command object if
+ necessary and then invokes its 'run()' method.
+ """
+ self.distribution.run_command(command)
+
+ def get_sub_commands(self):
+ """Determine the sub-commands that are relevant in the current
+ distribution (ie., that need to be run). This is based on the
+ 'sub_commands' class attribute: each tuple in that list may include
+ a method that we call to determine if the subcommand needs to be
+ run for the current distribution. Return a list of command names.
+ """
+ commands = []
+ for (cmd_name, method) in self.sub_commands:
+ if method is None or method(self):
+ commands.append(cmd_name)
+ return commands
+
+
+ # -- External world manipulation -----------------------------------
+
+ def warn(self, msg):
+ log.warn("warning: %s: %s\n", self.get_command_name(), msg)
+
+ def execute(self, func, args, msg=None, level=1):
+ util.execute(func, args, msg, dry_run=self.dry_run)
+
+ def mkpath(self, name, mode=0o777):
+ dir_util.mkpath(name, mode, dry_run=self.dry_run)
+
+ def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
+ link=None, level=1):
+ """Copy a file respecting verbose, dry-run and force flags. (The
+ former two default to whatever is in the Distribution object, and
+ the latter defaults to false for commands that don't define it.)"""
+ return file_util.copy_file(infile, outfile, preserve_mode,
+ preserve_times, not self.force, link,
+ dry_run=self.dry_run)
+
+ def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
+ preserve_symlinks=0, level=1):
+ """Copy an entire directory tree respecting verbose, dry-run,
+ and force flags.
+ """
+ return dir_util.copy_tree(infile, outfile, preserve_mode,
+ preserve_times, preserve_symlinks,
+ not self.force, dry_run=self.dry_run)
+
+ def move_file(self, src, dst, level=1):
+ """Move a file respecting dry-run flag."""
+ return file_util.move_file(src, dst, dry_run=self.dry_run)
+
+ def spawn(self, cmd, search_path=1, level=1):
+ """Spawn an external command respecting dry-run flag."""
+ from distutils.spawn import spawn
+ spawn(cmd, search_path, dry_run=self.dry_run)
+
+ def make_archive(self, base_name, format, root_dir=None, base_dir=None,
+ owner=None, group=None):
+ return archive_util.make_archive(base_name, format, root_dir, base_dir,
+ dry_run=self.dry_run,
+ owner=owner, group=group)
+
+ def make_file(self, infiles, outfile, func, args,
+ exec_msg=None, skip_msg=None, level=1):
+ """Special case of 'execute()' for operations that process one or
+ more input files and generate one output file. Works just like
+ 'execute()', except the operation is skipped and a different
+ message printed if 'outfile' already exists and is newer than all
+ files listed in 'infiles'. If the command defines 'self.force'
+ and it is true, then the action is run unconditionally, with no
+ timestamp checks.
+ """
+ if skip_msg is None:
+ skip_msg = "skipping %s (inputs unchanged)" % outfile
+
+ # Allow 'infiles' to be a single string
+ if isinstance(infiles, str):
+ infiles = (infiles,)
+ elif not isinstance(infiles, (list, tuple)):
+ raise TypeError(
+ "'infiles' must be a string, or a list or tuple of strings")
+
+ if exec_msg is None:
+ exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles))
+
+ # If 'outfile' must be regenerated (either because it doesn't
+ # exist, is out-of-date, or the 'force' flag is true) then
+ # perform the action that presumably regenerates it
+ if self.force or dep_util.newer_group(infiles, outfile):
+ self.execute(func, args, exec_msg, level)
+ # Otherwise, print the "skip" message
+ else:
+ log.debug(skip_msg)
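+
+ # Hedged usage sketch (hypothetical file names and generator function),
+ # as a command's run() might regenerate an output only when its inputs
+ # have changed:
+ #
+ #     self.make_file(["data/table.in"], "build/table.c",
+ #                    generate_table, ("data/table.in", "build/table.c"),
+ #                    exec_msg="regenerating build/table.c")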
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__init__.py
new file mode 100644
index 00000000..481eea9f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__init__.py
@@ -0,0 +1,31 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands."""
+
+__all__ = ['build',
+ 'build_py',
+ 'build_ext',
+ 'build_clib',
+ 'build_scripts',
+ 'clean',
+ 'install',
+ 'install_lib',
+ 'install_headers',
+ 'install_scripts',
+ 'install_data',
+ 'sdist',
+ 'register',
+ 'bdist',
+ 'bdist_dumb',
+ 'bdist_rpm',
+ 'bdist_wininst',
+ 'check',
+ 'upload',
+ # These two are reserved for future use:
+ #'bdist_sdux',
+ #'bdist_pkgtool',
+ # Note:
+ # bdist_packager is not included because it only provides
+ # an abstract base class
+ ]
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/__init__.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..0884ad95
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/bdist.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/bdist.cpython-37.pyc
new file mode 100644
index 00000000..6e2a6cfd
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/bdist.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build.cpython-37.pyc
new file mode 100644
index 00000000..fe09c6ab
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_ext.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_ext.cpython-37.pyc
new file mode 100644
index 00000000..ef483376
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_ext.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_py.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_py.cpython-37.pyc
new file mode 100644
index 00000000..24257776
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_py.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_scripts.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_scripts.cpython-37.pyc
new file mode 100644
index 00000000..bc85237f
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/build_scripts.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install.cpython-37.pyc
new file mode 100644
index 00000000..f9c09ba0
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install_lib.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install_lib.cpython-37.pyc
new file mode 100644
index 00000000..3f6e9f8d
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install_lib.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install_scripts.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install_scripts.cpython-37.pyc
new file mode 100644
index 00000000..83be4d04
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/install_scripts.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/sdist.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/sdist.cpython-37.pyc
new file mode 100644
index 00000000..c2a5d7f1
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/__pycache__/sdist.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist.py
new file mode 100644
index 00000000..014871d2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist.py
@@ -0,0 +1,143 @@
+"""distutils.command.bdist
+
+Implements the Distutils 'bdist' command (create a built [binary]
+distribution)."""
+
+import os
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import get_platform
+
+
+def show_formats():
+ """Print list of available formats (arguments to "--format" option).
+ """
+ from distutils.fancy_getopt import FancyGetopt
+ formats = []
+ for format in bdist.format_commands:
+ formats.append(("formats=" + format, None,
+ bdist.format_command[format][1]))
+ pretty_printer = FancyGetopt(formats)
+ pretty_printer.print_help("List of available distribution formats:")
+
+
+class bdist(Command):
+
+ description = "create a built (binary) distribution"
+
+ user_options = [('bdist-base=', 'b',
+ "temporary directory for creating built distributions"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('formats=', None,
+ "formats for distribution (comma-separated list)"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in "
+ "[default: dist]"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file"
+ " [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file"
+ " [default: current group]"),
+ ]
+
+ boolean_options = ['skip-build']
+
+ help_options = [
+ ('help-formats', None,
+ "lists available distribution formats", show_formats),
+ ]
+
+ # The following commands do not take a format option from bdist
+ no_format_option = ('bdist_rpm',)
+
+ # This won't do in reality: will need to distinguish RPM-ish Linux,
+ # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
+ default_format = {'posix': 'gztar',
+ 'nt': 'zip'}
+
+ # Establish the preferred order (for the --help-formats option).
+ format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
+ 'wininst', 'zip', 'msi']
+
+ # And the real information.
+ format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
+ 'gztar': ('bdist_dumb', "gzip'ed tar file"),
+ 'bztar': ('bdist_dumb', "bzip2'ed tar file"),
+ 'xztar': ('bdist_dumb', "xz'ed tar file"),
+ 'ztar': ('bdist_dumb', "compressed tar file"),
+ 'tar': ('bdist_dumb', "tar file"),
+ 'wininst': ('bdist_wininst',
+ "Windows executable installer"),
+ 'zip': ('bdist_dumb', "ZIP file"),
+ 'msi': ('bdist_msi', "Microsoft Installer")
+ }
+
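+ # Hedged example of how these tables are used: "--formats=gztar,zip"
+ # resolves (via format_command) to running bdist_dumb once per format,
+ # while "--formats=rpm" dispatches to bdist_rpm, which takes no format
+ # option (see no_format_option above).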
+
+ def initialize_options(self):
+ self.bdist_base = None
+ self.plat_name = None
+ self.formats = None
+ self.dist_dir = None
+ self.skip_build = 0
+ self.group = None
+ self.owner = None
+
+ def finalize_options(self):
+ # have to finalize 'plat_name' before 'bdist_base'
+ if self.plat_name is None:
+ if self.skip_build:
+ self.plat_name = get_platform()
+ else:
+ self.plat_name = self.get_finalized_command('build').plat_name
+
+ # 'bdist_base' -- parent of per-built-distribution-format
+ # temporary directories (eg. we'll probably have
+ # "build/bdist./dumb", "build/bdist./rpm", etc.)
+ if self.bdist_base is None:
+ build_base = self.get_finalized_command('build').build_base
+ self.bdist_base = os.path.join(build_base,
+ 'bdist.' + self.plat_name)
+
+ self.ensure_string_list('formats')
+ if self.formats is None:
+ try:
+ self.formats = [self.default_format[os.name]]
+ except KeyError:
+ raise DistutilsPlatformError(
+ "don't know how to create built distributions "
+ "on platform %s" % os.name)
+
+ if self.dist_dir is None:
+ self.dist_dir = "dist"
+
+ def run(self):
+ # Figure out which sub-commands we need to run.
+ commands = []
+ for format in self.formats:
+ try:
+ commands.append(self.format_command[format][0])
+ except KeyError:
+ raise DistutilsOptionError("invalid format '%s'" % format)
+
+ # Reinitialize and run each command.
+ for i in range(len(self.formats)):
+ cmd_name = commands[i]
+ sub_cmd = self.reinitialize_command(cmd_name)
+ if cmd_name not in self.no_format_option:
+ sub_cmd.format = self.formats[i]
+
+ # passing the owner and group names for tar archiving
+ if cmd_name == 'bdist_dumb':
+ sub_cmd.owner = self.owner
+ sub_cmd.group = self.group
+
+ # If we're going to need to run this command again, tell it to
+ # keep its temporary files around so subsequent runs go faster.
+ if cmd_name in commands[i+1:]:
+ sub_cmd.keep_temp = 1
+ self.run_command(cmd_name)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_dumb.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_dumb.py
new file mode 100644
index 00000000..f0d6b5b8
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_dumb.py
@@ -0,0 +1,123 @@
+"""distutils.command.bdist_dumb
+
+Implements the Distutils 'bdist_dumb' command (create a "dumb" built
+distribution -- i.e., just an archive to be unpacked under $prefix or
+$exec_prefix)."""
+
+import os
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import remove_tree, ensure_relative
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_dumb(Command):
+
+ description = "create a \"dumb\" built distribution"
+
+ user_options = [('bdist-dir=', 'd',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('format=', 'f',
+ "archive format to create (tar, gztar, bztar, xztar, "
+ "ztar, zip)"),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('relative', None,
+ "build the archive using relative paths "
+ "(default: false)"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file"
+ " [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file"
+ " [default: current group]"),
+ ]
+
+ boolean_options = ['keep-temp', 'skip-build', 'relative']
+
+ default_format = { 'posix': 'gztar',
+ 'nt': 'zip' }
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.format = None
+ self.keep_temp = 0
+ self.dist_dir = None
+ self.skip_build = None
+ self.relative = 0
+ self.owner = None
+ self.group = None
+
+ def finalize_options(self):
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'dumb')
+
+ if self.format is None:
+ try:
+ self.format = self.default_format[os.name]
+ except KeyError:
+ raise DistutilsPlatformError(
+ "don't know how to create dumb built distributions "
+ "on platform %s" % os.name)
+
+ self.set_undefined_options('bdist',
+ ('dist_dir', 'dist_dir'),
+ ('plat_name', 'plat_name'),
+ ('skip_build', 'skip_build'))
+
+ def run(self):
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install', reinit_subcommands=1)
+ install.root = self.bdist_dir
+ install.skip_build = self.skip_build
+ install.warn_dir = 0
+
+ log.info("installing to %s", self.bdist_dir)
+ self.run_command('install')
+
+ # And make an archive relative to the root of the
+ # pseudo-installation tree.
+ archive_basename = "%s.%s" % (self.distribution.get_fullname(),
+ self.plat_name)
+
+ pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
+ if not self.relative:
+ archive_root = self.bdist_dir
+ else:
+ if (self.distribution.has_ext_modules() and
+ (install.install_base != install.install_platbase)):
+ raise DistutilsPlatformError(
+ "can't make a dumb built distribution where "
+ "base and platbase are different (%s, %s)"
+ % (repr(install.install_base),
+ repr(install.install_platbase)))
+ else:
+ archive_root = os.path.join(self.bdist_dir,
+ ensure_relative(install.install_base))
+
+ # Make the archive
+ filename = self.make_archive(pseudoinstall_root,
+ self.format, root_dir=archive_root,
+ owner=self.owner, group=self.group)
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+ self.distribution.dist_files.append(('bdist_dumb', pyversion,
+ filename))
+
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_msi.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_msi.py
new file mode 100644
index 00000000..80104c37
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_msi.py
@@ -0,0 +1,741 @@
+# Copyright (C) 2005, 2006 Martin von Löwis
+# Licensed to PSF under a Contributor Agreement.
+# The bdist_wininst command proper
+# based on bdist_wininst
+"""
+Implements the bdist_msi command.
+"""
+
+import sys, os
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+from distutils.sysconfig import get_python_version
+from distutils.version import StrictVersion
+from distutils.errors import DistutilsOptionError
+from distutils.util import get_platform
+from distutils import log
+import msilib
+from msilib import schema, sequence, text
+from msilib import Directory, Feature, Dialog, add_data
+
+class PyDialog(Dialog):
+ """Dialog class with a fixed layout: controls at the top, then a ruler,
+ then a list of buttons: back, next, cancel. Optionally a bitmap at the
+ left."""
+ def __init__(self, *args, **kw):
+ """Dialog(database, name, x, y, w, h, attributes, title, first,
+ default, cancel, bitmap=true)"""
+ Dialog.__init__(self, *args)
+ ruler = self.h - 36
+ bmwidth = 152*ruler/328
+ #if kw.get("bitmap", True):
+ # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
+ self.line("BottomLine", 0, ruler, self.w, 0)
+
+ def title(self, title):
+ "Set the title text of the dialog at the top."
+ # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
+ # text, in VerdanaBold10
+ self.text("Title", 15, 10, 320, 60, 0x30003,
+ r"{\VerdanaBold10}%s" % title)
+
+ def back(self, title, next, name = "Back", active = 1):
+ """Add a back button with a given title, the tab-next button,
+ its name in the Control table, possibly initially disabled.
+
+ Return the button, so that events can be associated"""
+ if active:
+ flags = 3 # Visible|Enabled
+ else:
+ flags = 1 # Visible
+ return self.pushbutton(name, 180, self.h-27, 56, 17, flags, title, next)
+
+ def cancel(self, title, next, name = "Cancel", active = 1):
+ """Add a cancel button with a given title, the tab-next button,
+ its name in the Control table, possibly initially disabled.
+
+ Return the button, so that events can be associated"""
+ if active:
+ flags = 3 # Visible|Enabled
+ else:
+ flags = 1 # Visible
+ return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
+
+ def next(self, title, next, name = "Next", active = 1):
+ """Add a Next button with a given title, the tab-next button,
+ its name in the Control table, possibly initially disabled.
+
+ Return the button, so that events can be associated"""
+ if active:
+ flags = 3 # Visible|Enabled
+ else:
+ flags = 1 # Visible
+ return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
+
+ def xbutton(self, name, title, next, xpos):
+ """Add a button with a given title, the tab-next button,
+ its name in the Control table, giving its x position; the
+ y-position is aligned with the other buttons.
+
+ Return the button, so that events can be associated"""
+ return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
+
+class bdist_msi(Command):
+
+ description = "create a Microsoft Installer (.msi) binary distribution"
+
+ user_options = [('bdist-dir=', None,
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('target-version=', None,
+ "require a specific python version" +
+ " on the target system"),
+ ('no-target-compile', 'c',
+ "do not compile .py to .pyc on the target system"),
+ ('no-target-optimize', 'o',
+ "do not compile .py to .pyo (optimized) "
+ "on the target system"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('install-script=', None,
+ "basename of installation script to be run after "
+ "installation or before deinstallation"),
+ ('pre-install-script=', None,
+ "Fully qualified filename of a script to be run before "
+ "any files are installed. This script need not be in the "
+ "distribution"),
+ ]
+
+ boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+ 'skip-build']
+
+ all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
+ '2.5', '2.6', '2.7', '2.8', '2.9',
+ '3.0', '3.1', '3.2', '3.3', '3.4',
+ '3.5', '3.6', '3.7', '3.8', '3.9']
+ other_version = 'X'
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.no_target_compile = 0
+ self.no_target_optimize = 0
+ self.target_version = None
+ self.dist_dir = None
+ self.skip_build = None
+ self.install_script = None
+ self.pre_install_script = None
+ self.versions = None
+
+ def finalize_options(self):
+ self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
+
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'msi')
+
+ short_version = get_python_version()
+ if (not self.target_version) and self.distribution.has_ext_modules():
+ self.target_version = short_version
+
+ if self.target_version:
+ self.versions = [self.target_version]
+ if not self.skip_build and self.distribution.has_ext_modules()\
+ and self.target_version != short_version:
+ raise DistutilsOptionError(
+ "target version can only be %s, or the '--skip-build'"
+ " option must be specified" % (short_version,))
+ else:
+ self.versions = list(self.all_versions)
+
+ self.set_undefined_options('bdist',
+ ('dist_dir', 'dist_dir'),
+ ('plat_name', 'plat_name'),
+ )
+
+ if self.pre_install_script:
+ raise DistutilsOptionError(
+ "the pre-install-script feature is not yet implemented")
+
+ if self.install_script:
+ for script in self.distribution.scripts:
+ if self.install_script == os.path.basename(script):
+ break
+ else:
+ raise DistutilsOptionError(
+ "install_script '%s' not found in scripts"
+ % self.install_script)
+ self.install_script_key = None
+
+ def run(self):
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install', reinit_subcommands=1)
+ install.prefix = self.bdist_dir
+ install.skip_build = self.skip_build
+ install.warn_dir = 0
+
+ install_lib = self.reinitialize_command('install_lib')
+ # we do not want to include pyc or pyo files
+ install_lib.compile = 0
+ install_lib.optimize = 0
+
+ if self.distribution.has_ext_modules():
+ # If we are building an installer for a Python version other
+ # than the one we are currently running, then we need to ensure
+ # our build_lib reflects the other Python version rather than ours.
+ # Note that for target_version!=sys.version, we must have skipped the
+ # build step, so there is no issue with enforcing the build of this
+ # version.
+ target_version = self.target_version
+ if not target_version:
+ assert self.skip_build, "Should have already checked this"
+ target_version = '%d.%d' % sys.version_info[:2]
+ plat_specifier = ".%s-%s" % (self.plat_name, target_version)
+ build = self.get_finalized_command('build')
+ build.build_lib = os.path.join(build.build_base,
+ 'lib' + plat_specifier)
+
+ log.info("installing to %s", self.bdist_dir)
+ install.ensure_finalized()
+
+ # avoid warning of 'install_lib' about installing
+ # into a directory not in sys.path
+ sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+ install.run()
+
+ del sys.path[0]
+
+ self.mkpath(self.dist_dir)
+ fullname = self.distribution.get_fullname()
+ installer_name = self.get_installer_filename(fullname)
+ installer_name = os.path.abspath(installer_name)
+ if os.path.exists(installer_name): os.unlink(installer_name)
+
+ metadata = self.distribution.metadata
+ author = metadata.author
+ if not author:
+ author = metadata.maintainer
+ if not author:
+ author = "UNKNOWN"
+ version = metadata.get_version()
+ # ProductVersion must be strictly numeric
+ # XXX need to deal with prerelease versions
+ sversion = "%d.%d.%d" % StrictVersion(version).version
+ # Prefix ProductName with Python x.y, so that
+ # it sorts together with the other Python packages
+ # in Add/Remove Programs (ARP)
+ fullname = self.distribution.get_fullname()
+ if self.target_version:
+ product_name = "Python %s %s" % (self.target_version, fullname)
+ else:
+ product_name = "Python %s" % (fullname)
+ self.db = msilib.init_database(installer_name, schema,
+ product_name, msilib.gen_uuid(),
+ sversion, author)
+ msilib.add_tables(self.db, sequence)
+ props = [('DistVersion', version)]
+ email = metadata.author_email or metadata.maintainer_email
+ if email:
+ props.append(("ARPCONTACT", email))
+ if metadata.url:
+ props.append(("ARPURLINFOABOUT", metadata.url))
+ if props:
+ add_data(self.db, 'Property', props)
+
+ self.add_find_python()
+ self.add_files()
+ self.add_scripts()
+ self.add_ui()
+ self.db.Commit()
+
+ if hasattr(self.distribution, 'dist_files'):
+ tup = 'bdist_msi', self.target_version or 'any', fullname
+ self.distribution.dist_files.append(tup)
+
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ def add_files(self):
+ db = self.db
+ cab = msilib.CAB("distfiles")
+ rootdir = os.path.abspath(self.bdist_dir)
+
+ root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
+ f = Feature(db, "Python", "Python", "Everything",
+ 0, 1, directory="TARGETDIR")
+
+ items = [(f, root, '')]
+ for version in self.versions + [self.other_version]:
+ target = "TARGETDIR" + version
+ name = default = "Python" + version
+ desc = "Everything"
+ if version is self.other_version:
+ title = "Python from another location"
+ level = 2
+ else:
+ title = "Python %s from registry" % version
+ level = 1
+ f = Feature(db, name, title, desc, 1, level, directory=target)
+ dir = Directory(db, cab, root, rootdir, target, default)
+ items.append((f, dir, version))
+ db.Commit()
+
+ seen = {}
+ for feature, dir, version in items:
+ todo = [dir]
+ while todo:
+ dir = todo.pop()
+ for file in os.listdir(dir.absolute):
+ afile = os.path.join(dir.absolute, file)
+ if os.path.isdir(afile):
+ short = "%s|%s" % (dir.make_short(file), file)
+ default = file + version
+ newdir = Directory(db, cab, dir, file, default, short)
+ todo.append(newdir)
+ else:
+ if not dir.component:
+ dir.start_component(dir.logical, feature, 0)
+ if afile not in seen:
+ key = seen[afile] = dir.add_file(file)
+ if file==self.install_script:
+ if self.install_script_key:
+ raise DistutilsOptionError(
+ "Multiple files with name %s" % file)
+ self.install_script_key = '[#%s]' % key
+ else:
+ key = seen[afile]
+ add_data(self.db, "DuplicateFile",
+ [(key + version, dir.component, key, None, dir.logical)])
+ db.Commit()
+ cab.commit(db)
+
+ def add_find_python(self):
+ """Adds code to the installer to compute the location of Python.
+
+ Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
+ registry for each version of Python.
+
+ Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
+ else from PYTHON.MACHINE.X.Y.
+
+ Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
+
+ start = 402
+ for ver in self.versions:
+ install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
+ machine_reg = "python.machine." + ver
+ user_reg = "python.user." + ver
+ machine_prop = "PYTHON.MACHINE." + ver
+ user_prop = "PYTHON.USER." + ver
+ machine_action = "PythonFromMachine" + ver
+ user_action = "PythonFromUser" + ver
+ exe_action = "PythonExe" + ver
+ target_dir_prop = "TARGETDIR" + ver
+ exe_prop = "PYTHON" + ver
+ if msilib.Win64:
+ # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
+ Type = 2+16
+ else:
+ Type = 2
+ add_data(self.db, "RegLocator",
+ [(machine_reg, 2, install_path, None, Type),
+ (user_reg, 1, install_path, None, Type)])
+ add_data(self.db, "AppSearch",
+ [(machine_prop, machine_reg),
+ (user_prop, user_reg)])
+ add_data(self.db, "CustomAction",
+ [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
+ (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
+ (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
+ ])
+ add_data(self.db, "InstallExecuteSequence",
+ [(machine_action, machine_prop, start),
+ (user_action, user_prop, start + 1),
+ (exe_action, None, start + 2),
+ ])
+ add_data(self.db, "InstallUISequence",
+ [(machine_action, machine_prop, start),
+ (user_action, user_prop, start + 1),
+ (exe_action, None, start + 2),
+ ])
+ add_data(self.db, "Condition",
+ [("Python" + ver, 0, "NOT TARGETDIR" + ver)])
+ start += 4
+ assert start < 500
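+
+ # Illustrative sketch only (the lookup is performed by Windows Installer via
+ # the RegLocator/AppSearch rows above, not by Python): for a hypothetical
+ # version 3.7 it is roughly equivalent to
+ #
+ #   import winreg
+ #   key = r"SOFTWARE\Python\PythonCore\3.7\InstallPath"
+ #   try:        # a per-user install wins if present (PYTHON.USER.3.7)
+ #       with winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) as k:
+ #           target_dir = winreg.QueryValue(k, None)
+ #   except OSError:   # otherwise fall back to HKLM (PYTHON.MACHINE.3.7)
+ #       with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key) as k:
+ #           target_dir = winreg.QueryValue(k, None)
+ #   python_exe = target_dir + r"\python.exe"   # -> PYTHON3.7 property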
+
+ def add_scripts(self):
+ if self.install_script:
+ start = 6800
+ for ver in self.versions + [self.other_version]:
+ install_action = "install_script." + ver
+ exe_prop = "PYTHON" + ver
+ add_data(self.db, "CustomAction",
+ [(install_action, 50, exe_prop, self.install_script_key)])
+ add_data(self.db, "InstallExecuteSequence",
+ [(install_action, "&Python%s=3" % ver, start)])
+ start += 1
+ # XXX pre-install scripts are currently refused in finalize_options()
+ # but if this feature is completed, it will also need to add
+ # entries for each version as the above code does
+ if self.pre_install_script:
+ scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
+ f = open(scriptfn, "w")
+ # The batch file will be executed with [PYTHON], so that %1
+ # is the path to the Python interpreter; %0 will be the path
+ # of the batch file.
+ # rem ="""
+ # %1 %0
+ # exit
+ # """
+ #
+ f.write('rem ="""\n%1 %0\nexit\n"""\n')
+ f.write(open(self.pre_install_script).read())
+ f.close()
+ add_data(self.db, "Binary",
+ [("PreInstall", msilib.Binary(scriptfn))
+ ])
+ add_data(self.db, "CustomAction",
+ [("PreInstall", 2, "PreInstall", None)
+ ])
+ add_data(self.db, "InstallExecuteSequence",
+ [("PreInstall", "NOT Installed", 450)])
+
+
+ def add_ui(self):
+ db = self.db
+ x = y = 50
+ w = 370
+ h = 300
+ title = "[ProductName] Setup"
+
+ # see "Dialog Style Bits"
+ modal = 3 # visible | modal
+ modeless = 1 # visible
+ track_disk_space = 32
+
+ # UI customization properties
+ add_data(db, "Property",
+ # See "DefaultUIFont Property"
+ [("DefaultUIFont", "DlgFont8"),
+ # See "ErrorDialog Style Bit"
+ ("ErrorDialog", "ErrorDlg"),
+ ("Progress1", "Install"), # modified in maintenance type dlg
+ ("Progress2", "installs"),
+ ("MaintenanceForm_Action", "Repair"),
+ # possible values: ALL, JUSTME
+ ("WhichUsers", "ALL")
+ ])
+
+ # Fonts, see "TextStyle Table"
+ add_data(db, "TextStyle",
+ [("DlgFont8", "Tahoma", 9, None, 0),
+ ("DlgFontBold8", "Tahoma", 8, None, 1), #bold
+ ("VerdanaBold10", "Verdana", 10, None, 1),
+ ("VerdanaRed9", "Verdana", 9, 255, 0),
+ ])
+
+ # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
+ # Numbers indicate sequence; see sequence.py for how these actions integrate
+ add_data(db, "InstallUISequence",
+ [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
+ ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
+ # In the user interface, assume all-users installation if privileged.
+ ("SelectFeaturesDlg", "Not Installed", 1230),
+ # XXX no support for resume installations yet
+ #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
+ ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
+ ("ProgressDlg", None, 1280)])
+
+ add_data(db, 'ActionText', text.ActionText)
+ add_data(db, 'UIText', text.UIText)
+ #####################################################################
+ # Standard dialogs: FatalError, UserExit, ExitDialog
+ fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
+ "Finish", "Finish", "Finish")
+ fatal.title("[ProductName] Installer ended prematurely")
+ fatal.back("< Back", "Finish", active = 0)
+ fatal.cancel("Cancel", "Back", active = 0)
+ fatal.text("Description1", 15, 70, 320, 80, 0x30003,
+ "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
+ fatal.text("Description2", 15, 155, 320, 20, 0x30003,
+ "Click the Finish button to exit the Installer.")
+ c=fatal.next("Finish", "Cancel", name="Finish")
+ c.event("EndDialog", "Exit")
+
+ user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
+ "Finish", "Finish", "Finish")
+ user_exit.title("[ProductName] Installer was interrupted")
+ user_exit.back("< Back", "Finish", active = 0)
+ user_exit.cancel("Cancel", "Back", active = 0)
+ user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
+ "[ProductName] setup was interrupted. Your system has not been modified. "
+ "To install this program at a later time, please run the installation again.")
+ user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
+ "Click the Finish button to exit the Installer.")
+ c = user_exit.next("Finish", "Cancel", name="Finish")
+ c.event("EndDialog", "Exit")
+
+ exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
+ "Finish", "Finish", "Finish")
+ exit_dialog.title("Completing the [ProductName] Installer")
+ exit_dialog.back("< Back", "Finish", active = 0)
+ exit_dialog.cancel("Cancel", "Back", active = 0)
+ exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
+ "Click the Finish button to exit the Installer.")
+ c = exit_dialog.next("Finish", "Cancel", name="Finish")
+ c.event("EndDialog", "Return")
+
+ #####################################################################
+ # Required dialog: FilesInUse, ErrorDlg
+ inuse = PyDialog(db, "FilesInUse",
+ x, y, w, h,
+ 19, # KeepModeless|Modal|Visible
+ title,
+ "Retry", "Retry", "Retry", bitmap=False)
+ inuse.text("Title", 15, 6, 200, 15, 0x30003,
+ r"{\DlgFontBold8}Files in Use")
+ inuse.text("Description", 20, 23, 280, 20, 0x30003,
+ "Some files that need to be updated are currently in use.")
+ inuse.text("Text", 20, 55, 330, 50, 3,
+ "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
+ inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
+ None, None, None)
+ c=inuse.back("Exit", "Ignore", name="Exit")
+ c.event("EndDialog", "Exit")
+ c=inuse.next("Ignore", "Retry", name="Ignore")
+ c.event("EndDialog", "Ignore")
+ c=inuse.cancel("Retry", "Exit", name="Retry")
+ c.event("EndDialog","Retry")
+
+ # See "Error Dialog". See "ICE20" for the required names of the controls.
+ error = Dialog(db, "ErrorDlg",
+ 50, 10, 330, 101,
+ 65543, # Error|Minimize|Modal|Visible
+ title,
+ "ErrorText", None, None)
+ error.text("ErrorText", 50,9,280,48,3, "")
+ #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
+ error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
+ error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
+ error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
+ error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
+ error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
+ error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
+ error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
+
+ #####################################################################
+ # Global "Query Cancel" dialog
+ cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
+ "No", "No", "No")
+ cancel.text("Text", 48, 15, 194, 30, 3,
+ "Are you sure you want to cancel [ProductName] installation?")
+ #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
+ # "py.ico", None, None)
+ c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
+ c.event("EndDialog", "Exit")
+
+ c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
+ c.event("EndDialog", "Return")
+
+ #####################################################################
+ # Global "Wait for costing" dialog
+ costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
+ "Return", "Return", "Return")
+ costing.text("Text", 48, 15, 194, 30, 3,
+ "Please wait while the installer finishes determining your disk space requirements.")
+ c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
+ c.event("EndDialog", "Exit")
+
+ #####################################################################
+ # Preparation dialog: no user input except cancellation
+ prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
+ "Cancel", "Cancel", "Cancel")
+ prep.text("Description", 15, 70, 320, 40, 0x30003,
+ "Please wait while the Installer prepares to guide you through the installation.")
+ prep.title("Welcome to the [ProductName] Installer")
+ c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
+ c.mapping("ActionText", "Text")
+ c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
+ c.mapping("ActionData", "Text")
+ prep.back("Back", None, active=0)
+ prep.next("Next", None, active=0)
+ c=prep.cancel("Cancel", None)
+ c.event("SpawnDialog", "CancelDlg")
+
+ #####################################################################
+ # Feature (Python directory) selection
+ seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
+ "Next", "Next", "Cancel")
+ seldlg.title("Select Python Installations")
+
+ seldlg.text("Hint", 15, 30, 300, 20, 3,
+ "Select the Python locations where %s should be installed."
+ % self.distribution.get_fullname())
+
+ seldlg.back("< Back", None, active=0)
+ c = seldlg.next("Next >", "Cancel")
+ order = 1
+ c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
+ for version in self.versions + [self.other_version]:
+ order += 1
+ c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
+ "FEATURE_SELECTED AND &Python%s=3" % version,
+ ordering=order)
+ c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
+ c.event("EndDialog", "Return", ordering=order + 2)
+ c = seldlg.cancel("Cancel", "Features")
+ c.event("SpawnDialog", "CancelDlg")
+
+ c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
+ "FEATURE", None, "PathEdit", None)
+ c.event("[FEATURE_SELECTED]", "1")
+ ver = self.other_version
+ install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
+ dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver
+
+ c = seldlg.text("Other", 15, 200, 300, 15, 3,
+ "Provide an alternate Python location")
+ c.condition("Enable", install_other_cond)
+ c.condition("Show", install_other_cond)
+ c.condition("Disable", dont_install_other_cond)
+ c.condition("Hide", dont_install_other_cond)
+
+ c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
+ "TARGETDIR" + ver, None, "Next", None)
+ c.condition("Enable", install_other_cond)
+ c.condition("Show", install_other_cond)
+ c.condition("Disable", dont_install_other_cond)
+ c.condition("Hide", dont_install_other_cond)
+
+ #####################################################################
+ # Disk cost
+ cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
+ "OK", "OK", "OK", bitmap=False)
+ cost.text("Title", 15, 6, 200, 15, 0x30003,
+ r"{\DlgFontBold8}Disk Space Requirements")
+ cost.text("Description", 20, 20, 280, 20, 0x30003,
+ "The disk space required for the installation of the selected features.")
+ cost.text("Text", 20, 53, 330, 60, 3,
+ "The highlighted volumes (if any) do not have enough disk space "
+ "available for the currently selected features. You can either "
+ "remove some files from the highlighted volumes, or choose to "
+ "install less features onto local drive(s), or select different "
+ "destination drive(s).")
+ cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
+ None, "{120}{70}{70}{70}{70}", None, None)
+ cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
+
+ #####################################################################
+ # WhichUsers Dialog. Only available on NT, and for privileged users.
+ # This must be run before FindRelatedProducts, because that will
+ # take into account whether the previous installation was per-user
+ # or per-machine. We currently don't support going back to this
+ # dialog after "Next" was selected; to support this, we would need to
+ # find how to reset the ALLUSERS property, and how to re-run
+ # FindRelatedProducts.
+ # On Windows9x, the ALLUSERS property is ignored on the command line
+ # and in the Property table, but the installer fails, according to the documentation,
+ # if a dialog attempts to set ALLUSERS.
+ whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
+ "AdminInstall", "Next", "Cancel")
+ whichusers.title("Select whether to install [ProductName] for all users of this computer.")
+ # A radio group with two options: allusers, justme
+ g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
+ "WhichUsers", "", "Next")
+ g.add("ALL", 0, 5, 150, 20, "Install for all users")
+ g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
+
+ whichusers.back("Back", None, active=0)
+
+ c = whichusers.next("Next >", "Cancel")
+ c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
+ c.event("EndDialog", "Return", ordering = 2)
+
+ c = whichusers.cancel("Cancel", "AdminInstall")
+ c.event("SpawnDialog", "CancelDlg")
+
+ #####################################################################
+ # Installation Progress dialog (modeless)
+ progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
+ "Cancel", "Cancel", "Cancel", bitmap=False)
+ progress.text("Title", 20, 15, 200, 15, 0x30003,
+ r"{\DlgFontBold8}[Progress1] [ProductName]")
+ progress.text("Text", 35, 65, 300, 30, 3,
+ "Please wait while the Installer [Progress2] [ProductName]. "
+ "This may take several minutes.")
+ progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
+
+ c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
+ c.mapping("ActionText", "Text")
+
+ #c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
+ #c.mapping("ActionData", "Text")
+
+ c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
+ None, "Progress done", None, None)
+ c.mapping("SetProgress", "Progress")
+
+ progress.back("< Back", "Next", active=False)
+ progress.next("Next >", "Cancel", active=False)
+ progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
+
+ ###################################################################
+ # Maintenance type: repair/uninstall
+ maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
+ "Next", "Next", "Cancel")
+ maint.title("Welcome to the [ProductName] Setup Wizard")
+ maint.text("BodyText", 15, 63, 330, 42, 3,
+ "Select whether you want to repair or remove [ProductName].")
+ g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
+ "MaintenanceForm_Action", "", "Next")
+ #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
+ g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
+ g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
+
+ maint.back("< Back", None, active=False)
+ c=maint.next("Finish", "Cancel")
+ # Change installation: Change progress dialog to "Change", then ask
+ # for feature selection
+ #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
+ #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
+
+ # Reinstall: Change progress dialog to "Repair", then invoke reinstall
+ # Also set list of reinstalled features to "ALL"
+ c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
+ c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
+ c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
+ c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
+
+ # Uninstall: Change progress to "Remove", then invoke uninstall
+ # Also set list of removed features to "ALL"
+ c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
+ c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
+ c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
+ c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
+
+ # Close dialog when maintenance action scheduled
+ c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
+ #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
+
+ maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
+
+ def get_installer_filename(self, fullname):
+ # Factored out to allow overriding in subclasses
+ if self.target_version:
+ base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
+ self.target_version)
+ else:
+ base_name = "%s.%s.msi" % (fullname, self.plat_name)
+ installer_name = os.path.join(self.dist_dir, base_name)
+ return installer_name
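+
+ # A minimal usage sketch (not part of this module): for a project with a
+ # setup.py, the installer built above is typically produced with
+ #
+ #   python setup.py bdist_msi --target-version=3.7 --dist-dir=dist
+ #
+ # where both options are among the user_options declared above; for a
+ # hypothetical project "Example 1.0" on win-amd64, get_installer_filename()
+ # then yields "Example-1.0.win-amd64-py3.7.msi".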
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_rpm.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_rpm.py
new file mode 100644
index 00000000..02f10dd8
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_rpm.py
@@ -0,0 +1,582 @@
+"""distutils.command.bdist_rpm
+
+Implements the Distutils 'bdist_rpm' command (create RPM source and binary
+distributions)."""
+
+import subprocess, sys, os
+from distutils.core import Command
+from distutils.debug import DEBUG
+from distutils.util import get_platform
+from distutils.file_util import write_file
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_rpm(Command):
+
+ description = "create an RPM distribution"
+
+ user_options = [
+ ('bdist-base=', None,
+ "base directory for creating built distributions"),
+ ('rpm-base=', None,
+ "base directory for creating RPMs (defaults to \"rpm\" under "
+ "--bdist-base; must be specified for RPM 2)"),
+ ('dist-dir=', 'd',
+ "directory to put final RPM files in "
+ "(and .spec files if --spec-only)"),
+ ('python=', None,
+ "path to Python interpreter to hard-code in the .spec file "
+ "(default: \"python\")"),
+ ('fix-python', None,
+ "hard-code the exact path to the current Python interpreter in "
+ "the .spec file"),
+ ('spec-only', None,
+ "only regenerate spec file"),
+ ('source-only', None,
+ "only generate source RPM"),
+ ('binary-only', None,
+ "only generate binary RPM"),
+ ('use-bzip2', None,
+ "use bzip2 instead of gzip to create source distribution"),
+
+ # More meta-data: too RPM-specific to put in the setup script,
+ # but needs to go in the .spec file -- so we make these options
+ # to "bdist_rpm". The idea is that packagers would put this
+ # info in setup.cfg, although they are of course free to
+ # supply it on the command line.
+ ('distribution-name=', None,
+ "name of the (Linux) distribution to which this "
+ "RPM applies (*not* the name of the module distribution!)"),
+ ('group=', None,
+ "package classification [default: \"Development/Libraries\"]"),
+ ('release=', None,
+ "RPM release number"),
+ ('serial=', None,
+ "RPM serial number"),
+ ('vendor=', None,
+ "RPM \"vendor\" (eg. \"Joe Blow \") "
+ "[default: maintainer or author from setup script]"),
+ ('packager=', None,
+ "RPM packager (eg. \"Jane Doe \") "
+ "[default: vendor]"),
+ ('doc-files=', None,
+ "list of documentation files (space or comma-separated)"),
+ ('changelog=', None,
+ "RPM changelog"),
+ ('icon=', None,
+ "name of icon file"),
+ ('provides=', None,
+ "capabilities provided by this package"),
+ ('requires=', None,
+ "capabilities required by this package"),
+ ('conflicts=', None,
+ "capabilities which conflict with this package"),
+ ('build-requires=', None,
+ "capabilities required to build this package"),
+ ('obsoletes=', None,
+ "capabilities made obsolete by this package"),
+ ('no-autoreq', None,
+ "do not automatically calculate dependencies"),
+
+ # Actions to take when building RPM
+ ('keep-temp', 'k',
+ "don't clean up RPM build directory"),
+ ('no-keep-temp', None,
+ "clean up RPM build directory [default]"),
+ ('use-rpm-opt-flags', None,
+ "compile with RPM_OPT_FLAGS when building from source RPM"),
+ ('no-rpm-opt-flags', None,
+ "do not pass any RPM CFLAGS to compiler"),
+ ('rpm3-mode', None,
+ "RPM 3 compatibility mode (default)"),
+ ('rpm2-mode', None,
+ "RPM 2 compatibility mode"),
+
+ # Add the hooks necessary for specifying custom scripts
+ ('prep-script=', None,
+ "Specify a script for the PREP phase of RPM building"),
+ ('build-script=', None,
+ "Specify a script for the BUILD phase of RPM building"),
+
+ ('pre-install=', None,
+ "Specify a script for the pre-INSTALL phase of RPM building"),
+ ('install-script=', None,
+ "Specify a script for the INSTALL phase of RPM building"),
+ ('post-install=', None,
+ "Specify a script for the post-INSTALL phase of RPM building"),
+
+ ('pre-uninstall=', None,
+ "Specify a script for the pre-UNINSTALL phase of RPM building"),
+ ('post-uninstall=', None,
+ "Specify a script for the post-UNINSTALL phase of RPM building"),
+
+ ('clean-script=', None,
+ "Specify a script for the CLEAN phase of RPM building"),
+
+ ('verify-script=', None,
+ "Specify a script for the VERIFY phase of the RPM build"),
+
+ # Allow a packager to explicitly force an architecture
+ ('force-arch=', None,
+ "Force an architecture onto the RPM build process"),
+
+ ('quiet', 'q',
+ "Run the INSTALL phase of RPM building in quiet mode"),
+ ]
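+
+ # A sample setup.cfg fragment (illustrative values; option names are the
+ # long options above with '-' replaced by '_'):
+ #
+ #   [bdist_rpm]
+ #   release = 1
+ #   group = Development/Libraries
+ #   vendor = Jane Doe <jane@example.com>
+ #   doc_files = README.txt CHANGES.txt
+ #   requires = python3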
+
+ boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
+ 'no-autoreq', 'quiet']
+
+ negative_opt = {'no-keep-temp': 'keep-temp',
+ 'no-rpm-opt-flags': 'use-rpm-opt-flags',
+ 'rpm2-mode': 'rpm3-mode'}
+
+
+ def initialize_options(self):
+ self.bdist_base = None
+ self.rpm_base = None
+ self.dist_dir = None
+ self.python = None
+ self.fix_python = None
+ self.spec_only = None
+ self.binary_only = None
+ self.source_only = None
+ self.use_bzip2 = None
+
+ self.distribution_name = None
+ self.group = None
+ self.release = None
+ self.serial = None
+ self.vendor = None
+ self.packager = None
+ self.doc_files = None
+ self.changelog = None
+ self.icon = None
+
+ self.prep_script = None
+ self.build_script = None
+ self.install_script = None
+ self.clean_script = None
+ self.verify_script = None
+ self.pre_install = None
+ self.post_install = None
+ self.pre_uninstall = None
+ self.post_uninstall = None
+ self.prep = None
+ self.provides = None
+ self.requires = None
+ self.conflicts = None
+ self.build_requires = None
+ self.obsoletes = None
+
+ self.keep_temp = 0
+ self.use_rpm_opt_flags = 1
+ self.rpm3_mode = 1
+ self.no_autoreq = 0
+
+ self.force_arch = None
+ self.quiet = 0
+
+ def finalize_options(self):
+ self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
+ if self.rpm_base is None:
+ if not self.rpm3_mode:
+ raise DistutilsOptionError(
+ "you must specify --rpm-base in RPM 2 mode")
+ self.rpm_base = os.path.join(self.bdist_base, "rpm")
+
+ if self.python is None:
+ if self.fix_python:
+ self.python = sys.executable
+ else:
+ self.python = "python3"
+ elif self.fix_python:
+ raise DistutilsOptionError(
+ "--python and --fix-python are mutually exclusive options")
+
+ if os.name != 'posix':
+ raise DistutilsPlatformError("don't know how to create RPM "
+ "distributions on platform %s" % os.name)
+ if self.binary_only and self.source_only:
+ raise DistutilsOptionError(
+ "cannot supply both '--source-only' and '--binary-only'")
+
+ # don't pass CFLAGS to pure python distributions
+ if not self.distribution.has_ext_modules():
+ self.use_rpm_opt_flags = 0
+
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+ self.finalize_package_data()
+
+ def finalize_package_data(self):
+ self.ensure_string('group', "Development/Libraries")
+ self.ensure_string('vendor',
+ "%s <%s>" % (self.distribution.get_contact(),
+ self.distribution.get_contact_email()))
+ self.ensure_string('packager')
+ self.ensure_string_list('doc_files')
+ if isinstance(self.doc_files, list):
+ for readme in ('README', 'README.txt'):
+ if os.path.exists(readme) and readme not in self.doc_files:
+ self.doc_files.append(readme)
+
+ self.ensure_string('release', "1")
+ self.ensure_string('serial') # should it be an int?
+
+ self.ensure_string('distribution_name')
+
+ self.ensure_string('changelog')
+ # Format changelog correctly
+ self.changelog = self._format_changelog(self.changelog)
+
+ self.ensure_filename('icon')
+
+ self.ensure_filename('prep_script')
+ self.ensure_filename('build_script')
+ self.ensure_filename('install_script')
+ self.ensure_filename('clean_script')
+ self.ensure_filename('verify_script')
+ self.ensure_filename('pre_install')
+ self.ensure_filename('post_install')
+ self.ensure_filename('pre_uninstall')
+ self.ensure_filename('post_uninstall')
+
+ # XXX don't forget we punted on summaries and descriptions -- they
+ # should be handled here eventually!
+
+ # Now *this* is some meta-data that belongs in the setup script...
+ self.ensure_string_list('provides')
+ self.ensure_string_list('requires')
+ self.ensure_string_list('conflicts')
+ self.ensure_string_list('build_requires')
+ self.ensure_string_list('obsoletes')
+
+ self.ensure_string('force_arch')
+
+ def run(self):
+ if DEBUG:
+ print("before _get_package_data():")
+ print("vendor =", self.vendor)
+ print("packager =", self.packager)
+ print("doc_files =", self.doc_files)
+ print("changelog =", self.changelog)
+
+ # make directories
+ if self.spec_only:
+ spec_dir = self.dist_dir
+ self.mkpath(spec_dir)
+ else:
+ rpm_dir = {}
+ for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
+ rpm_dir[d] = os.path.join(self.rpm_base, d)
+ self.mkpath(rpm_dir[d])
+ spec_dir = rpm_dir['SPECS']
+
+ # The spec file goes into 'dist_dir' if '--spec-only' was specified;
+ # otherwise it goes into the SPECS subdirectory under the RPM build base.
+ spec_path = os.path.join(spec_dir,
+ "%s.spec" % self.distribution.get_name())
+ self.execute(write_file,
+ (spec_path,
+ self._make_spec_file()),
+ "writing '%s'" % spec_path)
+
+ if self.spec_only: # stop if requested
+ return
+
+ # Make a source distribution and copy to SOURCES directory with
+ # optional icon.
+ saved_dist_files = self.distribution.dist_files[:]
+ sdist = self.reinitialize_command('sdist')
+ if self.use_bzip2:
+ sdist.formats = ['bztar']
+ else:
+ sdist.formats = ['gztar']
+ self.run_command('sdist')
+ self.distribution.dist_files = saved_dist_files
+
+ source = sdist.get_archive_files()[0]
+ source_dir = rpm_dir['SOURCES']
+ self.copy_file(source, source_dir)
+
+ if self.icon:
+ if os.path.exists(self.icon):
+ self.copy_file(self.icon, source_dir)
+ else:
+ raise DistutilsFileError(
+ "icon file '%s' does not exist" % self.icon)
+
+ # build package
+ log.info("building RPMs")
+ rpm_cmd = ['rpm']
+ if os.path.exists('/usr/bin/rpmbuild') or \
+ os.path.exists('/bin/rpmbuild'):
+ rpm_cmd = ['rpmbuild']
+
+ if self.source_only: # what kind of RPMs?
+ rpm_cmd.append('-bs')
+ elif self.binary_only:
+ rpm_cmd.append('-bb')
+ else:
+ rpm_cmd.append('-ba')
+ rpm_cmd.extend(['--define', '__python %s' % self.python])
+ if self.rpm3_mode:
+ rpm_cmd.extend(['--define',
+ '_topdir %s' % os.path.abspath(self.rpm_base)])
+ if not self.keep_temp:
+ rpm_cmd.append('--clean')
+
+ if self.quiet:
+ rpm_cmd.append('--quiet')
+
+ rpm_cmd.append(spec_path)
+ # Determine the binary rpm names that should be built out of this spec
+ # file
+ # Note that some of these may not be really built (if the file
+ # list is empty)
+ nvr_string = "%{name}-%{version}-%{release}"
+ src_rpm = nvr_string + ".src.rpm"
+ non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
+ q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
+ src_rpm, non_src_rpm, spec_path)
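+ # For a hypothetical single-package spec "foo" 1.0-1 on x86_64 this query
+ # prints something like
+ #   foo-1.0-1.src.rpm x86_64/foo-1.0-1.x86_64.rpm
+ # i.e. one "<source rpm> <binary rpm>" pair per package, parsed below.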
+
+ out = os.popen(q_cmd)
+ try:
+ binary_rpms = []
+ source_rpm = None
+ while True:
+ line = out.readline()
+ if not line:
+ break
+ l = line.strip().split()
+ assert(len(l) == 2)
+ binary_rpms.append(l[1])
+ # The source rpm is named after the first entry in the spec file
+ if source_rpm is None:
+ source_rpm = l[0]
+
+ status = out.close()
+ if status:
+ raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
+
+ finally:
+ out.close()
+
+ self.spawn(rpm_cmd)
+
+ if not self.dry_run:
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+
+ if not self.binary_only:
+ srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
+ assert(os.path.exists(srpm))
+ self.move_file(srpm, self.dist_dir)
+ filename = os.path.join(self.dist_dir, source_rpm)
+ self.distribution.dist_files.append(
+ ('bdist_rpm', pyversion, filename))
+
+ if not self.source_only:
+ for rpm in binary_rpms:
+ rpm = os.path.join(rpm_dir['RPMS'], rpm)
+ if os.path.exists(rpm):
+ self.move_file(rpm, self.dist_dir)
+ filename = os.path.join(self.dist_dir,
+ os.path.basename(rpm))
+ self.distribution.dist_files.append(
+ ('bdist_rpm', pyversion, filename))
+
+ def _dist_path(self, path):
+ return os.path.join(self.dist_dir, os.path.basename(path))
+
+ def _make_spec_file(self):
+ """Generate the text of an RPM spec file and return it as a
+ list of strings (one per line).
+ """
+ # definitions and headers
+ spec_file = [
+ '%define name ' + self.distribution.get_name(),
+ '%define version ' + self.distribution.get_version().replace('-','_'),
+ '%define unmangled_version ' + self.distribution.get_version(),
+ '%define release ' + self.release.replace('-','_'),
+ '',
+ 'Summary: ' + self.distribution.get_description(),
+ ]
+
+ # Workaround for #14443 which affects some RPM based systems such as
+ # RHEL6 (and probably derivatives)
+ vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}')
+ # Generate a potential replacement value for __os_install_post (whilst
+ # normalizing the whitespace to simplify the test for whether the
+ # invocation of brp-python-bytecompile passes in __python):
+ vendor_hook = '\n'.join([' %s \\' % line.strip()
+ for line in vendor_hook.splitlines()])
+ problem = "brp-python-bytecompile \\\n"
+ fixed = "brp-python-bytecompile %{__python} \\\n"
+ fixed_hook = vendor_hook.replace(problem, fixed)
+ if fixed_hook != vendor_hook:
+ spec_file.append('# Workaround for http://bugs.python.org/issue14443')
+ spec_file.append('%define __os_install_post ' + fixed_hook + '\n')
+
+ # put locale summaries into spec file
+ # XXX not supported for now (hard to put a dictionary
+ # in a config file -- arg!)
+ #for locale in self.summaries.keys():
+ # spec_file.append('Summary(%s): %s' % (locale,
+ # self.summaries[locale]))
+
+ spec_file.extend([
+ 'Name: %{name}',
+ 'Version: %{version}',
+ 'Release: %{release}',])
+
+ # XXX yuck! this filename is available from the "sdist" command,
+ # but only after it has run: and we create the spec file before
+ # running "sdist", in case of --spec-only.
+ if self.use_bzip2:
+ spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
+ else:
+ spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')
+
+ spec_file.extend([
+ 'License: ' + self.distribution.get_license(),
+ 'Group: ' + self.group,
+ 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
+ 'Prefix: %{_prefix}', ])
+
+ if not self.force_arch:
+ # noarch if no extension modules
+ if not self.distribution.has_ext_modules():
+ spec_file.append('BuildArch: noarch')
+ else:
+ spec_file.append( 'BuildArch: %s' % self.force_arch )
+
+ for field in ('Vendor',
+ 'Packager',
+ 'Provides',
+ 'Requires',
+ 'Conflicts',
+ 'Obsoletes',
+ ):
+ val = getattr(self, field.lower())
+ if isinstance(val, list):
+ spec_file.append('%s: %s' % (field, ' '.join(val)))
+ elif val is not None:
+ spec_file.append('%s: %s' % (field, val))
+
+
+ if self.distribution.get_url() != 'UNKNOWN':
+ spec_file.append('Url: ' + self.distribution.get_url())
+
+ if self.distribution_name:
+ spec_file.append('Distribution: ' + self.distribution_name)
+
+ if self.build_requires:
+ spec_file.append('BuildRequires: ' +
+ ' '.join(self.build_requires))
+
+ if self.icon:
+ spec_file.append('Icon: ' + os.path.basename(self.icon))
+
+ if self.no_autoreq:
+ spec_file.append('AutoReq: 0')
+
+ spec_file.extend([
+ '',
+ '%description',
+ self.distribution.get_long_description()
+ ])
+
+ # put locale descriptions into spec file
+ # XXX again, suppressed because config file syntax doesn't
+ # easily support this ;-(
+ #for locale in self.descriptions.keys():
+ # spec_file.extend([
+ # '',
+ # '%description -l ' + locale,
+ # self.descriptions[locale],
+ # ])
+
+ # rpm scripts
+ # figure out default build script
+ def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
+ def_build = "%s build" % def_setup_call
+ if self.use_rpm_opt_flags:
+ def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
+
+ # insert contents of files
+
+ # XXX this is kind of misleading: user-supplied options are files
+ # that we open and interpolate into the spec file, but the defaults
+ # are just text that we drop in as-is. Hmmm.
+
+ install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT '
+ '--record=INSTALLED_FILES') % def_setup_call
+
+ script_options = [
+ ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
+ ('build', 'build_script', def_build),
+ ('install', 'install_script', install_cmd),
+ ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
+ ('verifyscript', 'verify_script', None),
+ ('pre', 'pre_install', None),
+ ('post', 'post_install', None),
+ ('preun', 'pre_uninstall', None),
+ ('postun', 'post_uninstall', None),
+ ]
+
+ for (rpm_opt, attr, default) in script_options:
+ # Insert contents of file referred to, if no file is referred to
+ # use 'default' as contents of script
+ val = getattr(self, attr)
+ if val or default:
+ spec_file.extend([
+ '',
+ '%' + rpm_opt,])
+ if val:
+ spec_file.extend(open(val, 'r').read().split('\n'))
+ else:
+ spec_file.append(default)
+
+
+ # files section
+ spec_file.extend([
+ '',
+ '%files -f INSTALLED_FILES',
+ '%defattr(-,root,root)',
+ ])
+
+ if self.doc_files:
+ spec_file.append('%doc ' + ' '.join(self.doc_files))
+
+ if self.changelog:
+ spec_file.extend([
+ '',
+ '%changelog',])
+ spec_file.extend(self.changelog)
+
+ return spec_file
+
+ def _format_changelog(self, changelog):
+ """Format the changelog correctly and convert it to a list of strings
+ """
+ if not changelog:
+ return changelog
+ new_changelog = []
+ for line in changelog.strip().split('\n'):
+ line = line.strip()
+ if line[0] == '*':
+ new_changelog.extend(['', line])
+ elif line[0] == '-':
+ new_changelog.append(line)
+ else:
+ new_changelog.append(' ' + line)
+
+ # strip trailing newline inserted by first changelog entry
+ if not new_changelog[0]:
+ del new_changelog[0]
+
+ return new_changelog
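+
+ # Example (illustrative): a changelog option of
+ #   "* Mon Jan 01 2018 Jane Doe <jane@example.com>\n- first release"
+ # comes back as
+ #   ['* Mon Jan 01 2018 Jane Doe <jane@example.com>', '- first release']
+ # i.e. "*" entry headers get a blank separator line before them (except the
+ # very first one), "-" items are kept as-is, and any other line is indented.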
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_wininst.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_wininst.py
new file mode 100644
index 00000000..fde56754
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/bdist_wininst.py
@@ -0,0 +1,364 @@
+"""distutils.command.bdist_wininst
+
+Implements the Distutils 'bdist_wininst' command: create a windows installer
+exe-program."""
+
+import sys, os
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import create_tree, remove_tree
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_wininst(Command):
+
+ description = "create an executable installer for MS Windows"
+
+ user_options = [('bdist-dir=', None,
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('target-version=', None,
+ "require a specific python version" +
+ " on the target system"),
+ ('no-target-compile', 'c',
+ "do not compile .py to .pyc on the target system"),
+ ('no-target-optimize', 'o',
+ "do not compile .py to .pyo (optimized) "
+ "on the target system"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('bitmap=', 'b',
+ "bitmap to use for the installer instead of python-powered logo"),
+ ('title=', 't',
+ "title to display on the installer background instead of default"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('install-script=', None,
+ "basename of installation script to be run after "
+ "installation or before deinstallation"),
+ ('pre-install-script=', None,
+ "Fully qualified filename of a script to be run before "
+ "any files are installed. This script need not be in the "
+ "distribution"),
+ ('user-access-control=', None,
+ "specify Vista's UAC handling - 'none'/default=no "
+ "handling, 'auto'=use UAC if target Python installed for "
+ "all users, 'force'=always use UAC"),
+ ]
+
+ boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+ 'skip-build']
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.no_target_compile = 0
+ self.no_target_optimize = 0
+ self.target_version = None
+ self.dist_dir = None
+ self.bitmap = None
+ self.title = None
+ self.skip_build = None
+ self.install_script = None
+ self.pre_install_script = None
+ self.user_access_control = None
+
+
+ def finalize_options(self):
+ self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
+
+ if self.bdist_dir is None:
+ if self.skip_build and self.plat_name:
+ # If build is skipped and plat_name is overridden, bdist will
+ # not see the correct 'plat_name' - so set that up manually.
+ bdist = self.distribution.get_command_obj('bdist')
+ bdist.plat_name = self.plat_name
+ # next the command will be initialized using that name
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'wininst')
+
+ if not self.target_version:
+ self.target_version = ""
+
+ if not self.skip_build and self.distribution.has_ext_modules():
+ short_version = get_python_version()
+ if self.target_version and self.target_version != short_version:
+ raise DistutilsOptionError(
+ "target version can only be %s, or the '--skip-build'" \
+ " option must be specified" % (short_version,))
+ self.target_version = short_version
+
+ self.set_undefined_options('bdist',
+ ('dist_dir', 'dist_dir'),
+ ('plat_name', 'plat_name'),
+ )
+
+ if self.install_script:
+ for script in self.distribution.scripts:
+ if self.install_script == os.path.basename(script):
+ break
+ else:
+ raise DistutilsOptionError(
+ "install_script '%s' not found in scripts"
+ % self.install_script)
+
+ def run(self):
+ if (sys.platform != "win32" and
+ (self.distribution.has_ext_modules() or
+ self.distribution.has_c_libraries())):
+ raise DistutilsPlatformError \
+ ("distribution contains extensions and/or C libraries; "
+ "must be compiled on a Windows 32 platform")
+
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install', reinit_subcommands=1)
+ install.root = self.bdist_dir
+ install.skip_build = self.skip_build
+ install.warn_dir = 0
+ install.plat_name = self.plat_name
+
+ install_lib = self.reinitialize_command('install_lib')
+ # we do not want to include pyc or pyo files
+ install_lib.compile = 0
+ install_lib.optimize = 0
+
+ if self.distribution.has_ext_modules():
+ # If we are building an installer for a Python version other
+ # than the one we are currently running, then we need to ensure
+ # our build_lib reflects the other Python version rather than ours.
+ # Note that for target_version!=sys.version, we must have skipped the
+ # build step, so there is no issue with enforcing the build of this
+ # version.
+ target_version = self.target_version
+ if not target_version:
+ assert self.skip_build, "Should have already checked this"
+ target_version = '%d.%d' % sys.version_info[:2]
+ plat_specifier = ".%s-%s" % (self.plat_name, target_version)
+ build = self.get_finalized_command('build')
+ build.build_lib = os.path.join(build.build_base,
+ 'lib' + plat_specifier)
+
+ # Use a custom scheme for the zip-file, because we have to decide
+ # at installation time which scheme to use.
+ for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
+ value = key.upper()
+ if key == 'headers':
+ value = value + '/Include/$dist_name'
+ setattr(install,
+ 'install_' + key,
+ value)
+
+ log.info("installing to %s", self.bdist_dir)
+ install.ensure_finalized()
+
+ # avoid warning of 'install_lib' about installing
+ # into a directory not in sys.path
+ sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+ install.run()
+
+ del sys.path[0]
+
+ # And make an archive relative to the root of the
+ # pseudo-installation tree.
+ from tempfile import mktemp
+ archive_basename = mktemp()
+ fullname = self.distribution.get_fullname()
+ arcname = self.make_archive(archive_basename, "zip",
+ root_dir=self.bdist_dir)
+ # create an exe containing the zip-file
+ self.create_exe(arcname, fullname, self.bitmap)
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+ self.distribution.dist_files.append(('bdist_wininst', pyversion,
+ self.get_installer_filename(fullname)))
+ # remove the zip-file again
+ log.debug("removing temporary file '%s'", arcname)
+ os.remove(arcname)
+
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ def get_inidata(self):
+ # Return data describing the installation.
+ lines = []
+ metadata = self.distribution.metadata
+
+ # Write the [metadata] section.
+ lines.append("[metadata]")
+
+ # 'info' will be displayed in the installer's dialog box,
+ # describing the items to be installed.
+ info = (metadata.long_description or '') + '\n'
+
+ # Escape newline characters
+ def escape(s):
+ return s.replace("\n", "\\n")
+
+ for name in ["author", "author_email", "description", "maintainer",
+ "maintainer_email", "name", "url", "version"]:
+ data = getattr(metadata, name, "")
+ if data:
+ info = info + ("\n %s: %s" % \
+ (name.capitalize(), escape(data)))
+ lines.append("%s=%s" % (name, escape(data)))
+
+ # The [setup] section contains entries controlling
+ # the installer runtime.
+ lines.append("\n[Setup]")
+ if self.install_script:
+ lines.append("install_script=%s" % self.install_script)
+ lines.append("info=%s" % escape(info))
+ lines.append("target_compile=%d" % (not self.no_target_compile))
+ lines.append("target_optimize=%d" % (not self.no_target_optimize))
+ if self.target_version:
+ lines.append("target_version=%s" % self.target_version)
+ if self.user_access_control:
+ lines.append("user_access_control=%s" % self.user_access_control)
+
+ title = self.title or self.distribution.get_fullname()
+ lines.append("title=%s" % escape(title))
+ import time
+ import distutils
+ build_info = "Built %s with distutils-%s" % \
+ (time.ctime(time.time()), distutils.__version__)
+ lines.append("build_info=%s" % build_info)
+ return "\n".join(lines)
+
+ def create_exe(self, arcname, fullname, bitmap=None):
+ import struct
+
+ self.mkpath(self.dist_dir)
+
+ cfgdata = self.get_inidata()
+
+ installer_name = self.get_installer_filename(fullname)
+ self.announce("creating %s" % installer_name)
+
+ if bitmap:
+ bitmapdata = open(bitmap, "rb").read()
+ bitmaplen = len(bitmapdata)
+ else:
+ bitmaplen = 0
+
+ file = open(installer_name, "wb")
+ file.write(self.get_exe_bytes())
+ if bitmap:
+ file.write(bitmapdata)
+
+ # Encode cfgdata from str to bytes using the "mbcs" (ANSI code page) codec
+ if isinstance(cfgdata, str):
+ cfgdata = cfgdata.encode("mbcs")
+
+ # Append the pre-install script
+ cfgdata = cfgdata + b"\0"
+ if self.pre_install_script:
+ # We need to normalize newlines, so we open in text mode and
+ # convert back to bytes. "latin-1" simply avoids any possible
+ # failures.
+ with open(self.pre_install_script, "r",
+ encoding="latin-1") as script:
+ script_data = script.read().encode("latin-1")
+ cfgdata = cfgdata + script_data + b"\n\0"
+ else:
+ # empty pre-install script
+ cfgdata = cfgdata + b"\0"
+ file.write(cfgdata)
+
+ # The 'magic number' 0x1234567B is used to make sure that the
+ # binary layout of 'cfgdata' is what the wininst.exe binary
+ # expects. If the layout changes, increment that number, make
+ # the corresponding changes to the wininst.exe sources, and
+ # recompile them.
+ header = struct.pack("
+ # 'build_purelib' and 'build_platlib' just default to 'lib' and
+ # 'lib.<plat>' under the base build directory. We only use one of
+ # them for a given distribution, though --
+ if self.build_purelib is None:
+ self.build_purelib = os.path.join(self.build_base, 'lib')
+ if self.build_platlib is None:
+ self.build_platlib = os.path.join(self.build_base,
+ 'lib' + plat_specifier)
+
+ # 'build_lib' is the actual directory that we will use for this
+ # particular module distribution -- if user didn't supply it, pick
+ # one of 'build_purelib' or 'build_platlib'.
+ if self.build_lib is None:
+ if self.distribution.ext_modules:
+ self.build_lib = self.build_platlib
+ else:
+ self.build_lib = self.build_purelib
+
+ # 'build_temp' -- temporary directory for compiler turds,
+ # "build/temp."
+ if self.build_temp is None:
+ self.build_temp = os.path.join(self.build_base,
+ 'temp' + plat_specifier)
+ if self.build_scripts is None:
+ self.build_scripts = os.path.join(self.build_base,
+ 'scripts-%d.%d' % sys.version_info[:2])
+
+ if self.executable is None:
+ self.executable = os.path.normpath(sys.executable)
+
+ if isinstance(self.parallel, str):
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError:
+ raise DistutilsOptionError("parallel should be an integer")
+
+ def run(self):
+ # Run all relevant sub-commands. This will be some subset of:
+ # - build_py - pure Python modules
+ # - build_clib - standalone C libraries
+ # - build_ext - Python extensions
+ # - build_scripts - (Python) scripts
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+
+ # -- Predicates for the sub-command list ---------------------------
+
+ def has_pure_modules(self):
+ return self.distribution.has_pure_modules()
+
+ def has_c_libraries(self):
+ return self.distribution.has_c_libraries()
+
+ def has_ext_modules(self):
+ return self.distribution.has_ext_modules()
+
+ def has_scripts(self):
+ return self.distribution.has_scripts()
+
+
+ sub_commands = [('build_py', has_pure_modules),
+ ('build_clib', has_c_libraries),
+ ('build_ext', has_ext_modules),
+ ('build_scripts', has_scripts),
+ ]
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_clib.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_clib.py
new file mode 100644
index 00000000..3e20ef23
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_clib.py
@@ -0,0 +1,209 @@
+"""distutils.command.build_clib
+
+Implements the Distutils 'build_clib' command, to build a C/C++ library
+that is included in the module distribution and needed by an extension
+module."""
+
+
+# XXX this module has *lots* of code ripped-off quite transparently from
+# build_ext.py -- not surprisingly really, as the work required to build
+# a static library from a collection of C source files is not really all
+# that different from what's required to build a shared object file from
+# a collection of C source files. Nevertheless, I haven't done the
+# necessary refactoring to account for the overlap in code between the
+# two modules, mainly because a number of subtle details changed in the
+# cut 'n paste. Sigh.
+
+import os
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler
+from distutils import log
+
+def show_compilers():
+ from distutils.ccompiler import show_compilers
+ show_compilers()
+
+
+class build_clib(Command):
+
+ description = "build C/C++ libraries used by Python extensions"
+
+ user_options = [
+ ('build-clib=', 'b',
+ "directory to build C/C++ libraries to"),
+ ('build-temp=', 't',
+ "directory to put temporary build by-products"),
+ ('debug', 'g',
+ "compile with debugging information"),
+ ('force', 'f',
+ "forcibly build everything (ignore file timestamps)"),
+ ('compiler=', 'c',
+ "specify the compiler type"),
+ ]
+
+ boolean_options = ['debug', 'force']
+
+ help_options = [
+ ('help-compiler', None,
+ "list available compilers", show_compilers),
+ ]
+
+ def initialize_options(self):
+ self.build_clib = None
+ self.build_temp = None
+
+ # List of libraries to build
+ self.libraries = None
+
+ # Compilation options for all libraries
+ self.include_dirs = None
+ self.define = None
+ self.undef = None
+ self.debug = None
+ self.force = 0
+ self.compiler = None
+
+
+ def finalize_options(self):
+ # This might be confusing: both build-clib and build-temp default
+ # to build-temp as defined by the "build" command. This is because
+ # I think that C libraries are really just temporary build
+ # by-products, at least from the point of view of building Python
+ # extensions -- but I want to keep my options open.
+ self.set_undefined_options('build',
+ ('build_temp', 'build_clib'),
+ ('build_temp', 'build_temp'),
+ ('compiler', 'compiler'),
+ ('debug', 'debug'),
+ ('force', 'force'))
+
+ self.libraries = self.distribution.libraries
+ if self.libraries:
+ self.check_library_list(self.libraries)
+
+ if self.include_dirs is None:
+ self.include_dirs = self.distribution.include_dirs or []
+ if isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+
+ # XXX same as for build_ext -- what about 'self.define' and
+ # 'self.undef' ?
+
+
+ def run(self):
+ if not self.libraries:
+ return
+
+ # Yech -- this is cut 'n pasted from build_ext.py!
+ from distutils.ccompiler import new_compiler
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run,
+ force=self.force)
+ customize_compiler(self.compiler)
+
+ if self.include_dirs is not None:
+ self.compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name,value) in self.define:
+ self.compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ self.compiler.undefine_macro(macro)
+
+ self.build_libraries(self.libraries)
+
+
+ def check_library_list(self, libraries):
+ """Ensure that the list of libraries is valid.
+
+ `libraries` is presumably provided as a command option 'libraries'.
+ This method checks that it is a list of 2-tuples, where the tuples
+ are (library_name, build_info_dict).
+
+ Raise DistutilsSetupError if the structure is invalid anywhere;
+ just returns otherwise.
+ """
+ if not isinstance(libraries, list):
+ raise DistutilsSetupError(
+ "'libraries' option must be a list of tuples")
+
+ for lib in libraries:
+ if not isinstance(lib, tuple) or len(lib) != 2:
+ raise DistutilsSetupError(
+ "each element of 'libraries' must be a 2-tuple")
+
+ name, build_info = lib
+
+ if not isinstance(name, str):
+ raise DistutilsSetupError(
+ "first element of each tuple in 'libraries' "
+ "must be a string (the library name)")
+
+ if '/' in name or (os.sep != '/' and os.sep in name):
+ raise DistutilsSetupError("bad library name '%s': "
+ "may not contain directory separators" % lib[0])
+
+ if not isinstance(build_info, dict):
+ raise DistutilsSetupError(
+ "second element of each tuple in 'libraries' "
+ "must be a dictionary (build info)")
+
+
+ def get_library_names(self):
+ # Assume the library list is valid -- 'check_library_list()' is
+ # called from 'finalize_options()', so it should be!
+ if not self.libraries:
+ return None
+
+ lib_names = []
+ for (lib_name, build_info) in self.libraries:
+ lib_names.append(lib_name)
+ return lib_names
+
+
+ def get_source_files(self):
+ self.check_library_list(self.libraries)
+ filenames = []
+ for (lib_name, build_info) in self.libraries:
+ sources = build_info.get('sources')
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % lib_name)
+
+ filenames.extend(sources)
+ return filenames
+
+
+ def build_libraries(self, libraries):
+ for (lib_name, build_info) in libraries:
+ sources = build_info.get('sources')
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % lib_name)
+ sources = list(sources)
+
+ log.info("building '%s' library", lib_name)
+
+ # First, compile the source code to object files in the library
+ # directory. (This should probably change to putting object
+ # files in a temporary build directory.)
+ macros = build_info.get('macros')
+ include_dirs = build_info.get('include_dirs')
+ objects = self.compiler.compile(sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug)
+
+ # Now "link" the object files together into a static library.
+ # (On Unix at least, this isn't really linking -- it just
+ # builds an archive. Whatever.)
+ self.compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_ext.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_ext.py
new file mode 100644
index 00000000..158465d2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_ext.py
@@ -0,0 +1,732 @@
+"""distutils.command.build_ext
+
+Implements the Distutils 'build_ext' command, for building extension
+modules (currently limited to C extensions, should accommodate C++
+extensions ASAP)."""
+
+import contextlib
+import os
+import re
+import sys
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler, get_python_version
+from distutils.sysconfig import get_config_h_filename
+from distutils.dep_util import newer_group
+from distutils.extension import Extension
+from distutils.util import get_platform
+from distutils import log
+
+from site import USER_BASE
+
+# An extension name is just a dot-separated list of Python NAMEs (ie.
+# the same as a fully-qualified module name).
+extension_name_re = re.compile \
+ (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
+
+
+def show_compilers ():
+ from distutils.ccompiler import show_compilers
+ show_compilers()
+
+
+class build_ext(Command):
+
+ description = "build C/C++ extensions (compile/link to build directory)"
+
+ # XXX thoughts on how to deal with complex command-line options like
+ # these, i.e. how to make it so fancy_getopt can suck them off the
+ # command line and make it look like setup.py defined the appropriate
+ # lists of tuples of what-have-you.
+ # - each command needs a callback to process its command-line options
+ # - Command.__init__() needs access to its share of the whole
+ # command line (must ultimately come from
+ # Distribution.parse_command_line())
+ # - it then calls the current command class' option-parsing
+ # callback to deal with weird options like -D, which have to
+ # parse the option text and churn out some custom data
+ # structure
+ # - that data structure (in this case, a list of 2-tuples)
+ # will then be present in the command object by the time
+ # we get to finalize_options() (i.e. the constructor
+ # takes care of both command-line and client options
+ # in between initialize_options() and finalize_options())
+
+ sep_by = " (separated by '%s')" % os.pathsep
+ user_options = [
+ ('build-lib=', 'b',
+ "directory for compiled extension modules"),
+ ('build-temp=', 't',
+ "directory for temporary files (build by-products)"),
+ ('plat-name=', 'p',
+ "platform name to cross-compile for, if supported "
+ "(default: %s)" % get_platform()),
+ ('inplace', 'i',
+ "ignore build-lib and put compiled extensions into the source " +
+ "directory alongside your pure Python modules"),
+ ('include-dirs=', 'I',
+ "list of directories to search for header files" + sep_by),
+ ('define=', 'D',
+ "C preprocessor macros to define"),
+ ('undef=', 'U',
+ "C preprocessor macros to undefine"),
+ ('libraries=', 'l',
+ "external C libraries to link with"),
+ ('library-dirs=', 'L',
+ "directories to search for external C libraries" + sep_by),
+ ('rpath=', 'R',
+ "directories to search for shared C libraries at runtime"),
+ ('link-objects=', 'O',
+ "extra explicit link objects to include in the link"),
+ ('debug', 'g',
+ "compile/link with debugging information"),
+ ('force', 'f',
+ "forcibly build everything (ignore file timestamps)"),
+ ('compiler=', 'c',
+ "specify the compiler type"),
+ ('parallel=', 'j',
+ "number of parallel build jobs"),
+ ('swig-cpp', None,
+ "make SWIG create C++ files (default is C)"),
+ ('swig-opts=', None,
+ "list of SWIG command line options"),
+ ('swig=', None,
+ "path to the SWIG executable"),
+ ('user', None,
+ "add user include, library and rpath")
+ ]
+
+ boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
+
+ help_options = [
+ ('help-compiler', None,
+ "list available compilers", show_compilers),
+ ]
+
+ def initialize_options(self):
+ self.extensions = None
+ self.build_lib = None
+ self.plat_name = None
+ self.build_temp = None
+ self.inplace = 0
+ self.package = None
+
+ self.include_dirs = None
+ self.define = None
+ self.undef = None
+ self.libraries = None
+ self.library_dirs = None
+ self.rpath = None
+ self.link_objects = None
+ self.debug = None
+ self.force = None
+ self.compiler = None
+ self.swig = None
+ self.swig_cpp = None
+ self.swig_opts = None
+ self.user = None
+ self.parallel = None
+
+ def finalize_options(self):
+ from distutils import sysconfig
+
+ self.set_undefined_options('build',
+ ('build_lib', 'build_lib'),
+ ('build_temp', 'build_temp'),
+ ('compiler', 'compiler'),
+ ('debug', 'debug'),
+ ('force', 'force'),
+ ('parallel', 'parallel'),
+ ('plat_name', 'plat_name'),
+ )
+
+ if self.package is None:
+ self.package = self.distribution.ext_package
+
+ self.extensions = self.distribution.ext_modules
+
+ # Make sure Python's include directories (for Python.h, pyconfig.h,
+ # etc.) are in the include search path.
+ py_include = sysconfig.get_python_inc()
+ plat_py_include = sysconfig.get_python_inc(plat_specific=1)
+ if self.include_dirs is None:
+ self.include_dirs = self.distribution.include_dirs or []
+ if isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+
+ # If in a virtualenv, add its include directory
+ # Issue 16116
+ if sys.exec_prefix != sys.base_exec_prefix:
+ self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
+
+ # Put the Python "system" include dir at the end, so that
+ # any local include dirs take precedence.
+ self.include_dirs.append(py_include)
+ if plat_py_include != py_include:
+ self.include_dirs.append(plat_py_include)
+
+ self.ensure_string_list('libraries')
+ self.ensure_string_list('link_objects')
+
+ # Life is easier if we're not forever checking for None, so
+ # simplify these options to empty lists if unset
+ if self.libraries is None:
+ self.libraries = []
+ if self.library_dirs is None:
+ self.library_dirs = []
+ elif isinstance(self.library_dirs, str):
+ self.library_dirs = self.library_dirs.split(os.pathsep)
+
+ if self.rpath is None:
+ self.rpath = []
+ elif isinstance(self.rpath, str):
+ self.rpath = self.rpath.split(os.pathsep)
+
+ # for extensions under windows use different directories
+ # for Release and Debug builds.
+ # also Python's library directory must be appended to library_dirs
+ if os.name == 'nt':
+ # the 'libs' directory is for binary installs - we assume that
+ # must be the *native* platform. But we don't really support
+ # cross-compiling via a binary install anyway, so we let it go.
+ self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
+ if sys.base_exec_prefix != sys.prefix: # Issue 16116
+ self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
+ if self.debug:
+ self.build_temp = os.path.join(self.build_temp, "Debug")
+ else:
+ self.build_temp = os.path.join(self.build_temp, "Release")
+
+ # Append the source distribution include and library directories,
+ # this allows distutils on windows to work in the source tree
+ self.include_dirs.append(os.path.dirname(get_config_h_filename()))
+ _sys_home = getattr(sys, '_home', None)
+ if _sys_home:
+ self.library_dirs.append(_sys_home)
+
+ # Use the .lib files for the correct architecture
+ if self.plat_name == 'win32':
+ suffix = 'win32'
+ else:
+ # win-amd64
+ suffix = self.plat_name[4:]
+ new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
+ if suffix:
+ new_lib = os.path.join(new_lib, suffix)
+ self.library_dirs.append(new_lib)
+
+ # For extensions under Cygwin, Python's library directory must be
+ # appended to library_dirs
+ if sys.platform[:6] == 'cygwin':
+ if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
+ # building third party extensions
+ self.library_dirs.append(os.path.join(sys.prefix, "lib",
+ "python" + get_python_version(),
+ "config"))
+ else:
+ # building python standard extensions
+ self.library_dirs.append('.')
+
+ # For building extensions with a shared Python library,
+ # Python's library directory must be appended to library_dirs
+ # See Issues: #1600860, #4366
+ if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
+ if not sysconfig.python_build:
+ # building third party extensions
+ self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
+ else:
+ # building python standard extensions
+ self.library_dirs.append('.')
+
+ # The argument parsing will result in self.define being a string, but
+ # it has to be a list of 2-tuples. All the preprocessor symbols
+ # specified by the 'define' option will be set to '1'. Multiple
+ # symbols can be separated with commas.
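+ #
+ # For example (illustrative values), a command line such as
+ #     python setup.py build_ext --define=FOO,BAR
+ # arrives here as the string "FOO,BAR" and is rewritten to
+ #     [("FOO", "1"), ("BAR", "1")]
+ # which is the (name, value) form that define_macro() expects.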
+
+ if self.define:
+ defines = self.define.split(',')
+ self.define = [(symbol, '1') for symbol in defines]
+
+ # The option for macros to undefine is also a string from the
+ # option parsing, but has to be a list. Multiple symbols can also
+ # be separated with commas here.
+ if self.undef:
+ self.undef = self.undef.split(',')
+
+ if self.swig_opts is None:
+ self.swig_opts = []
+ else:
+ self.swig_opts = self.swig_opts.split(' ')
+
+ # Finally add the user include and library directories if requested
+ if self.user:
+ user_include = os.path.join(USER_BASE, "include")
+ user_lib = os.path.join(USER_BASE, "lib")
+ if os.path.isdir(user_include):
+ self.include_dirs.append(user_include)
+ if os.path.isdir(user_lib):
+ self.library_dirs.append(user_lib)
+ self.rpath.append(user_lib)
+
+ if isinstance(self.parallel, str):
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError:
+ raise DistutilsOptionError("parallel should be an integer")
+
+ def run(self):
+ from distutils.ccompiler import new_compiler
+
+ # 'self.extensions', as supplied by setup.py, is a list of
+ # Extension instances. See the documentation for Extension (in
+ # distutils.extension) for details.
+ #
+ # For backwards compatibility with Distutils 0.8.2 and earlier, we
+ # also allow the 'extensions' list to be a list of tuples:
+ # (ext_name, build_info)
+ # where build_info is a dictionary containing everything that
+ # Extension instances do except the name, with a few things being
+ # differently named. We convert these 2-tuples to Extension
+ # instances as needed.
+
+ if not self.extensions:
+ return
+
+ # If we were asked to build any C/C++ libraries, make sure that the
+ # directory where we put them is in the library search path for
+ # linking extensions.
+ if self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ self.libraries.extend(build_clib.get_library_names() or [])
+ self.library_dirs.append(build_clib.build_clib)
+
+ # Setup the CCompiler object that we'll use to do all the
+ # compiling and linking
+ self.compiler = new_compiler(compiler=self.compiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ customize_compiler(self.compiler)
+ # If we are cross-compiling, init the compiler now (if we are not
+ # cross-compiling, init would not hurt, but people may rely on
+ # late initialization of compiler even if they shouldn't...)
+ if os.name == 'nt' and self.plat_name != get_platform():
+ self.compiler.initialize(self.plat_name)
+
+ # And make sure that any compile/link-related options (which might
+ # come from the command-line or from the setup script) are set in
+ # that CCompiler object -- that way, they automatically apply to
+ # all compiling and linking done here.
+ if self.include_dirs is not None:
+ self.compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name, value) in self.define:
+ self.compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ self.compiler.undefine_macro(macro)
+ if self.libraries is not None:
+ self.compiler.set_libraries(self.libraries)
+ if self.library_dirs is not None:
+ self.compiler.set_library_dirs(self.library_dirs)
+ if self.rpath is not None:
+ self.compiler.set_runtime_library_dirs(self.rpath)
+ if self.link_objects is not None:
+ self.compiler.set_link_objects(self.link_objects)
+
+ # Now actually compile and link everything.
+ self.build_extensions()
+
+ def check_extensions_list(self, extensions):
+ """Ensure that the list of extensions (presumably provided as a
+ command option 'extensions') is valid, i.e. it is a list of
+ Extension objects. We also support the old-style list of 2-tuples,
+ where the tuples are (ext_name, build_info), which are converted to
+ Extension instances here.
+
+ Raise DistutilsSetupError if the structure is invalid anywhere;
+ just returns otherwise.
+ """
+ if not isinstance(extensions, list):
+ raise DistutilsSetupError(
+ "'ext_modules' option must be a list of Extension instances")
+
+ for i, ext in enumerate(extensions):
+ if isinstance(ext, Extension):
+ continue # OK! (assume type-checking done
+ # by Extension constructor)
+
+ if not isinstance(ext, tuple) or len(ext) != 2:
+ raise DistutilsSetupError(
+ "each element of 'ext_modules' option must be an "
+ "Extension instance or 2-tuple")
+
+ ext_name, build_info = ext
+
+ log.warn("old-style (ext_name, build_info) tuple found in "
+ "ext_modules for extension '%s' "
+ "-- please convert to Extension instance", ext_name)
+
+ if not (isinstance(ext_name, str) and
+ extension_name_re.match(ext_name)):
+ raise DistutilsSetupError(
+ "first element of each tuple in 'ext_modules' "
+ "must be the extension name (a string)")
+
+ if not isinstance(build_info, dict):
+ raise DistutilsSetupError(
+ "second element of each tuple in 'ext_modules' "
+ "must be a dictionary (build info)")
+
+ # OK, the (ext_name, build_info) dict is type-safe: convert it
+ # to an Extension instance.
+ ext = Extension(ext_name, build_info['sources'])
+
+ # Easy stuff: one-to-one mapping from dict elements to
+ # instance attributes.
+ for key in ('include_dirs', 'library_dirs', 'libraries',
+ 'extra_objects', 'extra_compile_args',
+ 'extra_link_args'):
+ val = build_info.get(key)
+ if val is not None:
+ setattr(ext, key, val)
+
+ # Medium-easy stuff: same syntax/semantics, different names.
+ ext.runtime_library_dirs = build_info.get('rpath')
+ if 'def_file' in build_info:
+ log.warn("'def_file' element of build info dict "
+ "no longer supported")
+
+ # Non-trivial stuff: 'macros' split into 'define_macros'
+ # and 'undef_macros'.
+ macros = build_info.get('macros')
+ if macros:
+ ext.define_macros = []
+ ext.undef_macros = []
+ for macro in macros:
+ if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
+ raise DistutilsSetupError(
+ "'macros' element of build info dict "
+ "must be 1- or 2-tuple")
+ if len(macro) == 1:
+ ext.undef_macros.append(macro[0])
+ elif len(macro) == 2:
+ ext.define_macros.append(macro)
+
+ extensions[i] = ext
+
+ def get_source_files(self):
+ self.check_extensions_list(self.extensions)
+ filenames = []
+
+ # Wouldn't it be neat if we knew the names of header files too...
+ for ext in self.extensions:
+ filenames.extend(ext.sources)
+ return filenames
+
+ def get_outputs(self):
+ # Sanity check the 'extensions' list -- can't assume this is being
+ # done in the same run as a 'build_extensions()' call (in fact, we
+ # can probably assume that it *isn't*!).
+ self.check_extensions_list(self.extensions)
+
+ # And build the list of output (built) filenames. Note that this
+ # ignores the 'inplace' flag, and assumes everything goes in the
+ # "build" tree.
+ outputs = []
+ for ext in self.extensions:
+ outputs.append(self.get_ext_fullpath(ext.name))
+ return outputs
+
+ def build_extensions(self):
+ # First, sanity-check the 'extensions' list
+ self.check_extensions_list(self.extensions)
+ if self.parallel:
+ self._build_extensions_parallel()
+ else:
+ self._build_extensions_serial()
+
+ def _build_extensions_parallel(self):
+ workers = self.parallel
+ if self.parallel is True:
+ workers = os.cpu_count() # may return None
+ try:
+ from concurrent.futures import ThreadPoolExecutor
+ except ImportError:
+ workers = None
+
+ if workers is None:
+ self._build_extensions_serial()
+ return
+
+ with ThreadPoolExecutor(max_workers=workers) as executor:
+ futures = [executor.submit(self.build_extension, ext)
+ for ext in self.extensions]
+ for ext, fut in zip(self.extensions, futures):
+ with self._filter_build_errors(ext):
+ fut.result()
+
+ def _build_extensions_serial(self):
+ for ext in self.extensions:
+ with self._filter_build_errors(ext):
+ self.build_extension(ext)
+
+ @contextlib.contextmanager
+ def _filter_build_errors(self, ext):
+ try:
+ yield
+ except (CCompilerError, DistutilsError, CompileError) as e:
+ if not ext.optional:
+ raise
+ self.warn('building extension "%s" failed: %s' %
+ (ext.name, e))
+
+ def build_extension(self, ext):
+ sources = ext.sources
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'ext_modules' option (extension '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % ext.name)
+ sources = list(sources)
+
+ ext_path = self.get_ext_fullpath(ext.name)
+ depends = sources + ext.depends
+ if not (self.force or newer_group(depends, ext_path, 'newer')):
+ log.debug("skipping '%s' extension (up-to-date)", ext.name)
+ return
+ else:
+ log.info("building '%s' extension", ext.name)
+
+ # First, scan the sources for SWIG definition files (.i), run
+ # SWIG on 'em to create .c files, and modify the sources list
+ # accordingly.
+ sources = self.swig_sources(sources, ext)
+
+ # Next, compile the source code to object files.
+
+ # XXX not honouring 'define_macros' or 'undef_macros' -- the
+ # CCompiler API needs to change to accommodate this, and I
+ # want to do one thing at a time!
+
+ # Two possible sources for extra compiler arguments:
+ # - 'extra_compile_args' in Extension object
+ # - CFLAGS environment variable (not particularly
+ # elegant, but people seem to expect it and I
+ # guess it's useful)
+ # The environment variable should take precedence, and
+ # any sensible compiler will give precedence to later
+ # command line args. Hence we combine them in order:
+ extra_args = ext.extra_compile_args or []
+
+ macros = ext.define_macros[:]
+ for undef in ext.undef_macros:
+ macros.append((undef,))
+
+ objects = self.compiler.compile(sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=ext.include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args,
+ depends=ext.depends)
+
+ # XXX outdated variable, kept here in case third-party code
+ # needs it.
+ self._built_objects = objects[:]
+
+ # Now link the object files together into a "shared object" --
+ # of course, first we have to figure out all the other things
+ # that go into the mix.
+ if ext.extra_objects:
+ objects.extend(ext.extra_objects)
+ extra_args = ext.extra_link_args or []
+
+ # Detect target language, if not provided
+ language = ext.language or self.compiler.detect_language(sources)
+
+ self.compiler.link_shared_object(
+ objects, ext_path,
+ libraries=self.get_libraries(ext),
+ library_dirs=ext.library_dirs,
+ runtime_library_dirs=ext.runtime_library_dirs,
+ extra_postargs=extra_args,
+ export_symbols=self.get_export_symbols(ext),
+ debug=self.debug,
+ build_temp=self.build_temp,
+ target_lang=language)
+
+ def swig_sources(self, sources, extension):
+ """Walk the list of source files in 'sources', looking for SWIG
+ interface (.i) files. Run SWIG on all that are found, and
+ return a modified 'sources' list with SWIG source files replaced
+ by the generated C (or C++) files.
+ """
+ new_sources = []
+ swig_sources = []
+ swig_targets = {}
+
+ # XXX this drops generated C/C++ files into the source tree, which
+ # is fine for developers who want to distribute the generated
+ # source -- but there should be an option to put SWIG output in
+ # the temp dir.
+
+ if self.swig_cpp:
+ log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
+
+ if self.swig_cpp or ('-c++' in self.swig_opts) or \
+ ('-c++' in extension.swig_opts):
+ target_ext = '.cpp'
+ else:
+ target_ext = '.c'
+
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == ".i": # SWIG interface file
+ new_sources.append(base + '_wrap' + target_ext)
+ swig_sources.append(source)
+ swig_targets[source] = new_sources[-1]
+ else:
+ new_sources.append(source)
+
+ if not swig_sources:
+ return new_sources
+
+ swig = self.swig or self.find_swig()
+ swig_cmd = [swig, "-python"]
+ swig_cmd.extend(self.swig_opts)
+ if self.swig_cpp:
+ swig_cmd.append("-c++")
+
+ # Do not override commandline arguments
+ if not self.swig_opts:
+ for o in extension.swig_opts:
+ swig_cmd.append(o)
+
+ for source in swig_sources:
+ target = swig_targets[source]
+ log.info("swigging %s to %s", source, target)
+ self.spawn(swig_cmd + ["-o", target, source])
+
+ return new_sources
+
+ def find_swig(self):
+ """Return the name of the SWIG executable. On Unix, this is
+ just "swig" -- it should be in the PATH. Tries a bit harder on
+ Windows.
+ """
+ if os.name == "posix":
+ return "swig"
+ elif os.name == "nt":
+ # Look for SWIG in its standard installation directory on
+ # Windows (or so I presume!). If we find it there, great;
+ # if not, act like Unix and assume it's in the PATH.
+ for vers in ("1.3", "1.2", "1.1"):
+ fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
+ if os.path.isfile(fn):
+ return fn
+ else:
+ return "swig.exe"
+ else:
+ raise DistutilsPlatformError(
+ "I don't know how to find (much less run) SWIG "
+ "on platform '%s'" % os.name)
+
+ # -- Name generators -----------------------------------------------
+ # (extension names, filenames, whatever)
+ def get_ext_fullpath(self, ext_name):
+ """Returns the path of the filename for a given extension.
+
+ The file is located in `build_lib` or directly in the package
+ (inplace option).
+ """
+ fullname = self.get_ext_fullname(ext_name)
+ modpath = fullname.split('.')
+ filename = self.get_ext_filename(modpath[-1])
+
+ if not self.inplace:
+ # no further work needed
+ # returning :
+ # build_dir/package/path/filename
+ filename = os.path.join(*modpath[:-1]+[filename])
+ return os.path.join(self.build_lib, filename)
+
+ # the inplace option requires to find the package directory
+ # using the build_py command for that
+ package = '.'.join(modpath[0:-1])
+ build_py = self.get_finalized_command('build_py')
+ package_dir = os.path.abspath(build_py.get_package_dir(package))
+
+ # returning
+ # package_dir/filename
+ return os.path.join(package_dir, filename)
+
+ def get_ext_fullname(self, ext_name):
+ """Returns the fullname of a given extension name.
+
+ Adds the `package.` prefix"""
+ if self.package is None:
+ return ext_name
+ else:
+ return self.package + '.' + ext_name
+
+ def get_ext_filename(self, ext_name):
+ r"""Convert the name of an extension (eg. "foo.bar") into the name
+ of the file from which it will be loaded (eg. "foo/bar.so", or
+ "foo\bar.pyd").
+ """
+ from distutils.sysconfig import get_config_var
+ ext_path = ext_name.split('.')
+ ext_suffix = get_config_var('EXT_SUFFIX')
+ return os.path.join(*ext_path) + ext_suffix
+
+ def get_export_symbols(self, ext):
+ """Return the list of symbols that a shared extension has to
+ export. This either uses 'ext.export_symbols' or, if it's not
+ provided, "PyInit_" + module_name. Only relevant on Windows, where
+ the .pyd file (DLL) must export the module "PyInit_" function.
+ """
+ initfunc_name = "PyInit_" + ext.name.split('.')[-1]
+ if initfunc_name not in ext.export_symbols:
+ ext.export_symbols.append(initfunc_name)
+ return ext.export_symbols
+
+ def get_libraries(self, ext):
+ """Return the list of libraries to link against when building a
+ shared extension. On most platforms, this is just 'ext.libraries';
+ on Windows, we add the Python library (eg. python20.dll).
+ """
+ # The python library is always needed on Windows. For MSVC, this
+ # is redundant, since the library is mentioned in a pragma in
+ # pyconfig.h that MSVC groks. The other Windows compilers all seem
+ # to need it mentioned explicitly, though, so that's what we do.
+ # Append '_d' to the python import library on debug builds.
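+ # As a concrete illustration: sys.hexversion >> 24 is the major
+ # version and (sys.hexversion >> 16) & 0xff the minor, so on
+ # Python 3.7 the template below yields "python37" (or "python37_d"
+ # for a debug build).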
+ if sys.platform == "win32":
+ from distutils._msvccompiler import MSVCCompiler
+ if not isinstance(self.compiler, MSVCCompiler):
+ template = "python%d%d"
+ if self.debug:
+ template = template + '_d'
+ pythonlib = (template %
+ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
+ # don't extend ext.libraries, it may be shared with other
+ # extensions, it is a reference to the original list
+ return ext.libraries + [pythonlib]
+ else:
+ return ext.libraries
+ elif sys.platform == 'darwin':
+ # Don't use the default code below
+ return ext.libraries
+ elif sys.platform[:3] == 'aix':
+ # Don't use the default code below
+ return ext.libraries
+ else:
+ from distutils import sysconfig
+ if sysconfig.get_config_var('Py_ENABLE_SHARED'):
+ pythonlib = 'python{}.{}{}'.format(
+ sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
+ sysconfig.get_config_var('ABIFLAGS'))
+ return ext.libraries + [pythonlib]
+ else:
+ return ext.libraries
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_py.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_py.py
new file mode 100644
index 00000000..cf0ca57c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_py.py
@@ -0,0 +1,416 @@
+"""distutils.command.build_py
+
+Implements the Distutils 'build_py' command."""
+
+import os
+import importlib.util
+import sys
+from glob import glob
+
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import convert_path, Mixin2to3
+from distutils import log
+
+class build_py (Command):
+
+ description = "\"build\" pure Python modules (copy to build directory)"
+
+ user_options = [
+ ('build-lib=', 'd', "directory to \"build\" (copy) to"),
+ ('compile', 'c', "compile .py to .pyc"),
+ ('no-compile', None, "don't compile .py files [default]"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ]
+
+ boolean_options = ['compile', 'force']
+ negative_opt = {'no-compile' : 'compile'}
+
+ def initialize_options(self):
+ self.build_lib = None
+ self.py_modules = None
+ self.package = None
+ self.package_data = None
+ self.package_dir = None
+ self.compile = 0
+ self.optimize = 0
+ self.force = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_lib', 'build_lib'),
+ ('force', 'force'))
+
+ # Get the distribution options that are aliases for build_py
+ # options -- list of packages and list of modules.
+ self.packages = self.distribution.packages
+ self.py_modules = self.distribution.py_modules
+ self.package_data = self.distribution.package_data
+ self.package_dir = {}
+ if self.distribution.package_dir:
+ for name, path in self.distribution.package_dir.items():
+ self.package_dir[name] = convert_path(path)
+ self.data_files = self.get_data_files()
+
+ # Ick, copied straight from install_lib.py (fancy_getopt needs a
+ # type system! Hell, *everything* needs a type system!!!)
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ assert 0 <= self.optimize <= 2
+ except (ValueError, AssertionError):
+ raise DistutilsOptionError("optimize must be 0, 1, or 2")
+
+ def run(self):
+ # XXX copy_file by default preserves atime and mtime. IMHO this is
+ # the right thing to do, but perhaps it should be an option -- in
+ # particular, a site administrator might want installed files to
+ # reflect the time of installation rather than the last
+ # modification time before the installed release.
+
+ # XXX copy_file by default preserves mode, which appears to be the
+ # wrong thing to do: if a file is read-only in the working
+ # directory, we want it to be installed read/write so that the next
+ # installation of the same module distribution can overwrite it
+ # without problems. (This might be a Unix-specific issue.) Thus
+ # we turn off 'preserve_mode' when copying to the build directory,
+ # since the build directory is supposed to be exactly what the
+ # installation will look like (ie. we preserve mode when
+ # installing).
+
+ # Two options control which modules will be installed: 'packages'
+ # and 'py_modules'. The former lets us work with whole packages, not
+ # specifying individual modules at all; the latter is for
+ # specifying modules one-at-a-time.
+
+ if self.py_modules:
+ self.build_modules()
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ self.byte_compile(self.get_outputs(include_bytecode=0))
+
+ def get_data_files(self):
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+ data = []
+ if not self.packages:
+ return data
+ for package in self.packages:
+ # Locate package source directory
+ src_dir = self.get_package_dir(package)
+
+ # Compute package build directory
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+ # Length of path to strip from found files
+ plen = 0
+ if src_dir:
+ plen = len(src_dir)+1
+
+ # Strip directory from globbed filenames
+ filenames = [
+ file[plen:] for file in self.find_data_files(package, src_dir)
+ ]
+ data.append((package, src_dir, build_dir, filenames))
+ return data
+
+ def find_data_files(self, package, src_dir):
+ """Return filenames for package's data files in 'src_dir'"""
+ globs = (self.package_data.get('', [])
+ + self.package_data.get(package, []))
+ files = []
+ for pattern in globs:
+ # Each pattern has to be converted to a platform-specific path
+ filelist = glob(os.path.join(src_dir, convert_path(pattern)))
+ # Files that match more than one pattern are only added once
+ files.extend([fn for fn in filelist if fn not in files
+ and os.path.isfile(fn)])
+ return files
+
+ def build_package_data(self):
+ """Copy data files into build directory"""
+ lastdir = None
+ for package, src_dir, build_dir, filenames in self.data_files:
+ for filename in filenames:
+ target = os.path.join(build_dir, filename)
+ self.mkpath(os.path.dirname(target))
+ self.copy_file(os.path.join(src_dir, filename), target,
+ preserve_mode=False)
+
+ def get_package_dir(self, package):
+ """Return the directory, relative to the top of the source
+ distribution, where package 'package' should be found
+ (at least according to the 'package_dir' option, if any)."""
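+ # A sketch with a hypothetical mapping: given
+ #     package_dir = {"": "src", "pkg.sub": "src/sub"}
+ # get_package_dir("pkg.sub") returns "src/sub" (exact match wins),
+ # while get_package_dir("pkg.other") falls back through the root ("")
+ # entry to os.path.join("src", "pkg", "other").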
+ path = package.split('.')
+
+ if not self.package_dir:
+ if path:
+ return os.path.join(*path)
+ else:
+ return ''
+ else:
+ tail = []
+ while path:
+ try:
+ pdir = self.package_dir['.'.join(path)]
+ except KeyError:
+ tail.insert(0, path[-1])
+ del path[-1]
+ else:
+ tail.insert(0, pdir)
+ return os.path.join(*tail)
+ else:
+ # Oops, got all the way through 'path' without finding a
+ # match in package_dir. If package_dir defines a directory
+ # for the root (nameless) package, then fallback on it;
+ # otherwise, we might as well have not consulted
+ # package_dir at all, as we just use the directory implied
+ # by 'tail' (which should be the same as the original value
+ # of 'path' at this point).
+ pdir = self.package_dir.get('')
+ if pdir is not None:
+ tail.insert(0, pdir)
+
+ if tail:
+ return os.path.join(*tail)
+ else:
+ return ''
+
+ def check_package(self, package, package_dir):
+ # Empty dir name means current directory, which we can probably
+ # assume exists. Also, os.path.exists and isdir don't know about
+ # my "empty string means current dir" convention, so we have to
+ # circumvent them.
+ if package_dir != "":
+ if not os.path.exists(package_dir):
+ raise DistutilsFileError(
+ "package directory '%s' does not exist" % package_dir)
+ if not os.path.isdir(package_dir):
+ raise DistutilsFileError(
+ "supposed package directory '%s' exists, "
+ "but is not a directory" % package_dir)
+
+ # Require __init__.py for all but the "root package"
+ if package:
+ init_py = os.path.join(package_dir, "__init__.py")
+ if os.path.isfile(init_py):
+ return init_py
+ else:
+ log.warn(("package init file '%s' not found " +
+ "(or not a regular file)"), init_py)
+
+ # Either not in a package at all (__init__.py not expected), or
+ # __init__.py doesn't exist -- so don't return the filename.
+ return None
+
+ def check_module(self, module, module_file):
+ if not os.path.isfile(module_file):
+ log.warn("file %s (for module %s) not found", module_file, module)
+ return False
+ else:
+ return True
+
+ def find_package_modules(self, package, package_dir):
+ self.check_package(package, package_dir)
+ module_files = glob(os.path.join(package_dir, "*.py"))
+ modules = []
+ setup_script = os.path.abspath(self.distribution.script_name)
+
+ for f in module_files:
+ abs_f = os.path.abspath(f)
+ if abs_f != setup_script:
+ module = os.path.splitext(os.path.basename(f))[0]
+ modules.append((package, module, f))
+ else:
+ self.debug_print("excluding %s" % setup_script)
+ return modules
+
+ def find_modules(self):
+ """Finds individually-specified Python modules, ie. those listed by
+ module name in 'self.py_modules'. Returns a list of tuples (package,
+ module_base, filename): 'package' is a tuple of the path through
+ package-space to the module; 'module_base' is the bare (no
+ packages, no dots) module name, and 'filename' is the path to the
+ ".py" file (relative to the distribution root) that implements the
+ module.
+ """
+ # Map package names to tuples of useful info about the package:
+ # (package_dir, checked)
+ # package_dir - the directory where we'll find source files for
+ # this package
+ # checked - true if we have checked that the package directory
+ # is valid (exists, contains __init__.py, ... ?)
+ packages = {}
+
+ # List of (package, module, filename) tuples to return
+ modules = []
+
+ # We treat modules-in-packages almost the same as toplevel modules,
+ # just the "package" for a toplevel is empty (either an empty
+ # string or empty list, depending on context). Differences:
+ # - don't check for __init__.py in directory for empty package
+ for module in self.py_modules:
+ path = module.split('.')
+ package = '.'.join(path[0:-1])
+ module_base = path[-1]
+
+ try:
+ (package_dir, checked) = packages[package]
+ except KeyError:
+ package_dir = self.get_package_dir(package)
+ checked = 0
+
+ if not checked:
+ init_py = self.check_package(package, package_dir)
+ packages[package] = (package_dir, 1)
+ if init_py:
+ modules.append((package, "__init__", init_py))
+
+ # XXX perhaps we should also check for just .pyc files
+ # (so greedy closed-source bastards can distribute Python
+ # modules too)
+ module_file = os.path.join(package_dir, module_base + ".py")
+ if not self.check_module(module, module_file):
+ continue
+
+ modules.append((package, module_base, module_file))
+
+ return modules
+
+ def find_all_modules(self):
+ """Compute the list of all modules that will be built, whether
+ they are specified one-module-at-a-time ('self.py_modules') or
+ by whole packages ('self.packages'). Return a list of tuples
+ (package, module, module_file), just like 'find_modules()' and
+ 'find_package_modules()' do."""
+ modules = []
+ if self.py_modules:
+ modules.extend(self.find_modules())
+ if self.packages:
+ for package in self.packages:
+ package_dir = self.get_package_dir(package)
+ m = self.find_package_modules(package, package_dir)
+ modules.extend(m)
+ return modules
+
+ def get_source_files(self):
+ return [module[-1] for module in self.find_all_modules()]
+
+ def get_module_outfile(self, build_dir, package, module):
+ outfile_path = [build_dir] + list(package) + [module + ".py"]
+ return os.path.join(*outfile_path)
+
+ def get_outputs(self, include_bytecode=1):
+ modules = self.find_all_modules()
+ outputs = []
+ for (package, module, module_file) in modules:
+ package = package.split('.')
+ filename = self.get_module_outfile(self.build_lib, package, module)
+ outputs.append(filename)
+ if include_bytecode:
+ if self.compile:
+ outputs.append(importlib.util.cache_from_source(
+ filename, optimization=''))
+ if self.optimize > 0:
+ outputs.append(importlib.util.cache_from_source(
+ filename, optimization=self.optimize))
+
+ outputs += [
+ os.path.join(build_dir, filename)
+ for package, src_dir, build_dir, filenames in self.data_files
+ for filename in filenames
+ ]
+
+ return outputs
+
+ def build_module(self, module, module_file, package):
+ if isinstance(package, str):
+ package = package.split('.')
+ elif not isinstance(package, (list, tuple)):
+ raise TypeError(
+ "'package' must be a string (dot-separated), list, or tuple")
+
+ # Now put the module source file into the "build" area -- this is
+ # easy, we just copy it somewhere under self.build_lib (the build
+ # directory for Python source).
+ outfile = self.get_module_outfile(self.build_lib, package, module)
+ dir = os.path.dirname(outfile)
+ self.mkpath(dir)
+ return self.copy_file(module_file, outfile, preserve_mode=0)
+
+ def build_modules(self):
+ modules = self.find_modules()
+ for (package, module, module_file) in modules:
+ # Now "build" the module -- ie. copy the source file to
+ # self.build_lib (the build directory for Python source).
+ # (Actually, it gets copied to the directory for this package
+ # under self.build_lib.)
+ self.build_module(module, module_file, package)
+
+ def build_packages(self):
+ for package in self.packages:
+ # Get list of (package, module, module_file) tuples based on
+ # scanning the package directory. 'package' is only included
+ # in the tuple so that 'find_modules()' and
+ # 'find_package_modules()' have a consistent interface; it's
+ # ignored here (apart from a sanity check). Also, 'module' is
+ # the *unqualified* module name (ie. no dots, no package -- we
+ # already know its package!), and 'module_file' is the path to
+ # the .py file, relative to the current directory
+ # (ie. including 'package_dir').
+ package_dir = self.get_package_dir(package)
+ modules = self.find_package_modules(package, package_dir)
+
+ # Now loop over the modules we found, "building" each one (just
+ # copy it to self.build_lib).
+ for (package_, module, module_file) in modules:
+ assert package == package_
+ self.build_module(module, module_file, package)
+
+ def byte_compile(self, files):
+ if sys.dont_write_bytecode:
+ self.warn('byte-compiling is disabled, skipping.')
+ return
+
+ from distutils.util import byte_compile
+ prefix = self.build_lib
+ if prefix[-1] != os.sep:
+ prefix = prefix + os.sep
+
+ # XXX this code is essentially the same as the 'byte_compile()
+ # method of the "install_lib" command, except for the determination
+ # of the 'prefix' string. Hmmm.
+ if self.compile:
+ byte_compile(files, optimize=0,
+ force=self.force, prefix=prefix, dry_run=self.dry_run)
+ if self.optimize > 0:
+ byte_compile(files, optimize=self.optimize,
+ force=self.force, prefix=prefix, dry_run=self.dry_run)
+
+class build_py_2to3(build_py, Mixin2to3):
+ def run(self):
+ self.updated_files = []
+
+ # Base class code
+ if self.py_modules:
+ self.build_modules()
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ # 2to3
+ self.run_2to3(self.updated_files)
+
+ # Remaining base class code
+ self.byte_compile(self.get_outputs(include_bytecode=0))
+
+ def build_module(self, module, module_file, package):
+ res = build_py.build_module(self, module, module_file, package)
+ if res[1]:
+ # file was copied
+ self.updated_files.append(res[0])
+ return res
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_scripts.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_scripts.py
new file mode 100644
index 00000000..ccc70e64
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/build_scripts.py
@@ -0,0 +1,160 @@
+"""distutils.command.build_scripts
+
+Implements the Distutils 'build_scripts' command."""
+
+import os, re
+from stat import ST_MODE
+from distutils import sysconfig
+from distutils.core import Command
+from distutils.dep_util import newer
+from distutils.util import convert_path, Mixin2to3
+from distutils import log
+import tokenize
+
+# check if Python is called on the first line with this expression
+first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')
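+ # E.g. these (hypothetical) first lines match, with group(1) capturing
+ # any trailing interpreter options so they can be preserved:
+ #     #!/usr/bin/env python
+ #     #!/usr/local/bin/python3.7 -u
+ # whereas a line like "#!/bin/sh" does not match and the script is
+ # copied unchanged.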
+
+class build_scripts(Command):
+
+ description = "\"build\" scripts (copy and fixup #! line)"
+
+ user_options = [
+ ('build-dir=', 'd', "directory to \"build\" (copy) to"),
+ ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ('executable=', 'e', "specify final destination interpreter path"),
+ ]
+
+ boolean_options = ['force']
+
+
+ def initialize_options(self):
+ self.build_dir = None
+ self.scripts = None
+ self.force = None
+ self.executable = None
+ self.outfiles = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_scripts', 'build_dir'),
+ ('force', 'force'),
+ ('executable', 'executable'))
+ self.scripts = self.distribution.scripts
+
+ def get_source_files(self):
+ return self.scripts
+
+ def run(self):
+ if not self.scripts:
+ return
+ self.copy_scripts()
+
+
+ def copy_scripts(self):
+ r"""Copy each script listed in 'self.scripts'; if it's marked as a
+ Python script in the Unix way (first line matches 'first_line_re',
+ ie. starts with "\#!" and contains "python"), then adjust the first
+ line to refer to the current Python interpreter as we copy.
+ """
+ self.mkpath(self.build_dir)
+ outfiles = []
+ updated_files = []
+ for script in self.scripts:
+ adjust = False
+ script = convert_path(script)
+ outfile = os.path.join(self.build_dir, os.path.basename(script))
+ outfiles.append(outfile)
+
+ if not self.force and not newer(script, outfile):
+ log.debug("not copying %s (up-to-date)", script)
+ continue
+
+ # Always open the file, but ignore failures in dry-run mode --
+ # that way, we'll get accurate feedback if we can read the
+ # script.
+ try:
+ f = open(script, "rb")
+ except OSError:
+ if not self.dry_run:
+ raise
+ f = None
+ else:
+ encoding, lines = tokenize.detect_encoding(f.readline)
+ f.seek(0)
+ first_line = f.readline()
+ if not first_line:
+ self.warn("%s is an empty file (skipping)" % script)
+ continue
+
+ match = first_line_re.match(first_line)
+ if match:
+ adjust = True
+ post_interp = match.group(1) or b''
+
+ if adjust:
+ log.info("copying and adjusting %s -> %s", script,
+ self.build_dir)
+ updated_files.append(outfile)
+ if not self.dry_run:
+ if not sysconfig.python_build:
+ executable = self.executable
+ else:
+ executable = os.path.join(
+ sysconfig.get_config_var("BINDIR"),
+ "python%s%s" % (sysconfig.get_config_var("VERSION"),
+ sysconfig.get_config_var("EXE")))
+ executable = os.fsencode(executable)
+ shebang = b"#!" + executable + post_interp + b"\n"
+ # Python parser starts to read a script using UTF-8 until
+ # it gets a #coding:xxx cookie. The shebang has to be the
+ # first line of a file, the #coding:xxx cookie cannot be
+ # written before. So the shebang has to be decodable from
+ # UTF-8.
+ try:
+ shebang.decode('utf-8')
+ except UnicodeDecodeError:
+ raise ValueError(
+ "The shebang ({!r}) is not decodable "
+ "from utf-8".format(shebang))
+ # If the script is encoded to a custom encoding (use a
+ # #coding:xxx cookie), the shebang has to be decodable from
+ # the script encoding too.
+ try:
+ shebang.decode(encoding)
+ except UnicodeDecodeError:
+ raise ValueError(
+ "The shebang ({!r}) is not decodable "
+ "from the script encoding ({})"
+ .format(shebang, encoding))
+ with open(outfile, "wb") as outf:
+ outf.write(shebang)
+ outf.writelines(f.readlines())
+ if f:
+ f.close()
+ else:
+ if f:
+ f.close()
+ updated_files.append(outfile)
+ self.copy_file(script, outfile)
+
+ if os.name == 'posix':
+ for file in outfiles:
+ if self.dry_run:
+ log.info("changing mode of %s", file)
+ else:
+ oldmode = os.stat(file)[ST_MODE] & 0o7777
+ newmode = (oldmode | 0o555) & 0o7777
+ if newmode != oldmode:
+ log.info("changing mode of %s from %o to %o",
+ file, oldmode, newmode)
+ os.chmod(file, newmode)
+ # XXX should we modify self.outfiles?
+ return outfiles, updated_files
+
+class build_scripts_2to3(build_scripts, Mixin2to3):
+
+ def copy_scripts(self):
+ outfiles, updated_files = build_scripts.copy_scripts(self)
+ if not self.dry_run:
+ self.run_2to3(updated_files)
+ return outfiles, updated_files
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/check.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/check.py
new file mode 100644
index 00000000..7ebe707c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/check.py
@@ -0,0 +1,145 @@
+"""distutils.command.check
+
+Implements the Distutils 'check' command.
+"""
+from distutils.core import Command
+from distutils.errors import DistutilsSetupError
+
+try:
+ # docutils is installed
+ from docutils.utils import Reporter
+ from docutils.parsers.rst import Parser
+ from docutils import frontend
+ from docutils import nodes
+ from io import StringIO
+
+ class SilentReporter(Reporter):
+
+ def __init__(self, source, report_level, halt_level, stream=None,
+ debug=0, encoding='ascii', error_handler='replace'):
+ self.messages = []
+ Reporter.__init__(self, source, report_level, halt_level, stream,
+ debug, encoding, error_handler)
+
+ def system_message(self, level, message, *children, **kwargs):
+ self.messages.append((level, message, children, kwargs))
+ return nodes.system_message(message, level=level,
+ type=self.levels[level],
+ *children, **kwargs)
+
+ HAS_DOCUTILS = True
+except Exception:
+ # Catch all exceptions because exceptions besides ImportError probably
+ # indicate that docutils is not ported to Py3k.
+ HAS_DOCUTILS = False
+
+class check(Command):
+ """This command checks the meta-data of the package.
+ """
+ description = ("perform some checks on the package")
+ user_options = [('metadata', 'm', 'Verify meta-data'),
+ ('restructuredtext', 'r',
+ ('Checks if the long string meta-data syntax '
+ 'is reStructuredText-compliant')),
+ ('strict', 's',
+ 'Will exit with an error if a check fails')]
+
+ boolean_options = ['metadata', 'restructuredtext', 'strict']
+
+ def initialize_options(self):
+ """Sets default values for options."""
+ self.restructuredtext = 0
+ self.metadata = 1
+ self.strict = 0
+ self._warnings = 0
+
+ def finalize_options(self):
+ pass
+
+ def warn(self, msg):
+ """Counts the number of warnings that occurs."""
+ self._warnings += 1
+ return Command.warn(self, msg)
+
+ def run(self):
+ """Runs the command."""
+ # perform the various tests
+ if self.metadata:
+ self.check_metadata()
+ if self.restructuredtext:
+ if HAS_DOCUTILS:
+ self.check_restructuredtext()
+ elif self.strict:
+ raise DistutilsSetupError('The docutils package is needed.')
+
+ # let's raise an error in strict mode, if we have at least
+ # one warning
+ if self.strict and self._warnings > 0:
+ raise DistutilsSetupError('Please correct your package.')
+
+ def check_metadata(self):
+ """Ensures that all required elements of meta-data are supplied.
+
+ name, version, URL, (author and author_email) or
+ (maintainer and maintainer_email).
+
+ Warns if any are missing.
+ """
+ metadata = self.distribution.metadata
+
+ missing = []
+ for attr in ('name', 'version', 'url'):
+ if not (hasattr(metadata, attr) and getattr(metadata, attr)):
+ missing.append(attr)
+
+ if missing:
+ self.warn("missing required meta-data: %s" % ', '.join(missing))
+ if metadata.author:
+ if not metadata.author_email:
+ self.warn("missing meta-data: if 'author' supplied, " +
+ "'author_email' must be supplied too")
+ elif metadata.maintainer:
+ if not metadata.maintainer_email:
+ self.warn("missing meta-data: if 'maintainer' supplied, " +
+ "'maintainer_email' must be supplied too")
+ else:
+ self.warn("missing meta-data: either (author and author_email) " +
+ "or (maintainer and maintainer_email) " +
+ "must be supplied")
+
+ def check_restructuredtext(self):
+ """Checks if the long string fields are reST-compliant."""
+ data = self.distribution.get_long_description()
+ for warning in self._check_rst_data(data):
+ line = warning[-1].get('line')
+ if line is None:
+ warning = warning[1]
+ else:
+ warning = '%s (line %s)' % (warning[1], line)
+ self.warn(warning)
+
+ def _check_rst_data(self, data):
+ """Returns warnings when the provided data doesn't compile."""
+ source_path = StringIO()
+ parser = Parser()
+ settings = frontend.OptionParser(components=(Parser,)).get_default_values()
+ settings.tab_width = 4
+ settings.pep_references = None
+ settings.rfc_references = None
+ reporter = SilentReporter(source_path,
+ settings.report_level,
+ settings.halt_level,
+ stream=settings.warning_stream,
+ debug=settings.debug,
+ encoding=settings.error_encoding,
+ error_handler=settings.error_encoding_error_handler)
+
+ document = nodes.document(settings, reporter, source=source_path)
+ document.note_source(source_path, -1)
+ try:
+ parser.parse(data, document)
+ except AttributeError as e:
+ reporter.messages.append(
+ (-1, 'Could not finish the parsing: %s.' % e, '', {}))
+
+ return reporter.messages
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/clean.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/clean.py
new file mode 100644
index 00000000..0cb27016
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/clean.py
@@ -0,0 +1,76 @@
+"""distutils.command.clean
+
+Implements the Distutils 'clean' command."""
+
+ # contributed by Bastian Kleineidam, added 2000-03-18
+
+import os
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+from distutils import log
+
+class clean(Command):
+
+ description = "clean up temporary files from 'build' command"
+ user_options = [
+ ('build-base=', 'b',
+ "base build directory (default: 'build.build-base')"),
+ ('build-lib=', None,
+ "build directory for all modules (default: 'build.build-lib')"),
+ ('build-temp=', 't',
+ "temporary build directory (default: 'build.build-temp')"),
+ ('build-scripts=', None,
+ "build directory for scripts (default: 'build.build-scripts')"),
+ ('bdist-base=', None,
+ "temporary directory for built distributions"),
+ ('all', 'a',
+ "remove all build output, not just temporary by-products")
+ ]
+
+ boolean_options = ['all']
+
+ def initialize_options(self):
+ self.build_base = None
+ self.build_lib = None
+ self.build_temp = None
+ self.build_scripts = None
+ self.bdist_base = None
+ self.all = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_base', 'build_base'),
+ ('build_lib', 'build_lib'),
+ ('build_scripts', 'build_scripts'),
+ ('build_temp', 'build_temp'))
+ self.set_undefined_options('bdist',
+ ('bdist_base', 'bdist_base'))
+
+ def run(self):
+ # remove the build/temp. directory (unless it's already
+ # gone)
+ if os.path.exists(self.build_temp):
+ remove_tree(self.build_temp, dry_run=self.dry_run)
+ else:
+ log.debug("'%s' does not exist -- can't clean it",
+ self.build_temp)
+
+ if self.all:
+ # remove build directories
+ for directory in (self.build_lib,
+ self.bdist_base,
+ self.build_scripts):
+ if os.path.exists(directory):
+ remove_tree(directory, dry_run=self.dry_run)
+ else:
+ log.warn("'%s' does not exist -- can't clean it",
+ directory)
+
+ # just for the heck of it, try to remove the base build directory:
+ # we might have emptied it right now, but if not we don't care
+ if not self.dry_run:
+ try:
+ os.rmdir(self.build_base)
+ log.info("removing '%s'", self.build_base)
+ except OSError:
+ pass
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/command_template b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/command_template
new file mode 100644
index 00000000..6106819d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/command_template
@@ -0,0 +1,33 @@
+"""distutils.command.x
+
+Implements the Distutils 'x' command.
+"""
+
+# created 2000/mm/dd, John Doe
+
+__revision__ = "$Id$"
+
+from distutils.core import Command
+
+
+class x(Command):
+
+ # Brief (40-50 characters) description of the command
+ description = ""
+
+ # List of option tuples: long name, short name (None if no short
+ # name), and help string.
+ user_options = [('', '',
+ ""),
+ ]
+
+ def initialize_options(self):
+ self. = None
+ self. = None
+ self. = None
+
+ def finalize_options(self):
+ if self.x is None:
+ self.x =
+
+ def run(self):
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/config.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/config.py
new file mode 100644
index 00000000..4ae153d1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/config.py
@@ -0,0 +1,347 @@
+"""distutils.command.config
+
+Implements the Distutils 'config' command, a (mostly) empty command class
+that exists mainly to be sub-classed by specific module distributions and
+applications. The idea is that while every "config" command is different,
+at least they're all named the same, and users always see "config" in the
+list of standard commands. Also, this is a good place to put common
+configure-like tasks: "try to compile this C code", or "figure out where
+this header file lives".
+"""
+
+import os, re
+
+from distutils.core import Command
+from distutils.errors import DistutilsExecError
+from distutils.sysconfig import customize_compiler
+from distutils import log
+
+LANG_EXT = {"c": ".c", "c++": ".cxx"}
+
+class config(Command):
+
+ description = "prepare to build"
+
+ user_options = [
+ ('compiler=', None,
+ "specify the compiler type"),
+ ('cc=', None,
+ "specify the compiler executable"),
+ ('include-dirs=', 'I',
+ "list of directories to search for header files"),
+ ('define=', 'D',
+ "C preprocessor macros to define"),
+ ('undef=', 'U',
+ "C preprocessor macros to undefine"),
+ ('libraries=', 'l',
+ "external C libraries to link with"),
+ ('library-dirs=', 'L',
+ "directories to search for external C libraries"),
+
+ ('noisy', None,
+ "show every action (compile, link, run, ...) taken"),
+ ('dump-source', None,
+ "dump generated source files before attempting to compile them"),
+ ]
+
+
+ # The three standard command methods: since the "config" command
+ # does nothing by default, these are empty.
+
+ def initialize_options(self):
+ self.compiler = None
+ self.cc = None
+ self.include_dirs = None
+ self.libraries = None
+ self.library_dirs = None
+
+ # maximal output for now
+ self.noisy = 1
+ self.dump_source = 1
+
+ # list of temporary files generated along-the-way that we have
+ # to clean at some point
+ self.temp_files = []
+
+ def finalize_options(self):
+ if self.include_dirs is None:
+ self.include_dirs = self.distribution.include_dirs or []
+ elif isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+
+ if self.libraries is None:
+ self.libraries = []
+ elif isinstance(self.libraries, str):
+ self.libraries = [self.libraries]
+
+ if self.library_dirs is None:
+ self.library_dirs = []
+ elif isinstance(self.library_dirs, str):
+ self.library_dirs = self.library_dirs.split(os.pathsep)
+
+ def run(self):
+ pass
+
+ # Utility methods for actual "config" commands. The interfaces are
+ # loosely based on Autoconf macros of similar names. Sub-classes
+ # may use these freely.
+
+ def _check_compiler(self):
+ """Check that 'self.compiler' really is a CCompiler object;
+ if not, make it one.
+ """
+ # We do this late, and only on-demand, because this is an expensive
+ # import.
+ from distutils.ccompiler import CCompiler, new_compiler
+ if not isinstance(self.compiler, CCompiler):
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run, force=1)
+ customize_compiler(self.compiler)
+ if self.include_dirs:
+ self.compiler.set_include_dirs(self.include_dirs)
+ if self.libraries:
+ self.compiler.set_libraries(self.libraries)
+ if self.library_dirs:
+ self.compiler.set_library_dirs(self.library_dirs)
+
+ def _gen_temp_sourcefile(self, body, headers, lang):
+ filename = "_configtest" + LANG_EXT[lang]
+ file = open(filename, "w")
+ if headers:
+ for header in headers:
+ file.write("#include <%s>\n" % header)
+ file.write("\n")
+ file.write(body)
+ if body[-1] != "\n":
+ file.write("\n")
+ file.close()
+ return filename
+
+ def _preprocess(self, body, headers, include_dirs, lang):
+ src = self._gen_temp_sourcefile(body, headers, lang)
+ out = "_configtest.i"
+ self.temp_files.extend([src, out])
+ self.compiler.preprocess(src, out, include_dirs=include_dirs)
+ return (src, out)
+
+ def _compile(self, body, headers, include_dirs, lang):
+ src = self._gen_temp_sourcefile(body, headers, lang)
+ if self.dump_source:
+ dump_file(src, "compiling '%s':" % src)
+ (obj,) = self.compiler.object_filenames([src])
+ self.temp_files.extend([src, obj])
+ self.compiler.compile([src], include_dirs=include_dirs)
+ return (src, obj)
+
+ def _link(self, body, headers, include_dirs, libraries, library_dirs,
+ lang):
+ (src, obj) = self._compile(body, headers, include_dirs, lang)
+ prog = os.path.splitext(os.path.basename(src))[0]
+ self.compiler.link_executable([obj], prog,
+ libraries=libraries,
+ library_dirs=library_dirs,
+ target_lang=lang)
+
+ if self.compiler.exe_extension is not None:
+ prog = prog + self.compiler.exe_extension
+ self.temp_files.append(prog)
+
+ return (src, obj, prog)
+
+ def _clean(self, *filenames):
+ if not filenames:
+ filenames = self.temp_files
+ self.temp_files = []
+ log.info("removing: %s", ' '.join(filenames))
+ for filename in filenames:
+ try:
+ os.remove(filename)
+ except OSError:
+ pass
+
+
+ # XXX these ignore the dry-run flag: what to do, what to do? even if
+ # you want a dry-run build, you still need some sort of configuration
+ # info. My inclination is to make it up to the real config command to
+ # consult 'dry_run', and assume a default (minimal) configuration if
+ # true. The problem with trying to do it here is that you'd have to
+ # return either true or false from all the 'try' methods, neither of
+ # which is correct.
+
+ # XXX need access to the header search path and maybe default macros.
+
+ def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
+ """Construct a source file from 'body' (a string containing lines
+ of C/C++ code) and 'headers' (a list of header files to include)
+ and run it through the preprocessor. Return true if the
+ preprocessor succeeded, false if there were any errors.
+ ('body' probably isn't of much use, but what the heck.)
+ """
+ from distutils.ccompiler import CompileError
+ self._check_compiler()
+ ok = True
+ try:
+ self._preprocess(body, headers, include_dirs, lang)
+ except CompileError:
+ ok = False
+
+ self._clean()
+ return ok
+
+ def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
+ lang="c"):
+ """Construct a source file (just like 'try_cpp()'), run it through
+ the preprocessor, and return true if any line of the output matches
+ 'pattern'. 'pattern' should either be a compiled regex object or a
+ string containing a regex. If both 'body' and 'headers' are None,
+ preprocesses an empty file -- which can be useful to determine the
+ symbols the preprocessor and compiler set by default.
+ """
+ self._check_compiler()
+ src, out = self._preprocess(body, headers, include_dirs, lang)
+
+ if isinstance(pattern, str):
+ pattern = re.compile(pattern)
+
+ file = open(out)
+ match = False
+ while True:
+ line = file.readline()
+ if line == '':
+ break
+ if pattern.search(line):
+ match = True
+ break
+
+ file.close()
+ self._clean()
+ return match
+
+ def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
+ """Try to compile a source file built from 'body' and 'headers'.
+ Return true on success, false otherwise.
+ """
+ from distutils.ccompiler import CompileError
+ self._check_compiler()
+ try:
+ self._compile(body, headers, include_dirs, lang)
+ ok = True
+ except CompileError:
+ ok = False
+
+ log.info(ok and "success!" or "failure.")
+ self._clean()
+ return ok
+
+ def try_link(self, body, headers=None, include_dirs=None, libraries=None,
+ library_dirs=None, lang="c"):
+ """Try to compile and link a source file, built from 'body' and
+ 'headers', to executable form. Return true on success, false
+ otherwise.
+ """
+ from distutils.ccompiler import CompileError, LinkError
+ self._check_compiler()
+ try:
+ self._link(body, headers, include_dirs,
+ libraries, library_dirs, lang)
+ ok = True
+ except (CompileError, LinkError):
+ ok = False
+
+ log.info(ok and "success!" or "failure.")
+ self._clean()
+ return ok
+
+ def try_run(self, body, headers=None, include_dirs=None, libraries=None,
+ library_dirs=None, lang="c"):
+ """Try to compile, link to an executable, and run a program
+ built from 'body' and 'headers'. Return true on success, false
+ otherwise.
+ """
+ from distutils.ccompiler import CompileError, LinkError
+ self._check_compiler()
+ try:
+ src, obj, exe = self._link(body, headers, include_dirs,
+ libraries, library_dirs, lang)
+ self.spawn([exe])
+ ok = True
+ except (CompileError, LinkError, DistutilsExecError):
+ ok = False
+
+ log.info(ok and "success!" or "failure.")
+ self._clean()
+ return ok
+
+
+ # -- High-level methods --------------------------------------------
+ # (these are the ones that are actually likely to be useful
+ # when implementing a real-world config command!)
+
+ def check_func(self, func, headers=None, include_dirs=None,
+ libraries=None, library_dirs=None, decl=0, call=0):
+ """Determine if function 'func' is available by constructing a
+ source file that refers to 'func', and compiling and linking it.
+ If everything succeeds, returns true; otherwise returns false.
+
+ The constructed source file starts out by including the header
+ files listed in 'headers'. If 'decl' is true, it then declares
+ 'func' (as "int func()"); you probably shouldn't supply 'headers'
+ and set 'decl' true in the same call, or you might get errors about
+ conflicting declarations for 'func'. Finally, the constructed
+ 'main()' function either references 'func' or (if 'call' is true)
+ calls it. 'libraries' and 'library_dirs' are used when
+ linking.
+ """
+ self._check_compiler()
+ body = []
+ if decl:
+ body.append("int %s ();" % func)
+ body.append("int main () {")
+ if call:
+ body.append(" %s();" % func)
+ else:
+ body.append(" %s;" % func)
+ body.append("}")
+ body = "\n".join(body) + "\n"
+
+ return self.try_link(body, headers, include_dirs,
+ libraries, library_dirs)
+
+ def check_lib(self, library, library_dirs=None, headers=None,
+ include_dirs=None, other_libraries=[]):
+ """Determine if 'library' is available to be linked against,
+ without actually checking that any particular symbols are provided
+ by it. 'headers' will be used in constructing the source file to
+ be compiled, but the only effect of this is to check if all the
+ header files listed are available. Any libraries listed in
+ 'other_libraries' will be included in the link, in case 'library'
+ has symbols that depend on other libraries.
+ """
+ self._check_compiler()
+ return self.try_link("int main (void) { }", headers, include_dirs,
+ [library] + other_libraries, library_dirs)
+
+ def check_header(self, header, include_dirs=None, library_dirs=None,
+ lang="c"):
+ """Determine if the system header file named by 'header_file'
+ exists and can be found by the preprocessor; return true if so,
+ false otherwise.
+ """
+ return self.try_cpp(body="/* No body */", headers=[header],
+ include_dirs=include_dirs)
+
+
+def dump_file(filename, head=None):
+ """Dumps a file content into log.info.
+
+ If head is not None, will be dumped before the file content.
+ """
+ if head is None:
+ log.info('%s', filename)
+ else:
+ log.info(head)
+ file = open(filename)
+ try:
+ log.info(file.read())
+ finally:
+ file.close()
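
A hypothetical project-specific subclass showing how the helpers above are typically used from a setup script (the `my_config` class and the zlib names are purely illustrative; it would be registered with `setup(cmdclass={'config': my_config})`):

```python
from distutils.command.config import config


class my_config(config):

    def run(self):
        # does <zlib.h> make it through the preprocessor?
        have_header = self.check_header("zlib.h")

        # can we link against libz and actually call compress()?
        have_func = self.check_func("compress",
                                    headers=["zlib.h"],
                                    libraries=["z"],
                                    call=1)

        if have_header and have_func:
            self.announce("zlib found -- enabling compression support")
        else:
            self.warn("zlib not found -- compression support disabled")
```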
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install.py
new file mode 100644
index 00000000..0258d3de
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install.py
@@ -0,0 +1,656 @@
+"""distutils.command.install
+
+Implements the Distutils 'install' command."""
+
+import sys
+import os
+
+from distutils import log
+from distutils.core import Command
+from distutils.debug import DEBUG
+from distutils.sysconfig import get_config_vars
+from distutils.errors import DistutilsPlatformError
+from distutils.file_util import write_file
+from distutils.util import convert_path, subst_vars, change_root
+from distutils.util import get_platform
+from distutils.errors import DistutilsOptionError
+
+from site import USER_BASE
+from site import USER_SITE
+HAS_USER_SITE = True
+
+WINDOWS_SCHEME = {
+ 'purelib': '$base/Lib/site-packages',
+ 'platlib': '$base/Lib/site-packages',
+ 'headers': '$base/Include/$dist_name',
+ 'scripts': '$base/Scripts',
+ 'data' : '$base',
+}
+
+INSTALL_SCHEMES = {
+ 'unix_prefix': {
+ 'purelib': '$base/lib/python$py_version_short/site-packages',
+ 'platlib': '$platbase/lib/python$py_version_short/site-packages',
+ 'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
+ 'scripts': '$base/bin',
+ 'data' : '$base',
+ },
+ 'unix_home': {
+ 'purelib': '$base/lib/python',
+ 'platlib': '$base/lib/python',
+ 'headers': '$base/include/python/$dist_name',
+ 'scripts': '$base/bin',
+ 'data' : '$base',
+ },
+ 'nt': WINDOWS_SCHEME,
+ }
+
+# user site schemes
+if HAS_USER_SITE:
+ INSTALL_SCHEMES['nt_user'] = {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
+ 'scripts': '$userbase/Python$py_version_nodot/Scripts',
+ 'data' : '$userbase',
+ }
+
+ INSTALL_SCHEMES['unix_user'] = {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers':
+ '$userbase/include/python$py_version_short$abiflags/$dist_name',
+ 'scripts': '$userbase/bin',
+ 'data' : '$userbase',
+ }
+
+# The keys to an installation scheme; if any new types of files are to be
+# installed, be sure to add an entry to every installation scheme above,
+# and to SCHEME_KEYS here.
+SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
+
+
+class install(Command):
+
+ description = "install everything from build directory"
+
+ user_options = [
+ # Select installation scheme and set base director(y|ies)
+ ('prefix=', None,
+ "installation prefix"),
+ ('exec-prefix=', None,
+ "(Unix only) prefix for platform-specific files"),
+ ('home=', None,
+ "(Unix only) home directory to install under"),
+
+ # Or, just set the base director(y|ies)
+ ('install-base=', None,
+ "base installation directory (instead of --prefix or --home)"),
+ ('install-platbase=', None,
+ "base installation directory for platform-specific files " +
+ "(instead of --exec-prefix or --home)"),
+ ('root=', None,
+ "install everything relative to this alternate root directory"),
+
+ # Or, explicitly set the installation scheme
+ ('install-purelib=', None,
+ "installation directory for pure Python module distributions"),
+ ('install-platlib=', None,
+ "installation directory for non-pure module distributions"),
+ ('install-lib=', None,
+ "installation directory for all module distributions " +
+ "(overrides --install-purelib and --install-platlib)"),
+
+ ('install-headers=', None,
+ "installation directory for C/C++ headers"),
+ ('install-scripts=', None,
+ "installation directory for Python scripts"),
+ ('install-data=', None,
+ "installation directory for data files"),
+
+ # Byte-compilation options -- see install_lib.py for details, as
+ # these are duplicated from there (but only install_lib does
+ # anything with them).
+ ('compile', 'c', "compile .py to .pyc [default]"),
+ ('no-compile', None, "don't compile .py files"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+
+ # Miscellaneous control options
+ ('force', 'f',
+ "force installation (overwrite any existing files)"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+
+ # Where to install documentation (eventually!)
+ #('doc-format=', None, "format of documentation to generate"),
+ #('install-man=', None, "directory for Unix man pages"),
+ #('install-html=', None, "directory for HTML documentation"),
+ #('install-info=', None, "directory for GNU info files"),
+
+ ('record=', None,
+ "filename in which to record list of installed files"),
+ ]
+
+ boolean_options = ['compile', 'force', 'skip-build']
+
+ if HAS_USER_SITE:
+ user_options.append(('user', None,
+ "install in user site-package '%s'" % USER_SITE))
+ boolean_options.append('user')
+
+ negative_opt = {'no-compile' : 'compile'}
+
+
+ def initialize_options(self):
+ """Initializes options."""
+ # High-level options: these select both an installation base
+ # and scheme.
+ self.prefix = None
+ self.exec_prefix = None
+ self.home = None
+ self.user = 0
+
+ # These select only the installation base; it's up to the user to
+ # specify the installation scheme (currently, that means supplying
+ # the --install-{platlib,purelib,scripts,data} options).
+ self.install_base = None
+ self.install_platbase = None
+ self.root = None
+
+ # These options are the actual installation directories; if not
+ # supplied by the user, they are filled in using the installation
+ # scheme implied by prefix/exec-prefix/home and the contents of
+ # that installation scheme.
+ self.install_purelib = None # for pure module distributions
+ self.install_platlib = None # non-pure (dists w/ extensions)
+ self.install_headers = None # for C/C++ headers
+ self.install_lib = None # set to either purelib or platlib
+ self.install_scripts = None
+ self.install_data = None
+ self.install_userbase = USER_BASE
+ self.install_usersite = USER_SITE
+
+ self.compile = None
+ self.optimize = None
+
+ # Deprecated
+ # These two are for putting non-packagized distributions into their
+ # own directory and creating a .pth file if it makes sense.
+ # 'extra_path' comes from the setup file; 'install_path_file' can
+ # be turned off if it makes no sense to install a .pth file. (But
+ # better to install it uselessly than to guess wrong and not
+ # install it when it's necessary and would be used!) Currently,
+ # 'install_path_file' is always true unless some outsider meddles
+ # with it.
+ self.extra_path = None
+ self.install_path_file = 1
+
+ # 'force' forces installation, even if target files are not
+ # out-of-date. 'skip_build' skips running the "build" command,
+ # handy if you know it's not necessary. 'warn_dir' (which is *not*
+ # a user option, it's just there so the bdist_* commands can turn
+ # it off) determines whether we warn about installing to a
+ # directory not in sys.path.
+ self.force = 0
+ self.skip_build = 0
+ self.warn_dir = 1
+
+ # These are only here as a conduit from the 'build' command to the
+ # 'install_*' commands that do the real work. ('build_base' isn't
+ # actually used anywhere, but it might be useful in future.) They
+ # are not user options, because if the user told the install
+ # command where the build directory is, that wouldn't affect the
+ # build command.
+ self.build_base = None
+ self.build_lib = None
+
+ # Not defined yet because we don't know anything about
+ # documentation yet.
+ #self.install_man = None
+ #self.install_html = None
+ #self.install_info = None
+
+ self.record = None
+
+
+ # -- Option finalizing methods -------------------------------------
+ # (This is rather more involved than for most commands,
+ # because this is where the policy for installing third-
+ # party Python modules on various platforms given a wide
+ # array of user input is decided. Yes, it's quite complex!)
+
+ def finalize_options(self):
+ """Finalizes options."""
+ # This method (and its pliant slaves, like 'finalize_unix()',
+ # 'finalize_other()', and 'select_scheme()') is where the default
+ # installation directories for modules, extension modules, and
+ # anything else we care to install from a Python module
+ # distribution are determined. Thus, this code makes a pretty important policy
+ # statement about how third-party stuff is added to a Python
+ # installation! Note that the actual work of installation is done
+ # by the relatively simple 'install_*' commands; they just take
+ # their orders from the installation directory options determined
+ # here.
+
+ # Check for errors/inconsistencies in the options; first, stuff
+ # that's wrong on any platform.
+
+ if ((self.prefix or self.exec_prefix or self.home) and
+ (self.install_base or self.install_platbase)):
+ raise DistutilsOptionError(
+ "must supply either prefix/exec-prefix/home or " +
+ "install-base/install-platbase -- not both")
+
+ if self.home and (self.prefix or self.exec_prefix):
+ raise DistutilsOptionError(
+ "must supply either home or prefix/exec-prefix -- not both")
+
+ if self.user and (self.prefix or self.exec_prefix or self.home or
+ self.install_base or self.install_platbase):
+ raise DistutilsOptionError("can't combine user with prefix, "
+ "exec_prefix/home, or install_(plat)base")
+
+ # Next, stuff that's wrong (or dubious) only on certain platforms.
+ if os.name != "posix":
+ if self.exec_prefix:
+ self.warn("exec-prefix option ignored on this platform")
+ self.exec_prefix = None
+
+ # Now the interesting logic -- so interesting that we farm it out
+ # to other methods. The goal of these methods is to set the final
+ # values for the install_{lib,scripts,data,...} options, using as
+ # input a heady brew of prefix, exec_prefix, home, install_base,
+ # install_platbase, user-supplied versions of
+ # install_{purelib,platlib,lib,scripts,data,...}, and the
+ # INSTALL_SCHEME dictionary above. Phew!
+
+ self.dump_dirs("pre-finalize_{unix,other}")
+
+ if os.name == 'posix':
+ self.finalize_unix()
+ else:
+ self.finalize_other()
+
+ self.dump_dirs("post-finalize_{unix,other}()")
+
+ # Expand configuration variables, tilde, etc. in self.install_base
+ # and self.install_platbase -- that way, we can use $base or
+ # $platbase in the other installation directories and not worry
+ # about needing recursive variable expansion (shudder).
+
+ py_version = sys.version.split()[0]
+ (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
+ try:
+ abiflags = sys.abiflags
+ except AttributeError:
+ # sys.abiflags may not be defined on all platforms.
+ abiflags = ''
+ self.config_vars = {'dist_name': self.distribution.get_name(),
+ 'dist_version': self.distribution.get_version(),
+ 'dist_fullname': self.distribution.get_fullname(),
+ 'py_version': py_version,
+ 'py_version_short': '%d.%d' % sys.version_info[:2],
+ 'py_version_nodot': '%d%d' % sys.version_info[:2],
+ 'sys_prefix': prefix,
+ 'prefix': prefix,
+ 'sys_exec_prefix': exec_prefix,
+ 'exec_prefix': exec_prefix,
+ 'abiflags': abiflags,
+ }
+
+ if HAS_USER_SITE:
+ self.config_vars['userbase'] = self.install_userbase
+ self.config_vars['usersite'] = self.install_usersite
+
+ self.expand_basedirs()
+
+ self.dump_dirs("post-expand_basedirs()")
+
+ # Now define config vars for the base directories so we can expand
+ # everything else.
+ self.config_vars['base'] = self.install_base
+ self.config_vars['platbase'] = self.install_platbase
+
+ if DEBUG:
+ from pprint import pprint
+ print("config vars:")
+ pprint(self.config_vars)
+
+ # Expand "~" and configuration variables in the installation
+ # directories.
+ self.expand_dirs()
+
+ self.dump_dirs("post-expand_dirs()")
+
+ # Create directories in the home dir:
+ if self.user:
+ self.create_home_path()
+
+ # Pick the actual directory to install all modules to: either
+ # install_purelib or install_platlib, depending on whether this
+ # module distribution is pure or not. Of course, if the user
+ # already specified install_lib, use their selection.
+ if self.install_lib is None:
+ if self.distribution.ext_modules: # has extensions: non-pure
+ self.install_lib = self.install_platlib
+ else:
+ self.install_lib = self.install_purelib
+
+
+ # Convert directories from Unix /-separated syntax to the local
+ # convention.
+ self.convert_paths('lib', 'purelib', 'platlib',
+ 'scripts', 'data', 'headers',
+ 'userbase', 'usersite')
+
+ # Deprecated
+ # Well, we're not actually fully completely finalized yet: we still
+ # have to deal with 'extra_path', which is the hack for allowing
+ # non-packagized module distributions (hello, Numerical Python!) to
+ # get their own directories.
+ self.handle_extra_path()
+ self.install_libbase = self.install_lib # needed for .pth file
+ self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
+
+ # If a new root directory was supplied, make all the installation
+ # dirs relative to it.
+ if self.root is not None:
+ self.change_roots('libbase', 'lib', 'purelib', 'platlib',
+ 'scripts', 'data', 'headers')
+
+ self.dump_dirs("after prepending root")
+
+ # Find out the build directories, ie. where to install from.
+ self.set_undefined_options('build',
+ ('build_base', 'build_base'),
+ ('build_lib', 'build_lib'))
+
+ # Punt on doc directories for now -- after all, we're punting on
+ # documentation completely!
+
+ def dump_dirs(self, msg):
+ """Dumps the list of user options."""
+ if not DEBUG:
+ return
+ from distutils.fancy_getopt import longopt_xlate
+ log.debug(msg + ":")
+ for opt in self.user_options:
+ opt_name = opt[0]
+ if opt_name[-1] == "=":
+ opt_name = opt_name[0:-1]
+ if opt_name in self.negative_opt:
+ opt_name = self.negative_opt[opt_name]
+ opt_name = opt_name.translate(longopt_xlate)
+ val = not getattr(self, opt_name)
+ else:
+ opt_name = opt_name.translate(longopt_xlate)
+ val = getattr(self, opt_name)
+ log.debug(" %s: %s", opt_name, val)
+
+ def finalize_unix(self):
+ """Finalizes options for posix platforms."""
+ if self.install_base is not None or self.install_platbase is not None:
+ if ((self.install_lib is None and
+ self.install_purelib is None and
+ self.install_platlib is None) or
+ self.install_headers is None or
+ self.install_scripts is None or
+ self.install_data is None):
+ raise DistutilsOptionError(
+ "install-base or install-platbase supplied, but "
+ "installation scheme is incomplete")
+ return
+
+ if self.user:
+ if self.install_userbase is None:
+ raise DistutilsPlatformError(
+ "User base directory is not specified")
+ self.install_base = self.install_platbase = self.install_userbase
+ self.select_scheme("unix_user")
+ elif self.home is not None:
+ self.install_base = self.install_platbase = self.home
+ self.select_scheme("unix_home")
+ else:
+ if self.prefix is None:
+ if self.exec_prefix is not None:
+ raise DistutilsOptionError(
+ "must not supply exec-prefix without prefix")
+
+ self.prefix = os.path.normpath(sys.prefix)
+ self.exec_prefix = os.path.normpath(sys.exec_prefix)
+
+ else:
+ if self.exec_prefix is None:
+ self.exec_prefix = self.prefix
+
+ self.install_base = self.prefix
+ self.install_platbase = self.exec_prefix
+ self.select_scheme("unix_prefix")
+
+ def finalize_other(self):
+ """Finalizes options for non-posix platforms"""
+ if self.user:
+ if self.install_userbase is None:
+ raise DistutilsPlatformError(
+ "User base directory is not specified")
+ self.install_base = self.install_platbase = self.install_userbase
+ self.select_scheme(os.name + "_user")
+ elif self.home is not None:
+ self.install_base = self.install_platbase = self.home
+ self.select_scheme("unix_home")
+ else:
+ if self.prefix is None:
+ self.prefix = os.path.normpath(sys.prefix)
+
+ self.install_base = self.install_platbase = self.prefix
+ try:
+ self.select_scheme(os.name)
+ except KeyError:
+ raise DistutilsPlatformError(
+ "I don't know how to install stuff on '%s'" % os.name)
+
+ def select_scheme(self, name):
+ """Sets the install directories by applying the install schemes."""
+ # it's the caller's problem if they supply a bad name!
+ scheme = INSTALL_SCHEMES[name]
+ for key in SCHEME_KEYS:
+ attrname = 'install_' + key
+ if getattr(self, attrname) is None:
+ setattr(self, attrname, scheme[key])
+
+ def _expand_attrs(self, attrs):
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ if os.name == 'posix' or os.name == 'nt':
+ val = os.path.expanduser(val)
+ val = subst_vars(val, self.config_vars)
+ setattr(self, attr, val)
+
+ def expand_basedirs(self):
+ """Calls `os.path.expanduser` on install_base, install_platbase and
+ root."""
+ self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+ def expand_dirs(self):
+ """Calls `os.path.expanduser` on install dirs."""
+ self._expand_attrs(['install_purelib', 'install_platlib',
+ 'install_lib', 'install_headers',
+ 'install_scripts', 'install_data',])
+
+ def convert_paths(self, *names):
+ """Call `convert_path` over `names`."""
+ for name in names:
+ attr = "install_" + name
+ setattr(self, attr, convert_path(getattr(self, attr)))
+
+ def handle_extra_path(self):
+ """Set `path_file` and `extra_dirs` using `extra_path`."""
+ if self.extra_path is None:
+ self.extra_path = self.distribution.extra_path
+
+ if self.extra_path is not None:
+ log.warn(
+ "Distribution option extra_path is deprecated. "
+ "See issue27919 for details."
+ )
+ if isinstance(self.extra_path, str):
+ self.extra_path = self.extra_path.split(',')
+
+ if len(self.extra_path) == 1:
+ path_file = extra_dirs = self.extra_path[0]
+ elif len(self.extra_path) == 2:
+ path_file, extra_dirs = self.extra_path
+ else:
+ raise DistutilsOptionError(
+ "'extra_path' option must be a list, tuple, or "
+ "comma-separated string with 1 or 2 elements")
+
+ # convert to local form in case Unix notation used (as it
+ # should be in setup scripts)
+ extra_dirs = convert_path(extra_dirs)
+ else:
+ path_file = None
+ extra_dirs = ''
+
+ # XXX should we warn if path_file and not extra_dirs? (in which
+ # case the path file would be harmless but pointless)
+ self.path_file = path_file
+ self.extra_dirs = extra_dirs
+
+ def change_roots(self, *names):
+ """Change the install directories pointed by name using root."""
+ for name in names:
+ attr = "install_" + name
+ setattr(self, attr, change_root(self.root, getattr(self, attr)))
+
+ def create_home_path(self):
+ """Create directories under ~."""
+ if not self.user:
+ return
+ home = convert_path(os.path.expanduser("~"))
+ for name, path in self.config_vars.items():
+ if path.startswith(home) and not os.path.isdir(path):
+ self.debug_print("os.makedirs('%s', 0o700)" % path)
+ os.makedirs(path, 0o700)
+
+ # -- Command execution methods -------------------------------------
+
+ def run(self):
+ """Runs the command."""
+ # Obviously have to build before we can install
+ if not self.skip_build:
+ self.run_command('build')
+ # If we built for any other platform, we can't install.
+ build_plat = self.distribution.get_command_obj('build').plat_name
+ # check warn_dir - it is a clue that the 'install' is happening
+ # internally, and not to sys.path, so we don't check the platform
+ # matches what we are running.
+ if self.warn_dir and build_plat != get_platform():
+ raise DistutilsPlatformError("Can't install when "
+ "cross-compiling")
+
+ # Run all sub-commands (at least those that need to be run)
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ if self.path_file:
+ self.create_path_file()
+
+ # write list of installed files, if requested.
+ if self.record:
+ outputs = self.get_outputs()
+ if self.root: # strip the alternate root prefix
+ root_len = len(self.root)
+ for counter in range(len(outputs)):
+ outputs[counter] = outputs[counter][root_len:]
+ self.execute(write_file,
+ (self.record, outputs),
+ "writing list of installed files to '%s'" %
+ self.record)
+
+ sys_path = map(os.path.normpath, sys.path)
+ sys_path = map(os.path.normcase, sys_path)
+ install_lib = os.path.normcase(os.path.normpath(self.install_lib))
+ if (self.warn_dir and
+ not (self.path_file and self.install_path_file) and
+ install_lib not in sys_path):
+ log.debug(("modules installed to '%s', which is not in "
+ "Python's module search path (sys.path) -- "
+ "you'll have to change the search path yourself"),
+ self.install_lib)
+
+ def create_path_file(self):
+ """Creates the .pth file"""
+ filename = os.path.join(self.install_libbase,
+ self.path_file + ".pth")
+ if self.install_path_file:
+ self.execute(write_file,
+ (filename, [self.extra_dirs]),
+ "creating %s" % filename)
+ else:
+ self.warn("path file '%s' not created" % filename)
+
+
+ # -- Reporting methods ---------------------------------------------
+
+ def get_outputs(self):
+ """Assembles the outputs of all the sub-commands."""
+ outputs = []
+ for cmd_name in self.get_sub_commands():
+ cmd = self.get_finalized_command(cmd_name)
+ # Add the contents of cmd.get_outputs(), ensuring
+ # that outputs doesn't contain duplicate entries
+ for filename in cmd.get_outputs():
+ if filename not in outputs:
+ outputs.append(filename)
+
+ if self.path_file and self.install_path_file:
+ outputs.append(os.path.join(self.install_libbase,
+ self.path_file + ".pth"))
+
+ return outputs
+
+ def get_inputs(self):
+ """Returns the inputs of all the sub-commands"""
+ # XXX gee, this looks familiar ;-(
+ inputs = []
+ for cmd_name in self.get_sub_commands():
+ cmd = self.get_finalized_command(cmd_name)
+ inputs.extend(cmd.get_inputs())
+
+ return inputs
+
+ # -- Predicates for sub-command list -------------------------------
+
+ def has_lib(self):
+ """Returns true if the current distribution has any Python
+ modules to install."""
+ return (self.distribution.has_pure_modules() or
+ self.distribution.has_ext_modules())
+
+ def has_headers(self):
+ """Returns true if the current distribution has any headers to
+ install."""
+ return self.distribution.has_headers()
+
+ def has_scripts(self):
+ """Returns true if the current distribution has any scripts to.
+ install."""
+ return self.distribution.has_scripts()
+
+ def has_data(self):
+ """Returns true if the current distribution has any data to.
+ install."""
+ return self.distribution.has_data_files()
+
+ # 'sub_commands': a list of commands this command might have to run to
+ # get its work done. See cmd.py for more info.
+ sub_commands = [('install_lib', has_lib),
+ ('install_headers', has_headers),
+ ('install_scripts', has_scripts),
+ ('install_data', has_data),
+ ('install_egg_info', lambda self:True),
+ ]
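
The directory computation above happens in two steps: `select_scheme()` copies the `$`-templates from INSTALL_SCHEMES into the `install_*` attributes, and `expand_dirs()`/`_expand_attrs()` then substitutes `self.config_vars` into them with `distutils.util.subst_vars()`. A standalone sketch of that second step (the variable values are examples):

```python
from distutils.util import subst_vars

# one entry from the 'unix_prefix' scheme plus a subset of install.config_vars
scheme_entry = '$base/lib/python$py_version_short/site-packages'
config_vars = {'base': '/usr/local', 'py_version_short': '3.7'}

print(subst_vars(scheme_entry, config_vars))
# -> /usr/local/lib/python3.7/site-packages
```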
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_data.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_data.py
new file mode 100644
index 00000000..947cd76a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_data.py
@@ -0,0 +1,79 @@
+"""distutils.command.install_data
+
+Implements the Distutils 'install_data' command, for installing
+platform-independent data files."""
+
+# contributed by Bastian Kleineidam
+
+import os
+from distutils.core import Command
+from distutils.util import change_root, convert_path
+
+class install_data(Command):
+
+ description = "install data files"
+
+ user_options = [
+ ('install-dir=', 'd',
+ "base directory for installing data files "
+ "(default: installation base dir)"),
+ ('root=', None,
+ "install everything relative to this alternate root directory"),
+ ('force', 'f', "force installation (overwrite existing files)"),
+ ]
+
+ boolean_options = ['force']
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.outfiles = []
+ self.root = None
+ self.force = 0
+ self.data_files = self.distribution.data_files
+ self.warn_dir = 1
+
+ def finalize_options(self):
+ self.set_undefined_options('install',
+ ('install_data', 'install_dir'),
+ ('root', 'root'),
+ ('force', 'force'),
+ )
+
+ def run(self):
+ self.mkpath(self.install_dir)
+ for f in self.data_files:
+ if isinstance(f, str):
+ # it's a simple file, so copy it
+ f = convert_path(f)
+ if self.warn_dir:
+ self.warn("setup script did not provide a directory for "
+ "'%s' -- installing right in '%s'" %
+ (f, self.install_dir))
+ (out, _) = self.copy_file(f, self.install_dir)
+ self.outfiles.append(out)
+ else:
+ # it's a tuple with path to install to and a list of files
+ dir = convert_path(f[0])
+ if not os.path.isabs(dir):
+ dir = os.path.join(self.install_dir, dir)
+ elif self.root:
+ dir = change_root(self.root, dir)
+ self.mkpath(dir)
+
+ if f[1] == []:
+ # If there are no files listed, the user must be
+ # trying to create an empty directory, so add the
+ # directory to the list of output files.
+ self.outfiles.append(dir)
+ else:
+ # Copy files, adding them to the list of output files.
+ for data in f[1]:
+ data = convert_path(data)
+ (out, _) = self.copy_file(data, dir)
+ self.outfiles.append(out)
+
+ def get_inputs(self):
+ return self.data_files or []
+
+ def get_outputs(self):
+ return self.outfiles
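
`run()` above accepts two shapes for each `data_files` entry: a bare filename (copied straight into `install_dir`, with a warning) or a `(directory, [files])` tuple. A fragment of a hypothetical setup script exercising both forms (the file names are made up):

```python
from distutils.core import setup

setup(
    name="example",
    version="1.0",
    data_files=[
        # tuple form: these files land in <install_data>/share/example
        ("share/example", ["data/default.cfg", "data/schema.json"]),
        # bare-string form: copied straight into install_dir and triggers
        # the "setup script did not provide a directory" warning above
        "README.txt",
    ],
)
```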
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_egg_info.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_egg_info.py
new file mode 100644
index 00000000..0ddc7367
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_egg_info.py
@@ -0,0 +1,77 @@
+"""distutils.command.install_egg_info
+
+Implements the Distutils 'install_egg_info' command, for installing
+a package's PKG-INFO metadata."""
+
+
+from distutils.cmd import Command
+from distutils import log, dir_util
+import os, sys, re
+
+class install_egg_info(Command):
+ """Install an .egg-info file for the package"""
+
+ description = "Install package's PKG-INFO metadata as an .egg-info file"
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',('install_dir','install_dir'))
+ basename = "%s-%s-py%d.%d.egg-info" % (
+ to_filename(safe_name(self.distribution.get_name())),
+ to_filename(safe_version(self.distribution.get_version())),
+ *sys.version_info[:2]
+ )
+ self.target = os.path.join(self.install_dir, basename)
+ self.outputs = [self.target]
+
+ def run(self):
+ target = self.target
+ if os.path.isdir(target) and not os.path.islink(target):
+ dir_util.remove_tree(target, dry_run=self.dry_run)
+ elif os.path.exists(target):
+ self.execute(os.unlink,(self.target,),"Removing "+target)
+ elif not os.path.isdir(self.install_dir):
+ self.execute(os.makedirs, (self.install_dir,),
+ "Creating "+self.install_dir)
+ log.info("Writing %s", target)
+ if not self.dry_run:
+ with open(target, 'w', encoding='UTF-8') as f:
+ self.distribution.metadata.write_pkg_file(f)
+
+ def get_outputs(self):
+ return self.outputs
+
+
+# The following routines are taken from setuptools' pkg_resources module and
+# can be replaced by importing them from pkg_resources once it is included
+# in the stdlib.
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """Convert an arbitrary string to a standard version string
+
+ Spaces become dots, and all other non-alphanumeric characters become
+ dashes, with runs of multiple dashes condensed to a single dash.
+ """
+ version = version.replace(' ','.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-','_')
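
How `finalize_options()` above combines the three helpers into the `.egg-info` basename (the project name and version are made up; the `py3.7` part depends on the running interpreter):

```python
import sys
from distutils.command.install_egg_info import safe_name, safe_version, to_filename

name, version = "My Project", "1.0 beta"

basename = "%s-%s-py%d.%d.egg-info" % (
    to_filename(safe_name(name)),        # 'My Project' -> 'My-Project' -> 'My_Project'
    to_filename(safe_version(version)),  # '1.0 beta'   -> '1.0.beta'
    *sys.version_info[:2],
)
print(basename)   # e.g. My_Project-1.0.beta-py3.7.egg-info
```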
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_headers.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_headers.py
new file mode 100644
index 00000000..9bb0b18d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_headers.py
@@ -0,0 +1,47 @@
+"""distutils.command.install_headers
+
+Implements the Distutils 'install_headers' command, to install C/C++ header
+files to the Python include directory."""
+
+from distutils.core import Command
+
+
+# XXX force is never used
+class install_headers(Command):
+
+ description = "install C/C++ header files"
+
+ user_options = [('install-dir=', 'd',
+ "directory to install header files to"),
+ ('force', 'f',
+ "force installation (overwrite existing files)"),
+ ]
+
+ boolean_options = ['force']
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.force = 0
+ self.outfiles = []
+
+ def finalize_options(self):
+ self.set_undefined_options('install',
+ ('install_headers', 'install_dir'),
+ ('force', 'force'))
+
+
+ def run(self):
+ headers = self.distribution.headers
+ if not headers:
+ return
+
+ self.mkpath(self.install_dir)
+ for header in headers:
+ (out, _) = self.copy_file(header, self.install_dir)
+ self.outfiles.append(out)
+
+ def get_inputs(self):
+ return self.distribution.headers or []
+
+ def get_outputs(self):
+ return self.outfiles
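
The `headers` list consumed above comes from the `headers` argument to `setup()`; the files are copied as-is into the scheme's headers directory (`$base/Include/$dist_name` on Windows, per install.py above). A hypothetical declaration (paths are illustrative):

```python
from distutils.core import setup, Extension

setup(
    name="example",
    version="1.0",
    headers=["include/example.h"],
    ext_modules=[Extension("_example", sources=["src/_example.c"])],
)
```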
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_lib.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_lib.py
new file mode 100644
index 00000000..6154cf09
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_lib.py
@@ -0,0 +1,217 @@
+"""distutils.command.install_lib
+
+Implements the Distutils 'install_lib' command
+(install all Python modules)."""
+
+import os
+import importlib.util
+import sys
+
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+
+
+# Extension for Python source files.
+PYTHON_SOURCE_EXTENSION = ".py"
+
+class install_lib(Command):
+
+ description = "install all Python modules (extensions and pure Python)"
+
+ # The byte-compilation options are a tad confusing. Here are the
+ # possible scenarios:
+ # 1) no compilation at all (--no-compile --no-optimize)
+ # 2) compile .pyc only (--compile --no-optimize; default)
+ # 3) compile .pyc and "opt-1" .pyc (--compile --optimize)
+ # 4) compile "opt-1" .pyc only (--no-compile --optimize)
+ # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
+ # 6) compile "opt-2" .pyc only (--no-compile --optimize-more)
+ #
+ # The UI for this is two options, 'compile' and 'optimize'.
+ # 'compile' is strictly boolean, and only decides whether to
+ # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
+ # decides both whether to generate .pyc files and what level of
+ # optimization to use.
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ('build-dir=','b', "build directory (where to install from)"),
+ ('force', 'f', "force installation (overwrite existing files)"),
+ ('compile', 'c', "compile .py to .pyc [default]"),
+ ('no-compile', None, "don't compile .py files"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('skip-build', None, "skip the build steps"),
+ ]
+
+ boolean_options = ['force', 'compile', 'skip-build']
+ negative_opt = {'no-compile' : 'compile'}
+
+ def initialize_options(self):
+ # let the 'install' command dictate our installation directory
+ self.install_dir = None
+ self.build_dir = None
+ self.force = 0
+ self.compile = None
+ self.optimize = None
+ self.skip_build = None
+
+ def finalize_options(self):
+ # Get all the information we need to install pure Python modules
+ # from the umbrella 'install' command -- build (source) directory,
+ # install (target) directory, and whether to compile .py files.
+ self.set_undefined_options('install',
+ ('build_lib', 'build_dir'),
+ ('install_lib', 'install_dir'),
+ ('force', 'force'),
+ ('compile', 'compile'),
+ ('optimize', 'optimize'),
+ ('skip_build', 'skip_build'),
+ )
+
+ if self.compile is None:
+ self.compile = True
+ if self.optimize is None:
+ self.optimize = False
+
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ if self.optimize not in (0, 1, 2):
+ raise AssertionError
+ except (ValueError, AssertionError):
+ raise DistutilsOptionError("optimize must be 0, 1, or 2")
+
+ def run(self):
+ # Make sure we have built everything we need first
+ self.build()
+
+ # Install everything: simply dump the entire contents of the build
+ # directory to the installation directory (that's the beauty of
+ # having a build directory!)
+ outfiles = self.install()
+
+ # (Optionally) compile .py to .pyc
+ if outfiles is not None and self.distribution.has_pure_modules():
+ self.byte_compile(outfiles)
+
+ # -- Top-level worker functions ------------------------------------
+ # (called from 'run()')
+
+ def build(self):
+ if not self.skip_build:
+ if self.distribution.has_pure_modules():
+ self.run_command('build_py')
+ if self.distribution.has_ext_modules():
+ self.run_command('build_ext')
+
+ def install(self):
+ if os.path.isdir(self.build_dir):
+ outfiles = self.copy_tree(self.build_dir, self.install_dir)
+ else:
+ self.warn("'%s' does not exist -- no Python modules to install" %
+ self.build_dir)
+ return
+ return outfiles
+
+ def byte_compile(self, files):
+ if sys.dont_write_bytecode:
+ self.warn('byte-compiling is disabled, skipping.')
+ return
+
+ from distutils.util import byte_compile
+
+ # Get the "--root" directory supplied to the "install" command,
+ # and use it as a prefix to strip off the purported filename
+ # encoded in bytecode files. This is far from complete, but it
+ # should at least generate usable bytecode in RPM distributions.
+ install_root = self.get_finalized_command('install').root
+
+ if self.compile:
+ byte_compile(files, optimize=0,
+ force=self.force, prefix=install_root,
+ dry_run=self.dry_run)
+ if self.optimize > 0:
+ byte_compile(files, optimize=self.optimize,
+ force=self.force, prefix=install_root,
+ verbose=self.verbose, dry_run=self.dry_run)
+
+
+ # -- Utility methods -----------------------------------------------
+
+ def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
+ if not has_any:
+ return []
+
+ build_cmd = self.get_finalized_command(build_cmd)
+ build_files = build_cmd.get_outputs()
+ build_dir = getattr(build_cmd, cmd_option)
+
+ prefix_len = len(build_dir) + len(os.sep)
+ outputs = []
+ for file in build_files:
+ outputs.append(os.path.join(output_dir, file[prefix_len:]))
+
+ return outputs
+
+ def _bytecode_filenames(self, py_filenames):
+ bytecode_files = []
+ for py_file in py_filenames:
+ # Since build_py handles package data installation, the
+ # list of outputs can contain more than just .py files.
+ # Make sure we only report bytecode for the .py files.
+ ext = os.path.splitext(os.path.normcase(py_file))[1]
+ if ext != PYTHON_SOURCE_EXTENSION:
+ continue
+ if self.compile:
+ bytecode_files.append(importlib.util.cache_from_source(
+ py_file, optimization=''))
+ if self.optimize > 0:
+ bytecode_files.append(importlib.util.cache_from_source(
+ py_file, optimization=self.optimize))
+
+ return bytecode_files
+
+
+ # -- External interface --------------------------------------------
+ # (called by outsiders)
+
+ def get_outputs(self):
+ """Return the list of files that would be installed if this command
+ were actually run. Not affected by the "dry-run" flag or whether
+ modules have actually been built yet.
+ """
+ pure_outputs = \
+ self._mutate_outputs(self.distribution.has_pure_modules(),
+ 'build_py', 'build_lib',
+ self.install_dir)
+ if self.compile:
+ bytecode_outputs = self._bytecode_filenames(pure_outputs)
+ else:
+ bytecode_outputs = []
+
+ ext_outputs = \
+ self._mutate_outputs(self.distribution.has_ext_modules(),
+ 'build_ext', 'build_lib',
+ self.install_dir)
+
+ return pure_outputs + bytecode_outputs + ext_outputs
+
+ def get_inputs(self):
+ """Get the list of files that are input to this command, ie. the
+ files that get installed as they are named in the build tree.
+ The files in this list correspond one-to-one to the output
+ filenames returned by 'get_outputs()'.
+ """
+ inputs = []
+
+ if self.distribution.has_pure_modules():
+ build_py = self.get_finalized_command('build_py')
+ inputs.extend(build_py.get_outputs())
+
+ if self.distribution.has_ext_modules():
+ build_ext = self.get_finalized_command('build_ext')
+ inputs.extend(build_ext.get_outputs())
+
+ return inputs
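
`_bytecode_filenames()` above maps each installed `.py` file to the cached bytecode names for the compile/optimize scenarios listed in the comment block at the top of the file, via `importlib.util.cache_from_source()`. A standalone sketch (the path is illustrative; the `cpython-37` tag depends on the interpreter):

```python
import importlib.util

py_file = "pkg/module.py"

print(importlib.util.cache_from_source(py_file, optimization=''))
# e.g. pkg/__pycache__/module.cpython-37.pyc        (--compile, scenario 2)
print(importlib.util.cache_from_source(py_file, optimization=1))
# e.g. pkg/__pycache__/module.cpython-37.opt-1.pyc  (--optimize, scenarios 3/4)
print(importlib.util.cache_from_source(py_file, optimization=2))
# e.g. pkg/__pycache__/module.cpython-37.opt-2.pyc  (--optimize-more, scenarios 5/6)
```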
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_scripts.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_scripts.py
new file mode 100644
index 00000000..31a1130e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/install_scripts.py
@@ -0,0 +1,60 @@
+"""distutils.command.install_scripts
+
+Implements the Distutils 'install_scripts' command, for installing
+Python scripts."""
+
+# contributed by Bastian Kleineidam
+
+import os
+from distutils.core import Command
+from distutils import log
+from stat import ST_MODE
+
+
+class install_scripts(Command):
+
+ description = "install scripts (Python or otherwise)"
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install scripts to"),
+ ('build-dir=','b', "build directory (where to install from)"),
+ ('force', 'f', "force installation (overwrite existing files)"),
+ ('skip-build', None, "skip the build steps"),
+ ]
+
+ boolean_options = ['force', 'skip-build']
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.force = 0
+ self.build_dir = None
+ self.skip_build = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build', ('build_scripts', 'build_dir'))
+ self.set_undefined_options('install',
+ ('install_scripts', 'install_dir'),
+ ('force', 'force'),
+ ('skip_build', 'skip_build'),
+ )
+
+ def run(self):
+ if not self.skip_build:
+ self.run_command('build_scripts')
+ self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
+ if os.name == 'posix':
+ # Set the executable bits (owner, group, and world) on
+ # all the scripts we just installed.
+ for file in self.get_outputs():
+ if self.dry_run:
+ log.info("changing mode of %s", file)
+ else:
+ mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
+ log.info("changing mode of %s to %o", file, mode)
+ os.chmod(file, mode)
+
+ def get_inputs(self):
+ return self.distribution.scripts or []
+
+ def get_outputs(self):
+ return self.outfiles or []
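
The permission arithmetic in `run()` above, `(mode | 0o555) & 0o7777`, ORs in read and execute bits for owner, group and other while keeping any other permission bits; a quick check of the arithmetic:

```python
def installed_mode(mode):
    # same expression as in install_scripts.run()
    return (mode | 0o555) & 0o7777

print(oct(installed_mode(0o644)))   # 0o755  rw-r--r--  ->  rwxr-xr-x
print(oct(installed_mode(0o600)))   # 0o755
print(oct(installed_mode(0o4755)))  # 0o4755 setuid bit is preserved
```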
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/register.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/register.py
new file mode 100644
index 00000000..0fac94e9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/register.py
@@ -0,0 +1,304 @@
+"""distutils.command.register
+
+Implements the Distutils 'register' command (register with the repository).
+"""
+
+# created 2002/10/21, Richard Jones
+
+import getpass
+import io
+import urllib.parse, urllib.request
+from warnings import warn
+
+from distutils.core import PyPIRCCommand
+from distutils.errors import *
+from distutils import log
+
+class register(PyPIRCCommand):
+
+ description = ("register the distribution with the Python package index")
+ user_options = PyPIRCCommand.user_options + [
+ ('list-classifiers', None,
+ 'list the valid Trove classifiers'),
+ ('strict', None,
+ 'stop the registration if the meta-data is not fully compliant')
+ ]
+ boolean_options = PyPIRCCommand.boolean_options + [
+ 'verify', 'list-classifiers', 'strict']
+
+ sub_commands = [('check', lambda self: True)]
+
+ def initialize_options(self):
+ PyPIRCCommand.initialize_options(self)
+ self.list_classifiers = 0
+ self.strict = 0
+
+ def finalize_options(self):
+ PyPIRCCommand.finalize_options(self)
+ # setting options for the `check` subcommand
+ check_options = {'strict': ('register', self.strict),
+ 'restructuredtext': ('register', 1)}
+ self.distribution.command_options['check'] = check_options
+
+ def run(self):
+ self.finalize_options()
+ self._set_config()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ if self.dry_run:
+ self.verify_metadata()
+ elif self.list_classifiers:
+ self.classifiers()
+ else:
+ self.send_metadata()
+
+ def check_metadata(self):
+ """Deprecated API."""
+ warn("distutils.command.register.check_metadata is deprecated, \
+ use the check command instead", PendingDeprecationWarning)
+ check = self.distribution.get_command_obj('check')
+ check.ensure_finalized()
+ check.strict = self.strict
+ check.restructuredtext = 1
+ check.run()
+
+ def _set_config(self):
+ ''' Reads the configuration file and sets attributes.
+ '''
+ config = self._read_pypirc()
+ if config != {}:
+ self.username = config['username']
+ self.password = config['password']
+ self.repository = config['repository']
+ self.realm = config['realm']
+ self.has_config = True
+ else:
+ if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
+ raise ValueError('%s not found in .pypirc' % self.repository)
+ if self.repository == 'pypi':
+ self.repository = self.DEFAULT_REPOSITORY
+ self.has_config = False
+
+ def classifiers(self):
+ ''' Fetch the list of classifiers from the server.
+ '''
+ url = self.repository+'?:action=list_classifiers'
+ response = urllib.request.urlopen(url)
+ log.info(self._read_pypi_response(response))
+
+ def verify_metadata(self):
+ ''' Send the metadata to the package index server to be checked.
+ '''
+ # send the info to the server and report the result
+ (code, result) = self.post_to_server(self.build_post_data('verify'))
+ log.info('Server response (%s): %s', code, result)
+
+ def send_metadata(self):
+ ''' Send the metadata to the package index server.
+
+ Well, do the following:
+ 1. figure who the user is, and then
+ 2. send the data as a Basic auth'ed POST.
+
+ First we try to read the username/password from $HOME/.pypirc,
+ which is a ConfigParser-formatted file with a section
+ [distutils] containing username and password entries (both
+ in clear text). Eg:
+
+ [distutils]
+ index-servers =
+ pypi
+
+ [pypi]
+ username: fred
+ password: sekrit
+
+ Otherwise, to figure who the user is, we offer the user three
+ choices:
+
+ 1. use existing login,
+ 2. register as a new user, or
+ 3. set the password to a random string and email the user.
+
+ '''
+ # see if we can short-cut and get the username/password from the
+ # config
+ if self.has_config:
+ choice = '1'
+ username = self.username
+ password = self.password
+ else:
+ choice = 'x'
+ username = password = ''
+
+ # get the user's login info
+ choices = '1 2 3 4'.split()
+ while choice not in choices:
+ self.announce('''\
+We need to know who you are, so please choose either:
+ 1. use your existing login,
+ 2. register as a new user,
+ 3. have the server generate a new password for you (and email it to you), or
+ 4. quit
+Your selection [default 1]: ''', log.INFO)
+ choice = input()
+ if not choice:
+ choice = '1'
+ elif choice not in choices:
+ print('Please choose one of the four options!')
+
+ if choice == '1':
+ # get the username and password
+ while not username:
+ username = input('Username: ')
+ while not password:
+ password = getpass.getpass('Password: ')
+
+ # set up the authentication
+ auth = urllib.request.HTTPPasswordMgr()
+ host = urllib.parse.urlparse(self.repository)[1]
+ auth.add_password(self.realm, host, username, password)
+ # send the info to the server and report the result
+ code, result = self.post_to_server(self.build_post_data('submit'),
+ auth)
+ self.announce('Server response (%s): %s' % (code, result),
+ log.INFO)
+
+ # possibly save the login
+ if code == 200:
+ if self.has_config:
+ # sharing the password in the distribution instance
+ # so the upload command can reuse it
+ self.distribution.password = password
+ else:
+ self.announce(('I can store your PyPI login so future '
+ 'submissions will be faster.'), log.INFO)
+ self.announce('(the login will be stored in %s)' % \
+ self._get_rc_file(), log.INFO)
+ choice = 'X'
+ while choice.lower() not in 'yn':
+ choice = input('Save your login (y/N)?')
+ if not choice:
+ choice = 'n'
+ if choice.lower() == 'y':
+ self._store_pypirc(username, password)
+
+ elif choice == '2':
+ data = {':action': 'user'}
+ data['name'] = data['password'] = data['email'] = ''
+ data['confirm'] = None
+ while not data['name']:
+ data['name'] = input('Username: ')
+ while data['password'] != data['confirm']:
+ while not data['password']:
+ data['password'] = getpass.getpass('Password: ')
+ while not data['confirm']:
+ data['confirm'] = getpass.getpass(' Confirm: ')
+ if data['password'] != data['confirm']:
+ data['password'] = ''
+ data['confirm'] = None
+ print("Password and confirm don't match!")
+ while not data['email']:
+ data['email'] = input(' EMail: ')
+ code, result = self.post_to_server(data)
+ if code != 200:
+ log.info('Server response (%s): %s', code, result)
+ else:
+ log.info('You will receive an email shortly.')
+ log.info(('Follow the instructions in it to '
+ 'complete registration.'))
+ elif choice == '3':
+ data = {':action': 'password_reset'}
+ data['email'] = ''
+ while not data['email']:
+ data['email'] = input('Your email address: ')
+ code, result = self.post_to_server(data)
+ log.info('Server response (%s): %s', code, result)
+
+ def build_post_data(self, action):
+ # figure the data to send - the metadata plus some additional
+ # information used by the package server
+ meta = self.distribution.metadata
+ data = {
+ ':action': action,
+ 'metadata_version' : '1.0',
+ 'name': meta.get_name(),
+ 'version': meta.get_version(),
+ 'summary': meta.get_description(),
+ 'home_page': meta.get_url(),
+ 'author': meta.get_contact(),
+ 'author_email': meta.get_contact_email(),
+ 'license': meta.get_licence(),
+ 'description': meta.get_long_description(),
+ 'keywords': meta.get_keywords(),
+ 'platform': meta.get_platforms(),
+ 'classifiers': meta.get_classifiers(),
+ 'download_url': meta.get_download_url(),
+ # PEP 314
+ 'provides': meta.get_provides(),
+ 'requires': meta.get_requires(),
+ 'obsoletes': meta.get_obsoletes(),
+ }
+ if data['provides'] or data['requires'] or data['obsoletes']:
+ data['metadata_version'] = '1.1'
+ return data
+
+ def post_to_server(self, data, auth=None):
+ ''' Post a query to the server, and return a string response.
+ '''
+ if 'name' in data:
+ self.announce('Registering %s to %s' % (data['name'],
+ self.repository),
+ log.INFO)
+ # Build up the MIME payload for the urllib2 POST data
+ boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+ sep_boundary = '\n--' + boundary
+ end_boundary = sep_boundary + '--'
+ body = io.StringIO()
+ for key, value in data.items():
+ # handle multiple entries for the same name
+ if type(value) not in (type([]), type( () )):
+ value = [value]
+ for value in value:
+ value = str(value)
+ body.write(sep_boundary)
+ body.write('\nContent-Disposition: form-data; name="%s"'%key)
+ body.write("\n\n")
+ body.write(value)
+ if value and value[-1] == '\r':
+ body.write('\n') # write an extra newline (lurve Macs)
+ body.write(end_boundary)
+ body.write("\n")
+ body = body.getvalue().encode("utf-8")
+
+ # build the Request
+ headers = {
+ 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
+ 'Content-length': str(len(body))
+ }
+ req = urllib.request.Request(self.repository, body, headers)
+
+ # handle HTTP and include the Basic Auth handler
+ opener = urllib.request.build_opener(
+ urllib.request.HTTPBasicAuthHandler(password_mgr=auth)
+ )
+ data = ''
+ try:
+ result = opener.open(req)
+ except urllib.error.HTTPError as e:
+ if self.show_response:
+ data = e.fp.read()
+ result = e.code, e.msg
+ except urllib.error.URLError as e:
+ result = 500, str(e)
+ else:
+ if self.show_response:
+ data = self._read_pypi_response(result)
+ result = 200, 'OK'
+ if self.show_response:
+ msg = '\n'.join(('-' * 75, data, '-' * 75))
+ self.announce(msg, log.INFO)
+ return result
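
A standalone illustration (not part of the patched file): the multipart/form-data body that post_to_server() assembles above, reduced to a minimal sketch. The boundary string and field values are invented for the example, and the trailing-carriage-return special case is omitted.

import io

def build_multipart(fields, boundary='--------------EXAMPLEBOUNDARY1234'):
    # Mirror post_to_server(): one part per field, joined by the boundary.
    sep_boundary = '\n--' + boundary
    end_boundary = sep_boundary + '--'
    body = io.StringIO()
    for key, value in fields.items():
        values = value if isinstance(value, (list, tuple)) else [value]
        for item in values:
            body.write(sep_boundary)
            body.write('\nContent-Disposition: form-data; name="%s"' % key)
            body.write('\n\n')
            body.write(str(item))
    body.write(end_boundary)
    body.write('\n')
    return body.getvalue().encode('utf-8')

print(build_multipart({':action': 'user', 'name': 'example'})[:60])
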
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/sdist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/sdist.py
new file mode 100644
index 00000000..52eaa15d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/sdist.py
@@ -0,0 +1,495 @@
+"""distutils.command.sdist
+
+Implements the Distutils 'sdist' command (create a source distribution)."""
+
+import os
+import sys
+from glob import glob
+from warnings import warn
+
+from distutils.core import Command
+from distutils import dir_util
+from distutils import file_util
+from distutils import archive_util
+from distutils.text_file import TextFile
+from distutils.filelist import FileList
+from distutils import log
+from distutils.util import convert_path
+from distutils.errors import DistutilsTemplateError, DistutilsOptionError
+
+
+def show_formats():
+ """Print all possible values for the 'formats' option (used by
+ the "--help-formats" command-line option).
+ """
+ from distutils.fancy_getopt import FancyGetopt
+ from distutils.archive_util import ARCHIVE_FORMATS
+ formats = []
+ for format in ARCHIVE_FORMATS.keys():
+ formats.append(("formats=" + format, None,
+ ARCHIVE_FORMATS[format][2]))
+ formats.sort()
+ FancyGetopt(formats).print_help(
+ "List of available source distribution formats:")
+
+
+class sdist(Command):
+
+ description = "create a source distribution (tarball, zip file, etc.)"
+
+ def checking_metadata(self):
+ """Callable used for the check sub-command.
+
+ Placed here so user_options can view it"""
+ return self.metadata_check
+
+ user_options = [
+ ('template=', 't',
+ "name of manifest template file [default: MANIFEST.in]"),
+ ('manifest=', 'm',
+ "name of manifest file [default: MANIFEST]"),
+ ('use-defaults', None,
+ "include the default file set in the manifest "
+ "[default; disable with --no-defaults]"),
+ ('no-defaults', None,
+ "don't include the default file set"),
+ ('prune', None,
+ "specifically exclude files/directories that should not be "
+ "distributed (build tree, RCS/CVS dirs, etc.) "
+ "[default; disable with --no-prune]"),
+ ('no-prune', None,
+ "don't automatically exclude anything"),
+ ('manifest-only', 'o',
+ "just regenerate the manifest and then stop "
+ "(implies --force-manifest)"),
+ ('force-manifest', 'f',
+ "forcibly regenerate the manifest and carry on as usual. "
+ "Deprecated: now the manifest is always regenerated."),
+ ('formats=', None,
+ "formats for source distribution (comma-separated list)"),
+ ('keep-temp', 'k',
+ "keep the distribution tree around after creating " +
+ "archive file(s)"),
+ ('dist-dir=', 'd',
+ "directory to put the source distribution archive(s) in "
+ "[default: dist]"),
+ ('metadata-check', None,
+ "Ensure that all required elements of meta-data "
+ "are supplied. Warn if any missing. [default]"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file [default: current group]"),
+ ]
+
+ boolean_options = ['use-defaults', 'prune',
+ 'manifest-only', 'force-manifest',
+ 'keep-temp', 'metadata-check']
+
+ help_options = [
+ ('help-formats', None,
+ "list available distribution formats", show_formats),
+ ]
+
+ negative_opt = {'no-defaults': 'use-defaults',
+ 'no-prune': 'prune' }
+
+ sub_commands = [('check', checking_metadata)]
+
+ READMES = ('README', 'README.txt', 'README.rst')
+
+ def initialize_options(self):
+ # 'template' and 'manifest' are, respectively, the names of
+ # the manifest template and manifest file.
+ self.template = None
+ self.manifest = None
+
+ # 'use_defaults': if true, we will include the default file set
+ # in the manifest
+ self.use_defaults = 1
+ self.prune = 1
+
+ self.manifest_only = 0
+ self.force_manifest = 0
+
+ self.formats = ['gztar']
+ self.keep_temp = 0
+ self.dist_dir = None
+
+ self.archive_files = None
+ self.metadata_check = 1
+ self.owner = None
+ self.group = None
+
+ def finalize_options(self):
+ if self.manifest is None:
+ self.manifest = "MANIFEST"
+ if self.template is None:
+ self.template = "MANIFEST.in"
+
+ self.ensure_string_list('formats')
+
+ bad_format = archive_util.check_archive_formats(self.formats)
+ if bad_format:
+ raise DistutilsOptionError(
+ "unknown archive format '%s'" % bad_format)
+
+ if self.dist_dir is None:
+ self.dist_dir = "dist"
+
+ def run(self):
+ # 'filelist' contains the list of files that will make up the
+ # manifest
+ self.filelist = FileList()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ # Do whatever it takes to get the list of files to process
+ # (process the manifest template, read an existing manifest,
+ # whatever). File list is accumulated in 'self.filelist'.
+ self.get_file_list()
+
+ # If user just wanted us to regenerate the manifest, stop now.
+ if self.manifest_only:
+ return
+
+ # Otherwise, go ahead and create the source distribution tarball,
+ # or zipfile, or whatever.
+ self.make_distribution()
+
+ def check_metadata(self):
+ """Deprecated API."""
+        warn("distutils.command.sdist.check_metadata is deprecated, "
+             "use the check command instead", PendingDeprecationWarning)
+ check = self.distribution.get_command_obj('check')
+ check.ensure_finalized()
+ check.run()
+
+ def get_file_list(self):
+ """Figure out the list of files to include in the source
+ distribution, and put it in 'self.filelist'. This might involve
+ reading the manifest template (and writing the manifest), or just
+ reading the manifest, or just using the default file set -- it all
+ depends on the user's options.
+ """
+ # new behavior when using a template:
+ # the file list is recalculated every time because
+ # even if MANIFEST.in or setup.py are not changed
+ # the user might have added some files in the tree that
+ # need to be included.
+ #
+ # This makes --force the default and only behavior with templates.
+ template_exists = os.path.isfile(self.template)
+ if not template_exists and self._manifest_is_not_generated():
+ self.read_manifest()
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ return
+
+ if not template_exists:
+ self.warn(("manifest template '%s' does not exist " +
+ "(using default file list)") %
+ self.template)
+ self.filelist.findall()
+
+ if self.use_defaults:
+ self.add_defaults()
+
+ if template_exists:
+ self.read_template()
+
+ if self.prune:
+ self.prune_file_list()
+
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ self.write_manifest()
+
+ def add_defaults(self):
+ """Add all the default files to self.filelist:
+ - README or README.txt
+ - setup.py
+ - test/test*.py
+ - all pure Python modules mentioned in setup script
+ - all files pointed by package_data (build_py)
+ - all files defined in data_files.
+ - all files defined as scripts.
+ - all C sources listed as part of extensions or C libraries
+ in the setup script (doesn't catch C headers!)
+ Warns if (README or README.txt) or setup.py are missing; everything
+ else is optional.
+ """
+ self._add_defaults_standards()
+ self._add_defaults_optional()
+ self._add_defaults_python()
+ self._add_defaults_data_files()
+ self._add_defaults_ext()
+ self._add_defaults_c_libs()
+ self._add_defaults_scripts()
+
+ @staticmethod
+ def _cs_path_exists(fspath):
+ """
+ Case-sensitive path existence check
+
+ >>> sdist._cs_path_exists(__file__)
+ True
+ >>> sdist._cs_path_exists(__file__.upper())
+ False
+ """
+ if not os.path.exists(fspath):
+ return False
+ # make absolute so we always have a directory
+ abspath = os.path.abspath(fspath)
+ directory, filename = os.path.split(abspath)
+ return filename in os.listdir(directory)
+
+ def _add_defaults_standards(self):
+ standards = [self.READMES, self.distribution.script_name]
+ for fn in standards:
+ if isinstance(fn, tuple):
+ alts = fn
+ got_it = False
+ for fn in alts:
+ if self._cs_path_exists(fn):
+ got_it = True
+ self.filelist.append(fn)
+ break
+
+ if not got_it:
+ self.warn("standard file not found: should have one of " +
+ ', '.join(alts))
+ else:
+ if self._cs_path_exists(fn):
+ self.filelist.append(fn)
+ else:
+ self.warn("standard file '%s' not found" % fn)
+
+ def _add_defaults_optional(self):
+ optional = ['test/test*.py', 'setup.cfg']
+ for pattern in optional:
+ files = filter(os.path.isfile, glob(pattern))
+ self.filelist.extend(files)
+
+ def _add_defaults_python(self):
+ # build_py is used to get:
+ # - python modules
+ # - files defined in package_data
+ build_py = self.get_finalized_command('build_py')
+
+ # getting python files
+ if self.distribution.has_pure_modules():
+ self.filelist.extend(build_py.get_source_files())
+
+ # getting package_data files
+ # (computed in build_py.data_files by build_py.finalize_options)
+ for pkg, src_dir, build_dir, filenames in build_py.data_files:
+ for filename in filenames:
+ self.filelist.append(os.path.join(src_dir, filename))
+
+ def _add_defaults_data_files(self):
+ # getting distribution.data_files
+ if self.distribution.has_data_files():
+ for item in self.distribution.data_files:
+ if isinstance(item, str):
+ # plain file
+ item = convert_path(item)
+ if os.path.isfile(item):
+ self.filelist.append(item)
+ else:
+ # a (dirname, filenames) tuple
+ dirname, filenames = item
+ for f in filenames:
+ f = convert_path(f)
+ if os.path.isfile(f):
+ self.filelist.append(f)
+
+ def _add_defaults_ext(self):
+ if self.distribution.has_ext_modules():
+ build_ext = self.get_finalized_command('build_ext')
+ self.filelist.extend(build_ext.get_source_files())
+
+ def _add_defaults_c_libs(self):
+ if self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ self.filelist.extend(build_clib.get_source_files())
+
+ def _add_defaults_scripts(self):
+ if self.distribution.has_scripts():
+ build_scripts = self.get_finalized_command('build_scripts')
+ self.filelist.extend(build_scripts.get_source_files())
+
+ def read_template(self):
+ """Read and parse manifest template file named by self.template.
+
+ (usually "MANIFEST.in") The parsing and processing is done by
+ 'self.filelist', which updates itself accordingly.
+ """
+ log.info("reading manifest template '%s'", self.template)
+ template = TextFile(self.template, strip_comments=1, skip_blanks=1,
+ join_lines=1, lstrip_ws=1, rstrip_ws=1,
+ collapse_join=1)
+
+ try:
+ while True:
+ line = template.readline()
+ if line is None: # end of file
+ break
+
+ try:
+ self.filelist.process_template_line(line)
+ # the call above can raise a DistutilsTemplateError for
+ # malformed lines, or a ValueError from the lower-level
+ # convert_path function
+ except (DistutilsTemplateError, ValueError) as msg:
+ self.warn("%s, line %d: %s" % (template.filename,
+ template.current_line,
+ msg))
+ finally:
+ template.close()
+
+ def prune_file_list(self):
+ """Prune off branches that might slip into the file list as created
+ by 'read_template()', but really don't belong there:
+ * the build tree (typically "build")
+ * the release tree itself (only an issue if we ran "sdist"
+ previously with --keep-temp, or it aborted)
+ * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
+ """
+ build = self.get_finalized_command('build')
+ base_dir = self.distribution.get_fullname()
+
+ self.filelist.exclude_pattern(None, prefix=build.build_base)
+ self.filelist.exclude_pattern(None, prefix=base_dir)
+
+ if sys.platform == 'win32':
+ seps = r'/|\\'
+ else:
+ seps = '/'
+
+ vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
+ '_darcs']
+ vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
+ self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
+
+ def write_manifest(self):
+ """Write the file list in 'self.filelist' (presumably as filled in
+ by 'add_defaults()' and 'read_template()') to the manifest file
+ named by 'self.manifest'.
+ """
+ if self._manifest_is_not_generated():
+ log.info("not writing to manually maintained "
+ "manifest file '%s'" % self.manifest)
+ return
+
+ content = self.filelist.files[:]
+ content.insert(0, '# file GENERATED by distutils, do NOT edit')
+ self.execute(file_util.write_file, (self.manifest, content),
+ "writing manifest file '%s'" % self.manifest)
+
+ def _manifest_is_not_generated(self):
+ # check for special comment used in 3.1.3 and higher
+ if not os.path.isfile(self.manifest):
+ return False
+
+ fp = open(self.manifest)
+ try:
+ first_line = fp.readline()
+ finally:
+ fp.close()
+ return first_line != '# file GENERATED by distutils, do NOT edit\n'
+
+ def read_manifest(self):
+ """Read the manifest file (named by 'self.manifest') and use it to
+ fill in 'self.filelist', the list of files to include in the source
+ distribution.
+ """
+ log.info("reading manifest file '%s'", self.manifest)
+ manifest = open(self.manifest)
+ for line in manifest:
+ # ignore comments and blank lines
+ line = line.strip()
+ if line.startswith('#') or not line:
+ continue
+ self.filelist.append(line)
+ manifest.close()
+
+ def make_release_tree(self, base_dir, files):
+ """Create the directory tree that will become the source
+ distribution archive. All directories implied by the filenames in
+ 'files' are created under 'base_dir', and then we hard link or copy
+ (if hard linking is unavailable) those files into place.
+ Essentially, this duplicates the developer's source tree, but in a
+ directory named after the distribution, containing only the files
+ to be distributed.
+ """
+ # Create all the directories under 'base_dir' necessary to
+ # put 'files' there; the 'mkpath()' is just so we don't die
+ # if the manifest happens to be empty.
+ self.mkpath(base_dir)
+ dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
+
+ # And walk over the list of files, either making a hard link (if
+ # os.link exists) to each one that doesn't already exist in its
+ # corresponding location under 'base_dir', or copying each file
+ # that's out-of-date in 'base_dir'. (Usually, all files will be
+ # out-of-date, because by default we blow away 'base_dir' when
+ # we're done making the distribution archives.)
+
+ if hasattr(os, 'link'): # can make hard links on this system
+ link = 'hard'
+ msg = "making hard links in %s..." % base_dir
+ else: # nope, have to copy
+ link = None
+ msg = "copying files to %s..." % base_dir
+
+ if not files:
+ log.warn("no files to distribute -- empty manifest?")
+ else:
+ log.info(msg)
+ for file in files:
+ if not os.path.isfile(file):
+ log.warn("'%s' not a regular file -- skipping", file)
+ else:
+ dest = os.path.join(base_dir, file)
+ self.copy_file(file, dest, link=link)
+
+ self.distribution.metadata.write_pkg_info(base_dir)
+
+ def make_distribution(self):
+ """Create the source distribution(s). First, we create the release
+ tree with 'make_release_tree()'; then, we create all required
+ archive files (according to 'self.formats') from the release tree.
+ Finally, we clean up by blowing away the release tree (unless
+ 'self.keep_temp' is true). The list of archive files created is
+ stored so it can be retrieved later by 'get_archive_files()'.
+ """
+ # Don't warn about missing meta-data here -- should be (and is!)
+ # done elsewhere.
+ base_dir = self.distribution.get_fullname()
+ base_name = os.path.join(self.dist_dir, base_dir)
+
+ self.make_release_tree(base_dir, self.filelist.files)
+ archive_files = [] # remember names of files we create
+ # tar archive must be created last to avoid overwrite and remove
+ if 'tar' in self.formats:
+ self.formats.append(self.formats.pop(self.formats.index('tar')))
+
+ for fmt in self.formats:
+ file = self.make_archive(base_name, fmt, base_dir=base_dir,
+ owner=self.owner, group=self.group)
+ archive_files.append(file)
+ self.distribution.dist_files.append(('sdist', '', file))
+
+ self.archive_files = archive_files
+
+ if not self.keep_temp:
+ dir_util.remove_tree(base_dir, dry_run=self.dry_run)
+
+ def get_archive_files(self):
+ """Return the list of archive files created when the command
+ was run, or None if the command hasn't run yet.
+ """
+ return self.archive_files
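
A standalone illustration (not part of the patched file): the version-control exclusion pattern that prune_file_list() builds above, exercised against two made-up paths.

import re

seps = '/'  # on win32 the command uses r'/|\\' instead
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
pattern = re.compile(vcs_ptrn)

print(bool(pattern.search('pkg/.git/config')))   # True  -> pruned
print(bool(pattern.search('pkg/module.py')))     # False -> kept
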
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/upload.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/upload.py
new file mode 100644
index 00000000..32dda359
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/upload.py
@@ -0,0 +1,200 @@
+"""
+distutils.command.upload
+
+Implements the Distutils 'upload' subcommand (upload package to a package
+index).
+"""
+
+import os
+import io
+import platform
+import hashlib
+from base64 import standard_b64encode
+from urllib.request import urlopen, Request, HTTPError
+from urllib.parse import urlparse
+from distutils.errors import DistutilsError, DistutilsOptionError
+from distutils.core import PyPIRCCommand
+from distutils.spawn import spawn
+from distutils import log
+
+class upload(PyPIRCCommand):
+
+ description = "upload binary package to PyPI"
+
+ user_options = PyPIRCCommand.user_options + [
+ ('sign', 's',
+ 'sign files to upload using gpg'),
+ ('identity=', 'i', 'GPG identity used to sign files'),
+ ]
+
+ boolean_options = PyPIRCCommand.boolean_options + ['sign']
+
+ def initialize_options(self):
+ PyPIRCCommand.initialize_options(self)
+ self.username = ''
+ self.password = ''
+ self.show_response = 0
+ self.sign = False
+ self.identity = None
+
+ def finalize_options(self):
+ PyPIRCCommand.finalize_options(self)
+ if self.identity and not self.sign:
+ raise DistutilsOptionError(
+ "Must use --sign for --identity to have meaning"
+ )
+ config = self._read_pypirc()
+ if config != {}:
+ self.username = config['username']
+ self.password = config['password']
+ self.repository = config['repository']
+ self.realm = config['realm']
+
+ # getting the password from the distribution
+ # if previously set by the register command
+ if not self.password and self.distribution.password:
+ self.password = self.distribution.password
+
+ def run(self):
+ if not self.distribution.dist_files:
+ msg = ("Must create and upload files in one command "
+ "(e.g. setup.py sdist upload)")
+ raise DistutilsOptionError(msg)
+ for command, pyversion, filename in self.distribution.dist_files:
+ self.upload_file(command, pyversion, filename)
+
+ def upload_file(self, command, pyversion, filename):
+ # Makes sure the repository URL is compliant
+ schema, netloc, url, params, query, fragments = \
+ urlparse(self.repository)
+ if params or query or fragments:
+ raise AssertionError("Incompatible url %s" % self.repository)
+
+ if schema not in ('http', 'https'):
+ raise AssertionError("unsupported schema " + schema)
+
+ # Sign if requested
+ if self.sign:
+ gpg_args = ["gpg", "--detach-sign", "-a", filename]
+ if self.identity:
+ gpg_args[2:2] = ["--local-user", self.identity]
+ spawn(gpg_args,
+ dry_run=self.dry_run)
+
+ # Fill in the data - send all the meta-data in case we need to
+ # register a new release
+ f = open(filename,'rb')
+ try:
+ content = f.read()
+ finally:
+ f.close()
+ meta = self.distribution.metadata
+ data = {
+ # action
+ ':action': 'file_upload',
+ 'protocol_version': '1',
+
+ # identify release
+ 'name': meta.get_name(),
+ 'version': meta.get_version(),
+
+ # file content
+ 'content': (os.path.basename(filename),content),
+ 'filetype': command,
+ 'pyversion': pyversion,
+ 'md5_digest': hashlib.md5(content).hexdigest(),
+
+ # additional meta-data
+ 'metadata_version': '1.0',
+ 'summary': meta.get_description(),
+ 'home_page': meta.get_url(),
+ 'author': meta.get_contact(),
+ 'author_email': meta.get_contact_email(),
+ 'license': meta.get_licence(),
+ 'description': meta.get_long_description(),
+ 'keywords': meta.get_keywords(),
+ 'platform': meta.get_platforms(),
+ 'classifiers': meta.get_classifiers(),
+ 'download_url': meta.get_download_url(),
+ # PEP 314
+ 'provides': meta.get_provides(),
+ 'requires': meta.get_requires(),
+ 'obsoletes': meta.get_obsoletes(),
+ }
+ comment = ''
+ if command == 'bdist_rpm':
+ dist, version, id = platform.dist()
+ if dist:
+ comment = 'built for %s %s' % (dist, version)
+ elif command == 'bdist_dumb':
+ comment = 'built for %s' % platform.platform(terse=1)
+ data['comment'] = comment
+
+ if self.sign:
+ data['gpg_signature'] = (os.path.basename(filename) + ".asc",
+ open(filename+".asc", "rb").read())
+
+ # set up the authentication
+ user_pass = (self.username + ":" + self.password).encode('ascii')
+ # The exact encoding of the authentication string is debated.
+        # In any case, PyPI only accepts ASCII for both username and password.
+ auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
+
+ # Build up the MIME payload for the POST data
+ boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+ sep_boundary = b'\r\n--' + boundary.encode('ascii')
+ end_boundary = sep_boundary + b'--\r\n'
+ body = io.BytesIO()
+ for key, value in data.items():
+ title = '\r\nContent-Disposition: form-data; name="%s"' % key
+ # handle multiple entries for the same name
+ if not isinstance(value, list):
+ value = [value]
+ for value in value:
+ if type(value) is tuple:
+ title += '; filename="%s"' % value[0]
+ value = value[1]
+ else:
+ value = str(value).encode('utf-8')
+ body.write(sep_boundary)
+ body.write(title.encode('utf-8'))
+ body.write(b"\r\n\r\n")
+ body.write(value)
+ body.write(end_boundary)
+ body = body.getvalue()
+
+ msg = "Submitting %s to %s" % (filename, self.repository)
+ self.announce(msg, log.INFO)
+
+ # build the Request
+ headers = {
+ 'Content-type': 'multipart/form-data; boundary=%s' % boundary,
+ 'Content-length': str(len(body)),
+ 'Authorization': auth,
+ }
+
+ request = Request(self.repository, data=body,
+ headers=headers)
+ # send the data
+ try:
+ result = urlopen(request)
+ status = result.getcode()
+ reason = result.msg
+ except HTTPError as e:
+ status = e.code
+ reason = e.msg
+ except OSError as e:
+ self.announce(str(e), log.ERROR)
+ raise
+
+ if status == 200:
+ self.announce('Server response (%s): %s' % (status, reason),
+ log.INFO)
+ if self.show_response:
+ text = self._read_pypi_response(result)
+ msg = '\n'.join(('-' * 75, text, '-' * 75))
+ self.announce(msg, log.INFO)
+ else:
+ msg = 'Upload failed (%s): %s' % (status, reason)
+ self.announce(msg, log.ERROR)
+ raise DistutilsError(msg)
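
A standalone illustration (not part of the patched file): the Authorization header and md5 digest that upload_file() computes above, using invented credentials and content.

import hashlib
from base64 import standard_b64encode

username, password = 'example-user', 'example-pass'   # placeholders
user_pass = (username + ':' + password).encode('ascii')
auth = 'Basic ' + standard_b64encode(user_pass).decode('ascii')

content = b'fake archive bytes'
print(auth)
print(hashlib.md5(content).hexdigest())
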
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-10.0-amd64.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-10.0-amd64.exe
new file mode 100644
index 00000000..6fa0dce1
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-10.0-amd64.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-10.0.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-10.0.exe
new file mode 100644
index 00000000..afc3bc6c
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-10.0.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-14.0-amd64.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-14.0-amd64.exe
new file mode 100644
index 00000000..253c2e2e
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-14.0-amd64.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-14.0.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-14.0.exe
new file mode 100644
index 00000000..46f5f356
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-14.0.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-6.0.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-6.0.exe
new file mode 100644
index 00000000..f57c855a
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-6.0.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-7.1.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-7.1.exe
new file mode 100644
index 00000000..1433bc1a
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-7.1.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-8.0.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-8.0.exe
new file mode 100644
index 00000000..7403bfab
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-8.0.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-9.0-amd64.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-9.0-amd64.exe
new file mode 100644
index 00000000..94fbd434
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-9.0-amd64.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-9.0.exe b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-9.0.exe
new file mode 100644
index 00000000..2ec261f9
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/command/wininst-9.0.exe differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/config.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/config.py
new file mode 100644
index 00000000..2171abd6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/config.py
@@ -0,0 +1,130 @@
+"""distutils.pypirc
+
+Provides the PyPIRCCommand class, the base class for the command classes
+that uses .pypirc in the distutils.command package.
+"""
+import os
+from configparser import RawConfigParser
+
+from distutils.cmd import Command
+
+DEFAULT_PYPIRC = """\
+[distutils]
+index-servers =
+ pypi
+
+[pypi]
+username:%s
+password:%s
+"""
+
+class PyPIRCCommand(Command):
+ """Base command that knows how to handle the .pypirc file
+ """
+ DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
+ DEFAULT_REALM = 'pypi'
+ repository = None
+ realm = None
+
+ user_options = [
+ ('repository=', 'r',
+ "url of repository [default: %s]" % \
+ DEFAULT_REPOSITORY),
+ ('show-response', None,
+ 'display full response text from server')]
+
+ boolean_options = ['show-response']
+
+ def _get_rc_file(self):
+ """Returns rc file path."""
+ return os.path.join(os.path.expanduser('~'), '.pypirc')
+
+ def _store_pypirc(self, username, password):
+ """Creates a default .pypirc file."""
+ rc = self._get_rc_file()
+ with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
+ f.write(DEFAULT_PYPIRC % (username, password))
+
+ def _read_pypirc(self):
+ """Reads the .pypirc file."""
+ rc = self._get_rc_file()
+ if os.path.exists(rc):
+ self.announce('Using PyPI login from %s' % rc)
+ repository = self.repository or self.DEFAULT_REPOSITORY
+
+ config = RawConfigParser()
+ config.read(rc)
+ sections = config.sections()
+ if 'distutils' in sections:
+ # let's get the list of servers
+ index_servers = config.get('distutils', 'index-servers')
+ _servers = [server.strip() for server in
+ index_servers.split('\n')
+ if server.strip() != '']
+ if _servers == []:
+ # nothing set, let's try to get the default pypi
+ if 'pypi' in sections:
+ _servers = ['pypi']
+ else:
+ # the file is not properly defined, returning
+ # an empty dict
+ return {}
+ for server in _servers:
+ current = {'server': server}
+ current['username'] = config.get(server, 'username')
+
+ # optional params
+ for key, default in (('repository',
+ self.DEFAULT_REPOSITORY),
+ ('realm', self.DEFAULT_REALM),
+ ('password', None)):
+ if config.has_option(server, key):
+ current[key] = config.get(server, key)
+ else:
+ current[key] = default
+
+ # work around people having "repository" for the "pypi"
+ # section of their config set to the HTTP (rather than
+ # HTTPS) URL
+ if (server == 'pypi' and
+ repository in (self.DEFAULT_REPOSITORY, 'pypi')):
+ current['repository'] = self.DEFAULT_REPOSITORY
+ return current
+
+ if (current['server'] == repository or
+ current['repository'] == repository):
+ return current
+ elif 'server-login' in sections:
+ # old format
+ server = 'server-login'
+ if config.has_option(server, 'repository'):
+ repository = config.get(server, 'repository')
+ else:
+ repository = self.DEFAULT_REPOSITORY
+ return {'username': config.get(server, 'username'),
+ 'password': config.get(server, 'password'),
+ 'repository': repository,
+ 'server': server,
+ 'realm': self.DEFAULT_REALM}
+
+ return {}
+
+ def _read_pypi_response(self, response):
+ """Read and decode a PyPI HTTP response."""
+ import cgi
+ content_type = response.getheader('content-type', 'text/plain')
+ encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
+ return response.read().decode(encoding)
+
+ def initialize_options(self):
+ """Initialize options."""
+ self.repository = None
+ self.realm = None
+ self.show_response = 0
+
+ def finalize_options(self):
+ """Finalizes options."""
+ if self.repository is None:
+ self.repository = self.DEFAULT_REPOSITORY
+ if self.realm is None:
+ self.realm = self.DEFAULT_REALM
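
A standalone illustration (not part of the patched file): parsing a .pypirc-style configuration the way _read_pypirc() does above, but from an in-memory string with invented values instead of the real ~/.pypirc.

from configparser import RawConfigParser

sample = '''\
[distutils]
index-servers =
    pypi

[pypi]
username:example-user
password:example-pass
'''

config = RawConfigParser()
config.read_string(sample)
servers = [s.strip() for s in
           config.get('distutils', 'index-servers').split('\n')
           if s.strip()]
print(servers)                          # ['pypi']
print(config.get('pypi', 'username'))   # example-user
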
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/core.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/core.py
new file mode 100644
index 00000000..d603d4a4
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/core.py
@@ -0,0 +1,234 @@
+"""distutils.core
+
+The only module that needs to be imported to use the Distutils; provides
+the 'setup' function (which is to be called from the setup script). Also
+indirectly provides the Distribution and Command classes, although they are
+really defined in distutils.dist and distutils.cmd.
+"""
+
+import os
+import sys
+
+from distutils.debug import DEBUG
+from distutils.errors import *
+
+# Mainly import these so setup scripts can "from distutils.core import" them.
+from distutils.dist import Distribution
+from distutils.cmd import Command
+from distutils.config import PyPIRCCommand
+from distutils.extension import Extension
+
+# This is a barebones help message generated and displayed when the user
+# runs the setup script with no arguments at all. More useful help
+# is generated with various --help options: global help, list commands,
+# and per-command help.
+USAGE = """\
+usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
+ or: %(script)s --help [cmd1 cmd2 ...]
+ or: %(script)s --help-commands
+ or: %(script)s cmd --help
+"""
+
+def gen_usage (script_name):
+ script = os.path.basename(script_name)
+ return USAGE % vars()
+
+
+# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
+_setup_stop_after = None
+_setup_distribution = None
+
+# Legal keyword arguments for the setup() function
+setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
+ 'name', 'version', 'author', 'author_email',
+ 'maintainer', 'maintainer_email', 'url', 'license',
+ 'description', 'long_description', 'keywords',
+ 'platforms', 'classifiers', 'download_url',
+ 'requires', 'provides', 'obsoletes',
+ )
+
+# Legal keyword arguments for the Extension constructor
+extension_keywords = ('name', 'sources', 'include_dirs',
+ 'define_macros', 'undef_macros',
+ 'library_dirs', 'libraries', 'runtime_library_dirs',
+ 'extra_objects', 'extra_compile_args', 'extra_link_args',
+ 'swig_opts', 'export_symbols', 'depends', 'language')
+
+def setup (**attrs):
+ """The gateway to the Distutils: do everything your setup script needs
+ to do, in a highly flexible and user-driven way. Briefly: create a
+ Distribution instance; find and parse config files; parse the command
+ line; run each Distutils command found there, customized by the options
+ supplied to 'setup()' (as keyword arguments), in config files, and on
+ the command line.
+
+ The Distribution instance might be an instance of a class supplied via
+ the 'distclass' keyword argument to 'setup'; if no such class is
+ supplied, then the Distribution class (in dist.py) is instantiated.
+ All other arguments to 'setup' (except for 'cmdclass') are used to set
+ attributes of the Distribution instance.
+
+ The 'cmdclass' argument, if supplied, is a dictionary mapping command
+ names to command classes. Each command encountered on the command line
+ will be turned into a command class, which is in turn instantiated; any
+ class found in 'cmdclass' is used in place of the default, which is
+ (for command 'foo_bar') class 'foo_bar' in module
+ 'distutils.command.foo_bar'. The command class must provide a
+ 'user_options' attribute which is a list of option specifiers for
+ 'distutils.fancy_getopt'. Any command-line options between the current
+ and the next command are used to set attributes of the current command
+ object.
+
+ When the entire command-line has been successfully parsed, calls the
+ 'run()' method on each command object in turn. This method will be
+ driven entirely by the Distribution object (which each command object
+ has a reference to, thanks to its constructor), and the
+ command-specific options that became attributes of each command
+ object.
+ """
+
+ global _setup_stop_after, _setup_distribution
+
+ # Determine the distribution class -- either caller-supplied or
+ # our Distribution (see below).
+ klass = attrs.get('distclass')
+ if klass:
+ del attrs['distclass']
+ else:
+ klass = Distribution
+
+ if 'script_name' not in attrs:
+ attrs['script_name'] = os.path.basename(sys.argv[0])
+ if 'script_args' not in attrs:
+ attrs['script_args'] = sys.argv[1:]
+
+ # Create the Distribution instance, using the remaining arguments
+ # (ie. everything except distclass) to initialize it
+ try:
+ _setup_distribution = dist = klass(attrs)
+ except DistutilsSetupError as msg:
+ if 'name' not in attrs:
+ raise SystemExit("error in setup command: %s" % msg)
+ else:
+ raise SystemExit("error in %s setup command: %s" % \
+ (attrs['name'], msg))
+
+ if _setup_stop_after == "init":
+ return dist
+
+ # Find and parse the config file(s): they will override options from
+ # the setup script, but be overridden by the command line.
+ dist.parse_config_files()
+
+ if DEBUG:
+ print("options (after parsing config files):")
+ dist.dump_option_dicts()
+
+ if _setup_stop_after == "config":
+ return dist
+
+ # Parse the command line and override config files; any
+ # command-line errors are the end user's fault, so turn them into
+ # SystemExit to suppress tracebacks.
+ try:
+ ok = dist.parse_command_line()
+ except DistutilsArgError as msg:
+ raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
+
+ if DEBUG:
+ print("options (after parsing command line):")
+ dist.dump_option_dicts()
+
+ if _setup_stop_after == "commandline":
+ return dist
+
+ # And finally, run all the commands found on the command line.
+ if ok:
+ try:
+ dist.run_commands()
+ except KeyboardInterrupt:
+ raise SystemExit("interrupted")
+ except OSError as exc:
+ if DEBUG:
+ sys.stderr.write("error: %s\n" % (exc,))
+ raise
+ else:
+ raise SystemExit("error: %s" % (exc,))
+
+ except (DistutilsError,
+ CCompilerError) as msg:
+ if DEBUG:
+ raise
+ else:
+ raise SystemExit("error: " + str(msg))
+
+ return dist
+
+# setup ()
+
+
+def run_setup (script_name, script_args=None, stop_after="run"):
+ """Run a setup script in a somewhat controlled environment, and
+ return the Distribution instance that drives things. This is useful
+ if you need to find out the distribution meta-data (passed as
+    keyword args from 'script' to 'setup()'), or the contents of the
+ config files or command-line.
+
+ 'script_name' is a file that will be read and run with 'exec()';
+ 'sys.argv[0]' will be replaced with 'script' for the duration of the
+ call. 'script_args' is a list of strings; if supplied,
+ 'sys.argv[1:]' will be replaced by 'script_args' for the duration of
+ the call.
+
+ 'stop_after' tells 'setup()' when to stop processing; possible
+ values:
+ init
+ stop after the Distribution instance has been created and
+ populated with the keyword arguments to 'setup()'
+ config
+ stop after config files have been parsed (and their data
+ stored in the Distribution instance)
+ commandline
+ stop after the command-line ('sys.argv[1:]' or 'script_args')
+ have been parsed (and the data stored in the Distribution)
+ run [default]
+ stop after all commands have been run (the same as if 'setup()'
+        had been called in the usual way)
+
+ Returns the Distribution instance, which provides all information
+ used to drive the Distutils.
+ """
+ if stop_after not in ('init', 'config', 'commandline', 'run'):
+ raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
+
+ global _setup_stop_after, _setup_distribution
+ _setup_stop_after = stop_after
+
+ save_argv = sys.argv.copy()
+ g = {'__file__': script_name}
+ try:
+ try:
+ sys.argv[0] = script_name
+ if script_args is not None:
+ sys.argv[1:] = script_args
+ with open(script_name, 'rb') as f:
+ exec(f.read(), g)
+ finally:
+ sys.argv = save_argv
+ _setup_stop_after = None
+ except SystemExit:
+ # Hmm, should we do something if exiting with a non-zero code
+ # (ie. error)?
+ pass
+
+ if _setup_distribution is None:
+ raise RuntimeError(("'distutils.core.setup()' was never called -- "
+ "perhaps '%s' is not a Distutils setup script?") % \
+ script_name)
+
+ # I wonder if the setup script's namespace -- g and l -- would be of
+ # any interest to callers?
+ #print "_setup_distribution:", _setup_distribution
+ return _setup_distribution
+
+# run_setup ()
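
A standalone illustration (not part of the patched file): driving run_setup() against a throwaway setup script and stopping right after the Distribution is created. This assumes distutils is importable, as it is in the Python 3.7 tree added by this patch; the script contents are invented.

import os
import tempfile
from distutils.core import run_setup

script = "from distutils.core import setup\nsetup(name='demo', version='0.1')\n"
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'setup.py')
    with open(path, 'w') as f:
        f.write(script)
    # stop_after='init' returns as soon as the Distribution exists
    dist = run_setup(path, stop_after='init')

print(dist.get_name(), dist.get_version())   # demo 0.1
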
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/cygwinccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/cygwinccompiler.py
new file mode 100644
index 00000000..6c5d7774
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/cygwinccompiler.py
@@ -0,0 +1,405 @@
+"""distutils.cygwinccompiler
+
+Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
+handles the Cygwin port of the GNU C compiler to Windows. It also contains
+the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
+cygwin in no-cygwin mode).
+"""
+
+# problems:
+#
+# * if you use a msvc compiled python version (1.5.2)
+# 1. you have to insert a __GNUC__ section in its config.h
+# 2. you have to generate an import library for its dll
+# - create a def-file for python??.dll
+# - create an import library using
+# dlltool --dllname python15.dll --def python15.def \
+# --output-lib libpython15.a
+#
+# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
+#
+# * We put export_symbols in a def-file, and don't use
+#   --export-all-symbols because it didn't work reliably in some
+#   tested configurations. And because other Windows compilers also
+#   need their symbols specified, this is no serious problem.
+#
+# tested configurations:
+#
+# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
+# (after patching python's config.h and for C++ some other include files)
+# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
+# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
+# (ld doesn't support -shared, so we use dllwrap)
+# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
+# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
+# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
+#   - using gcc -mdll instead of dllwrap doesn't work without -static because
+#     it tries to link against dlls instead of their import libraries. (If
+#     it finds the dll first.)
+#     By specifying -static we force ld to link against the import libraries;
+#     this is the Windows standard and the dlls normally do not contain the
+#     necessary symbols.
+# *** only the version of June 2000 shows these problems
+# * cygwin gcc 3.2/ld 2.13.90 works
+# (ld supports -shared)
+# * mingw gcc 3.2/ld 2.13 works
+# (ld supports -shared)
+
+import os
+import sys
+import copy
+from subprocess import Popen, PIPE, check_output
+import re
+
+from distutils.ccompiler import gen_preprocess_options, gen_lib_options
+from distutils.unixccompiler import UnixCCompiler
+from distutils.file_util import write_file
+from distutils.errors import (DistutilsExecError, CCompilerError,
+ CompileError, UnknownFileError)
+from distutils import log
+from distutils.version import LooseVersion
+from distutils.spawn import find_executable
+
+def get_msvcr():
+ """Include the appropriate MSVC runtime library if Python was built
+ with MSVC 7.0 or later.
+ """
+ msc_pos = sys.version.find('MSC v.')
+ if msc_pos != -1:
+ msc_ver = sys.version[msc_pos+6:msc_pos+10]
+ if msc_ver == '1300':
+ # MSVC 7.0
+ return ['msvcr70']
+ elif msc_ver == '1310':
+ # MSVC 7.1
+ return ['msvcr71']
+ elif msc_ver == '1400':
+ # VS2005 / MSVC 8.0
+ return ['msvcr80']
+ elif msc_ver == '1500':
+ # VS2008 / MSVC 9.0
+ return ['msvcr90']
+ elif msc_ver == '1600':
+ # VS2010 / MSVC 10.0
+ return ['msvcr100']
+ else:
+ raise ValueError("Unknown MS Compiler version %s " % msc_ver)
+
+
+class CygwinCCompiler(UnixCCompiler):
+ """ Handles the Cygwin port of the GNU C compiler to Windows.
+ """
+ compiler_type = 'cygwin'
+ obj_extension = ".o"
+ static_lib_extension = ".a"
+ shared_lib_extension = ".dll"
+ static_lib_format = "lib%s%s"
+ shared_lib_format = "%s%s"
+ exe_extension = ".exe"
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+
+ status, details = check_config_h()
+ self.debug_print("Python's GCC status: %s (details: %s)" %
+ (status, details))
+ if status is not CONFIG_H_OK:
+ self.warn(
+ "Python's pyconfig.h doesn't seem to support your compiler. "
+ "Reason: %s. "
+ "Compiling may fail because of undefined preprocessor macros."
+ % details)
+
+ self.gcc_version, self.ld_version, self.dllwrap_version = \
+ get_versions()
+ self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
+ (self.gcc_version,
+ self.ld_version,
+ self.dllwrap_version) )
+
+ # ld_version >= "2.10.90" and < "2.13" should also be able to use
+ # gcc -mdll instead of dllwrap
+ # Older dllwraps had own version numbers, newer ones use the
+ # same as the rest of binutils ( also ld )
+ # dllwrap 2.10.90 is buggy
+ if self.ld_version >= "2.10.90":
+ self.linker_dll = "gcc"
+ else:
+ self.linker_dll = "dllwrap"
+
+ # ld_version >= "2.13" support -shared so use it instead of
+ # -mdll -static
+ if self.ld_version >= "2.13":
+ shared_option = "-shared"
+ else:
+ shared_option = "-mdll -static"
+
+ # Hard-code GCC because that's what this is all about.
+ # XXX optimization, warnings etc. should be customizable.
+ self.set_executables(compiler='gcc -mcygwin -O -Wall',
+ compiler_so='gcc -mcygwin -mdll -O -Wall',
+ compiler_cxx='g++ -mcygwin -O -Wall',
+ linker_exe='gcc -mcygwin',
+ linker_so=('%s -mcygwin %s' %
+ (self.linker_dll, shared_option)))
+
+ # cygwin and mingw32 need different sets of libraries
+ if self.gcc_version == "2.91.57":
+ # cygwin shouldn't need msvcrt, but without the dlls will crash
+ # (gcc version 2.91.57) -- perhaps something about initialization
+ self.dll_libraries=["msvcrt"]
+ self.warn(
+ "Consider upgrading to a newer version of gcc")
+ else:
+ # Include the appropriate MSVC runtime library if Python was built
+ # with MSVC 7.0 or later.
+ self.dll_libraries = get_msvcr()
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compiles the source by spawning GCC and windres if needed."""
+ if ext == '.rc' or ext == '.res':
+ # gcc needs '.res' and '.rc' compiled to object files !!!
+ try:
+ self.spawn(["windres", "-i", src, "-o", obj])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ else: # for other files use the C-compiler
+ try:
+ self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ def link(self, target_desc, objects, output_filename, output_dir=None,
+ libraries=None, library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None):
+ """Link the objects."""
+ # use separate copies, so we can modify the lists
+ extra_preargs = copy.copy(extra_preargs or [])
+ libraries = copy.copy(libraries or [])
+ objects = copy.copy(objects or [])
+
+ # Additional libraries
+ libraries.extend(self.dll_libraries)
+
+ # handle export symbols by creating a def-file
+ # with executables this only works with gcc/ld as linker
+ if ((export_symbols is not None) and
+ (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
+ # (The linker doesn't do anything if output is up-to-date.
+ # So it would probably better to check if we really need this,
+ # but for this we had to insert some unchanged parts of
+ # UnixCCompiler, and this is not what we want.)
+
+ # we want to put some files in the same directory as the
+ # object files are, build_temp doesn't help much
+ # where are the object files
+ temp_dir = os.path.dirname(objects[0])
+ # name of dll to give the helper files the same base name
+ (dll_name, dll_extension) = os.path.splitext(
+ os.path.basename(output_filename))
+
+ # generate the filenames for these files
+ def_file = os.path.join(temp_dir, dll_name + ".def")
+ lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
+
+ # Generate .def file
+ contents = [
+ "LIBRARY %s" % os.path.basename(output_filename),
+ "EXPORTS"]
+ for sym in export_symbols:
+ contents.append(sym)
+ self.execute(write_file, (def_file, contents),
+ "writing %s" % def_file)
+
+ # next add options for def-file and to creating import libraries
+
+ # dllwrap uses different options than gcc/ld
+ if self.linker_dll == "dllwrap":
+ extra_preargs.extend(["--output-lib", lib_file])
+ # for dllwrap we have to use a special option
+ extra_preargs.extend(["--def", def_file])
+ # we use gcc/ld here and can be sure ld is >= 2.9.10
+ else:
+ # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
+ #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
+ # for gcc/ld the def-file is specified as any object files
+ objects.append(def_file)
+
+ #end: if ((export_symbols is not None) and
+ # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
+
+        # whoever wants symbols and a many-times-larger output file
+        # should explicitly switch debug mode on;
+        # otherwise we let dllwrap/ld strip the output file
+ # (On my machine: 10KiB < stripped_file < ??100KiB
+ # unstripped_file = stripped_file + XXX KiB
+ # ( XXX=254 for a typical python extension))
+ if not debug:
+ extra_preargs.append("-s")
+
+ UnixCCompiler.link(self, target_desc, objects, output_filename,
+ output_dir, libraries, library_dirs,
+ runtime_library_dirs,
+ None, # export_symbols, we do this in our def-file
+ debug, extra_preargs, extra_postargs, build_temp,
+ target_lang)
+
+ # -- Miscellaneous methods -----------------------------------------
+
+ def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ """Adds supports for rc and res files."""
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+ base, ext = os.path.splitext(os.path.normcase(src_name))
+ if ext not in (self.src_extensions + ['.rc','.res']):
+ raise UnknownFileError("unknown file type '%s' (from '%s')" % \
+ (ext, src_name))
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext in ('.res', '.rc'):
+ # these need to be compiled to object files
+ obj_names.append (os.path.join(output_dir,
+ base + ext + self.obj_extension))
+ else:
+ obj_names.append (os.path.join(output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+# the same as cygwin plus some additional parameters
+class Mingw32CCompiler(CygwinCCompiler):
+ """ Handles the Mingw32 port of the GNU C compiler to Windows.
+ """
+ compiler_type = 'mingw32'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+
+ CygwinCCompiler.__init__ (self, verbose, dry_run, force)
+
+ # ld_version >= "2.13" support -shared so use it instead of
+ # -mdll -static
+ if self.ld_version >= "2.13":
+ shared_option = "-shared"
+ else:
+ shared_option = "-mdll -static"
+
+ # A real mingw32 doesn't need to specify a different entry point,
+ # but cygwin 2.91.57 in no-cygwin-mode needs it.
+ if self.gcc_version <= "2.91.57":
+ entry_point = '--entry _DllMain@12'
+ else:
+ entry_point = ''
+
+ if is_cygwingcc():
+ raise CCompilerError(
+ 'Cygwin gcc cannot be used with --compiler=mingw32')
+
+ self.set_executables(compiler='gcc -O -Wall',
+ compiler_so='gcc -mdll -O -Wall',
+ compiler_cxx='g++ -O -Wall',
+ linker_exe='gcc',
+ linker_so='%s %s %s'
+ % (self.linker_dll, shared_option,
+ entry_point))
+ # Maybe we should also append -mthreads, but then the finished
+ # dlls need another dll (mingwm10.dll see Mingw32 docs)
+ # (-mthreads: Support thread-safe exception handling on `Mingw32')
+
+ # no additional libraries needed
+ self.dll_libraries=[]
+
+ # Include the appropriate MSVC runtime library if Python was built
+ # with MSVC 7.0 or later.
+ self.dll_libraries = get_msvcr()
+
+# Because these compilers aren't configured in Python's pyconfig.h file by
+# default, we should at least warn the user if he is using an unmodified
+# version.
+
+CONFIG_H_OK = "ok"
+CONFIG_H_NOTOK = "not ok"
+CONFIG_H_UNCERTAIN = "uncertain"
+
+def check_config_h():
+ """Check if the current Python installation appears amenable to building
+ extensions with GCC.
+
+ Returns a tuple (status, details), where 'status' is one of the following
+ constants:
+
+ - CONFIG_H_OK: all is well, go ahead and compile
+ - CONFIG_H_NOTOK: doesn't look good
+ - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h
+
+ 'details' is a human-readable string explaining the situation.
+
+ Note there are two ways to conclude "OK": either 'sys.version' contains
+ the string "GCC" (implying that this Python was built with GCC), or the
+ installed "pyconfig.h" contains the string "__GNUC__".
+ """
+
+ # XXX since this function also checks sys.version, it's not strictly a
+ # "pyconfig.h" check -- should probably be renamed...
+
+ from distutils import sysconfig
+
+ # if sys.version contains GCC then python was compiled with GCC, and the
+ # pyconfig.h file should be OK
+ if "GCC" in sys.version:
+ return CONFIG_H_OK, "sys.version mentions 'GCC'"
+
+ # let's see if __GNUC__ is mentioned in python.h
+ fn = sysconfig.get_config_h_filename()
+ try:
+ config_h = open(fn)
+ try:
+ if "__GNUC__" in config_h.read():
+ return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
+ else:
+ return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
+ finally:
+ config_h.close()
+ except OSError as exc:
+ return (CONFIG_H_UNCERTAIN,
+ "couldn't read '%s': %s" % (fn, exc.strerror))
+
+RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
+
+def _find_exe_version(cmd):
+ """Find the version of an executable by running `cmd` in the shell.
+
+ If the command is not found, or the output does not match
+ `RE_VERSION`, returns None.
+ """
+ executable = cmd.split()[0]
+ if find_executable(executable) is None:
+ return None
+ out = Popen(cmd, shell=True, stdout=PIPE).stdout
+ try:
+ out_string = out.read()
+ finally:
+ out.close()
+ result = RE_VERSION.search(out_string)
+ if result is None:
+ return None
+ # LooseVersion works with strings
+ # so we need to decode our bytes
+ return LooseVersion(result.group(1).decode())
+
+def get_versions():
+ """ Try to find out the versions of gcc, ld and dllwrap.
+
+    If a tool's version cannot be determined, None is returned for it.
+ """
+ commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
+ return tuple([_find_exe_version(cmd) for cmd in commands])
+
+def is_cygwingcc():
+ '''Try to determine if the gcc that would be used is from cygwin.'''
+ out_string = check_output(['gcc', '-dumpmachine'])
+ return out_string.strip().endswith(b'cygwin')
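
A standalone illustration (not part of the patched file): the sys.version sniffing that get_msvcr() above relies on. On a non-MSVC build the marker is simply absent.

import sys

msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
    msc_ver = sys.version[msc_pos + 6:msc_pos + 10]
    print('built with MSC version', msc_ver)
else:
    print('no "MSC v." marker in sys.version (not an MSVC build)')
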
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/debug.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/debug.py
new file mode 100644
index 00000000..daf1660f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/debug.py
@@ -0,0 +1,5 @@
+import os
+
+# If DISTUTILS_DEBUG is anything other than the empty string, we run in
+# debug mode.
+DEBUG = os.environ.get('DISTUTILS_DEBUG')
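
A standalone illustration (not part of the patched file): DEBUG captures whatever DISTUTILS_DEBUG held at import time, so the variable has to be set before the first distutils import. Assumes distutils is importable, as in the tree added by this patch.

import os
os.environ['DISTUTILS_DEBUG'] = '1'

from distutils.debug import DEBUG
print(bool(DEBUG))   # True
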
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dep_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dep_util.py
new file mode 100644
index 00000000..d74f5e4e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dep_util.py
@@ -0,0 +1,92 @@
+"""distutils.dep_util
+
+Utility functions for simple, timestamp-based dependency checking of files
+and groups of files; also, functions based entirely on such
+timestamp dependency analysis."""
+
+import os
+from distutils.errors import DistutilsFileError
+
+
+def newer (source, target):
+ """Return true if 'source' exists and is more recently modified than
+ 'target', or if 'source' exists and 'target' doesn't. Return false if
+ both exist and 'target' is the same age or younger than 'source'.
+ Raise DistutilsFileError if 'source' does not exist.
+ """
+ if not os.path.exists(source):
+ raise DistutilsFileError("file '%s' does not exist" %
+ os.path.abspath(source))
+ if not os.path.exists(target):
+ return 1
+
+ from stat import ST_MTIME
+ mtime1 = os.stat(source)[ST_MTIME]
+ mtime2 = os.stat(target)[ST_MTIME]
+
+ return mtime1 > mtime2
+
+# newer ()
+
+
+def newer_pairwise (sources, targets):
+ """Walk two filename lists in parallel, testing if each source is newer
+ than its corresponding target. Return a pair of lists (sources,
+ targets) where source is newer than target, according to the semantics
+ of 'newer()'.
+ """
+ if len(sources) != len(targets):
+ raise ValueError("'sources' and 'targets' must be same length")
+
+ # build a pair of lists (sources, targets) where source is newer
+ n_sources = []
+ n_targets = []
+ for i in range(len(sources)):
+ if newer(sources[i], targets[i]):
+ n_sources.append(sources[i])
+ n_targets.append(targets[i])
+
+ return (n_sources, n_targets)
+
+# newer_pairwise ()
+
+
+def newer_group (sources, target, missing='error'):
+ """Return true if 'target' is out-of-date with respect to any file
+ listed in 'sources'. In other words, if 'target' exists and is newer
+ than every file in 'sources', return false; otherwise return true.
+ 'missing' controls what we do when a source file is missing; the
+ default ("error") is to blow up with an OSError from inside 'stat()';
+ if it is "ignore", we silently drop any missing source files; if it is
+ "newer", any missing source files make us assume that 'target' is
+ out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
+ carry out commands that wouldn't work because inputs are missing, but
+ that doesn't matter because you're not actually going to run the
+ commands).
+ """
+ # If the target doesn't even exist, then it's definitely out-of-date.
+ if not os.path.exists(target):
+ return 1
+
+ # Otherwise we have to find out the hard way: if *any* source file
+ # is more recent than 'target', then 'target' is out-of-date and
+ # we can immediately return true. If we fall through to the end
+ # of the loop, then 'target' is up-to-date and we return false.
+ from stat import ST_MTIME
+ target_mtime = os.stat(target)[ST_MTIME]
+ for source in sources:
+ if not os.path.exists(source):
+ if missing == 'error': # blow up when we stat() the file
+ pass
+ elif missing == 'ignore': # missing source dropped from
+ continue # target's dependency list
+ elif missing == 'newer': # missing source means target is
+ return 1 # out-of-date
+
+ source_mtime = os.stat(source)[ST_MTIME]
+ if source_mtime > target_mtime:
+ return 1
+ else:
+ return 0
+
+# newer_group ()
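
A standalone illustration (not part of the patched file): newer() compared on two freshly created temporary files, with the source's mtime pushed an hour past the target's. Assumes distutils.dep_util is importable, as in the tree added by this patch.

import os
import tempfile
from distutils.dep_util import newer

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, 'target.o')
    source = os.path.join(tmp, 'source.c')
    with open(target, 'w'), open(source, 'w'):
        pass
    st = os.stat(target)
    os.utime(source, (st.st_atime, st.st_mtime + 3600))
    print(newer(source, target))   # True  -> source is more recent
    print(newer(target, source))   # False -> target is not newer
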
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dir_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dir_util.py
new file mode 100644
index 00000000..d5cd8e3e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dir_util.py
@@ -0,0 +1,210 @@
+"""distutils.dir_util
+
+Utility functions for manipulating directories and directory trees."""
+
+import os
+import errno
+from distutils.errors import DistutilsFileError, DistutilsInternalError
+from distutils import log
+
+# cache used by mkpath() -- in addition to cheapening redundant calls,
+# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
+_path_created = {}
+
+# I don't use os.makedirs because a) it's new to Python 1.5.2, and
+# b) it blows up if the directory already exists (I want to silently
+# succeed in that case).
+def mkpath(name, mode=0o777, verbose=1, dry_run=0):
+ """Create a directory and any missing ancestor directories.
+
+ If the directory already exists (or if 'name' is the empty string, which
+ means the current directory, which of course exists), then do nothing.
+ Raise DistutilsFileError if unable to create some directory along the way
+ (eg. some sub-path exists, but is a file rather than a directory).
+ If 'verbose' is true, print a one-line summary of each mkdir to stdout.
+ Return the list of directories actually created.
+ """
+
+ global _path_created
+
+ # Detect a common bug -- name is None
+ if not isinstance(name, str):
+ raise DistutilsInternalError(
+ "mkpath: 'name' must be a string (got %r)" % (name,))
+
+ # XXX what's the better way to handle verbosity? print as we create
+ # each directory in the path (the current behaviour), or only announce
+ # the creation of the whole path? (quite easy to do the latter since
+ # we're not using a recursive algorithm)
+
+ name = os.path.normpath(name)
+ created_dirs = []
+ if os.path.isdir(name) or name == '':
+ return created_dirs
+ if _path_created.get(os.path.abspath(name)):
+ return created_dirs
+
+ (head, tail) = os.path.split(name)
+ tails = [tail] # stack of lone dirs to create
+
+ while head and tail and not os.path.isdir(head):
+ (head, tail) = os.path.split(head)
+ tails.insert(0, tail) # push next higher dir onto stack
+
+ # now 'head' contains the deepest directory that already exists
+ # (that is, the child of 'head' in 'name' is the highest directory
+ # that does *not* exist)
+ for d in tails:
+ #print "head = %s, d = %s: " % (head, d),
+ head = os.path.join(head, d)
+ abs_head = os.path.abspath(head)
+
+ if _path_created.get(abs_head):
+ continue
+
+ if verbose >= 1:
+ log.info("creating %s", head)
+
+ if not dry_run:
+ try:
+ os.mkdir(head, mode)
+ except OSError as exc:
+ if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
+ raise DistutilsFileError(
+ "could not create '%s': %s" % (head, exc.args[-1]))
+ created_dirs.append(head)
+
+ _path_created[abs_head] = 1
+ return created_dirs
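+
+# Usage sketch (illustrative; the path is hypothetical).  mkpath() returns the
+# directories it actually had to create:
+#
+#     from distutils.dir_util import mkpath
+#     created = mkpath('build/lib/pkg', verbose=0)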
+
+def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
+ """Create all the empty directories under 'base_dir' needed to put 'files'
+ there.
+
+ 'base_dir' is just the name of a directory which doesn't necessarily
+ exist yet; 'files' is a list of filenames to be interpreted relative to
+ 'base_dir'. 'base_dir' + the directory portion of every file in 'files'
+ will be created if it doesn't already exist. 'mode', 'verbose' and
+ 'dry_run' flags are as for 'mkpath()'.
+ """
+ # First get the list of directories to create
+ need_dir = set()
+ for file in files:
+ need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
+
+ # Now create them
+ for dir in sorted(need_dir):
+ mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
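+
+# Usage sketch (illustrative; paths are hypothetical).  Only the directory
+# portions are created -- the files themselves are not touched:
+#
+#     from distutils.dir_util import create_tree
+#     create_tree('build', ['pkg/__init__.py', 'pkg/sub/mod.py'], verbose=0)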
+
+def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
+ preserve_symlinks=0, update=0, verbose=1, dry_run=0):
+ """Copy an entire directory tree 'src' to a new location 'dst'.
+
+ Both 'src' and 'dst' must be directory names. If 'src' is not a
+ directory, raise DistutilsFileError. If 'dst' does not exist, it is
+ created with 'mkpath()'. The end result of the copy is that every
+ file in 'src' is copied to 'dst', and directories under 'src' are
+ recursively copied to 'dst'. Return the list of files that were
+ copied or might have been copied, using their output name. The
+ return value is unaffected by 'update' or 'dry_run': it is simply
+ the list of all files under 'src', with the names changed to be
+ under 'dst'.
+
+ 'preserve_mode' and 'preserve_times' are the same as for
+ 'copy_file'; note that they only apply to regular files, not to
+ directories. If 'preserve_symlinks' is true, symlinks will be
+ copied as symlinks (on platforms that support them!); otherwise
+ (the default), the destination of the symlink will be copied.
+ 'update' and 'verbose' are the same as for 'copy_file'.
+ """
+ from distutils.file_util import copy_file
+
+ if not dry_run and not os.path.isdir(src):
+ raise DistutilsFileError(
+ "cannot copy tree '%s': not a directory" % src)
+ try:
+ names = os.listdir(src)
+ except OSError as e:
+ if dry_run:
+ names = []
+ else:
+ raise DistutilsFileError(
+ "error listing files in '%s': %s" % (src, e.strerror))
+
+ if not dry_run:
+ mkpath(dst, verbose=verbose)
+
+ outputs = []
+
+ for n in names:
+ src_name = os.path.join(src, n)
+ dst_name = os.path.join(dst, n)
+
+ if n.startswith('.nfs'):
+ # skip NFS rename files
+ continue
+
+ if preserve_symlinks and os.path.islink(src_name):
+ link_dest = os.readlink(src_name)
+ if verbose >= 1:
+ log.info("linking %s -> %s", dst_name, link_dest)
+ if not dry_run:
+ os.symlink(link_dest, dst_name)
+ outputs.append(dst_name)
+
+ elif os.path.isdir(src_name):
+ outputs.extend(
+ copy_tree(src_name, dst_name, preserve_mode,
+ preserve_times, preserve_symlinks, update,
+ verbose=verbose, dry_run=dry_run))
+ else:
+ copy_file(src_name, dst_name, preserve_mode,
+ preserve_times, update, verbose=verbose,
+ dry_run=dry_run)
+ outputs.append(dst_name)
+
+ return outputs
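+
+# Usage sketch (illustrative; paths are hypothetical).  'update=1' skips files
+# whose destination copy is already up to date:
+#
+#     from distutils.dir_util import copy_tree
+#     copied = copy_tree('pkg', 'build/lib/pkg', update=1, verbose=0)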
+
+def _build_cmdtuple(path, cmdtuples):
+ """Helper for remove_tree()."""
+ for f in os.listdir(path):
+ real_f = os.path.join(path,f)
+ if os.path.isdir(real_f) and not os.path.islink(real_f):
+ _build_cmdtuple(real_f, cmdtuples)
+ else:
+ cmdtuples.append((os.remove, real_f))
+ cmdtuples.append((os.rmdir, path))
+
+def remove_tree(directory, verbose=1, dry_run=0):
+ """Recursively remove an entire directory tree.
+
+ Any errors are ignored (apart from being reported through 'log.warn()').
+ """
+ global _path_created
+
+ if verbose >= 1:
+ log.info("removing '%s' (and everything under it)", directory)
+ if dry_run:
+ return
+ cmdtuples = []
+ _build_cmdtuple(directory, cmdtuples)
+ for cmd in cmdtuples:
+ try:
+ cmd[0](cmd[1])
+ # remove dir from cache if it's already there
+ abspath = os.path.abspath(cmd[1])
+ if abspath in _path_created:
+ del _path_created[abspath]
+ except OSError as exc:
+ log.warn("error removing %s: %s", directory, exc)
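+
+# Usage sketch (illustrative; the path is hypothetical):
+#
+#     from distutils.dir_util import remove_tree
+#     remove_tree('build', verbose=0)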
+
+def ensure_relative(path):
+ """Take the full path 'path', and make it a relative path.
+
+ This is useful to make 'path' the second argument to os.path.join().
+ """
+ drive, path = os.path.splitdrive(path)
+ if path[0:1] == os.sep:
+ path = drive + path[1:]
+ return path
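+
+# Usage sketch (illustrative).  The leading separator is dropped so the result
+# can be used as the second argument to os.path.join():
+#
+#     from distutils.dir_util import ensure_relative
+#     ensure_relative('/usr/lib/python')    # -> 'usr/lib/python' on POSIX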
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dist.py
new file mode 100644
index 00000000..6cf0a0d6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/dist.py
@@ -0,0 +1,1256 @@
+"""distutils.dist
+
+Provides the Distribution class, which represents the module distribution
+being built/installed/distributed.
+"""
+
+import sys
+import os
+import re
+from email import message_from_file
+
+try:
+ import warnings
+except ImportError:
+ warnings = None
+
+from distutils.errors import *
+from distutils.fancy_getopt import FancyGetopt, translate_longopt
+from distutils.util import check_environ, strtobool, rfc822_escape
+from distutils import log
+from distutils.debug import DEBUG
+
+# Regex to define acceptable Distutils command names. This is not *quite*
+# the same as a Python NAME -- I don't allow leading underscores. The fact
+# that they're very similar is no coincidence; the default naming scheme is
+# to look for a Python module named after the command.
+command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
+
+
+def _ensure_list(value, fieldname):
+ if isinstance(value, str):
+ # a string containing comma separated values is okay. It will
+ # be converted to a list by Distribution.finalize_options().
+ pass
+ elif not isinstance(value, list):
+ # passing a tuple or an iterator perhaps, warn and convert
+ typename = type(value).__name__
+ msg = f"Warning: '{fieldname}' should be a list, got type '{typename}'"
+ log.log(log.WARN, msg)
+ value = list(value)
+ return value
+
+
+class Distribution:
+ """The core of the Distutils. Most of the work hiding behind 'setup'
+ is really done within a Distribution instance, which farms the work out
+ to the Distutils commands specified on the command line.
+
+ Setup scripts will almost never instantiate Distribution directly,
+ unless the 'setup()' function is totally inadequate to their needs.
+ However, it is conceivable that a setup script might wish to subclass
+ Distribution for some specialized purpose, and then pass the subclass
+ to 'setup()' as the 'distclass' keyword argument. If so, it is
+ necessary to respect the expectations that 'setup' has of Distribution.
+ See the code for 'setup()', in core.py, for details.
+ """
+
+ # 'global_options' describes the command-line options that may be
+ # supplied to the setup script prior to any actual commands.
+ # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
+ # these global options. This list should be kept to a bare minimum,
+ # since every global option is also valid as a command option -- and we
+ # don't want to pollute the commands with too many options that they
+ # have minimal control over.
+ # The fourth entry for verbose means that it can be repeated.
+ global_options = [
+ ('verbose', 'v', "run verbosely (default)", 1),
+ ('quiet', 'q', "run quietly (turns verbosity off)"),
+ ('dry-run', 'n', "don't actually do anything"),
+ ('help', 'h', "show detailed help message"),
+ ('no-user-cfg', None,
+ 'ignore pydistutils.cfg in your home directory'),
+ ]
+
+ # 'common_usage' is a short (2-3 line) string describing the common
+ # usage of the setup script.
+ common_usage = """\
+Common commands: (see '--help-commands' for more)
+
+ setup.py build will build the package underneath 'build/'
+ setup.py install will install the package
+"""
+
+ # options that are not propagated to the commands
+ display_options = [
+ ('help-commands', None,
+ "list all available commands"),
+ ('name', None,
+ "print package name"),
+ ('version', 'V',
+ "print package version"),
+ ('fullname', None,
+ "print <package name>-<version>"),
+ ('author', None,
+ "print the author's name"),
+ ('author-email', None,
+ "print the author's email address"),
+ ('maintainer', None,
+ "print the maintainer's name"),
+ ('maintainer-email', None,
+ "print the maintainer's email address"),
+ ('contact', None,
+ "print the maintainer's name if known, else the author's"),
+ ('contact-email', None,
+ "print the maintainer's email address if known, else the author's"),
+ ('url', None,
+ "print the URL for this package"),
+ ('license', None,
+ "print the license of the package"),
+ ('licence', None,
+ "alias for --license"),
+ ('description', None,
+ "print the package description"),
+ ('long-description', None,
+ "print the long package description"),
+ ('platforms', None,
+ "print the list of platforms"),
+ ('classifiers', None,
+ "print the list of classifiers"),
+ ('keywords', None,
+ "print the list of keywords"),
+ ('provides', None,
+ "print the list of packages/modules provided"),
+ ('requires', None,
+ "print the list of packages/modules required"),
+ ('obsoletes', None,
+ "print the list of packages/modules made obsolete")
+ ]
+ display_option_names = [translate_longopt(x[0]) for x in display_options]
+
+ # negative options are options that exclude other options
+ negative_opt = {'quiet': 'verbose'}
+
+ # -- Creation/initialization methods -------------------------------
+
+ def __init__(self, attrs=None):
+ """Construct a new Distribution instance: initialize all the
+ attributes of a Distribution, and then use 'attrs' (a dictionary
+ mapping attribute names to values) to assign some of those
+ attributes their "real" values. (Any attributes not mentioned in
+ 'attrs' will be assigned to some null value: 0, None, an empty list
+ or dictionary, etc.) Most importantly, initialize the
+ 'command_obj' attribute to the empty dictionary; this will be
+ filled in with real command objects by 'parse_command_line()'.
+ """
+
+ # Default values for our command-line options
+ self.verbose = 1
+ self.dry_run = 0
+ self.help = 0
+ for attr in self.display_option_names:
+ setattr(self, attr, 0)
+
+ # Store the distribution meta-data (name, version, author, and so
+ # forth) in a separate object -- we're getting to have enough
+ # information here (and enough command-line options) that it's
+ # worth it. Also delegate 'get_XXX()' methods to the 'metadata'
+ # object in a sneaky and underhanded (but efficient!) way.
+ self.metadata = DistributionMetadata()
+ for basename in self.metadata._METHOD_BASENAMES:
+ method_name = "get_" + basename
+ setattr(self, method_name, getattr(self.metadata, method_name))
+
+ # 'cmdclass' maps command names to class objects, so we
+ # can 1) quickly figure out which class to instantiate when
+ # we need to create a new command object, and 2) have a way
+ # for the setup script to override command classes
+ self.cmdclass = {}
+
+ # 'command_packages' is a list of packages in which commands
+ # are searched for. The factory for command 'foo' is expected
+ # to be named 'foo' in the module 'foo' in one of the packages
+ # named here. This list is searched from the left; an error
+ # is raised if no named package provides the command being
+ # searched for. (Always access using get_command_packages().)
+ self.command_packages = None
+
+ # 'script_name' and 'script_args' are usually set to sys.argv[0]
+ # and sys.argv[1:], but they can be overridden when the caller is
+ # not necessarily a setup script run from the command-line.
+ self.script_name = None
+ self.script_args = None
+
+ # 'command_options' is where we store command options between
+ # parsing them (from config files, the command-line, etc.) and when
+ # they are actually needed -- ie. when the command in question is
+ # instantiated. It is a dictionary of dictionaries of 2-tuples:
+ # command_options = { command_name : { option : (source, value) } }
+ self.command_options = {}
+
+ # 'dist_files' is the list of (command, pyversion, file) that
+ # have been created by any dist commands run so far. This is
+ # filled regardless of whether the run is dry or not. pyversion
+ # gives sysconfig.get_python_version() if the dist file is
+ # specific to a Python version, 'any' if it is good for all
+ # Python versions on the target platform, and '' for a source
+ # file. pyversion should not be used to specify minimum or
+ # maximum required Python versions; use the metainfo for that
+ # instead.
+ self.dist_files = []
+
+ # These options are really the business of various commands, rather
+ # than of the Distribution itself. We provide aliases for them in
+ # Distribution as a convenience to the developer.
+ self.packages = None
+ self.package_data = {}
+ self.package_dir = None
+ self.py_modules = None
+ self.libraries = None
+ self.headers = None
+ self.ext_modules = None
+ self.ext_package = None
+ self.include_dirs = None
+ self.extra_path = None
+ self.scripts = None
+ self.data_files = None
+ self.password = ''
+
+ # And now initialize bookkeeping stuff that can't be supplied by
+ # the caller at all. 'command_obj' maps command names to
+ # Command instances -- that's how we enforce that every command
+ # class is a singleton.
+ self.command_obj = {}
+
+ # 'have_run' maps command names to boolean values; it keeps track
+ # of whether we have actually run a particular command, to make it
+ # cheap to "run" a command whenever we think we might need to -- if
+ # it's already been done, no need for expensive filesystem
+ # operations, we just check the 'have_run' dictionary and carry on.
+ # It's only safe to query 'have_run' for a command class that has
+ # been instantiated -- a false value will be inserted when the
+ # command object is created, and replaced with a true value when
+ # the command is successfully run. Thus it's probably best to use
+ # '.get()' rather than a straight lookup.
+ self.have_run = {}
+
+ # Now we'll use the attrs dictionary (ultimately, keyword args from
+ # the setup script) to possibly override any or all of these
+ # distribution options.
+
+ if attrs:
+ # Pull out the set of command options and work on them
+ # specifically. Note that this order guarantees that aliased
+ # command options will override any supplied redundantly
+ # through the general options dictionary.
+ options = attrs.get('options')
+ if options is not None:
+ del attrs['options']
+ for (command, cmd_options) in options.items():
+ opt_dict = self.get_option_dict(command)
+ for (opt, val) in cmd_options.items():
+ opt_dict[opt] = ("setup script", val)
+
+ if 'licence' in attrs:
+ attrs['license'] = attrs['licence']
+ del attrs['licence']
+ msg = "'licence' distribution option is deprecated; use 'license'"
+ if warnings is not None:
+ warnings.warn(msg)
+ else:
+ sys.stderr.write(msg + "\n")
+
+ # Now work on the rest of the attributes. Any attribute that's
+ # not already defined is invalid!
+ for (key, val) in attrs.items():
+ if hasattr(self.metadata, "set_" + key):
+ getattr(self.metadata, "set_" + key)(val)
+ elif hasattr(self.metadata, key):
+ setattr(self.metadata, key, val)
+ elif hasattr(self, key):
+ setattr(self, key, val)
+ else:
+ msg = "Unknown distribution option: %s" % repr(key)
+ warnings.warn(msg)
+
+ # no-user-cfg is handled before other command line args
+ # because other args override the config files, and this
+ # one is needed before we can load the config files.
+ # If attrs['script_args'] wasn't passed, assume false.
+ #
+ # This also makes sure we just look at the global options
+ self.want_user_cfg = True
+
+ if self.script_args is not None:
+ for arg in self.script_args:
+ if not arg.startswith('-'):
+ break
+ if arg == '--no-user-cfg':
+ self.want_user_cfg = False
+ break
+
+ self.finalize_options()
+
+ def get_option_dict(self, command):
+ """Get the option dictionary for a given command. If that
+ command's option dictionary hasn't been created yet, then create it
+ and return the new dictionary; otherwise, return the existing
+ option dictionary.
+ """
+ dict = self.command_options.get(command)
+ if dict is None:
+ dict = self.command_options[command] = {}
+ return dict
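+
+ # Illustrative shape of the returned dict (values are hypothetical): after
+ # "setup.py build_ext --inplace" plus a [build_ext] section in setup.cfg,
+ # one might see something like
+ #
+ #     dist.get_option_dict('build_ext')
+ #     -> {'inplace': ('command line', 1),
+ #         'force':   ('setup.cfg', '1')}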
+
+ def dump_option_dicts(self, header=None, commands=None, indent=""):
+ from pprint import pformat
+
+ if commands is None: # dump all command option dicts
+ commands = sorted(self.command_options.keys())
+
+ if header is not None:
+ self.announce(indent + header)
+ indent = indent + " "
+
+ if not commands:
+ self.announce(indent + "no commands known yet")
+ return
+
+ for cmd_name in commands:
+ opt_dict = self.command_options.get(cmd_name)
+ if opt_dict is None:
+ self.announce(indent +
+ "no option dict for '%s' command" % cmd_name)
+ else:
+ self.announce(indent +
+ "option dict for '%s' command:" % cmd_name)
+ out = pformat(opt_dict)
+ for line in out.split('\n'):
+ self.announce(indent + " " + line)
+
+ # -- Config file finding/parsing methods ---------------------------
+
+ def find_config_files(self):
+ """Find as many configuration files as should be processed for this
+ platform, and return a list of filenames in the order in which they
+ should be parsed. The filenames returned are guaranteed to exist
+ (modulo nasty race conditions).
+
+ There are three possible config files: distutils.cfg in the
+ Distutils installation directory (ie. where the top-level
+ Distutils __init__.py file lives), a file in the user's home
+ directory named .pydistutils.cfg on Unix and pydistutils.cfg
+ on Windows/Mac; and setup.cfg in the current directory.
+
+ The file in the user's home directory can be disabled with the
+ --no-user-cfg option.
+ """
+ files = []
+ check_environ()
+
+ # Where to look for the system-wide Distutils config file
+ sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
+
+ # Look for the system config file
+ sys_file = os.path.join(sys_dir, "distutils.cfg")
+ if os.path.isfile(sys_file):
+ files.append(sys_file)
+
+ # What to call the per-user config file
+ if os.name == 'posix':
+ user_filename = ".pydistutils.cfg"
+ else:
+ user_filename = "pydistutils.cfg"
+
+ # And look for the user config file
+ if self.want_user_cfg:
+ user_file = os.path.join(os.path.expanduser('~'), user_filename)
+ if os.path.isfile(user_file):
+ files.append(user_file)
+
+ # All platforms support local setup.cfg
+ local_file = "setup.cfg"
+ if os.path.isfile(local_file):
+ files.append(local_file)
+
+ if DEBUG:
+ self.announce("using config files: %s" % ', '.join(files))
+
+ return files
+
+ def parse_config_files(self, filenames=None):
+ from configparser import ConfigParser
+
+ # Ignore install directory options if we have a venv
+ if sys.prefix != sys.base_prefix:
+ ignore_options = [
+ 'install-base', 'install-platbase', 'install-lib',
+ 'install-platlib', 'install-purelib', 'install-headers',
+ 'install-scripts', 'install-data', 'prefix', 'exec-prefix',
+ 'home', 'user', 'root']
+ else:
+ ignore_options = []
+
+ ignore_options = frozenset(ignore_options)
+
+ if filenames is None:
+ filenames = self.find_config_files()
+
+ if DEBUG:
+ self.announce("Distribution.parse_config_files():")
+
+ parser = ConfigParser()
+ for filename in filenames:
+ if DEBUG:
+ self.announce(" reading %s" % filename)
+ parser.read(filename)
+ for section in parser.sections():
+ options = parser.options(section)
+ opt_dict = self.get_option_dict(section)
+
+ for opt in options:
+ if opt != '__name__' and opt not in ignore_options:
+ val = parser.get(section,opt)
+ opt = opt.replace('-', '_')
+ opt_dict[opt] = (filename, val)
+
+ # Make the ConfigParser forget everything (so we retain
+ # the original filenames that options come from)
+ parser.__init__()
+
+ # If there was a "global" section in the config file, use it
+ # to set Distribution options.
+
+ if 'global' in self.command_options:
+ for (opt, (src, val)) in self.command_options['global'].items():
+ alias = self.negative_opt.get(opt)
+ try:
+ if alias:
+ setattr(self, alias, not strtobool(val))
+ elif opt in ('verbose', 'dry_run'): # ugh!
+ setattr(self, opt, strtobool(val))
+ else:
+ setattr(self, opt, val)
+ except ValueError as msg:
+ raise DistutilsOptionError(msg)
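+
+ # Illustrative mapping (values are hypothetical): a setup.cfg containing
+ #
+ #     [build]
+ #     build-base = /tmp/build
+ #
+ # ends up, after the '-' -> '_' translation above, as
+ #
+ #     self.command_options['build']['build_base'] == ('setup.cfg',
+ #                                                     '/tmp/build')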
+
+ # -- Command-line parsing methods ----------------------------------
+
+ def parse_command_line(self):
+ """Parse the setup script's command line, taken from the
+ 'script_args' instance attribute (which defaults to 'sys.argv[1:]'
+ -- see 'setup()' in core.py). This list is first processed for
+ "global options" -- options that set attributes of the Distribution
+ instance. Then, it is alternately scanned for Distutils commands
+ and options for that command. Each new command terminates the
+ options for the previous command. The allowed options for a
+ command are determined by the 'user_options' attribute of the
+ command class -- thus, we have to be able to load command classes
+ in order to parse the command line. Any error in that 'options'
+ attribute raises DistutilsGetoptError; any error on the
+ command-line raises DistutilsArgError. If no Distutils commands
+ were found on the command line, raises DistutilsArgError. Return
+ true if command-line was successfully parsed and we should carry
+ on with executing commands; false if no errors but we shouldn't
+ execute commands (currently, this only happens if user asks for
+ help).
+ """
+ #
+ # We now have enough information to show the Macintosh dialog
+ # that allows the user to interactively specify the "command line".
+ #
+ toplevel_options = self._get_toplevel_options()
+
+ # We have to parse the command line a bit at a time -- global
+ # options, then the first command, then its options, and so on --
+ # because each command will be handled by a different class, and
+ # the options that are valid for a particular class aren't known
+ # until we have loaded the command class, which doesn't happen
+ # until we know what the command is.
+
+ self.commands = []
+ parser = FancyGetopt(toplevel_options + self.display_options)
+ parser.set_negative_aliases(self.negative_opt)
+ parser.set_aliases({'licence': 'license'})
+ args = parser.getopt(args=self.script_args, object=self)
+ option_order = parser.get_option_order()
+ log.set_verbosity(self.verbose)
+
+ # for display options we return immediately
+ if self.handle_display_options(option_order):
+ return
+ while args:
+ args = self._parse_command_opts(parser, args)
+ if args is None: # user asked for help (and got it)
+ return
+
+ # Handle the cases of --help as a "global" option, ie.
+ # "setup.py --help" and "setup.py --help command ...". For the
+ # former, we show global options (--verbose, --dry-run, etc.)
+ # and display-only options (--name, --version, etc.); for the
+ # latter, we omit the display-only options and show help for
+ # each command listed on the command line.
+ if self.help:
+ self._show_help(parser,
+ display_options=len(self.commands) == 0,
+ commands=self.commands)
+ return
+
+ # Oops, no commands found -- an end-user error
+ if not self.commands:
+ raise DistutilsArgError("no commands supplied")
+
+ # All is well: return true
+ return True
+
+ def _get_toplevel_options(self):
+ """Return the non-display options recognized at the top level.
+
+ This includes options that are recognized *only* at the top
+ level as well as options recognized for commands.
+ """
+ return self.global_options + [
+ ("command-packages=", None,
+ "list of packages that provide distutils commands"),
+ ]
+
+ def _parse_command_opts(self, parser, args):
+ """Parse the command-line options for a single command.
+ 'parser' must be a FancyGetopt instance; 'args' must be the list
+ of arguments, starting with the current command (whose options
+ we are about to parse). Returns a new version of 'args' with
+ the next command at the front of the list; will be the empty
+ list if there are no more commands on the command line. Returns
+ None if the user asked for help on this command.
+ """
+ # late import because of mutual dependence between these modules
+ from distutils.cmd import Command
+
+ # Pull the current command from the head of the command line
+ command = args[0]
+ if not command_re.match(command):
+ raise SystemExit("invalid command name '%s'" % command)
+ self.commands.append(command)
+
+ # Dig up the command class that implements this command, so we
+ # 1) know that it's a valid command, and 2) know which options
+ # it takes.
+ try:
+ cmd_class = self.get_command_class(command)
+ except DistutilsModuleError as msg:
+ raise DistutilsArgError(msg)
+
+ # Require that the command class be derived from Command -- want
+ # to be sure that the basic "command" interface is implemented.
+ if not issubclass(cmd_class, Command):
+ raise DistutilsClassError(
+ "command class %s must subclass Command" % cmd_class)
+
+ # Also make sure that the command object provides a list of its
+ # known options.
+ if not (hasattr(cmd_class, 'user_options') and
+ isinstance(cmd_class.user_options, list)):
+ msg = ("command class %s must provide "
+ "'user_options' attribute (a list of tuples)")
+ raise DistutilsClassError(msg % cmd_class)
+
+ # If the command class has a list of negative alias options,
+ # merge it in with the global negative aliases.
+ negative_opt = self.negative_opt
+ if hasattr(cmd_class, 'negative_opt'):
+ negative_opt = negative_opt.copy()
+ negative_opt.update(cmd_class.negative_opt)
+
+ # Check for help_options in command class. They have a different
+ # format (tuple of four) so we need to preprocess them here.
+ if (hasattr(cmd_class, 'help_options') and
+ isinstance(cmd_class.help_options, list)):
+ help_options = fix_help_options(cmd_class.help_options)
+ else:
+ help_options = []
+
+ # All commands support the global options too, just by adding
+ # in 'global_options'.
+ parser.set_option_table(self.global_options +
+ cmd_class.user_options +
+ help_options)
+ parser.set_negative_aliases(negative_opt)
+ (args, opts) = parser.getopt(args[1:])
+ if hasattr(opts, 'help') and opts.help:
+ self._show_help(parser, display_options=0, commands=[cmd_class])
+ return
+
+ if (hasattr(cmd_class, 'help_options') and
+ isinstance(cmd_class.help_options, list)):
+ help_option_found=0
+ for (help_option, short, desc, func) in cmd_class.help_options:
+ if hasattr(opts, parser.get_attr_name(help_option)):
+ help_option_found=1
+ if callable(func):
+ func()
+ else:
+ raise DistutilsClassError(
+ "invalid help function %r for help option '%s': "
+ "must be a callable object (function, etc.)"
+ % (func, help_option))
+
+ if help_option_found:
+ return
+
+ # Put the options from the command-line into their official
+ # holding pen, the 'command_options' dictionary.
+ opt_dict = self.get_option_dict(command)
+ for (name, value) in vars(opts).items():
+ opt_dict[name] = ("command line", value)
+
+ return args
+
+ def finalize_options(self):
+ """Set final values for all the options on the Distribution
+ instance, analogous to the .finalize_options() method of Command
+ objects.
+ """
+ for attr in ('keywords', 'platforms'):
+ value = getattr(self.metadata, attr)
+ if value is None:
+ continue
+ if isinstance(value, str):
+ value = [elm.strip() for elm in value.split(',')]
+ setattr(self.metadata, attr, value)
+
+ def _show_help(self, parser, global_options=1, display_options=1,
+ commands=[]):
+ """Show help for the setup script command-line in the form of
+ several lists of command-line options. 'parser' should be a
+ FancyGetopt instance; do not expect it to be returned in the
+ same state, as its option table will be reset to make it
+ generate the correct help text.
+
+ If 'global_options' is true, lists the global options:
+ --verbose, --dry-run, etc. If 'display_options' is true, lists
+ the "display-only" options: --name, --version, etc. Finally,
+ lists per-command help for every command name or command class
+ in 'commands'.
+ """
+ # late import because of mutual dependence between these modules
+ from distutils.core import gen_usage
+ from distutils.cmd import Command
+
+ if global_options:
+ if display_options:
+ options = self._get_toplevel_options()
+ else:
+ options = self.global_options
+ parser.set_option_table(options)
+ parser.print_help(self.common_usage + "\nGlobal options:")
+ print('')
+
+ if display_options:
+ parser.set_option_table(self.display_options)
+ parser.print_help(
+ "Information display options (just display " +
+ "information, ignore any commands)")
+ print('')
+
+ for command in self.commands:
+ if isinstance(command, type) and issubclass(command, Command):
+ klass = command
+ else:
+ klass = self.get_command_class(command)
+ if (hasattr(klass, 'help_options') and
+ isinstance(klass.help_options, list)):
+ parser.set_option_table(klass.user_options +
+ fix_help_options(klass.help_options))
+ else:
+ parser.set_option_table(klass.user_options)
+ parser.print_help("Options for '%s' command:" % klass.__name__)
+ print('')
+
+ print(gen_usage(self.script_name))
+
+ def handle_display_options(self, option_order):
+ """If there were any non-global "display-only" options
+ (--help-commands or the metadata display options) on the command
+ line, display the requested info and return true; else return
+ false.
+ """
+ from distutils.core import gen_usage
+
+ # User just wants a list of commands -- we'll print it out and stop
+ # processing now (ie. if they ran "setup --help-commands foo bar",
+ # we ignore "foo bar").
+ if self.help_commands:
+ self.print_commands()
+ print('')
+ print(gen_usage(self.script_name))
+ return 1
+
+ # If user supplied any of the "display metadata" options, then
+ # display that metadata in the order in which the user supplied the
+ # metadata options.
+ any_display_options = 0
+ is_display_option = {}
+ for option in self.display_options:
+ is_display_option[option[0]] = 1
+
+ for (opt, val) in option_order:
+ if val and is_display_option.get(opt):
+ opt = translate_longopt(opt)
+ value = getattr(self.metadata, "get_"+opt)()
+ if opt in ['keywords', 'platforms']:
+ print(','.join(value))
+ elif opt in ('classifiers', 'provides', 'requires',
+ 'obsoletes'):
+ print('\n'.join(value))
+ else:
+ print(value)
+ any_display_options = 1
+
+ return any_display_options
+
+ def print_command_list(self, commands, header, max_length):
+ """Print a subset of the list of all commands -- used by
+ 'print_commands()'.
+ """
+ print(header + ":")
+
+ for cmd in commands:
+ klass = self.cmdclass.get(cmd)
+ if not klass:
+ klass = self.get_command_class(cmd)
+ try:
+ description = klass.description
+ except AttributeError:
+ description = "(no description available)"
+
+ print(" %-*s %s" % (max_length, cmd, description))
+
+ def print_commands(self):
+ """Print out a help message listing all available commands with a
+ description of each. The list is divided into "standard commands"
+ (listed in distutils.command.__all__) and "extra commands"
+ (mentioned in self.cmdclass, but not a standard command). The
+ descriptions come from the command class attribute
+ 'description'.
+ """
+ import distutils.command
+ std_commands = distutils.command.__all__
+ is_std = {}
+ for cmd in std_commands:
+ is_std[cmd] = 1
+
+ extra_commands = []
+ for cmd in self.cmdclass.keys():
+ if not is_std.get(cmd):
+ extra_commands.append(cmd)
+
+ max_length = 0
+ for cmd in (std_commands + extra_commands):
+ if len(cmd) > max_length:
+ max_length = len(cmd)
+
+ self.print_command_list(std_commands,
+ "Standard commands",
+ max_length)
+ if extra_commands:
+ print()
+ self.print_command_list(extra_commands,
+ "Extra commands",
+ max_length)
+
+ def get_command_list(self):
+ """Get a list of (command, description) tuples.
+ The list is divided into "standard commands" (listed in
+ distutils.command.__all__) and "extra commands" (mentioned in
+ self.cmdclass, but not a standard command). The descriptions come
+ from the command class attribute 'description'.
+ """
+ # Currently this is only used on Mac OS, for the Mac-only GUI
+ # Distutils interface (by Jack Jansen)
+ import distutils.command
+ std_commands = distutils.command.__all__
+ is_std = {}
+ for cmd in std_commands:
+ is_std[cmd] = 1
+
+ extra_commands = []
+ for cmd in self.cmdclass.keys():
+ if not is_std.get(cmd):
+ extra_commands.append(cmd)
+
+ rv = []
+ for cmd in (std_commands + extra_commands):
+ klass = self.cmdclass.get(cmd)
+ if not klass:
+ klass = self.get_command_class(cmd)
+ try:
+ description = klass.description
+ except AttributeError:
+ description = "(no description available)"
+ rv.append((cmd, description))
+ return rv
+
+ # -- Command class/object methods ----------------------------------
+
+ def get_command_packages(self):
+ """Return a list of packages from which commands are loaded."""
+ pkgs = self.command_packages
+ if not isinstance(pkgs, list):
+ if pkgs is None:
+ pkgs = ''
+ pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
+ if "distutils.command" not in pkgs:
+ pkgs.insert(0, "distutils.command")
+ self.command_packages = pkgs
+ return pkgs
+
+ def get_command_class(self, command):
+ """Return the class that implements the Distutils command named by
+ 'command'. First we check the 'cmdclass' dictionary; if the
+ command is mentioned there, we fetch the class object from the
+ dictionary and return it. Otherwise we load the command module
+ ("distutils.command." + command) and fetch the command class from
+ the module. The loaded class is also stored in 'cmdclass'
+ to speed future calls to 'get_command_class()'.
+
+ Raises DistutilsModuleError if the expected module could not be
+ found, or if that module does not define the expected class.
+ """
+ klass = self.cmdclass.get(command)
+ if klass:
+ return klass
+
+ for pkgname in self.get_command_packages():
+ module_name = "%s.%s" % (pkgname, command)
+ klass_name = command
+
+ try:
+ __import__(module_name)
+ module = sys.modules[module_name]
+ except ImportError:
+ continue
+
+ try:
+ klass = getattr(module, klass_name)
+ except AttributeError:
+ raise DistutilsModuleError(
+ "invalid command '%s' (no class '%s' in module '%s')"
+ % (command, klass_name, module_name))
+
+ self.cmdclass[command] = klass
+ return klass
+
+ raise DistutilsModuleError("invalid command '%s'" % command)
+
+ def get_command_obj(self, command, create=1):
+ """Return the command object for 'command'. Normally this object
+ is cached on a previous call to 'get_command_obj()'; if no command
+ object for 'command' is in the cache, then we either create and
+ return it (if 'create' is true) or return None.
+ """
+ cmd_obj = self.command_obj.get(command)
+ if not cmd_obj and create:
+ if DEBUG:
+ self.announce("Distribution.get_command_obj(): "
+ "creating '%s' command object" % command)
+
+ klass = self.get_command_class(command)
+ cmd_obj = self.command_obj[command] = klass(self)
+ self.have_run[command] = 0
+
+ # Set any options that were supplied in config files
+ # or on the command line. (NB. support for error
+ # reporting is lame here: any errors aren't reported
+ # until 'finalize_options()' is called, which means
+ # we won't report the source of the error.)
+ options = self.command_options.get(command)
+ if options:
+ self._set_command_options(cmd_obj, options)
+
+ return cmd_obj
+
+ def _set_command_options(self, command_obj, option_dict=None):
+ """Set the options for 'command_obj' from 'option_dict'. Basically
+ this means copying elements of a dictionary ('option_dict') to
+ attributes of an instance ('command').
+
+ 'command_obj' must be a Command instance. If 'option_dict' is not
+ supplied, uses the standard option dictionary for this command
+ (from 'self.command_options').
+ """
+ command_name = command_obj.get_command_name()
+ if option_dict is None:
+ option_dict = self.get_option_dict(command_name)
+
+ if DEBUG:
+ self.announce(" setting options for '%s' command:" % command_name)
+ for (option, (source, value)) in option_dict.items():
+ if DEBUG:
+ self.announce(" %s = %s (from %s)" % (option, value,
+ source))
+ try:
+ bool_opts = [translate_longopt(o)
+ for o in command_obj.boolean_options]
+ except AttributeError:
+ bool_opts = []
+ try:
+ neg_opt = command_obj.negative_opt
+ except AttributeError:
+ neg_opt = {}
+
+ try:
+ is_string = isinstance(value, str)
+ if option in neg_opt and is_string:
+ setattr(command_obj, neg_opt[option], not strtobool(value))
+ elif option in bool_opts and is_string:
+ setattr(command_obj, option, strtobool(value))
+ elif hasattr(command_obj, option):
+ setattr(command_obj, option, value)
+ else:
+ raise DistutilsOptionError(
+ "error in %s: command '%s' has no such option '%s'"
+ % (source, command_name, option))
+ except ValueError as msg:
+ raise DistutilsOptionError(msg)
+
+ def reinitialize_command(self, command, reinit_subcommands=0):
+ """Reinitializes a command to the state it was in when first
+ returned by 'get_command_obj()': ie., initialized but not yet
+ finalized. This provides the opportunity to sneak option
+ values in programmatically, overriding or supplementing
+ user-supplied values from the config files and command line.
+ You'll have to re-finalize the command object (by calling
+ 'finalize_options()' or 'ensure_finalized()') before using it for
+ real.
+
+ 'command' should be a command name (string) or command object. If
+ 'reinit_subcommands' is true, also reinitializes the command's
+ sub-commands, as declared by the 'sub_commands' class attribute (if
+ it has one). See the "install" command for an example. Only
+ reinitializes the sub-commands that actually matter, ie. those
+ whose test predicates return true.
+
+ Returns the reinitialized command object.
+ """
+ from distutils.cmd import Command
+ if not isinstance(command, Command):
+ command_name = command
+ command = self.get_command_obj(command_name)
+ else:
+ command_name = command.get_command_name()
+
+ if not command.finalized:
+ return command
+ command.initialize_options()
+ command.finalized = 0
+ self.have_run[command_name] = 0
+ self._set_command_options(command)
+
+ if reinit_subcommands:
+ for sub in command.get_sub_commands():
+ self.reinitialize_command(sub, reinit_subcommands)
+
+ return command
+
+ # -- Methods that operate on the Distribution ----------------------
+
+ def announce(self, msg, level=log.INFO):
+ log.log(level, msg)
+
+ def run_commands(self):
+ """Run each command that was seen on the setup script command line.
+ Uses the list of commands found and cache of command objects
+ created by 'get_command_obj()'.
+ """
+ for cmd in self.commands:
+ self.run_command(cmd)
+
+ # -- Methods that operate on its Commands --------------------------
+
+ def run_command(self, command):
+ """Do whatever it takes to run a command (including nothing at all,
+ if the command has already been run). Specifically: if we have
+ already created and run the command named by 'command', return
+ silently without doing anything. If the command named by 'command'
+ doesn't even have a command object yet, create one. Then invoke
+ 'run()' on that command object (or an existing one).
+ """
+ # Already been here, done that? then return silently.
+ if self.have_run.get(command):
+ return
+
+ log.info("running %s", command)
+ cmd_obj = self.get_command_obj(command)
+ cmd_obj.ensure_finalized()
+ cmd_obj.run()
+ self.have_run[command] = 1
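+
+ # Rough sketch (simplified, error handling omitted) of how a caller such
+ # as distutils.core.setup() typically drives this class; the attribute
+ # values are hypothetical:
+ #
+ #     dist = Distribution({'name': 'demo', 'version': '0.1',
+ #                          'script_name': 'setup.py',
+ #                          'script_args': ['build']})
+ #     dist.parse_config_files()
+ #     if dist.parse_command_line():
+ #         dist.run_commands()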
+
+ # -- Distribution query methods ------------------------------------
+
+ def has_pure_modules(self):
+ return len(self.packages or self.py_modules or []) > 0
+
+ def has_ext_modules(self):
+ return self.ext_modules and len(self.ext_modules) > 0
+
+ def has_c_libraries(self):
+ return self.libraries and len(self.libraries) > 0
+
+ def has_modules(self):
+ return self.has_pure_modules() or self.has_ext_modules()
+
+ def has_headers(self):
+ return self.headers and len(self.headers) > 0
+
+ def has_scripts(self):
+ return self.scripts and len(self.scripts) > 0
+
+ def has_data_files(self):
+ return self.data_files and len(self.data_files) > 0
+
+ def is_pure(self):
+ return (self.has_pure_modules() and
+ not self.has_ext_modules() and
+ not self.has_c_libraries())
+
+ # -- Metadata query methods ----------------------------------------
+
+ # If you're looking for 'get_name()', 'get_version()', and so forth,
+ # they are defined in a sneaky way: the constructor binds self.get_XXX
+ # to self.metadata.get_XXX. The actual code is in the
+ # DistributionMetadata class, below.
+
+class DistributionMetadata:
+ """Dummy class to hold the distribution meta-data: name, version,
+ author, and so forth.
+ """
+
+ _METHOD_BASENAMES = ("name", "version", "author", "author_email",
+ "maintainer", "maintainer_email", "url",
+ "license", "description", "long_description",
+ "keywords", "platforms", "fullname", "contact",
+ "contact_email", "classifiers", "download_url",
+ # PEP 314
+ "provides", "requires", "obsoletes",
+ )
+
+ def __init__(self, path=None):
+ if path is not None:
+ self.read_pkg_file(open(path))
+ else:
+ self.name = None
+ self.version = None
+ self.author = None
+ self.author_email = None
+ self.maintainer = None
+ self.maintainer_email = None
+ self.url = None
+ self.license = None
+ self.description = None
+ self.long_description = None
+ self.keywords = None
+ self.platforms = None
+ self.classifiers = None
+ self.download_url = None
+ # PEP 314
+ self.provides = None
+ self.requires = None
+ self.obsoletes = None
+
+ def read_pkg_file(self, file):
+ """Reads the metadata values from a file object."""
+ msg = message_from_file(file)
+
+ def _read_field(name):
+ value = msg[name]
+ if value == 'UNKNOWN':
+ return None
+ return value
+
+ def _read_list(name):
+ values = msg.get_all(name, None)
+ if values == []:
+ return None
+ return values
+
+ metadata_version = msg['metadata-version']
+ self.name = _read_field('name')
+ self.version = _read_field('version')
+ self.description = _read_field('summary')
+ # we are filling author only.
+ self.author = _read_field('author')
+ self.maintainer = None
+ self.author_email = _read_field('author-email')
+ self.maintainer_email = None
+ self.url = _read_field('home-page')
+ self.license = _read_field('license')
+
+ if 'download-url' in msg:
+ self.download_url = _read_field('download-url')
+ else:
+ self.download_url = None
+
+ self.long_description = _read_field('description')
+ self.description = _read_field('summary')
+
+ if 'keywords' in msg:
+ self.keywords = _read_field('keywords').split(',')
+
+ self.platforms = _read_list('platform')
+ self.classifiers = _read_list('classifier')
+
+ # PEP 314 - these fields only exist in 1.1
+ if metadata_version == '1.1':
+ self.requires = _read_list('requires')
+ self.provides = _read_list('provides')
+ self.obsoletes = _read_list('obsoletes')
+ else:
+ self.requires = None
+ self.provides = None
+ self.obsoletes = None
+
+ def write_pkg_info(self, base_dir):
+ """Write the PKG-INFO file into the release tree.
+ """
+ with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
+ encoding='UTF-8') as pkg_info:
+ self.write_pkg_file(pkg_info)
+
+ def write_pkg_file(self, file):
+ """Write the PKG-INFO format data to a file object.
+ """
+ version = '1.0'
+ if (self.provides or self.requires or self.obsoletes or
+ self.classifiers or self.download_url):
+ version = '1.1'
+
+ file.write('Metadata-Version: %s\n' % version)
+ file.write('Name: %s\n' % self.get_name())
+ file.write('Version: %s\n' % self.get_version())
+ file.write('Summary: %s\n' % self.get_description())
+ file.write('Home-page: %s\n' % self.get_url())
+ file.write('Author: %s\n' % self.get_contact())
+ file.write('Author-email: %s\n' % self.get_contact_email())
+ file.write('License: %s\n' % self.get_license())
+ if self.download_url:
+ file.write('Download-URL: %s\n' % self.download_url)
+
+ long_desc = rfc822_escape(self.get_long_description())
+ file.write('Description: %s\n' % long_desc)
+
+ keywords = ','.join(self.get_keywords())
+ if keywords:
+ file.write('Keywords: %s\n' % keywords)
+
+ self._write_list(file, 'Platform', self.get_platforms())
+ self._write_list(file, 'Classifier', self.get_classifiers())
+
+ # PEP 314
+ self._write_list(file, 'Requires', self.get_requires())
+ self._write_list(file, 'Provides', self.get_provides())
+ self._write_list(file, 'Obsoletes', self.get_obsoletes())
+
+ def _write_list(self, file, name, values):
+ for value in values:
+ file.write('%s: %s\n' % (name, value))
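+
+ # Illustrative stand-alone use of the metadata object (values are
+ # hypothetical); with no PEP 314 fields set this writes a
+ # Metadata-Version 1.0 PKG-INFO file in the current directory:
+ #
+ #     meta = DistributionMetadata()
+ #     meta.name, meta.version = 'demo', '0.1'
+ #     meta.write_pkg_info('.')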
+
+ # -- Metadata query methods ----------------------------------------
+
+ def get_name(self):
+ return self.name or "UNKNOWN"
+
+ def get_version(self):
+ return self.version or "0.0.0"
+
+ def get_fullname(self):
+ return "%s-%s" % (self.get_name(), self.get_version())
+
+ def get_author(self):
+ return self.author or "UNKNOWN"
+
+ def get_author_email(self):
+ return self.author_email or "UNKNOWN"
+
+ def get_maintainer(self):
+ return self.maintainer or "UNKNOWN"
+
+ def get_maintainer_email(self):
+ return self.maintainer_email or "UNKNOWN"
+
+ def get_contact(self):
+ return self.maintainer or self.author or "UNKNOWN"
+
+ def get_contact_email(self):
+ return self.maintainer_email or self.author_email or "UNKNOWN"
+
+ def get_url(self):
+ return self.url or "UNKNOWN"
+
+ def get_license(self):
+ return self.license or "UNKNOWN"
+ get_licence = get_license
+
+ def get_description(self):
+ return self.description or "UNKNOWN"
+
+ def get_long_description(self):
+ return self.long_description or "UNKNOWN"
+
+ def get_keywords(self):
+ return self.keywords or []
+
+ def set_keywords(self, value):
+ self.keywords = _ensure_list(value, 'keywords')
+
+ def get_platforms(self):
+ return self.platforms or ["UNKNOWN"]
+
+ def set_platforms(self, value):
+ self.platforms = _ensure_list(value, 'platforms')
+
+ def get_classifiers(self):
+ return self.classifiers or []
+
+ def set_classifiers(self, value):
+ self.classifiers = _ensure_list(value, 'classifiers')
+
+ def get_download_url(self):
+ return self.download_url or "UNKNOWN"
+
+ # PEP 314
+ def get_requires(self):
+ return self.requires or []
+
+ def set_requires(self, value):
+ import distutils.versionpredicate
+ for v in value:
+ distutils.versionpredicate.VersionPredicate(v)
+ self.requires = list(value)
+
+ def get_provides(self):
+ return self.provides or []
+
+ def set_provides(self, value):
+ value = [v.strip() for v in value]
+ for v in value:
+ import distutils.versionpredicate
+ distutils.versionpredicate.split_provision(v)
+ self.provides = value
+
+ def get_obsoletes(self):
+ return self.obsoletes or []
+
+ def set_obsoletes(self, value):
+ import distutils.versionpredicate
+ for v in value:
+ distutils.versionpredicate.VersionPredicate(v)
+ self.obsoletes = list(value)
+
+def fix_help_options(options):
+ """Convert a 4-tuple 'help_options' list as found in various command
+ classes to the 3-tuple form required by FancyGetopt.
+ """
+ new_options = []
+ for help_tuple in options:
+ new_options.append(help_tuple[0:3])
+ return new_options
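+
+# Illustrative conversion (the 'show_compilers' callable is hypothetical):
+#
+#     fix_help_options([('help-compiler', None, "list available compilers",
+#                        show_compilers)])
+#     -> [('help-compiler', None, 'list available compilers')]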
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/errors.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/errors.py
new file mode 100644
index 00000000..8b93059e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/errors.py
@@ -0,0 +1,97 @@
+"""distutils.errors
+
+Provides exceptions used by the Distutils modules. Note that Distutils
+modules may raise standard exceptions; in particular, SystemExit is
+usually raised for errors that are obviously the end-user's fault
+(eg. bad command-line arguments).
+
+This module is safe to use in "from ... import *" mode; it only exports
+symbols whose names start with "Distutils" and end with "Error"."""
+
+class DistutilsError (Exception):
+ """The root of all Distutils evil."""
+ pass
+
+class DistutilsModuleError (DistutilsError):
+ """Unable to load an expected module, or to find an expected class
+ within some module (in particular, command modules and classes)."""
+ pass
+
+class DistutilsClassError (DistutilsError):
+ """Some command class (or possibly distribution class, if anyone
+ feels a need to subclass Distribution) is found not to be holding
+ up its end of the bargain, ie. implementing some part of the
+ "command" interface."""
+ pass
+
+class DistutilsGetoptError (DistutilsError):
+ """The option table provided to 'fancy_getopt()' is bogus."""
+ pass
+
+class DistutilsArgError (DistutilsError):
+ """Raised by fancy_getopt in response to getopt.error -- ie. an
+ error in the command line usage."""
+ pass
+
+class DistutilsFileError (DistutilsError):
+ """Any problems in the filesystem: expected file not found, etc.
+ Typically this is for problems that we detect before OSError
+ could be raised."""
+ pass
+
+class DistutilsOptionError (DistutilsError):
+ """Syntactic/semantic errors in command options, such as use of
+ mutually conflicting options, or inconsistent options,
+ badly-spelled values, etc. No distinction is made between option
+ values originating in the setup script, the command line, config
+ files, or what-have-you -- but if we *know* something originated in
+ the setup script, we'll raise DistutilsSetupError instead."""
+ pass
+
+class DistutilsSetupError (DistutilsError):
+ """For errors that can be definitely blamed on the setup script,
+ such as invalid keyword arguments to 'setup()'."""
+ pass
+
+class DistutilsPlatformError (DistutilsError):
+ """We don't know how to do something on the current platform (but
+ we do know how to do it on some platform) -- eg. trying to compile
+ C files on a platform not supported by a CCompiler subclass."""
+ pass
+
+class DistutilsExecError (DistutilsError):
+ """Any problems executing an external program (such as the C
+ compiler, when compiling C files)."""
+ pass
+
+class DistutilsInternalError (DistutilsError):
+ """Internal inconsistencies or impossibilities (obviously, this
+ should never be seen if the code is working!)."""
+ pass
+
+class DistutilsTemplateError (DistutilsError):
+ """Syntax error in a file list template."""
+
+class DistutilsByteCompileError(DistutilsError):
+ """Byte compile error."""
+
+# Exception classes used by the CCompiler implementation classes
+class CCompilerError (Exception):
+ """Some compile/link operation failed."""
+
+class PreprocessError (CCompilerError):
+ """Failure to preprocess one or more C/C++ files."""
+
+class CompileError (CCompilerError):
+ """Failure to compile one or more C/C++ source files."""
+
+class LibError (CCompilerError):
+ """Failure to create a static library from one or more C/C++ object
+ files."""
+
+class LinkError (CCompilerError):
+ """Failure to link one or more C/C++ object files into an executable
+ or shared library file."""
+
+class UnknownFileError (CCompilerError):
+ """Attempt to process an unknown file type."""
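+
+# Illustrative use by a caller of the distutils API (the run_build() helper is
+# hypothetical); DistutilsError is the common base of the Distutils-specific
+# exceptions above:
+#
+#     from distutils.errors import DistutilsError
+#     try:
+#         run_build()
+#     except DistutilsError as exc:
+#         print("build failed:", exc)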
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/extension.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/extension.py
new file mode 100644
index 00000000..c507da36
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/extension.py
@@ -0,0 +1,240 @@
+"""distutils.extension
+
+Provides the Extension class, used to describe C/C++ extension
+modules in setup scripts."""
+
+import os
+import warnings
+
+# This class is really only used by the "build_ext" command, so it might
+# make sense to put it in distutils.command.build_ext. However, that
+# module is already big enough, and I want to make this class a bit more
+# complex to simplify some common cases ("foo" module in "foo.c") and do
+# better error-checking ("foo.c" actually exists).
+#
+# Also, putting this in build_ext.py means every setup script would have to
+# import that large-ish module (indirectly, through distutils.core) in
+# order to do anything.
+
+class Extension:
+ """Just a collection of attributes that describes an extension
+ module and everything needed to build it (hopefully in a portable
+ way, but there are hooks that let you be as unportable as you need).
+
+ Instance attributes:
+ name : string
+ the full name of the extension, including any packages -- ie.
+ *not* a filename or pathname, but Python dotted name
+ sources : [string]
+ list of source filenames, relative to the distribution root
+ (where the setup script lives), in Unix form (slash-separated)
+ for portability. Source files may be C, C++, SWIG (.i),
+ platform-specific resource files, or whatever else is recognized
+ by the "build_ext" command as source for a Python extension.
+ include_dirs : [string]
+ list of directories to search for C/C++ header files (in Unix
+ form for portability)
+ define_macros : [(name : string, value : string|None)]
+ list of macros to define; each macro is defined using a 2-tuple,
+ where 'value' is either the string to define it to or None to
+ define it without a particular value (equivalent of "#define
+ FOO" in source or -DFOO on Unix C compiler command line)
+ undef_macros : [string]
+ list of macros to undefine explicitly
+ library_dirs : [string]
+ list of directories to search for C/C++ libraries at link time
+ libraries : [string]
+ list of library names (not filenames or paths) to link against
+ runtime_library_dirs : [string]
+ list of directories to search for C/C++ libraries at run time
+ (for shared extensions, this is when the extension is loaded)
+ extra_objects : [string]
+ list of extra files to link with (eg. object files not implied
+ by 'sources', static library that must be explicitly specified,
+ binary resource files, etc.)
+ extra_compile_args : [string]
+ any extra platform- and compiler-specific information to use
+ when compiling the source files in 'sources'. For platforms and
+ compilers where "command line" makes sense, this is typically a
+ list of command-line arguments, but for other platforms it could
+ be anything.
+ extra_link_args : [string]
+ any extra platform- and compiler-specific information to use
+ when linking object files together to create the extension (or
+ to create a new static Python interpreter). Similar
+ interpretation as for 'extra_compile_args'.
+ export_symbols : [string]
+ list of symbols to be exported from a shared extension. Not
+ used on all platforms, and not generally necessary for Python
+ extensions, which typically export exactly one symbol: "init" +
+ extension_name.
+ swig_opts : [string]
+ any extra options to pass to SWIG if a source file has the .i
+ extension.
+ depends : [string]
+ list of files that the extension depends on
+ language : string
+ extension language (i.e. "c", "c++", "objc"). Will be detected
+ from the source extensions if not provided.
+ optional : boolean
+ specifies that a build failure in the extension should not abort the
+ build process, but simply not install the failing extension.
+ """
+
+ # When adding arguments to this constructor, be sure to update
+ # setup_keywords in core.py.
+ def __init__(self, name, sources,
+ include_dirs=None,
+ define_macros=None,
+ undef_macros=None,
+ library_dirs=None,
+ libraries=None,
+ runtime_library_dirs=None,
+ extra_objects=None,
+ extra_compile_args=None,
+ extra_link_args=None,
+ export_symbols=None,
+ swig_opts = None,
+ depends=None,
+ language=None,
+ optional=None,
+ **kw # To catch unknown keywords
+ ):
+ if not isinstance(name, str):
+ raise AssertionError("'name' must be a string")
+ if not (isinstance(sources, list) and
+ all(isinstance(v, str) for v in sources)):
+ raise AssertionError("'sources' must be a list of strings")
+
+ self.name = name
+ self.sources = sources
+ self.include_dirs = include_dirs or []
+ self.define_macros = define_macros or []
+ self.undef_macros = undef_macros or []
+ self.library_dirs = library_dirs or []
+ self.libraries = libraries or []
+ self.runtime_library_dirs = runtime_library_dirs or []
+ self.extra_objects = extra_objects or []
+ self.extra_compile_args = extra_compile_args or []
+ self.extra_link_args = extra_link_args or []
+ self.export_symbols = export_symbols or []
+ self.swig_opts = swig_opts or []
+ self.depends = depends or []
+ self.language = language
+ self.optional = optional
+
+ # If there are unknown keyword options, warn about them
+ if len(kw) > 0:
+ options = [repr(option) for option in kw]
+ options = ', '.join(sorted(options))
+ msg = "Unknown Extension options: %s" % options
+ warnings.warn(msg)
+
+ def __repr__(self):
+ return '<%s.%s(%r) at %#x>' % (
+ self.__class__.__module__,
+ self.__class__.__qualname__,
+ self.name,
+ id(self))
+
+
+def read_setup_file(filename):
+ """Reads a Setup file and returns Extension instances."""
+ from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
+ _variable_rx)
+
+ from distutils.text_file import TextFile
+ from distutils.util import split_quoted
+
+ # First pass over the file to gather "VAR = VALUE" assignments.
+ vars = parse_makefile(filename)
+
+ # Second pass to gobble up the real content: lines of the form
+ # ... [ ...] [ ...] [ ...]
+ file = TextFile(filename,
+ strip_comments=1, skip_blanks=1, join_lines=1,
+ lstrip_ws=1, rstrip_ws=1)
+ try:
+ extensions = []
+
+ while True:
+ line = file.readline()
+ if line is None: # eof
+ break
+ if _variable_rx.match(line): # VAR=VALUE, handled in first pass
+ continue
+
+ if line[0] == line[-1] == "*":
+ file.warn("'%s' lines not handled yet" % line)
+ continue
+
+ line = expand_makefile_vars(line, vars)
+ words = split_quoted(line)
+
+ # NB. this parses a slightly different syntax than the old
+ # makesetup script: here, there must be exactly one extension per
+ # line, and it must be the first word of the line. I have no idea
+ # why the old syntax supported multiple extensions per line, as
+ # they all wind up being the same.
+
+ module = words[0]
+ ext = Extension(module, [])
+ append_next_word = None
+
+ for word in words[1:]:
+ if append_next_word is not None:
+ append_next_word.append(word)
+ append_next_word = None
+ continue
+
+ suffix = os.path.splitext(word)[1]
+ switch = word[0:2] ; value = word[2:]
+
+ if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
+ # hmm, should we do something about C vs. C++ sources?
+ # or leave it up to the CCompiler implementation to
+ # worry about?
+ ext.sources.append(word)
+ elif switch == "-I":
+ ext.include_dirs.append(value)
+ elif switch == "-D":
+ equals = value.find("=")
+ if equals == -1: # bare "-DFOO" -- no value
+ ext.define_macros.append((value, None))
+ else: # "-DFOO=blah"
+ ext.define_macros.append((value[0:equals],
+ value[equals+2:]))
+ elif switch == "-U":
+ ext.undef_macros.append(value)
+ elif switch == "-C": # only here 'cause makesetup has it!
+ ext.extra_compile_args.append(word)
+ elif switch == "-l":
+ ext.libraries.append(value)
+ elif switch == "-L":
+ ext.library_dirs.append(value)
+ elif switch == "-R":
+ ext.runtime_library_dirs.append(value)
+ elif word == "-rpath":
+ append_next_word = ext.runtime_library_dirs
+ elif word == "-Xlinker":
+ append_next_word = ext.extra_link_args
+ elif word == "-Xcompiler":
+ append_next_word = ext.extra_compile_args
+ elif switch == "-u":
+ ext.extra_link_args.append(word)
+ if not value:
+ append_next_word = ext.extra_link_args
+ elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
+ # NB. a really faithful emulation of makesetup would
+ # append a .o file to extra_objects only if it
+ # had a slash in it; otherwise, it would s/.o/.c/
+ # and append it to sources. Hmmmm.
+ ext.extra_objects.append(word)
+ else:
+ file.warn("unrecognized argument '%s'" % word)
+
+ extensions.append(ext)
+ finally:
+ file.close()
+
+ return extensions
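
For orientation (this sketch is not part of the patched file): an Extension like the one defined above is normally built in a setup.py and handed to distutils.core.setup(). A minimal, hedged example, assuming a pre-3.12 Python where distutils is still importable and using a purely hypothetical "spam" module:

    from distutils.extension import Extension

    # Hypothetical module name and source files -- illustrative only.
    spam = Extension(
        "spam",
        sources=["spam.c"],
        include_dirs=["include"],
        define_macros=[("NDEBUG", None), ("VERSION", '"1.0"')],  # -DNDEBUG -DVERSION="1.0"
        libraries=["m"],
    )

    print(repr(spam))   # <distutils.extension.Extension('spam') at 0x...>
    # In a real setup.py this would then be passed along as:
    #   setup(name="spam", version="1.0", ext_modules=[spam])
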
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/fancy_getopt.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/fancy_getopt.py
new file mode 100644
index 00000000..7d170dd2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/fancy_getopt.py
@@ -0,0 +1,457 @@
+"""distutils.fancy_getopt
+
+Wrapper around the standard getopt module that provides the following
+additional features:
+ * short and long options are tied together
+ * options have help strings, so fancy_getopt could potentially
+ create a complete usage summary
+ * options set attributes of a passed-in object
+"""
+
+import sys, string, re
+import getopt
+from distutils.errors import *
+
+# Much like command_re in distutils.core, this is close to but not quite
+# the same as a Python NAME -- except, in the spirit of most GNU
+# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
+# The similarities to NAME are again not a coincidence...
+longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
+longopt_re = re.compile(r'^%s$' % longopt_pat)
+
+# For recognizing "negative alias" options, eg. "quiet=!verbose"
+neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
+
+# This is used to translate long options to legitimate Python identifiers
+# (for use as attributes of some object).
+longopt_xlate = str.maketrans('-', '_')
+
+class FancyGetopt:
+ """Wrapper around the standard 'getopt()' module that provides some
+ handy extra functionality:
+ * short and long options are tied together
+ * options have help strings, and help text can be assembled
+ from them
+ * options set attributes of a passed-in object
+ * boolean options can have "negative aliases" -- eg. if
+ --quiet is the "negative alias" of --verbose, then "--quiet"
+ on the command line sets 'verbose' to false
+ """
+
+ def __init__(self, option_table=None):
+ # The option table is (currently) a list of tuples. The
+ # tuples may have three or four values:
+ # (long_option, short_option, help_string [, repeatable])
+ # if an option takes an argument, its long_option should have '='
+ # appended; short_option should just be a single character, no ':'
+ # in any case. If a long_option doesn't have a corresponding
+ # short_option, short_option should be None. All option tuples
+ # must have long options.
+ self.option_table = option_table
+
+ # 'option_index' maps long option names to entries in the option
+ # table (ie. those 3-tuples).
+ self.option_index = {}
+ if self.option_table:
+ self._build_index()
+
+ # 'alias' records (duh) alias options; {'foo': 'bar'} means
+ # --foo is an alias for --bar
+ self.alias = {}
+
+ # 'negative_alias' keeps track of options that are the boolean
+ # opposite of some other option
+ self.negative_alias = {}
+
+ # These keep track of the information in the option table. We
+ # don't actually populate these structures until we're ready to
+ # parse the command-line, since the 'option_table' passed in here
+ # isn't necessarily the final word.
+ self.short_opts = []
+ self.long_opts = []
+ self.short2long = {}
+ self.attr_name = {}
+ self.takes_arg = {}
+
+ # And 'option_order' is filled up in 'getopt()'; it records the
+ # original order of options (and their values) on the command-line,
+ # but expands short options, converts aliases, etc.
+ self.option_order = []
+
+ def _build_index(self):
+ self.option_index.clear()
+ for option in self.option_table:
+ self.option_index[option[0]] = option
+
+ def set_option_table(self, option_table):
+ self.option_table = option_table
+ self._build_index()
+
+ def add_option(self, long_option, short_option=None, help_string=None):
+ if long_option in self.option_index:
+ raise DistutilsGetoptError(
+ "option conflict: already an option '%s'" % long_option)
+ else:
+ option = (long_option, short_option, help_string)
+ self.option_table.append(option)
+ self.option_index[long_option] = option
+
+ def has_option(self, long_option):
+ """Return true if the option table for this parser has an
+ option with long name 'long_option'."""
+ return long_option in self.option_index
+
+ def get_attr_name(self, long_option):
+ """Translate long option name 'long_option' to the form it
+ has as an attribute of some object: ie., translate hyphens
+ to underscores."""
+ return long_option.translate(longopt_xlate)
+
+ def _check_alias_dict(self, aliases, what):
+ assert isinstance(aliases, dict)
+ for (alias, opt) in aliases.items():
+ if alias not in self.option_index:
+ raise DistutilsGetoptError(("invalid %s '%s': "
+ "option '%s' not defined") % (what, alias, alias))
+ if opt not in self.option_index:
+ raise DistutilsGetoptError(("invalid %s '%s': "
+ "aliased option '%s' not defined") % (what, alias, opt))
+
+ def set_aliases(self, alias):
+ """Set the aliases for this option parser."""
+ self._check_alias_dict(alias, "alias")
+ self.alias = alias
+
+ def set_negative_aliases(self, negative_alias):
+ """Set the negative aliases for this option parser.
+ 'negative_alias' should be a dictionary mapping option names to
+ option names, both the key and value must already be defined
+ in the option table."""
+ self._check_alias_dict(negative_alias, "negative alias")
+ self.negative_alias = negative_alias
+
+ def _grok_option_table(self):
+ """Populate the various data structures that keep tabs on the
+ option table. Called by 'getopt()' before it can do anything
+ worthwhile.
+ """
+ self.long_opts = []
+ self.short_opts = []
+ self.short2long.clear()
+ self.repeat = {}
+
+ for option in self.option_table:
+ if len(option) == 3:
+ long, short, help = option
+ repeat = 0
+ elif len(option) == 4:
+ long, short, help, repeat = option
+ else:
+ # the option table is part of the code, so simply
+ # assert that it is correct
+ raise ValueError("invalid option tuple: %r" % (option,))
+
+ # Type- and value-check the option names
+ if not isinstance(long, str) or len(long) < 2:
+ raise DistutilsGetoptError(("invalid long option '%s': "
+ "must be a string of length >= 2") % long)
+
+ if (not ((short is None) or
+ (isinstance(short, str) and len(short) == 1))):
+ raise DistutilsGetoptError("invalid short option '%s': "
+ "must a single character or None" % short)
+
+ self.repeat[long] = repeat
+ self.long_opts.append(long)
+
+ if long[-1] == '=': # option takes an argument?
+ if short: short = short + ':'
+ long = long[0:-1]
+ self.takes_arg[long] = 1
+ else:
+ # Is the option a "negative alias" for some other option (eg.
+ # "quiet" == "!verbose")?
+ alias_to = self.negative_alias.get(long)
+ if alias_to is not None:
+ if self.takes_arg[alias_to]:
+ raise DistutilsGetoptError(
+ "invalid negative alias '%s': "
+ "aliased option '%s' takes a value"
+ % (long, alias_to))
+
+ self.long_opts[-1] = long # XXX redundant?!
+ self.takes_arg[long] = 0
+
+ # If this is an alias option, make sure its "takes arg" flag is
+ # the same as the option it's aliased to.
+ alias_to = self.alias.get(long)
+ if alias_to is not None:
+ if self.takes_arg[long] != self.takes_arg[alias_to]:
+ raise DistutilsGetoptError(
+ "invalid alias '%s': inconsistent with "
+ "aliased option '%s' (one of them takes a value, "
+ "the other doesn't"
+ % (long, alias_to))
+
+ # Now enforce some bondage on the long option name, so we can
+ # later translate it to an attribute name on some object. Have
+ # to do this a bit late to make sure we've removed any trailing
+ # '='.
+ if not longopt_re.match(long):
+ raise DistutilsGetoptError(
+ "invalid long option name '%s' "
+ "(must be letters, numbers, hyphens only" % long)
+
+ self.attr_name[long] = self.get_attr_name(long)
+ if short:
+ self.short_opts.append(short)
+ self.short2long[short[0]] = long
+
+ def getopt(self, args=None, object=None):
+ """Parse command-line options in args. Store as attributes on object.
+
+ If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
+ 'object' is None or not supplied, creates a new OptionDummy
+ object, stores option values there, and returns a tuple (args,
+ object). If 'object' is supplied, it is modified in place and
+ 'getopt()' just returns 'args'; in both cases, the returned
+ 'args' is a modified copy of the passed-in 'args' list, which
+ is left untouched.
+ """
+ if args is None:
+ args = sys.argv[1:]
+ if object is None:
+ object = OptionDummy()
+ created_object = True
+ else:
+ created_object = False
+
+ self._grok_option_table()
+
+ short_opts = ' '.join(self.short_opts)
+ try:
+ opts, args = getopt.getopt(args, short_opts, self.long_opts)
+ except getopt.error as msg:
+ raise DistutilsArgError(msg)
+
+ for opt, val in opts:
+ if len(opt) == 2 and opt[0] == '-': # it's a short option
+ opt = self.short2long[opt[1]]
+ else:
+ assert len(opt) > 2 and opt[:2] == '--'
+ opt = opt[2:]
+
+ alias = self.alias.get(opt)
+ if alias:
+ opt = alias
+
+ if not self.takes_arg[opt]: # boolean option?
+ assert val == '', "boolean option can't have value"
+ alias = self.negative_alias.get(opt)
+ if alias:
+ opt = alias
+ val = 0
+ else:
+ val = 1
+
+ attr = self.attr_name[opt]
+ # The only repeating option at the moment is 'verbose'.
+ # It has a negative option -q quiet, which should set verbose = 0.
+ if val and self.repeat.get(attr) is not None:
+ val = getattr(object, attr, 0) + 1
+ setattr(object, attr, val)
+ self.option_order.append((opt, val))
+
+ # for opts
+ if created_object:
+ return args, object
+ else:
+ return args
+
+ def get_option_order(self):
+ """Returns the list of (option, value) tuples processed by the
+ previous run of 'getopt()'. Raises RuntimeError if
+ 'getopt()' hasn't been called yet.
+ """
+ if self.option_order is None:
+ raise RuntimeError("'getopt()' hasn't been called yet")
+ else:
+ return self.option_order
+
+ def generate_help(self, header=None):
+ """Generate help text (a list of strings, one per suggested line of
+ output) from the option table for this FancyGetopt object.
+ """
+ # Blithely assume the option table is good: probably wouldn't call
+ # 'generate_help()' unless you've already called 'getopt()'.
+
+ # First pass: determine maximum length of long option names
+ max_opt = 0
+ for option in self.option_table:
+ long = option[0]
+ short = option[1]
+ l = len(long)
+ if long[-1] == '=':
+ l = l - 1
+ if short is not None:
+ l = l + 5 # " (-x)" where short == 'x'
+ if l > max_opt:
+ max_opt = l
+
+ opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
+
+ # Typical help block looks like this:
+ # --foo controls foonabulation
+ # Help block for longest option looks like this:
+ # --flimflam set the flim-flam level
+ # and with wrapped text:
+ # --flimflam set the flim-flam level (must be between
+ # 0 and 100, except on Tuesdays)
+ # Options with short names will have the short name shown (but
+ # it doesn't contribute to max_opt):
+ # --foo (-f) controls foonabulation
+ # If adding the short option would make the left column too wide,
+ # we push the explanation off to the next line
+ # --flimflam (-l)
+ # set the flim-flam level
+ # Important parameters:
+ # - 2 spaces before option block start lines
+ # - 2 dashes for each long option name
+ # - min. 2 spaces between option and explanation (gutter)
+ # - 5 characters (incl. space) for short option name
+
+ # Now generate lines of help text. (If 80 columns were good enough
+ # for Jesus, then 78 columns are good enough for me!)
+ line_width = 78
+ text_width = line_width - opt_width
+ big_indent = ' ' * opt_width
+ if header:
+ lines = [header]
+ else:
+ lines = ['Option summary:']
+
+ for option in self.option_table:
+ long, short, help = option[:3]
+ text = wrap_text(help, text_width)
+ if long[-1] == '=':
+ long = long[0:-1]
+
+ # Case 1: no short option at all (makes life easy)
+ if short is None:
+ if text:
+ lines.append(" --%-*s %s" % (max_opt, long, text[0]))
+ else:
+ lines.append(" --%-*s " % (max_opt, long))
+
+ # Case 2: we have a short option, so we have to include it
+ # just after the long option
+ else:
+ opt_names = "%s (-%s)" % (long, short)
+ if text:
+ lines.append(" --%-*s %s" %
+ (max_opt, opt_names, text[0]))
+ else:
+ lines.append(" --%-*s" % opt_names)
+
+ for l in text[1:]:
+ lines.append(big_indent + l)
+ return lines
+
+ def print_help(self, header=None, file=None):
+ if file is None:
+ file = sys.stdout
+ for line in self.generate_help(header):
+ file.write(line + "\n")
+
+
+def fancy_getopt(options, negative_opt, object, args):
+ parser = FancyGetopt(options)
+ parser.set_negative_aliases(negative_opt)
+ return parser.getopt(args, object)
+
+
+WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
+
+def wrap_text(text, width):
+ """wrap_text(text : string, width : int) -> [string]
+
+ Split 'text' into multiple lines of no more than 'width' characters
+ each, and return the list of strings that results.
+ """
+ if text is None:
+ return []
+ if len(text) <= width:
+ return [text]
+
+ text = text.expandtabs()
+ text = text.translate(WS_TRANS)
+ chunks = re.split(r'( +|-+)', text)
+ chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
+ lines = []
+
+ while chunks:
+ cur_line = [] # list of chunks (to-be-joined)
+ cur_len = 0 # length of current line
+
+ while chunks:
+ l = len(chunks[0])
+ if cur_len + l <= width: # can squeeze (at least) this chunk in
+ cur_line.append(chunks[0])
+ del chunks[0]
+ cur_len = cur_len + l
+ else: # this line is full
+ # drop last chunk if all space
+ if cur_line and cur_line[-1][0] == ' ':
+ del cur_line[-1]
+ break
+
+ if chunks: # any chunks left to process?
+ # if the current line is still empty, then we had a single
+ # chunk that's too big too fit on a line -- so we break
+ # down and break it up at the line width
+ if cur_len == 0:
+ cur_line.append(chunks[0][0:width])
+ chunks[0] = chunks[0][width:]
+
+ # all-whitespace chunks at the end of a line can be discarded
+ # (and we know from the re.split above that if a chunk has
+ # *any* whitespace, it is *all* whitespace)
+ if chunks[0][0] == ' ':
+ del chunks[0]
+
+ # and store this line in the list-of-all-lines -- as a single
+ # string, of course!
+ lines.append(''.join(cur_line))
+
+ return lines
+
+
+def translate_longopt(opt):
+ """Convert a long option name to a valid Python identifier by
+ changing "-" to "_".
+ """
+ return opt.translate(longopt_xlate)
+
+
+class OptionDummy:
+ """Dummy class just used as a place to hold command-line option
+ values as instance attributes."""
+
+ def __init__(self, options=[]):
+ """Create a new OptionDummy instance. The attributes listed in
+ 'options' will be initialized to None."""
+ for opt in options:
+ setattr(self, opt, None)
+
+
+if __name__ == "__main__":
+ text = """\
+Tra-la-la, supercalifragilisticexpialidocious.
+How *do* you spell that odd word, anyways?
+(Someone ask Mary -- she'll know [or she'll
+say, "How should I know?"].)"""
+
+ for w in (10, 20, 30, 40):
+ print("width: %d" % w)
+ print("\n".join(wrap_text(text, w)))
+ print()
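
A small usage sketch for FancyGetopt (not part of the patched file; assumes distutils is importable). It wires up a boolean option with a negative alias and an option that takes a value, then parses an argument list:

    from distutils.fancy_getopt import FancyGetopt

    options = [('verbose', 'v', "run verbosely"),
               ('quiet', 'q', "run quietly"),
               ('output=', 'o', "write results to FILE")]

    parser = FancyGetopt(options)
    parser.set_negative_aliases({'quiet': 'verbose'})   # --quiet sets verbose to 0

    # No target object supplied, so getopt() returns (remaining_args, OptionDummy)
    args, opts = parser.getopt(['-v', '-o', 'out.txt', 'leftover'])
    print(args)           # ['leftover']
    print(opts.verbose)   # 1
    print(opts.output)    # 'out.txt'
    print('\n'.join(parser.generate_help("Options for the demo:")))
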
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/file_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/file_util.py
new file mode 100644
index 00000000..b3fee35a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/file_util.py
@@ -0,0 +1,238 @@
+"""distutils.file_util
+
+Utility functions for operating on single files.
+"""
+
+import os
+from distutils.errors import DistutilsFileError
+from distutils import log
+
+# for generating verbose output in 'copy_file()'
+_copy_action = { None: 'copying',
+ 'hard': 'hard linking',
+ 'sym': 'symbolically linking' }
+
+
+def _copy_file_contents(src, dst, buffer_size=16*1024):
+ """Copy the file 'src' to 'dst'; both must be filenames. Any error
+ opening either file, reading from 'src', or writing to 'dst', raises
+ DistutilsFileError. Data is read/written in chunks of 'buffer_size'
+ bytes (default 16k). No attempt is made to handle anything apart from
+ regular files.
+ """
+ # Stolen from shutil module in the standard library, but with
+ # custom error-handling added.
+ fsrc = None
+ fdst = None
+ try:
+ try:
+ fsrc = open(src, 'rb')
+ except OSError as e:
+ raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
+
+ if os.path.exists(dst):
+ try:
+ os.unlink(dst)
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not delete '%s': %s" % (dst, e.strerror))
+
+ try:
+ fdst = open(dst, 'wb')
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not create '%s': %s" % (dst, e.strerror))
+
+ while True:
+ try:
+ buf = fsrc.read(buffer_size)
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not read from '%s': %s" % (src, e.strerror))
+
+ if not buf:
+ break
+
+ try:
+ fdst.write(buf)
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not write to '%s': %s" % (dst, e.strerror))
+ finally:
+ if fdst:
+ fdst.close()
+ if fsrc:
+ fsrc.close()
+
+def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
+ link=None, verbose=1, dry_run=0):
+ """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
+ copied there with the same name; otherwise, it must be a filename. (If
+ the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
+ is true (the default), the file's mode (type and permission bits, or
+ whatever is analogous on the current platform) is copied. If
+ 'preserve_times' is true (the default), the last-modified and
+ last-access times are copied as well. If 'update' is true, 'src' will
+ only be copied if 'dst' does not exist, or if 'dst' does exist but is
+ older than 'src'.
+
+ 'link' allows you to make hard links (os.link) or symbolic links
+ (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
+ None (the default), files are copied. Don't set 'link' on systems that
+ don't support it: 'copy_file()' doesn't check if hard or symbolic
+ linking is available. If hardlink fails, falls back to
+ _copy_file_contents().
+
+ Under Mac OS, uses the native file copy function in macostools; on
+ other systems, uses '_copy_file_contents()' to copy file contents.
+
+ Return a tuple (dest_name, copied): 'dest_name' is the actual name of
+ the output file, and 'copied' is true if the file was copied (or would
+ have been copied, if 'dry_run' true).
+ """
+ # XXX if the destination file already exists, we clobber it if
+ # copying, but blow up if linking. Hmmm. And I don't know what
+ # macostools.copyfile() does. Should definitely be consistent, and
+ # should probably blow up if destination exists and we would be
+ # changing it (ie. it's not already a hard/soft link to src OR
+ # (not update) and (src newer than dst).
+
+ from distutils.dep_util import newer
+ from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
+
+ if not os.path.isfile(src):
+ raise DistutilsFileError(
+ "can't copy '%s': doesn't exist or not a regular file" % src)
+
+ if os.path.isdir(dst):
+ dir = dst
+ dst = os.path.join(dst, os.path.basename(src))
+ else:
+ dir = os.path.dirname(dst)
+
+ if update and not newer(src, dst):
+ if verbose >= 1:
+ log.debug("not copying %s (output up-to-date)", src)
+ return (dst, 0)
+
+ try:
+ action = _copy_action[link]
+ except KeyError:
+ raise ValueError("invalid value '%s' for 'link' argument" % link)
+
+ if verbose >= 1:
+ if os.path.basename(dst) == os.path.basename(src):
+ log.info("%s %s -> %s", action, src, dir)
+ else:
+ log.info("%s %s -> %s", action, src, dst)
+
+ if dry_run:
+ return (dst, 1)
+
+ # If linking (hard or symbolic), use the appropriate system call
+ # (Unix only, of course, but that's the caller's responsibility)
+ elif link == 'hard':
+ if not (os.path.exists(dst) and os.path.samefile(src, dst)):
+ try:
+ os.link(src, dst)
+ return (dst, 1)
+ except OSError:
+ # If hard linking fails, fall back on copying file
+ # (some special filesystems don't support hard linking
+ # even under Unix, see issue #8876).
+ pass
+ elif link == 'sym':
+ if not (os.path.exists(dst) and os.path.samefile(src, dst)):
+ os.symlink(src, dst)
+ return (dst, 1)
+
+ # Otherwise (non-Mac, not linking), copy the file contents and
+ # (optionally) copy the times and mode.
+ _copy_file_contents(src, dst)
+ if preserve_mode or preserve_times:
+ st = os.stat(src)
+
+ # According to David Ascher <da@ski.org>, utime() should be done
+ # before chmod() (at least under NT).
+ if preserve_times:
+ os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
+ if preserve_mode:
+ os.chmod(dst, S_IMODE(st[ST_MODE]))
+
+ return (dst, 1)
+
+
+# XXX I suspect this is Unix-specific -- need porting help!
+def move_file (src, dst,
+ verbose=1,
+ dry_run=0):
+
+ """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
+ be moved into it with the same name; otherwise, 'src' is just renamed
+ to 'dst'. Return the new full name of the file.
+
+ Handles cross-device moves on Unix using 'copy_file()'. What about
+ other systems???
+ """
+ from os.path import exists, isfile, isdir, basename, dirname
+ import errno
+
+ if verbose >= 1:
+ log.info("moving %s -> %s", src, dst)
+
+ if dry_run:
+ return dst
+
+ if not isfile(src):
+ raise DistutilsFileError("can't move '%s': not a regular file" % src)
+
+ if isdir(dst):
+ dst = os.path.join(dst, basename(src))
+ elif exists(dst):
+ raise DistutilsFileError(
+ "can't move '%s': destination '%s' already exists" %
+ (src, dst))
+
+ if not isdir(dirname(dst)):
+ raise DistutilsFileError(
+ "can't move '%s': destination '%s' not a valid path" %
+ (src, dst))
+
+ copy_it = False
+ try:
+ os.rename(src, dst)
+ except OSError as e:
+ (num, msg) = e.args
+ if num == errno.EXDEV:
+ copy_it = True
+ else:
+ raise DistutilsFileError(
+ "couldn't move '%s' to '%s': %s" % (src, dst, msg))
+
+ if copy_it:
+ copy_file(src, dst, verbose=verbose)
+ try:
+ os.unlink(src)
+ except OSError as e:
+ (num, msg) = e.args
+ try:
+ os.unlink(dst)
+ except OSError:
+ pass
+ raise DistutilsFileError(
+ "couldn't move '%s' to '%s' by copy/delete: "
+ "delete '%s' failed: %s"
+ % (src, dst, src, msg))
+ return dst
+
+
+def write_file (filename, contents):
+ """Create a file with the specified name and write 'contents' (a
+ sequence of strings without line terminators) to it.
+ """
+ f = open(filename, "w")
+ try:
+ for line in contents:
+ f.write(line + "\n")
+ finally:
+ f.close()
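
A quick round trip through the helpers above (not part of the patched file; assumes distutils is importable and works in a throwaway temp directory):

    import os, shutil, tempfile
    from distutils.file_util import write_file, copy_file, move_file

    tmp = tempfile.mkdtemp()
    src = os.path.join(tmp, "hello.txt")
    write_file(src, ["line one", "line two"])        # a newline is appended to each line

    dst, copied = copy_file(src, os.path.join(tmp, "copy.txt"), verbose=0)
    print(dst, copied)                               # .../copy.txt 1

    moved = move_file(dst, os.path.join(tmp, "moved.txt"), verbose=0)
    print(os.path.exists(moved))                     # True

    shutil.rmtree(tmp)                               # clean up the scratch directory
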
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/filelist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/filelist.py
new file mode 100644
index 00000000..c92d5fdb
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/filelist.py
@@ -0,0 +1,327 @@
+"""distutils.filelist
+
+Provides the FileList class, used for poking about the filesystem
+and building lists of files.
+"""
+
+import os, re
+import fnmatch
+import functools
+from distutils.util import convert_path
+from distutils.errors import DistutilsTemplateError, DistutilsInternalError
+from distutils import log
+
+class FileList:
+ """A list of files built by on exploring the filesystem and filtered by
+ applying various patterns to what we find there.
+
+ Instance attributes:
+ dir
+ directory from which files will be taken -- only used if
+ 'allfiles' not supplied to constructor
+ files
+ list of filenames currently being built/filtered/manipulated
+ allfiles
+ complete list of files under consideration (ie. without any
+ filtering applied)
+ """
+
+ def __init__(self, warn=None, debug_print=None):
+ # ignore arguments to FileList, but keep them for backwards
+ # compatibility
+ self.allfiles = None
+ self.files = []
+
+ def set_allfiles(self, allfiles):
+ self.allfiles = allfiles
+
+ def findall(self, dir=os.curdir):
+ self.allfiles = findall(dir)
+
+ def debug_print(self, msg):
+ """Print 'msg' to stdout if the global DEBUG (taken from the
+ DISTUTILS_DEBUG environment variable) flag is true.
+ """
+ from distutils.debug import DEBUG
+ if DEBUG:
+ print(msg)
+
+ # -- List-like methods ---------------------------------------------
+
+ def append(self, item):
+ self.files.append(item)
+
+ def extend(self, items):
+ self.files.extend(items)
+
+ def sort(self):
+ # Not a strict lexical sort!
+ sortable_files = sorted(map(os.path.split, self.files))
+ self.files = []
+ for sort_tuple in sortable_files:
+ self.files.append(os.path.join(*sort_tuple))
+
+
+ # -- Other miscellaneous utility methods ---------------------------
+
+ def remove_duplicates(self):
+ # Assumes list has been sorted!
+ for i in range(len(self.files) - 1, 0, -1):
+ if self.files[i] == self.files[i - 1]:
+ del self.files[i]
+
+
+ # -- "File template" methods ---------------------------------------
+
+ def _parse_template_line(self, line):
+ words = line.split()
+ action = words[0]
+
+ patterns = dir = dir_pattern = None
+
+ if action in ('include', 'exclude',
+ 'global-include', 'global-exclude'):
+ if len(words) < 2:
+ raise DistutilsTemplateError(
+ "'%s' expects ..." % action)
+ patterns = [convert_path(w) for w in words[1:]]
+ elif action in ('recursive-include', 'recursive-exclude'):
+ if len(words) < 3:
+ raise DistutilsTemplateError(
+ "'%s' expects ..." % action)
+ dir = convert_path(words[1])
+ patterns = [convert_path(w) for w in words[2:]]
+ elif action in ('graft', 'prune'):
+ if len(words) != 2:
+ raise DistutilsTemplateError(
+ "'%s' expects a single " % action)
+ dir_pattern = convert_path(words[1])
+ else:
+ raise DistutilsTemplateError("unknown action '%s'" % action)
+
+ return (action, patterns, dir, dir_pattern)
+
+ def process_template_line(self, line):
+ # Parse the line: split it up, make sure the right number of words
+ # is there, and return the relevant words. 'action' is always
+ # defined: it's the first word of the line. Which of the other
+ # three are defined depends on the action; it'll be either
+ # patterns, (dir and patterns), or (dir_pattern).
+ (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
+
+ # OK, now we know that the action is valid and we have the
+ # right number of words on the line for that action -- so we
+ # can proceed with minimal error-checking.
+ if action == 'include':
+ self.debug_print("include " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.include_pattern(pattern, anchor=1):
+ log.warn("warning: no files found matching '%s'",
+ pattern)
+
+ elif action == 'exclude':
+ self.debug_print("exclude " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.exclude_pattern(pattern, anchor=1):
+ log.warn(("warning: no previously-included files "
+ "found matching '%s'"), pattern)
+
+ elif action == 'global-include':
+ self.debug_print("global-include " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.include_pattern(pattern, anchor=0):
+ log.warn(("warning: no files found matching '%s' "
+ "anywhere in distribution"), pattern)
+
+ elif action == 'global-exclude':
+ self.debug_print("global-exclude " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.exclude_pattern(pattern, anchor=0):
+ log.warn(("warning: no previously-included files matching "
+ "'%s' found anywhere in distribution"),
+ pattern)
+
+ elif action == 'recursive-include':
+ self.debug_print("recursive-include %s %s" %
+ (dir, ' '.join(patterns)))
+ for pattern in patterns:
+ if not self.include_pattern(pattern, prefix=dir):
+ log.warn(("warning: no files found matching '%s' "
+ "under directory '%s'"),
+ pattern, dir)
+
+ elif action == 'recursive-exclude':
+ self.debug_print("recursive-exclude %s %s" %
+ (dir, ' '.join(patterns)))
+ for pattern in patterns:
+ if not self.exclude_pattern(pattern, prefix=dir):
+ log.warn(("warning: no previously-included files matching "
+ "'%s' found under directory '%s'"),
+ pattern, dir)
+
+ elif action == 'graft':
+ self.debug_print("graft " + dir_pattern)
+ if not self.include_pattern(None, prefix=dir_pattern):
+ log.warn("warning: no directories found matching '%s'",
+ dir_pattern)
+
+ elif action == 'prune':
+ self.debug_print("prune " + dir_pattern)
+ if not self.exclude_pattern(None, prefix=dir_pattern):
+ log.warn(("no previously-included directories found "
+ "matching '%s'"), dir_pattern)
+ else:
+ raise DistutilsInternalError(
+ "this cannot happen: invalid action '%s'" % action)
+
+
+ # -- Filtering/selection methods -----------------------------------
+
+ def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
+ """Select strings (presumably filenames) from 'self.files' that
+ match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
+ are not quite the same as implemented by the 'fnmatch' module: '*'
+ and '?' match non-special characters, where "special" is platform-
+ dependent: slash on Unix; colon, slash, and backslash on
+ DOS/Windows; and colon on Mac OS.
+
+ If 'anchor' is true (the default), then the pattern match is more
+ stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
+ 'anchor' is false, both of these will match.
+
+ If 'prefix' is supplied, then only filenames starting with 'prefix'
+ (itself a pattern) and ending with 'pattern', with anything in between
+ them, will match. 'anchor' is ignored in this case.
+
+ If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
+ 'pattern' is assumed to be either a string containing a regex or a
+ regex object -- no translation is done, the regex is just compiled
+ and used as-is.
+
+ Selected strings will be added to self.files.
+
+ Return True if files are found, False otherwise.
+ """
+ # XXX docstring lying about what the special chars are?
+ files_found = False
+ pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
+ self.debug_print("include_pattern: applying regex r'%s'" %
+ pattern_re.pattern)
+
+ # delayed loading of allfiles list
+ if self.allfiles is None:
+ self.findall()
+
+ for name in self.allfiles:
+ if pattern_re.search(name):
+ self.debug_print(" adding " + name)
+ self.files.append(name)
+ files_found = True
+ return files_found
+
+
+ def exclude_pattern (self, pattern,
+ anchor=1, prefix=None, is_regex=0):
+ """Remove strings (presumably filenames) from 'files' that match
+ 'pattern'. Other parameters are the same as for
+ 'include_pattern()', above.
+ The list 'self.files' is modified in place.
+ Return True if files are found, False otherwise.
+ """
+ files_found = False
+ pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
+ self.debug_print("exclude_pattern: applying regex r'%s'" %
+ pattern_re.pattern)
+ for i in range(len(self.files)-1, -1, -1):
+ if pattern_re.search(self.files[i]):
+ self.debug_print(" removing " + self.files[i])
+ del self.files[i]
+ files_found = True
+ return files_found
+
+
+# ----------------------------------------------------------------------
+# Utility functions
+
+def _find_all_simple(path):
+ """
+ Find all files under 'path'
+ """
+ results = (
+ os.path.join(base, file)
+ for base, dirs, files in os.walk(path, followlinks=True)
+ for file in files
+ )
+ return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+ """
+ Find all files under 'dir' and return the list of full filenames.
+ Unless dir is '.', return full filenames with dir prepended.
+ """
+ files = _find_all_simple(dir)
+ if dir == os.curdir:
+ make_rel = functools.partial(os.path.relpath, start=dir)
+ files = map(make_rel, files)
+ return list(files)
+
+
+def glob_to_re(pattern):
+ """Translate a shell-like glob pattern to a regular expression; return
+ a string containing the regex. Differs from 'fnmatch.translate()' in
+ that '*' does not match "special characters" (which are
+ platform-specific).
+ """
+ pattern_re = fnmatch.translate(pattern)
+
+ # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
+ # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
+ # and by extension they shouldn't match such "special characters" under
+ # any OS. So change all non-escaped dots in the RE to match any
+ # character except the special characters (currently: just os.sep).
+ sep = os.sep
+ if os.sep == '\\':
+ # we're using a regex to manipulate a regex, so we need
+ # to escape the backslash twice
+ sep = r'\\\\'
+ escaped = r'\1[^%s]' % sep
+    pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
+    return pattern_re
+
+
+def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
+    """Translate a shell-like wildcard pattern to a compiled regular
+    expression.  Return the compiled regex.  If 'is_regex' true,
+    then 'pattern' is directly compiled to a regex (if it's a string)
+    or just returned as-is (if it's already a regex object).
+    """
+    if is_regex:
+        if isinstance(pattern, str):
+            return re.compile(pattern)
+        else:
+            return pattern
+
+    # glob_to_re() wraps its result in the anchoring that fnmatch.translate()
+    # produces; recover that wrapper so the prefix/anchor can be spliced
+    # inside it.
+    start, _, end = glob_to_re('_').partition('_')
+
+    if pattern:
+        pattern_re = glob_to_re(pattern)
+        assert pattern_re.startswith(start) and pattern_re.endswith(end)
+    else:
+        pattern_re = ''
+
+    if prefix is not None:
+        prefix_re = glob_to_re(prefix)
+        assert prefix_re.startswith(start) and prefix_re.endswith(end)
+        prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
+        sep = os.sep
+        if os.sep == '\\':
+            sep = r'\\'
+        pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
+        pattern_re = r'%s\A%s%s.*%s%s' % (start, prefix_re, sep, pattern_re, end)
+    else:                               # no prefix -- respect anchor flag
+        if anchor:
+            pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
+
+    return re.compile(pattern_re)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/log.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/log.py
new file mode 100644
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/log.py
@@ -0,0 +1,77 @@
+"""A simple log mechanism styled after PEP 282."""
+
+# The class here is styled after PEP 282 so that it could later be
+# replaced with a standard Python logging implementation.
+
+import sys
+
+DEBUG = 1
+INFO = 2
+WARN = 3
+ERROR = 4
+FATAL = 5
+
+class Log:
+
+    def __init__(self, threshold=WARN):
+        self.threshold = threshold
+
+    def _log(self, level, msg, args):
+        if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
+            raise ValueError('%s wrong log level' % str(level))
+
+        if level >= self.threshold:
+ if args:
+ msg = msg % args
+ if level in (WARN, ERROR, FATAL):
+ stream = sys.stderr
+ else:
+ stream = sys.stdout
+ try:
+ stream.write('%s\n' % msg)
+ except UnicodeEncodeError:
+ # emulate backslashreplace error handler
+ encoding = stream.encoding
+ msg = msg.encode(encoding, "backslashreplace").decode(encoding)
+ stream.write('%s\n' % msg)
+ stream.flush()
+
+ def log(self, level, msg, *args):
+ self._log(level, msg, args)
+
+ def debug(self, msg, *args):
+ self._log(DEBUG, msg, args)
+
+ def info(self, msg, *args):
+ self._log(INFO, msg, args)
+
+ def warn(self, msg, *args):
+ self._log(WARN, msg, args)
+
+ def error(self, msg, *args):
+ self._log(ERROR, msg, args)
+
+ def fatal(self, msg, *args):
+ self._log(FATAL, msg, args)
+
+_global_log = Log()
+log = _global_log.log
+debug = _global_log.debug
+info = _global_log.info
+warn = _global_log.warn
+error = _global_log.error
+fatal = _global_log.fatal
+
+def set_threshold(level):
+ # return the old threshold for use from tests
+ old = _global_log.threshold
+ _global_log.threshold = level
+ return old
+
+def set_verbosity(v):
+ if v <= 0:
+ set_threshold(WARN)
+ elif v == 1:
+ set_threshold(INFO)
+ elif v >= 2:
+ set_threshold(DEBUG)
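
The module-level helpers above are what the rest of distutils calls for output. A minimal sketch of how they are typically used (not part of the patched file; assumes distutils is importable):

    from distutils import log

    old = log.set_threshold(log.DEBUG)     # returns the previous threshold
    log.debug("searching %s for headers", "/usr/include")
    log.info("running build_ext")
    log.warn("no files found matching '%s'", "*.txt")   # WARN and above go to stderr
    log.set_threshold(old)

    log.set_verbosity(2)                   # equivalent to set_threshold(DEBUG)
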
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/msvc9compiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/msvc9compiler.py
new file mode 100644
index 00000000..4c0036a0
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/msvc9compiler.py
@@ -0,0 +1,789 @@
+"""distutils.msvc9compiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for the Microsoft Visual Studio 2008.
+
+The module is compatible with VS 2005 and VS 2008. You can find legacy support
+for older versions of VS in distutils.msvccompiler.
+"""
+
+# Written by Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+# finding DevStudio (through the registry)
+# ported to VS2005 and VS 2008 by Christian Heimes
+
+import os
+import subprocess
+import sys
+import re
+
+from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError
+from distutils.ccompiler import CCompiler, gen_preprocess_options, \
+ gen_lib_options
+from distutils import log
+from distutils.util import get_platform
+
+import winreg
+
+RegOpenKeyEx = winreg.OpenKeyEx
+RegEnumKey = winreg.EnumKey
+RegEnumValue = winreg.EnumValue
+RegError = winreg.error
+
+HKEYS = (winreg.HKEY_USERS,
+ winreg.HKEY_CURRENT_USER,
+ winreg.HKEY_LOCAL_MACHINE,
+ winreg.HKEY_CLASSES_ROOT)
+
+NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
+if NATIVE_WIN64:
+ # Visual C++ is a 32-bit application, so we need to look in
+ # the corresponding registry branch, if we're running a
+ # 64-bit Python on Win64
+ VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
+ WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
+ NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
+else:
+ VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
+ WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
+ NET_BASE = r"Software\Microsoft\.NETFramework"
+
+# A map keyed by get_platform() return values to values accepted by
+# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
+# the param to cross-compile on x86 targeting amd64.)
+PLAT_TO_VCVARS = {
+ 'win32' : 'x86',
+ 'win-amd64' : 'amd64',
+}
+
+class Reg:
+ """Helper class to read values from the registry
+ """
+
+ def get_value(cls, path, key):
+ for base in HKEYS:
+ d = cls.read_values(base, path)
+ if d and key in d:
+ return d[key]
+ raise KeyError(key)
+ get_value = classmethod(get_value)
+
+ def read_keys(cls, base, key):
+ """Return list of registry keys."""
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ L = []
+ i = 0
+ while True:
+ try:
+ k = RegEnumKey(handle, i)
+ except RegError:
+ break
+ L.append(k)
+ i += 1
+ return L
+ read_keys = classmethod(read_keys)
+
+ def read_values(cls, base, key):
+ """Return dict of registry keys and values.
+
+ All names are converted to lowercase.
+ """
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ d = {}
+ i = 0
+ while True:
+ try:
+ name, value, type = RegEnumValue(handle, i)
+ except RegError:
+ break
+ name = name.lower()
+ d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
+ i += 1
+ return d
+ read_values = classmethod(read_values)
+
+ def convert_mbcs(s):
+ dec = getattr(s, "decode", None)
+ if dec is not None:
+ try:
+ s = dec("mbcs")
+ except UnicodeError:
+ pass
+ return s
+ convert_mbcs = staticmethod(convert_mbcs)
+
+class MacroExpander:
+
+ def __init__(self, version):
+ self.macros = {}
+ self.vsbase = VS_BASE % version
+ self.load_macros(version)
+
+ def set_macro(self, macro, path, key):
+ self.macros["$(%s)" % macro] = Reg.get_value(path, key)
+
+ def load_macros(self, version):
+ self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
+ self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
+ self.set_macro("FrameworkDir", NET_BASE, "installroot")
+ try:
+ if version >= 8.0:
+ self.set_macro("FrameworkSDKDir", NET_BASE,
+ "sdkinstallrootv2.0")
+ else:
+ raise KeyError("sdkinstallrootv2.0")
+ except KeyError:
+ raise DistutilsPlatformError(
+ """Python was built with Visual Studio 2008;
+extensions must be built with a compiler that can generate compatible binaries.
+Visual Studio 2008 was not found on this system. If you have Cygwin installed,
+you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
+
+ if version >= 9.0:
+ self.set_macro("FrameworkVersion", self.vsbase, "clr version")
+ self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
+ else:
+ p = r"Software\Microsoft\NET Framework Setup\Product"
+ for base in HKEYS:
+ try:
+ h = RegOpenKeyEx(base, p)
+ except RegError:
+ continue
+ key = RegEnumKey(h, 0)
+ d = Reg.get_value(base, r"%s\%s" % (p, key))
+ self.macros["$(FrameworkVersion)"] = d["version"]
+
+ def sub(self, s):
+ for k, v in self.macros.items():
+ s = s.replace(k, v)
+ return s
+
+def get_build_version():
+ """Return the version of MSVC that was used to build Python.
+
+ For Python 2.3 and up, the version number is included in
+ sys.version. For earlier versions, assume the compiler is MSVC 6.
+ """
+ prefix = "MSC v."
+ i = sys.version.find(prefix)
+ if i == -1:
+ return 6
+ i = i + len(prefix)
+ s, rest = sys.version[i:].split(" ", 1)
+ majorVersion = int(s[:-2]) - 6
+ if majorVersion >= 13:
+ # v13 was skipped and should be v14
+ majorVersion += 1
+ minorVersion = int(s[2:3]) / 10.0
+ # I don't think paths are affected by minor version in version 6
+ if majorVersion == 6:
+ minorVersion = 0
+ if majorVersion >= 6:
+ return majorVersion + minorVersion
+ # else we don't know what version of the compiler this is
+ return None
+
+def normalize_and_reduce_paths(paths):
+ """Return a list of normalized paths with duplicates removed.
+
+ The current order of paths is maintained.
+ """
+ # Paths are normalized so things like: /a and /a/ aren't both preserved.
+ reduced_paths = []
+ for p in paths:
+ np = os.path.normpath(p)
+ # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
+ if np not in reduced_paths:
+ reduced_paths.append(np)
+ return reduced_paths
+
+def removeDuplicates(variable):
+ """Remove duplicate values of an environment variable.
+ """
+ oldList = variable.split(os.pathsep)
+ newList = []
+ for i in oldList:
+ if i not in newList:
+ newList.append(i)
+ newVariable = os.pathsep.join(newList)
+ return newVariable
+
+def find_vcvarsall(version):
+ """Find the vcvarsall.bat file
+
+ At first it tries to find the productdir of VS 2008 in the registry. If
+ that fails it falls back to the VS90COMNTOOLS env var.
+ """
+ vsbase = VS_BASE % version
+ try:
+ productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
+ "productdir")
+ except KeyError:
+ log.debug("Unable to find productdir in registry")
+ productdir = None
+
+ if not productdir or not os.path.isdir(productdir):
+ toolskey = "VS%0.f0COMNTOOLS" % version
+ toolsdir = os.environ.get(toolskey, None)
+
+ if toolsdir and os.path.isdir(toolsdir):
+ productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
+ productdir = os.path.abspath(productdir)
+ if not os.path.isdir(productdir):
+ log.debug("%s is not a valid directory" % productdir)
+ return None
+ else:
+ log.debug("Env var %s is not set or invalid" % toolskey)
+ if not productdir:
+ log.debug("No productdir found")
+ return None
+ vcvarsall = os.path.join(productdir, "vcvarsall.bat")
+ if os.path.isfile(vcvarsall):
+ return vcvarsall
+ log.debug("Unable to find vcvarsall.bat")
+ return None
+
+def query_vcvarsall(version, arch="x86"):
+ """Launch vcvarsall.bat and read the settings from its environment
+ """
+ vcvarsall = find_vcvarsall(version)
+ interesting = {"include", "lib", "libpath", "path"}
+ result = {}
+
+ if vcvarsall is None:
+ raise DistutilsPlatformError("Unable to find vcvarsall.bat")
+ log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
+ popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ try:
+ stdout, stderr = popen.communicate()
+ if popen.wait() != 0:
+ raise DistutilsPlatformError(stderr.decode("mbcs"))
+
+ stdout = stdout.decode("mbcs")
+ for line in stdout.split("\n"):
+ line = Reg.convert_mbcs(line)
+ if '=' not in line:
+ continue
+ line = line.strip()
+ key, value = line.split('=', 1)
+ key = key.lower()
+ if key in interesting:
+ if value.endswith(os.pathsep):
+ value = value[:-1]
+ result[key] = removeDuplicates(value)
+
+ finally:
+ popen.stdout.close()
+ popen.stderr.close()
+
+ if len(result) != len(interesting):
+ raise ValueError(str(list(result.keys())))
+
+ return result
+
+# More globals
+VERSION = get_build_version()
+if VERSION < 8.0:
+ raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
+# MACROS = MacroExpander(VERSION)
+
+class MSVCCompiler(CCompiler) :
+ """Concrete class that implements an interface to Microsoft Visual C++,
+ as defined by the CCompiler abstract class."""
+
+ compiler_type = 'msvc'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+ _rc_extensions = ['.rc']
+ _mc_extensions = ['.mc']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = (_c_extensions + _cpp_extensions +
+ _rc_extensions + _mc_extensions)
+ res_extension = '.res'
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ CCompiler.__init__ (self, verbose, dry_run, force)
+ self.__version = VERSION
+ self.__root = r"Software\Microsoft\VisualStudio"
+ # self.__macros = MACROS
+ self.__paths = []
+ # target platform (.plat_name is consistent with 'bdist')
+ self.plat_name = None
+ self.__arch = None # deprecated name
+ self.initialized = False
+
+ def initialize(self, plat_name=None):
+ # multi-init means we would need to check platform same each time...
+ assert not self.initialized, "don't init multiple times"
+ if plat_name is None:
+ plat_name = get_platform()
+ # sanity check for platforms to prevent obscure errors later.
+ ok_plats = 'win32', 'win-amd64'
+ if plat_name not in ok_plats:
+ raise DistutilsPlatformError("--plat-name must be one of %s" %
+ (ok_plats,))
+
+ if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
+ # Assume that the SDK set up everything alright; don't try to be
+ # smarter
+ self.cc = "cl.exe"
+ self.linker = "link.exe"
+ self.lib = "lib.exe"
+ self.rc = "rc.exe"
+ self.mc = "mc.exe"
+ else:
+ # On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
+ # to cross compile, you use 'x86_amd64'.
+ # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
+ # compile use 'x86' (ie, it runs the x86 compiler directly)
+ if plat_name == get_platform() or plat_name == 'win32':
+ # native build or cross-compile to win32
+ plat_spec = PLAT_TO_VCVARS[plat_name]
+ else:
+ # cross compile from win32 -> some 64bit
+ plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
+ PLAT_TO_VCVARS[plat_name]
+
+ vc_env = query_vcvarsall(VERSION, plat_spec)
+
+ self.__paths = vc_env['path'].split(os.pathsep)
+ os.environ['lib'] = vc_env['lib']
+ os.environ['include'] = vc_env['include']
+
+ if len(self.__paths) == 0:
+ raise DistutilsPlatformError("Python was built with %s, "
+ "and extensions need to be built with the same "
+ "version of the compiler, but it isn't installed."
+ % self.__product)
+
+ self.cc = self.find_exe("cl.exe")
+ self.linker = self.find_exe("link.exe")
+ self.lib = self.find_exe("lib.exe")
+ self.rc = self.find_exe("rc.exe") # resource compiler
+ self.mc = self.find_exe("mc.exe") # message compiler
+ #self.set_path_env_var('lib')
+ #self.set_path_env_var('include')
+
+ # extend the MSVC path with the current path
+ try:
+ for p in os.environ['path'].split(';'):
+ self.__paths.append(p)
+ except KeyError:
+ pass
+ self.__paths = normalize_and_reduce_paths(self.__paths)
+ os.environ['path'] = ";".join(self.__paths)
+
+ self.preprocess_options = None
+ if self.__arch == "x86":
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
+ '/Z7', '/D_DEBUG']
+ else:
+ # Win64
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
+ '/Z7', '/D_DEBUG']
+
+ self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
+ if self.__version >= 7:
+ self.ldflags_shared_debug = [
+ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
+ ]
+ self.ldflags_static = [ '/nologo']
+
+ self.initialized = True
+
+ # -- Worker methods ------------------------------------------------
+
+ def object_filenames(self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ # Copied from ccompiler.py, extended to return .res as 'object'-file
+ # for .rc input file
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ (base, ext) = os.path.splitext (src_name)
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if ext not in self.src_extensions:
+ # Better to raise an exception instead of silently continuing
+ # and later complain about sources and targets having
+ # different lengths
+ raise CompileError ("Don't know how to compile %s" % src_name)
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext in self._rc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ elif ext in self._mc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ if not self.initialized:
+ self.initialize()
+ compile_info = self._setup_compile(output_dir, macros, include_dirs,
+ sources, depends, extra_postargs)
+ macros, objects, extra_postargs, pp_opts, build = compile_info
+
+ compile_opts = extra_preargs or []
+ compile_opts.append ('/c')
+ if debug:
+ compile_opts.extend(self.compile_options_debug)
+ else:
+ compile_opts.extend(self.compile_options)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ if debug:
+ # pass the full pathname to MSVC in debug mode,
+ # this allows the debugger to find the source file
+ # without asking the user to browse for it
+ src = os.path.abspath(src)
+
+ if ext in self._c_extensions:
+ input_opt = "/Tc" + src
+ elif ext in self._cpp_extensions:
+ input_opt = "/Tp" + src
+ elif ext in self._rc_extensions:
+ # compile .RC to .RES file
+ input_opt = src
+ output_opt = "/fo" + obj
+ try:
+ self.spawn([self.rc] + pp_opts +
+ [output_opt] + [input_opt])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ elif ext in self._mc_extensions:
+ # Compile .MC to .RC file to .RES file.
+ # * '-h dir' specifies the directory for the
+ # generated include file
+ # * '-r dir' specifies the target directory of the
+ # generated RC file and the binary message resource
+ # it includes
+ #
+ # For now (since there are no options to change this),
+ # we use the source-directory for the include file and
+ # the build directory for the RC file and message
+ # resources. This works at least for win32all.
+ h_dir = os.path.dirname(src)
+ rc_dir = os.path.dirname(obj)
+ try:
+ # first compile .MC to .RC and .H file
+ self.spawn([self.mc] +
+ ['-h', h_dir, '-r', rc_dir] + [src])
+ base, _ = os.path.splitext (os.path.basename (src))
+ rc_file = os.path.join (rc_dir, base + '.rc')
+ # then compile .RC to .RES file
+ self.spawn([self.rc] +
+ ["/fo" + obj] + [rc_file])
+
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ else:
+ # how to handle this file?
+ raise CompileError("Don't know how to compile %s to %s"
+ % (src, obj))
+
+ output_opt = "/Fo" + obj
+ try:
+ self.spawn([self.cc] + compile_opts + pp_opts +
+ [input_opt, output_opt] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+
+ def create_static_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ output_filename = self.library_filename(output_libname,
+ output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ lib_args = objects + ['/OUT:' + output_filename]
+ if debug:
+ pass # XXX what goes here?
+ try:
+ self.spawn([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ (libraries, library_dirs, runtime_library_dirs) = fixed_args
+
+ if runtime_library_dirs:
+ self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ + str (runtime_library_dirs))
+
+ lib_opts = gen_lib_options(self,
+ library_dirs, runtime_library_dirs,
+ libraries)
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ if target_desc == CCompiler.EXECUTABLE:
+ if debug:
+ ldflags = self.ldflags_shared_debug[1:]
+ else:
+ ldflags = self.ldflags_shared[1:]
+ else:
+ if debug:
+ ldflags = self.ldflags_shared_debug
+ else:
+ ldflags = self.ldflags_shared
+
+ export_opts = []
+ for sym in (export_symbols or []):
+ export_opts.append("/EXPORT:" + sym)
+
+ ld_args = (ldflags + lib_opts + export_opts +
+ objects + ['/OUT:' + output_filename])
+
+ # The MSVC linker generates .lib and .exp files, which cannot be
+ # suppressed by any linker switches. The .lib files may even be
+ # needed! Make sure they are generated in the temporary build
+ # directory. Since they have different names for debug and release
+ # builds, they can go into the same directory.
+ build_temp = os.path.dirname(objects[0])
+ if export_symbols is not None:
+ (dll_name, dll_ext) = os.path.splitext(
+ os.path.basename(output_filename))
+ implib_file = os.path.join(
+ build_temp,
+ self.library_filename(dll_name))
+ ld_args.append ('/IMPLIB:' + implib_file)
+
+ self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ self.mkpath(os.path.dirname(output_filename))
+ try:
+ self.spawn([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+
+ # embed the manifest
+ # XXX - this is somewhat fragile - if mt.exe fails, distutils
+ # will still consider the DLL up-to-date, but it will not have a
+ # manifest. Maybe we should link to a temp file? OTOH, that
+ # implies a build environment error that shouldn't go undetected.
+ mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
+ if mfinfo is not None:
+ mffilename, mfid = mfinfo
+ out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
+ try:
+ self.spawn(['mt.exe', '-nologo', '-manifest',
+ mffilename, out_arg])
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
+ # If we need a manifest at all, an embedded manifest is recommended.
+ # See MSDN article titled
+ # "How to: Embed a Manifest Inside a C/C++ Application"
+ # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
+ # Ask the linker to generate the manifest in the temp dir, so
+ # we can check it, and possibly embed it, later.
+ temp_manifest = os.path.join(
+ build_temp,
+ os.path.basename(output_filename) + ".manifest")
+ ld_args.append('/MANIFESTFILE:' + temp_manifest)
+
+ def manifest_get_embed_info(self, target_desc, ld_args):
+ # If a manifest should be embedded, return a tuple of
+ # (manifest_filename, resource_id). Returns None if no manifest
+ # should be embedded. See http://bugs.python.org/issue7833 for why
+ # we want to avoid any manifest for extension modules if we can.
+ for arg in ld_args:
+ if arg.startswith("/MANIFESTFILE:"):
+ temp_manifest = arg.split(":", 1)[1]
+ break
+ else:
+ # no /MANIFESTFILE so nothing to do.
+ return None
+ if target_desc == CCompiler.EXECUTABLE:
+ # by default, executables always get the manifest with the
+ # CRT referenced.
+ mfid = 1
+ else:
+ # Extension modules try and avoid any manifest if possible.
+ mfid = 2
+ temp_manifest = self._remove_visual_c_ref(temp_manifest)
+ if temp_manifest is None:
+ return None
+ return temp_manifest, mfid
+
+ def _remove_visual_c_ref(self, manifest_file):
+ try:
+ # Remove references to the Visual C runtime, so they will
+ # fall through to the Visual C dependency of Python.exe.
+ # This way, when installed for a restricted user (e.g.
+ # runtimes are not in WinSxS folder, but in Python's own
+ # folder), the runtimes do not need to be in every folder
+ # with .pyd's.
+ # Returns either the filename of the modified manifest or
+ # None if no manifest should be embedded.
+ manifest_f = open(manifest_file)
+ try:
+ manifest_buf = manifest_f.read()
+ finally:
+ manifest_f.close()
+ pattern = re.compile(
+ r"""<assemblyIdentity.*?name=("|')Microsoft\."""
+ r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
+ re.DOTALL)
+ manifest_buf = re.sub(pattern, "", manifest_buf)
+ pattern = r"<dependentAssembly>\s*</dependentAssembly>"
+ manifest_buf = re.sub(pattern, "", manifest_buf)
+ # Now see if any other assemblies are referenced - if not, we
+ # don't want a manifest embedded.
+ pattern = re.compile(
+ r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
+ r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
+ if re.search(pattern, manifest_buf) is None:
+ return None
+
+ manifest_f = open(manifest_file, 'w')
+ try:
+ manifest_f.write(manifest_buf)
+ return manifest_file
+ finally:
+ manifest_f.close()
+ except OSError:
+ pass
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "/LIBPATH:" + dir
+
+ def runtime_library_dir_option(self, dir):
+ raise DistutilsPlatformError(
+ "don't know how to set runtime library search path for MSVC++")
+
+ def library_option(self, lib):
+ return self.library_filename(lib)
+
+
+ def find_library_file(self, dirs, lib, debug=0):
+ # Prefer a debugging library if found (and requested), but deal
+ # with it if we don't have one.
+ if debug:
+ try_names = [lib + "_d", lib]
+ else:
+ try_names = [lib]
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename (name))
+ if os.path.exists(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
+
+ # Helper methods for using the MSVC registry settings
+
+ def find_exe(self, exe):
+ """Return path to an MSVC executable program.
+
+ Tries to find the program in several places: first, one of the
+ MSVC program search paths from the registry; next, the directories
+ in the PATH environment variable. If any of those work, return an
+ absolute path that is known to exist. If none of them work, just
+ return the original program name, 'exe'.
+ """
+ for p in self.__paths:
+ fn = os.path.join(os.path.abspath(p), exe)
+ if os.path.isfile(fn):
+ return fn
+
+ # didn't find it; try existing path
+ for p in os.environ['Path'].split(';'):
+ fn = os.path.join(os.path.abspath(p),exe)
+ if os.path.isfile(fn):
+ return fn
+
+ return exe
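
The find_exe() docstring above describes the lookup order (registry-derived paths, then PATH, then the bare program name). As a rough illustration of how this compiler class as a whole is typically driven, here is a hedged usage sketch; the module name is assumed to be distutils.msvc9compiler based on the manifest-handling code above, and the source and output paths are made up:

    # Hedged sketch: assumes this file is distutils/msvc9compiler.py and that
    # example.c exists; the paths below are illustrative only.
    from distutils.msvc9compiler import MSVCCompiler

    cc = MSVCCompiler()
    # compile() calls initialize() lazily, which locates cl.exe, link.exe,
    # lib.exe, rc.exe and mc.exe via the registry (or trusts the SDK setup).
    objects = cc.compile(['example.c'], output_dir='build\\temp', debug=0)
    # link_shared_lib() is inherited from CCompiler and ends up in link() above,
    # which also embeds the manifest via mt.exe when one is generated.
    cc.link_shared_lib(objects, 'example', output_dir='build\\lib')
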
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/msvccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/msvccompiler.py
new file mode 100644
index 00000000..d1de2fbf
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/msvccompiler.py
@@ -0,0 +1,643 @@
+"""distutils.msvccompiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for the Microsoft Visual Studio.
+"""
+
+# Written by Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+# finding DevStudio (through the registry)
+
+import sys, os
+from distutils.errors import \
+ DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError
+from distutils.ccompiler import \
+ CCompiler, gen_preprocess_options, gen_lib_options
+from distutils import log
+
+_can_read_reg = False
+try:
+ import winreg
+
+ _can_read_reg = True
+ hkey_mod = winreg
+
+ RegOpenKeyEx = winreg.OpenKeyEx
+ RegEnumKey = winreg.EnumKey
+ RegEnumValue = winreg.EnumValue
+ RegError = winreg.error
+
+except ImportError:
+ try:
+ import win32api
+ import win32con
+ _can_read_reg = True
+ hkey_mod = win32con
+
+ RegOpenKeyEx = win32api.RegOpenKeyEx
+ RegEnumKey = win32api.RegEnumKey
+ RegEnumValue = win32api.RegEnumValue
+ RegError = win32api.error
+ except ImportError:
+ log.info("Warning: Can't read registry to find the "
+ "necessary compiler setting\n"
+ "Make sure that Python modules winreg, "
+ "win32api or win32con are installed.")
+ pass
+
+if _can_read_reg:
+ HKEYS = (hkey_mod.HKEY_USERS,
+ hkey_mod.HKEY_CURRENT_USER,
+ hkey_mod.HKEY_LOCAL_MACHINE,
+ hkey_mod.HKEY_CLASSES_ROOT)
+
+def read_keys(base, key):
+ """Return list of registry keys."""
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ L = []
+ i = 0
+ while True:
+ try:
+ k = RegEnumKey(handle, i)
+ except RegError:
+ break
+ L.append(k)
+ i += 1
+ return L
+
+def read_values(base, key):
+ """Return dict of registry keys and values.
+
+ All names are converted to lowercase.
+ """
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ d = {}
+ i = 0
+ while True:
+ try:
+ name, value, type = RegEnumValue(handle, i)
+ except RegError:
+ break
+ name = name.lower()
+ d[convert_mbcs(name)] = convert_mbcs(value)
+ i += 1
+ return d
+
+def convert_mbcs(s):
+ dec = getattr(s, "decode", None)
+ if dec is not None:
+ try:
+ s = dec("mbcs")
+ except UnicodeError:
+ pass
+ return s
+
+class MacroExpander:
+ def __init__(self, version):
+ self.macros = {}
+ self.load_macros(version)
+
+ def set_macro(self, macro, path, key):
+ for base in HKEYS:
+ d = read_values(base, path)
+ if d:
+ self.macros["$(%s)" % macro] = d[key]
+ break
+
+ def load_macros(self, version):
+ vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
+ self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
+ self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
+ net = r"Software\Microsoft\.NETFramework"
+ self.set_macro("FrameworkDir", net, "installroot")
+ try:
+ if version > 7.0:
+ self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
+ else:
+ self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
+ except KeyError as exc: #
+ raise DistutilsPlatformError(
+ """Python was built with Visual Studio 2003;
+extensions must be built with a compiler that can generate compatible binaries.
+Visual Studio 2003 was not found on this system. If you have Cygwin installed,
+you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
+
+ p = r"Software\Microsoft\NET Framework Setup\Product"
+ for base in HKEYS:
+ try:
+ h = RegOpenKeyEx(base, p)
+ except RegError:
+ continue
+ key = RegEnumKey(h, 0)
+ d = read_values(base, r"%s\%s" % (p, key))
+ self.macros["$(FrameworkVersion)"] = d["version"]
+
+ def sub(self, s):
+ for k, v in self.macros.items():
+ s = s.replace(k, v)
+ return s
+
+def get_build_version():
+ """Return the version of MSVC that was used to build Python.
+
+ For Python 2.3 and up, the version number is included in
+ sys.version. For earlier versions, assume the compiler is MSVC 6.
+ """
+ prefix = "MSC v."
+ i = sys.version.find(prefix)
+ if i == -1:
+ return 6
+ i = i + len(prefix)
+ s, rest = sys.version[i:].split(" ", 1)
+ majorVersion = int(s[:-2]) - 6
+ if majorVersion >= 13:
+ # v13 was skipped and should be v14
+ majorVersion += 1
+ minorVersion = int(s[2:3]) / 10.0
+ # I don't think paths are affected by minor version in version 6
+ if majorVersion == 6:
+ minorVersion = 0
+ if majorVersion >= 6:
+ return majorVersion + minorVersion
+ # else we don't know what version of the compiler this is
+ return None
+
+def get_build_architecture():
+ """Return the processor architecture.
+
+ Possible results are "Intel" or "AMD64".
+ """
+
+ prefix = " bit ("
+ i = sys.version.find(prefix)
+ if i == -1:
+ return "Intel"
+ j = sys.version.find(")", i)
+ return sys.version[i+len(prefix):j]
+
+def normalize_and_reduce_paths(paths):
+ """Return a list of normalized paths with duplicates removed.
+
+ The current order of paths is maintained.
+ """
+ # Paths are normalized so things like: /a and /a/ aren't both preserved.
+ reduced_paths = []
+ for p in paths:
+ np = os.path.normpath(p)
+ # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
+ if np not in reduced_paths:
+ reduced_paths.append(np)
+ return reduced_paths
+
+
+class MSVCCompiler(CCompiler) :
+ """Concrete class that implements an interface to Microsoft Visual C++,
+ as defined by the CCompiler abstract class."""
+
+ compiler_type = 'msvc'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+ _rc_extensions = ['.rc']
+ _mc_extensions = ['.mc']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = (_c_extensions + _cpp_extensions +
+ _rc_extensions + _mc_extensions)
+ res_extension = '.res'
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ CCompiler.__init__ (self, verbose, dry_run, force)
+ self.__version = get_build_version()
+ self.__arch = get_build_architecture()
+ if self.__arch == "Intel":
+ # x86
+ if self.__version >= 7:
+ self.__root = r"Software\Microsoft\VisualStudio"
+ self.__macros = MacroExpander(self.__version)
+ else:
+ self.__root = r"Software\Microsoft\Devstudio"
+ self.__product = "Visual Studio version %s" % self.__version
+ else:
+ # Win64. Assume this was built with the platform SDK
+ self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
+
+ self.initialized = False
+
+ def initialize(self):
+ self.__paths = []
+ if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
+ # Assume that the SDK set up everything alright; don't try to be
+ # smarter
+ self.cc = "cl.exe"
+ self.linker = "link.exe"
+ self.lib = "lib.exe"
+ self.rc = "rc.exe"
+ self.mc = "mc.exe"
+ else:
+ self.__paths = self.get_msvc_paths("path")
+
+ if len(self.__paths) == 0:
+ raise DistutilsPlatformError("Python was built with %s, "
+ "and extensions need to be built with the same "
+ "version of the compiler, but it isn't installed."
+ % self.__product)
+
+ self.cc = self.find_exe("cl.exe")
+ self.linker = self.find_exe("link.exe")
+ self.lib = self.find_exe("lib.exe")
+ self.rc = self.find_exe("rc.exe") # resource compiler
+ self.mc = self.find_exe("mc.exe") # message compiler
+ self.set_path_env_var('lib')
+ self.set_path_env_var('include')
+
+ # extend the MSVC path with the current path
+ try:
+ for p in os.environ['path'].split(';'):
+ self.__paths.append(p)
+ except KeyError:
+ pass
+ self.__paths = normalize_and_reduce_paths(self.__paths)
+ os.environ['path'] = ";".join(self.__paths)
+
+ self.preprocess_options = None
+ if self.__arch == "Intel":
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
+ '/Z7', '/D_DEBUG']
+ else:
+ # Win64
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
+ '/Z7', '/D_DEBUG']
+
+ self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
+ if self.__version >= 7:
+ self.ldflags_shared_debug = [
+ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
+ ]
+ else:
+ self.ldflags_shared_debug = [
+ '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
+ ]
+ self.ldflags_static = [ '/nologo']
+
+ self.initialized = True
+
+ # -- Worker methods ------------------------------------------------
+
+ def object_filenames(self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ # Copied from ccompiler.py, extended to return .res as 'object'-file
+ # for .rc input file
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ (base, ext) = os.path.splitext (src_name)
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if ext not in self.src_extensions:
+ # Better to raise an exception instead of silently continuing
+ # and later complain about sources and targets having
+ # different lengths
+ raise CompileError ("Don't know how to compile %s" % src_name)
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext in self._rc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ elif ext in self._mc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ if not self.initialized:
+ self.initialize()
+ compile_info = self._setup_compile(output_dir, macros, include_dirs,
+ sources, depends, extra_postargs)
+ macros, objects, extra_postargs, pp_opts, build = compile_info
+
+ compile_opts = extra_preargs or []
+ compile_opts.append ('/c')
+ if debug:
+ compile_opts.extend(self.compile_options_debug)
+ else:
+ compile_opts.extend(self.compile_options)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ if debug:
+ # pass the full pathname to MSVC in debug mode,
+ # this allows the debugger to find the source file
+ # without asking the user to browse for it
+ src = os.path.abspath(src)
+
+ if ext in self._c_extensions:
+ input_opt = "/Tc" + src
+ elif ext in self._cpp_extensions:
+ input_opt = "/Tp" + src
+ elif ext in self._rc_extensions:
+ # compile .RC to .RES file
+ input_opt = src
+ output_opt = "/fo" + obj
+ try:
+ self.spawn([self.rc] + pp_opts +
+ [output_opt] + [input_opt])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ elif ext in self._mc_extensions:
+ # Compile .MC to .RC file to .RES file.
+ # * '-h dir' specifies the directory for the
+ # generated include file
+ # * '-r dir' specifies the target directory of the
+ # generated RC file and the binary message resource
+ # it includes
+ #
+ # For now (since there are no options to change this),
+ # we use the source-directory for the include file and
+ # the build directory for the RC file and message
+ # resources. This works at least for win32all.
+ h_dir = os.path.dirname(src)
+ rc_dir = os.path.dirname(obj)
+ try:
+ # first compile .MC to .RC and .H file
+ self.spawn([self.mc] +
+ ['-h', h_dir, '-r', rc_dir] + [src])
+ base, _ = os.path.splitext (os.path.basename (src))
+ rc_file = os.path.join (rc_dir, base + '.rc')
+ # then compile .RC to .RES file
+ self.spawn([self.rc] +
+ ["/fo" + obj] + [rc_file])
+
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ else:
+ # how to handle this file?
+ raise CompileError("Don't know how to compile %s to %s"
+ % (src, obj))
+
+ output_opt = "/Fo" + obj
+ try:
+ self.spawn([self.cc] + compile_opts + pp_opts +
+ [input_opt, output_opt] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+
+ def create_static_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ output_filename = self.library_filename(output_libname,
+ output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ lib_args = objects + ['/OUT:' + output_filename]
+ if debug:
+ pass # XXX what goes here?
+ try:
+ self.spawn([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ (libraries, library_dirs, runtime_library_dirs) = fixed_args
+
+ if runtime_library_dirs:
+ self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ + str (runtime_library_dirs))
+
+ lib_opts = gen_lib_options(self,
+ library_dirs, runtime_library_dirs,
+ libraries)
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ if target_desc == CCompiler.EXECUTABLE:
+ if debug:
+ ldflags = self.ldflags_shared_debug[1:]
+ else:
+ ldflags = self.ldflags_shared[1:]
+ else:
+ if debug:
+ ldflags = self.ldflags_shared_debug
+ else:
+ ldflags = self.ldflags_shared
+
+ export_opts = []
+ for sym in (export_symbols or []):
+ export_opts.append("/EXPORT:" + sym)
+
+ ld_args = (ldflags + lib_opts + export_opts +
+ objects + ['/OUT:' + output_filename])
+
+ # The MSVC linker generates .lib and .exp files, which cannot be
+ # suppressed by any linker switches. The .lib files may even be
+ # needed! Make sure they are generated in the temporary build
+ # directory. Since they have different names for debug and release
+ # builds, they can go into the same directory.
+ if export_symbols is not None:
+ (dll_name, dll_ext) = os.path.splitext(
+ os.path.basename(output_filename))
+ implib_file = os.path.join(
+ os.path.dirname(objects[0]),
+ self.library_filename(dll_name))
+ ld_args.append ('/IMPLIB:' + implib_file)
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ self.mkpath(os.path.dirname(output_filename))
+ try:
+ self.spawn([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "/LIBPATH:" + dir
+
+ def runtime_library_dir_option(self, dir):
+ raise DistutilsPlatformError(
+ "don't know how to set runtime library search path for MSVC++")
+
+ def library_option(self, lib):
+ return self.library_filename(lib)
+
+
+ def find_library_file(self, dirs, lib, debug=0):
+ # Prefer a debugging library if found (and requested), but deal
+ # with it if we don't have one.
+ if debug:
+ try_names = [lib + "_d", lib]
+ else:
+ try_names = [lib]
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename (name))
+ if os.path.exists(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
+
+ # Helper methods for using the MSVC registry settings
+
+ def find_exe(self, exe):
+ """Return path to an MSVC executable program.
+
+ Tries to find the program in several places: first, one of the
+ MSVC program search paths from the registry; next, the directories
+ in the PATH environment variable. If any of those work, return an
+ absolute path that is known to exist. If none of them work, just
+ return the original program name, 'exe'.
+ """
+ for p in self.__paths:
+ fn = os.path.join(os.path.abspath(p), exe)
+ if os.path.isfile(fn):
+ return fn
+
+ # didn't find it; try existing path
+ for p in os.environ['Path'].split(';'):
+ fn = os.path.join(os.path.abspath(p),exe)
+ if os.path.isfile(fn):
+ return fn
+
+ return exe
+
+ def get_msvc_paths(self, path, platform='x86'):
+ """Get a list of devstudio directories (include, lib or path).
+
+ Return a list of strings. The list will be empty if unable to
+ access the registry or appropriate registry keys not found.
+ """
+ if not _can_read_reg:
+ return []
+
+ path = path + " dirs"
+ if self.__version >= 7:
+ key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
+ % (self.__root, self.__version))
+ else:
+ key = (r"%s\6.0\Build System\Components\Platforms"
+ r"\Win32 (%s)\Directories" % (self.__root, platform))
+
+ for base in HKEYS:
+ d = read_values(base, key)
+ if d:
+ if self.__version >= 7:
+ return self.__macros.sub(d[path]).split(";")
+ else:
+ return d[path].split(";")
+ # MSVC 6 seems to create the registry entries we need only when
+ # the GUI is run.
+ if self.__version == 6:
+ for base in HKEYS:
+ if read_values(base, r"%s\6.0" % self.__root) is not None:
+ self.warn("It seems you have Visual Studio 6 installed, "
+ "but the expected registry settings are not present.\n"
+ "You must at least run the Visual Studio GUI once "
+ "so that these entries are created.")
+ break
+ return []
+
+ def set_path_env_var(self, name):
+ """Set environment variable 'name' to an MSVC path type value.
+
+ This is equivalent to a SET command prior to execution of spawned
+ commands.
+ """
+
+ if name == "lib":
+ p = self.get_msvc_paths("library")
+ else:
+ p = self.get_msvc_paths(name)
+ if p:
+ os.environ[name] = ';'.join(p)
+
+
+if get_build_version() >= 8.0:
+ log.debug("Importing new compiler from distutils.msvc9compiler")
+ OldMSVCCompiler = MSVCCompiler
+ from distutils.msvc9compiler import MSVCCompiler
+ # get_build_architecture not really relevant now we support cross-compile
+ from distutils.msvc9compiler import MacroExpander
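
get_build_version() above derives the compiler version from the "MSC v.NNNN" tag in sys.version; the arithmetic can be traced with a hedged, self-contained example (the banner string below is illustrative, not taken from this diff):

    # Illustrative banner; the real value comes from sys.version at runtime.
    banner = "3.7.2 (example build) [MSC v.1916 64 bit (AMD64)]"

    prefix = "MSC v."
    i = banner.find(prefix) + len(prefix)
    s, _ = banner[i:].split(" ", 1)   # '1916'
    major = int(s[:-2]) - 6           # 19 - 6 = 13
    if major >= 13:
        major += 1                    # version 13 was skipped, so report 14
    minor = int(s[2:3]) / 10.0        # 0.1
    print(major + minor)              # 14.1 -> triggers the msvc9compiler import above
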
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/spawn.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/spawn.py
new file mode 100644
index 00000000..53876880
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/spawn.py
@@ -0,0 +1,192 @@
+"""distutils.spawn
+
+Provides the 'spawn()' function, a front-end to various platform-
+specific functions for launching another program in a sub-process.
+Also provides the 'find_executable()' to search the path for a given
+executable name.
+"""
+
+import sys
+import os
+
+from distutils.errors import DistutilsPlatformError, DistutilsExecError
+from distutils.debug import DEBUG
+from distutils import log
+
+def spawn(cmd, search_path=1, verbose=0, dry_run=0):
+ """Run another program, specified as a command list 'cmd', in a new process.
+
+ 'cmd' is just the argument list for the new process, ie.
+ cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
+ There is no way to run a program with a name different from that of its
+ executable.
+
+ If 'search_path' is true (the default), the system's executable
+ search path will be used to find the program; otherwise, cmd[0]
+ must be the exact path to the executable. If 'dry_run' is true,
+ the command will not actually be run.
+
+ Raise DistutilsExecError if running the program fails in any way; just
+ return on success.
+ """
+ # cmd is documented as a list, but just in case some code passes a tuple
+ # in, protect our %-formatting code against horrible death
+ cmd = list(cmd)
+ if os.name == 'posix':
+ _spawn_posix(cmd, search_path, dry_run=dry_run)
+ elif os.name == 'nt':
+ _spawn_nt(cmd, search_path, dry_run=dry_run)
+ else:
+ raise DistutilsPlatformError(
+ "don't know how to spawn programs on platform '%s'" % os.name)
+
+def _nt_quote_args(args):
+ """Quote command-line arguments for DOS/Windows conventions.
+
+ Just wraps every argument which contains blanks in double quotes, and
+ returns a new argument list.
+ """
+ # XXX this doesn't seem very robust to me -- but if the Windows guys
+ # say it'll work, I guess I'll have to accept it. (What if an arg
+ # contains quotes? What other magic characters, other than spaces,
+ # have to be escaped? Is there an escaping mechanism other than
+ # quoting?)
+ for i, arg in enumerate(args):
+ if ' ' in arg:
+ args[i] = '"%s"' % arg
+ return args
+
+def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
+ executable = cmd[0]
+ cmd = _nt_quote_args(cmd)
+ if search_path:
+ # either we find one or it stays the same
+ executable = find_executable(executable) or executable
+ log.info(' '.join([executable] + cmd[1:]))
+ if not dry_run:
+ # spawn for NT requires a full path to the .exe
+ try:
+ rc = os.spawnv(os.P_WAIT, executable, cmd)
+ except OSError as exc:
+ # this seems to happen when the command isn't found
+ if not DEBUG:
+ cmd = executable
+ raise DistutilsExecError(
+ "command %r failed: %s" % (cmd, exc.args[-1]))
+ if rc != 0:
+ # and this reflects the command running but failing
+ if not DEBUG:
+ cmd = executable
+ raise DistutilsExecError(
+ "command %r failed with exit status %d" % (cmd, rc))
+
+if sys.platform == 'darwin':
+ from distutils import sysconfig
+ _cfg_target = None
+ _cfg_target_split = None
+
+def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
+ log.info(' '.join(cmd))
+ if dry_run:
+ return
+ executable = cmd[0]
+ exec_fn = search_path and os.execvp or os.execv
+ env = None
+ if sys.platform == 'darwin':
+ global _cfg_target, _cfg_target_split
+ if _cfg_target is None:
+ _cfg_target = sysconfig.get_config_var(
+ 'MACOSX_DEPLOYMENT_TARGET') or ''
+ if _cfg_target:
+ _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
+ if _cfg_target:
+ # ensure that the deployment target of build process is not less
+ # than that used when the interpreter was built. This ensures
+ # extension modules are built with correct compatibility values
+ cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
+ if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
+ my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
+ 'now "%s" but "%s" during configure'
+ % (cur_target, _cfg_target))
+ raise DistutilsPlatformError(my_msg)
+ env = dict(os.environ,
+ MACOSX_DEPLOYMENT_TARGET=cur_target)
+ exec_fn = search_path and os.execvpe or os.execve
+ pid = os.fork()
+ if pid == 0: # in the child
+ try:
+ if env is None:
+ exec_fn(executable, cmd)
+ else:
+ exec_fn(executable, cmd, env)
+ except OSError as e:
+ if not DEBUG:
+ cmd = executable
+ sys.stderr.write("unable to execute %r: %s\n"
+ % (cmd, e.strerror))
+ os._exit(1)
+
+ if not DEBUG:
+ cmd = executable
+ sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
+ os._exit(1)
+ else: # in the parent
+ # Loop until the child either exits or is terminated by a signal
+ # (ie. keep waiting if it's merely stopped)
+ while True:
+ try:
+ pid, status = os.waitpid(pid, 0)
+ except OSError as exc:
+ if not DEBUG:
+ cmd = executable
+ raise DistutilsExecError(
+ "command %r failed: %s" % (cmd, exc.args[-1]))
+ if os.WIFSIGNALED(status):
+ if not DEBUG:
+ cmd = executable
+ raise DistutilsExecError(
+ "command %r terminated by signal %d"
+ % (cmd, os.WTERMSIG(status)))
+ elif os.WIFEXITED(status):
+ exit_status = os.WEXITSTATUS(status)
+ if exit_status == 0:
+ return # hey, it succeeded!
+ else:
+ if not DEBUG:
+ cmd = executable
+ raise DistutilsExecError(
+ "command %r failed with exit status %d"
+ % (cmd, exit_status))
+ elif os.WIFSTOPPED(status):
+ continue
+ else:
+ if not DEBUG:
+ cmd = executable
+ raise DistutilsExecError(
+ "unknown error executing %r: termination status %d"
+ % (cmd, status))
+
+def find_executable(executable, path=None):
+ """Tries to find 'executable' in the directories listed in 'path'.
+
+ A string listing directories separated by 'os.pathsep'; defaults to
+ os.environ['PATH']. Returns the complete filename or None if not found.
+ """
+ if path is None:
+ path = os.environ.get('PATH', os.defpath)
+
+ paths = path.split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if (sys.platform == 'win32') and (ext != '.exe'):
+ executable = executable + '.exe'
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ f = os.path.join(p, executable)
+ if os.path.isfile(f):
+ # the file exists, we have a shot at spawn working
+ return f
+ return None
+ else:
+ return executable
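
A brief, hedged sketch of how the two public helpers in this module are used together (the command is only an example; the presence of 'git' on the machine is an assumption):

    from distutils.spawn import find_executable, spawn
    from distutils.errors import DistutilsExecError

    # find_executable() returns an absolute path or None; on win32 it appends
    # '.exe' when the name has no extension.
    exe = find_executable('git')   # assumption: git may or may not be installed
    if exe is not None:
        try:
            # dry_run=1 only logs the command; drop it to actually run the program.
            spawn([exe, '--version'], dry_run=1)
        except DistutilsExecError as err:
            # spawn() signals failure by raising rather than returning a status.
            print('spawn failed:', err)
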
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/sysconfig.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/sysconfig.py
new file mode 100644
index 00000000..e07a6c8b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/sysconfig.py
@@ -0,0 +1,532 @@
+"""Provide access to Python's configuration information. The specific
+configuration variables available depend heavily on the platform and
+configuration. The values may be retrieved using
+get_config_var(name), and the list of variables is available via
+get_config_vars().keys(). Additional convenience functions are also
+available.
+
+Written by: Fred L. Drake, Jr.
+Email:
+"""
+
+import _imp
+import os
+import re
+import sys
+
+from .errors import DistutilsPlatformError
+
+# These are needed in a couple of spots, so just compute them once.
+PREFIX = os.path.normpath(sys.prefix)
+EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
+BASE_PREFIX = os.path.normpath(sys.base_prefix)
+BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
+
+# Path to the base directory of the project. On Windows the binary may
+# live in project/PCbuild/win32 or project/PCbuild/amd64.
+# set for cross builds
+if "_PYTHON_PROJECT_BASE" in os.environ:
+ project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
+else:
+ project_base = os.path.dirname(os.path.abspath(sys.executable))
+if (os.name == 'nt' and
+ project_base.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))):
+ project_base = os.path.dirname(os.path.dirname(project_base))
+
+# python_build: (Boolean) if true, we're either building Python or
+# building an extension with an un-installed Python, so we use
+# different (hard-wired) directories.
+# Setup.local is available for Makefile builds including VPATH builds,
+# Setup.dist is available on Windows
+def _is_python_source_dir(d):
+ for fn in ("Setup.dist", "Setup.local"):
+ if os.path.isfile(os.path.join(d, "Modules", fn)):
+ return True
+ return False
+_sys_home = getattr(sys, '_home', None)
+if (_sys_home and os.name == 'nt' and
+ _sys_home.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))):
+ _sys_home = os.path.dirname(os.path.dirname(_sys_home))
+def _python_build():
+ if _sys_home:
+ return _is_python_source_dir(_sys_home)
+ return _is_python_source_dir(project_base)
+python_build = _python_build()
+
+# Calculate the build qualifier flags if they are defined. Adding the flags
+# to the include and lib directories only makes sense for an installation, not
+# an in-source build.
+build_flags = ''
+try:
+ if not python_build:
+ build_flags = sys.abiflags
+except AttributeError:
+ # It's not a configure-based build, so the sys module doesn't have
+ # this attribute, which is fine.
+ pass
+
+def get_python_version():
+ """Return a string containing the major and minor Python version,
+ leaving off the patchlevel. Sample return values could be '1.5'
+ or '2.2'.
+ """
+ return '%d.%d' % sys.version_info[:2]
+
+
+def get_python_inc(plat_specific=0, prefix=None):
+ """Return the directory containing installed Python header files.
+
+ If 'plat_specific' is false (the default), this is the path to the
+ non-platform-specific header files, i.e. Python.h and so on;
+ otherwise, this is the path to platform-specific header files
+ (namely pyconfig.h).
+
+ If 'prefix' is supplied, use it instead of sys.base_prefix or
+ sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
+ """
+ if prefix is None:
+ prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
+ if os.name == "posix":
+ if python_build:
+ # Assume the executable is in the build directory. The
+ # pyconfig.h file should be in the same directory. Since
+ # the build directory may not be the source directory, we
+ # must use "srcdir" from the makefile to find the "Include"
+ # directory.
+ if plat_specific:
+ return _sys_home or project_base
+ else:
+ incdir = os.path.join(get_config_var('srcdir'), 'Include')
+ return os.path.normpath(incdir)
+ python_dir = 'python' + get_python_version() + build_flags
+ return os.path.join(prefix, "include", python_dir)
+ elif os.name == "nt":
+ return os.path.join(prefix, "include")
+ else:
+ raise DistutilsPlatformError(
+ "I don't know where Python installs its C header files "
+ "on platform '%s'" % os.name)
+
+
+def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
+ """Return the directory containing the Python library (standard or
+ site additions).
+
+ If 'plat_specific' is true, return the directory containing
+ platform-specific modules, i.e. any module from a non-pure-Python
+ module distribution; otherwise, return the platform-shared library
+ directory. If 'standard_lib' is true, return the directory
+ containing standard Python library modules; otherwise, return the
+ directory for site-specific modules.
+
+ If 'prefix' is supplied, use it instead of sys.base_prefix or
+ sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
+ """
+ if prefix is None:
+ if standard_lib:
+ prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
+ else:
+ prefix = plat_specific and EXEC_PREFIX or PREFIX
+
+ if os.name == "posix":
+ libpython = os.path.join(prefix,
+ "lib", "python" + get_python_version())
+ if standard_lib:
+ return libpython
+ else:
+ return os.path.join(libpython, "site-packages")
+ elif os.name == "nt":
+ if standard_lib:
+ return os.path.join(prefix, "Lib")
+ else:
+ return os.path.join(prefix, "Lib", "site-packages")
+ else:
+ raise DistutilsPlatformError(
+ "I don't know where Python installs its library "
+ "on platform '%s'" % os.name)
+
+
+
+def customize_compiler(compiler):
+ """Do any platform-specific customization of a CCompiler instance.
+
+ Mainly needed on Unix, so we can plug in the information that
+ varies across Unices and is stored in Python's Makefile.
+ """
+ if compiler.compiler_type == "unix":
+ if sys.platform == "darwin":
+ # Perform first-time customization of compiler-related
+ # config vars on OS X now that we know we need a compiler.
+ # This is primarily to support Pythons from binary
+ # installers. The kind and paths to build tools on
+ # the user system may vary significantly from the system
+ # that Python itself was built on. Also the user OS
+ # version and build tools may not support the same set
+ # of CPU architectures for universal builds.
+ global _config_vars
+ # Use get_config_var() to ensure _config_vars is initialized.
+ if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
+ import _osx_support
+ _osx_support.customize_compiler(_config_vars)
+ _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
+
+ (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
+ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
+ 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
+
+ if 'CC' in os.environ:
+ newcc = os.environ['CC']
+ if (sys.platform == 'darwin'
+ and 'LDSHARED' not in os.environ
+ and ldshared.startswith(cc)):
+ # On OS X, if CC is overridden, use that as the default
+ # command for LDSHARED as well
+ ldshared = newcc + ldshared[len(cc):]
+ cc = newcc
+ if 'CXX' in os.environ:
+ cxx = os.environ['CXX']
+ if 'LDSHARED' in os.environ:
+ ldshared = os.environ['LDSHARED']
+ if 'CPP' in os.environ:
+ cpp = os.environ['CPP']
+ else:
+ cpp = cc + " -E" # not always
+ if 'LDFLAGS' in os.environ:
+ ldshared = ldshared + ' ' + os.environ['LDFLAGS']
+ if 'CFLAGS' in os.environ:
+ cflags = opt + ' ' + os.environ['CFLAGS']
+ ldshared = ldshared + ' ' + os.environ['CFLAGS']
+ if 'CPPFLAGS' in os.environ:
+ cpp = cpp + ' ' + os.environ['CPPFLAGS']
+ cflags = cflags + ' ' + os.environ['CPPFLAGS']
+ ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
+ if 'AR' in os.environ:
+ ar = os.environ['AR']
+ if 'ARFLAGS' in os.environ:
+ archiver = ar + ' ' + os.environ['ARFLAGS']
+ else:
+ archiver = ar + ' ' + ar_flags
+
+ cc_cmd = cc + ' ' + cflags
+ compiler.set_executables(
+ preprocessor=cpp,
+ compiler=cc_cmd,
+ compiler_so=cc_cmd + ' ' + ccshared,
+ compiler_cxx=cxx,
+ linker_so=ldshared,
+ linker_exe=cc,
+ archiver=archiver)
+
+ compiler.shared_lib_extension = shlib_suffix
+
+
+def get_config_h_filename():
+ """Return full pathname of installed pyconfig.h file."""
+ if python_build:
+ if os.name == "nt":
+ inc_dir = os.path.join(_sys_home or project_base, "PC")
+ else:
+ inc_dir = _sys_home or project_base
+ else:
+ inc_dir = get_python_inc(plat_specific=1)
+
+ return os.path.join(inc_dir, 'pyconfig.h')
+
+
+def get_makefile_filename():
+ """Return full pathname of installed Makefile from the Python build."""
+ if python_build:
+ return os.path.join(_sys_home or project_base, "Makefile")
+ lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
+ config_file = 'config-{}{}'.format(get_python_version(), build_flags)
+ if hasattr(sys.implementation, '_multiarch'):
+ config_file += '-%s' % sys.implementation._multiarch
+ return os.path.join(lib_dir, config_file, 'Makefile')
+
+
+def parse_config_h(fp, g=None):
+ """Parse a config.h-style file.
+
+ A dictionary containing name/value pairs is returned. If an
+ optional dictionary is passed in as the second argument, it is
+ used instead of a new dictionary.
+ """
+ if g is None:
+ g = {}
+ define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
+ undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
+ #
+ while True:
+ line = fp.readline()
+ if not line:
+ break
+ m = define_rx.match(line)
+ if m:
+ n, v = m.group(1, 2)
+ try: v = int(v)
+ except ValueError: pass
+ g[n] = v
+ else:
+ m = undef_rx.match(line)
+ if m:
+ g[m.group(1)] = 0
+ return g
+
+
+# Regexes needed for parsing Makefile (and similar syntaxes,
+# like old-style Setup files).
+_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
+_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
+_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
+
+def parse_makefile(fn, g=None):
+ """Parse a Makefile-style file.
+
+ A dictionary containing name/value pairs is returned. If an
+ optional dictionary is passed in as the second argument, it is
+ used instead of a new dictionary.
+ """
+ from distutils.text_file import TextFile
+ fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
+
+ if g is None:
+ g = {}
+ done = {}
+ notdone = {}
+
+ while True:
+ line = fp.readline()
+ if line is None: # eof
+ break
+ m = _variable_rx.match(line)
+ if m:
+ n, v = m.group(1, 2)
+ v = v.strip()
+ # `$$' is a literal `$' in make
+ tmpv = v.replace('$$', '')
+
+ if "$" in tmpv:
+ notdone[n] = v
+ else:
+ try:
+ v = int(v)
+ except ValueError:
+ # insert literal `$'
+ done[n] = v.replace('$$', '$')
+ else:
+ done[n] = v
+
+ # Variables with a 'PY_' prefix in the makefile. These need to
+ # be made available without that prefix through sysconfig.
+ # Special care is needed to ensure that variable expansion works, even
+ # if the expansion uses the name without a prefix.
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
+
+ # do variable interpolation here
+ while notdone:
+ for name in list(notdone):
+ value = notdone[name]
+ m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+ if m:
+ n = m.group(1)
+ found = True
+ if n in done:
+ item = str(done[n])
+ elif n in notdone:
+ # get it on a subsequent round
+ found = False
+ elif n in os.environ:
+ # do it like make: fall back to environment
+ item = os.environ[n]
+
+ elif n in renamed_variables:
+ if name.startswith('PY_') and name[3:] in renamed_variables:
+ item = ""
+
+ elif 'PY_' + n in notdone:
+ found = False
+
+ else:
+ item = str(done['PY_' + n])
+ else:
+ done[n] = item = ""
+ if found:
+ after = value[m.end():]
+ value = value[:m.start()] + item + after
+ if "$" in after:
+ notdone[name] = value
+ else:
+ try: value = int(value)
+ except ValueError:
+ done[name] = value.strip()
+ else:
+ done[name] = value
+ del notdone[name]
+
+ if name.startswith('PY_') \
+ and name[3:] in renamed_variables:
+
+ name = name[3:]
+ if name not in done:
+ done[name] = value
+ else:
+ # bogus variable reference; just drop it since we can't deal
+ del notdone[name]
+
+ fp.close()
+
+ # strip spurious spaces
+ for k, v in done.items():
+ if isinstance(v, str):
+ done[k] = v.strip()
+
+ # save the results in the global dictionary
+ g.update(done)
+ return g
+
+
+def expand_makefile_vars(s, vars):
+ """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
+ 'string' according to 'vars' (a dictionary mapping variable names to
+ values). Variables not present in 'vars' are silently expanded to the
+ empty string. The variable values in 'vars' should not contain further
+ variable expansions; if 'vars' is the output of 'parse_makefile()',
+ you're fine. Returns a variable-expanded version of 's'.
+ """
+
+ # This algorithm does multiple expansion, so if vars['foo'] contains
+ # "${bar}", it will expand ${foo} to ${bar}, and then expand
+ # ${bar}... and so forth. This is fine as long as 'vars' comes from
+ # 'parse_makefile()', which takes care of such expansions eagerly,
+ # according to make's variable expansion semantics.
+
+ while True:
+ m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
+ if m:
+ (beg, end) = m.span()
+ s = s[0:beg] + vars.get(m.group(1)) + s[end:]
+ else:
+ break
+ return s
+
+
+_config_vars = None
+
+def _init_posix():
+ """Initialize the module as appropriate for POSIX systems."""
+ # _sysconfigdata is generated at build time, see the sysconfig module
+ name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
+ '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
+ abi=sys.abiflags,
+ platform=sys.platform,
+ multiarch=getattr(sys.implementation, '_multiarch', ''),
+ ))
+ _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
+ build_time_vars = _temp.build_time_vars
+ global _config_vars
+ _config_vars = {}
+ _config_vars.update(build_time_vars)
+
+
+def _init_nt():
+ """Initialize the module as appropriate for NT"""
+ g = {}
+ # set basic install directories
+ g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
+ g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
+
+ # XXX hmmm.. a normal install puts include files here
+ g['INCLUDEPY'] = get_python_inc(plat_specific=0)
+
+ g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
+ g['EXE'] = ".exe"
+ g['VERSION'] = get_python_version().replace(".", "")
+ g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
+
+ global _config_vars
+ _config_vars = g
+
+
+def get_config_vars(*args):
+ """With no arguments, return a dictionary of all configuration
+ variables relevant for the current platform. Generally this includes
+ everything needed to build extensions and install both pure modules and
+ extensions. On Unix, this means every variable defined in Python's
+ installed Makefile; on Windows it's a much smaller set.
+
+ With arguments, return a list of values that result from looking up
+ each argument in the configuration variable dictionary.
+ """
+ global _config_vars
+ if _config_vars is None:
+ func = globals().get("_init_" + os.name)
+ if func:
+ func()
+ else:
+ _config_vars = {}
+
+ # Normalized versions of prefix and exec_prefix are handy to have;
+ # in fact, these are the standard versions used most places in the
+ # Distutils.
+ _config_vars['prefix'] = PREFIX
+ _config_vars['exec_prefix'] = EXEC_PREFIX
+
+ # For backward compatibility, see issue19555
+ SO = _config_vars.get('EXT_SUFFIX')
+ if SO is not None:
+ _config_vars['SO'] = SO
+
+ # Always convert srcdir to an absolute path
+ srcdir = _config_vars.get('srcdir', project_base)
+ if os.name == 'posix':
+ if python_build:
+ # If srcdir is a relative path (typically '.' or '..')
+ # then it should be interpreted relative to the directory
+ # containing Makefile.
+ base = os.path.dirname(get_makefile_filename())
+ srcdir = os.path.join(base, srcdir)
+ else:
+ # srcdir is not meaningful since the installation is
+ # spread about the filesystem. We choose the
+ # directory containing the Makefile since we know it
+ # exists.
+ srcdir = os.path.dirname(get_makefile_filename())
+ _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
+
+ # Convert srcdir into an absolute path if it appears necessary.
+ # Normally it is relative to the build directory. However, during
+ # testing, for example, we might be running a non-installed python
+ # from a different directory.
+ if python_build and os.name == "posix":
+ base = project_base
+ if (not os.path.isabs(_config_vars['srcdir']) and
+ base != os.getcwd()):
+ # srcdir is relative and we are not in the same directory
+ # as the executable. Assume executable is in the build
+ # directory and make srcdir absolute.
+ srcdir = os.path.join(base, _config_vars['srcdir'])
+ _config_vars['srcdir'] = os.path.normpath(srcdir)
+
+ # OS X platforms require special customization to handle
+ # multi-architecture, multi-os-version installers
+ if sys.platform == 'darwin':
+ import _osx_support
+ _osx_support.customize_config_vars(_config_vars)
+
+ if args:
+ vals = []
+ for name in args:
+ vals.append(_config_vars.get(name))
+ return vals
+ else:
+ return _config_vars
+
+def get_config_var(name):
+ """Return the value of a single variable using the dictionary
+ returned by 'get_config_vars()'. Equivalent to
+ get_config_vars().get(name)
+ """
+ if name == 'SO':
+ import warnings
+ warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
+ return get_config_vars().get(name)
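
A hedged example of querying this module; the printed values depend on the interpreter and platform, and the Makefile-style expansion at the end uses a made-up variable dictionary:

    from distutils import sysconfig

    print(sysconfig.get_python_version())          # e.g. '3.7'
    print(sysconfig.get_python_inc())              # directory containing Python.h
    print(sysconfig.get_config_var('EXT_SUFFIX'))  # extension-module filename suffix

    # expand_makefile_vars() substitutes $(name)/${name} references repeatedly:
    makefile_vars = {'prefix': '/usr'}
    print(sysconfig.expand_makefile_vars('$(prefix)/bin/python', makefile_vars))
    # -> '/usr/bin/python'
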
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/Setup.sample b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/Setup.sample
new file mode 100644
index 00000000..36c4290d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/Setup.sample
@@ -0,0 +1,67 @@
+# Setup file from the pygame project
+
+#--StartConfig
+SDL = -I/usr/include/SDL -D_REENTRANT -lSDL
+FONT = -lSDL_ttf
+IMAGE = -lSDL_image
+MIXER = -lSDL_mixer
+SMPEG = -lsmpeg
+PNG = -lpng
+JPEG = -ljpeg
+SCRAP = -lX11
+PORTMIDI = -lportmidi
+PORTTIME = -lporttime
+#--EndConfig
+
+#DEBUG = -C-W -C-Wall
+DEBUG =
+
+#the following modules are optional. you will want to compile
+#everything you can, but you can ignore ones you don't have
+#dependencies for, just comment them out
+
+imageext src/imageext.c $(SDL) $(IMAGE) $(PNG) $(JPEG) $(DEBUG)
+font src/font.c $(SDL) $(FONT) $(DEBUG)
+mixer src/mixer.c $(SDL) $(MIXER) $(DEBUG)
+mixer_music src/music.c $(SDL) $(MIXER) $(DEBUG)
+_numericsurfarray src/_numericsurfarray.c $(SDL) $(DEBUG)
+_numericsndarray src/_numericsndarray.c $(SDL) $(MIXER) $(DEBUG)
+movie src/movie.c $(SDL) $(SMPEG) $(DEBUG)
+scrap src/scrap.c $(SDL) $(SCRAP) $(DEBUG)
+_camera src/_camera.c src/camera_v4l2.c src/camera_v4l.c $(SDL) $(DEBUG)
+pypm src/pypm.c $(SDL) $(PORTMIDI) $(PORTTIME) $(DEBUG)
+
+GFX = src/SDL_gfx/SDL_gfxPrimitives.c
+#GFX = src/SDL_gfx/SDL_gfxBlitFunc.c src/SDL_gfx/SDL_gfxPrimitives.c
+gfxdraw src/gfxdraw.c $(SDL) $(GFX) $(DEBUG)
+
+
+
+#these modules are required for pygame to run. they only require
+#SDL as a dependency. these should not be altered
+
+base src/base.c $(SDL) $(DEBUG)
+cdrom src/cdrom.c $(SDL) $(DEBUG)
+color src/color.c $(SDL) $(DEBUG)
+constants src/constants.c $(SDL) $(DEBUG)
+display src/display.c $(SDL) $(DEBUG)
+event src/event.c $(SDL) $(DEBUG)
+fastevent src/fastevent.c src/fastevents.c $(SDL) $(DEBUG)
+key src/key.c $(SDL) $(DEBUG)
+mouse src/mouse.c $(SDL) $(DEBUG)
+rect src/rect.c $(SDL) $(DEBUG)
+rwobject src/rwobject.c $(SDL) $(DEBUG)
+surface src/surface.c src/alphablit.c src/surface_fill.c $(SDL) $(DEBUG)
+surflock src/surflock.c $(SDL) $(DEBUG)
+time src/time.c $(SDL) $(DEBUG)
+joystick src/joystick.c $(SDL) $(DEBUG)
+draw src/draw.c $(SDL) $(DEBUG)
+image src/image.c $(SDL) $(DEBUG)
+overlay src/overlay.c $(SDL) $(DEBUG)
+transform src/transform.c src/rotozoom.c src/scale2x.c src/scale_mmx.c $(SDL) $(DEBUG)
+mask src/mask.c src/bitmask.c $(SDL) $(DEBUG)
+bufferproxy src/bufferproxy.c $(SDL) $(DEBUG)
+pixelarray src/pixelarray.c $(SDL) $(DEBUG)
+_arraysurfarray src/_arraysurfarray.c $(SDL) $(DEBUG)
+
+
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/__init__.py
new file mode 100644
index 00000000..1b939cbd
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/__init__.py
@@ -0,0 +1,36 @@
+"""Test suite for distutils.
+
+This test suite consists of a collection of test modules in the
+distutils.tests package. Each test module has a name starting with
+'test' and contains a function test_suite(). The function is expected
+to return an initialized unittest.TestSuite instance.
+
+Tests for the command classes in the distutils.command package are
+included in distutils.tests as well, instead of using a separate
+distutils.command.tests package, since command identification is done
+by import rather than matching pre-defined names.
+
+"""
+
+import os
+import sys
+import unittest
+from test.support import run_unittest
+
+
+here = os.path.dirname(__file__) or os.curdir
+
+
+def test_suite():
+ suite = unittest.TestSuite()
+ for fn in os.listdir(here):
+ if fn.startswith("test") and fn.endswith(".py"):
+ modname = "distutils.tests." + fn[:-3]
+ __import__(modname)
+ module = sys.modules[modname]
+ suite.addTest(module.test_suite())
+ return suite
+
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/support.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/support.py
new file mode 100644
index 00000000..7385c6bb
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/support.py
@@ -0,0 +1,210 @@
+"""Support code for distutils test cases."""
+import os
+import sys
+import shutil
+import tempfile
+import unittest
+import sysconfig
+from copy import deepcopy
+
+from distutils import log
+from distutils.log import DEBUG, INFO, WARN, ERROR, FATAL
+from distutils.core import Distribution
+
+
+class LoggingSilencer(object):
+
+ def setUp(self):
+ super().setUp()
+ self.threshold = log.set_threshold(log.FATAL)
+ # catching warnings
+ # when log will be replaced by logging
+ # we won't need such monkey-patch anymore
+ self._old_log = log.Log._log
+ log.Log._log = self._log
+ self.logs = []
+
+ def tearDown(self):
+ log.set_threshold(self.threshold)
+ log.Log._log = self._old_log
+ super().tearDown()
+
+ def _log(self, level, msg, args):
+ if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
+ raise ValueError('%s wrong log level' % str(level))
+ if not isinstance(msg, str):
+ raise TypeError("msg should be str, not '%.200s'"
+ % (type(msg).__name__))
+ self.logs.append((level, msg, args))
+
+ def get_logs(self, *levels):
+ def _format(msg, args):
+ return msg % args
+ return [msg % args for level, msg, args
+ in self.logs if level in levels]
+
+ def clear_logs(self):
+ self.logs = []
+
+
+class TempdirManager(object):
+ """Mix-in class that handles temporary directories for test cases.
+
+ This is intended to be used with unittest.TestCase.
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.old_cwd = os.getcwd()
+ self.tempdirs = []
+
+ def tearDown(self):
+ # Restore working dir, for Solaris and derivatives, where rmdir()
+ # on the current directory fails.
+ os.chdir(self.old_cwd)
+ super().tearDown()
+ while self.tempdirs:
+ d = self.tempdirs.pop()
+ shutil.rmtree(d, os.name in ('nt', 'cygwin'))
+
+ def mkdtemp(self):
+ """Create a temporary directory that will be cleaned up.
+
+ Returns the path of the directory.
+ """
+ d = tempfile.mkdtemp()
+ self.tempdirs.append(d)
+ return d
+
+ def write_file(self, path, content='xxx'):
+ """Writes a file in the given path.
+
+
+ path can be a string or a sequence.
+ """
+ if isinstance(path, (list, tuple)):
+ path = os.path.join(*path)
+ f = open(path, 'w')
+ try:
+ f.write(content)
+ finally:
+ f.close()
+
+ def create_dist(self, pkg_name='foo', **kw):
+ """Will generate a test environment.
+
+ This function creates:
+ - a Distribution instance using keywords
+ - a temporary directory with a package structure
+
+ It returns the package directory and the distribution
+ instance.
+ """
+ tmp_dir = self.mkdtemp()
+ pkg_dir = os.path.join(tmp_dir, pkg_name)
+ os.mkdir(pkg_dir)
+ dist = Distribution(attrs=kw)
+
+ return pkg_dir, dist
+
+
+class DummyCommand:
+ """Class to store options for retrieval via set_undefined_options()."""
+
+ def __init__(self, **kwargs):
+ for kw, val in kwargs.items():
+ setattr(self, kw, val)
+
+ def ensure_finalized(self):
+ pass
+
+
+class EnvironGuard(object):
+
+ def setUp(self):
+ super(EnvironGuard, self).setUp()
+ self.old_environ = deepcopy(os.environ)
+
+ def tearDown(self):
+ for key, value in self.old_environ.items():
+ if os.environ.get(key) != value:
+ os.environ[key] = value
+
+ for key in tuple(os.environ.keys()):
+ if key not in self.old_environ:
+ del os.environ[key]
+
+ super(EnvironGuard, self).tearDown()
+
+
+def copy_xxmodule_c(directory):
+ """Helper for tests that need the xxmodule.c source file.
+
+ Example use:
+
+ def test_compile(self):
+ copy_xxmodule_c(self.tmpdir)
+ self.assertIn('xxmodule.c', os.listdir(self.tmpdir))
+
+ If the source file can be found, it will be copied to *directory*. If not,
+ the test will be skipped. Errors during copy are not caught.
+ """
+ filename = _get_xxmodule_path()
+ if filename is None:
+ raise unittest.SkipTest('cannot find xxmodule.c (test must run in '
+ 'the python build dir)')
+ shutil.copy(filename, directory)
+
+
+def _get_xxmodule_path():
+ srcdir = sysconfig.get_config_var('srcdir')
+ candidates = [
+ # use installed copy if available
+ os.path.join(os.path.dirname(__file__), 'xxmodule.c'),
+ # otherwise try using copy from build directory
+ os.path.join(srcdir, 'Modules', 'xxmodule.c'),
+ # srcdir mysteriously can be $srcdir/Lib/distutils/tests when
+ # this file is run from its parent directory, so walk up the
+ # tree to find the real srcdir
+ os.path.join(srcdir, '..', '..', '..', 'Modules', 'xxmodule.c'),
+ ]
+ for path in candidates:
+ if os.path.exists(path):
+ return path
+
+
+def fixup_build_ext(cmd):
+ """Function needed to make build_ext tests pass.
+
+ When Python was built with --enable-shared on Unix, -L. is not enough to
+ find libpython.so, because regrtest runs in a tempdir, not in the
+ source directory where the .so lives.
+
+    When Python was built in debug mode on Windows, build_ext commands
+ need their debug attribute set, and it is not done automatically for
+ some reason.
+
+ This function handles both of these things. Example use:
+
+ cmd = build_ext(dist)
+ support.fixup_build_ext(cmd)
+ cmd.ensure_finalized()
+
+ Unlike most other Unix platforms, Mac OS X embeds absolute paths
+ to shared libraries into executables, so the fixup is not needed there.
+ """
+ if os.name == 'nt':
+ cmd.debug = sys.executable.endswith('_d.exe')
+ elif sysconfig.get_config_var('Py_ENABLE_SHARED'):
+ # To further add to the shared builds fun on Unix, we can't just add
+ # library_dirs to the Extension() instance because that doesn't get
+ # plumbed through to the final compiler command.
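+        # RUNSHARED typically looks like "LD_LIBRARY_PATH=/path/to/build"
+        # (the variable name differs per platform); everything after the
+        # first '=' is assumed to be an os.pathsep-separated list of
+        # directories that hold the shared libpython.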
+ runshared = sysconfig.get_config_var('RUNSHARED')
+ if runshared is None:
+ cmd.library_dirs = ['.']
+ else:
+ if sys.platform == 'darwin':
+ cmd.library_dirs = []
+ else:
+ name, equals, value = runshared.partition('=')
+ cmd.library_dirs = [d for d in value.split(os.pathsep) if d]
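+
+
+# Illustrative sketch of how the mixins above are typically combined on top
+# of unittest.TestCase; the class and test names below are invented for
+# demonstration only and are not used by the real test modules.
+class _ExampleSupportUsage(TempdirManager, EnvironGuard, unittest.TestCase):
+
+    def test_tempdir_and_environ(self):
+        tmp_dir = self.mkdtemp()             # removed automatically in tearDown()
+        self.write_file([tmp_dir, 'data.txt'], 'payload')
+        os.environ['HOME'] = tmp_dir         # restored automatically in tearDown()
+        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'data.txt')))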
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_archive_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_archive_util.py
new file mode 100644
index 00000000..e9aad0e4
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_archive_util.py
@@ -0,0 +1,394 @@
+# -*- coding: utf-8 -*-
+"""Tests for distutils.archive_util."""
+import unittest
+import os
+import sys
+import tarfile
+from os.path import splitdrive
+import warnings
+
+from distutils import archive_util
+from distutils.archive_util import (check_archive_formats, make_tarball,
+ make_zipfile, make_archive,
+ ARCHIVE_FORMATS)
+from distutils.spawn import find_executable, spawn
+from distutils.tests import support
+from test.support import check_warnings, run_unittest, patch, change_cwd
+
+try:
+ import grp
+ import pwd
+ UID_GID_SUPPORT = True
+except ImportError:
+ UID_GID_SUPPORT = False
+
+try:
+ import zipfile
+ ZIP_SUPPORT = True
+except ImportError:
+ ZIP_SUPPORT = find_executable('zip')
+
+try:
+ import zlib
+ ZLIB_SUPPORT = True
+except ImportError:
+ ZLIB_SUPPORT = False
+
+try:
+ import bz2
+except ImportError:
+ bz2 = None
+
+try:
+ import lzma
+except ImportError:
+ lzma = None
+
+def can_fs_encode(filename):
+ """
+ Return True if the filename can be saved in the file system.
+ """
+ if os.path.supports_unicode_filenames:
+ return True
+ try:
+ filename.encode(sys.getfilesystemencoding())
+ except UnicodeEncodeError:
+ return False
+ return True
+
+
+class ArchiveUtilTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_make_tarball(self, name='archive'):
+ # creating something to tar
+ tmpdir = self._create_files()
+ self._make_tarball(tmpdir, name, '.tar.gz')
+ # trying an uncompressed one
+ self._make_tarball(tmpdir, name, '.tar', compress=None)
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_make_tarball_gzip(self):
+ tmpdir = self._create_files()
+ self._make_tarball(tmpdir, 'archive', '.tar.gz', compress='gzip')
+
+ @unittest.skipUnless(bz2, 'Need bz2 support to run')
+ def test_make_tarball_bzip2(self):
+ tmpdir = self._create_files()
+ self._make_tarball(tmpdir, 'archive', '.tar.bz2', compress='bzip2')
+
+ @unittest.skipUnless(lzma, 'Need lzma support to run')
+ def test_make_tarball_xz(self):
+ tmpdir = self._create_files()
+ self._make_tarball(tmpdir, 'archive', '.tar.xz', compress='xz')
+
+    @unittest.skipUnless(can_fs_encode('årchiv'),
+                         'File system cannot handle this filename')
+    def test_make_tarball_latin1(self):
+        """
+        Mirror test_make_tarball, except filename contains latin characters.
+        """
+        self.test_make_tarball('årchiv') # note this isn't a real word
+
+    @unittest.skipUnless(can_fs_encode('のアーカイブ'),
+                         'File system cannot handle this filename')
+    def test_make_tarball_extended(self):
+        """
+        Mirror test_make_tarball, except filename contains extended
+        characters outside the latin charset.
+        """
+        self.test_make_tarball('のアーカイブ') # japanese for archive
+
+    def _make_tarball(self, tmpdir, target_name, suffix, **kwargs):
+        tmpdir2 = self.mkdtemp()
+        if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
+            self.skipTest("source and target should be on same drive")
+
+ base_name = os.path.join(tmpdir2, target_name)
+
+ # working with relative paths to avoid tar warnings
+ with change_cwd(tmpdir):
+ make_tarball(splitdrive(base_name)[1], 'dist', **kwargs)
+
+ # check if the compressed tarball was created
+ tarball = base_name + suffix
+ self.assertTrue(os.path.exists(tarball))
+ self.assertEqual(self._tarinfo(tarball), self._created_files)
+
+ def _tarinfo(self, path):
+ tar = tarfile.open(path)
+ try:
+ names = tar.getnames()
+ names.sort()
+ return names
+ finally:
+ tar.close()
+
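+    # zipfile.ZipFile.namelist() reports directory entries with a trailing
+    # slash, while tarfile.getnames() does not, hence the two expected lists.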
+ _zip_created_files = ['dist/', 'dist/file1', 'dist/file2',
+ 'dist/sub/', 'dist/sub/file3', 'dist/sub2/']
+ _created_files = [p.rstrip('/') for p in _zip_created_files]
+
+ def _create_files(self):
+ # creating something to tar
+ tmpdir = self.mkdtemp()
+ dist = os.path.join(tmpdir, 'dist')
+ os.mkdir(dist)
+ self.write_file([dist, 'file1'], 'xxx')
+ self.write_file([dist, 'file2'], 'xxx')
+ os.mkdir(os.path.join(dist, 'sub'))
+ self.write_file([dist, 'sub', 'file3'], 'xxx')
+ os.mkdir(os.path.join(dist, 'sub2'))
+ return tmpdir
+
+ @unittest.skipUnless(find_executable('tar') and find_executable('gzip')
+ and ZLIB_SUPPORT,
+ 'Need the tar, gzip and zlib command to run')
+ def test_tarfile_vs_tar(self):
+ tmpdir = self._create_files()
+ tmpdir2 = self.mkdtemp()
+ base_name = os.path.join(tmpdir2, 'archive')
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ try:
+ make_tarball(base_name, 'dist')
+ finally:
+ os.chdir(old_dir)
+
+ # check if the compressed tarball was created
+ tarball = base_name + '.tar.gz'
+ self.assertTrue(os.path.exists(tarball))
+
+ # now create another tarball using `tar`
+ tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
+ tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
+ gzip_cmd = ['gzip', '-f', '-9', 'archive2.tar']
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ try:
+ spawn(tar_cmd)
+ spawn(gzip_cmd)
+ finally:
+ os.chdir(old_dir)
+
+ self.assertTrue(os.path.exists(tarball2))
+ # let's compare both tarballs
+ self.assertEqual(self._tarinfo(tarball), self._created_files)
+ self.assertEqual(self._tarinfo(tarball2), self._created_files)
+
+ # trying an uncompressed one
+ base_name = os.path.join(tmpdir2, 'archive')
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ try:
+ make_tarball(base_name, 'dist', compress=None)
+ finally:
+ os.chdir(old_dir)
+ tarball = base_name + '.tar'
+ self.assertTrue(os.path.exists(tarball))
+
+ # now for a dry_run
+ base_name = os.path.join(tmpdir2, 'archive')
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ try:
+ make_tarball(base_name, 'dist', compress=None, dry_run=True)
+ finally:
+ os.chdir(old_dir)
+ tarball = base_name + '.tar'
+ self.assertTrue(os.path.exists(tarball))
+
+ @unittest.skipUnless(find_executable('compress'),
+ 'The compress program is required')
+ def test_compress_deprecated(self):
+ tmpdir = self._create_files()
+ base_name = os.path.join(self.mkdtemp(), 'archive')
+
+ # using compress and testing the PendingDeprecationWarning
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ try:
+ with check_warnings() as w:
+ warnings.simplefilter("always")
+ make_tarball(base_name, 'dist', compress='compress')
+ finally:
+ os.chdir(old_dir)
+ tarball = base_name + '.tar.Z'
+ self.assertTrue(os.path.exists(tarball))
+ self.assertEqual(len(w.warnings), 1)
+
+ # same test with dry_run
+ os.remove(tarball)
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ try:
+ with check_warnings() as w:
+ warnings.simplefilter("always")
+ make_tarball(base_name, 'dist', compress='compress',
+ dry_run=True)
+ finally:
+ os.chdir(old_dir)
+ self.assertFalse(os.path.exists(tarball))
+ self.assertEqual(len(w.warnings), 1)
+
+ @unittest.skipUnless(ZIP_SUPPORT and ZLIB_SUPPORT,
+ 'Need zip and zlib support to run')
+ def test_make_zipfile(self):
+ # creating something to tar
+ tmpdir = self._create_files()
+ base_name = os.path.join(self.mkdtemp(), 'archive')
+ with change_cwd(tmpdir):
+ make_zipfile(base_name, 'dist')
+
+        # check if the zip file was created
+ tarball = base_name + '.zip'
+ self.assertTrue(os.path.exists(tarball))
+ with zipfile.ZipFile(tarball) as zf:
+ self.assertEqual(sorted(zf.namelist()), self._zip_created_files)
+
+ @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
+ def test_make_zipfile_no_zlib(self):
+ patch(self, archive_util.zipfile, 'zlib', None) # force zlib ImportError
+
+ called = []
+ zipfile_class = zipfile.ZipFile
+ def fake_zipfile(*a, **kw):
+ if kw.get('compression', None) == zipfile.ZIP_STORED:
+ called.append((a, kw))
+ return zipfile_class(*a, **kw)
+
+ patch(self, archive_util.zipfile, 'ZipFile', fake_zipfile)
+
+ # create something to tar and compress
+ tmpdir = self._create_files()
+ base_name = os.path.join(self.mkdtemp(), 'archive')
+ with change_cwd(tmpdir):
+ make_zipfile(base_name, 'dist')
+
+ tarball = base_name + '.zip'
+ self.assertEqual(called,
+ [((tarball, "w"), {'compression': zipfile.ZIP_STORED})])
+ self.assertTrue(os.path.exists(tarball))
+ with zipfile.ZipFile(tarball) as zf:
+ self.assertEqual(sorted(zf.namelist()), self._zip_created_files)
+
+ def test_check_archive_formats(self):
+ self.assertEqual(check_archive_formats(['gztar', 'xxx', 'zip']),
+ 'xxx')
+ self.assertIsNone(check_archive_formats(['gztar', 'bztar', 'xztar',
+ 'ztar', 'tar', 'zip']))
+
+ def test_make_archive(self):
+ tmpdir = self.mkdtemp()
+ base_name = os.path.join(tmpdir, 'archive')
+ self.assertRaises(ValueError, make_archive, base_name, 'xxx')
+
+ def test_make_archive_cwd(self):
+ current_dir = os.getcwd()
+ def _breaks(*args, **kw):
+ raise RuntimeError()
+ ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
+ try:
+ try:
+ make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
+ except:
+ pass
+ self.assertEqual(os.getcwd(), current_dir)
+ finally:
+ del ARCHIVE_FORMATS['xxx']
+
+ def test_make_archive_tar(self):
+ base_dir = self._create_files()
+ base_name = os.path.join(self.mkdtemp() , 'archive')
+ res = make_archive(base_name, 'tar', base_dir, 'dist')
+ self.assertTrue(os.path.exists(res))
+ self.assertEqual(os.path.basename(res), 'archive.tar')
+ self.assertEqual(self._tarinfo(res), self._created_files)
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_make_archive_gztar(self):
+ base_dir = self._create_files()
+ base_name = os.path.join(self.mkdtemp() , 'archive')
+ res = make_archive(base_name, 'gztar', base_dir, 'dist')
+ self.assertTrue(os.path.exists(res))
+ self.assertEqual(os.path.basename(res), 'archive.tar.gz')
+ self.assertEqual(self._tarinfo(res), self._created_files)
+
+ @unittest.skipUnless(bz2, 'Need bz2 support to run')
+ def test_make_archive_bztar(self):
+ base_dir = self._create_files()
+ base_name = os.path.join(self.mkdtemp() , 'archive')
+ res = make_archive(base_name, 'bztar', base_dir, 'dist')
+ self.assertTrue(os.path.exists(res))
+ self.assertEqual(os.path.basename(res), 'archive.tar.bz2')
+ self.assertEqual(self._tarinfo(res), self._created_files)
+
+ @unittest.skipUnless(lzma, 'Need xz support to run')
+ def test_make_archive_xztar(self):
+ base_dir = self._create_files()
+ base_name = os.path.join(self.mkdtemp() , 'archive')
+ res = make_archive(base_name, 'xztar', base_dir, 'dist')
+ self.assertTrue(os.path.exists(res))
+ self.assertEqual(os.path.basename(res), 'archive.tar.xz')
+ self.assertEqual(self._tarinfo(res), self._created_files)
+
+ def test_make_archive_owner_group(self):
+ # testing make_archive with owner and group, with various combinations
+        # this works even if there's no gid/uid support
+ if UID_GID_SUPPORT:
+ group = grp.getgrgid(0)[0]
+ owner = pwd.getpwuid(0)[0]
+ else:
+ group = owner = 'root'
+
+ base_dir = self._create_files()
+ root_dir = self.mkdtemp()
+ base_name = os.path.join(self.mkdtemp() , 'archive')
+ res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
+ group=group)
+ self.assertTrue(os.path.exists(res))
+
+ res = make_archive(base_name, 'zip', root_dir, base_dir)
+ self.assertTrue(os.path.exists(res))
+
+ res = make_archive(base_name, 'tar', root_dir, base_dir,
+ owner=owner, group=group)
+ self.assertTrue(os.path.exists(res))
+
+ res = make_archive(base_name, 'tar', root_dir, base_dir,
+ owner='kjhkjhkjg', group='oihohoh')
+ self.assertTrue(os.path.exists(res))
+
+ @unittest.skipUnless(ZLIB_SUPPORT, "Requires zlib")
+ @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
+ def test_tarfile_root_owner(self):
+ tmpdir = self._create_files()
+ base_name = os.path.join(self.mkdtemp(), 'archive')
+ old_dir = os.getcwd()
+ os.chdir(tmpdir)
+ group = grp.getgrgid(0)[0]
+ owner = pwd.getpwuid(0)[0]
+ try:
+ archive_name = make_tarball(base_name, 'dist', compress=None,
+ owner=owner, group=group)
+ finally:
+ os.chdir(old_dir)
+
+ # check if the compressed tarball was created
+ self.assertTrue(os.path.exists(archive_name))
+
+        # now check the owner and group of the archive members
+ archive = tarfile.open(archive_name)
+ try:
+ for member in archive.getmembers():
+ self.assertEqual(member.uid, 0)
+ self.assertEqual(member.gid, 0)
+ finally:
+ archive.close()
+
+def test_suite():
+ return unittest.makeSuite(ArchiveUtilTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist.py
new file mode 100644
index 00000000..c80b3edc
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist.py
@@ -0,0 +1,53 @@
+"""Tests for distutils.command.bdist."""
+import os
+import unittest
+from test.support import run_unittest
+
+from distutils.command.bdist import bdist
+from distutils.tests import support
+
+
+class BuildTestCase(support.TempdirManager,
+ unittest.TestCase):
+
+ def test_formats(self):
+ # let's create a command and make sure
+ # we can set the format
+ dist = self.create_dist()[1]
+ cmd = bdist(dist)
+ cmd.formats = ['msi']
+ cmd.ensure_finalized()
+ self.assertEqual(cmd.formats, ['msi'])
+
+ # what formats does bdist offer?
+ formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
+ 'wininst', 'xztar', 'zip', 'ztar']
+ found = sorted(cmd.format_command)
+ self.assertEqual(found, formats)
+
+ def test_skip_build(self):
+ # bug #10946: bdist --skip-build should trickle down to subcommands
+ dist = self.create_dist()[1]
+ cmd = bdist(dist)
+ cmd.skip_build = 1
+ cmd.ensure_finalized()
+ dist.command_obj['bdist'] = cmd
+
+ names = ['bdist_dumb', 'bdist_wininst'] # bdist_rpm does not support --skip-build
+ if os.name == 'nt':
+ names.append('bdist_msi')
+
+ for name in names:
+ subcmd = cmd.get_finalized_command(name)
+ if getattr(subcmd, '_unsupported', False):
+ # command is not supported on this build
+ continue
+ self.assertTrue(subcmd.skip_build,
+ '%s should take --skip-build from bdist' % name)
+
+
+def test_suite():
+ return unittest.makeSuite(BuildTestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_dumb.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_dumb.py
new file mode 100644
index 00000000..01a233bc
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_dumb.py
@@ -0,0 +1,97 @@
+"""Tests for distutils.command.bdist_dumb."""
+
+import os
+import sys
+import zipfile
+import unittest
+from test.support import run_unittest
+
+from distutils.core import Distribution
+from distutils.command.bdist_dumb import bdist_dumb
+from distutils.tests import support
+
+SETUP_PY = """\
+from distutils.core import setup
+import foo
+
+setup(name='foo', version='0.1', py_modules=['foo'],
+ url='xxx', author='xxx', author_email='xxx')
+
+"""
+
+try:
+ import zlib
+ ZLIB_SUPPORT = True
+except ImportError:
+ ZLIB_SUPPORT = False
+
+
+class BuildDumbTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ support.EnvironGuard,
+ unittest.TestCase):
+
+ def setUp(self):
+ super(BuildDumbTestCase, self).setUp()
+ self.old_location = os.getcwd()
+ self.old_sys_argv = sys.argv, sys.argv[:]
+
+ def tearDown(self):
+ os.chdir(self.old_location)
+ sys.argv = self.old_sys_argv[0]
+ sys.argv[:] = self.old_sys_argv[1]
+ super(BuildDumbTestCase, self).tearDown()
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_simple_built(self):
+
+ # let's create a simple package
+ tmp_dir = self.mkdtemp()
+ pkg_dir = os.path.join(tmp_dir, 'foo')
+ os.mkdir(pkg_dir)
+ self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
+ self.write_file((pkg_dir, 'foo.py'), '#')
+ self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
+ self.write_file((pkg_dir, 'README'), '')
+
+ dist = Distribution({'name': 'foo', 'version': '0.1',
+ 'py_modules': ['foo'],
+ 'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx'})
+ dist.script_name = 'setup.py'
+ os.chdir(pkg_dir)
+
+ sys.argv = ['setup.py']
+ cmd = bdist_dumb(dist)
+
+ # so the output is the same no matter
+        # what the platform is
+ cmd.format = 'zip'
+
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # see what we have
+ dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
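+        # the archive is named "<fullname>.<plat_name>.zip",
+        # e.g. "foo-0.1.win-amd64.zip" on 64-bit Windows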
+ base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)
+
+ self.assertEqual(dist_created, [base])
+
+ # now let's check what we have in the zip file
+ fp = zipfile.ZipFile(os.path.join('dist', base))
+ try:
+ contents = fp.namelist()
+ finally:
+ fp.close()
+
+ contents = sorted(filter(None, map(os.path.basename, contents)))
+ wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
+ if not sys.dont_write_bytecode:
+ wanted.append('foo.%s.pyc' % sys.implementation.cache_tag)
+ self.assertEqual(contents, sorted(wanted))
+
+def test_suite():
+ return unittest.makeSuite(BuildDumbTestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_msi.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_msi.py
new file mode 100644
index 00000000..15d8bdff
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_msi.py
@@ -0,0 +1,25 @@
+"""Tests for distutils.command.bdist_msi."""
+import sys
+import unittest
+from test.support import run_unittest
+from distutils.tests import support
+
+
+@unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows')
+class BDistMSITestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_minimal(self):
+ # minimal test XXX need more tests
+ from distutils.command.bdist_msi import bdist_msi
+ project_dir, dist = self.create_dist()
+ cmd = bdist_msi(dist)
+ cmd.ensure_finalized()
+
+
+def test_suite():
+ return unittest.makeSuite(BDistMSITestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_rpm.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_rpm.py
new file mode 100644
index 00000000..6453a02b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_rpm.py
@@ -0,0 +1,135 @@
+"""Tests for distutils.command.bdist_rpm."""
+
+import unittest
+import sys
+import os
+from test.support import run_unittest, requires_zlib
+
+from distutils.core import Distribution
+from distutils.command.bdist_rpm import bdist_rpm
+from distutils.tests import support
+from distutils.spawn import find_executable
+
+SETUP_PY = """\
+from distutils.core import setup
+import foo
+
+setup(name='foo', version='0.1', py_modules=['foo'],
+ url='xxx', author='xxx', author_email='xxx')
+
+"""
+
+class BuildRpmTestCase(support.TempdirManager,
+ support.EnvironGuard,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def setUp(self):
+ try:
+ sys.executable.encode("UTF-8")
+ except UnicodeEncodeError:
+ raise unittest.SkipTest("sys.executable is not encodable to UTF-8")
+
+ super(BuildRpmTestCase, self).setUp()
+ self.old_location = os.getcwd()
+ self.old_sys_argv = sys.argv, sys.argv[:]
+
+ def tearDown(self):
+ os.chdir(self.old_location)
+ sys.argv = self.old_sys_argv[0]
+ sys.argv[:] = self.old_sys_argv[1]
+ super(BuildRpmTestCase, self).tearDown()
+
+    # XXX I am unable yet to make this test work without
+    # spurious stdout/stderr output under Mac OS X
+    @unittest.skipUnless(sys.platform.startswith('linux'),
+                         'spurious stdout/stderr output under Mac OS X')
+ @requires_zlib
+ @unittest.skipIf(find_executable('rpm') is None,
+ 'the rpm command is not found')
+ @unittest.skipIf(find_executable('rpmbuild') is None,
+ 'the rpmbuild command is not found')
+ def test_quiet(self):
+ # let's create a package
+ tmp_dir = self.mkdtemp()
+ os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation
+ pkg_dir = os.path.join(tmp_dir, 'foo')
+ os.mkdir(pkg_dir)
+ self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
+ self.write_file((pkg_dir, 'foo.py'), '#')
+ self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
+ self.write_file((pkg_dir, 'README'), '')
+
+ dist = Distribution({'name': 'foo', 'version': '0.1',
+ 'py_modules': ['foo'],
+ 'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx'})
+ dist.script_name = 'setup.py'
+ os.chdir(pkg_dir)
+
+ sys.argv = ['setup.py']
+ cmd = bdist_rpm(dist)
+ cmd.fix_python = True
+
+ # running in quiet mode
+ cmd.quiet = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+ dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
+ self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
+
+ # bug #2945: upload ignores bdist_rpm files
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
+
+    # XXX I am unable yet to make this test work without
+    # spurious stdout/stderr output under Mac OS X
+    @unittest.skipUnless(sys.platform.startswith('linux'),
+                         'spurious stdout/stderr output under Mac OS X')
+ @requires_zlib
+ # http://bugs.python.org/issue1533164
+ @unittest.skipIf(find_executable('rpm') is None,
+ 'the rpm command is not found')
+ @unittest.skipIf(find_executable('rpmbuild') is None,
+ 'the rpmbuild command is not found')
+ def test_no_optimize_flag(self):
+ # let's create a package that breaks bdist_rpm
+ tmp_dir = self.mkdtemp()
+ os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation
+ pkg_dir = os.path.join(tmp_dir, 'foo')
+ os.mkdir(pkg_dir)
+ self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
+ self.write_file((pkg_dir, 'foo.py'), '#')
+ self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
+ self.write_file((pkg_dir, 'README'), '')
+
+ dist = Distribution({'name': 'foo', 'version': '0.1',
+ 'py_modules': ['foo'],
+ 'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx'})
+ dist.script_name = 'setup.py'
+ os.chdir(pkg_dir)
+
+ sys.argv = ['setup.py']
+ cmd = bdist_rpm(dist)
+ cmd.fix_python = True
+
+ cmd.quiet = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+ dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
+ self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
+
+ # bug #2945: upload ignores bdist_rpm files
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
+
+ os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
+
+def test_suite():
+ return unittest.makeSuite(BuildRpmTestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_wininst.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_wininst.py
new file mode 100644
index 00000000..4c19bbab
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_bdist_wininst.py
@@ -0,0 +1,33 @@
+"""Tests for distutils.command.bdist_wininst."""
+import unittest
+from test.support import run_unittest
+
+from distutils.command.bdist_wininst import bdist_wininst
+from distutils.tests import support
+
+@unittest.skipIf(getattr(bdist_wininst, '_unsupported', False),
+ 'bdist_wininst is not supported in this install')
+class BuildWinInstTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_get_exe_bytes(self):
+
+ # issue5731: command was broken on non-windows platforms
+ # this test makes sure it works now for every platform
+ # let's create a command
+ pkg_pth, dist = self.create_dist()
+ cmd = bdist_wininst(dist)
+ cmd.ensure_finalized()
+
+ # let's run the code that finds the right wininst*.exe file
+ # and make sure it finds it and returns its content
+ # no matter what platform we have
+ exe_file = cmd.get_exe_bytes()
+ self.assertGreater(len(exe_file), 10)
+
+def test_suite():
+ return unittest.makeSuite(BuildWinInstTestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build.py
new file mode 100644
index 00000000..b020a5ba
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build.py
@@ -0,0 +1,56 @@
+"""Tests for distutils.command.build."""
+import unittest
+import os
+import sys
+from test.support import run_unittest
+
+from distutils.command.build import build
+from distutils.tests import support
+from sysconfig import get_platform
+
+class BuildTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_finalize_options(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = build(dist)
+ cmd.finalize_options()
+
+ # if not specified, plat_name gets the current platform
+ self.assertEqual(cmd.plat_name, get_platform())
+
+ # build_purelib is build + lib
+ wanted = os.path.join(cmd.build_base, 'lib')
+ self.assertEqual(cmd.build_purelib, wanted)
+
+ # build_platlib is 'build/lib.platform-x.x[-pydebug]'
+ # examples:
+ # build/lib.macosx-10.3-i386-2.7
+ plat_spec = '.%s-%d.%d' % (cmd.plat_name, *sys.version_info[:2])
+ if hasattr(sys, 'gettotalrefcount'):
+ self.assertTrue(cmd.build_platlib.endswith('-pydebug'))
+ plat_spec += '-pydebug'
+ wanted = os.path.join(cmd.build_base, 'lib' + plat_spec)
+ self.assertEqual(cmd.build_platlib, wanted)
+
+ # by default, build_lib = build_purelib
+ self.assertEqual(cmd.build_lib, cmd.build_purelib)
+
+        # build_temp is build/temp.<plat>
+ wanted = os.path.join(cmd.build_base, 'temp' + plat_spec)
+ self.assertEqual(cmd.build_temp, wanted)
+
+ # build_scripts is build/scripts-x.x
+ wanted = os.path.join(cmd.build_base,
+ 'scripts-%d.%d' % sys.version_info[:2])
+ self.assertEqual(cmd.build_scripts, wanted)
+
+ # executable is os.path.normpath(sys.executable)
+ self.assertEqual(cmd.executable, os.path.normpath(sys.executable))
+
+def test_suite():
+ return unittest.makeSuite(BuildTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_clib.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_clib.py
new file mode 100644
index 00000000..85d09906
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_clib.py
@@ -0,0 +1,135 @@
+"""Tests for distutils.command.build_clib."""
+import unittest
+import os
+import sys
+
+from test.support import run_unittest, missing_compiler_executable
+
+from distutils.command.build_clib import build_clib
+from distutils.errors import DistutilsSetupError
+from distutils.tests import support
+from distutils.spawn import find_executable
+
+class BuildCLibTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_check_library_dist(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = build_clib(dist)
+
+ # 'libraries' option must be a list
+ self.assertRaises(DistutilsSetupError, cmd.check_library_list, 'foo')
+
+        # each element of 'libraries' must be a 2-tuple
+ self.assertRaises(DistutilsSetupError, cmd.check_library_list,
+ ['foo1', 'foo2'])
+
+ # first element of each tuple in 'libraries'
+ # must be a string (the library name)
+ self.assertRaises(DistutilsSetupError, cmd.check_library_list,
+ [(1, 'foo1'), ('name', 'foo2')])
+
+ # library name may not contain directory separators
+ self.assertRaises(DistutilsSetupError, cmd.check_library_list,
+ [('name', 'foo1'),
+ ('another/name', 'foo2')])
+
+ # second element of each tuple must be a dictionary (build info)
+ self.assertRaises(DistutilsSetupError, cmd.check_library_list,
+ [('name', {}),
+ ('another', 'foo2')])
+
+ # those work
+ libs = [('name', {}), ('name', {'ok': 'good'})]
+ cmd.check_library_list(libs)
+
+ def test_get_source_files(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = build_clib(dist)
+
+ # "in 'libraries' option 'sources' must be present and must be
+ # a list of source filenames
+ cmd.libraries = [('name', {})]
+ self.assertRaises(DistutilsSetupError, cmd.get_source_files)
+
+ cmd.libraries = [('name', {'sources': 1})]
+ self.assertRaises(DistutilsSetupError, cmd.get_source_files)
+
+ cmd.libraries = [('name', {'sources': ['a', 'b']})]
+ self.assertEqual(cmd.get_source_files(), ['a', 'b'])
+
+ cmd.libraries = [('name', {'sources': ('a', 'b')})]
+ self.assertEqual(cmd.get_source_files(), ['a', 'b'])
+
+ cmd.libraries = [('name', {'sources': ('a', 'b')}),
+ ('name2', {'sources': ['c', 'd']})]
+ self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd'])
+
+ def test_build_libraries(self):
+
+ pkg_dir, dist = self.create_dist()
+ cmd = build_clib(dist)
+ class FakeCompiler:
+ def compile(*args, **kw):
+ pass
+ create_static_lib = compile
+
+ cmd.compiler = FakeCompiler()
+
+ # build_libraries is also doing a bit of typo checking
+ lib = [('name', {'sources': 'notvalid'})]
+ self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib)
+
+ lib = [('name', {'sources': list()})]
+ cmd.build_libraries(lib)
+
+ lib = [('name', {'sources': tuple()})]
+ cmd.build_libraries(lib)
+
+ def test_finalize_options(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = build_clib(dist)
+
+ cmd.include_dirs = 'one-dir'
+ cmd.finalize_options()
+ self.assertEqual(cmd.include_dirs, ['one-dir'])
+
+ cmd.include_dirs = None
+ cmd.finalize_options()
+ self.assertEqual(cmd.include_dirs, [])
+
+ cmd.distribution.libraries = 'WONTWORK'
+ self.assertRaises(DistutilsSetupError, cmd.finalize_options)
+
+ @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
+ def test_run(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = build_clib(dist)
+
+ foo_c = os.path.join(pkg_dir, 'foo.c')
+ self.write_file(foo_c, 'int main(void) { return 1;}\n')
+ cmd.libraries = [('foo', {'sources': [foo_c]})]
+
+ build_temp = os.path.join(pkg_dir, 'build')
+ os.mkdir(build_temp)
+ cmd.build_temp = build_temp
+ cmd.build_clib = build_temp
+
+ # Before we run the command, we want to make sure
+ # all commands are present on the system.
+ ccmd = missing_compiler_executable()
+ if ccmd is not None:
+ self.skipTest('The %r command is not found' % ccmd)
+
+ # this should work
+ cmd.run()
+
+ # let's check the result
+ self.assertIn('libfoo.a', os.listdir(build_temp))
+
+def test_suite():
+ return unittest.makeSuite(BuildCLibTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_ext.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_ext.py
new file mode 100644
index 00000000..a7221827
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_ext.py
@@ -0,0 +1,521 @@
+import sys
+import os
+from io import StringIO
+import textwrap
+
+from distutils.core import Distribution
+from distutils.command.build_ext import build_ext
+from distutils import sysconfig
+from distutils.tests.support import (TempdirManager, LoggingSilencer,
+ copy_xxmodule_c, fixup_build_ext)
+from distutils.extension import Extension
+from distutils.errors import (
+ CompileError, DistutilsPlatformError, DistutilsSetupError,
+ UnknownFileError)
+
+import unittest
+from test import support
+
+# http://bugs.python.org/issue4373
+# Don't load the xx module more than once.
+ALREADY_TESTED = False
+
+
+class BuildExtTestCase(TempdirManager,
+ LoggingSilencer,
+ unittest.TestCase):
+ def setUp(self):
+ # Create a simple test environment
+ # Note that we're making changes to sys.path
+ super(BuildExtTestCase, self).setUp()
+ self.tmp_dir = self.mkdtemp()
+ self.sys_path = sys.path, sys.path[:]
+ sys.path.append(self.tmp_dir)
+ import site
+ self.old_user_base = site.USER_BASE
+ site.USER_BASE = self.mkdtemp()
+ from distutils.command import build_ext
+ build_ext.USER_BASE = site.USER_BASE
+
+ # bpo-30132: On Windows, a .pdb file may be created in the current
+        # working directory. Create a temporary working directory to clean up
+ # everything at the end of the test.
+ self.temp_cwd = support.temp_cwd()
+ self.temp_cwd.__enter__()
+ self.addCleanup(self.temp_cwd.__exit__, None, None, None)
+
+ def tearDown(self):
+ # Get everything back to normal
+ support.unload('xx')
+ sys.path = self.sys_path[0]
+ sys.path[:] = self.sys_path[1]
+ import site
+ site.USER_BASE = self.old_user_base
+ from distutils.command import build_ext
+ build_ext.USER_BASE = self.old_user_base
+ super(BuildExtTestCase, self).tearDown()
+
+ def build_ext(self, *args, **kwargs):
+ return build_ext(*args, **kwargs)
+
+ def test_build_ext(self):
+ cmd = support.missing_compiler_executable()
+ if cmd is not None:
+ self.skipTest('The %r command is not found' % cmd)
+ global ALREADY_TESTED
+ copy_xxmodule_c(self.tmp_dir)
+ xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
+ xx_ext = Extension('xx', [xx_c])
+ dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]})
+ dist.package_dir = self.tmp_dir
+ cmd = self.build_ext(dist)
+ fixup_build_ext(cmd)
+ cmd.build_lib = self.tmp_dir
+ cmd.build_temp = self.tmp_dir
+
+ old_stdout = sys.stdout
+ if not support.verbose:
+ # silence compiler output
+ sys.stdout = StringIO()
+ try:
+ cmd.ensure_finalized()
+ cmd.run()
+ finally:
+ sys.stdout = old_stdout
+
+ if ALREADY_TESTED:
+ self.skipTest('Already tested in %s' % ALREADY_TESTED)
+ else:
+ ALREADY_TESTED = type(self).__name__
+
+ import xx
+
+ for attr in ('error', 'foo', 'new', 'roj'):
+ self.assertTrue(hasattr(xx, attr))
+
+ self.assertEqual(xx.foo(2, 5), 7)
+ self.assertEqual(xx.foo(13,15), 28)
+ self.assertEqual(xx.new().demo(), None)
+ if support.HAVE_DOCSTRINGS:
+ doc = 'This is a template module just for instruction.'
+ self.assertEqual(xx.__doc__, doc)
+ self.assertIsInstance(xx.Null(), xx.Null)
+ self.assertIsInstance(xx.Str(), xx.Str)
+
+ def test_solaris_enable_shared(self):
+ dist = Distribution({'name': 'xx'})
+ cmd = self.build_ext(dist)
+ old = sys.platform
+
+ sys.platform = 'sunos' # fooling finalize_options
+ from distutils.sysconfig import _config_vars
+ old_var = _config_vars.get('Py_ENABLE_SHARED')
+ _config_vars['Py_ENABLE_SHARED'] = 1
+ try:
+ cmd.ensure_finalized()
+ finally:
+ sys.platform = old
+ if old_var is None:
+ del _config_vars['Py_ENABLE_SHARED']
+ else:
+ _config_vars['Py_ENABLE_SHARED'] = old_var
+
+ # make sure we get some library dirs under solaris
+ self.assertGreater(len(cmd.library_dirs), 0)
+
+ def test_user_site(self):
+ import site
+ dist = Distribution({'name': 'xx'})
+ cmd = self.build_ext(dist)
+
+ # making sure the user option is there
+        options = [name for name, short, label in
+ cmd.user_options]
+ self.assertIn('user', options)
+
+ # setting a value
+ cmd.user = 1
+
+ # setting user based lib and include
+ lib = os.path.join(site.USER_BASE, 'lib')
+ incl = os.path.join(site.USER_BASE, 'include')
+ os.mkdir(lib)
+ os.mkdir(incl)
+
+ # let's run finalize
+ cmd.ensure_finalized()
+
+ # see if include_dirs and library_dirs
+ # were set
+ self.assertIn(lib, cmd.library_dirs)
+ self.assertIn(lib, cmd.rpath)
+ self.assertIn(incl, cmd.include_dirs)
+
+ def test_optional_extension(self):
+
+ # this extension will fail, but let's ignore this failure
+ # with the optional argument.
+ modules = [Extension('foo', ['xxx'], optional=False)]
+ dist = Distribution({'name': 'xx', 'ext_modules': modules})
+ cmd = self.build_ext(dist)
+ cmd.ensure_finalized()
+ self.assertRaises((UnknownFileError, CompileError),
+ cmd.run) # should raise an error
+
+ modules = [Extension('foo', ['xxx'], optional=True)]
+ dist = Distribution({'name': 'xx', 'ext_modules': modules})
+ cmd = self.build_ext(dist)
+ cmd.ensure_finalized()
+ cmd.run() # should pass
+
+ def test_finalize_options(self):
+ # Make sure Python's include directories (for Python.h, pyconfig.h,
+ # etc.) are in the include search path.
+ modules = [Extension('foo', ['xxx'], optional=False)]
+ dist = Distribution({'name': 'xx', 'ext_modules': modules})
+ cmd = self.build_ext(dist)
+ cmd.finalize_options()
+
+ py_include = sysconfig.get_python_inc()
+ self.assertIn(py_include, cmd.include_dirs)
+
+ plat_py_include = sysconfig.get_python_inc(plat_specific=1)
+ self.assertIn(plat_py_include, cmd.include_dirs)
+
+ # make sure cmd.libraries is turned into a list
+ # if it's a string
+ cmd = self.build_ext(dist)
+ cmd.libraries = 'my_lib, other_lib lastlib'
+ cmd.finalize_options()
+ self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib'])
+
+ # make sure cmd.library_dirs is turned into a list
+ # if it's a string
+ cmd = self.build_ext(dist)
+ cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep
+ cmd.finalize_options()
+ self.assertIn('my_lib_dir', cmd.library_dirs)
+ self.assertIn('other_lib_dir', cmd.library_dirs)
+
+ # make sure rpath is turned into a list
+ # if it's a string
+ cmd = self.build_ext(dist)
+ cmd.rpath = 'one%stwo' % os.pathsep
+ cmd.finalize_options()
+ self.assertEqual(cmd.rpath, ['one', 'two'])
+
+ # make sure cmd.link_objects is turned into a list
+ # if it's a string
+ cmd = build_ext(dist)
+ cmd.link_objects = 'one two,three'
+ cmd.finalize_options()
+ self.assertEqual(cmd.link_objects, ['one', 'two', 'three'])
+
+ # XXX more tests to perform for win32
+
+ # make sure define is turned into 2-tuples
+ # strings if they are ','-separated strings
+ cmd = self.build_ext(dist)
+ cmd.define = 'one,two'
+ cmd.finalize_options()
+ self.assertEqual(cmd.define, [('one', '1'), ('two', '1')])
+
+ # make sure undef is turned into a list of
+ # strings if they are ','-separated strings
+ cmd = self.build_ext(dist)
+ cmd.undef = 'one,two'
+ cmd.finalize_options()
+ self.assertEqual(cmd.undef, ['one', 'two'])
+
+ # make sure swig_opts is turned into a list
+ cmd = self.build_ext(dist)
+ cmd.swig_opts = None
+ cmd.finalize_options()
+ self.assertEqual(cmd.swig_opts, [])
+
+ cmd = self.build_ext(dist)
+ cmd.swig_opts = '1 2'
+ cmd.finalize_options()
+ self.assertEqual(cmd.swig_opts, ['1', '2'])
+
+ def test_check_extensions_list(self):
+ dist = Distribution()
+ cmd = self.build_ext(dist)
+ cmd.finalize_options()
+
+ #'extensions' option must be a list of Extension instances
+ self.assertRaises(DistutilsSetupError,
+ cmd.check_extensions_list, 'foo')
+
+ # each element of 'ext_modules' option must be an
+ # Extension instance or 2-tuple
+ exts = [('bar', 'foo', 'bar'), 'foo']
+ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
+
+ # first element of each tuple in 'ext_modules'
+ # must be the extension name (a string) and match
+ # a python dotted-separated name
+ exts = [('foo-bar', '')]
+ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
+
+ # second element of each tuple in 'ext_modules'
+ # must be a dictionary (build info)
+ exts = [('foo.bar', '')]
+ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
+
+ # ok this one should pass
+ exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
+ 'some': 'bar'})]
+ cmd.check_extensions_list(exts)
+ ext = exts[0]
+ self.assertIsInstance(ext, Extension)
+
+ # check_extensions_list adds in ext the values passed
+ # when they are in ('include_dirs', 'library_dirs', 'libraries'
+ # 'extra_objects', 'extra_compile_args', 'extra_link_args')
+ self.assertEqual(ext.libraries, 'foo')
+ self.assertFalse(hasattr(ext, 'some'))
+
+ # 'macros' element of build info dict must be 1- or 2-tuple
+ exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
+ 'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})]
+ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)
+
+ exts[0][1]['macros'] = [('1', '2'), ('3',)]
+ cmd.check_extensions_list(exts)
+ self.assertEqual(exts[0].undef_macros, ['3'])
+ self.assertEqual(exts[0].define_macros, [('1', '2')])
+
+ def test_get_source_files(self):
+ modules = [Extension('foo', ['xxx'], optional=False)]
+ dist = Distribution({'name': 'xx', 'ext_modules': modules})
+ cmd = self.build_ext(dist)
+ cmd.ensure_finalized()
+ self.assertEqual(cmd.get_source_files(), ['xxx'])
+
+ def test_compiler_option(self):
+ # cmd.compiler is an option and
+ # should not be overridden by a compiler instance
+ # when the command is run
+ dist = Distribution()
+ cmd = self.build_ext(dist)
+ cmd.compiler = 'unix'
+ cmd.ensure_finalized()
+ cmd.run()
+ self.assertEqual(cmd.compiler, 'unix')
+
+ def test_get_outputs(self):
+ cmd = support.missing_compiler_executable()
+ if cmd is not None:
+ self.skipTest('The %r command is not found' % cmd)
+ tmp_dir = self.mkdtemp()
+ c_file = os.path.join(tmp_dir, 'foo.c')
+ self.write_file(c_file, 'void PyInit_foo(void) {}\n')
+ ext = Extension('foo', [c_file], optional=False)
+ dist = Distribution({'name': 'xx',
+ 'ext_modules': [ext]})
+ cmd = self.build_ext(dist)
+ fixup_build_ext(cmd)
+ cmd.ensure_finalized()
+ self.assertEqual(len(cmd.get_outputs()), 1)
+
+ cmd.build_lib = os.path.join(self.tmp_dir, 'build')
+ cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')
+
+ # issue #5977 : distutils build_ext.get_outputs
+ # returns wrong result with --inplace
+ other_tmp_dir = os.path.realpath(self.mkdtemp())
+ old_wd = os.getcwd()
+ os.chdir(other_tmp_dir)
+ try:
+ cmd.inplace = 1
+ cmd.run()
+ so_file = cmd.get_outputs()[0]
+ finally:
+ os.chdir(old_wd)
+ self.assertTrue(os.path.exists(so_file))
+ ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
+ self.assertTrue(so_file.endswith(ext_suffix))
+ so_dir = os.path.dirname(so_file)
+ self.assertEqual(so_dir, other_tmp_dir)
+
+ cmd.inplace = 0
+ cmd.compiler = None
+ cmd.run()
+ so_file = cmd.get_outputs()[0]
+ self.assertTrue(os.path.exists(so_file))
+ self.assertTrue(so_file.endswith(ext_suffix))
+ so_dir = os.path.dirname(so_file)
+ self.assertEqual(so_dir, cmd.build_lib)
+
+ # inplace = 0, cmd.package = 'bar'
+ build_py = cmd.get_finalized_command('build_py')
+ build_py.package_dir = {'': 'bar'}
+ path = cmd.get_ext_fullpath('foo')
+ # checking that the last directory is the build_dir
+ path = os.path.split(path)[0]
+ self.assertEqual(path, cmd.build_lib)
+
+ # inplace = 1, cmd.package = 'bar'
+ cmd.inplace = 1
+ other_tmp_dir = os.path.realpath(self.mkdtemp())
+ old_wd = os.getcwd()
+ os.chdir(other_tmp_dir)
+ try:
+ path = cmd.get_ext_fullpath('foo')
+ finally:
+ os.chdir(old_wd)
+ # checking that the last directory is bar
+ path = os.path.split(path)[0]
+ lastdir = os.path.split(path)[-1]
+ self.assertEqual(lastdir, 'bar')
+
+ def test_ext_fullpath(self):
+ ext = sysconfig.get_config_var('EXT_SUFFIX')
+ # building lxml.etree inplace
+ #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
+ #etree_ext = Extension('lxml.etree', [etree_c])
+ #dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
+ dist = Distribution()
+ cmd = self.build_ext(dist)
+ cmd.inplace = 1
+ cmd.distribution.package_dir = {'': 'src'}
+ cmd.distribution.packages = ['lxml', 'lxml.html']
+ curdir = os.getcwd()
+ wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
+ path = cmd.get_ext_fullpath('lxml.etree')
+ self.assertEqual(wanted, path)
+
+ # building lxml.etree not inplace
+ cmd.inplace = 0
+ cmd.build_lib = os.path.join(curdir, 'tmpdir')
+ wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
+ path = cmd.get_ext_fullpath('lxml.etree')
+ self.assertEqual(wanted, path)
+
+ # building twisted.runner.portmap not inplace
+ build_py = cmd.get_finalized_command('build_py')
+ build_py.package_dir = {}
+ cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
+ path = cmd.get_ext_fullpath('twisted.runner.portmap')
+ wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner',
+ 'portmap' + ext)
+ self.assertEqual(wanted, path)
+
+ # building twisted.runner.portmap inplace
+ cmd.inplace = 1
+ path = cmd.get_ext_fullpath('twisted.runner.portmap')
+ wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
+ self.assertEqual(wanted, path)
+
+
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
+ def test_deployment_target_default(self):
+ # Issue 9516: Test that, in the absence of the environment variable,
+ # an extension module is compiled with the same deployment target as
+ # the interpreter.
+ self._try_compile_deployment_target('==', None)
+
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
+ def test_deployment_target_too_low(self):
+ # Issue 9516: Test that an extension module is not allowed to be
+ # compiled with a deployment target less than that of the interpreter.
+ self.assertRaises(DistutilsPlatformError,
+ self._try_compile_deployment_target, '>', '10.1')
+
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
+ def test_deployment_target_higher_ok(self):
+ # Issue 9516: Test that an extension module can be compiled with a
+ # deployment target higher than that of the interpreter: the ext
+ # module may depend on some newer OS feature.
+ deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+ if deptarget:
+ # increment the minor version number (i.e. 10.6 -> 10.7)
+ deptarget = [int(x) for x in deptarget.split('.')]
+ deptarget[-1] += 1
+ deptarget = '.'.join(str(i) for i in deptarget)
+ self._try_compile_deployment_target('<', deptarget)
+
+ def _try_compile_deployment_target(self, operator, target):
+ orig_environ = os.environ
+ os.environ = orig_environ.copy()
+ self.addCleanup(setattr, os, 'environ', orig_environ)
+
+ if target is None:
+ if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
+ del os.environ['MACOSX_DEPLOYMENT_TARGET']
+ else:
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
+
+ deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')
+
+ with open(deptarget_c, 'w') as fp:
+ fp.write(textwrap.dedent('''\
+                #include <AvailabilityMacros.h>
+
+ int dummy;
+
+ #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED
+ #else
+ #error "Unexpected target"
+ #endif
+
+ ''' % operator))
+
+ # get the deployment target that the interpreter was built with
+ target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+ target = tuple(map(int, target.split('.')[0:2]))
+ # format the target value as defined in the Apple
+ # Availability Macros. We can't use the macro names since
+ # at least one value we test with will not exist yet.
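+        # e.g. a target of 10.6 becomes "1060", and 10.14 becomes "101400"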
+ if target[1] < 10:
+ # for 10.1 through 10.9.x -> "10n0"
+ target = '%02d%01d0' % target
+ else:
+ # for 10.10 and beyond -> "10nn00"
+ target = '%02d%02d00' % target
+ deptarget_ext = Extension(
+ 'deptarget',
+ [deptarget_c],
+ extra_compile_args=['-DTARGET=%s'%(target,)],
+ )
+ dist = Distribution({
+ 'name': 'deptarget',
+ 'ext_modules': [deptarget_ext]
+ })
+ dist.package_dir = self.tmp_dir
+ cmd = self.build_ext(dist)
+ cmd.build_lib = self.tmp_dir
+ cmd.build_temp = self.tmp_dir
+
+ try:
+ old_stdout = sys.stdout
+ if not support.verbose:
+ # silence compiler output
+ sys.stdout = StringIO()
+ try:
+ cmd.ensure_finalized()
+ cmd.run()
+ finally:
+ sys.stdout = old_stdout
+
+ except CompileError:
+ self.fail("Wrong deployment target during compilation")
+
+
+class ParallelBuildExtTestCase(BuildExtTestCase):
+
+ def build_ext(self, *args, **kwargs):
+ build_ext = super().build_ext(*args, **kwargs)
+ build_ext.parallel = True
+ return build_ext
+
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(BuildExtTestCase))
+ suite.addTest(unittest.makeSuite(ParallelBuildExtTestCase))
+ return suite
+
+if __name__ == '__main__':
+ support.run_unittest(__name__)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_py.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_py.py
new file mode 100644
index 00000000..0712e92c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_py.py
@@ -0,0 +1,179 @@
+"""Tests for distutils.command.build_py."""
+
+import os
+import sys
+import unittest
+
+from distutils.command.build_py import build_py
+from distutils.core import Distribution
+from distutils.errors import DistutilsFileError
+
+from distutils.tests import support
+from test.support import run_unittest
+
+
+class BuildPyTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_package_data(self):
+ sources = self.mkdtemp()
+ f = open(os.path.join(sources, "__init__.py"), "w")
+ try:
+ f.write("# Pretend this is a package.")
+ finally:
+ f.close()
+ f = open(os.path.join(sources, "README.txt"), "w")
+ try:
+ f.write("Info about this package")
+ finally:
+ f.close()
+
+ destination = self.mkdtemp()
+
+ dist = Distribution({"packages": ["pkg"],
+ "package_dir": {"pkg": sources}})
+        # script_name need not exist, it just needs to be initialized
+ dist.script_name = os.path.join(sources, "setup.py")
+ dist.command_obj["build"] = support.DummyCommand(
+ force=0,
+ build_lib=destination)
+ dist.packages = ["pkg"]
+ dist.package_data = {"pkg": ["README.txt"]}
+ dist.package_dir = {"pkg": sources}
+
+ cmd = build_py(dist)
+ cmd.compile = 1
+ cmd.ensure_finalized()
+ self.assertEqual(cmd.package_data, dist.package_data)
+
+ cmd.run()
+
+ # This makes sure the list of outputs includes byte-compiled
+ # files for Python modules but not for package data files
+ # (there shouldn't *be* byte-code files for those!).
+ self.assertEqual(len(cmd.get_outputs()), 3)
+ pkgdest = os.path.join(destination, "pkg")
+ files = os.listdir(pkgdest)
+ pycache_dir = os.path.join(pkgdest, "__pycache__")
+ self.assertIn("__init__.py", files)
+ self.assertIn("README.txt", files)
+ if sys.dont_write_bytecode:
+ self.assertFalse(os.path.exists(pycache_dir))
+ else:
+ pyc_files = os.listdir(pycache_dir)
+ self.assertIn("__init__.%s.pyc" % sys.implementation.cache_tag,
+ pyc_files)
+
+ def test_empty_package_dir(self):
+ # See bugs #1668596/#1720897
+ sources = self.mkdtemp()
+ open(os.path.join(sources, "__init__.py"), "w").close()
+
+ testdir = os.path.join(sources, "doc")
+ os.mkdir(testdir)
+ open(os.path.join(testdir, "testfile"), "w").close()
+
+ os.chdir(sources)
+ dist = Distribution({"packages": ["pkg"],
+ "package_dir": {"pkg": ""},
+ "package_data": {"pkg": ["doc/*"]}})
+        # script_name need not exist, it just needs to be initialized
+ dist.script_name = os.path.join(sources, "setup.py")
+ dist.script_args = ["build"]
+ dist.parse_command_line()
+
+ try:
+ dist.run_commands()
+ except DistutilsFileError:
+ self.fail("failed package_data test when package_dir is ''")
+
+ @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
+ def test_byte_compile(self):
+ project_dir, dist = self.create_dist(py_modules=['boiledeggs'])
+ os.chdir(project_dir)
+ self.write_file('boiledeggs.py', 'import antigravity')
+ cmd = build_py(dist)
+ cmd.compile = 1
+ cmd.build_lib = 'here'
+ cmd.finalize_options()
+ cmd.run()
+
+ found = os.listdir(cmd.build_lib)
+ self.assertEqual(sorted(found), ['__pycache__', 'boiledeggs.py'])
+ found = os.listdir(os.path.join(cmd.build_lib, '__pycache__'))
+ self.assertEqual(found,
+ ['boiledeggs.%s.pyc' % sys.implementation.cache_tag])
+
+ @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
+ def test_byte_compile_optimized(self):
+ project_dir, dist = self.create_dist(py_modules=['boiledeggs'])
+ os.chdir(project_dir)
+ self.write_file('boiledeggs.py', 'import antigravity')
+ cmd = build_py(dist)
+ cmd.compile = 0
+ cmd.optimize = 1
+ cmd.build_lib = 'here'
+ cmd.finalize_options()
+ cmd.run()
+
+ found = os.listdir(cmd.build_lib)
+ self.assertEqual(sorted(found), ['__pycache__', 'boiledeggs.py'])
+ found = os.listdir(os.path.join(cmd.build_lib, '__pycache__'))
+ expect = 'boiledeggs.{}.opt-1.pyc'.format(sys.implementation.cache_tag)
+ self.assertEqual(sorted(found), [expect])
+
+ def test_dir_in_package_data(self):
+ """
+ A directory in package_data should not be added to the filelist.
+ """
+ # See bug 19286
+ sources = self.mkdtemp()
+ pkg_dir = os.path.join(sources, "pkg")
+
+ os.mkdir(pkg_dir)
+ open(os.path.join(pkg_dir, "__init__.py"), "w").close()
+
+ docdir = os.path.join(pkg_dir, "doc")
+ os.mkdir(docdir)
+ open(os.path.join(docdir, "testfile"), "w").close()
+
+ # create the directory that could be incorrectly detected as a file
+ os.mkdir(os.path.join(docdir, 'otherdir'))
+
+ os.chdir(sources)
+ dist = Distribution({"packages": ["pkg"],
+ "package_data": {"pkg": ["doc/*"]}})
+        # script_name need not exist, it just needs to be initialized
+ dist.script_name = os.path.join(sources, "setup.py")
+ dist.script_args = ["build"]
+ dist.parse_command_line()
+
+ try:
+ dist.run_commands()
+ except DistutilsFileError:
+ self.fail("failed package_data when data dir includes a dir")
+
+ def test_dont_write_bytecode(self):
+ # makes sure byte_compile is not used
+ dist = self.create_dist()[1]
+ cmd = build_py(dist)
+ cmd.compile = 1
+ cmd.optimize = 1
+
+ old_dont_write_bytecode = sys.dont_write_bytecode
+ sys.dont_write_bytecode = True
+ try:
+ cmd.byte_compile([])
+ finally:
+ sys.dont_write_bytecode = old_dont_write_bytecode
+
+ self.assertIn('byte-compiling is disabled',
+ self.logs[0][1] % self.logs[0][2])
+
+
+def test_suite():
+ return unittest.makeSuite(BuildPyTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_scripts.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_scripts.py
new file mode 100644
index 00000000..954fc763
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_build_scripts.py
@@ -0,0 +1,112 @@
+"""Tests for distutils.command.build_scripts."""
+
+import os
+import unittest
+
+from distutils.command.build_scripts import build_scripts
+from distutils.core import Distribution
+from distutils import sysconfig
+
+from distutils.tests import support
+from test.support import run_unittest
+
+
+class BuildScriptsTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_default_settings(self):
+ cmd = self.get_build_scripts_cmd("/foo/bar", [])
+ self.assertFalse(cmd.force)
+ self.assertIsNone(cmd.build_dir)
+
+ cmd.finalize_options()
+
+ self.assertTrue(cmd.force)
+ self.assertEqual(cmd.build_dir, "/foo/bar")
+
+ def test_build(self):
+ source = self.mkdtemp()
+ target = self.mkdtemp()
+ expected = self.write_sample_scripts(source)
+
+ cmd = self.get_build_scripts_cmd(target,
+ [os.path.join(source, fn)
+ for fn in expected])
+ cmd.finalize_options()
+ cmd.run()
+
+ built = os.listdir(target)
+ for name in expected:
+ self.assertIn(name, built)
+
+ def get_build_scripts_cmd(self, target, scripts):
+ import sys
+ dist = Distribution()
+ dist.scripts = scripts
+ dist.command_obj["build"] = support.DummyCommand(
+ build_scripts=target,
+ force=1,
+ executable=sys.executable
+ )
+ return build_scripts(dist)
+
+ def write_sample_scripts(self, dir):
+ expected = []
+ expected.append("script1.py")
+ self.write_script(dir, "script1.py",
+ ("#! /usr/bin/env python2.3\n"
+ "# bogus script w/ Python sh-bang\n"
+ "pass\n"))
+ expected.append("script2.py")
+ self.write_script(dir, "script2.py",
+ ("#!/usr/bin/python\n"
+ "# bogus script w/ Python sh-bang\n"
+ "pass\n"))
+ expected.append("shell.sh")
+ self.write_script(dir, "shell.sh",
+ ("#!/bin/sh\n"
+ "# bogus shell script w/ sh-bang\n"
+ "exit 0\n"))
+ return expected
+
+ def write_script(self, dir, name, text):
+ f = open(os.path.join(dir, name), "w")
+ try:
+ f.write(text)
+ finally:
+ f.close()
+
+ def test_version_int(self):
+ source = self.mkdtemp()
+ target = self.mkdtemp()
+ expected = self.write_sample_scripts(source)
+
+ cmd = self.get_build_scripts_cmd(target,
+ [os.path.join(source, fn)
+ for fn in expected])
+ cmd.finalize_options()
+
+ # http://bugs.python.org/issue4524
+ #
+ # On linux-g++-32 with command line `./configure --enable-ipv6
+ # --with-suffix=3`, Python compiled fine, but the build scripts
+ # failed when writing the name of the executable
+ old = sysconfig.get_config_vars().get('VERSION')
+ sysconfig._config_vars['VERSION'] = 4
+ try:
+ cmd.run()
+ finally:
+ if old is not None:
+ sysconfig._config_vars['VERSION'] = old
+
+ built = os.listdir(target)
+ for name in expected:
+ self.assertIn(name, built)
+
+def test_suite():
+ return unittest.makeSuite(BuildScriptsTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
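
A simplified sketch of the shebang adjustment that build_scripts performs when copying scripts; the real command uses a first-line regex and preserves interpreter options, so this only illustrates the idea the test relies on.

import sys

def adjust_shebang(first_line, executable=sys.executable):
    # Rough approximation: rewrite Python shebangs to point at the running
    # interpreter, leave everything else (e.g. /bin/sh scripts) untouched.
    if first_line.startswith("#!") and "python" in first_line:
        return "#!%s\n" % executable
    return first_line

print(adjust_shebang("#! /usr/bin/env python2.3\n"))  # -> current interpreter
print(adjust_shebang("#!/bin/sh\n"))                  # unchanged
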
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_check.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_check.py
new file mode 100644
index 00000000..3d22868e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_check.py
@@ -0,0 +1,149 @@
+"""Tests for distutils.command.check."""
+import textwrap
+import unittest
+from test.support import run_unittest
+
+from distutils.command.check import check, HAS_DOCUTILS
+from distutils.tests import support
+from distutils.errors import DistutilsSetupError
+
+try:
+ import pygments
+except ImportError:
+ pygments = None
+
+
+class CheckTestCase(support.LoggingSilencer,
+ support.TempdirManager,
+ unittest.TestCase):
+
+ def _run(self, metadata=None, **options):
+ if metadata is None:
+ metadata = {}
+ pkg_info, dist = self.create_dist(**metadata)
+ cmd = check(dist)
+ cmd.initialize_options()
+ for name, value in options.items():
+ setattr(cmd, name, value)
+ cmd.ensure_finalized()
+ cmd.run()
+ return cmd
+
+ def test_check_metadata(self):
+ # let's run the command with no metadata at all;
+ # by default, check verifies the metadata and
+ # should emit some warnings
+ cmd = self._run()
+ self.assertEqual(cmd._warnings, 2)
+
+ # now let's add the required fields
+ # and run it again, to make sure we don't get
+ # any warning anymore
+ metadata = {'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx',
+ 'name': 'xxx', 'version': 'xxx'}
+ cmd = self._run(metadata)
+ self.assertEqual(cmd._warnings, 0)
+
+ # now with strict mode, we should
+ # get an error if any metadata is missing
+ self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
+
+ # and of course, no error when all metadata are present
+ cmd = self._run(metadata, strict=1)
+ self.assertEqual(cmd._warnings, 0)
+
+ # now a test with non-ASCII characters
+ metadata = {'url': 'xxx', 'author': '\u00c9ric',
+ 'author_email': 'xxx', 'name': 'xxx',
+ 'version': 'xxx',
+ 'description': 'Something about esszet \u00df',
+ 'long_description': 'More things about esszet \u00df'}
+ cmd = self._run(metadata)
+ self.assertEqual(cmd._warnings, 0)
+
+ @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
+ def test_check_document(self):
+ pkg_info, dist = self.create_dist()
+ cmd = check(dist)
+
+ # let's see if it detects broken rest
+ broken_rest = 'title\n===\n\ntest'
+ msgs = cmd._check_rst_data(broken_rest)
+ self.assertEqual(len(msgs), 1)
+
+ # and non-broken rest
+ rest = 'title\n=====\n\ntest'
+ msgs = cmd._check_rst_data(rest)
+ self.assertEqual(len(msgs), 0)
+
+ @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
+ def test_check_restructuredtext(self):
+ # let's see if it detects broken rest in long_description
+ broken_rest = 'title\n===\n\ntest'
+ pkg_info, dist = self.create_dist(long_description=broken_rest)
+ cmd = check(dist)
+ cmd.check_restructuredtext()
+ self.assertEqual(cmd._warnings, 1)
+
+ # let's see if we have an error with strict=1
+ metadata = {'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx',
+ 'name': 'xxx', 'version': 'xxx',
+ 'long_description': broken_rest}
+ self.assertRaises(DistutilsSetupError, self._run, metadata,
+ **{'strict': 1, 'restructuredtext': 1})
+
+ # and non-broken rest, including a non-ASCII character to test #12114
+ metadata['long_description'] = 'title\n=====\n\ntest \u00df'
+ cmd = self._run(metadata, strict=1, restructuredtext=1)
+ self.assertEqual(cmd._warnings, 0)
+
+ @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
+ def test_check_restructuredtext_with_syntax_highlight(self):
+ # Don't fail if there is a `code` or `code-block` directive
+
+ example_rst_docs = []
+ example_rst_docs.append(textwrap.dedent("""\
+ Here's some code:
+
+ .. code:: python
+
+ def foo():
+ pass
+ """))
+ example_rst_docs.append(textwrap.dedent("""\
+ Here's some code:
+
+ .. code-block:: python
+
+ def foo():
+ pass
+ """))
+
+ for rest_with_code in example_rst_docs:
+ pkg_info, dist = self.create_dist(long_description=rest_with_code)
+ cmd = check(dist)
+ cmd.check_restructuredtext()
+ msgs = cmd._check_rst_data(rest_with_code)
+ if pygments is not None:
+ self.assertEqual(len(msgs), 0)
+ else:
+ self.assertEqual(len(msgs), 1)
+ self.assertEqual(
+ str(msgs[0][1]),
+ 'Cannot analyze code. Pygments package not found.'
+ )
+
+ def test_check_all(self):
+
+ metadata = {'url': 'xxx', 'author': 'xxx'}
+ self.assertRaises(DistutilsSetupError, self._run,
+ {}, **{'strict': 1,
+ 'restructuredtext': 1})
+
+def test_suite():
+ return unittest.makeSuite(CheckTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
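
The check command can also be driven outside the test harness; a small sketch with hypothetical metadata values (_warnings is the internal counter the assertions above read):

from distutils.dist import Distribution
from distutils.command.check import check

dist = Distribution({'name': 'demo', 'version': '1.0',
                     'url': 'http://example.com',
                     'author': 'someone', 'author_email': 'someone@example.com'})
cmd = check(dist)
cmd.ensure_finalized()
cmd.run()
print(cmd._warnings)   # 0: all required metadata fields are present
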
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_clean.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_clean.py
new file mode 100644
index 00000000..c605afd8
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_clean.py
@@ -0,0 +1,49 @@
+"""Tests for distutils.command.clean."""
+import os
+import unittest
+
+from distutils.command.clean import clean
+from distutils.tests import support
+from test.support import run_unittest
+
+class cleanTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_simple_run(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = clean(dist)
+
+ # let's add some elements clean should remove
+ dirs = [(d, os.path.join(pkg_dir, d))
+ for d in ('build_temp', 'build_lib', 'bdist_base',
+ 'build_scripts', 'build_base')]
+
+ for name, path in dirs:
+ os.mkdir(path)
+ setattr(cmd, name, path)
+ if name == 'build_base':
+ continue
+ for f in ('one', 'two', 'three'):
+ self.write_file(os.path.join(path, f))
+
+ # let's run the command
+ cmd.all = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # make sure the files were removed
+ for name, path in dirs:
+ self.assertFalse(os.path.exists(path),
+ '%s was not removed' % path)
+
+ # let's run the command again (should spit warnings but succeed)
+ cmd.all = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+def test_suite():
+ return unittest.makeSuite(cleanTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_cmd.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_cmd.py
new file mode 100644
index 00000000..cf5197c3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_cmd.py
@@ -0,0 +1,126 @@
+"""Tests for distutils.cmd."""
+import unittest
+import os
+from test.support import captured_stdout, run_unittest
+
+from distutils.cmd import Command
+from distutils.dist import Distribution
+from distutils.errors import DistutilsOptionError
+from distutils import debug
+
+class MyCmd(Command):
+ def initialize_options(self):
+ pass
+
+class CommandTestCase(unittest.TestCase):
+
+ def setUp(self):
+ dist = Distribution()
+ self.cmd = MyCmd(dist)
+
+ def test_ensure_string_list(self):
+
+ cmd = self.cmd
+ cmd.not_string_list = ['one', 2, 'three']
+ cmd.yes_string_list = ['one', 'two', 'three']
+ cmd.not_string_list2 = object()
+ cmd.yes_string_list2 = 'ok'
+ cmd.ensure_string_list('yes_string_list')
+ cmd.ensure_string_list('yes_string_list2')
+
+ self.assertRaises(DistutilsOptionError,
+ cmd.ensure_string_list, 'not_string_list')
+
+ self.assertRaises(DistutilsOptionError,
+ cmd.ensure_string_list, 'not_string_list2')
+
+ cmd.option1 = 'ok,dok'
+ cmd.ensure_string_list('option1')
+ self.assertEqual(cmd.option1, ['ok', 'dok'])
+
+ cmd.option2 = ['xxx', 'www']
+ cmd.ensure_string_list('option2')
+
+ cmd.option3 = ['ok', 2]
+ self.assertRaises(DistutilsOptionError, cmd.ensure_string_list,
+ 'option3')
+
+
+ def test_make_file(self):
+
+ cmd = self.cmd
+
+ # making sure it raises when infiles is not a string or a list/tuple
+ self.assertRaises(TypeError, cmd.make_file,
+ infiles=1, outfile='', func='func', args=())
+
+ # making sure execute gets called properly
+ def _execute(func, args, exec_msg, level):
+ self.assertEqual(exec_msg, 'generating out from in')
+ cmd.force = True
+ cmd.execute = _execute
+ cmd.make_file(infiles='in', outfile='out', func='func', args=())
+
+ def test_dump_options(self):
+
+ msgs = []
+ def _announce(msg, level):
+ msgs.append(msg)
+ cmd = self.cmd
+ cmd.announce = _announce
+ cmd.option1 = 1
+ cmd.option2 = 1
+ cmd.user_options = [('option1', '', ''), ('option2', '', '')]
+ cmd.dump_options()
+
+ wanted = ["command options for 'MyCmd':", ' option1 = 1',
+ ' option2 = 1']
+ self.assertEqual(msgs, wanted)
+
+ def test_ensure_string(self):
+ cmd = self.cmd
+ cmd.option1 = 'ok'
+ cmd.ensure_string('option1')
+
+ cmd.option2 = None
+ cmd.ensure_string('option2', 'xxx')
+ self.assertTrue(hasattr(cmd, 'option2'))
+
+ cmd.option3 = 1
+ self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3')
+
+ def test_ensure_filename(self):
+ cmd = self.cmd
+ cmd.option1 = __file__
+ cmd.ensure_filename('option1')
+ cmd.option2 = 'xxx'
+ self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2')
+
+ def test_ensure_dirname(self):
+ cmd = self.cmd
+ cmd.option1 = os.path.dirname(__file__) or os.curdir
+ cmd.ensure_dirname('option1')
+ cmd.option2 = 'xxx'
+ self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2')
+
+ def test_debug_print(self):
+ cmd = self.cmd
+ with captured_stdout() as stdout:
+ cmd.debug_print('xxx')
+ stdout.seek(0)
+ self.assertEqual(stdout.read(), '')
+
+ debug.DEBUG = True
+ try:
+ with captured_stdout() as stdout:
+ cmd.debug_print('xxx')
+ stdout.seek(0)
+ self.assertEqual(stdout.read(), 'xxx\n')
+ finally:
+ debug.DEBUG = False
+
+def test_suite():
+ return unittest.makeSuite(CommandTestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
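
A minimal custom command sketch (the command name frobnicate and its option are made up) showing the initialize_options/finalize_options pattern and the ensure_string_list helper exercised above:

from distutils.cmd import Command
from distutils.dist import Distribution

class frobnicate(Command):
    description = "demo command"
    user_options = [('targets=', 't', 'comma-separated list of targets')]

    def initialize_options(self):
        self.targets = None

    def finalize_options(self):
        # turns 'one,two' (or an existing list of strings) into a list
        self.ensure_string_list('targets')

    def run(self):
        self.announce('targets: %r' % (self.targets,))

cmd = frobnicate(Distribution())
cmd.targets = 'one,two'
cmd.ensure_finalized()
print(cmd.targets)   # ['one', 'two']
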
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_config.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_config.py
new file mode 100644
index 00000000..77ef788e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_config.py
@@ -0,0 +1,140 @@
+"""Tests for distutils.pypirc.pypirc."""
+import os
+import unittest
+
+from distutils.core import PyPIRCCommand
+from distutils.core import Distribution
+from distutils.log import set_threshold
+from distutils.log import WARN
+
+from distutils.tests import support
+from test.support import run_unittest
+
+PYPIRC = """\
+[distutils]
+
+index-servers =
+ server1
+ server2
+ server3
+
+[server1]
+username:me
+password:secret
+
+[server2]
+username:meagain
+password: secret
+realm:acme
+repository:http://another.pypi/
+
+[server3]
+username:cbiggles
+password:yh^%#rest-of-my-password
+"""
+
+PYPIRC_OLD = """\
+[server-login]
+username:tarek
+password:secret
+"""
+
+WANTED = """\
+[distutils]
+index-servers =
+ pypi
+
+[pypi]
+username:tarek
+password:xxx
+"""
+
+
+class BasePyPIRCCommandTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ support.EnvironGuard,
+ unittest.TestCase):
+
+ def setUp(self):
+ """Patches the environment."""
+ super(BasePyPIRCCommandTestCase, self).setUp()
+ self.tmp_dir = self.mkdtemp()
+ os.environ['HOME'] = self.tmp_dir
+ self.rc = os.path.join(self.tmp_dir, '.pypirc')
+ self.dist = Distribution()
+
+ class command(PyPIRCCommand):
+ def __init__(self, dist):
+ PyPIRCCommand.__init__(self, dist)
+ def initialize_options(self):
+ pass
+ finalize_options = initialize_options
+
+ self._cmd = command
+ self.old_threshold = set_threshold(WARN)
+
+ def tearDown(self):
+ """Removes the patch."""
+ set_threshold(self.old_threshold)
+ super(BasePyPIRCCommandTestCase, self).tearDown()
+
+
+class PyPIRCCommandTestCase(BasePyPIRCCommandTestCase):
+
+ def test_server_registration(self):
+ # This test makes sure PyPIRCCommand knows how to:
+ # 1. handle several sections in .pypirc
+ # 2. handle the old format
+
+ # new format
+ self.write_file(self.rc, PYPIRC)
+ cmd = self._cmd(self.dist)
+ config = cmd._read_pypirc()
+
+ config = list(sorted(config.items()))
+ waited = [('password', 'secret'), ('realm', 'pypi'),
+ ('repository', 'https://upload.pypi.org/legacy/'),
+ ('server', 'server1'), ('username', 'me')]
+ self.assertEqual(config, waited)
+
+ # old format
+ self.write_file(self.rc, PYPIRC_OLD)
+ config = cmd._read_pypirc()
+ config = list(sorted(config.items()))
+ waited = [('password', 'secret'), ('realm', 'pypi'),
+ ('repository', 'https://upload.pypi.org/legacy/'),
+ ('server', 'server-login'), ('username', 'tarek')]
+ self.assertEqual(config, waited)
+
+ def test_server_empty_registration(self):
+ cmd = self._cmd(self.dist)
+ rc = cmd._get_rc_file()
+ self.assertFalse(os.path.exists(rc))
+ cmd._store_pypirc('tarek', 'xxx')
+ self.assertTrue(os.path.exists(rc))
+ f = open(rc)
+ try:
+ content = f.read()
+ self.assertEqual(content, WANTED)
+ finally:
+ f.close()
+
+ def test_config_interpolation(self):
+ # using the % character in .pypirc should not raise an error (#20120)
+ self.write_file(self.rc, PYPIRC)
+ cmd = self._cmd(self.dist)
+ cmd.repository = 'server3'
+ config = cmd._read_pypirc()
+
+ config = list(sorted(config.items()))
+ waited = [('password', 'yh^%#rest-of-my-password'), ('realm', 'pypi'),
+ ('repository', 'https://upload.pypi.org/legacy/'),
+ ('server', 'server3'), ('username', 'cbiggles')]
+ self.assertEqual(config, waited)
+
+
+def test_suite():
+ return unittest.makeSuite(PyPIRCCommandTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
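
The .pypirc format above is plain INI. Distutils reads it through its own _read_pypirc helper, so the RawConfigParser below is only an illustration of the non-interpolating parse that the test_config_interpolation case (#20120) depends on; the sample content is trimmed from the PYPIRC block above.

import configparser

PYPIRC_SAMPLE = """\
[distutils]
index-servers =
    server3

[server3]
username:cbiggles
password:yh^%#rest-of-my-password
"""

parser = configparser.RawConfigParser()     # no '%' interpolation
parser.read_string(PYPIRC_SAMPLE)
print(parser.get('server3', 'password'))    # yh^%#rest-of-my-password
# configparser.ConfigParser() with its default interpolation would raise
# InterpolationSyntaxError on the literal '%' instead.
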
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_config_cmd.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_config_cmd.py
new file mode 100644
index 00000000..6e566e79
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_config_cmd.py
@@ -0,0 +1,92 @@
+"""Tests for distutils.command.config."""
+import unittest
+import os
+import sys
+from test.support import run_unittest, missing_compiler_executable
+
+from distutils.command.config import dump_file, config
+from distutils.tests import support
+from distutils import log
+
+class ConfigTestCase(support.LoggingSilencer,
+ support.TempdirManager,
+ unittest.TestCase):
+
+ def _info(self, msg, *args):
+ for line in msg.splitlines():
+ self._logs.append(line)
+
+ def setUp(self):
+ super(ConfigTestCase, self).setUp()
+ self._logs = []
+ self.old_log = log.info
+ log.info = self._info
+
+ def tearDown(self):
+ log.info = self.old_log
+ super(ConfigTestCase, self).tearDown()
+
+ def test_dump_file(self):
+ this_file = os.path.splitext(__file__)[0] + '.py'
+ f = open(this_file)
+ try:
+ numlines = len(f.readlines())
+ finally:
+ f.close()
+
+ dump_file(this_file, 'I am the header')
+ self.assertEqual(len(self._logs), numlines+1)
+
+ @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
+ def test_search_cpp(self):
+ cmd = missing_compiler_executable(['preprocessor'])
+ if cmd is not None:
+ self.skipTest('The %r command is not found' % cmd)
+ pkg_dir, dist = self.create_dist()
+ cmd = config(dist)
+
+ # simple pattern searches
+ match = cmd.search_cpp(pattern='xxx', body='/* xxx */')
+ self.assertEqual(match, 0)
+
+ match = cmd.search_cpp(pattern='_configtest', body='/* xxx */')
+ self.assertEqual(match, 1)
+
+ def test_finalize_options(self):
+ # finalize_options does a bit of transformation
+ # on options
+ pkg_dir, dist = self.create_dist()
+ cmd = config(dist)
+ cmd.include_dirs = 'one%stwo' % os.pathsep
+ cmd.libraries = 'one'
+ cmd.library_dirs = 'three%sfour' % os.pathsep
+ cmd.ensure_finalized()
+
+ self.assertEqual(cmd.include_dirs, ['one', 'two'])
+ self.assertEqual(cmd.libraries, ['one'])
+ self.assertEqual(cmd.library_dirs, ['three', 'four'])
+
+ def test_clean(self):
+ # _clean removes files
+ tmp_dir = self.mkdtemp()
+ f1 = os.path.join(tmp_dir, 'one')
+ f2 = os.path.join(tmp_dir, 'two')
+
+ self.write_file(f1, 'xxx')
+ self.write_file(f2, 'xxx')
+
+ for f in (f1, f2):
+ self.assertTrue(os.path.exists(f))
+
+ pkg_dir, dist = self.create_dist()
+ cmd = config(dist)
+ cmd._clean(f1, f2)
+
+ for f in (f1, f2):
+ self.assertFalse(os.path.exists(f))
+
+def test_suite():
+ return unittest.makeSuite(ConfigTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_core.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_core.py
new file mode 100644
index 00000000..27ce7324
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_core.py
@@ -0,0 +1,140 @@
+"""Tests for distutils.core."""
+
+import io
+import distutils.core
+import os
+import shutil
+import sys
+import test.support
+from test.support import captured_stdout, run_unittest
+import unittest
+from distutils.tests import support
+from distutils import log
+
+# setup script that uses __file__
+setup_using___file__ = """\
+
+__file__
+
+from distutils.core import setup
+setup()
+"""
+
+setup_prints_cwd = """\
+
+import os
+print(os.getcwd())
+
+from distutils.core import setup
+setup()
+"""
+
+setup_does_nothing = """\
+from distutils.core import setup
+setup()
+"""
+
+
+setup_defines_subclass = """\
+from distutils.core import setup
+from distutils.command.install import install as _install
+
+class install(_install):
+ sub_commands = _install.sub_commands + ['cmd']
+
+setup(cmdclass={'install': install})
+"""
+
+class CoreTestCase(support.EnvironGuard, unittest.TestCase):
+
+ def setUp(self):
+ super(CoreTestCase, self).setUp()
+ self.old_stdout = sys.stdout
+ self.cleanup_testfn()
+ self.old_argv = sys.argv, sys.argv[:]
+ self.addCleanup(log.set_threshold, log._global_log.threshold)
+
+ def tearDown(self):
+ sys.stdout = self.old_stdout
+ self.cleanup_testfn()
+ sys.argv = self.old_argv[0]
+ sys.argv[:] = self.old_argv[1]
+ super(CoreTestCase, self).tearDown()
+
+ def cleanup_testfn(self):
+ path = test.support.TESTFN
+ if os.path.isfile(path):
+ os.remove(path)
+ elif os.path.isdir(path):
+ shutil.rmtree(path)
+
+ def write_setup(self, text, path=test.support.TESTFN):
+ f = open(path, "w")
+ try:
+ f.write(text)
+ finally:
+ f.close()
+ return path
+
+ def test_run_setup_provides_file(self):
+ # Make sure the script can use __file__; if that's missing, the test
+ # setup.py script will raise NameError.
+ distutils.core.run_setup(
+ self.write_setup(setup_using___file__))
+
+ def test_run_setup_preserves_sys_argv(self):
+ # Make sure run_setup does not clobber sys.argv
+ argv_copy = sys.argv.copy()
+ distutils.core.run_setup(
+ self.write_setup(setup_does_nothing))
+ self.assertEqual(sys.argv, argv_copy)
+
+ def test_run_setup_defines_subclass(self):
+ # Make sure a setup script can register a custom command class via
+ # the cmdclass argument; the subclass adds 'cmd' to sub_commands.
+ dist = distutils.core.run_setup(
+ self.write_setup(setup_defines_subclass))
+ install = dist.get_command_obj('install')
+ self.assertIn('cmd', install.sub_commands)
+
+ def test_run_setup_uses_current_dir(self):
+ # This tests that the setup script is run with the current directory
+ # as its own current directory; this was temporarily broken by a
+ # previous patch when TESTFN did not use the current directory.
+ sys.stdout = io.StringIO()
+ cwd = os.getcwd()
+
+ # Create a directory and write the setup.py file there:
+ os.mkdir(test.support.TESTFN)
+ setup_py = os.path.join(test.support.TESTFN, "setup.py")
+ distutils.core.run_setup(
+ self.write_setup(setup_prints_cwd, path=setup_py))
+
+ output = sys.stdout.getvalue()
+ if output.endswith("\n"):
+ output = output[:-1]
+ self.assertEqual(cwd, output)
+
+ def test_debug_mode(self):
+ # this covers the code called when DEBUG is set
+ sys.argv = ['setup.py', '--name']
+ with captured_stdout() as stdout:
+ distutils.core.setup(name='bar')
+ stdout.seek(0)
+ self.assertEqual(stdout.read(), 'bar\n')
+
+ distutils.core.DEBUG = True
+ try:
+ with captured_stdout() as stdout:
+ distutils.core.setup(name='bar')
+ finally:
+ distutils.core.DEBUG = False
+ stdout.seek(0)
+ wanted = "options (after parsing config files):\n"
+ self.assertEqual(stdout.readlines()[0], wanted)
+
+def test_suite():
+ return unittest.makeSuite(CoreTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
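
run_setup can be used directly, as the tests above do; a small sketch with a throwaway script (the 'demo' name and temp path are arbitrary):

import os
import tempfile
import distutils.core

script = os.path.join(tempfile.mkdtemp(), 'setup.py')
with open(script, 'w') as f:
    f.write("from distutils.core import setup\n"
            "setup(name='demo', version='1.0')\n")

# stop_after='init' returns the Distribution without parsing the command
# line or running any command.
dist = distutils.core.run_setup(script, script_args=[], stop_after='init')
print(dist.get_name())      # 'demo'
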
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_cygwinccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_cygwinccompiler.py
new file mode 100644
index 00000000..9dc869de
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_cygwinccompiler.py
@@ -0,0 +1,154 @@
+"""Tests for distutils.cygwinccompiler."""
+import unittest
+import sys
+import os
+from io import BytesIO
+from test.support import run_unittest
+
+from distutils import cygwinccompiler
+from distutils.cygwinccompiler import (check_config_h,
+ CONFIG_H_OK, CONFIG_H_NOTOK,
+ CONFIG_H_UNCERTAIN, get_versions,
+ get_msvcr)
+from distutils.tests import support
+
+class FakePopen(object):
+ test_class = None
+
+ def __init__(self, cmd, shell, stdout):
+ self.cmd = cmd.split()[0]
+ exes = self.test_class._exes
+ if self.cmd in exes:
+ # issue #6438: in Python 3.x, Popen returns bytes
+ self.stdout = BytesIO(exes[self.cmd])
+ else:
+ self.stdout = os.popen(cmd, 'r')
+
+
+class CygwinCCompilerTestCase(support.TempdirManager,
+ unittest.TestCase):
+
+ def setUp(self):
+ super(CygwinCCompilerTestCase, self).setUp()
+ self.version = sys.version
+ self.python_h = os.path.join(self.mkdtemp(), 'python.h')
+ from distutils import sysconfig
+ self.old_get_config_h_filename = sysconfig.get_config_h_filename
+ sysconfig.get_config_h_filename = self._get_config_h_filename
+ self.old_find_executable = cygwinccompiler.find_executable
+ cygwinccompiler.find_executable = self._find_executable
+ self._exes = {}
+ self.old_popen = cygwinccompiler.Popen
+ FakePopen.test_class = self
+ cygwinccompiler.Popen = FakePopen
+
+ def tearDown(self):
+ sys.version = self.version
+ from distutils import sysconfig
+ sysconfig.get_config_h_filename = self.old_get_config_h_filename
+ cygwinccompiler.find_executable = self.old_find_executable
+ cygwinccompiler.Popen = self.old_popen
+ super(CygwinCCompilerTestCase, self).tearDown()
+
+ def _get_config_h_filename(self):
+ return self.python_h
+
+ def _find_executable(self, name):
+ if name in self._exes:
+ return name
+ return None
+
+ def test_check_config_h(self):
+
+ # check_config_h looks for "GCC" in sys.version first and
+ # returns CONFIG_H_OK if it is found
+ sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) \n[GCC '
+ '4.0.1 (Apple Computer, Inc. build 5370)]')
+
+ self.assertEqual(check_config_h()[0], CONFIG_H_OK)
+
+ # then it tries to see if it can find "__GNUC__" in pyconfig.h
+ sys.version = 'something without the *CC word'
+
+ # if the file doesn't exist it returns CONFIG_H_UNCERTAIN
+ self.assertEqual(check_config_h()[0], CONFIG_H_UNCERTAIN)
+
+ # if it exists but does not contain __GNUC__, it returns CONFIG_H_NOTOK
+ self.write_file(self.python_h, 'xxx')
+ self.assertEqual(check_config_h()[0], CONFIG_H_NOTOK)
+
+ # and CONFIG_H_OK if __GNUC__ is found
+ self.write_file(self.python_h, 'xxx __GNUC__ xxx')
+ self.assertEqual(check_config_h()[0], CONFIG_H_OK)
+
+ def test_get_versions(self):
+
+ # get_versions calls distutils.spawn.find_executable on
+ # 'gcc', 'ld' and 'dllwrap'
+ self.assertEqual(get_versions(), (None, None, None))
+
+ # Let's fake we have 'gcc' and it returns '3.4.5'
+ self._exes['gcc'] = b'gcc (GCC) 3.4.5 (mingw special)\nFSF'
+ res = get_versions()
+ self.assertEqual(str(res[0]), '3.4.5')
+
+ # and let's see what happens when the version
+ # doesn't match the regular expression
+ # (\d+\.\d+(\.\d+)*)
+ self._exes['gcc'] = b'very strange output'
+ res = get_versions()
+ self.assertEqual(res[0], None)
+
+ # same thing for ld
+ self._exes['ld'] = b'GNU ld version 2.17.50 20060824'
+ res = get_versions()
+ self.assertEqual(str(res[1]), '2.17.50')
+ self._exes['ld'] = b'@(#)PROGRAM:ld PROJECT:ld64-77'
+ res = get_versions()
+ self.assertEqual(res[1], None)
+
+ # and dllwrap
+ self._exes['dllwrap'] = b'GNU dllwrap 2.17.50 20060824\nFSF'
+ res = get_versions()
+ self.assertEqual(str(res[2]), '2.17.50')
+ self._exes['dllwrap'] = b'Cheese Wrap'
+ res = get_versions()
+ self.assertEqual(res[2], None)
+
+ def test_get_msvcr(self):
+
+ # none
+ sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) '
+ '\n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]')
+ self.assertEqual(get_msvcr(), None)
+
+ # MSVC 7.0
+ sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
+ '[MSC v.1300 32 bits (Intel)]')
+ self.assertEqual(get_msvcr(), ['msvcr70'])
+
+ # MSVC 7.1
+ sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
+ '[MSC v.1310 32 bits (Intel)]')
+ self.assertEqual(get_msvcr(), ['msvcr71'])
+
+ # VS2005 / MSVC 8.0
+ sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
+ '[MSC v.1400 32 bits (Intel)]')
+ self.assertEqual(get_msvcr(), ['msvcr80'])
+
+ # VS2008 / MSVC 9.0
+ sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
+ '[MSC v.1500 32 bits (Intel)]')
+ self.assertEqual(get_msvcr(), ['msvcr90'])
+
+ # unknown
+ sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) '
+ '[MSC v.1999 32 bits (Intel)]')
+ self.assertRaises(ValueError, get_msvcr)
+
+def test_suite():
+ return unittest.makeSuite(CygwinCCompilerTestCase)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
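
A simplified sketch of the mapping get_msvcr relies on: the MSC build number embedded in sys.version selects the msvcr runtime. The table below only covers the versions exercised by the test; the real function parses sys.version itself.

import re

_MSC_TO_MSVCR = {'1300': 'msvcr70',   # MSVC 7.0
                 '1310': 'msvcr71',   # MSVC 7.1
                 '1400': 'msvcr80',   # VS2005 / MSVC 8.0
                 '1500': 'msvcr90'}   # VS2008 / MSVC 9.0

def msvcr_for(version_string):
    match = re.search(r'MSC v\.(\d{4})', version_string)
    if match is None:
        return None                       # not an MSVC build
    lib = _MSC_TO_MSVCR.get(match.group(1))
    if lib is None:
        raise ValueError("Unknown MS Compiler version %s" % match.group(1))
    return [lib]

print(msvcr_for('2.5.1 (...) [MSC v.1310 32 bits (Intel)]'))   # ['msvcr71']
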
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dep_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dep_util.py
new file mode 100644
index 00000000..c6fae39c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dep_util.py
@@ -0,0 +1,80 @@
+"""Tests for distutils.dep_util."""
+import unittest
+import os
+
+from distutils.dep_util import newer, newer_pairwise, newer_group
+from distutils.errors import DistutilsFileError
+from distutils.tests import support
+from test.support import run_unittest
+
+class DepUtilTestCase(support.TempdirManager, unittest.TestCase):
+
+ def test_newer(self):
+
+ tmpdir = self.mkdtemp()
+ new_file = os.path.join(tmpdir, 'new')
+ old_file = os.path.abspath(__file__)
+
+ # Raise DistutilsFileError if 'new_file' does not exist.
+ self.assertRaises(DistutilsFileError, newer, new_file, old_file)
+
+ # Return true if 'new_file' exists and is more recently modified than
+ # 'old_file', or if 'new_file' exists and 'old_file' doesn't.
+ self.write_file(new_file)
+ self.assertTrue(newer(new_file, 'I_dont_exist'))
+ self.assertTrue(newer(new_file, old_file))
+
+ # Return false if both exist and 'old_file' is the same age or younger
+ # than 'new_file'.
+ self.assertFalse(newer(old_file, new_file))
+
+ def test_newer_pairwise(self):
+ tmpdir = self.mkdtemp()
+ sources = os.path.join(tmpdir, 'sources')
+ targets = os.path.join(tmpdir, 'targets')
+ os.mkdir(sources)
+ os.mkdir(targets)
+ one = os.path.join(sources, 'one')
+ two = os.path.join(sources, 'two')
+ three = os.path.abspath(__file__) # I am the old file
+ four = os.path.join(targets, 'four')
+ self.write_file(one)
+ self.write_file(two)
+ self.write_file(four)
+
+ self.assertEqual(newer_pairwise([one, two], [three, four]),
+ ([one],[three]))
+
+ def test_newer_group(self):
+ tmpdir = self.mkdtemp()
+ sources = os.path.join(tmpdir, 'sources')
+ os.mkdir(sources)
+ one = os.path.join(sources, 'one')
+ two = os.path.join(sources, 'two')
+ three = os.path.join(sources, 'three')
+ old_file = os.path.abspath(__file__)
+
+ # return true if 'old_file' is out-of-date with respect to any file
+ # listed in 'sources'.
+ self.write_file(one)
+ self.write_file(two)
+ self.write_file(three)
+ self.assertTrue(newer_group([one, two, three], old_file))
+ self.assertFalse(newer_group([one, two, old_file], three))
+
+ # handling of missing source files
+ os.remove(one)
+ self.assertRaises(OSError, newer_group, [one, two, old_file], three)
+
+ self.assertFalse(newer_group([one, two, old_file], three,
+ missing='ignore'))
+
+ self.assertTrue(newer_group([one, two, old_file], three,
+ missing='newer'))
+
+
+def test_suite():
+ return unittest.makeSuite(DepUtilTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
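
Typical standalone use of the helpers tested above, with mtimes set explicitly so the ordering is deterministic (file names are arbitrary):

import os
import tempfile
import time
from distutils.dep_util import newer, newer_group

tmpdir = tempfile.mkdtemp()
source = os.path.join(tmpdir, 'module.c')
target = os.path.join(tmpdir, 'module.o')
for path in (source, target):
    open(path, 'w').close()

now = time.time()
os.utime(target, (now - 60, now - 60))   # target is a minute older
os.utime(source, (now, now))

print(newer(source, target))             # True: source changed after target
print(newer_group([source], target))     # True: at least one source is newer
print(newer_group([target], source))     # False: nothing is newer than source
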
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dir_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dir_util.py
new file mode 100644
index 00000000..d436cf83
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dir_util.py
@@ -0,0 +1,139 @@
+"""Tests for distutils.dir_util."""
+import unittest
+import os
+import stat
+import sys
+from unittest.mock import patch
+
+from distutils import dir_util, errors
+from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree,
+ ensure_relative)
+
+from distutils import log
+from distutils.tests import support
+from test.support import run_unittest
+
+
+class DirUtilTestCase(support.TempdirManager, unittest.TestCase):
+
+ def _log(self, msg, *args):
+ if len(args) > 0:
+ self._logs.append(msg % args)
+ else:
+ self._logs.append(msg)
+
+ def setUp(self):
+ super(DirUtilTestCase, self).setUp()
+ self._logs = []
+ tmp_dir = self.mkdtemp()
+ self.root_target = os.path.join(tmp_dir, 'deep')
+ self.target = os.path.join(self.root_target, 'here')
+ self.target2 = os.path.join(tmp_dir, 'deep2')
+ self.old_log = log.info
+ log.info = self._log
+
+ def tearDown(self):
+ log.info = self.old_log
+ super(DirUtilTestCase, self).tearDown()
+
+ def test_mkpath_remove_tree_verbosity(self):
+
+ mkpath(self.target, verbose=0)
+ wanted = []
+ self.assertEqual(self._logs, wanted)
+ remove_tree(self.root_target, verbose=0)
+
+ mkpath(self.target, verbose=1)
+ wanted = ['creating %s' % self.root_target,
+ 'creating %s' % self.target]
+ self.assertEqual(self._logs, wanted)
+ self._logs = []
+
+ remove_tree(self.root_target, verbose=1)
+ wanted = ["removing '%s' (and everything under it)" % self.root_target]
+ self.assertEqual(self._logs, wanted)
+
+ @unittest.skipIf(sys.platform.startswith('win'),
+ "This test is only appropriate for POSIX-like systems.")
+ def test_mkpath_with_custom_mode(self):
+ # Get and set the current umask value for testing mode bits.
+ umask = os.umask(0o002)
+ os.umask(umask)
+ mkpath(self.target, 0o700)
+ self.assertEqual(
+ stat.S_IMODE(os.stat(self.target).st_mode), 0o700 & ~umask)
+ mkpath(self.target2, 0o555)
+ self.assertEqual(
+ stat.S_IMODE(os.stat(self.target2).st_mode), 0o555 & ~umask)
+
+ def test_create_tree_verbosity(self):
+
+ create_tree(self.root_target, ['one', 'two', 'three'], verbose=0)
+ self.assertEqual(self._logs, [])
+ remove_tree(self.root_target, verbose=0)
+
+ wanted = ['creating %s' % self.root_target]
+ create_tree(self.root_target, ['one', 'two', 'three'], verbose=1)
+ self.assertEqual(self._logs, wanted)
+
+ remove_tree(self.root_target, verbose=0)
+
+ def test_copy_tree_verbosity(self):
+
+ mkpath(self.target, verbose=0)
+
+ copy_tree(self.target, self.target2, verbose=0)
+ self.assertEqual(self._logs, [])
+
+ remove_tree(self.root_target, verbose=0)
+
+ mkpath(self.target, verbose=0)
+ a_file = os.path.join(self.target, 'ok.txt')
+ with open(a_file, 'w') as f:
+ f.write('some content')
+
+ wanted = ['copying %s -> %s' % (a_file, self.target2)]
+ copy_tree(self.target, self.target2, verbose=1)
+ self.assertEqual(self._logs, wanted)
+
+ remove_tree(self.root_target, verbose=0)
+ remove_tree(self.target2, verbose=0)
+
+ def test_copy_tree_skips_nfs_temp_files(self):
+ mkpath(self.target, verbose=0)
+
+ a_file = os.path.join(self.target, 'ok.txt')
+ nfs_file = os.path.join(self.target, '.nfs123abc')
+ for f in a_file, nfs_file:
+ with open(f, 'w') as fh:
+ fh.write('some content')
+
+ copy_tree(self.target, self.target2)
+ self.assertEqual(os.listdir(self.target2), ['ok.txt'])
+
+ remove_tree(self.root_target, verbose=0)
+ remove_tree(self.target2, verbose=0)
+
+ def test_ensure_relative(self):
+ if os.sep == '/':
+ self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
+ self.assertEqual(ensure_relative('some/path'), 'some/path')
+ else: # \\
+ self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo')
+ self.assertEqual(ensure_relative('home\\foo'), 'home\\foo')
+
+ def test_copy_tree_exception_in_listdir(self):
+ """
+ An exception in listdir should raise a DistutilsFileError
+ """
+ with patch("os.listdir", side_effect=OSError()), \
+ self.assertRaises(errors.DistutilsFileError):
+ src = self.tempdirs[-1]
+ dir_util.copy_tree(src, None)
+
+
+def test_suite():
+ return unittest.makeSuite(DirUtilTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
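
The same helpers outside the test fixture, on a throwaway temp tree (paths are arbitrary):

import os
import tempfile
from distutils.dir_util import mkpath, copy_tree, remove_tree

base = tempfile.mkdtemp()
src = os.path.join(base, 'src', 'deep')
dst = os.path.join(base, 'dst')

mkpath(src, verbose=0)                   # creates 'src' and 'src/deep'
with open(os.path.join(src, 'ok.txt'), 'w') as f:
    f.write('content')

print(copy_tree(os.path.join(base, 'src'), dst))   # lists the copied files
remove_tree(dst, verbose=0)
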
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dist.py
new file mode 100644
index 00000000..0a19f0fb
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_dist.py
@@ -0,0 +1,528 @@
+"""Tests for distutils.dist."""
+import os
+import io
+import sys
+import unittest
+import warnings
+import textwrap
+
+from unittest import mock
+
+from distutils.dist import Distribution, fix_help_options, DistributionMetadata
+from distutils.cmd import Command
+
+from test.support import (
+ TESTFN, captured_stdout, captured_stderr, run_unittest
+)
+from distutils.tests import support
+from distutils import log
+
+
+class test_dist(Command):
+ """Sample distutils extension command."""
+
+ user_options = [
+ ("sample-option=", "S", "help text"),
+ ]
+
+ def initialize_options(self):
+ self.sample_option = None
+
+
+class TestDistribution(Distribution):
+ """Distribution subclasses that avoids the default search for
+ configuration files.
+
+ The ._config_files attribute must be set before
+ .parse_config_files() is called.
+ """
+
+ def find_config_files(self):
+ return self._config_files
+
+
+class DistributionTestCase(support.LoggingSilencer,
+ support.TempdirManager,
+ support.EnvironGuard,
+ unittest.TestCase):
+
+ def setUp(self):
+ super(DistributionTestCase, self).setUp()
+ self.argv = sys.argv, sys.argv[:]
+ del sys.argv[1:]
+
+ def tearDown(self):
+ sys.argv = self.argv[0]
+ sys.argv[:] = self.argv[1]
+ super(DistributionTestCase, self).tearDown()
+
+ def create_distribution(self, configfiles=()):
+ d = TestDistribution()
+ d._config_files = configfiles
+ d.parse_config_files()
+ d.parse_command_line()
+ return d
+
+ def test_command_packages_unspecified(self):
+ sys.argv.append("build")
+ d = self.create_distribution()
+ self.assertEqual(d.get_command_packages(), ["distutils.command"])
+
+ def test_command_packages_cmdline(self):
+ from distutils.tests.test_dist import test_dist
+ sys.argv.extend(["--command-packages",
+ "foo.bar,distutils.tests",
+ "test_dist",
+ "-Ssometext",
+ ])
+ d = self.create_distribution()
+ # let's actually try to load our test command:
+ self.assertEqual(d.get_command_packages(),
+ ["distutils.command", "foo.bar", "distutils.tests"])
+ cmd = d.get_command_obj("test_dist")
+ self.assertIsInstance(cmd, test_dist)
+ self.assertEqual(cmd.sample_option, "sometext")
+
+ def test_venv_install_options(self):
+ sys.argv.append("install")
+ self.addCleanup(os.unlink, TESTFN)
+
+ fakepath = '/somedir'
+
+ with open(TESTFN, "w") as f:
+ print(("[install]\n"
+ "install-base = {0}\n"
+ "install-platbase = {0}\n"
+ "install-lib = {0}\n"
+ "install-platlib = {0}\n"
+ "install-purelib = {0}\n"
+ "install-headers = {0}\n"
+ "install-scripts = {0}\n"
+ "install-data = {0}\n"
+ "prefix = {0}\n"
+ "exec-prefix = {0}\n"
+ "home = {0}\n"
+ "user = {0}\n"
+ "root = {0}").format(fakepath), file=f)
+
+ # Base case: Not in a Virtual Environment
+ with mock.patch.multiple(sys, prefix='/a', base_prefix='/a') as values:
+ d = self.create_distribution([TESTFN])
+
+ option_tuple = (TESTFN, fakepath)
+
+ result_dict = {
+ 'install_base': option_tuple,
+ 'install_platbase': option_tuple,
+ 'install_lib': option_tuple,
+ 'install_platlib': option_tuple,
+ 'install_purelib': option_tuple,
+ 'install_headers': option_tuple,
+ 'install_scripts': option_tuple,
+ 'install_data': option_tuple,
+ 'prefix': option_tuple,
+ 'exec_prefix': option_tuple,
+ 'home': option_tuple,
+ 'user': option_tuple,
+ 'root': option_tuple,
+ }
+
+ self.assertEqual(
+ sorted(d.command_options.get('install').keys()),
+ sorted(result_dict.keys()))
+
+ for (key, value) in d.command_options.get('install').items():
+ self.assertEqual(value, result_dict[key])
+
+ # Test case: In a Virtual Environment
+ with mock.patch.multiple(sys, prefix='/a', base_prefix='/b') as values:
+ d = self.create_distribution([TESTFN])
+
+ for key in result_dict.keys():
+ self.assertNotIn(key, d.command_options.get('install', {}))
+
+ def test_command_packages_configfile(self):
+ sys.argv.append("build")
+ self.addCleanup(os.unlink, TESTFN)
+ f = open(TESTFN, "w")
+ try:
+ print("[global]", file=f)
+ print("command_packages = foo.bar, splat", file=f)
+ finally:
+ f.close()
+
+ d = self.create_distribution([TESTFN])
+ self.assertEqual(d.get_command_packages(),
+ ["distutils.command", "foo.bar", "splat"])
+
+ # ensure command line overrides config:
+ sys.argv[1:] = ["--command-packages", "spork", "build"]
+ d = self.create_distribution([TESTFN])
+ self.assertEqual(d.get_command_packages(),
+ ["distutils.command", "spork"])
+
+ # Setting --command-packages to '' should cause the default to
+ # be used even if a config file specified something else:
+ sys.argv[1:] = ["--command-packages", "", "build"]
+ d = self.create_distribution([TESTFN])
+ self.assertEqual(d.get_command_packages(), ["distutils.command"])
+
+ def test_empty_options(self):
+ # an empty options dictionary should not stay in the
+ # list of attributes
+
+ # catching warnings
+ warns = []
+
+ def _warn(msg):
+ warns.append(msg)
+
+ self.addCleanup(setattr, warnings, 'warn', warnings.warn)
+ warnings.warn = _warn
+ dist = Distribution(attrs={'author': 'xxx', 'name': 'xxx',
+ 'version': 'xxx', 'url': 'xxxx',
+ 'options': {}})
+
+ self.assertEqual(len(warns), 0)
+ self.assertNotIn('options', dir(dist))
+
+ def test_finalize_options(self):
+ attrs = {'keywords': 'one,two',
+ 'platforms': 'one,two'}
+
+ dist = Distribution(attrs=attrs)
+ dist.finalize_options()
+
+ # finalize_options splits platforms and keywords
+ self.assertEqual(dist.metadata.platforms, ['one', 'two'])
+ self.assertEqual(dist.metadata.keywords, ['one', 'two'])
+
+ attrs = {'keywords': 'foo bar',
+ 'platforms': 'foo bar'}
+ dist = Distribution(attrs=attrs)
+ dist.finalize_options()
+ self.assertEqual(dist.metadata.platforms, ['foo bar'])
+ self.assertEqual(dist.metadata.keywords, ['foo bar'])
+
+ def test_get_command_packages(self):
+ dist = Distribution()
+ self.assertEqual(dist.command_packages, None)
+ cmds = dist.get_command_packages()
+ self.assertEqual(cmds, ['distutils.command'])
+ self.assertEqual(dist.command_packages,
+ ['distutils.command'])
+
+ dist.command_packages = 'one,two'
+ cmds = dist.get_command_packages()
+ self.assertEqual(cmds, ['distutils.command', 'one', 'two'])
+
+ def test_announce(self):
+ # make sure the level is known
+ dist = Distribution()
+ args = ('ok',)
+ kwargs = {'level': 'ok2'}
+ self.assertRaises(ValueError, dist.announce, args, kwargs)
+
+
+ def test_find_config_files_disable(self):
+ # Ticket #1180: Allow user to disable their home config file.
+ temp_home = self.mkdtemp()
+ if os.name == 'posix':
+ user_filename = os.path.join(temp_home, ".pydistutils.cfg")
+ else:
+ user_filename = os.path.join(temp_home, "pydistutils.cfg")
+
+ with open(user_filename, 'w') as f:
+ f.write('[distutils]\n')
+
+ def _expander(path):
+ return temp_home
+
+ old_expander = os.path.expanduser
+ os.path.expanduser = _expander
+ try:
+ d = Distribution()
+ all_files = d.find_config_files()
+
+ d = Distribution(attrs={'script_args': ['--no-user-cfg']})
+ files = d.find_config_files()
+ finally:
+ os.path.expanduser = old_expander
+
+ # make sure --no-user-cfg disables the user cfg file
+ self.assertEqual(len(all_files)-1, len(files))
+
+class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
+ unittest.TestCase):
+
+ def setUp(self):
+ super(MetadataTestCase, self).setUp()
+ self.argv = sys.argv, sys.argv[:]
+
+ def tearDown(self):
+ sys.argv = self.argv[0]
+ sys.argv[:] = self.argv[1]
+ super(MetadataTestCase, self).tearDown()
+
+ def format_metadata(self, dist):
+ sio = io.StringIO()
+ dist.metadata.write_pkg_file(sio)
+ return sio.getvalue()
+
+ def test_simple_metadata(self):
+ attrs = {"name": "package",
+ "version": "1.0"}
+ dist = Distribution(attrs)
+ meta = self.format_metadata(dist)
+ self.assertIn("Metadata-Version: 1.0", meta)
+ self.assertNotIn("provides:", meta.lower())
+ self.assertNotIn("requires:", meta.lower())
+ self.assertNotIn("obsoletes:", meta.lower())
+
+ def test_provides(self):
+ attrs = {"name": "package",
+ "version": "1.0",
+ "provides": ["package", "package.sub"]}
+ dist = Distribution(attrs)
+ self.assertEqual(dist.metadata.get_provides(),
+ ["package", "package.sub"])
+ self.assertEqual(dist.get_provides(),
+ ["package", "package.sub"])
+ meta = self.format_metadata(dist)
+ self.assertIn("Metadata-Version: 1.1", meta)
+ self.assertNotIn("requires:", meta.lower())
+ self.assertNotIn("obsoletes:", meta.lower())
+
+ def test_provides_illegal(self):
+ self.assertRaises(ValueError, Distribution,
+ {"name": "package",
+ "version": "1.0",
+ "provides": ["my.pkg (splat)"]})
+
+ def test_requires(self):
+ attrs = {"name": "package",
+ "version": "1.0",
+ "requires": ["other", "another (==1.0)"]}
+ dist = Distribution(attrs)
+ self.assertEqual(dist.metadata.get_requires(),
+ ["other", "another (==1.0)"])
+ self.assertEqual(dist.get_requires(),
+ ["other", "another (==1.0)"])
+ meta = self.format_metadata(dist)
+ self.assertIn("Metadata-Version: 1.1", meta)
+ self.assertNotIn("provides:", meta.lower())
+ self.assertIn("Requires: other", meta)
+ self.assertIn("Requires: another (==1.0)", meta)
+ self.assertNotIn("obsoletes:", meta.lower())
+
+ def test_requires_illegal(self):
+ self.assertRaises(ValueError, Distribution,
+ {"name": "package",
+ "version": "1.0",
+ "requires": ["my.pkg (splat)"]})
+
+ def test_requires_to_list(self):
+ attrs = {"name": "package",
+ "requires": iter(["other"])}
+ dist = Distribution(attrs)
+ self.assertIsInstance(dist.metadata.requires, list)
+
+
+ def test_obsoletes(self):
+ attrs = {"name": "package",
+ "version": "1.0",
+ "obsoletes": ["other", "another (<1.0)"]}
+ dist = Distribution(attrs)
+ self.assertEqual(dist.metadata.get_obsoletes(),
+ ["other", "another (<1.0)"])
+ self.assertEqual(dist.get_obsoletes(),
+ ["other", "another (<1.0)"])
+ meta = self.format_metadata(dist)
+ self.assertIn("Metadata-Version: 1.1", meta)
+ self.assertNotIn("provides:", meta.lower())
+ self.assertNotIn("requires:", meta.lower())
+ self.assertIn("Obsoletes: other", meta)
+ self.assertIn("Obsoletes: another (<1.0)", meta)
+
+ def test_obsoletes_illegal(self):
+ self.assertRaises(ValueError, Distribution,
+ {"name": "package",
+ "version": "1.0",
+ "obsoletes": ["my.pkg (splat)"]})
+
+ def test_obsoletes_to_list(self):
+ attrs = {"name": "package",
+ "obsoletes": iter(["other"])}
+ dist = Distribution(attrs)
+ self.assertIsInstance(dist.metadata.obsoletes, list)
+
+ def test_classifier(self):
+ attrs = {'name': 'Boa', 'version': '3.0',
+ 'classifiers': ['Programming Language :: Python :: 3']}
+ dist = Distribution(attrs)
+ self.assertEqual(dist.get_classifiers(),
+ ['Programming Language :: Python :: 3'])
+ meta = self.format_metadata(dist)
+ self.assertIn('Metadata-Version: 1.1', meta)
+
+ def test_classifier_invalid_type(self):
+ attrs = {'name': 'Boa', 'version': '3.0',
+ 'classifiers': ('Programming Language :: Python :: 3',)}
+ with captured_stderr() as error:
+ d = Distribution(attrs)
+ # should have a warning about passing a non-list
+ self.assertIn('should be a list', error.getvalue())
+ # should be converted to a list
+ self.assertIsInstance(d.metadata.classifiers, list)
+ self.assertEqual(d.metadata.classifiers,
+ list(attrs['classifiers']))
+
+ def test_keywords(self):
+ attrs = {'name': 'Monty', 'version': '1.0',
+ 'keywords': ['spam', 'eggs', 'life of brian']}
+ dist = Distribution(attrs)
+ self.assertEqual(dist.get_keywords(),
+ ['spam', 'eggs', 'life of brian'])
+
+ def test_keywords_invalid_type(self):
+ attrs = {'name': 'Monty', 'version': '1.0',
+ 'keywords': ('spam', 'eggs', 'life of brian')}
+ with captured_stderr() as error:
+ d = Distribution(attrs)
+ # should have a warning about passing a non-list
+ self.assertIn('should be a list', error.getvalue())
+ # should be converted to a list
+ self.assertIsInstance(d.metadata.keywords, list)
+ self.assertEqual(d.metadata.keywords, list(attrs['keywords']))
+
+ def test_platforms(self):
+ attrs = {'name': 'Monty', 'version': '1.0',
+ 'platforms': ['GNU/Linux', 'Some Evil Platform']}
+ dist = Distribution(attrs)
+ self.assertEqual(dist.get_platforms(),
+ ['GNU/Linux', 'Some Evil Platform'])
+
+ def test_platforms_invalid_types(self):
+ attrs = {'name': 'Monty', 'version': '1.0',
+ 'platforms': ('GNU/Linux', 'Some Evil Platform')}
+ with captured_stderr() as error:
+ d = Distribution(attrs)
+ # should have a warning about passing a non-list
+ self.assertIn('should be a list', error.getvalue())
+ # should be converted to a list
+ self.assertIsInstance(d.metadata.platforms, list)
+ self.assertEqual(d.metadata.platforms, list(attrs['platforms']))
+
+ def test_download_url(self):
+ attrs = {'name': 'Boa', 'version': '3.0',
+ 'download_url': 'http://example.org/boa'}
+ dist = Distribution(attrs)
+ meta = self.format_metadata(dist)
+ self.assertIn('Metadata-Version: 1.1', meta)
+
+ def test_long_description(self):
+ long_desc = textwrap.dedent("""\
+ example::
+ We start here
+ and continue here
+ and end here.""")
+ attrs = {"name": "package",
+ "version": "1.0",
+ "long_description": long_desc}
+
+ dist = Distribution(attrs)
+ meta = self.format_metadata(dist)
+ meta = meta.replace('\n' + 8 * ' ', '\n')
+ self.assertIn(long_desc, meta)
+
+ def test_custom_pydistutils(self):
+ # fixes #2166
+ # make sure pydistutils.cfg is found
+ if os.name == 'posix':
+ user_filename = ".pydistutils.cfg"
+ else:
+ user_filename = "pydistutils.cfg"
+
+ temp_dir = self.mkdtemp()
+ user_filename = os.path.join(temp_dir, user_filename)
+ f = open(user_filename, 'w')
+ try:
+ f.write('.')
+ finally:
+ f.close()
+
+ try:
+ dist = Distribution()
+
+ # linux-style
+ if sys.platform in ('linux', 'darwin'):
+ os.environ['HOME'] = temp_dir
+ files = dist.find_config_files()
+ self.assertIn(user_filename, files)
+
+ # win32-style
+ if sys.platform == 'win32':
+ # home drive should be found
+ os.environ['HOME'] = temp_dir
+ files = dist.find_config_files()
+ self.assertIn(user_filename, files,
+ '%r not found in %r' % (user_filename, files))
+ finally:
+ os.remove(user_filename)
+
+ def test_fix_help_options(self):
+ help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
+ fancy_options = fix_help_options(help_tuples)
+ self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
+ self.assertEqual(fancy_options[1], (1, 2, 3))
+
+ def test_show_help(self):
+ # smoke test, just makes sure some help is displayed
+ self.addCleanup(log.set_threshold, log._global_log.threshold)
+ dist = Distribution()
+ sys.argv = []
+ dist.help = 1
+ dist.script_name = 'setup.py'
+ with captured_stdout() as s:
+ dist.parse_command_line()
+
+ output = [line for line in s.getvalue().split('\n')
+ if line.strip() != '']
+ self.assertTrue(output)
+
+
+ def test_read_metadata(self):
+ attrs = {"name": "package",
+ "version": "1.0",
+ "long_description": "desc",
+ "description": "xxx",
+ "download_url": "http://example.com",
+ "keywords": ['one', 'two'],
+ "requires": ['foo']}
+
+ dist = Distribution(attrs)
+ metadata = dist.metadata
+
+ # write the metadata out, then read it back in
+ PKG_INFO = io.StringIO()
+ metadata.write_pkg_file(PKG_INFO)
+ PKG_INFO.seek(0)
+ metadata.read_pkg_file(PKG_INFO)
+
+ self.assertEqual(metadata.name, "package")
+ self.assertEqual(metadata.version, "1.0")
+ self.assertEqual(metadata.description, "xxx")
+ self.assertEqual(metadata.download_url, 'http://example.com')
+ self.assertEqual(metadata.keywords, ['one', 'two'])
+ self.assertEqual(metadata.platforms, ['UNKNOWN'])
+ self.assertEqual(metadata.obsoletes, None)
+ self.assertEqual(metadata.requires, ['foo'])
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(DistributionTestCase))
+ suite.addTest(unittest.makeSuite(MetadataTestCase))
+ return suite
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
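
The PKG-INFO round trip in test_read_metadata can be reproduced in isolation; a sketch with hypothetical metadata showing how a 'requires' entry bumps the metadata version, as the assertions above expect:

import io
from distutils.dist import Distribution

dist = Distribution({'name': 'demo', 'version': '1.0',
                     'requires': ['other', 'another (==1.0)']})
buf = io.StringIO()
dist.metadata.write_pkg_file(buf)
pkg_info = buf.getvalue()
print('Metadata-Version: 1.1' in pkg_info)   # True, because requires is set
print('Requires: other' in pkg_info)         # True
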
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_extension.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_extension.py
new file mode 100644
index 00000000..e35f2738
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_extension.py
@@ -0,0 +1,69 @@
+"""Tests for distutils.extension."""
+import unittest
+import os
+import warnings
+
+from test.support import check_warnings, run_unittest
+from distutils.extension import read_setup_file, Extension
+
+class ExtensionTestCase(unittest.TestCase):
+
+ def test_read_setup_file(self):
+ # trying to read a Setup file
+ # (sample extracted from the PyGame project)
+ setup = os.path.join(os.path.dirname(__file__), 'Setup.sample')
+
+ exts = read_setup_file(setup)
+ names = [ext.name for ext in exts]
+ names.sort()
+
+ # here are the extensions read_setup_file should have created
+ # out of the file
+ wanted = ['_arraysurfarray', '_camera', '_numericsndarray',
+ '_numericsurfarray', 'base', 'bufferproxy', 'cdrom',
+ 'color', 'constants', 'display', 'draw', 'event',
+ 'fastevent', 'font', 'gfxdraw', 'image', 'imageext',
+ 'joystick', 'key', 'mask', 'mixer', 'mixer_music',
+ 'mouse', 'movie', 'overlay', 'pixelarray', 'pypm',
+ 'rect', 'rwobject', 'scrap', 'surface', 'surflock',
+ 'time', 'transform']
+
+ self.assertEqual(names, wanted)
+
+ def test_extension_init(self):
+ # the first argument, which is the name, must be a string
+ self.assertRaises(AssertionError, Extension, 1, [])
+ ext = Extension('name', [])
+ self.assertEqual(ext.name, 'name')
+
+ # the second argument, which is the list of files, must
+ # be a list of strings
+ self.assertRaises(AssertionError, Extension, 'name', 'file')
+ self.assertRaises(AssertionError, Extension, 'name', ['file', 1])
+ ext = Extension('name', ['file1', 'file2'])
+ self.assertEqual(ext.sources, ['file1', 'file2'])
+
+ # other arguments have defaults
+ for attr in ('include_dirs', 'define_macros', 'undef_macros',
+ 'library_dirs', 'libraries', 'runtime_library_dirs',
+ 'extra_objects', 'extra_compile_args', 'extra_link_args',
+ 'export_symbols', 'swig_opts', 'depends'):
+ self.assertEqual(getattr(ext, attr), [])
+
+ self.assertEqual(ext.language, None)
+ self.assertEqual(ext.optional, None)
+
+ # if there are unknown keyword options, warn about them
+ with check_warnings() as w:
+ warnings.simplefilter('always')
+ ext = Extension('name', ['file1', 'file2'], chic=True)
+
+ self.assertEqual(len(w.warnings), 1)
+ self.assertEqual(str(w.warnings[0].message),
+ "Unknown Extension options: 'chic'")
+
+def test_suite():
+ return unittest.makeSuite(ExtensionTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
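
Constructing an Extension outside the Setup-file parser looks like this (module and file names are made up; nothing is compiled):

from distutils.extension import Extension

ext = Extension('demo._speedups', ['demo/_speedups.c'],
                define_macros=[('NDEBUG', '1')],
                libraries=['m'],
                extra_compile_args=['-O2'])

print(ext.name)            # 'demo._speedups'
print(ext.sources)         # ['demo/_speedups.c']
print(ext.define_macros)   # [('NDEBUG', '1')]
print(ext.language)        # None unless passed explicitly
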
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_file_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_file_util.py
new file mode 100644
index 00000000..a4e2d025
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_file_util.py
@@ -0,0 +1,122 @@
+"""Tests for distutils.file_util."""
+import unittest
+import os
+import errno
+from unittest.mock import patch
+
+from distutils.file_util import move_file, copy_file
+from distutils import log
+from distutils.tests import support
+from distutils.errors import DistutilsFileError
+from test.support import run_unittest, unlink
+
+class FileUtilTestCase(support.TempdirManager, unittest.TestCase):
+
+ def _log(self, msg, *args):
+ if len(args) > 0:
+ self._logs.append(msg % args)
+ else:
+ self._logs.append(msg)
+
+ def setUp(self):
+ super(FileUtilTestCase, self).setUp()
+ self._logs = []
+ self.old_log = log.info
+ log.info = self._log
+ tmp_dir = self.mkdtemp()
+ self.source = os.path.join(tmp_dir, 'f1')
+ self.target = os.path.join(tmp_dir, 'f2')
+ self.target_dir = os.path.join(tmp_dir, 'd1')
+
+ def tearDown(self):
+ log.info = self.old_log
+ super(FileUtilTestCase, self).tearDown()
+
+ def test_move_file_verbosity(self):
+ f = open(self.source, 'w')
+ try:
+ f.write('some content')
+ finally:
+ f.close()
+
+ move_file(self.source, self.target, verbose=0)
+ wanted = []
+ self.assertEqual(self._logs, wanted)
+
+ # back to original state
+ move_file(self.target, self.source, verbose=0)
+
+ move_file(self.source, self.target, verbose=1)
+ wanted = ['moving %s -> %s' % (self.source, self.target)]
+ self.assertEqual(self._logs, wanted)
+
+ # back to original state
+ move_file(self.target, self.source, verbose=0)
+
+ self._logs = []
+ # now the target is a dir
+ os.mkdir(self.target_dir)
+ move_file(self.source, self.target_dir, verbose=1)
+ wanted = ['moving %s -> %s' % (self.source, self.target_dir)]
+ self.assertEqual(self._logs, wanted)
+
+ def test_move_file_exception_unpacking_rename(self):
+ # see issue 22182
+ with patch("os.rename", side_effect=OSError("wrong", 1)), \
+ self.assertRaises(DistutilsFileError):
+ with open(self.source, 'w') as fobj:
+ fobj.write('spam eggs')
+ move_file(self.source, self.target, verbose=0)
+
+ def test_move_file_exception_unpacking_unlink(self):
+ # see issue 22182
+ with patch("os.rename", side_effect=OSError(errno.EXDEV, "wrong")), \
+ patch("os.unlink", side_effect=OSError("wrong", 1)), \
+ self.assertRaises(DistutilsFileError):
+ with open(self.source, 'w') as fobj:
+ fobj.write('spam eggs')
+ move_file(self.source, self.target, verbose=0)
+
+ def test_copy_file_hard_link(self):
+ with open(self.source, 'w') as f:
+ f.write('some content')
+ # Check first that copy_file() will not fall back on copying the file
+ # instead of creating the hard link.
+ try:
+ os.link(self.source, self.target)
+ except OSError as e:
+ self.skipTest('os.link: %s' % e)
+ else:
+ unlink(self.target)
+ st = os.stat(self.source)
+ copy_file(self.source, self.target, link='hard')
+ st2 = os.stat(self.source)
+ st3 = os.stat(self.target)
+ self.assertTrue(os.path.samestat(st, st2), (st, st2))
+ self.assertTrue(os.path.samestat(st2, st3), (st2, st3))
+ with open(self.source, 'r') as f:
+ self.assertEqual(f.read(), 'some content')
+
+ def test_copy_file_hard_link_failure(self):
+ # If hard linking fails, copy_file() falls back on copying file
+ # (some special filesystems don't support hard linking even under
+ # Unix, see issue #8876).
+ with open(self.source, 'w') as f:
+ f.write('some content')
+ st = os.stat(self.source)
+ with patch("os.link", side_effect=OSError(0, "linking unsupported")):
+ copy_file(self.source, self.target, link='hard')
+ st2 = os.stat(self.source)
+ st3 = os.stat(self.target)
+ self.assertTrue(os.path.samestat(st, st2), (st, st2))
+ self.assertFalse(os.path.samestat(st2, st3), (st2, st3))
+ for fn in (self.source, self.target):
+ with open(fn, 'r') as f:
+ self.assertEqual(f.read(), 'some content')
+
+
+def test_suite():
+ return unittest.makeSuite(FileUtilTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
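
copy_file and move_file in plain use, mirroring the hard-link fallback the tests cover (temp paths are arbitrary):

import os
import tempfile
from distutils.file_util import copy_file, move_file

tmpdir = tempfile.mkdtemp()
src = os.path.join(tmpdir, 'f1')
with open(src, 'w') as f:
    f.write('some content')

dst = os.path.join(tmpdir, 'f2')
# link='hard' tries os.link() first and falls back to a normal copy on
# filesystems that do not support hard links.
copy_file(src, dst, link='hard', verbose=0)

move_file(dst, os.path.join(tmpdir, 'f3'), verbose=0)
print(sorted(os.listdir(tmpdir)))   # ['f1', 'f3']
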
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_filelist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_filelist.py
new file mode 100644
index 00000000..c71342d0
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_filelist.py
@@ -0,0 +1,340 @@
+"""Tests for distutils.filelist."""
+import os
+import re
+import unittest
+from distutils import debug
+from distutils.log import WARN
+from distutils.errors import DistutilsTemplateError
+from distutils.filelist import glob_to_re, translate_pattern, FileList
+from distutils import filelist
+
+import test.support
+from test.support import captured_stdout, run_unittest
+from distutils.tests import support
+
+MANIFEST_IN = """\
+include ok
+include xo
+exclude xo
+include foo.tmp
+include buildout.cfg
+global-include *.x
+global-include *.txt
+global-exclude *.tmp
+recursive-include f *.oo
+recursive-exclude global *.x
+graft dir
+prune dir3
+"""
+
+
+def make_local_path(s):
+ """Converts '/' in a string to os.sep"""
+ return s.replace('/', os.sep)
+
+
+class FileListTestCase(support.LoggingSilencer,
+ unittest.TestCase):
+
+ def assertNoWarnings(self):
+ self.assertEqual(self.get_logs(WARN), [])
+ self.clear_logs()
+
+ def assertWarnings(self):
+ self.assertGreater(len(self.get_logs(WARN)), 0)
+ self.clear_logs()
+
+ def test_glob_to_re(self):
+ sep = os.sep
+ if os.sep == '\\':
+ sep = re.escape(os.sep)
+
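+        # glob_to_re translates '*' into '[^<sep>]*' and '?' into a single
+        # '[^<sep>]', so glob patterns never match across path separators;
+        # the (?s:...)\Z wrapper anchors the pattern at the end of the string.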
+ for glob, regex in (
+ # simple cases
+ ('foo*', r'(?s:foo[^%(sep)s]*)\Z'),
+ ('foo?', r'(?s:foo[^%(sep)s])\Z'),
+ ('foo??', r'(?s:foo[^%(sep)s][^%(sep)s])\Z'),
+ # special cases
+ (r'foo\\*', r'(?s:foo\\\\[^%(sep)s]*)\Z'),
+ (r'foo\\\*', r'(?s:foo\\\\\\[^%(sep)s]*)\Z'),
+ ('foo????', r'(?s:foo[^%(sep)s][^%(sep)s][^%(sep)s][^%(sep)s])\Z'),
+ (r'foo\\??', r'(?s:foo\\\\[^%(sep)s][^%(sep)s])\Z')):
+ regex = regex % {'sep': sep}
+ self.assertEqual(glob_to_re(glob), regex)
+
+ def test_process_template_line(self):
+ # testing all MANIFEST.in template patterns
+ file_list = FileList()
+ l = make_local_path
+
+ # simulated file list
+ file_list.allfiles = ['foo.tmp', 'ok', 'xo', 'four.txt',
+ 'buildout.cfg',
+ # filelist does not filter out VCS directories,
+ # it's sdist that does
+ l('.hg/last-message.txt'),
+ l('global/one.txt'),
+ l('global/two.txt'),
+ l('global/files.x'),
+ l('global/here.tmp'),
+ l('f/o/f.oo'),
+ l('dir/graft-one'),
+ l('dir/dir2/graft2'),
+ l('dir3/ok'),
+ l('dir3/sub/ok.txt'),
+ ]
+
+ for line in MANIFEST_IN.split('\n'):
+ if line.strip() == '':
+ continue
+ file_list.process_template_line(line)
+
+ wanted = ['ok',
+ 'buildout.cfg',
+ 'four.txt',
+ l('.hg/last-message.txt'),
+ l('global/one.txt'),
+ l('global/two.txt'),
+ l('f/o/f.oo'),
+ l('dir/graft-one'),
+ l('dir/dir2/graft2'),
+ ]
+
+ self.assertEqual(file_list.files, wanted)
+
+ def test_debug_print(self):
+ file_list = FileList()
+ with captured_stdout() as stdout:
+ file_list.debug_print('xxx')
+ self.assertEqual(stdout.getvalue(), '')
+
+ debug.DEBUG = True
+ try:
+ with captured_stdout() as stdout:
+ file_list.debug_print('xxx')
+ self.assertEqual(stdout.getvalue(), 'xxx\n')
+ finally:
+ debug.DEBUG = False
+
+ def test_set_allfiles(self):
+ file_list = FileList()
+ files = ['a', 'b', 'c']
+ file_list.set_allfiles(files)
+ self.assertEqual(file_list.allfiles, files)
+
+ def test_remove_duplicates(self):
+ file_list = FileList()
+ file_list.files = ['a', 'b', 'a', 'g', 'c', 'g']
+ # files must be sorted beforehand (sdist does it)
+ file_list.sort()
+ file_list.remove_duplicates()
+ self.assertEqual(file_list.files, ['a', 'b', 'c', 'g'])
+
+ def test_translate_pattern(self):
+ # not regex
+ self.assertTrue(hasattr(
+ translate_pattern('a', anchor=True, is_regex=False),
+ 'search'))
+
+ # is a regex
+ regex = re.compile('a')
+ self.assertEqual(
+ translate_pattern(regex, anchor=True, is_regex=True),
+ regex)
+
+ # plain string flagged as regex
+ self.assertTrue(hasattr(
+ translate_pattern('a', anchor=True, is_regex=True),
+ 'search'))
+
+ # glob support
+ self.assertTrue(translate_pattern(
+ '*.py', anchor=True, is_regex=False).search('filelist.py'))
+
+ def test_exclude_pattern(self):
+ # return False if no match
+ file_list = FileList()
+ self.assertFalse(file_list.exclude_pattern('*.py'))
+
+ # return True if files match
+ file_list = FileList()
+ file_list.files = ['a.py', 'b.py']
+ self.assertTrue(file_list.exclude_pattern('*.py'))
+
+ # test excludes
+ file_list = FileList()
+ file_list.files = ['a.py', 'a.txt']
+ file_list.exclude_pattern('*.py')
+ self.assertEqual(file_list.files, ['a.txt'])
+
+ def test_include_pattern(self):
+ # return False if no match
+ file_list = FileList()
+ file_list.set_allfiles([])
+ self.assertFalse(file_list.include_pattern('*.py'))
+
+ # return True if files match
+ file_list = FileList()
+ file_list.set_allfiles(['a.py', 'b.txt'])
+ self.assertTrue(file_list.include_pattern('*.py'))
+
+ # test * matches all files
+ file_list = FileList()
+ self.assertIsNone(file_list.allfiles)
+ file_list.set_allfiles(['a.py', 'b.txt'])
+ file_list.include_pattern('*')
+ self.assertEqual(file_list.allfiles, ['a.py', 'b.txt'])
+
+ def test_process_template(self):
+ l = make_local_path
+ # invalid lines
+ file_list = FileList()
+ for action in ('include', 'exclude', 'global-include',
+ 'global-exclude', 'recursive-include',
+ 'recursive-exclude', 'graft', 'prune', 'blarg'):
+ self.assertRaises(DistutilsTemplateError,
+ file_list.process_template_line, action)
+
+ # include
+ file_list = FileList()
+ file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')])
+
+ file_list.process_template_line('include *.py')
+ self.assertEqual(file_list.files, ['a.py'])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('include *.rb')
+ self.assertEqual(file_list.files, ['a.py'])
+ self.assertWarnings()
+
+ # exclude
+ file_list = FileList()
+ file_list.files = ['a.py', 'b.txt', l('d/c.py')]
+
+ file_list.process_template_line('exclude *.py')
+ self.assertEqual(file_list.files, ['b.txt', l('d/c.py')])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('exclude *.rb')
+ self.assertEqual(file_list.files, ['b.txt', l('d/c.py')])
+ self.assertWarnings()
+
+ # global-include
+ file_list = FileList()
+ file_list.set_allfiles(['a.py', 'b.txt', l('d/c.py')])
+
+ file_list.process_template_line('global-include *.py')
+ self.assertEqual(file_list.files, ['a.py', l('d/c.py')])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('global-include *.rb')
+ self.assertEqual(file_list.files, ['a.py', l('d/c.py')])
+ self.assertWarnings()
+
+ # global-exclude
+ file_list = FileList()
+ file_list.files = ['a.py', 'b.txt', l('d/c.py')]
+
+ file_list.process_template_line('global-exclude *.py')
+ self.assertEqual(file_list.files, ['b.txt'])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('global-exclude *.rb')
+ self.assertEqual(file_list.files, ['b.txt'])
+ self.assertWarnings()
+
+ # recursive-include
+ file_list = FileList()
+ file_list.set_allfiles(['a.py', l('d/b.py'), l('d/c.txt'),
+ l('d/d/e.py')])
+
+ file_list.process_template_line('recursive-include d *.py')
+ self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('recursive-include e *.py')
+ self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
+ self.assertWarnings()
+
+ # recursive-exclude
+ file_list = FileList()
+ file_list.files = ['a.py', l('d/b.py'), l('d/c.txt'), l('d/d/e.py')]
+
+ file_list.process_template_line('recursive-exclude d *.py')
+ self.assertEqual(file_list.files, ['a.py', l('d/c.txt')])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('recursive-exclude e *.py')
+ self.assertEqual(file_list.files, ['a.py', l('d/c.txt')])
+ self.assertWarnings()
+
+ # graft
+ file_list = FileList()
+ file_list.set_allfiles(['a.py', l('d/b.py'), l('d/d/e.py'),
+ l('f/f.py')])
+
+ file_list.process_template_line('graft d')
+ self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('graft e')
+ self.assertEqual(file_list.files, [l('d/b.py'), l('d/d/e.py')])
+ self.assertWarnings()
+
+ # prune
+ file_list = FileList()
+ file_list.files = ['a.py', l('d/b.py'), l('d/d/e.py'), l('f/f.py')]
+
+ file_list.process_template_line('prune d')
+ self.assertEqual(file_list.files, ['a.py', l('f/f.py')])
+ self.assertNoWarnings()
+
+ file_list.process_template_line('prune e')
+ self.assertEqual(file_list.files, ['a.py', l('f/f.py')])
+ self.assertWarnings()
+
+
+class FindAllTestCase(unittest.TestCase):
+ @test.support.skip_unless_symlink
+ def test_missing_symlink(self):
+ with test.support.temp_cwd():
+ os.symlink('foo', 'bar')
+ self.assertEqual(filelist.findall(), [])
+
+ def test_basic_discovery(self):
+ """
+ When findall is called with no parameters or with
+ '.' as the parameter, the dot should be omitted from
+ the results.
+ """
+ with test.support.temp_cwd():
+ os.mkdir('foo')
+ file1 = os.path.join('foo', 'file1.txt')
+ test.support.create_empty_file(file1)
+ os.mkdir('bar')
+ file2 = os.path.join('bar', 'file2.txt')
+ test.support.create_empty_file(file2)
+ expected = [file2, file1]
+ self.assertEqual(sorted(filelist.findall()), expected)
+
+ def test_non_local_discovery(self):
+ """
+ When findall is called with another path, the full
+ path name should be returned.
+ """
+ with test.support.temp_dir() as temp_dir:
+ file1 = os.path.join(temp_dir, 'file1.txt')
+ test.support.create_empty_file(file1)
+ expected = [file1]
+ self.assertEqual(filelist.findall(temp_dir), expected)
+
+
+def test_suite():
+ return unittest.TestSuite([
+ unittest.makeSuite(FileListTestCase),
+ unittest.makeSuite(FindAllTestCase),
+ ])
+
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install.py
new file mode 100644
index 00000000..287ab198
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install.py
@@ -0,0 +1,248 @@
+"""Tests for distutils.command.install."""
+
+import os
+import sys
+import unittest
+import site
+
+from test.support import captured_stdout, run_unittest
+
+from distutils import sysconfig
+from distutils.command.install import install
+from distutils.command import install as install_module
+from distutils.command.build_ext import build_ext
+from distutils.command.install import INSTALL_SCHEMES
+from distutils.core import Distribution
+from distutils.errors import DistutilsOptionError
+from distutils.extension import Extension
+
+from distutils.tests import support
+from test import support as test_support
+
+
+def _make_ext_name(modname):
+ return modname + sysconfig.get_config_var('EXT_SUFFIX')
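+# Note: EXT_SUFFIX includes the import tag, so the result here is typically
+# something like 'xx.cp37-win_amd64.pyd' on Windows or
+# 'xx.cpython-37m-x86_64-linux-gnu.so' on Linux.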
+
+
+class InstallTestCase(support.TempdirManager,
+ support.EnvironGuard,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_home_installation_scheme(self):
+        # This ensures two things:
+        # - that --home generates the desired set of directory names
+        # - that --home is supported on all platforms
+ builddir = self.mkdtemp()
+ destination = os.path.join(builddir, "installation")
+
+ dist = Distribution({"name": "foopkg"})
+        # script_name need not exist, it just needs to be initialized
+ dist.script_name = os.path.join(builddir, "setup.py")
+ dist.command_obj["build"] = support.DummyCommand(
+ build_base=builddir,
+ build_lib=os.path.join(builddir, "lib"),
+ )
+
+ cmd = install(dist)
+ cmd.home = destination
+ cmd.ensure_finalized()
+
+ self.assertEqual(cmd.install_base, destination)
+ self.assertEqual(cmd.install_platbase, destination)
+
+ def check_path(got, expected):
+ got = os.path.normpath(got)
+ expected = os.path.normpath(expected)
+ self.assertEqual(got, expected)
+
+ libdir = os.path.join(destination, "lib", "python")
+ check_path(cmd.install_lib, libdir)
+ check_path(cmd.install_platlib, libdir)
+ check_path(cmd.install_purelib, libdir)
+ check_path(cmd.install_headers,
+ os.path.join(destination, "include", "python", "foopkg"))
+ check_path(cmd.install_scripts, os.path.join(destination, "bin"))
+ check_path(cmd.install_data, destination)
+
+ def test_user_site(self):
+ # test install with --user
+ # preparing the environment for the test
+ self.old_user_base = site.USER_BASE
+ self.old_user_site = site.USER_SITE
+ self.tmpdir = self.mkdtemp()
+ self.user_base = os.path.join(self.tmpdir, 'B')
+ self.user_site = os.path.join(self.tmpdir, 'S')
+ site.USER_BASE = self.user_base
+ site.USER_SITE = self.user_site
+ install_module.USER_BASE = self.user_base
+ install_module.USER_SITE = self.user_site
+
+ def _expanduser(path):
+ return self.tmpdir
+ self.old_expand = os.path.expanduser
+ os.path.expanduser = _expanduser
+
+ def cleanup():
+ site.USER_BASE = self.old_user_base
+ site.USER_SITE = self.old_user_site
+ install_module.USER_BASE = self.old_user_base
+ install_module.USER_SITE = self.old_user_site
+ os.path.expanduser = self.old_expand
+
+ self.addCleanup(cleanup)
+
+ for key in ('nt_user', 'unix_user'):
+ self.assertIn(key, INSTALL_SCHEMES)
+
+ dist = Distribution({'name': 'xx'})
+ cmd = install(dist)
+
+ # making sure the user option is there
+        options = [name for name, short, label in
+ cmd.user_options]
+ self.assertIn('user', options)
+
+ # setting a value
+ cmd.user = 1
+
+ # user base and site shouldn't be created yet
+ self.assertFalse(os.path.exists(self.user_base))
+ self.assertFalse(os.path.exists(self.user_site))
+
+ # let's run finalize
+ cmd.ensure_finalized()
+
+ # now they should
+ self.assertTrue(os.path.exists(self.user_base))
+ self.assertTrue(os.path.exists(self.user_site))
+
+ self.assertIn('userbase', cmd.config_vars)
+ self.assertIn('usersite', cmd.config_vars)
+
+ def test_handle_extra_path(self):
+ dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'})
+ cmd = install(dist)
+
+ # two elements
+ cmd.handle_extra_path()
+ self.assertEqual(cmd.extra_path, ['path', 'dirs'])
+ self.assertEqual(cmd.extra_dirs, 'dirs')
+ self.assertEqual(cmd.path_file, 'path')
+
+ # one element
+ cmd.extra_path = ['path']
+ cmd.handle_extra_path()
+ self.assertEqual(cmd.extra_path, ['path'])
+ self.assertEqual(cmd.extra_dirs, 'path')
+ self.assertEqual(cmd.path_file, 'path')
+
+ # none
+ dist.extra_path = cmd.extra_path = None
+ cmd.handle_extra_path()
+ self.assertEqual(cmd.extra_path, None)
+ self.assertEqual(cmd.extra_dirs, '')
+ self.assertEqual(cmd.path_file, None)
+
+        # three elements (no way!)
+ cmd.extra_path = 'path,dirs,again'
+ self.assertRaises(DistutilsOptionError, cmd.handle_extra_path)
+
+ def test_finalize_options(self):
+ dist = Distribution({'name': 'xx'})
+ cmd = install(dist)
+
+ # must supply either prefix/exec-prefix/home or
+ # install-base/install-platbase -- not both
+ cmd.prefix = 'prefix'
+ cmd.install_base = 'base'
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+
+ # must supply either home or prefix/exec-prefix -- not both
+ cmd.install_base = None
+ cmd.home = 'home'
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+
+ # can't combine user with prefix/exec_prefix/home or
+ # install_(plat)base
+ cmd.prefix = None
+ cmd.user = 'user'
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+
+ def test_record(self):
+ install_dir = self.mkdtemp()
+ project_dir, dist = self.create_dist(py_modules=['hello'],
+ scripts=['sayhi'])
+ os.chdir(project_dir)
+ self.write_file('hello.py', "def main(): print('o hai')")
+ self.write_file('sayhi', 'from hello import main; main()')
+
+ cmd = install(dist)
+ dist.command_obj['install'] = cmd
+ cmd.root = install_dir
+ cmd.record = os.path.join(project_dir, 'filelist')
+ cmd.ensure_finalized()
+ cmd.run()
+
+ f = open(cmd.record)
+ try:
+ content = f.read()
+ finally:
+ f.close()
+
+ found = [os.path.basename(line) for line in content.splitlines()]
+ expected = ['hello.py', 'hello.%s.pyc' % sys.implementation.cache_tag,
+ 'sayhi',
+ 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
+ self.assertEqual(found, expected)
+
+ def test_record_extensions(self):
+ cmd = test_support.missing_compiler_executable()
+ if cmd is not None:
+ self.skipTest('The %r command is not found' % cmd)
+ install_dir = self.mkdtemp()
+ project_dir, dist = self.create_dist(ext_modules=[
+ Extension('xx', ['xxmodule.c'])])
+ os.chdir(project_dir)
+ support.copy_xxmodule_c(project_dir)
+
+ buildextcmd = build_ext(dist)
+ support.fixup_build_ext(buildextcmd)
+ buildextcmd.ensure_finalized()
+
+ cmd = install(dist)
+ dist.command_obj['install'] = cmd
+ dist.command_obj['build_ext'] = buildextcmd
+ cmd.root = install_dir
+ cmd.record = os.path.join(project_dir, 'filelist')
+ cmd.ensure_finalized()
+ cmd.run()
+
+ f = open(cmd.record)
+ try:
+ content = f.read()
+ finally:
+ f.close()
+
+ found = [os.path.basename(line) for line in content.splitlines()]
+ expected = [_make_ext_name('xx'),
+ 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
+ self.assertEqual(found, expected)
+
+ def test_debug_mode(self):
+ # this covers the code called when DEBUG is set
+ old_logs_len = len(self.logs)
+ install_module.DEBUG = True
+ try:
+ with captured_stdout():
+ self.test_record()
+ finally:
+ install_module.DEBUG = False
+ self.assertGreater(len(self.logs), old_logs_len)
+
+
+def test_suite():
+ return unittest.makeSuite(InstallTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_data.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_data.py
new file mode 100644
index 00000000..32ab296a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_data.py
@@ -0,0 +1,75 @@
+"""Tests for distutils.command.install_data."""
+import os
+import unittest
+
+from distutils.command.install_data import install_data
+from distutils.tests import support
+from test.support import run_unittest
+
+class InstallDataTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ support.EnvironGuard,
+ unittest.TestCase):
+
+ def test_simple_run(self):
+ pkg_dir, dist = self.create_dist()
+ cmd = install_data(dist)
+ cmd.install_dir = inst = os.path.join(pkg_dir, 'inst')
+
+ # data_files can contain
+ # - simple files
+        # - a tuple with a path, and a list of files
+ one = os.path.join(pkg_dir, 'one')
+ self.write_file(one, 'xxx')
+ inst2 = os.path.join(pkg_dir, 'inst2')
+ two = os.path.join(pkg_dir, 'two')
+ self.write_file(two, 'xxx')
+
+ cmd.data_files = [one, (inst2, [two])]
+ self.assertEqual(cmd.get_inputs(), [one, (inst2, [two])])
+
+ # let's run the command
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # let's check the result
+ self.assertEqual(len(cmd.get_outputs()), 2)
+ rtwo = os.path.split(two)[-1]
+ self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))
+ rone = os.path.split(one)[-1]
+ self.assertTrue(os.path.exists(os.path.join(inst, rone)))
+ cmd.outfiles = []
+
+        # let's try with warn_dir set to 1
+ cmd.warn_dir = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # let's check the result
+ self.assertEqual(len(cmd.get_outputs()), 2)
+ self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))
+ self.assertTrue(os.path.exists(os.path.join(inst, rone)))
+ cmd.outfiles = []
+
+        # now using root and an empty dir
+ cmd.root = os.path.join(pkg_dir, 'root')
+ inst3 = os.path.join(cmd.install_dir, 'inst3')
+ inst4 = os.path.join(pkg_dir, 'inst4')
+ three = os.path.join(cmd.install_dir, 'three')
+ self.write_file(three, 'xx')
+ cmd.data_files = [one, (inst2, [two]),
+ ('inst3', [three]),
+ (inst4, [])]
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # let's check the result
+ self.assertEqual(len(cmd.get_outputs()), 4)
+ self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))
+ self.assertTrue(os.path.exists(os.path.join(inst, rone)))
+
+def test_suite():
+ return unittest.makeSuite(InstallDataTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_headers.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_headers.py
new file mode 100644
index 00000000..2217b321
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_headers.py
@@ -0,0 +1,39 @@
+"""Tests for distutils.command.install_headers."""
+import os
+import unittest
+
+from distutils.command.install_headers import install_headers
+from distutils.tests import support
+from test.support import run_unittest
+
+class InstallHeadersTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ support.EnvironGuard,
+ unittest.TestCase):
+
+ def test_simple_run(self):
+ # we have two headers
+ header_list = self.mkdtemp()
+ header1 = os.path.join(header_list, 'header1')
+ header2 = os.path.join(header_list, 'header2')
+ self.write_file(header1)
+ self.write_file(header2)
+ headers = [header1, header2]
+
+ pkg_dir, dist = self.create_dist(headers=headers)
+ cmd = install_headers(dist)
+ self.assertEqual(cmd.get_inputs(), headers)
+
+ # let's run the command
+ cmd.install_dir = os.path.join(pkg_dir, 'inst')
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # let's check the results
+ self.assertEqual(len(cmd.get_outputs()), 2)
+
+def test_suite():
+ return unittest.makeSuite(InstallHeadersTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_lib.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_lib.py
new file mode 100644
index 00000000..fda6315b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_lib.py
@@ -0,0 +1,115 @@
+"""Tests for distutils.command.install_data."""
+import sys
+import os
+import importlib.util
+import unittest
+
+from distutils.command.install_lib import install_lib
+from distutils.extension import Extension
+from distutils.tests import support
+from distutils.errors import DistutilsOptionError
+from test.support import run_unittest
+
+
+class InstallLibTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ support.EnvironGuard,
+ unittest.TestCase):
+
+ def test_finalize_options(self):
+ dist = self.create_dist()[1]
+ cmd = install_lib(dist)
+
+ cmd.finalize_options()
+ self.assertEqual(cmd.compile, 1)
+ self.assertEqual(cmd.optimize, 0)
+
+ # optimize must be 0, 1, or 2
+ cmd.optimize = 'foo'
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+ cmd.optimize = '4'
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+
+ cmd.optimize = '2'
+ cmd.finalize_options()
+ self.assertEqual(cmd.optimize, 2)
+
+ @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
+ def test_byte_compile(self):
+ project_dir, dist = self.create_dist()
+ os.chdir(project_dir)
+ cmd = install_lib(dist)
+ cmd.compile = cmd.optimize = 1
+
+ f = os.path.join(project_dir, 'foo.py')
+ self.write_file(f, '# python file')
+ cmd.byte_compile([f])
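+        # byte_compile() writes both a regular and an optimized cache file;
+        # cache_from_source() below rebuilds their expected __pycache__ paths
+        # (roughly foo.cpython-37.pyc and foo.cpython-37.opt-1.pyc here).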
+ pyc_file = importlib.util.cache_from_source('foo.py', optimization='')
+ pyc_opt_file = importlib.util.cache_from_source('foo.py',
+ optimization=cmd.optimize)
+ self.assertTrue(os.path.exists(pyc_file))
+ self.assertTrue(os.path.exists(pyc_opt_file))
+
+ def test_get_outputs(self):
+ project_dir, dist = self.create_dist()
+ os.chdir(project_dir)
+ os.mkdir('spam')
+ cmd = install_lib(dist)
+
+ # setting up a dist environment
+ cmd.compile = cmd.optimize = 1
+ cmd.install_dir = self.mkdtemp()
+ f = os.path.join(project_dir, 'spam', '__init__.py')
+ self.write_file(f, '# python package')
+ cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
+ cmd.distribution.packages = ['spam']
+ cmd.distribution.script_name = 'setup.py'
+
+ # get_outputs should return 4 elements: spam/__init__.py and .pyc,
+ # foo.import-tag-abiflags.so / foo.pyd
+ outputs = cmd.get_outputs()
+ self.assertEqual(len(outputs), 4, outputs)
+
+ def test_get_inputs(self):
+ project_dir, dist = self.create_dist()
+ os.chdir(project_dir)
+ os.mkdir('spam')
+ cmd = install_lib(dist)
+
+ # setting up a dist environment
+ cmd.compile = cmd.optimize = 1
+ cmd.install_dir = self.mkdtemp()
+ f = os.path.join(project_dir, 'spam', '__init__.py')
+ self.write_file(f, '# python package')
+ cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
+ cmd.distribution.packages = ['spam']
+ cmd.distribution.script_name = 'setup.py'
+
+ # get_inputs should return 2 elements: spam/__init__.py and
+ # foo.import-tag-abiflags.so / foo.pyd
+ inputs = cmd.get_inputs()
+ self.assertEqual(len(inputs), 2, inputs)
+
+ def test_dont_write_bytecode(self):
+ # makes sure byte_compile is not used
+ dist = self.create_dist()[1]
+ cmd = install_lib(dist)
+ cmd.compile = 1
+ cmd.optimize = 1
+
+ old_dont_write_bytecode = sys.dont_write_bytecode
+ sys.dont_write_bytecode = True
+ try:
+ cmd.byte_compile([])
+ finally:
+ sys.dont_write_bytecode = old_dont_write_bytecode
+
+ self.assertIn('byte-compiling is disabled',
+ self.logs[0][1] % self.logs[0][2])
+
+
+def test_suite():
+ return unittest.makeSuite(InstallLibTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_scripts.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_scripts.py
new file mode 100644
index 00000000..1f7b1038
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_install_scripts.py
@@ -0,0 +1,82 @@
+"""Tests for distutils.command.install_scripts."""
+
+import os
+import unittest
+
+from distutils.command.install_scripts import install_scripts
+from distutils.core import Distribution
+
+from distutils.tests import support
+from test.support import run_unittest
+
+
+class InstallScriptsTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_default_settings(self):
+ dist = Distribution()
+ dist.command_obj["build"] = support.DummyCommand(
+ build_scripts="/foo/bar")
+ dist.command_obj["install"] = support.DummyCommand(
+ install_scripts="/splat/funk",
+ force=1,
+ skip_build=1,
+ )
+ cmd = install_scripts(dist)
+ self.assertFalse(cmd.force)
+ self.assertFalse(cmd.skip_build)
+ self.assertIsNone(cmd.build_dir)
+ self.assertIsNone(cmd.install_dir)
+
+ cmd.finalize_options()
+
+ self.assertTrue(cmd.force)
+ self.assertTrue(cmd.skip_build)
+ self.assertEqual(cmd.build_dir, "/foo/bar")
+ self.assertEqual(cmd.install_dir, "/splat/funk")
+
+ def test_installation(self):
+ source = self.mkdtemp()
+ expected = []
+
+ def write_script(name, text):
+ expected.append(name)
+ f = open(os.path.join(source, name), "w")
+ try:
+ f.write(text)
+ finally:
+ f.close()
+
+ write_script("script1.py", ("#! /usr/bin/env python2.3\n"
+ "# bogus script w/ Python sh-bang\n"
+ "pass\n"))
+ write_script("script2.py", ("#!/usr/bin/python\n"
+ "# bogus script w/ Python sh-bang\n"
+ "pass\n"))
+ write_script("shell.sh", ("#!/bin/sh\n"
+ "# bogus shell script w/ sh-bang\n"
+ "exit 0\n"))
+
+ target = self.mkdtemp()
+ dist = Distribution()
+ dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
+ dist.command_obj["install"] = support.DummyCommand(
+ install_scripts=target,
+ force=1,
+ skip_build=1,
+ )
+ cmd = install_scripts(dist)
+ cmd.finalize_options()
+ cmd.run()
+
+ installed = os.listdir(target)
+ for name in expected:
+ self.assertIn(name, installed)
+
+
+def test_suite():
+ return unittest.makeSuite(InstallScriptsTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_log.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_log.py
new file mode 100644
index 00000000..75cf9006
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_log.py
@@ -0,0 +1,46 @@
+"""Tests for distutils.log"""
+
+import io
+import sys
+import unittest
+from test.support import swap_attr, run_unittest
+
+from distutils import log
+
+class TestLog(unittest.TestCase):
+ def test_non_ascii(self):
+ # Issues #8663, #34421: test that non-encodable text is escaped with
+ # backslashreplace error handler and encodable non-ASCII text is
+ # output as is.
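+        # cp437 can encode the Greek α and ε but not ė, ã, È or ō, so only
+        # those characters are affected by the chosen error handler below.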
+ for errors in ('strict', 'backslashreplace', 'surrogateescape',
+ 'replace', 'ignore'):
+ with self.subTest(errors=errors):
+ stdout = io.TextIOWrapper(io.BytesIO(),
+ encoding='cp437', errors=errors)
+ stderr = io.TextIOWrapper(io.BytesIO(),
+ encoding='cp437', errors=errors)
+ old_threshold = log.set_threshold(log.DEBUG)
+ try:
+ with swap_attr(sys, 'stdout', stdout), \
+ swap_attr(sys, 'stderr', stderr):
+ log.debug('Dεbug\tMėssãge')
+                        log.fatal('Fαtal\tÈrrōr')
+ finally:
+ log.set_threshold(old_threshold)
+
+ stdout.seek(0)
+ self.assertEqual(stdout.read().rstrip(),
+ 'Dεbug\tM?ss?ge' if errors == 'replace' else
+ 'Dεbug\tMssge' if errors == 'ignore' else
+ 'Dεbug\tM\\u0117ss\\xe3ge')
+ stderr.seek(0)
+ self.assertEqual(stderr.read().rstrip(),
+ 'Fαtal\t?rr?r' if errors == 'replace' else
+ 'Fαtal\trrr' if errors == 'ignore' else
+ 'Fαtal\t\\xc8rr\\u014dr')
+
+def test_suite():
+ return unittest.makeSuite(TestLog)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_msvc9compiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_msvc9compiler.py
new file mode 100644
index 00000000..77a07ef3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_msvc9compiler.py
@@ -0,0 +1,184 @@
+"""Tests for distutils.msvc9compiler."""
+import sys
+import unittest
+import os
+
+from distutils.errors import DistutilsPlatformError
+from distutils.tests import support
+from test.support import run_unittest
+
+# A manifest with the only assembly reference being the msvcrt assembly, so
+# should have the assembly completely stripped. Note that although the
+# assembly has a <security> reference, the assembly is removed - that is
+# currently a "feature", not a bug :)
+_MANIFEST_WITH_ONLY_MSVC_REFERENCE = """\
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+
+# A manifest with references to assemblies other than msvcrt. When processed,
+# this assembly should be returned with just the msvcrt part removed.
+_MANIFEST_WITH_MULTIPLE_REFERENCES = """\
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+
+_CLEANED_MANIFEST = """\
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ """
+
+if sys.platform=="win32":
+ from distutils.msvccompiler import get_build_version
+ if get_build_version()>=8.0:
+ SKIP_MESSAGE = None
+ else:
+ SKIP_MESSAGE = "These tests are only for MSVC8.0 or above"
+else:
+ SKIP_MESSAGE = "These tests are only for win32"
+
+@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE)
+class msvc9compilerTestCase(support.TempdirManager,
+ unittest.TestCase):
+
+ def test_no_compiler(self):
+ # makes sure query_vcvarsall raises
+ # a DistutilsPlatformError if the compiler
+ # is not found
+ from distutils.msvc9compiler import query_vcvarsall
+ def _find_vcvarsall(version):
+ return None
+
+ from distutils import msvc9compiler
+ old_find_vcvarsall = msvc9compiler.find_vcvarsall
+ msvc9compiler.find_vcvarsall = _find_vcvarsall
+ try:
+ self.assertRaises(DistutilsPlatformError, query_vcvarsall,
+ 'wont find this version')
+ finally:
+ msvc9compiler.find_vcvarsall = old_find_vcvarsall
+
+ def test_reg_class(self):
+ from distutils.msvc9compiler import Reg
+ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx')
+
+ # looking for values that should exist on all
+ # windows registry versions.
+ path = r'Control Panel\Desktop'
+ v = Reg.get_value(path, 'dragfullwindows')
+ self.assertIn(v, ('0', '1', '2'))
+
+ import winreg
+ HKCU = winreg.HKEY_CURRENT_USER
+ keys = Reg.read_keys(HKCU, 'xxxx')
+ self.assertEqual(keys, None)
+
+ keys = Reg.read_keys(HKCU, r'Control Panel')
+ self.assertIn('Desktop', keys)
+
+ def test_remove_visual_c_ref(self):
+ from distutils.msvc9compiler import MSVCCompiler
+ tempdir = self.mkdtemp()
+ manifest = os.path.join(tempdir, 'manifest')
+ f = open(manifest, 'w')
+ try:
+ f.write(_MANIFEST_WITH_MULTIPLE_REFERENCES)
+ finally:
+ f.close()
+
+ compiler = MSVCCompiler()
+ compiler._remove_visual_c_ref(manifest)
+
+ # see what we got
+ f = open(manifest)
+ try:
+ # removing trailing spaces
+ content = '\n'.join([line.rstrip() for line in f.readlines()])
+ finally:
+ f.close()
+
+ # makes sure the manifest was properly cleaned
+ self.assertEqual(content, _CLEANED_MANIFEST)
+
+ def test_remove_entire_manifest(self):
+ from distutils.msvc9compiler import MSVCCompiler
+ tempdir = self.mkdtemp()
+ manifest = os.path.join(tempdir, 'manifest')
+ f = open(manifest, 'w')
+ try:
+ f.write(_MANIFEST_WITH_ONLY_MSVC_REFERENCE)
+ finally:
+ f.close()
+
+ compiler = MSVCCompiler()
+ got = compiler._remove_visual_c_ref(manifest)
+ self.assertIsNone(got)
+
+
+def test_suite():
+ return unittest.makeSuite(msvc9compilerTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_msvccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_msvccompiler.py
new file mode 100644
index 00000000..70a9c93a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_msvccompiler.py
@@ -0,0 +1,132 @@
+"""Tests for distutils._msvccompiler."""
+import sys
+import unittest
+import os
+
+from distutils.errors import DistutilsPlatformError
+from distutils.tests import support
+from test.support import run_unittest
+
+
+SKIP_MESSAGE = (None if sys.platform == "win32" else
+ "These tests are only for win32")
+
+@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE)
+class msvccompilerTestCase(support.TempdirManager,
+ unittest.TestCase):
+
+ def test_no_compiler(self):
+ import distutils._msvccompiler as _msvccompiler
+ # makes sure query_vcvarsall raises
+ # a DistutilsPlatformError if the compiler
+ # is not found
+ def _find_vcvarsall(plat_spec):
+ return None, None
+
+ old_find_vcvarsall = _msvccompiler._find_vcvarsall
+ _msvccompiler._find_vcvarsall = _find_vcvarsall
+ try:
+ self.assertRaises(DistutilsPlatformError,
+ _msvccompiler._get_vc_env,
+ 'wont find this version')
+ finally:
+ _msvccompiler._find_vcvarsall = old_find_vcvarsall
+
+ def test_compiler_options(self):
+ import distutils._msvccompiler as _msvccompiler
+ # suppress path to vcruntime from _find_vcvarsall to
+ # check that /MT is added to compile options
+ old_find_vcvarsall = _msvccompiler._find_vcvarsall
+ def _find_vcvarsall(plat_spec):
+ return old_find_vcvarsall(plat_spec)[0], None
+ _msvccompiler._find_vcvarsall = _find_vcvarsall
+ try:
+ compiler = _msvccompiler.MSVCCompiler()
+ compiler.initialize()
+
+ self.assertIn('/MT', compiler.compile_options)
+ self.assertNotIn('/MD', compiler.compile_options)
+ finally:
+ _msvccompiler._find_vcvarsall = old_find_vcvarsall
+
+ def test_vcruntime_copy(self):
+ import distutils._msvccompiler as _msvccompiler
+ # force path to a known file - it doesn't matter
+ # what we copy as long as its name is not in
+ # _msvccompiler._BUNDLED_DLLS
+ old_find_vcvarsall = _msvccompiler._find_vcvarsall
+ def _find_vcvarsall(plat_spec):
+ return old_find_vcvarsall(plat_spec)[0], __file__
+ _msvccompiler._find_vcvarsall = _find_vcvarsall
+ try:
+ tempdir = self.mkdtemp()
+ compiler = _msvccompiler.MSVCCompiler()
+ compiler.initialize()
+ compiler._copy_vcruntime(tempdir)
+
+ self.assertTrue(os.path.isfile(os.path.join(
+ tempdir, os.path.basename(__file__))))
+ finally:
+ _msvccompiler._find_vcvarsall = old_find_vcvarsall
+
+ def test_vcruntime_skip_copy(self):
+ import distutils._msvccompiler as _msvccompiler
+
+ tempdir = self.mkdtemp()
+ compiler = _msvccompiler.MSVCCompiler()
+ compiler.initialize()
+ dll = compiler._vcruntime_redist
+ self.assertTrue(os.path.isfile(dll), dll or "")
+
+ compiler._copy_vcruntime(tempdir)
+
+ self.assertFalse(os.path.isfile(os.path.join(
+ tempdir, os.path.basename(dll))), dll or "")
+
+ def test_get_vc_env_unicode(self):
+ import distutils._msvccompiler as _msvccompiler
+
+ test_var = 'ṰḖṤṪ┅ṼẨṜ'
+        test_value = '₃⁴₅'
+
+ # Ensure we don't early exit from _get_vc_env
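+        # (if DISTUTILS_USE_SDK were set, _get_vc_env would presumably just
+        # return the current environment and bypass vcvarsall entirely)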
+ old_distutils_use_sdk = os.environ.pop('DISTUTILS_USE_SDK', None)
+ os.environ[test_var] = test_value
+ try:
+ env = _msvccompiler._get_vc_env('x86')
+ self.assertIn(test_var.lower(), env)
+ self.assertEqual(test_value, env[test_var.lower()])
+ finally:
+ os.environ.pop(test_var)
+ if old_distutils_use_sdk:
+ os.environ['DISTUTILS_USE_SDK'] = old_distutils_use_sdk
+
+ def test_get_vc2017(self):
+ import distutils._msvccompiler as _msvccompiler
+
+ # This function cannot be mocked, so pass it if we find VS 2017
+ # and mark it skipped if we do not.
+ version, path = _msvccompiler._find_vc2017()
+ if version:
+ self.assertGreaterEqual(version, 15)
+ self.assertTrue(os.path.isdir(path))
+ else:
+ raise unittest.SkipTest("VS 2017 is not installed")
+
+ def test_get_vc2015(self):
+ import distutils._msvccompiler as _msvccompiler
+
+ # This function cannot be mocked, so pass it if we find VS 2015
+ # and mark it skipped if we do not.
+ version, path = _msvccompiler._find_vc2015()
+ if version:
+ self.assertGreaterEqual(version, 14)
+ self.assertTrue(os.path.isdir(path))
+ else:
+ raise unittest.SkipTest("VS 2015 is not installed")
+
+def test_suite():
+ return unittest.makeSuite(msvccompilerTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_register.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_register.py
new file mode 100644
index 00000000..e68b0af3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_register.py
@@ -0,0 +1,323 @@
+"""Tests for distutils.command.register."""
+import os
+import unittest
+import getpass
+import urllib
+import warnings
+
+from test.support import check_warnings, run_unittest
+
+from distutils.command import register as register_module
+from distutils.command.register import register
+from distutils.errors import DistutilsSetupError
+from distutils.log import INFO
+
+from distutils.tests.test_config import BasePyPIRCCommandTestCase
+
+try:
+ import docutils
+except ImportError:
+ docutils = None
+
+PYPIRC_NOPASSWORD = """\
+[distutils]
+
+index-servers =
+ server1
+
+[server1]
+username:me
+"""
+
+WANTED_PYPIRC = """\
+[distutils]
+index-servers =
+ pypi
+
+[pypi]
+username:tarek
+password:password
+"""
+
+class Inputs(object):
+ """Fakes user inputs."""
+ def __init__(self, *answers):
+ self.answers = answers
+ self.index = 0
+
+ def __call__(self, prompt=''):
+ try:
+ return self.answers[self.index]
+ finally:
+ self.index += 1
+
+class FakeOpener(object):
+ """Fakes a PyPI server"""
+ def __init__(self):
+ self.reqs = []
+
+ def __call__(self, *args):
+ return self
+
+ def open(self, req, data=None, timeout=None):
+ self.reqs.append(req)
+ return self
+
+ def read(self):
+ return b'xxx'
+
+ def getheader(self, name, default=None):
+ return {
+ 'content-type': 'text/plain; charset=utf-8',
+ }.get(name.lower(), default)
+
+
+class RegisterTestCase(BasePyPIRCCommandTestCase):
+
+ def setUp(self):
+ super(RegisterTestCase, self).setUp()
+ # patching the password prompt
+ self._old_getpass = getpass.getpass
+ def _getpass(prompt):
+ return 'password'
+ getpass.getpass = _getpass
+ urllib.request._opener = None
+ self.old_opener = urllib.request.build_opener
+ self.conn = urllib.request.build_opener = FakeOpener()
+
+ def tearDown(self):
+ getpass.getpass = self._old_getpass
+ urllib.request._opener = None
+ urllib.request.build_opener = self.old_opener
+ super(RegisterTestCase, self).tearDown()
+
+ def _get_cmd(self, metadata=None):
+ if metadata is None:
+ metadata = {'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx',
+ 'name': 'xxx', 'version': 'xxx'}
+ pkg_info, dist = self.create_dist(**metadata)
+ return register(dist)
+
+ def test_create_pypirc(self):
+ # this test makes sure a .pypirc file
+ # is created when requested.
+
+ # let's create a register instance
+ cmd = self._get_cmd()
+
+ # we shouldn't have a .pypirc file yet
+ self.assertFalse(os.path.exists(self.rc))
+
+ # patching input and getpass.getpass
+ # so register gets happy
+ #
+ # Here's what we are faking :
+ # use your existing login (choice 1.)
+ # Username : 'tarek'
+ # Password : 'password'
+ # Save your login (y/N)? : 'y'
+ inputs = Inputs('1', 'tarek', 'y')
+ register_module.input = inputs.__call__
+ # let's run the command
+ try:
+ cmd.run()
+ finally:
+ del register_module.input
+
+ # we should have a brand new .pypirc file
+ self.assertTrue(os.path.exists(self.rc))
+
+ # with the content similar to WANTED_PYPIRC
+ f = open(self.rc)
+ try:
+ content = f.read()
+ self.assertEqual(content, WANTED_PYPIRC)
+ finally:
+ f.close()
+
+ # now let's make sure the .pypirc file generated
+ # really works : we shouldn't be asked anything
+ # if we run the command again
+ def _no_way(prompt=''):
+ raise AssertionError(prompt)
+ register_module.input = _no_way
+
+ cmd.show_response = 1
+ cmd.run()
+
+ # let's see what the server received : we should
+ # have 2 similar requests
+ self.assertEqual(len(self.conn.reqs), 2)
+ req1 = dict(self.conn.reqs[0].headers)
+ req2 = dict(self.conn.reqs[1].headers)
+
+ self.assertEqual(req1['Content-length'], '1374')
+ self.assertEqual(req2['Content-length'], '1374')
+ self.assertIn(b'xxx', self.conn.reqs[1].data)
+
+ def test_password_not_in_file(self):
+
+ self.write_file(self.rc, PYPIRC_NOPASSWORD)
+ cmd = self._get_cmd()
+ cmd._set_config()
+ cmd.finalize_options()
+ cmd.send_metadata()
+
+        # dist.password should be set, and therefore
+        # used afterwards by other commands
+ self.assertEqual(cmd.distribution.password, 'password')
+
+ def test_registering(self):
+ # this test runs choice 2
+ cmd = self._get_cmd()
+ inputs = Inputs('2', 'tarek', 'tarek@ziade.org')
+ register_module.input = inputs.__call__
+ try:
+ # let's run the command
+ cmd.run()
+ finally:
+ del register_module.input
+
+        # we should have sent a request
+ self.assertEqual(len(self.conn.reqs), 1)
+ req = self.conn.reqs[0]
+ headers = dict(req.headers)
+ self.assertEqual(headers['Content-length'], '608')
+ self.assertIn(b'tarek', req.data)
+
+ def test_password_reset(self):
+ # this test runs choice 3
+ cmd = self._get_cmd()
+ inputs = Inputs('3', 'tarek@ziade.org')
+ register_module.input = inputs.__call__
+ try:
+ # let's run the command
+ cmd.run()
+ finally:
+ del register_module.input
+
+        # we should have sent a request
+ self.assertEqual(len(self.conn.reqs), 1)
+ req = self.conn.reqs[0]
+ headers = dict(req.headers)
+ self.assertEqual(headers['Content-length'], '290')
+ self.assertIn(b'tarek', req.data)
+
+ @unittest.skipUnless(docutils is not None, 'needs docutils')
+ def test_strict(self):
+        # testing the strict option
+        # when on, the register command stops if
+        # the metadata is incomplete or if
+        # long_description is not reST compliant
+
+ # empty metadata
+ cmd = self._get_cmd({})
+ cmd.ensure_finalized()
+ cmd.strict = 1
+ self.assertRaises(DistutilsSetupError, cmd.run)
+
+ # metadata are OK but long_description is broken
+ metadata = {'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'éxéxé',
+ 'name': 'xxx', 'version': 'xxx',
+ 'long_description': 'title\n==\n\ntext'}
+
+ cmd = self._get_cmd(metadata)
+ cmd.ensure_finalized()
+ cmd.strict = 1
+ self.assertRaises(DistutilsSetupError, cmd.run)
+
+ # now something that works
+ metadata['long_description'] = 'title\n=====\n\ntext'
+ cmd = self._get_cmd(metadata)
+ cmd.ensure_finalized()
+ cmd.strict = 1
+ inputs = Inputs('1', 'tarek', 'y')
+ register_module.input = inputs.__call__
+ # let's run the command
+ try:
+ cmd.run()
+ finally:
+ del register_module.input
+
+        # strict is off by default
+ cmd = self._get_cmd()
+ cmd.ensure_finalized()
+ inputs = Inputs('1', 'tarek', 'y')
+ register_module.input = inputs.__call__
+ # let's run the command
+ try:
+ cmd.run()
+ finally:
+ del register_module.input
+
+ # and finally a Unicode test (bug #12114)
+ metadata = {'url': 'xxx', 'author': '\u00c9ric',
+ 'author_email': 'xxx', 'name': 'xxx',
+ 'version': 'xxx',
+ 'description': 'Something about esszet \u00df',
+ 'long_description': 'More things about esszet \u00df'}
+
+ cmd = self._get_cmd(metadata)
+ cmd.ensure_finalized()
+ cmd.strict = 1
+ inputs = Inputs('1', 'tarek', 'y')
+ register_module.input = inputs.__call__
+ # let's run the command
+ try:
+ cmd.run()
+ finally:
+ del register_module.input
+
+ @unittest.skipUnless(docutils is not None, 'needs docutils')
+ def test_register_invalid_long_description(self):
+ description = ':funkie:`str`' # mimic Sphinx-specific markup
+ metadata = {'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx',
+ 'name': 'xxx', 'version': 'xxx',
+ 'long_description': description}
+ cmd = self._get_cmd(metadata)
+ cmd.ensure_finalized()
+ cmd.strict = True
+ inputs = Inputs('2', 'tarek', 'tarek@ziade.org')
+ register_module.input = inputs
+ self.addCleanup(delattr, register_module, 'input')
+
+ self.assertRaises(DistutilsSetupError, cmd.run)
+
+ def test_check_metadata_deprecated(self):
+        # makes sure check_metadata is deprecated
+ cmd = self._get_cmd()
+ with check_warnings() as w:
+ warnings.simplefilter("always")
+ cmd.check_metadata()
+ self.assertEqual(len(w.warnings), 1)
+
+ def test_list_classifiers(self):
+ cmd = self._get_cmd()
+ cmd.list_classifiers = 1
+ cmd.run()
+ results = self.get_logs(INFO)
+ self.assertEqual(results, ['running check', 'xxx'])
+
+ def test_show_response(self):
+        # test that the --show-response option returns a well-formatted response
+ cmd = self._get_cmd()
+ inputs = Inputs('1', 'tarek', 'y')
+ register_module.input = inputs.__call__
+ cmd.show_response = 1
+ try:
+ cmd.run()
+ finally:
+ del register_module.input
+
+ results = self.get_logs(INFO)
+ self.assertEqual(results[3], 75 * '-' + '\nxxx\n' + 75 * '-')
+
+
+def test_suite():
+ return unittest.makeSuite(RegisterTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_sdist.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_sdist.py
new file mode 100644
index 00000000..23db1269
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_sdist.py
@@ -0,0 +1,492 @@
+"""Tests for distutils.command.sdist."""
+import os
+import tarfile
+import unittest
+import warnings
+import zipfile
+from os.path import join
+from textwrap import dedent
+from test.support import captured_stdout, check_warnings, run_unittest
+
+try:
+ import zlib
+ ZLIB_SUPPORT = True
+except ImportError:
+ ZLIB_SUPPORT = False
+
+try:
+ import grp
+ import pwd
+ UID_GID_SUPPORT = True
+except ImportError:
+ UID_GID_SUPPORT = False
+
+from distutils.command.sdist import sdist, show_formats
+from distutils.core import Distribution
+from distutils.tests.test_config import BasePyPIRCCommandTestCase
+from distutils.errors import DistutilsOptionError
+from distutils.spawn import find_executable
+from distutils.log import WARN
+from distutils.filelist import FileList
+from distutils.archive_util import ARCHIVE_FORMATS
+
+SETUP_PY = """
+from distutils.core import setup
+import somecode
+
+setup(name='fake')
+"""
+
+MANIFEST = """\
+# file GENERATED by distutils, do NOT edit
+README
+buildout.cfg
+inroot.txt
+setup.py
+data%(sep)sdata.dt
+scripts%(sep)sscript.py
+some%(sep)sfile.txt
+some%(sep)sother_file.txt
+somecode%(sep)s__init__.py
+somecode%(sep)sdoc.dat
+somecode%(sep)sdoc.txt
+"""
+
+class SDistTestCase(BasePyPIRCCommandTestCase):
+
+ def setUp(self):
+ # PyPIRCCommandTestCase creates a temp dir already
+        # and puts it in self.tmp_dir
+ super(SDistTestCase, self).setUp()
+ # setting up an environment
+ self.old_path = os.getcwd()
+ os.mkdir(join(self.tmp_dir, 'somecode'))
+ os.mkdir(join(self.tmp_dir, 'dist'))
+ # a package, and a README
+ self.write_file((self.tmp_dir, 'README'), 'xxx')
+ self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#')
+ self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY)
+ os.chdir(self.tmp_dir)
+
+ def tearDown(self):
+ # back to normal
+ os.chdir(self.old_path)
+ super(SDistTestCase, self).tearDown()
+
+ def get_cmd(self, metadata=None):
+ """Returns a cmd"""
+ if metadata is None:
+ metadata = {'name': 'fake', 'version': '1.0',
+ 'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx'}
+ dist = Distribution(metadata)
+ dist.script_name = 'setup.py'
+ dist.packages = ['somecode']
+ dist.include_package_data = True
+ cmd = sdist(dist)
+ cmd.dist_dir = 'dist'
+ return dist, cmd
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_prune_file_list(self):
+ # this test creates a project with some VCS dirs and an NFS rename
+ # file, then launches sdist to check they get pruned on all systems
+
+ # creating VCS directories with some files in them
+ os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
+ self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx')
+
+ os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
+ self.write_file((self.tmp_dir, 'somecode', '.hg',
+ 'ok'), 'xxx')
+
+ os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
+ self.write_file((self.tmp_dir, 'somecode', '.git',
+ 'ok'), 'xxx')
+
+ self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
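+        # sdist.prune_file_list drops the build/dist dirs, common VCS
+        # directories (.svn, .hg, .git, ...) and stale NFS '.nfs*' files,
+        # which is what the expected archive contents below verify.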
+
+ # now building a sdist
+ dist, cmd = self.get_cmd()
+
+ # zip is available universally
+ # (tar might not be installed under win32)
+ cmd.formats = ['zip']
+
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # now let's check what we have
+ dist_folder = join(self.tmp_dir, 'dist')
+ files = os.listdir(dist_folder)
+ self.assertEqual(files, ['fake-1.0.zip'])
+
+ zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
+ try:
+ content = zip_file.namelist()
+ finally:
+ zip_file.close()
+
+ # making sure everything has been pruned correctly
+ expected = ['', 'PKG-INFO', 'README', 'setup.py',
+ 'somecode/', 'somecode/__init__.py']
+ self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected])
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ @unittest.skipIf(find_executable('tar') is None,
+ "The tar command is not found")
+ @unittest.skipIf(find_executable('gzip') is None,
+ "The gzip command is not found")
+ def test_make_distribution(self):
+ # now building a sdist
+ dist, cmd = self.get_cmd()
+
+ # creating a gztar then a tar
+ cmd.formats = ['gztar', 'tar']
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # making sure we have two files
+ dist_folder = join(self.tmp_dir, 'dist')
+ result = os.listdir(dist_folder)
+ result.sort()
+ self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
+
+ os.remove(join(dist_folder, 'fake-1.0.tar'))
+ os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
+
+ # now trying a tar then a gztar
+ cmd.formats = ['tar', 'gztar']
+
+ cmd.ensure_finalized()
+ cmd.run()
+
+ result = os.listdir(dist_folder)
+ result.sort()
+ self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_add_defaults(self):
+
+ # http://bugs.python.org/issue2279
+
+        # add_defaults should also include
+ # data_files and package_data
+ dist, cmd = self.get_cmd()
+
+ # filling data_files by pointing files
+ # in package_data
+ dist.package_data = {'': ['*.cfg', '*.dat'],
+ 'somecode': ['*.txt']}
+ self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
+ self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
+
+ # adding some data in data_files
+ data_dir = join(self.tmp_dir, 'data')
+ os.mkdir(data_dir)
+ self.write_file((data_dir, 'data.dt'), '#')
+ some_dir = join(self.tmp_dir, 'some')
+ os.mkdir(some_dir)
+ # make sure VCS directories are pruned (#14004)
+ hg_dir = join(self.tmp_dir, '.hg')
+ os.mkdir(hg_dir)
+ self.write_file((hg_dir, 'last-message.txt'), '#')
+ # a buggy regex used to prevent this from working on windows (#6884)
+ self.write_file((self.tmp_dir, 'buildout.cfg'), '#')
+ self.write_file((self.tmp_dir, 'inroot.txt'), '#')
+ self.write_file((some_dir, 'file.txt'), '#')
+ self.write_file((some_dir, 'other_file.txt'), '#')
+
+ dist.data_files = [('data', ['data/data.dt',
+ 'buildout.cfg',
+ 'inroot.txt',
+ 'notexisting']),
+ 'some/file.txt',
+ 'some/other_file.txt']
+
+ # adding a script
+ script_dir = join(self.tmp_dir, 'scripts')
+ os.mkdir(script_dir)
+ self.write_file((script_dir, 'script.py'), '#')
+ dist.scripts = [join('scripts', 'script.py')]
+
+ cmd.formats = ['zip']
+ cmd.use_defaults = True
+
+ cmd.ensure_finalized()
+ cmd.run()
+
+ # now let's check what we have
+ dist_folder = join(self.tmp_dir, 'dist')
+ files = os.listdir(dist_folder)
+ self.assertEqual(files, ['fake-1.0.zip'])
+
+ zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
+ try:
+ content = zip_file.namelist()
+ finally:
+ zip_file.close()
+
+ # making sure everything was added
+ expected = ['', 'PKG-INFO', 'README', 'buildout.cfg',
+ 'data/', 'data/data.dt', 'inroot.txt',
+ 'scripts/', 'scripts/script.py', 'setup.py',
+ 'some/', 'some/file.txt', 'some/other_file.txt',
+ 'somecode/', 'somecode/__init__.py', 'somecode/doc.dat',
+ 'somecode/doc.txt']
+ self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected])
+
+ # checking the MANIFEST
+ f = open(join(self.tmp_dir, 'MANIFEST'))
+ try:
+ manifest = f.read()
+ finally:
+ f.close()
+ self.assertEqual(manifest, MANIFEST % {'sep': os.sep})
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_metadata_check_option(self):
+        # testing the `metadata-check` option
+ dist, cmd = self.get_cmd(metadata={})
+
+ # this should raise some warnings !
+ # with the `check` subcommand
+ cmd.ensure_finalized()
+ cmd.run()
+ warnings = [msg for msg in self.get_logs(WARN) if
+ msg.startswith('warning: check:')]
+ self.assertEqual(len(warnings), 2)
+
+ # trying with a complete set of metadata
+ self.clear_logs()
+ dist, cmd = self.get_cmd()
+ cmd.ensure_finalized()
+ cmd.metadata_check = 0
+ cmd.run()
+ warnings = [msg for msg in self.get_logs(WARN) if
+ msg.startswith('warning: check:')]
+ self.assertEqual(len(warnings), 0)
+
+ def test_check_metadata_deprecated(self):
+        # makes sure check_metadata is deprecated
+ dist, cmd = self.get_cmd()
+ with check_warnings() as w:
+ warnings.simplefilter("always")
+ cmd.check_metadata()
+ self.assertEqual(len(w.warnings), 1)
+
+ def test_show_formats(self):
+ with captured_stdout() as stdout:
+ show_formats()
+
+ # the output should be a header line + one line per format
+ num_formats = len(ARCHIVE_FORMATS.keys())
+ output = [line for line in stdout.getvalue().split('\n')
+ if line.strip().startswith('--formats=')]
+ self.assertEqual(len(output), num_formats)
+
+ def test_finalize_options(self):
+ dist, cmd = self.get_cmd()
+ cmd.finalize_options()
+
+ # default options set by finalize
+ self.assertEqual(cmd.manifest, 'MANIFEST')
+ self.assertEqual(cmd.template, 'MANIFEST.in')
+ self.assertEqual(cmd.dist_dir, 'dist')
+
+        # formats has to be a string splittable on (' ', ',') or
+        # a list of strings
+ cmd.formats = 1
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+ cmd.formats = ['zip']
+ cmd.finalize_options()
+
+ # formats has to be known
+ cmd.formats = 'supazipa'
+ self.assertRaises(DistutilsOptionError, cmd.finalize_options)
+
+ # the following tests make sure there is a nice error message instead
+ # of a traceback when parsing an invalid manifest template
+
+ def _check_template(self, content):
+ dist, cmd = self.get_cmd()
+ os.chdir(self.tmp_dir)
+ self.write_file('MANIFEST.in', content)
+ cmd.ensure_finalized()
+ cmd.filelist = FileList()
+ cmd.read_template()
+ warnings = self.get_logs(WARN)
+ self.assertEqual(len(warnings), 1)
+
+ def test_invalid_template_unknown_command(self):
+ self._check_template('taunt knights *')
+
+ def test_invalid_template_wrong_arguments(self):
+ # this manifest command takes one argument
+ self._check_template('prune')
+
+ @unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
+ def test_invalid_template_wrong_path(self):
+ # on Windows, trailing slashes are not allowed
+ # this used to crash instead of raising a warning: #8286
+ self._check_template('include examples/')
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_get_file_list(self):
+ # make sure MANIFEST is recalculated
+ dist, cmd = self.get_cmd()
+
+ # filling data_files by pointing files in package_data
+ dist.package_data = {'somecode': ['*.txt']}
+ self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
+ cmd.formats = ['gztar']
+ cmd.ensure_finalized()
+ cmd.run()
+
+ f = open(cmd.manifest)
+ try:
+ manifest = [line.strip() for line in f.read().split('\n')
+ if line.strip() != '']
+ finally:
+ f.close()
+
+ self.assertEqual(len(manifest), 5)
+
+ # adding a file
+ self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
+
+ # make sure build_py is reinitialized, like a fresh run
+ build_py = dist.get_command_obj('build_py')
+ build_py.finalized = False
+ build_py.ensure_finalized()
+
+ cmd.run()
+
+ f = open(cmd.manifest)
+ try:
+ manifest2 = [line.strip() for line in f.read().split('\n')
+ if line.strip() != '']
+ finally:
+ f.close()
+
+ # do we have the new file in MANIFEST ?
+ self.assertEqual(len(manifest2), 6)
+ self.assertIn('doc2.txt', manifest2[-1])
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_manifest_marker(self):
+ # check that autogenerated MANIFESTs have a marker
+ dist, cmd = self.get_cmd()
+ cmd.ensure_finalized()
+ cmd.run()
+
+ f = open(cmd.manifest)
+ try:
+ manifest = [line.strip() for line in f.read().split('\n')
+ if line.strip() != '']
+ finally:
+ f.close()
+
+ self.assertEqual(manifest[0],
+ '# file GENERATED by distutils, do NOT edit')
+
+ @unittest.skipUnless(ZLIB_SUPPORT, "Need zlib support to run")
+ def test_manifest_comments(self):
+ # make sure comments don't cause exceptions or wrong includes
+ contents = dedent("""\
+ # bad.py
+ #bad.py
+ good.py
+ """)
+ dist, cmd = self.get_cmd()
+ cmd.ensure_finalized()
+ self.write_file((self.tmp_dir, cmd.manifest), contents)
+ self.write_file((self.tmp_dir, 'good.py'), '# pick me!')
+ self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!")
+ self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!")
+ cmd.run()
+ self.assertEqual(cmd.filelist.files, ['good.py'])
+
+ @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
+ def test_manual_manifest(self):
+ # check that a MANIFEST without a marker is left alone
+ dist, cmd = self.get_cmd()
+ cmd.formats = ['gztar']
+ cmd.ensure_finalized()
+ self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
+ self.write_file((self.tmp_dir, 'README.manual'),
+ 'This project maintains its MANIFEST file itself.')
+ cmd.run()
+ self.assertEqual(cmd.filelist.files, ['README.manual'])
+
+ f = open(cmd.manifest)
+ try:
+ manifest = [line.strip() for line in f.read().split('\n')
+ if line.strip() != '']
+ finally:
+ f.close()
+
+ self.assertEqual(manifest, ['README.manual'])
+
+ archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
+ archive = tarfile.open(archive_name)
+ try:
+ filenames = [tarinfo.name for tarinfo in archive]
+ finally:
+ archive.close()
+ self.assertEqual(sorted(filenames), ['fake-1.0', 'fake-1.0/PKG-INFO',
+ 'fake-1.0/README.manual'])
+
+ @unittest.skipUnless(ZLIB_SUPPORT, "requires zlib")
+ @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
+ @unittest.skipIf(find_executable('tar') is None,
+ "The tar command is not found")
+ @unittest.skipIf(find_executable('gzip') is None,
+ "The gzip command is not found")
+ def test_make_distribution_owner_group(self):
+ # now building a sdist
+ dist, cmd = self.get_cmd()
+
+ # creating a gztar and specifying the owner+group
+ cmd.formats = ['gztar']
+ cmd.owner = pwd.getpwuid(0)[0]
+ cmd.group = grp.getgrgid(0)[0]
+ cmd.ensure_finalized()
+ cmd.run()
+
+        # making sure we have the right owner and group
+ archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
+ archive = tarfile.open(archive_name)
+ try:
+ for member in archive.getmembers():
+ self.assertEqual(member.uid, 0)
+ self.assertEqual(member.gid, 0)
+ finally:
+ archive.close()
+
+ # building a sdist again
+ dist, cmd = self.get_cmd()
+
+ # creating a gztar
+ cmd.formats = ['gztar']
+ cmd.ensure_finalized()
+ cmd.run()
+
+        # making sure we have the right owner
+ archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
+ archive = tarfile.open(archive_name)
+
+        # note that we are not testing the group ownership here,
+        # because it may vary depending on the platform and the container
+ # rights (see #7408)
+ try:
+ for member in archive.getmembers():
+ self.assertEqual(member.uid, os.getuid())
+ finally:
+ archive.close()
+
+def test_suite():
+ return unittest.makeSuite(SDistTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_spawn.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_spawn.py
new file mode 100644
index 00000000..0d455385
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_spawn.py
@@ -0,0 +1,103 @@
+"""Tests for distutils.spawn."""
+import os
+import stat
+import sys
+import unittest
+from unittest import mock
+from test.support import run_unittest, unix_shell
+from test import support as test_support
+
+from distutils.spawn import find_executable
+from distutils.spawn import _nt_quote_args
+from distutils.spawn import spawn
+from distutils.errors import DistutilsExecError
+from distutils.tests import support
+
+class SpawnTestCase(support.TempdirManager,
+ support.LoggingSilencer,
+ unittest.TestCase):
+
+ def test_nt_quote_args(self):
+
+ for (args, wanted) in ((['with space', 'nospace'],
+ ['"with space"', 'nospace']),
+ (['nochange', 'nospace'],
+ ['nochange', 'nospace'])):
+ res = _nt_quote_args(args)
+ self.assertEqual(res, wanted)
+
+
+ @unittest.skipUnless(os.name in ('nt', 'posix'),
+ 'Runs only under posix or nt')
+ def test_spawn(self):
+ tmpdir = self.mkdtemp()
+
+ # creating something executable
+ # through the shell that returns 1
+ if sys.platform != 'win32':
+ exe = os.path.join(tmpdir, 'foo.sh')
+ self.write_file(exe, '#!%s\nexit 1' % unix_shell)
+ else:
+ exe = os.path.join(tmpdir, 'foo.bat')
+ self.write_file(exe, 'exit 1')
+
+ os.chmod(exe, 0o777)
+ self.assertRaises(DistutilsExecError, spawn, [exe])
+
+ # now something that works
+ if sys.platform != 'win32':
+ exe = os.path.join(tmpdir, 'foo.sh')
+ self.write_file(exe, '#!%s\nexit 0' % unix_shell)
+ else:
+ exe = os.path.join(tmpdir, 'foo.bat')
+ self.write_file(exe, 'exit 0')
+
+ os.chmod(exe, 0o777)
+ spawn([exe]) # should work without any error
+
+ def test_find_executable(self):
+ with test_support.temp_dir() as tmp_dir:
+ # use TESTFN to get a pseudo-unique filename
+ program_noeext = test_support.TESTFN
+            # Give the temporary program an ".exe" suffix on all platforms:
+            # it's needed on Windows and not harmful on the others.
+ program = program_noeext + ".exe"
+
+ filename = os.path.join(tmp_dir, program)
+ with open(filename, "wb"):
+ pass
+ os.chmod(filename, stat.S_IXUSR)
+
+ # test path parameter
+ rv = find_executable(program, path=tmp_dir)
+ self.assertEqual(rv, filename)
+
+ if sys.platform == 'win32':
+ # test without ".exe" extension
+ rv = find_executable(program_noeext, path=tmp_dir)
+ self.assertEqual(rv, filename)
+
+ # test find in the current directory
+ with test_support.change_cwd(tmp_dir):
+ rv = find_executable(program)
+ self.assertEqual(rv, program)
+
+ # test non-existent program
+ dont_exist_program = "dontexist_" + program
+            rv = find_executable(dont_exist_program, path=tmp_dir)
+ self.assertIsNone(rv)
+
+ # test os.defpath: missing PATH environment variable
+ with test_support.EnvironmentVarGuard() as env:
+ with mock.patch('distutils.spawn.os.defpath', tmp_dir):
+ env.pop('PATH')
+
+ rv = find_executable(program)
+ self.assertEqual(rv, filename)
+
+
+def test_suite():
+ return unittest.makeSuite(SpawnTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_sysconfig.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_sysconfig.py
new file mode 100644
index 00000000..fe4a2994
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_sysconfig.py
@@ -0,0 +1,198 @@
+"""Tests for distutils.sysconfig."""
+import os
+import shutil
+import subprocess
+import sys
+import textwrap
+import unittest
+
+from distutils import sysconfig
+from distutils.ccompiler import get_default_compiler
+from distutils.tests import support
+from test.support import TESTFN, run_unittest, check_warnings
+
+class SysconfigTestCase(support.EnvironGuard, unittest.TestCase):
+ def setUp(self):
+ super(SysconfigTestCase, self).setUp()
+ self.makefile = None
+
+ def tearDown(self):
+ if self.makefile is not None:
+ os.unlink(self.makefile)
+ self.cleanup_testfn()
+ super(SysconfigTestCase, self).tearDown()
+
+ def cleanup_testfn(self):
+ if os.path.isfile(TESTFN):
+ os.remove(TESTFN)
+ elif os.path.isdir(TESTFN):
+ shutil.rmtree(TESTFN)
+
+ def test_get_config_h_filename(self):
+ config_h = sysconfig.get_config_h_filename()
+ self.assertTrue(os.path.isfile(config_h), config_h)
+
+ def test_get_python_lib(self):
+ # XXX doesn't work on Linux when Python was never installed before
+ #self.assertTrue(os.path.isdir(lib_dir), lib_dir)
+ # test for pythonxx.lib?
+ self.assertNotEqual(sysconfig.get_python_lib(),
+ sysconfig.get_python_lib(prefix=TESTFN))
+
+ def test_get_config_vars(self):
+ cvars = sysconfig.get_config_vars()
+ self.assertIsInstance(cvars, dict)
+ self.assertTrue(cvars)
+
+ def test_srcdir(self):
+ # See Issues #15322, #15364.
+ srcdir = sysconfig.get_config_var('srcdir')
+
+ self.assertTrue(os.path.isabs(srcdir), srcdir)
+ self.assertTrue(os.path.isdir(srcdir), srcdir)
+
+ if sysconfig.python_build:
+ # The python executable has not been installed so srcdir
+ # should be a full source checkout.
+ Python_h = os.path.join(srcdir, 'Include', 'Python.h')
+ self.assertTrue(os.path.exists(Python_h), Python_h)
+ self.assertTrue(sysconfig._is_python_source_dir(srcdir))
+ elif os.name == 'posix':
+ self.assertEqual(
+ os.path.dirname(sysconfig.get_makefile_filename()),
+ srcdir)
+
+ def test_srcdir_independent_of_cwd(self):
+ # srcdir should be independent of the current working directory
+ # See Issues #15322, #15364.
+ srcdir = sysconfig.get_config_var('srcdir')
+ cwd = os.getcwd()
+ try:
+ os.chdir('..')
+ srcdir2 = sysconfig.get_config_var('srcdir')
+ finally:
+ os.chdir(cwd)
+ self.assertEqual(srcdir, srcdir2)
+
+ @unittest.skipUnless(get_default_compiler() == 'unix',
+ 'not testing if default compiler is not unix')
+ def test_customize_compiler(self):
+ os.environ['AR'] = 'my_ar'
+ os.environ['ARFLAGS'] = '-arflags'
+
+ # make sure AR gets caught
+ class compiler:
+ compiler_type = 'unix'
+
+ def set_executables(self, **kw):
+ self.exes = kw
+
+ comp = compiler()
+ sysconfig.customize_compiler(comp)
+ self.assertEqual(comp.exes['archiver'], 'my_ar -arflags')
+
+ def test_parse_makefile_base(self):
+ self.makefile = TESTFN
+ fd = open(self.makefile, 'w')
+ try:
+ fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=LIB'" '\n')
+ fd.write('VAR=$OTHER\nOTHER=foo')
+ finally:
+ fd.close()
+ d = sysconfig.parse_makefile(self.makefile)
+ self.assertEqual(d, {'CONFIG_ARGS': "'--arg1=optarg1' 'ENV=LIB'",
+ 'OTHER': 'foo'})
+
+ def test_parse_makefile_literal_dollar(self):
+ self.makefile = TESTFN
+ fd = open(self.makefile, 'w')
+ try:
+ fd.write(r"CONFIG_ARGS= '--arg1=optarg1' 'ENV=\$$LIB'" '\n')
+ fd.write('VAR=$OTHER\nOTHER=foo')
+ finally:
+ fd.close()
+ d = sysconfig.parse_makefile(self.makefile)
+ self.assertEqual(d, {'CONFIG_ARGS': r"'--arg1=optarg1' 'ENV=\$LIB'",
+ 'OTHER': 'foo'})
+
+
+ def test_sysconfig_module(self):
+ import sysconfig as global_sysconfig
+ self.assertEqual(global_sysconfig.get_config_var('CFLAGS'),
+ sysconfig.get_config_var('CFLAGS'))
+ self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'),
+ sysconfig.get_config_var('LDFLAGS'))
+
+ @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),
+ 'compiler flags customized')
+ def test_sysconfig_compiler_vars(self):
+ # On OS X, binary installers support extension module building on
+ # various levels of the operating system with differing Xcode
+ # configurations. This requires customization of some of the
+ # compiler configuration directives to suit the environment on
+ # the installed machine. Some of these customizations may require
+ # running external programs and, so, are deferred until needed by
+ # the first extension module build. With Python 3.3, only
+ # the Distutils version of sysconfig is used for extension module
+ # builds, which happens earlier in the Distutils tests. This may
+ # cause the following tests to fail since no tests have caused
+ # the global version of sysconfig to call the customization yet.
+ # The solution for now is to simply skip this test in this case.
+ # The longer-term solution is to only have one version of sysconfig.
+
+ import sysconfig as global_sysconfig
+ if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'):
+ self.skipTest('compiler flags customized')
+ self.assertEqual(global_sysconfig.get_config_var('LDSHARED'),
+ sysconfig.get_config_var('LDSHARED'))
+ self.assertEqual(global_sysconfig.get_config_var('CC'),
+ sysconfig.get_config_var('CC'))
+
+ @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
+ 'EXT_SUFFIX required for this test')
+ def test_SO_deprecation(self):
+ self.assertWarns(DeprecationWarning,
+ sysconfig.get_config_var, 'SO')
+
+ @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
+ 'EXT_SUFFIX required for this test')
+ def test_SO_value(self):
+ with check_warnings(('', DeprecationWarning)):
+ self.assertEqual(sysconfig.get_config_var('SO'),
+ sysconfig.get_config_var('EXT_SUFFIX'))
+
+ @unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
+ 'EXT_SUFFIX required for this test')
+ def test_SO_in_vars(self):
+ vars = sysconfig.get_config_vars()
+ self.assertIsNotNone(vars['SO'])
+ self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])
+
+ def test_customize_compiler_before_get_config_vars(self):
+ # Issue #21923: test that a Distribution compiler
+ # instance can be called without an explicit call to
+ # get_config_vars().
+ with open(TESTFN, 'w') as f:
+ f.writelines(textwrap.dedent('''\
+ from distutils.core import Distribution
+ config = Distribution().get_command_obj('config')
+ # try_compile may pass or it may fail if no compiler
+ # is found but it should not raise an exception.
+ rc = config.try_compile('int x;')
+ '''))
+ p = subprocess.Popen([str(sys.executable), TESTFN],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ outs, errs = p.communicate()
+ self.assertEqual(0, p.returncode, "Subprocess failed: " + outs)
+
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(SysconfigTestCase))
+ return suite
+
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_text_file.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_text_file.py
new file mode 100644
index 00000000..7e76240a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_text_file.py
@@ -0,0 +1,107 @@
+"""Tests for distutils.text_file."""
+import os
+import unittest
+from distutils.text_file import TextFile
+from distutils.tests import support
+from test.support import run_unittest
+
+TEST_DATA = """# test file
+
+line 3 \\
+# intervening comment
+ continues on next line
+"""
+
+class TextFileTestCase(support.TempdirManager, unittest.TestCase):
+
+ def test_class(self):
+ # old tests moved from text_file.__main__
+ # so they are really called by the buildbots
+
+ # result 1: no fancy options
+ result1 = ['# test file\n', '\n', 'line 3 \\\n',
+ '# intervening comment\n',
+ ' continues on next line\n']
+
+ # result 2: just strip comments
+ result2 = ["\n",
+ "line 3 \\\n",
+ " continues on next line\n"]
+
+ # result 3: just strip blank lines
+ result3 = ["# test file\n",
+ "line 3 \\\n",
+ "# intervening comment\n",
+ " continues on next line\n"]
+
+ # result 4: default, strip comments, blank lines,
+ # and trailing whitespace
+ result4 = ["line 3 \\",
+ " continues on next line"]
+
+ # result 5: strip comments and blanks, plus join lines (but don't
+        #   "collapse" joined lines)
+ result5 = ["line 3 continues on next line"]
+
+ # result 6: strip comments and blanks, plus join lines (and
+        #   "collapse" joined lines)
+ result6 = ["line 3 continues on next line"]
+
+ def test_input(count, description, file, expected_result):
+ result = file.readlines()
+ self.assertEqual(result, expected_result)
+
+ tmpdir = self.mkdtemp()
+ filename = os.path.join(tmpdir, "test.txt")
+ out_file = open(filename, "w")
+ try:
+ out_file.write(TEST_DATA)
+ finally:
+ out_file.close()
+
+ in_file = TextFile(filename, strip_comments=0, skip_blanks=0,
+ lstrip_ws=0, rstrip_ws=0)
+ try:
+ test_input(1, "no processing", in_file, result1)
+ finally:
+ in_file.close()
+
+ in_file = TextFile(filename, strip_comments=1, skip_blanks=0,
+ lstrip_ws=0, rstrip_ws=0)
+ try:
+ test_input(2, "strip comments", in_file, result2)
+ finally:
+ in_file.close()
+
+ in_file = TextFile(filename, strip_comments=0, skip_blanks=1,
+ lstrip_ws=0, rstrip_ws=0)
+ try:
+ test_input(3, "strip blanks", in_file, result3)
+ finally:
+ in_file.close()
+
+ in_file = TextFile(filename)
+ try:
+ test_input(4, "default processing", in_file, result4)
+ finally:
+ in_file.close()
+
+ in_file = TextFile(filename, strip_comments=1, skip_blanks=1,
+ join_lines=1, rstrip_ws=1)
+ try:
+ test_input(5, "join lines without collapsing", in_file, result5)
+ finally:
+ in_file.close()
+
+ in_file = TextFile(filename, strip_comments=1, skip_blanks=1,
+ join_lines=1, rstrip_ws=1, collapse_join=1)
+ try:
+ test_input(6, "join lines with collapsing", in_file, result6)
+ finally:
+ in_file.close()
+
+def test_suite():
+ return unittest.makeSuite(TextFileTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_unixccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_unixccompiler.py
new file mode 100644
index 00000000..eef702cf
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_unixccompiler.py
@@ -0,0 +1,141 @@
+"""Tests for distutils.unixccompiler."""
+import sys
+import unittest
+from test.support import EnvironmentVarGuard, run_unittest
+
+from distutils import sysconfig
+from distutils.unixccompiler import UnixCCompiler
+
+class UnixCCompilerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._backup_platform = sys.platform
+ self._backup_get_config_var = sysconfig.get_config_var
+ class CompilerWrapper(UnixCCompiler):
+ def rpath_foo(self):
+ return self.runtime_library_dir_option('/foo')
+ self.cc = CompilerWrapper()
+
+ def tearDown(self):
+ sys.platform = self._backup_platform
+ sysconfig.get_config_var = self._backup_get_config_var
+
+ @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
+ def test_runtime_libdir_option(self):
+ # Issue#5900
+ #
+ # Ensure RUNPATH is added to extension modules with RPATH if
+ # GNU ld is used
+
+ # darwin
+ sys.platform = 'darwin'
+ self.assertEqual(self.cc.rpath_foo(), '-L/foo')
+
+ # hp-ux
+ sys.platform = 'hp-ux'
+ old_gcv = sysconfig.get_config_var
+ def gcv(v):
+ return 'xxx'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), ['+s', '-L/foo'])
+
+ def gcv(v):
+ return 'gcc'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])
+
+ def gcv(v):
+ return 'g++'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])
+
+ sysconfig.get_config_var = old_gcv
+
+ # GCC GNULD
+ sys.platform = 'bar'
+ def gcv(v):
+ if v == 'CC':
+ return 'gcc'
+ elif v == 'GNULD':
+ return 'yes'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo')
+
+ # GCC non-GNULD
+ sys.platform = 'bar'
+ def gcv(v):
+ if v == 'CC':
+ return 'gcc'
+ elif v == 'GNULD':
+ return 'no'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
+
+ # GCC GNULD with fully qualified configuration prefix
+ # see #7617
+ sys.platform = 'bar'
+ def gcv(v):
+ if v == 'CC':
+ return 'x86_64-pc-linux-gnu-gcc-4.4.2'
+ elif v == 'GNULD':
+ return 'yes'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo')
+
+ # non-GCC GNULD
+ sys.platform = 'bar'
+ def gcv(v):
+ if v == 'CC':
+ return 'cc'
+ elif v == 'GNULD':
+ return 'yes'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), '-R/foo')
+
+ # non-GCC non-GNULD
+ sys.platform = 'bar'
+ def gcv(v):
+ if v == 'CC':
+ return 'cc'
+ elif v == 'GNULD':
+ return 'no'
+ sysconfig.get_config_var = gcv
+ self.assertEqual(self.cc.rpath_foo(), '-R/foo')
+
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X')
+ def test_osx_cc_overrides_ldshared(self):
+ # Issue #18080:
+ # ensure that setting CC env variable also changes default linker
+ def gcv(v):
+ if v == 'LDSHARED':
+ return 'gcc-4.2 -bundle -undefined dynamic_lookup '
+ return 'gcc-4.2'
+ sysconfig.get_config_var = gcv
+ with EnvironmentVarGuard() as env:
+ env['CC'] = 'my_cc'
+ del env['LDSHARED']
+ sysconfig.customize_compiler(self.cc)
+ self.assertEqual(self.cc.linker_so[0], 'my_cc')
+
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X')
+ def test_osx_explicit_ldshared(self):
+ # Issue #18080:
+ # ensure that setting CC env variable does not change
+ # explicit LDSHARED setting for linker
+ def gcv(v):
+ if v == 'LDSHARED':
+ return 'gcc-4.2 -bundle -undefined dynamic_lookup '
+ return 'gcc-4.2'
+ sysconfig.get_config_var = gcv
+ with EnvironmentVarGuard() as env:
+ env['CC'] = 'my_cc'
+ env['LDSHARED'] = 'my_ld -bundle -dynamic'
+ sysconfig.customize_compiler(self.cc)
+ self.assertEqual(self.cc.linker_so[0], 'my_ld')
+
+
+def test_suite():
+ return unittest.makeSuite(UnixCCompilerTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_upload.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_upload.py
new file mode 100644
index 00000000..c17d8e7d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_upload.py
@@ -0,0 +1,207 @@
+"""Tests for distutils.command.upload."""
+import os
+import unittest
+import unittest.mock as mock
+from urllib.request import HTTPError
+
+from test.support import run_unittest
+
+from distutils.command import upload as upload_mod
+from distutils.command.upload import upload
+from distutils.core import Distribution
+from distutils.errors import DistutilsError
+from distutils.log import ERROR, INFO
+
+from distutils.tests.test_config import PYPIRC, BasePyPIRCCommandTestCase
+
+PYPIRC_LONG_PASSWORD = """\
+[distutils]
+
+index-servers =
+ server1
+ server2
+
+[server1]
+username:me
+password:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+
+[server2]
+username:meagain
+password: secret
+realm:acme
+repository:http://another.pypi/
+"""
+
+
+PYPIRC_NOPASSWORD = """\
+[distutils]
+
+index-servers =
+ server1
+
+[server1]
+username:me
+"""
+
+class FakeOpen(object):
+
+ def __init__(self, url, msg=None, code=None):
+ self.url = url
+ if not isinstance(url, str):
+ self.req = url
+ else:
+ self.req = None
+ self.msg = msg or 'OK'
+ self.code = code or 200
+
+ def getheader(self, name, default=None):
+ return {
+ 'content-type': 'text/plain; charset=utf-8',
+ }.get(name.lower(), default)
+
+ def read(self):
+ return b'xyzzy'
+
+ def getcode(self):
+ return self.code
+
+
+class uploadTestCase(BasePyPIRCCommandTestCase):
+
+ def setUp(self):
+ super(uploadTestCase, self).setUp()
+ self.old_open = upload_mod.urlopen
+ upload_mod.urlopen = self._urlopen
+ self.last_open = None
+ self.next_msg = None
+ self.next_code = None
+
+ def tearDown(self):
+ upload_mod.urlopen = self.old_open
+ super(uploadTestCase, self).tearDown()
+
+ def _urlopen(self, url):
+ self.last_open = FakeOpen(url, msg=self.next_msg, code=self.next_code)
+ return self.last_open
+
+ def test_finalize_options(self):
+
+ # new format
+ self.write_file(self.rc, PYPIRC)
+ dist = Distribution()
+ cmd = upload(dist)
+ cmd.finalize_options()
+ for attr, waited in (('username', 'me'), ('password', 'secret'),
+ ('realm', 'pypi'),
+ ('repository', 'https://upload.pypi.org/legacy/')):
+ self.assertEqual(getattr(cmd, attr), waited)
+
+ def test_saved_password(self):
+ # file with no password
+ self.write_file(self.rc, PYPIRC_NOPASSWORD)
+
+ # make sure it passes
+ dist = Distribution()
+ cmd = upload(dist)
+ cmd.finalize_options()
+ self.assertEqual(cmd.password, None)
+
+ # make sure we get it as well, if another command
+ # initialized it at the dist level
+ dist.password = 'xxx'
+ cmd = upload(dist)
+ cmd.finalize_options()
+ self.assertEqual(cmd.password, 'xxx')
+
+ def test_upload(self):
+ tmp = self.mkdtemp()
+ path = os.path.join(tmp, 'xxx')
+ self.write_file(path)
+ command, pyversion, filename = 'xxx', '2.6', path
+ dist_files = [(command, pyversion, filename)]
+ self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
+
+        # let's run it
+ pkg_dir, dist = self.create_dist(dist_files=dist_files)
+ cmd = upload(dist)
+ cmd.show_response = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+        # what did we send?
+ headers = dict(self.last_open.req.headers)
+ self.assertEqual(headers['Content-length'], '2162')
+ content_type = headers['Content-type']
+ self.assertTrue(content_type.startswith('multipart/form-data'))
+ self.assertEqual(self.last_open.req.get_method(), 'POST')
+ expected_url = 'https://upload.pypi.org/legacy/'
+ self.assertEqual(self.last_open.req.get_full_url(), expected_url)
+ self.assertTrue(b'xxx' in self.last_open.req.data)
+ self.assertIn(b'protocol_version', self.last_open.req.data)
+
+ # The PyPI response body was echoed
+ results = self.get_logs(INFO)
+ self.assertEqual(results[-1], 75 * '-' + '\nxyzzy\n' + 75 * '-')
+
+ # bpo-32304: archives whose last byte was b'\r' were corrupted due to
+ # normalization intended for Mac OS 9.
+ def test_upload_correct_cr(self):
+ # content that ends with \r should not be modified.
+ tmp = self.mkdtemp()
+ path = os.path.join(tmp, 'xxx')
+ self.write_file(path, content='yy\r')
+ command, pyversion, filename = 'xxx', '2.6', path
+ dist_files = [(command, pyversion, filename)]
+ self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
+
+ # other fields that ended with \r used to be modified, now are
+ # preserved.
+ pkg_dir, dist = self.create_dist(
+ dist_files=dist_files,
+ description='long description\r'
+ )
+ cmd = upload(dist)
+ cmd.show_response = 1
+ cmd.ensure_finalized()
+ cmd.run()
+
+ headers = dict(self.last_open.req.headers)
+ self.assertEqual(headers['Content-length'], '2172')
+ self.assertIn(b'long description\r', self.last_open.req.data)
+
+ def test_upload_fails(self):
+ self.next_msg = "Not Found"
+ self.next_code = 404
+ self.assertRaises(DistutilsError, self.test_upload)
+
+ def test_wrong_exception_order(self):
+ tmp = self.mkdtemp()
+ path = os.path.join(tmp, 'xxx')
+ self.write_file(path)
+ dist_files = [('xxx', '2.6', path)] # command, pyversion, filename
+ self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
+
+ pkg_dir, dist = self.create_dist(dist_files=dist_files)
+ tests = [
+ (OSError('oserror'), 'oserror', OSError),
+ (HTTPError('url', 400, 'httperror', {}, None),
+ 'Upload failed (400): httperror', DistutilsError),
+ ]
+ for exception, expected, raised_exception in tests:
+ with self.subTest(exception=type(exception).__name__):
+ with mock.patch('distutils.command.upload.urlopen',
+ new=mock.Mock(side_effect=exception)):
+ with self.assertRaises(raised_exception):
+ cmd = upload(dist)
+ cmd.ensure_finalized()
+ cmd.run()
+ results = self.get_logs(ERROR)
+ self.assertIn(expected, results[-1])
+ self.clear_logs()
+
+
+def test_suite():
+ return unittest.makeSuite(uploadTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_util.py
new file mode 100644
index 00000000..e2fc3809
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_util.py
@@ -0,0 +1,293 @@
+"""Tests for distutils.util."""
+import os
+import sys
+import unittest
+from copy import copy
+from test.support import run_unittest
+
+from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
+from distutils.util import (get_platform, convert_path, change_root,
+ check_environ, split_quoted, strtobool,
+ rfc822_escape, byte_compile,
+ grok_environment_error)
+from distutils import util # used to patch _environ_checked
+from distutils.sysconfig import get_config_vars
+from distutils import sysconfig
+from distutils.tests import support
+import _osx_support
+
+class UtilTestCase(support.EnvironGuard, unittest.TestCase):
+
+ def setUp(self):
+ super(UtilTestCase, self).setUp()
+ # saving the environment
+ self.name = os.name
+ self.platform = sys.platform
+ self.version = sys.version
+ self.sep = os.sep
+ self.join = os.path.join
+ self.isabs = os.path.isabs
+ self.splitdrive = os.path.splitdrive
+ self._config_vars = copy(sysconfig._config_vars)
+
+ # patching os.uname
+ if hasattr(os, 'uname'):
+ self.uname = os.uname
+ self._uname = os.uname()
+ else:
+ self.uname = None
+ self._uname = None
+
+ os.uname = self._get_uname
+
+ def tearDown(self):
+        # restoring the environment
+ os.name = self.name
+ sys.platform = self.platform
+ sys.version = self.version
+ os.sep = self.sep
+ os.path.join = self.join
+ os.path.isabs = self.isabs
+ os.path.splitdrive = self.splitdrive
+ if self.uname is not None:
+ os.uname = self.uname
+ else:
+ del os.uname
+ sysconfig._config_vars = copy(self._config_vars)
+ super(UtilTestCase, self).tearDown()
+
+ def _set_uname(self, uname):
+ self._uname = uname
+
+ def _get_uname(self):
+ return self._uname
+
+ def test_get_platform(self):
+
+ # windows XP, 32bits
+ os.name = 'nt'
+ sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
+ '[MSC v.1310 32 bit (Intel)]')
+ sys.platform = 'win32'
+ self.assertEqual(get_platform(), 'win32')
+
+ # windows XP, amd64
+ os.name = 'nt'
+ sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
+ '[MSC v.1310 32 bit (Amd64)]')
+ sys.platform = 'win32'
+ self.assertEqual(get_platform(), 'win-amd64')
+
+ # macbook
+ os.name = 'posix'
+ sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
+ '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
+ sys.platform = 'darwin'
+ self._set_uname(('Darwin', 'macziade', '8.11.1',
+ ('Darwin Kernel Version 8.11.1: '
+ 'Wed Oct 10 18:23:28 PDT 2007; '
+ 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
+
+ get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
+ '-fwrapv -O3 -Wall -Wstrict-prototypes')
+
+ cursize = sys.maxsize
+ sys.maxsize = (2 ** 31)-1
+ try:
+ self.assertEqual(get_platform(), 'macosx-10.3-i386')
+ finally:
+ sys.maxsize = cursize
+
+ # macbook with fat binaries (fat, universal or fat64)
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
+ get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk '
+ '-fno-strict-aliasing -fno-common '
+ '-dynamic -DNDEBUG -g -O3')
+
+ self.assertEqual(get_platform(), 'macosx-10.4-fat')
+
+ _osx_support._remove_original_values(get_config_vars())
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
+ self.assertEqual(get_platform(), 'macosx-10.4-fat')
+
+
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk '
+ '-fno-strict-aliasing -fno-common '
+ '-dynamic -DNDEBUG -g -O3')
+
+ self.assertEqual(get_platform(), 'macosx-10.4-intel')
+
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk '
+ '-fno-strict-aliasing -fno-common '
+ '-dynamic -DNDEBUG -g -O3')
+ self.assertEqual(get_platform(), 'macosx-10.4-fat3')
+
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk '
+ '-fno-strict-aliasing -fno-common '
+ '-dynamic -DNDEBUG -g -O3')
+ self.assertEqual(get_platform(), 'macosx-10.4-universal')
+
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk '
+ '-fno-strict-aliasing -fno-common '
+ '-dynamic -DNDEBUG -g -O3')
+
+ self.assertEqual(get_platform(), 'macosx-10.4-fat64')
+
+ for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
+ _osx_support._remove_original_values(get_config_vars())
+ get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk '
+ '-fno-strict-aliasing -fno-common '
+ '-dynamic -DNDEBUG -g -O3'%(arch,))
+
+ self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))
+
+
+ # linux debian sarge
+ os.name = 'posix'
+ sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
+ '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
+ sys.platform = 'linux2'
+ self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
+ '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
+
+ self.assertEqual(get_platform(), 'linux-i686')
+
+        # XXX more platforms to test here
+
+ def test_convert_path(self):
+ # linux/mac
+ os.sep = '/'
+ def _join(path):
+ return '/'.join(path)
+ os.path.join = _join
+
+ self.assertEqual(convert_path('/home/to/my/stuff'),
+ '/home/to/my/stuff')
+
+ # win
+ os.sep = '\\'
+ def _join(*path):
+ return '\\'.join(path)
+ os.path.join = _join
+
+ self.assertRaises(ValueError, convert_path, '/home/to/my/stuff')
+ self.assertRaises(ValueError, convert_path, 'home/to/my/stuff/')
+
+ self.assertEqual(convert_path('home/to/my/stuff'),
+ 'home\\to\\my\\stuff')
+ self.assertEqual(convert_path('.'),
+ os.curdir)
+
+ def test_change_root(self):
+ # linux/mac
+ os.name = 'posix'
+ def _isabs(path):
+ return path[0] == '/'
+ os.path.isabs = _isabs
+ def _join(*path):
+ return '/'.join(path)
+ os.path.join = _join
+
+ self.assertEqual(change_root('/root', '/old/its/here'),
+ '/root/old/its/here')
+ self.assertEqual(change_root('/root', 'its/here'),
+ '/root/its/here')
+
+ # windows
+ os.name = 'nt'
+ def _isabs(path):
+ return path.startswith('c:\\')
+ os.path.isabs = _isabs
+ def _splitdrive(path):
+ if path.startswith('c:'):
+ return ('', path.replace('c:', ''))
+ return ('', path)
+ os.path.splitdrive = _splitdrive
+ def _join(*path):
+ return '\\'.join(path)
+ os.path.join = _join
+
+ self.assertEqual(change_root('c:\\root', 'c:\\old\\its\\here'),
+ 'c:\\root\\old\\its\\here')
+ self.assertEqual(change_root('c:\\root', 'its\\here'),
+ 'c:\\root\\its\\here')
+
+ # BugsBunny os (it's a great os)
+ os.name = 'BugsBunny'
+ self.assertRaises(DistutilsPlatformError,
+ change_root, 'c:\\root', 'its\\here')
+
+ # XXX platforms to be covered: mac
+
+ def test_check_environ(self):
+ util._environ_checked = 0
+ if 'HOME' in os.environ:
+ del os.environ['HOME']
+
+ # posix without HOME
+ if os.name == 'posix': # this test won't run on windows
+ check_environ()
+ import pwd
+ self.assertEqual(os.environ['HOME'], pwd.getpwuid(os.getuid())[5])
+ else:
+ check_environ()
+
+ self.assertEqual(os.environ['PLAT'], get_platform())
+ self.assertEqual(util._environ_checked, 1)
+
+ def test_split_quoted(self):
+ self.assertEqual(split_quoted('""one"" "two" \'three\' \\four'),
+ ['one', 'two', 'three', 'four'])
+
+ def test_strtobool(self):
+ yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1')
+ no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N')
+
+ for y in yes:
+ self.assertTrue(strtobool(y))
+
+ for n in no:
+ self.assertFalse(strtobool(n))
+
+ def test_rfc822_escape(self):
+ header = 'I am a\npoor\nlonesome\nheader\n'
+ res = rfc822_escape(header)
+ wanted = ('I am a%(8s)spoor%(8s)slonesome%(8s)s'
+ 'header%(8s)s') % {'8s': '\n'+8*' '}
+ self.assertEqual(res, wanted)
+
+ def test_dont_write_bytecode(self):
+        # makes sure byte_compile raises a DistutilsError
+ # if sys.dont_write_bytecode is True
+ old_dont_write_bytecode = sys.dont_write_bytecode
+ sys.dont_write_bytecode = True
+ try:
+ self.assertRaises(DistutilsByteCompileError, byte_compile, [])
+ finally:
+ sys.dont_write_bytecode = old_dont_write_bytecode
+
+ def test_grok_environment_error(self):
+ # test obsolete function to ensure backward compat (#4931)
+ exc = IOError("Unable to find batch file")
+ msg = grok_environment_error(exc)
+ self.assertEqual(msg, "error: Unable to find batch file")
+
+
+def test_suite():
+ return unittest.makeSuite(UtilTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_version.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_version.py
new file mode 100644
index 00000000..15f14c7d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_version.py
@@ -0,0 +1,71 @@
+"""Tests for distutils.version."""
+import unittest
+from distutils.version import LooseVersion
+from distutils.version import StrictVersion
+from test.support import run_unittest
+
+class VersionTestCase(unittest.TestCase):
+
+ def test_prerelease(self):
+ version = StrictVersion('1.2.3a1')
+ self.assertEqual(version.version, (1, 2, 3))
+ self.assertEqual(version.prerelease, ('a', 1))
+ self.assertEqual(str(version), '1.2.3a1')
+
+ version = StrictVersion('1.2.0')
+ self.assertEqual(str(version), '1.2')
+
+ def test_cmp_strict(self):
+ versions = (('1.5.1', '1.5.2b2', -1),
+ ('161', '3.10a', ValueError),
+ ('8.02', '8.02', 0),
+ ('3.4j', '1996.07.12', ValueError),
+ ('3.2.pl0', '3.1.1.6', ValueError),
+ ('2g6', '11g', ValueError),
+ ('0.9', '2.2', -1),
+ ('1.2.1', '1.2', 1),
+ ('1.1', '1.2.2', -1),
+ ('1.2', '1.1', 1),
+ ('1.2.1', '1.2.2', -1),
+ ('1.2.2', '1.2', 1),
+ ('1.2', '1.2.2', -1),
+ ('0.4.0', '0.4', 0),
+ ('1.13++', '5.5.kw', ValueError))
+
+ for v1, v2, wanted in versions:
+ try:
+ res = StrictVersion(v1)._cmp(StrictVersion(v2))
+ except ValueError:
+ if wanted is ValueError:
+ continue
+ else:
+ raise AssertionError(("cmp(%s, %s) "
+ "shouldn't raise ValueError")
+ % (v1, v2))
+ self.assertEqual(res, wanted,
+ 'cmp(%s, %s) should be %s, got %s' %
+ (v1, v2, wanted, res))
+
+
+ def test_cmp(self):
+ versions = (('1.5.1', '1.5.2b2', -1),
+ ('161', '3.10a', 1),
+ ('8.02', '8.02', 0),
+ ('3.4j', '1996.07.12', -1),
+ ('3.2.pl0', '3.1.1.6', 1),
+ ('2g6', '11g', -1),
+ ('0.960923', '2.2beta29', -1),
+ ('1.13++', '5.5.kw', -1))
+
+
+ for v1, v2, wanted in versions:
+ res = LooseVersion(v1)._cmp(LooseVersion(v2))
+ self.assertEqual(res, wanted,
+ 'cmp(%s, %s) should be %s, got %s' %
+ (v1, v2, wanted, res))
+
+def test_suite():
+ return unittest.makeSuite(VersionTestCase)
+
+if __name__ == "__main__":
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_versionpredicate.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_versionpredicate.py
new file mode 100644
index 00000000..28ae09dc
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/tests/test_versionpredicate.py
@@ -0,0 +1,13 @@
+"""Tests harness for distutils.versionpredicate.
+
+"""
+
+import distutils.versionpredicate
+import doctest
+from test.support import run_unittest
+
+def test_suite():
+ return doctest.DocTestSuite(distutils.versionpredicate)
+
+if __name__ == '__main__':
+ run_unittest(test_suite())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/text_file.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/text_file.py
new file mode 100644
index 00000000..93abad38
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/text_file.py
@@ -0,0 +1,286 @@
+"""text_file
+
+provides the TextFile class, which gives an interface to text files
+that (optionally) takes care of stripping comments, ignoring blank
+lines, and joining lines with backslashes."""
+
+import sys, io
+
+
+class TextFile:
+ """Provides a file-like object that takes care of all the things you
+ commonly want to do when processing a text file that has some
+ line-by-line syntax: strip comments (as long as "#" is your
+ comment character), skip blank lines, join adjacent lines by
+ escaping the newline (ie. backslash at end of line), strip
+ leading and/or trailing whitespace. All of these are optional
+ and independently controllable.
+
+ Provides a 'warn()' method so you can generate warning messages that
+ report physical line number, even if the logical line in question
+ spans multiple physical lines. Also provides 'unreadline()' for
+ implementing line-at-a-time lookahead.
+
+ Constructor is called as:
+
+ TextFile (filename=None, file=None, **options)
+
+ It bombs (RuntimeError) if both 'filename' and 'file' are None;
+ 'filename' should be a string, and 'file' a file object (or
+ something that provides 'readline()' and 'close()' methods). It is
+ recommended that you supply at least 'filename', so that TextFile
+ can include it in warning messages. If 'file' is not supplied,
+ TextFile creates its own using 'io.open()'.
+
+ The options are all boolean, and affect the value returned by
+ 'readline()':
+ strip_comments [default: true]
+ strip from "#" to end-of-line, as well as any whitespace
+ leading up to the "#" -- unless it is escaped by a backslash
+ lstrip_ws [default: false]
+ strip leading whitespace from each line before returning it
+ rstrip_ws [default: true]
+ strip trailing whitespace (including line terminator!) from
+ each line before returning it
+      skip_blanks [default: true]
+ skip lines that are empty *after* stripping comments and
+ whitespace. (If both lstrip_ws and rstrip_ws are false,
+ then some lines may consist of solely whitespace: these will
+ *not* be skipped, even if 'skip_blanks' is true.)
+ join_lines [default: false]
+ if a backslash is the last non-newline character on a line
+ after stripping comments and whitespace, join the following line
+ to it to form one "logical line"; if N consecutive lines end
+ with a backslash, then N+1 physical lines will be joined to
+ form one logical line.
+ collapse_join [default: false]
+ strip leading whitespace from lines that are joined to their
+ predecessor; only matters if (join_lines and not lstrip_ws)
+ errors [default: 'strict']
+ error handler used to decode the file content
+
+ Note that since 'rstrip_ws' can strip the trailing newline, the
+ semantics of 'readline()' must differ from those of the builtin file
+ object's 'readline()' method! In particular, 'readline()' returns
+ None for end-of-file: an empty string might just be a blank line (or
+ an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
+ not."""
+
+ default_options = { 'strip_comments': 1,
+ 'skip_blanks': 1,
+ 'lstrip_ws': 0,
+ 'rstrip_ws': 1,
+ 'join_lines': 0,
+ 'collapse_join': 0,
+ 'errors': 'strict',
+ }
+
+ def __init__(self, filename=None, file=None, **options):
+ """Construct a new TextFile object. At least one of 'filename'
+ (a string) and 'file' (a file-like object) must be supplied.
+        The keyword argument options are described above and affect
+ the values returned by 'readline()'."""
+ if filename is None and file is None:
+ raise RuntimeError("you must supply either or both of 'filename' and 'file'")
+
+ # set values for all options -- either from client option hash
+ # or fallback to default_options
+ for opt in self.default_options.keys():
+ if opt in options:
+ setattr(self, opt, options[opt])
+ else:
+ setattr(self, opt, self.default_options[opt])
+
+ # sanity check client option hash
+ for opt in options.keys():
+ if opt not in self.default_options:
+ raise KeyError("invalid TextFile option '%s'" % opt)
+
+ if file is None:
+ self.open(filename)
+ else:
+ self.filename = filename
+ self.file = file
+ self.current_line = 0 # assuming that file is at BOF!
+
+ # 'linebuf' is a stack of lines that will be emptied before we
+ # actually read from the file; it's only populated by an
+ # 'unreadline()' operation
+ self.linebuf = []
+
+ def open(self, filename):
+ """Open a new file named 'filename'. This overrides both the
+ 'filename' and 'file' arguments to the constructor."""
+ self.filename = filename
+ self.file = io.open(self.filename, 'r', errors=self.errors)
+ self.current_line = 0
+
+ def close(self):
+ """Close the current file and forget everything we know about it
+ (filename, current line number)."""
+ file = self.file
+ self.file = None
+ self.filename = None
+ self.current_line = None
+ file.close()
+
+ def gen_error(self, msg, line=None):
+ outmsg = []
+ if line is None:
+ line = self.current_line
+ outmsg.append(self.filename + ", ")
+ if isinstance(line, (list, tuple)):
+ outmsg.append("lines %d-%d: " % tuple(line))
+ else:
+ outmsg.append("line %d: " % line)
+ outmsg.append(str(msg))
+ return "".join(outmsg)
+
+ def error(self, msg, line=None):
+ raise ValueError("error: " + self.gen_error(msg, line))
+
+ def warn(self, msg, line=None):
+ """Print (to stderr) a warning message tied to the current logical
+ line in the current file. If the current logical line in the
+ file spans multiple physical lines, the warning refers to the
+        whole range, eg. "lines 3-5". If 'line' is supplied, it overrides
+ the current line number; it may be a list or tuple to indicate a
+ range of physical lines, or an integer for a single physical
+ line."""
+ sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
+
+ def readline(self):
+ """Read and return a single logical line from the current file (or
+ from an internal buffer if lines have previously been "unread"
+ with 'unreadline()'). If the 'join_lines' option is true, this
+ may involve reading multiple physical lines concatenated into a
+ single string. Updates the current line number, so calling
+ 'warn()' after 'readline()' emits a warning about the physical
+ line(s) just read. Returns None on end-of-file, since the empty
+        string can occur if 'rstrip_ws' is true but 'skip_blanks' is
+ not."""
+        # If any "unread" lines are waiting in 'linebuf', return the top
+ # one. (We don't actually buffer read-ahead data -- lines only
+ # get put in 'linebuf' if the client explicitly does an
+        # 'unreadline()'.)
+ if self.linebuf:
+ line = self.linebuf[-1]
+ del self.linebuf[-1]
+ return line
+
+ buildup_line = ''
+
+ while True:
+ # read the line, make it None if EOF
+ line = self.file.readline()
+ if line == '':
+ line = None
+
+ if self.strip_comments and line:
+
+ # Look for the first "#" in the line. If none, never
+ # mind. If we find one and it's the first character, or
+ # is not preceded by "\", then it starts a comment --
+ # strip the comment, strip whitespace before it, and
+ # carry on. Otherwise, it's just an escaped "#", so
+ # unescape it (and any other escaped "#"'s that might be
+ # lurking in there) and otherwise leave the line alone.
+
+ pos = line.find("#")
+ if pos == -1: # no "#" -- no comments
+ pass
+
+ # It's definitely a comment -- either "#" is the first
+ # character, or it's elsewhere and unescaped.
+ elif pos == 0 or line[pos-1] != "\\":
+ # Have to preserve the trailing newline, because it's
+ # the job of a later step (rstrip_ws) to remove it --
+ # and if rstrip_ws is false, we'd better preserve it!
+ # (NB. this means that if the final line is all comment
+ # and has no trailing newline, we will think that it's
+ # EOF; I think that's OK.)
+ eol = (line[-1] == '\n') and '\n' or ''
+ line = line[0:pos] + eol
+
+ # If all that's left is whitespace, then skip line
+ # *now*, before we try to join it to 'buildup_line' --
+ # that way constructs like
+ # hello \\
+ # # comment that should be ignored
+ # there
+ # result in "hello there".
+ if line.strip() == "":
+ continue
+ else: # it's an escaped "#"
+ line = line.replace("\\#", "#")
+
+ # did previous line end with a backslash? then accumulate
+ if self.join_lines and buildup_line:
+ # oops: end of file
+ if line is None:
+ self.warn("continuation line immediately precedes "
+ "end-of-file")
+ return buildup_line
+
+ if self.collapse_join:
+ line = line.lstrip()
+ line = buildup_line + line
+
+ # careful: pay attention to line number when incrementing it
+ if isinstance(self.current_line, list):
+ self.current_line[1] = self.current_line[1] + 1
+ else:
+ self.current_line = [self.current_line,
+ self.current_line + 1]
+ # just an ordinary line, read it as usual
+ else:
+ if line is None: # eof
+ return None
+
+ # still have to be careful about incrementing the line number!
+ if isinstance(self.current_line, list):
+ self.current_line = self.current_line[1] + 1
+ else:
+ self.current_line = self.current_line + 1
+
+ # strip whitespace however the client wants (leading and
+ # trailing, or one or the other, or neither)
+ if self.lstrip_ws and self.rstrip_ws:
+ line = line.strip()
+ elif self.lstrip_ws:
+ line = line.lstrip()
+ elif self.rstrip_ws:
+ line = line.rstrip()
+
+ # blank line (whether we rstrip'ed or not)? skip to next line
+ # if appropriate
+ if (line == '' or line == '\n') and self.skip_blanks:
+ continue
+
+ if self.join_lines:
+ if line[-1] == '\\':
+ buildup_line = line[:-1]
+ continue
+
+ if line[-2:] == '\\\n':
+ buildup_line = line[0:-2] + '\n'
+ continue
+
+ # well, I guess there's some actual content there: return it
+ return line
+
+ def readlines(self):
+ """Read and return the list of all logical lines remaining in the
+ current file."""
+ lines = []
+ while True:
+ line = self.readline()
+ if line is None:
+ return lines
+ lines.append(line)
+
+ def unreadline(self, line):
+ """Push 'line' (a string) onto an internal buffer that will be
+ checked by future 'readline()' calls. Handy for implementing
+ a parser with line-at-a-time lookahead."""
+ self.linebuf.append(line)
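+
+
+# A minimal usage sketch: the sample text and temporary file below are
+# purely illustrative, and the demo only runs when this module is executed
+# directly.  It shows how strip_comments, skip_blanks, join_lines and
+# collapse_join turn several physical lines into one logical line.
+if __name__ == "__main__":
+    import os
+    import tempfile
+
+    sample = "# a comment\nline one \\\n    continued on next line\n"
+    fd, path = tempfile.mkstemp(suffix=".txt")
+    with os.fdopen(fd, "w") as f:
+        f.write(sample)
+    demo = TextFile(path, strip_comments=1, skip_blanks=1,
+                    join_lines=1, collapse_join=1)
+    try:
+        # prints ['line one continued on next line']
+        print(demo.readlines())
+    finally:
+        demo.close()
+        os.unlink(path)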
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/unixccompiler.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/unixccompiler.py
new file mode 100644
index 00000000..ab4d4de1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/unixccompiler.py
@@ -0,0 +1,320 @@
+"""distutils.unixccompiler
+
+Contains the UnixCCompiler class, a subclass of CCompiler that handles
+the "typical" Unix-style command-line C compiler:
+ * macros defined with -Dname[=value]
+ * macros undefined with -Uname
+ * include search directories specified with -Idir
+  * libraries specified with -llib
+ * library search directories specified with -Ldir
+ * compile handled by 'cc' (or similar) executable with -c option:
+ compiles .c to .o
+ * link static library handled by 'ar' command (possibly with 'ranlib')
+ * link shared library handled by 'cc -shared'
+"""
+
+import os, sys, re
+
+from distutils import sysconfig
+from distutils.dep_util import newer
+from distutils.ccompiler import \
+ CCompiler, gen_preprocess_options, gen_lib_options
+from distutils.errors import \
+ DistutilsExecError, CompileError, LibError, LinkError
+from distutils import log
+
+if sys.platform == 'darwin':
+ import _osx_support
+
+# XXX Things not currently handled:
+# * optimization/debug/warning flags; we just use whatever's in Python's
+# Makefile and live with it. Is this adequate? If not, we might
+# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
+# SunCCompiler, and I suspect down that road lies madness.
+# * even if we don't know a warning flag from an optimization flag,
+# we need some way for outsiders to feed preprocessor/compiler/linker
+# flags in to us -- eg. a sysadmin might want to mandate certain flags
+# via a site config file, or a user might want to set something for
+# compiling this module distribution only via the setup.py command
+# line, whatever. As long as these options come from something on the
+# current system, they can be as system-dependent as they like, and we
+# should just happily stuff them into the preprocessor/compiler/linker
+# options and carry on.
+
+
+class UnixCCompiler(CCompiler):
+
+ compiler_type = 'unix'
+
+ # These are used by CCompiler in two places: the constructor sets
+ # instance attributes 'preprocessor', 'compiler', etc. from them, and
+ # 'set_executable()' allows any of these to be set. The defaults here
+ # are pretty generic; they will probably have to be set by an outsider
+ # (eg. using information discovered by the sysconfig about building
+ # Python extensions).
+ executables = {'preprocessor' : None,
+ 'compiler' : ["cc"],
+ 'compiler_so' : ["cc"],
+ 'compiler_cxx' : ["cc"],
+ 'linker_so' : ["cc", "-shared"],
+ 'linker_exe' : ["cc"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : None,
+ }
+
+ if sys.platform[:6] == "darwin":
+ executables['ranlib'] = ["ranlib"]
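+
+    # A hypothetical override sketch: callers such as
+    # distutils.sysconfig.customize_compiler() replace these defaults on a
+    # compiler instance through CCompiler.set_executables(), e.g.
+    #     compiler.set_executables(compiler_so="gcc -fPIC -O2",
+    #                              linker_so="gcc -shared")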
+
+ # Needed for the filename generation methods provided by the base
+ # class, CCompiler. NB. whoever instantiates/uses a particular
+ # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
+ # reasonable common default here, but it's not necessarily used on all
+ # Unices!
+
+ src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
+ obj_extension = ".o"
+ static_lib_extension = ".a"
+ shared_lib_extension = ".so"
+ dylib_lib_extension = ".dylib"
+ xcode_stub_lib_extension = ".tbd"
+ static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
+ xcode_stub_lib_format = dylib_lib_format
+ if sys.platform == "cygwin":
+ exe_extension = ".exe"
+
+ def preprocess(self, source, output_file=None, macros=None,
+ include_dirs=None, extra_preargs=None, extra_postargs=None):
+ fixed_args = self._fix_compile_args(None, macros, include_dirs)
+ ignore, macros, include_dirs = fixed_args
+ pp_opts = gen_preprocess_options(macros, include_dirs)
+ pp_args = self.preprocessor + pp_opts
+ if output_file:
+ pp_args.extend(['-o', output_file])
+ if extra_preargs:
+ pp_args[:0] = extra_preargs
+ if extra_postargs:
+ pp_args.extend(extra_postargs)
+ pp_args.append(source)
+
+ # We need to preprocess: either we're being forced to, or we're
+ # generating output to stdout, or there's a target output file and
+ # the source file is newer than the target (or the target doesn't
+ # exist).
+ if self.force or output_file is None or newer(source, output_file):
+ if output_file:
+ self.mkpath(os.path.dirname(output_file))
+ try:
+ self.spawn(pp_args)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ compiler_so = self.compiler_so
+ if sys.platform == 'darwin':
+ compiler_so = _osx_support.compiler_fixup(compiler_so,
+ cc_args + extra_postargs)
+ try:
+ self.spawn(compiler_so + cc_args + [src, '-o', obj] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ def create_static_lib(self, objects, output_libname,
+ output_dir=None, debug=0, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+
+ output_filename = \
+ self.library_filename(output_libname, output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ self.mkpath(os.path.dirname(output_filename))
+ self.spawn(self.archiver +
+ [output_filename] +
+ objects + self.objects)
+
+            # Not many Unices require ranlib anymore -- SunOS 4.x is, I
+            # think, the only major Unix that does. Maybe we need some
+ # platform intelligence here to skip ranlib if it's not
+ # needed -- or maybe Python's configure script took care of
+ # it for us, hence the check for leading colon.
+ if self.ranlib:
+ try:
+ self.spawn(self.ranlib + [output_filename])
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def link(self, target_desc, objects,
+ output_filename, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ libraries, library_dirs, runtime_library_dirs = fixed_args
+
+ lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
+ libraries)
+ if not isinstance(output_dir, (str, type(None))):
+ raise TypeError("'output_dir' must be a string or None")
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ ld_args = (objects + self.objects +
+ lib_opts + ['-o', output_filename])
+ if debug:
+ ld_args[:0] = ['-g']
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+ self.mkpath(os.path.dirname(output_filename))
+ try:
+ if target_desc == CCompiler.EXECUTABLE:
+ linker = self.linker_exe[:]
+ else:
+ linker = self.linker_so[:]
+ if target_lang == "c++" and self.compiler_cxx:
+ # skip over environment variable settings if /usr/bin/env
+ # is used to set up the linker's environment.
+ # This is needed on OSX. Note: this assumes that the
+ # normal and C++ compiler have the same environment
+ # settings.
+ i = 0
+ if os.path.basename(linker[0]) == "env":
+ i = 1
+ while '=' in linker[i]:
+ i += 1
+ linker[i] = self.compiler_cxx[i]
+
+ if sys.platform == 'darwin':
+ linker = _osx_support.compiler_fixup(linker, ld_args)
+
+ self.spawn(linker + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "-L" + dir
+
+ def _is_gcc(self, compiler_name):
+ return "gcc" in compiler_name or "g++" in compiler_name
+
+ def runtime_library_dir_option(self, dir):
+ # XXX Hackish, at the very least. See Python bug #445902:
+ # http://sourceforge.net/tracker/index.php
+ # ?func=detail&aid=445902&group_id=5470&atid=105470
+ # Linkers on different platforms need different options to
+ # specify that directories need to be added to the list of
+ # directories searched for dependencies when a dynamic library
+ # is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to
+ # be told to pass the -R option through to the linker, whereas
+ # other compilers and gcc on other systems just know this.
+ # Other compilers may need something slightly different. At
+ # this time, there's no way to determine this information from
+ # the configuration data stored in the Python installation, so
+ # we use this hack.
+ compiler = os.path.basename(sysconfig.get_config_var("CC"))
+ if sys.platform[:6] == "darwin":
+ # MacOSX's linker doesn't understand the -R flag at all
+ return "-L" + dir
+ elif sys.platform[:7] == "freebsd":
+ return "-Wl,-rpath=" + dir
+ elif sys.platform[:5] == "hp-ux":
+ if self._is_gcc(compiler):
+ return ["-Wl,+s", "-L" + dir]
+ return ["+s", "-L" + dir]
+ else:
+ if self._is_gcc(compiler):
+ # gcc on non-GNU systems does not need -Wl, but can
+ # use it anyway. Since distutils has always passed in
+ # -Wl whenever gcc was used in the past, it is probably
+ # safest to keep doing so.
+ if sysconfig.get_config_var("GNULD") == "yes":
+ # GNU ld needs an extra option to get a RUNPATH
+ # instead of just an RPATH.
+ return "-Wl,--enable-new-dtags,-R" + dir
+ else:
+ return "-Wl,-R" + dir
+ else:
+ # No idea how --enable-new-dtags would be passed on to
+ # ld if this system was using GNU ld. Don't know if a
+ # system like this even exists.
+ return "-R" + dir
+
+ def library_option(self, lib):
+ return "-l" + lib
+
+ def find_library_file(self, dirs, lib, debug=0):
+ shared_f = self.library_filename(lib, lib_type='shared')
+ dylib_f = self.library_filename(lib, lib_type='dylib')
+ xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub')
+ static_f = self.library_filename(lib, lib_type='static')
+
+ if sys.platform == 'darwin':
+ # On OSX users can specify an alternate SDK using
+ # '-isysroot', calculate the SDK root if it is specified
+ # (and use it further on)
+ #
+ # Note that, as of Xcode 7, Apple SDKs may contain textual stub
+ # libraries with .tbd extensions rather than the normal .dylib
+ # shared libraries installed in /. The Apple compiler tool
+ # chain handles this transparently but it can cause problems
+ # for programs that are being built with an SDK and searching
+ # for specific libraries. Callers of find_library_file need to
+ # keep in mind that the base filename of the returned SDK library
+ # file might have a different extension from that of the library
+ # file installed on the running system, for example:
+ # /Applications/Xcode.app/Contents/Developer/Platforms/
+ # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
+ # usr/lib/libedit.tbd
+ # vs
+ # /usr/lib/libedit.dylib
+ cflags = sysconfig.get_config_var('CFLAGS')
+ m = re.search(r'-isysroot\s+(\S+)', cflags)
+ if m is None:
+ sysroot = '/'
+ else:
+ sysroot = m.group(1)
+
+
+
+ for dir in dirs:
+ shared = os.path.join(dir, shared_f)
+ dylib = os.path.join(dir, dylib_f)
+ static = os.path.join(dir, static_f)
+ xcode_stub = os.path.join(dir, xcode_stub_f)
+
+ if sys.platform == 'darwin' and (
+ dir.startswith('/System/') or (
+ dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
+
+ shared = os.path.join(sysroot, dir[1:], shared_f)
+ dylib = os.path.join(sysroot, dir[1:], dylib_f)
+ static = os.path.join(sysroot, dir[1:], static_f)
+ xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f)
+
+ # We're second-guessing the linker here, with not much hard
+ # data to go on: GCC seems to prefer the shared library, so I'm
+ # assuming that *all* Unix C compilers do. And of course I'm
+ # ignoring even GCC's "-static" option. So sue me.
+ if os.path.exists(dylib):
+ return dylib
+ elif os.path.exists(xcode_stub):
+ return xcode_stub
+ elif os.path.exists(shared):
+ return shared
+ elif os.path.exists(static):
+ return static
+
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/util.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/util.py
new file mode 100644
index 00000000..83682628
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/util.py
@@ -0,0 +1,542 @@
+"""distutils.util
+
+Miscellaneous utility functions -- anything that doesn't fit into
+one of the other *util.py modules.
+"""
+
+import os
+import re
+import importlib.util
+import string
+import sys
+from distutils.errors import DistutilsPlatformError
+from distutils.dep_util import newer
+from distutils.spawn import spawn
+from distutils import log
+from distutils.errors import DistutilsByteCompileError
+
+def get_platform ():
+ """Return a string that identifies the current platform. This is used mainly to
+ distinguish platform-specific build directories and platform-specific built
+ distributions. Typically includes the OS name and version and the
+ architecture (as supplied by 'os.uname()'), although the exact information
+ included depends on the OS; eg. on Linux, the kernel version isn't
+ particularly important.
+
+ Examples of returned values:
+ linux-i586
+ linux-alpha (?)
+ solaris-2.6-sun4u
+
+ Windows will return one of:
+ win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
+ win32 (all others - specifically, sys.platform is returned)
+
+ For other non-POSIX platforms, currently just returns 'sys.platform'.
+
+ """
+ if os.name == 'nt':
+ if 'amd64' in sys.version.lower():
+ return 'win-amd64'
+ return sys.platform
+
+ # Set for cross builds explicitly
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
+ return os.environ["_PYTHON_HOST_PLATFORM"]
+
+ if os.name != "posix" or not hasattr(os, 'uname'):
+ # XXX what about the architecture? NT is Intel or Alpha,
+ # Mac OS is M68k or PPC, etc.
+ return sys.platform
+
+ # Try to distinguish various flavours of Unix
+
+ (osname, host, release, version, machine) = os.uname()
+
+ # Convert the OS name to lowercase, remove '/' characters, and translate
+ # spaces (for "Power Macintosh")
+ osname = osname.lower().replace('/', '')
+ machine = machine.replace(' ', '_')
+ machine = machine.replace('/', '-')
+
+ if osname[:5] == "linux":
+ # At least on Linux/Intel, 'machine' is the processor --
+ # i386, etc.
+ # XXX what about Alpha, SPARC, etc?
+ return "%s-%s" % (osname, machine)
+ elif osname[:5] == "sunos":
+ if release[0] >= "5": # SunOS 5 == Solaris 2
+ osname = "solaris"
+ release = "%d.%s" % (int(release[0]) - 3, release[2:])
+ # We can't use "platform.architecture()[0]" because of a
+ # bootstrap problem. We use a dict to get an error
+ # if something suspicious happens.
+ bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
+ machine += ".%s" % bitness[sys.maxsize]
+ # fall through to standard osname-release-machine representation
+ elif osname[:3] == "aix":
+ return "%s-%s.%s" % (osname, version, release)
+ elif osname[:6] == "cygwin":
+ osname = "cygwin"
+ rel_re = re.compile (r'[\d.]+', re.ASCII)
+ m = rel_re.match(release)
+ if m:
+ release = m.group()
+ elif osname[:6] == "darwin":
+ import _osx_support, distutils.sysconfig
+ osname, release, machine = _osx_support.get_platform_osx(
+ distutils.sysconfig.get_config_vars(),
+ osname, release, machine)
+
+ return "%s-%s-%s" % (osname, release, machine)
+
+# get_platform ()
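+
+# Illustrative calls (a sketch; the exact string depends on the build host):
+#
+#   >>> get_platform()        # e.g. on a 64-bit Windows build of Python
+#   'win-amd64'
+#   >>> get_platform()        # e.g. on an x86_64 Linux host
+#   'linux-x86_64'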
+
+
+def convert_path (pathname):
+ """Return 'pathname' as a name that will work on the native filesystem,
+ i.e. split it on '/' and put it back together again using the current
+ directory separator. Needed because filenames in the setup script are
+ always supplied in Unix style, and have to be converted to the local
+ convention before we can actually use them in the filesystem. Raises
+ ValueError on non-Unix-ish systems if 'pathname' either starts or
+ ends with a slash.
+ """
+ if os.sep == '/':
+ return pathname
+ if not pathname:
+ return pathname
+ if pathname[0] == '/':
+ raise ValueError("path '%s' cannot be absolute" % pathname)
+ if pathname[-1] == '/':
+ raise ValueError("path '%s' cannot end with '/'" % pathname)
+
+ paths = pathname.split('/')
+ while '.' in paths:
+ paths.remove('.')
+ if not paths:
+ return os.curdir
+ return os.path.join(*paths)
+
+# convert_path ()
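+
+# Illustrative behaviour (a sketch):
+#
+#   >>> convert_path('pkg/data/file.txt')    # on Windows, where os.sep == '\\'
+#   'pkg\\data\\file.txt'
+#
+# On POSIX systems (os.sep == '/') the pathname is returned unchanged.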
+
+
+def change_root (new_root, pathname):
+ """Return 'pathname' with 'new_root' prepended. If 'pathname' is
+ relative, this is equivalent to "os.path.join(new_root,pathname)".
+ Otherwise, it requires making 'pathname' relative and then joining the
+ two, which is tricky on DOS/Windows and Mac OS.
+ """
+ if os.name == 'posix':
+ if not os.path.isabs(pathname):
+ return os.path.join(new_root, pathname)
+ else:
+ return os.path.join(new_root, pathname[1:])
+
+ elif os.name == 'nt':
+ (drive, path) = os.path.splitdrive(pathname)
+ if path[0] == '\\':
+ path = path[1:]
+ return os.path.join(new_root, path)
+
+ else:
+ raise DistutilsPlatformError("nothing known about platform '%s'" % os.name)
+
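+# Illustrative results on POSIX (a sketch; the paths are hypothetical):
+#
+#   >>> change_root('/tmp/stage', 'usr/lib/python3.7')     # relative pathname
+#   '/tmp/stage/usr/lib/python3.7'
+#   >>> change_root('/tmp/stage', '/usr/lib/python3.7')    # absolute pathname
+#   '/tmp/stage/usr/lib/python3.7'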
+
+_environ_checked = 0
+def check_environ ():
+ """Ensure that 'os.environ' has all the environment variables we
+ guarantee that users can use in config files, command-line options,
+ etc. Currently this includes:
+ HOME - user's home directory (Unix only)
+ PLAT - description of the current platform, including hardware
+ and OS (see 'get_platform()')
+ """
+ global _environ_checked
+ if _environ_checked:
+ return
+
+ if os.name == 'posix' and 'HOME' not in os.environ:
+ import pwd
+ os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+
+ if 'PLAT' not in os.environ:
+ os.environ['PLAT'] = get_platform()
+
+ _environ_checked = 1
+
+
+def subst_vars (s, local_vars):
+ """Perform shell/Perl-style variable substitution on 'string'. Every
+ occurrence of '$' followed by a name is considered a variable, and
+ variable is substituted by the value found in the 'local_vars'
+ dictionary, or in 'os.environ' if it's not in 'local_vars'.
+ 'os.environ' is first checked/augmented to guarantee that it contains
+ certain values: see 'check_environ()'. Raise ValueError for any
+ variables not found in either 'local_vars' or 'os.environ'.
+ """
+ check_environ()
+ def _subst (match, local_vars=local_vars):
+ var_name = match.group(1)
+ if var_name in local_vars:
+ return str(local_vars[var_name])
+ else:
+ return os.environ[var_name]
+
+ try:
+ return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
+ except KeyError as var:
+ raise ValueError("invalid variable '$%s'" % var)
+
+# subst_vars ()
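+
+# Illustrative substitution (a sketch; 'base' is a hypothetical key):
+#
+#   >>> subst_vars('--prefix=$base', {'base': '/usr/local'})
+#   '--prefix=/usr/local'
+#
+# Names missing from 'local_vars' fall back to os.environ; a name found in
+# neither place raises ValueError.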
+
+
+def grok_environment_error (exc, prefix="error: "):
+ # Function kept for backward compatibility.
+ # Used to try clever things with EnvironmentErrors,
+ # but nowadays str(exception) produces good messages.
+ return prefix + str(exc)
+
+
+# Needed by 'split_quoted()'
+_wordchars_re = _squote_re = _dquote_re = None
+def _init_regex():
+ global _wordchars_re, _squote_re, _dquote_re
+ _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
+ _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
+ _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
+
+def split_quoted (s):
+ """Split a string up according to Unix shell-like rules for quotes and
+ backslashes. In short: words are delimited by spaces, as long as those
+ spaces are not escaped by a backslash, or inside a quoted string.
+ Single and double quotes are equivalent, and the quote characters can
+ be backslash-escaped. The backslash is stripped from any two-character
+ escape sequence, leaving only the escaped character. The quote
+ characters are stripped from any quoted string. Returns a list of
+ words.
+ """
+
+ # This is a nice algorithm for splitting up a single string, since it
+ # doesn't require character-by-character examination. It was a little
+ # bit of a brain-bender to get it working right, though...
+ if _wordchars_re is None: _init_regex()
+
+ s = s.strip()
+ words = []
+ pos = 0
+
+ while s:
+ m = _wordchars_re.match(s, pos)
+ end = m.end()
+ if end == len(s):
+ words.append(s[:end])
+ break
+
+ if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
+ words.append(s[:end]) # we definitely have a word delimiter
+ s = s[end:].lstrip()
+ pos = 0
+
+ elif s[end] == '\\': # preserve whatever is being escaped;
+ # will become part of the current word
+ s = s[:end] + s[end+1:]
+ pos = end+1
+
+ else:
+ if s[end] == "'": # slurp singly-quoted string
+ m = _squote_re.match(s, end)
+ elif s[end] == '"': # slurp doubly-quoted string
+ m = _dquote_re.match(s, end)
+ else:
+ raise RuntimeError("this can't happen (bad char '%c')" % s[end])
+
+ if m is None:
+ raise ValueError("bad string (mismatched %s quotes?)" % s[end])
+
+ (beg, end) = m.span()
+ s = s[:beg] + s[beg+1:end-1] + s[end:]
+ pos = m.end() - 2
+
+ if pos >= len(s):
+ words.append(s)
+ break
+
+ return words
+
+# split_quoted ()
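+
+# Illustrative behaviour (a sketch):
+#
+#   >>> split_quoted('gcc -DTEXT="hello world" -O2')
+#   ['gcc', '-DTEXT=hello world', '-O2']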
+
+
+def execute (func, args, msg=None, verbose=0, dry_run=0):
+ """Perform some action that affects the outside world (eg. by
+ writing to the filesystem). Such actions are special because they
+ are disabled by the 'dry_run' flag. This method takes care of all
+ that bureaucracy for you; all you have to do is supply the
+ function to call and an argument tuple for it (to embody the
+ "external action" being performed), and an optional message to
+ print.
+ """
+ if msg is None:
+ msg = "%s%r" % (func.__name__, args)
+ if msg[-2:] == ',)': # correct for singleton tuple
+ msg = msg[0:-2] + ')'
+
+ log.info(msg)
+ if not dry_run:
+ func(*args)
+
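+# Illustrative call (a sketch; the path is hypothetical):
+#
+#   execute(os.remove, ('build/temp/script.py',),
+#           "removing build/temp/script.py", dry_run=1)
+#
+# With dry_run true the message is logged but os.remove() is never called.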
+
+def strtobool (val):
+ """Convert a string representation of truth to true (1) or false (0).
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+ """
+ val = val.lower()
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
+ return 1
+ elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
+
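+# Illustrative behaviour (a sketch):
+#
+#   >>> strtobool('YES'), strtobool('off')
+#   (1, 0)
+#   >>> strtobool('maybe')
+#   Traceback (most recent call last):
+#     ...
+#   ValueError: invalid truth value 'maybe'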
+
+def byte_compile (py_files,
+ optimize=0, force=0,
+ prefix=None, base_dir=None,
+ verbose=1, dry_run=0,
+ direct=None):
+ """Byte-compile a collection of Python source files to .pyc
+ files in a __pycache__ subdirectory. 'py_files' is a list
+ of files to compile; any files that don't end in ".py" are silently
+ skipped. 'optimize' must be one of the following:
+ 0 - don't optimize
+ 1 - normal optimization (like "python -O")
+ 2 - extra optimization (like "python -OO")
+ If 'force' is true, all files are recompiled regardless of
+ timestamps.
+
+ The source filename encoded in each bytecode file defaults to the
+ filenames listed in 'py_files'; you can modify these with 'prefix' and
+ 'basedir'. 'prefix' is a string that will be stripped off of each
+ source filename, and 'base_dir' is a directory name that will be
+ prepended (after 'prefix' is stripped). You can supply either or both
+ (or neither) of 'prefix' and 'base_dir', as you wish.
+
+ If 'dry_run' is true, doesn't actually do anything that would
+ affect the filesystem.
+
+ Byte-compilation is either done directly in this interpreter process
+ with the standard py_compile module, or indirectly by writing a
+ temporary script and executing it. Normally, you should let
+ 'byte_compile()' figure out whether to use direct compilation or not (see
+ the source for details). The 'direct' flag is used by the script
+ generated in indirect mode; unless you know what you're doing, leave
+ it set to None.
+ """
+
+ # Late import to fix a bootstrap issue: _posixsubprocess is built by
+ # setup.py, but setup.py uses distutils.
+ import subprocess
+
+ # nothing is done if sys.dont_write_bytecode is True
+ if sys.dont_write_bytecode:
+ raise DistutilsByteCompileError('byte-compiling is disabled.')
+
+ # First, if the caller didn't force us into direct or indirect mode,
+ # figure out which mode we should be in. We take a conservative
+ # approach: choose direct mode *only* if the current interpreter is
+ # in debug mode and optimize is 0. If we're not in debug mode (-O
+ # or -OO), we don't know which level of optimization this
+ # interpreter is running with, so we can't do direct
+ # byte-compilation and be certain that it's the right thing. Thus,
+ # always compile indirectly if the current interpreter is in either
+ # optimize mode, or if either optimization level was requested by
+ # the caller.
+ if direct is None:
+ direct = (__debug__ and optimize == 0)
+
+ # "Indirect" byte-compilation: write a temporary script and then
+ # run it with the appropriate flags.
+ if not direct:
+ try:
+ from tempfile import mkstemp
+ (script_fd, script_name) = mkstemp(".py")
+ except ImportError:
+ from tempfile import mktemp
+ (script_fd, script_name) = None, mktemp(".py")
+ log.info("writing byte-compilation script '%s'", script_name)
+ if not dry_run:
+ if script_fd is not None:
+ script = os.fdopen(script_fd, "w")
+ else:
+ script = open(script_name, "w")
+
+ script.write("""\
+from distutils.util import byte_compile
+files = [
+""")
+
+ # XXX would be nice to write absolute filenames, just for
+ # safety's sake (script should be more robust in the face of
+ # chdir'ing before running it). But this requires abspath'ing
+ # 'prefix' as well, and that breaks the hack in build_lib's
+ # 'byte_compile()' method that carefully tacks on a trailing
+ # slash (os.sep really) to make sure the prefix here is "just
+ # right". This whole prefix business is rather delicate -- the
+ # problem is that it's really a directory, but I'm treating it
+ # as a dumb string, so trailing slashes and so forth matter.
+
+ #py_files = map(os.path.abspath, py_files)
+ #if prefix:
+ # prefix = os.path.abspath(prefix)
+
+ script.write(",\n".join(map(repr, py_files)) + "]\n")
+ script.write("""
+byte_compile(files, optimize=%r, force=%r,
+ prefix=%r, base_dir=%r,
+ verbose=%r, dry_run=0,
+ direct=1)
+""" % (optimize, force, prefix, base_dir, verbose))
+
+ script.close()
+
+ cmd = [sys.executable]
+ cmd.extend(subprocess._optim_args_from_interpreter_flags())
+ cmd.append(script_name)
+ spawn(cmd, dry_run=dry_run)
+ execute(os.remove, (script_name,), "removing %s" % script_name,
+ dry_run=dry_run)
+
+ # "Direct" byte-compilation: use the py_compile module to compile
+ # right here, right now. Note that the script generated in indirect
+ # mode simply calls 'byte_compile()' in direct mode, a weird sort of
+ # cross-process recursion. Hey, it works!
+ else:
+ from py_compile import compile
+
+ for file in py_files:
+ if file[-3:] != ".py":
+ # This lets us be lazy and not filter filenames in
+ # the "install_lib" command.
+ continue
+
+ # Terminology from the py_compile module:
+ # cfile - byte-compiled file
+ # dfile - purported source filename (same as 'file' by default)
+ if optimize >= 0:
+ opt = '' if optimize == 0 else optimize
+ cfile = importlib.util.cache_from_source(
+ file, optimization=opt)
+ else:
+ cfile = importlib.util.cache_from_source(file)
+ dfile = file
+ if prefix:
+ if file[:len(prefix)] != prefix:
+ raise ValueError("invalid prefix: filename %r doesn't start with %r"
+ % (file, prefix))
+ dfile = dfile[len(prefix):]
+ if base_dir:
+ dfile = os.path.join(base_dir, dfile)
+
+ cfile_base = os.path.basename(cfile)
+ if direct:
+ if force or newer(file, cfile):
+ log.info("byte-compiling %s to %s", file, cfile_base)
+ if not dry_run:
+ compile(file, cfile, dfile)
+ else:
+ log.debug("skipping byte-compilation of %s to %s",
+ file, cfile_base)
+
+# byte_compile ()
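+
+# Illustrative call (a sketch; the paths are hypothetical):
+#
+#   byte_compile(['build/lib/pkg/mod.py'],
+#                optimize=0,
+#                prefix='build/lib/',
+#                base_dir='/usr/lib/python3.7/site-packages',
+#                verbose=1, dry_run=0)
+#
+# With direct=None (the default) compilation happens in this process only when
+# the interpreter is in debug mode and optimize == 0; otherwise a temporary
+# script is written and run in a child interpreter.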
+
+def rfc822_escape (header):
+ """Return a version of the string escaped for inclusion in an
+ RFC-822 header, by ensuring there are 8 spaces after each newline.
+ """
+ lines = header.split('\n')
+ sep = '\n' + 8 * ' '
+ return sep.join(lines)
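+
+# Illustrative behaviour (a sketch):
+#
+#   >>> rfc822_escape('Summary line\nsecond line')
+#   'Summary line\n        second line'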
+
+# 2to3 support
+
+def run_2to3(files, fixer_names=None, options=None, explicit=None):
+ """Invoke 2to3 on a list of Python files.
+ The files should all come from the build area, as the
+ modification is done in-place. To reduce the build time,
+ only files modified since the last invocation of this
+ function should be passed in the files argument."""
+
+ if not files:
+ return
+
+ # Make this class local, to delay import of 2to3
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ class DistutilsRefactoringTool(RefactoringTool):
+ def log_error(self, msg, *args, **kw):
+ log.error(msg, *args)
+
+ def log_message(self, msg, *args):
+ log.info(msg, *args)
+
+ def log_debug(self, msg, *args):
+ log.debug(msg, *args)
+
+ if fixer_names is None:
+ fixer_names = get_fixers_from_package('lib2to3.fixes')
+ r = DistutilsRefactoringTool(fixer_names, options=options)
+ r.refactor(files, write=True)
+
+def copydir_run_2to3(src, dest, template=None, fixer_names=None,
+ options=None, explicit=None):
+ """Recursively copy a directory, only copying new and changed files,
+ running run_2to3 over all newly copied Python modules afterward.
+
+ If you give a template string, it's parsed like a MANIFEST.in.
+ """
+ from distutils.dir_util import mkpath
+ from distutils.file_util import copy_file
+ from distutils.filelist import FileList
+ filelist = FileList()
+ curdir = os.getcwd()
+ os.chdir(src)
+ try:
+ filelist.findall()
+ finally:
+ os.chdir(curdir)
+ filelist.files[:] = filelist.allfiles
+ if template:
+ for line in template.splitlines():
+ line = line.strip()
+ if not line: continue
+ filelist.process_template_line(line)
+ copied = []
+ for filename in filelist.files:
+ outname = os.path.join(dest, filename)
+ mkpath(os.path.dirname(outname))
+ res = copy_file(os.path.join(src, filename), outname, update=1)
+ if res[1]: copied.append(outname)
+ run_2to3([fn for fn in copied if fn.lower().endswith('.py')],
+ fixer_names=fixer_names, options=options, explicit=explicit)
+ return copied
+
+class Mixin2to3:
+ '''Mixin class for commands that run 2to3.
+ To configure 2to3, setup scripts may either change
+ the class variables, or inherit from individual commands
+ to override how 2to3 is invoked.'''
+
+ # provide list of fixers to run;
+ # defaults to all from lib2to3.fixers
+ fixer_names = None
+
+ # options dictionary
+ options = None
+
+ # list of fixers to invoke even though they are marked as explicit
+ explicit = None
+
+ def run_2to3(self, files):
+ return run_2to3(files, self.fixer_names, self.options, self.explicit)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/version.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/version.py
new file mode 100644
index 00000000..af14cc13
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/version.py
@@ -0,0 +1,343 @@
+#
+# distutils/version.py
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# $Id$
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+import re
+
+class Version:
+ """Abstract base class for version numbering classes. Just provides
+ constructor (__init__) and reproducer (__repr__), because those
+ seem to be the same for all version numbering classes; and route
+ rich comparisons to _cmp.
+ """
+
+ def __init__ (self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__ (self):
+ return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+ def __eq__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c == 0
+
+ def __lt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c < 0
+
+ def __le__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c <= 0
+
+ def __gt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c > 0
+
+ def __ge__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
+class StrictVersion (Version):
+
+ """Version numbering for anal retentives and software idealists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of two or three
+ dot-separated numeric components, with an optional "pre-release" tag
+ on the end. The pre-release tag consists of the letter 'a' or 'b'
+ followed by a number. If the numeric components of two version
+ numbers are equal, then one with a pre-release tag will always
+ be deemed earlier (lesser) than one without.
+
+ The following are valid version numbers (shown in the order that
+ would be obtained by sorting according to the supplied cmp function):
+
+ 0.4 0.4.0 (these two are equivalent)
+ 0.4.1
+ 0.5a1
+ 0.5b3
+ 0.5
+ 0.9.6
+ 1.0
+ 1.0.4a3
+ 1.0.4b1
+ 1.0.4
+
+ The following are examples of invalid version numbers:
+
+ 1
+ 2.7.2.2
+ 1.3.a4
+ 1.3pl1
+ 1.3c4
+
+ The rationale for this version numbering system will be explained
+ in the distutils documentation.
+ """
+
+ version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
+ re.VERBOSE | re.ASCII)
+
+
+ def parse (self, vstring):
+ match = self.version_re.match(vstring)
+ if not match:
+ raise ValueError("invalid version number '%s'" % vstring)
+
+ (major, minor, patch, prerelease, prerelease_num) = \
+ match.group(1, 2, 4, 5, 6)
+
+ if patch:
+ self.version = tuple(map(int, [major, minor, patch]))
+ else:
+ self.version = tuple(map(int, [major, minor])) + (0,)
+
+ if prerelease:
+ self.prerelease = (prerelease[0], int(prerelease_num))
+ else:
+ self.prerelease = None
+
+
+ def __str__ (self):
+
+ if self.version[2] == 0:
+ vstring = '.'.join(map(str, self.version[0:2]))
+ else:
+ vstring = '.'.join(map(str, self.version))
+
+ if self.prerelease:
+ vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+ return vstring
+
+
+ def _cmp (self, other):
+ if isinstance(other, str):
+ other = StrictVersion(other)
+
+ if self.version != other.version:
+ # numeric versions don't match
+ # prerelease stuff doesn't matter
+ if self.version < other.version:
+ return -1
+ else:
+ return 1
+
+ # have to compare prerelease
+ # case 1: neither has prerelease; they're equal
+ # case 2: self has prerelease, other doesn't; other is greater
+ # case 3: self doesn't have prerelease, other does: self is greater
+ # case 4: both have prerelease: must compare them!
+
+ if (not self.prerelease and not other.prerelease):
+ return 0
+ elif (self.prerelease and not other.prerelease):
+ return -1
+ elif (not self.prerelease and other.prerelease):
+ return 1
+ elif (self.prerelease and other.prerelease):
+ if self.prerelease == other.prerelease:
+ return 0
+ elif self.prerelease < other.prerelease:
+ return -1
+ else:
+ return 1
+ else:
+ assert False, "never get here"
+
+# end class StrictVersion
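+
+# Illustrative comparisons (a sketch):
+#
+#   >>> StrictVersion('0.4') == StrictVersion('0.4.0')
+#   True
+#   >>> StrictVersion('1.0.4a3') < StrictVersion('1.0.4')
+#   True
+#   >>> StrictVersion('1.3pl1')
+#   Traceback (most recent call last):
+#     ...
+#   ValueError: invalid version number '1.3pl1'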
+
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+class LooseVersion (Version):
+
+ """Version numbering for anarchists and software realists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of a series of numbers,
+ separated by either periods or strings of letters. When comparing
+ version numbers, the numeric components will be compared
+ numerically, and the alphabetic components lexically. The following
+ are all valid version numbers, in no particular order:
+
+ 1.5.1
+ 1.5.2b2
+ 161
+ 3.10a
+ 8.02
+ 3.4j
+ 1996.07.12
+ 3.2.pl0
+ 3.1.1.6
+ 2g6
+ 11g
+ 0.960923
+ 2.2beta29
+ 1.13++
+ 5.5.kw
+ 2.0b1pl0
+
+ In fact, there is no such thing as an invalid version number under
+ this scheme; the rules for comparison are simple and predictable,
+ but may not always give the results you want (for some definition
+ of "want").
+ """
+
+ component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
+
+ def __init__ (self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+
+ def parse (self, vstring):
+ # I've given up on thinking I can reconstruct the version string
+ # from the parsed tuple -- so I just store the string here for
+ # use by __str__
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring)
+ if x and x != '.']
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
+
+
+ def __str__ (self):
+ return self.vstring
+
+
+ def __repr__ (self):
+ return "LooseVersion ('%s')" % str(self)
+
+
+ def _cmp (self, other):
+ if isinstance(other, str):
+ other = LooseVersion(other)
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
+
+
+# end class LooseVersion
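+
+# Illustrative comparisons (a sketch), including the caveat discussed above:
+# under LooseVersion a pre-release suffix sorts *after* the bare release.
+#
+#   >>> LooseVersion('1.5.1') < LooseVersion('1.5.2b2')
+#   True
+#   >>> LooseVersion('2.7.2') < LooseVersion('2.7.2a1')
+#   True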
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/versionpredicate.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/versionpredicate.py
new file mode 100644
index 00000000..062c98f2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/distutils/versionpredicate.py
@@ -0,0 +1,166 @@
+"""Module for parsing and testing package version predicate strings.
+"""
+import re
+import distutils.version
+import operator
+
+
+re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)",
+ re.ASCII)
+# (package) (rest)
+
+re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses
+re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
+# (comp) (version)
+
+
+def splitUp(pred):
+ """Parse a single version comparison.
+
+ Return (comparison string, StrictVersion)
+ """
+ res = re_splitComparison.match(pred)
+ if not res:
+ raise ValueError("bad package restriction syntax: %r" % pred)
+ comp, verStr = res.groups()
+ return (comp, distutils.version.StrictVersion(verStr))
+
+compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
+ ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
+
+class VersionPredicate:
+ """Parse and test package version predicates.
+
+ >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)')
+
+ The `name` attribute provides the full dotted name that is given::
+
+ >>> v.name
+ 'pyepat.abc'
+
+ The str() of a `VersionPredicate` provides a normalized
+ human-readable version of the expression::
+
+ >>> print(v)
+ pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3)
+
+ The `satisfied_by()` method can be used to determine whether a given
+ version number is included in the set described by the version
+ restrictions::
+
+ >>> v.satisfied_by('1.1')
+ True
+ >>> v.satisfied_by('1.4')
+ True
+ >>> v.satisfied_by('1.0')
+ False
+ >>> v.satisfied_by('4444.4')
+ False
+ >>> v.satisfied_by('1555.1b3')
+ False
+
+ `VersionPredicate` is flexible in accepting extra whitespace::
+
+ >>> v = VersionPredicate(' pat( == 0.1 ) ')
+ >>> v.name
+ 'pat'
+ >>> v.satisfied_by('0.1')
+ True
+ >>> v.satisfied_by('0.2')
+ False
+
+ If any version numbers passed in do not conform to the
+ restrictions of `StrictVersion`, a `ValueError` is raised::
+
+ >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)')
+ Traceback (most recent call last):
+ ...
+ ValueError: invalid version number '1.2zb3'
+
+ If the module or package name given does not conform to what's
+ allowed as a legal module or package name, `ValueError` is
+ raised::
+
+ >>> v = VersionPredicate('foo-bar')
+ Traceback (most recent call last):
+ ...
+ ValueError: expected parenthesized list: '-bar'
+
+ >>> v = VersionPredicate('foo bar (12.21)')
+ Traceback (most recent call last):
+ ...
+ ValueError: expected parenthesized list: 'bar (12.21)'
+
+ """
+
+ def __init__(self, versionPredicateStr):
+ """Parse a version predicate string.
+ """
+ # Fields:
+ # name: package name
+ # pred: list of (comparison string, StrictVersion)
+
+ versionPredicateStr = versionPredicateStr.strip()
+ if not versionPredicateStr:
+ raise ValueError("empty package restriction")
+ match = re_validPackage.match(versionPredicateStr)
+ if not match:
+ raise ValueError("bad package name in %r" % versionPredicateStr)
+ self.name, paren = match.groups()
+ paren = paren.strip()
+ if paren:
+ match = re_paren.match(paren)
+ if not match:
+ raise ValueError("expected parenthesized list: %r" % paren)
+ str = match.groups()[0]
+ self.pred = [splitUp(aPred) for aPred in str.split(",")]
+ if not self.pred:
+ raise ValueError("empty parenthesized list in %r"
+ % versionPredicateStr)
+ else:
+ self.pred = []
+
+ def __str__(self):
+ if self.pred:
+ seq = [cond + " " + str(ver) for cond, ver in self.pred]
+ return self.name + " (" + ", ".join(seq) + ")"
+ else:
+ return self.name
+
+ def satisfied_by(self, version):
+ """True if version is compatible with all the predicates in self.
+ The parameter version must be acceptable to the StrictVersion
+ constructor. It may be either a string or StrictVersion.
+ """
+ for cond, ver in self.pred:
+ if not compmap[cond](version, ver):
+ return False
+ return True
+
+
+_provision_rx = None
+
+def split_provision(value):
+ """Return the name and optional version number of a provision.
+
+ The version number, if given, will be returned as a `StrictVersion`
+ instance, otherwise it will be `None`.
+
+ >>> split_provision('mypkg')
+ ('mypkg', None)
+ >>> split_provision(' mypkg( 1.2 ) ')
+ ('mypkg', StrictVersion ('1.2'))
+ """
+ global _provision_rx
+ if _provision_rx is None:
+ _provision_rx = re.compile(
+ r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$",
+ re.ASCII)
+ value = value.strip()
+ m = _provision_rx.match(value)
+ if not m:
+ raise ValueError("illegal provides specification: %r" % value)
+ ver = m.group(2) or None
+ if ver:
+ ver = distutils.version.StrictVersion(ver)
+ return m.group(1), ver
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/doctest.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/doctest.py
new file mode 100644
index 00000000..c1d8a1db
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/doctest.py
@@ -0,0 +1,2788 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+ # 0, Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'SKIP',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ 'FAIL_FAST',
+ # 1. Utility Functions
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 8. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+import difflib
+import inspect
+import linecache
+import os
+import pdb
+import re
+import sys
+import traceback
+import unittest
+from io import StringIO
+from collections import namedtuple
+
+TestResults = namedtuple('TestResults', 'failed attempted')
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+ # Create a new flag unless `name` is already known.
+ return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+SKIP = register_optionflag('SKIP')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+ DONT_ACCEPT_BLANKLINE |
+ NORMALIZE_WHITESPACE |
+ ELLIPSIS |
+ SKIP |
+ IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+FAIL_FAST = register_optionflag('FAIL_FAST')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF |
+ REPORT_ONLY_FIRST_FAILURE |
+ FAIL_FAST)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
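+# Illustrative use of the flags (a sketch): option flags are bit masks meant
+# to be OR'ed together, e.g.
+#
+#   testmod(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)
+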
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Unittest Support
+# 8. Debugging Support
+# 9. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
+
+def _normalize_module(module, depth=2):
+ """
+ Return the module specified by `module`. In particular:
+ - If `module` is a module, then return module.
+ - If `module` is a string, then import and return the
+ module with that name.
+ - If `module` is None, then return the calling module.
+ The calling module is assumed to be the module of
+ the stack frame at the given depth in the call stack.
+ """
+ if inspect.ismodule(module):
+ return module
+ elif isinstance(module, str):
+ return __import__(module, globals(), locals(), ["*"])
+ elif module is None:
+ return sys.modules[sys._getframe(depth).f_globals['__name__']]
+ else:
+ raise TypeError("Expected a module, string, or None")
+
+def _load_testfile(filename, package, module_relative, encoding):
+ if module_relative:
+ package = _normalize_module(package, 3)
+ filename = _module_relative_path(package, filename)
+ if getattr(package, '__loader__', None) is not None:
+ if hasattr(package.__loader__, 'get_data'):
+ file_contents = package.__loader__.get_data(filename)
+ file_contents = file_contents.decode(encoding)
+ # get_data() opens files as 'rb', so one must do the equivalent
+ # conversion as universal newlines would do.
+ return file_contents.replace(os.linesep, '\n'), filename
+ with open(filename, encoding=encoding) as f:
+ return f.read(), filename
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning of
+ every non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ return result
+
+ def truncate(self, size=None):
+ self.seek(size)
+ StringIO.truncate(self)
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+ """
+ Essentially the only subtle case:
+ >>> _ellipsis_match('aa...aa', 'aaa')
+ False
+ """
+ if ELLIPSIS_MARKER not in want:
+ return want == got
+
+ # Find "the real" strings.
+ ws = want.split(ELLIPSIS_MARKER)
+ assert len(ws) >= 2
+
+ # Deal with exact matches possibly needed at one or both ends.
+ startpos, endpos = 0, len(got)
+ w = ws[0]
+ if w: # starts with exact match
+ if got.startswith(w):
+ startpos = len(w)
+ del ws[0]
+ else:
+ return False
+ w = ws[-1]
+ if w: # ends with exact match
+ if got.endswith(w):
+ endpos -= len(w)
+ del ws[-1]
+ else:
+ return False
+
+ if startpos > endpos:
+ # Exact end matches required more characters than we have, as in
+ # _ellipsis_match('aa...aa', 'aaa')
+ return False
+
+ # For the rest, we only need to find the leftmost non-overlapping
+ # match for each piece. If there's no overall match that way alone,
+ # there's no overall match period.
+ for w in ws:
+ # w may be '' at times, if there are consecutive ellipses, or
+ # due to an ellipsis at the start or end of `want`. That's OK.
+ # Search for an empty string succeeds, and doesn't change startpos.
+ startpos = got.find(w, startpos, endpos)
+ if startpos < 0:
+ return False
+ startpos += len(w)
+
+ return True
+
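+# Illustrative matches (a sketch):
+#
+#   >>> _ellipsis_match('x = ...', 'x = 42')
+#   True
+#   >>> _ellipsis_match('<...>', '<abc>')
+#   True
+#   >>> _ellipsis_match('aa...aa', 'aaa')
+#   False
+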
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+def _strip_exception_details(msg):
+ # Support for IGNORE_EXCEPTION_DETAIL.
+ # Get rid of everything except the exception name; in particular, drop
+ # the possibly dotted module path (if any) and the exception message (if
+ # any). We assume that a colon is never part of a dotted name, or of an
+ # exception name.
+ # E.g., given
+ # "foo.bar.MyError: la di da"
+ # return "MyError"
+ # Or for "abc.def" or "abc.def:\n" return "def".
+
+ start, end = 0, len(msg)
+ # The exception name must appear on the first line.
+ i = msg.find("\n")
+ if i >= 0:
+ end = i
+ # retain up to the first colon (if any)
+ i = msg.find(':', 0, end)
+ if i >= 0:
+ end = i
+ # retain just the exception name
+ i = msg.rfind('.', 0, end)
+ if i >= 0:
+ start = i+1
+ return msg[start: end]
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ self.__debugger_used = False
+ # do not play signal games in the pdb
+ pdb.Pdb.__init__(self, stdout=out, nosigint=True)
+ # still use input() to get user input
+ self.use_rawinput = 1
+
+ def set_trace(self, frame=None):
+ self.__debugger_used = True
+ if frame is None:
+ frame = sys._getframe().f_back
+ pdb.Pdb.set_trace(self, frame)
+
+ def set_continue(self):
+ # Calling set_continue unconditionally would break unit test
+ # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
+ if self.__debugger_used:
+ pdb.Pdb.set_continue(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, test_path):
+ if not inspect.ismodule(module):
+ raise TypeError('Expected a module: %r' % module)
+ if test_path.startswith('/'):
+ raise ValueError('Module-relative files may not have absolute paths')
+
+ # Normalize the path. On Windows, replace "/" with "\".
+ test_path = os.path.join(*(test_path.split('/')))
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ if hasattr(module, '__path__'):
+ for directory in module.__path__:
+ fullpath = os.path.join(directory, test_path)
+ if os.path.exists(fullpath):
+ return fullpath
+
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module "
+ "%r (it has no __file__)"
+ % module.__name__)
+
+ # Combine the base directory and the test path.
+ return os.path.join(basedir, test_path)
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+ """
+ A single doctest example, consisting of source code and expected
+ output. `Example` defines the following attributes:
+
+ - source: A single Python statement, always ending with a newline.
+ The constructor adds a newline if needed.
+
+ - want: The expected output from running the source code (either
+ from stdout, or a traceback in case of exception). `want` ends
+ with a newline unless it's empty, in which case it's an empty
+ string. The constructor adds a newline if needed.
+
+ - exc_msg: The exception message generated by the example, if
+ the example is expected to generate an exception; or `None` if
+ it is not expected to generate an exception. This exception
+ message is compared against the return value of
+ `traceback.format_exception_only()`. `exc_msg` ends with a
+ newline unless it's `None`. The constructor adds a newline
+ if needed.
+
+ - lineno: The line number within the DocTest string containing
+ this Example where the Example begins. This line number is
+ zero-based, with respect to the beginning of the DocTest.
+
+ - indent: The example's indentation in the DocTest string.
+ I.e., the number of space characters that precede the
+ example's first prompt.
+
+ - options: A dictionary mapping from option flags to True or
+ False, which is used to override default options for this
+ example. Any option flags not contained in this dictionary
+ are left at their default value (as specified by the
+ DocTestRunner's optionflags). By default, no options are set.
+ """
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Normalize inputs.
+ if not source.endswith('\n'):
+ source += '\n'
+ if want and not want.endswith('\n'):
+ want += '\n'
+ if exc_msg is not None and not exc_msg.endswith('\n'):
+ exc_msg += '\n'
+ # Store properties.
+ self.source = source
+ self.want = want
+ self.lineno = lineno
+ self.indent = indent
+ if options is None: options = {}
+ self.options = options
+ self.exc_msg = exc_msg
+
+ def __eq__(self, other):
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return self.source == other.source and \
+ self.want == other.want and \
+ self.lineno == other.lineno and \
+ self.indent == other.indent and \
+ self.options == other.options and \
+ self.exc_msg == other.exc_msg
+
+ def __hash__(self):
+ return hash((self.source, self.want, self.lineno, self.indent,
+ self.exc_msg))
+
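+# Illustrative normalization (a sketch):
+#
+#   >>> ex = Example(source='print(2 + 2)', want='4\n')
+#   >>> ex.source, ex.want
+#   ('print(2 + 2)\n', '4\n')
+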
+class DocTest:
+ """
+ A collection of doctest examples that should be run in a single
+ namespace. Each `DocTest` defines the following attributes:
+
+ - examples: the list of examples.
+
+ - globs: The namespace (aka globals) that the examples should
+ be run in.
+
+ - name: A name identifying the DocTest (typically, the name of
+ the object whose docstring this DocTest was extracted from).
+
+ - filename: The name of the file that this DocTest was extracted
+ from, or `None` if the filename is unknown.
+
+ - lineno: The line number within filename where this DocTest
+ begins, or `None` if the line number is unavailable. This
+ line number is zero-based, with respect to the beginning of
+ the file.
+
+ - docstring: The string that the examples were extracted from,
+ or `None` if the string is unavailable.
+ """
+ def __init__(self, examples, globs, name, filename, lineno, docstring):
+ """
+ Create a new DocTest containing the given examples. The
+ DocTest's globals are initialized with a copy of `globs`.
+ """
+ assert not isinstance(examples, str), \
+ "DocTest no longer accepts str; use DocTestParser instead"
+ self.examples = examples
+ self.docstring = docstring
+ self.globs = globs.copy()
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def __repr__(self):
+ if len(self.examples) == 0:
+ examples = 'no examples'
+ elif len(self.examples) == 1:
+ examples = '1 example'
+ else:
+ examples = '%d examples' % len(self.examples)
+ return ('<%s %s from %s:%s (%s)>' %
+ (self.__class__.__name__,
+ self.name, self.filename, self.lineno, examples))
+
+ def __eq__(self, other):
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return self.examples == other.examples and \
+ self.docstring == other.docstring and \
+ self.globs == other.globs and \
+ self.name == other.name and \
+ self.filename == other.filename and \
+ self.lineno == other.lineno
+
+ def __hash__(self):
+ return hash((self.docstring, self.name, self.filename, self.lineno))
+
+ # This lets us sort tests by name:
+ def __lt__(self, other):
+ if not isinstance(other, DocTest):
+ return NotImplemented
+ return ((self.name, self.filename, self.lineno, id(self))
+ <
+ (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .+$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
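+
+ # Illustrative sketch (not executed): for a docstring chunk such as
+ #     >>> print(1 + 1)
+ #     2
+ # the 'indent' group captures the spaces before the first ">>>", the
+ # 'source' group captures the prompt line(s) themselves, and the 'want'
+ # group captures the expected-output line "2\n".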
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name=''):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
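+
+ # Illustrative sketch (assuming a DocTestParser instance `p`; not part
+ # of this module's tests): p.parse(">>> 2 + 2\n4\n") returns a list of
+ # alternating text and Example objects, here ['', Example, ''] where
+ # the Example has source "2 + 2\n" and want "4\n".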
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name=''):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+ interesting appears on the same line as opening triple-quote,
+ and so the first interesting line is called \"line 1\" then.
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
+
+ def _parse_example(self, m, name, lineno):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
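+
+ # Illustrative sketch (not executed): a source line such as
+ #     print(list(range(20))) # doctest: +ELLIPSIS, -NORMALIZE_WHITESPACE
+ # puts "+ELLIPSIS, -NORMALIZE_WHITESPACE" in group 1, which
+ # _find_options() below maps to {ELLIPSIS: True, NORMALIZE_WHITESPACE: False}.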
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+ _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ for i, line in enumerate(lines):
+ if len(line) >= indent+4 and line[indent+3] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+3], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+ """
+ A class used to extract the DocTests that are relevant to a given
+ object, from its docstring and the docstrings of its contained
+ objects. Doctests can currently be extracted from the following
+ object types: modules, functions, classes, methods, staticmethods,
+ classmethods, and properties.
+ """
+
+ def __init__(self, verbose=False, parser=DocTestParser(),
+ recurse=True, exclude_empty=True):
+ """
+ Create a new doctest finder.
+
+ The optional argument `parser` specifies a class or
+ function that should be used to create new DocTest objects (or
+ objects that implement the same interface as DocTest). The
+ signature for this factory function should match the signature
+ of the DocTest constructor.
+
+ If the optional argument `recurse` is false, then `find` will
+ only examine the given object, and not any contained objects.
+
+ If the optional argument `exclude_empty` is false, then `find`
+ will include tests for objects with empty docstrings.
+ """
+ self._parser = parser
+ self._verbose = verbose
+ self._recurse = recurse
+ self._exclude_empty = exclude_empty
+
+ def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
+ """
+ Return a list of the DocTests that are defined by the given
+ object's docstring, or by any of its contained objects'
+ docstrings.
+
+ The optional parameter `module` is the module that contains
+ the given object. If the module is not specified or is None, then
+ the test finder will attempt to automatically determine the
+ correct module. The object's module is used:
+
+ - As a default namespace, if `globs` is not specified.
+ - To prevent the DocTestFinder from extracting DocTests
+ from objects that are imported from other modules.
+ - To find the name of the file containing the object.
+ - To help find the line number of the object within its
+ file.
+
+ Contained objects whose module does not match `module` are ignored.
+
+ If `module` is False, no attempt to find the module will be made.
+ This is obscure, of use mostly in tests: if `module` is False, or
+ is None but cannot be found automatically, then all objects are
+ considered to belong to the (non-existent) module, so all contained
+ objects will (recursively) be searched for doctests.
+
+ The globals for each DocTest is formed by combining `globs`
+ and `extraglobs` (bindings in `extraglobs` override bindings
+ in `globs`). A new copy of the globals dictionary is created
+ for each DocTest. If `globs` is not specified, then it
+ defaults to the module's `__dict__`, if specified, or {}
+ otherwise. If `extraglobs` is not specified, then it defaults
+ to {}.
+
+ """
+ # If name was not specified, then extract it from the object.
+ if name is None:
+ name = getattr(obj, '__name__', None)
+ if name is None:
+ raise ValueError("DocTestFinder.find: name must be given "
+ "when obj.__name__ doesn't exist: %r" %
+ (type(obj),))
+
+ # Find the module that contains the given object (if obj is
+ # a module, then module=obj.). Note: this may fail, in which
+ # case module will be None.
+ if module is False:
+ module = None
+ elif module is None:
+ module = inspect.getmodule(obj)
+
+ # Read the module's source code. This is used by
+ # DocTestFinder._find_lineno to find the line number for a
+ # given object's docstring.
+ try:
+ file = inspect.getsourcefile(obj)
+ except TypeError:
+ source_lines = None
+ else:
+ if not file:
+ # Check to see if it's one of our special internal "files"
+ # (see __patched_linecache_getlines).
+ file = inspect.getfile(obj)
+ if not file[0]+file[-2:] == '<]>': file = None
+ if file is None:
+ source_lines = None
+ else:
+ if module is not None:
+ # Supply the module globals in case the module was
+ # originally loaded via a PEP 302 loader and
+ # file is not a valid filesystem path
+ source_lines = linecache.getlines(file, module.__dict__)
+ else:
+ # No access to a loader, so assume it's a normal
+ # filesystem path
+ source_lines = linecache.getlines(file)
+ if not source_lines:
+ source_lines = None
+
+ # Initialize globals, and merge in extraglobs.
+ if globs is None:
+ if module is None:
+ globs = {}
+ else:
+ globs = module.__dict__.copy()
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+ if '__name__' not in globs:
+ globs['__name__'] = '__main__' # provide a default module name
+
+ # Recursively explore `obj`, extracting DocTests.
+ tests = []
+ self._find(tests, obj, name, module, source_lines, globs, {})
+ # Sort the tests by alpha order of names, for consistency in
+ # verbose-mode output. This was a feature of doctest in Pythons
+ # <= 2.3 that got lost by accident in 2.4. It was repaired in
+ # 2.4.4 and 2.5.
+ tests.sort()
+ return tests
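+
+ # Illustrative sketch (assuming a hypothetical imported module `mymod`;
+ # not executed): DocTestFinder().find(mymod) returns a sorted list of
+ # DocTest objects, one per non-empty docstring reachable from `mymod`,
+ # named "mymod", "mymod.func", "mymod.Class.method", and so on.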
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.__globals__
+ elif inspect.ismethoddescriptor(object):
+ if hasattr(object, '__objclass__'):
+ obj_mod = object.__objclass__.__module__
+ elif hasattr(object, '__module__'):
+ obj_mod = object.__module__
+ else:
+ return True # [XX] no easy way to tell otherwise
+ return module.__name__ == obj_mod
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+ return True # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+ if self._verbose:
+ print('Finding tests in %s' % name)
+
+ # If we've already processed this object, then ignore it.
+ if id(obj) in seen:
+ return
+ seen[id(obj)] = 1
+
+ # Find a test for this object, and add it to the list of tests.
+ test = self._get_test(obj, name, module, globs, source_lines)
+ if test is not None:
+ tests.append(test)
+
+ # Look for tests in a module's contained objects.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ valname = '%s.%s' % (name, valname)
+ # Recurse to functions & classes.
+ if ((inspect.isroutine(inspect.unwrap(val))
+ or inspect.isclass(val)) and
+ self._from_module(module, val)):
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a module's __test__ dictionary.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in getattr(obj, '__test__', {}).items():
+ if not isinstance(valname, str):
+ raise ValueError("DocTestFinder.find: __test__ keys "
+ "must be strings: %r" %
+ (type(valname),))
+ if not (inspect.isroutine(val) or inspect.isclass(val) or
+ inspect.ismodule(val) or isinstance(val, str)):
+ raise ValueError("DocTestFinder.find: __test__ values "
+ "must be strings, functions, methods, "
+ "classes, or modules: %r" %
+ (type(val),))
+ valname = '%s.__test__.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if inspect.isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).__func__
+
+ # Recurse to methods, properties, and nested classes.
+ if ((inspect.isroutine(val) or inspect.isclass(val) or
+ isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ def _get_test(self, obj, name, module, globs, source_lines):
+ """
+ Return a DocTest for the given object, if it defines a docstring;
+ otherwise, return None.
+ """
+ # Extract the object's docstring. If it doesn't have one,
+ # then return None (no test for this object).
+ if isinstance(obj, str):
+ docstring = obj
+ else:
+ try:
+ if obj.__doc__ is None:
+ docstring = ''
+ else:
+ docstring = obj.__doc__
+ if not isinstance(docstring, str):
+ docstring = str(docstring)
+ except (TypeError, AttributeError):
+ docstring = ''
+
+ # Find the docstring's location in the file.
+ lineno = self._find_lineno(obj, source_lines)
+
+ # Don't bother if the docstring is empty.
+ if self._exclude_empty and not docstring:
+ return None
+
+ # Return a DocTest for this object.
+ if module is None:
+ filename = None
+ else:
+ filename = getattr(module, '__file__', module.__name__)
+ if filename[-4:] == ".pyc":
+ filename = filename[:-1]
+ return self._parser.get_doctest(docstring, globs, name,
+ filename, lineno)
+
+ def _find_lineno(self, obj, source_lines):
+ """
+ Return a line number of the given object's docstring. Note:
+ this method assumes that the object has a docstring.
+ """
+ lineno = None
+
+ # Find the line number for modules.
+ if inspect.ismodule(obj):
+ lineno = 0
+
+ # Find the line number for classes.
+ # Note: this could be fooled if a class is defined multiple
+ # times in a single file.
+ if inspect.isclass(obj):
+ if source_lines is None:
+ return None
+ pat = re.compile(r'^\s*class\s*%s\b' %
+ getattr(obj, '__name__', '-'))
+ for i, line in enumerate(source_lines):
+ if pat.match(line):
+ lineno = i
+ break
+
+ # Find the line number for functions & methods.
+ if inspect.ismethod(obj): obj = obj.__func__
+ if inspect.isfunction(obj): obj = obj.__code__
+ if inspect.istraceback(obj): obj = obj.tb_frame
+ if inspect.isframe(obj): obj = obj.f_code
+ if inspect.iscode(obj):
+ lineno = getattr(obj, 'co_firstlineno', None)-1
+
+ # Find the line number where the docstring starts. Assume
+ # that it's the first line that begins with a quote mark.
+ # Note: this could be fooled by a multiline function
+ # signature, where a continuation line begins with a quote
+ # mark.
+ if lineno is not None:
+ if source_lines is None:
+ return lineno+1
+ pat = re.compile(r'(^|.*:)\s*\w*("|\')')
+ for lineno in range(lineno, len(source_lines)):
+ if pat.match(source_lines[lineno]):
+ return lineno
+
+ # We couldn't find the line number.
+ return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+ """
+ A class used to run DocTest test cases, and accumulate statistics.
+ The `run` method is used to process a single DocTest case. It
+ returns a tuple `(f, t)`, where `t` is the number of test cases
+ tried, and `f` is the number of test cases that failed.
+
+ >>> tests = DocTestFinder().find(_TestClass)
+ >>> runner = DocTestRunner(verbose=False)
+ >>> tests.sort(key = lambda test: test.name)
+ >>> for test in tests:
+ ... print(test.name, '->', runner.run(test))
+ _TestClass -> TestResults(failed=0, attempted=2)
+ _TestClass.__init__ -> TestResults(failed=0, attempted=2)
+ _TestClass.get -> TestResults(failed=0, attempted=2)
+ _TestClass.square -> TestResults(failed=0, attempted=1)
+
+ The `summarize` method prints a summary of all the test cases that
+ have been run by the runner, and returns an aggregated `(f, t)`
+ tuple:
+
+ >>> runner.summarize(verbose=1)
+ 4 items passed all tests:
+ 2 tests in _TestClass
+ 2 tests in _TestClass.__init__
+ 2 tests in _TestClass.get
+ 1 tests in _TestClass.square
+ 7 tests in 4 items.
+ 7 passed and 0 failed.
+ Test passed.
+ TestResults(failed=0, attempted=7)
+
+ The aggregated number of tried examples and failed examples is
+ also available via the `tries` and `failures` attributes:
+
+ >>> runner.tries
+ 7
+ >>> runner.failures
+ 0
+
+ The comparison between expected outputs and actual outputs is done
+ by an `OutputChecker`. This comparison may be customized with a
+ number of option flags; see the documentation for `testmod` for
+ more information. If the option flags are insufficient, then the
+ comparison may also be customized by passing a subclass of
+ `OutputChecker` to the constructor.
+
+ The test runner's display output can be controlled in two ways.
+ First, an output function (`out`) can be passed to
+ `DocTestRunner.run`; this function will be called with strings that
+ should be displayed. It defaults to `sys.stdout.write`. If
+ capturing the output is not sufficient, then the display output
+ can be also customized by subclassing DocTestRunner, and
+ overriding the methods `report_start`, `report_success`,
+ `report_unexpected_exception`, and `report_failure`.
+ """
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=None, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ if verbose is None:
+ verbose = '-v' in sys.argv
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, compileflags, out):
+ """
+ Run the examples in `test`. Write the outcome of each example
+ with one of the `DocTestRunner.report_*` methods, using the
+ writer function `out`. `compileflags` is the set of compiler
+ flags that should be used to execute examples. Return a tuple
+ `(f, t)`, where `t` is the number of examples tried, and `f`
+ is the number of examples that failed. The examples are run
+ in the namespace `test.globs`.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+ # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # If 'SKIP' is set, then skip this example.
+ if self.optionflags & SKIP:
+ continue
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Use a special filename for compile(), so we can retrieve
+ # the source code during interactive debugging (see
+ # __patched_linecache_getlines).
+ filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+ # Run the example in the given context (globs), and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ try:
+ # Don't blink! This is where the user's code gets run.
+ exec(compile(example.source, filename, "single",
+ compileflags, 1), test.globs)
+ self.debugger.set_continue() # ==== Example Finished ====
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+ self.debugger.set_continue() # ==== Example Finished ====
+
+ got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_msg = traceback.format_exception_only(*exception[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exception)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ if check(_strip_exception_details(example.exc_msg),
+ _strip_exception_details(exc_msg),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exception)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ if failures and self.optionflags & FAIL_FAST:
+ break
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return TestResults(failures, tries)
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
+ __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+ r'(?P<name>.+)'
+ r'\[(?P<examplenum>\d+)\]>$')
+ def __patched_linecache_getlines(self, filename, module_globals=None):
+ m = self.__LINECACHE_FILENAME_RE.match(filename)
+ if m and m.group('name') == self.test.name:
+ example = self.test.examples[int(m.group('examplenum'))]
+ return example.source.splitlines(keepends=True)
+ else:
+ return self.save_linecache_getlines(filename, module_globals)
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ """
+ Run the examples in `test`, and display the results using the
+ writer function `out`.
+
+ The examples are run in the namespace `test.globs`. If
+ `clear_globs` is true (the default), then this namespace will
+ be cleared after the test runs, to help with garbage
+ collection. If you would like to examine the namespace after
+ the test completes, then use `clear_globs=False`.
+
+ `compileflags` gives the set of flags that should be used by
+ the Python compiler when running the examples. If not
+ specified, then it will default to the set of future-import
+ flags that apply to `globs`.
+
+ The output of each example is checked using
+ `DocTestRunner.check_output`, and the results are formatted by
+ the `DocTestRunner.report_*` methods.
+ """
+ self.test = test
+
+ if compileflags is None:
+ compileflags = _extract_future_flags(test.globs)
+
+ save_stdout = sys.stdout
+ if out is None:
+ encoding = save_stdout.encoding
+ if encoding is None or encoding.lower() == 'utf-8':
+ out = save_stdout.write
+ else:
+ # Use backslashreplace error handling on write
+ def out(s):
+ s = str(s.encode(encoding, 'backslashreplace'), encoding)
+ save_stdout.write(s)
+ sys.stdout = self._fakeout
+
+ # Patch pdb.set_trace to restore sys.stdout during interactive
+ # debugging (so it's not still redirected to self._fakeout).
+ # Note that the interactive output will go to *our*
+ # save_stdout, even if that's not the real sys.stdout; this
+ # allows us to write test cases for the set_trace behavior.
+ save_trace = sys.gettrace()
+ save_set_trace = pdb.set_trace
+ self.debugger = _OutputRedirectingPdb(save_stdout)
+ self.debugger.reset()
+ pdb.set_trace = self.debugger.set_trace
+
+ # Patch linecache.getlines, so we can see the example's source
+ # when we're inside the debugger.
+ self.save_linecache_getlines = linecache.getlines
+ linecache.getlines = self.__patched_linecache_getlines
+
+ # Make sure sys.displayhook just prints the value to stdout
+ save_displayhook = sys.displayhook
+ sys.displayhook = sys.__displayhook__
+
+ try:
+ return self.__run(test, compileflags, out)
+ finally:
+ sys.stdout = save_stdout
+ pdb.set_trace = save_set_trace
+ sys.settrace(save_trace)
+ linecache.getlines = self.save_linecache_getlines
+ sys.displayhook = save_displayhook
+ if clear_globs:
+ test.globs.clear()
+ import builtins
+ builtins._ = None
+
+ #/////////////////////////////////////////////////////////////////
+ # Summarization
+ #/////////////////////////////////////////////////////////////////
+ def summarize(self, verbose=None):
+ """
+ Print a summary of all the test cases that have been run by
+ this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+ the total number of failed examples, and `t` is the total
+ number of tried examples.
+
+ The optional `verbose` argument controls how detailed the
+ summary is. If the verbosity is not specified, then the
+ DocTestRunner's verbosity is used.
+ """
+ if verbose is None:
+ verbose = self._verbose
+ notests = []
+ passed = []
+ failed = []
+ totalt = totalf = 0
+ for x in self._name2ft.items():
+ name, (f, t) = x
+ assert f <= t
+ totalt += t
+ totalf += f
+ if t == 0:
+ notests.append(name)
+ elif f == 0:
+ passed.append( (name, t) )
+ else:
+ failed.append(x)
+ if verbose:
+ if notests:
+ print(len(notests), "items had no tests:")
+ notests.sort()
+ for thing in notests:
+ print(" ", thing)
+ if passed:
+ print(len(passed), "items passed all tests:")
+ passed.sort()
+ for thing, count in passed:
+ print(" %3d tests in %s" % (count, thing))
+ if failed:
+ print(self.DIVIDER)
+ print(len(failed), "items had failures:")
+ failed.sort()
+ for thing, (f, t) in failed:
+ print(" %3d of %3d in %s" % (f, t, thing))
+ if verbose:
+ print(totalt, "tests in", len(self._name2ft), "items.")
+ print(totalt - totalf, "passed and", totalf, "failed.")
+ if totalf:
+ print("***Test Failed***", totalf, "failures.")
+ elif verbose:
+ print("Test passed.")
+ return TestResults(totalf, totalt)
+
+ #/////////////////////////////////////////////////////////////////
+ # Backward compatibility cruft to maintain doctest.master.
+ #/////////////////////////////////////////////////////////////////
+ def merge(self, other):
+ d = self._name2ft
+ for name, (f, t) in other._name2ft.items():
+ if name in d:
+ # Don't print here by default, since doing
+ # so breaks some of the buildbots
+ #print("*** DocTestRunner.merge: '" + name + "' in both" \
+ # " testers; summing outcomes.")
+ f2, t2 = d[name]
+ f = f + f2
+ t = t + t2
+ d[name] = f, t
+
+class OutputChecker:
+ """
+ A class used to check whether the actual output from a doctest
+ example matches the expected output. `OutputChecker` defines two
+ methods: `check_output`, which compares a given pair of outputs,
+ and returns true if they match; and `output_difference`, which
+ returns a string describing the differences between two outputs.
+ """
+ def _toAscii(self, s):
+ """
+ Convert string to hex-escaped ASCII string.
+ """
+ return str(s.encode('ASCII', 'backslashreplace'), "ASCII")
+
+ def check_output(self, want, got, optionflags):
+ """
+ Return True iff the actual output from an example (`got`)
+ matches the expected output (`want`). These strings are
+ always considered to match if they are identical; but
+ depending on what option flags the test runner is using,
+ several non-exact match types are also possible. See the
+ documentation for `DocTestRunner` for more information about
+ option flags.
+ """
+
+ # If `want` contains a hex-escaped character such as "\u1234",
+ # then `want` is a string of six characters (e.g. [\,u,1,2,3,4]).
+ # On the other hand, `got` could be another sequence of
+ # characters such as [\u1234], so `want` and `got` should
+ # be folded to hex-escaped ASCII string to compare.
+ got = self._toAscii(got)
+ want = self._toAscii(want)
+
+ # Handle the common case first, for efficiency:
+ # if they're string-identical, always return true.
+ if got == want:
+ return True
+
+ # The values True and False replaced 1 and 0 as the return
+ # value for boolean comparisons in Python 2.3.
+ if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+ if (got,want) == ("True\n", "1\n"):
+ return True
+ if (got,want) == ("False\n", "0\n"):
+ return True
+
+ # <BLANKLINE> can be used as a special sequence to signify a
+ # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ # Replace <BLANKLINE> in want with a blank line.
+ want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+ '', want)
+ # If a line in got contains only spaces, then remove the
+ # spaces.
+ got = re.sub(r'(?m)^[^\S\n]+$', '', got)
+ if got == want:
+ return True
+
+ # This flag causes doctest to ignore any differences in the
+ # contents of whitespace strings. Note that this can be used
+ # in conjunction with the ELLIPSIS flag.
+ if optionflags & NORMALIZE_WHITESPACE:
+ got = ' '.join(got.split())
+ want = ' '.join(want.split())
+ if got == want:
+ return True
+
+ # The ELLIPSIS flag says to let the sequence "..." in `want`
+ # match any substring in `got`.
+ if optionflags & ELLIPSIS:
+ if _ellipsis_match(want, got):
+ return True
+
+ # We didn't find any match; return false.
+ return False
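+
+ # Illustrative sketch (not executed): with NORMALIZE_WHITESPACE set,
+ #     OutputChecker().check_output("1  2   3\n", "1 2 3\n", NORMALIZE_WHITESPACE)
+ # returns True, since both strings collapse to "1 2 3" in the
+ # whitespace-normalization branch above.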
+
+ # Should we do a fancy diff?
+ def _do_a_fancy_diff(self, want, got, optionflags):
+ # Not unless they asked for a fancy diff.
+ if not optionflags & (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF):
+ return False
+
+ # If expected output uses ellipsis, a meaningful fancy diff is
+ # too hard ... or maybe not. In two real-life failures Tim saw,
+ # a diff was a major help anyway, so this is commented out.
+ # [todo] _ellipsis_match() knows which pieces do and don't match,
+ # and could be the basis for a kick-ass diff in this case.
+ ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+ ## return False
+
+ # ndiff does intraline difference marking, so can be useful even
+ # for 1-line differences.
+ if optionflags & REPORT_NDIFF:
+ return True
+
+ # The other diff types need at least a few lines to be helpful.
+ return want.count('\n') > 2 and got.count('\n') > 2
+
+ def output_difference(self, example, got, optionflags):
+ """
+ Return a string describing the differences between the
+ expected output for a given example (`example`) and the actual
+ output (`got`). `optionflags` is the set of option flags used
+ to compare `want` and `got`.
+ """
+ want = example.want
+ # If <BLANKLINE>s are being used, then replace blank lines
+ # with <BLANKLINE> in the actual output string.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+ # Check if we should use diff.
+ if self._do_a_fancy_diff(want, got, optionflags):
+ # Split want & got into lines.
+ want_lines = want.splitlines(keepends=True)
+ got_lines = got.splitlines(keepends=True)
+ # Use difflib to find their differences.
+ if optionflags & REPORT_UDIFF:
+ diff = difflib.unified_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'unified diff with -expected +actual'
+ elif optionflags & REPORT_CDIFF:
+ diff = difflib.context_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'context diff with expected followed by actual'
+ elif optionflags & REPORT_NDIFF:
+ engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+ diff = list(engine.compare(want_lines, got_lines))
+ kind = 'ndiff with -expected +actual'
+ else:
+ assert 0, 'Bad diff option'
+ # Remove trailing whitespace on diff output.
+ diff = [line.rstrip() + '\n' for line in diff]
+ return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+ # If we're not using diff, then simply list the expected
+ # output followed by the actual output.
+ if want and got:
+ return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+ elif want:
+ return 'Expected:\n%sGot nothing\n' % _indent(want)
+ elif got:
+ return 'Expected nothing\nGot:\n%s' % _indent(got)
+ else:
+ return 'Expected nothing\nGot nothing\n'
+
+class DocTestFailure(Exception):
+ """A DocTest example has failed in debugging mode.
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - got: the actual output
+ """
+ def __init__(self, test, example, got):
+ self.test = test
+ self.example = example
+ self.got = got
+
+ def __str__(self):
+ return str(self.test)
+
+class UnexpectedException(Exception):
+ """A DocTest example has encountered an unexpected exception
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - exc_info: the exception info
+ """
+ def __init__(self, test, example, exc_info):
+ self.test = test
+ self.example = example
+ self.exc_info = exc_info
+
+ def __str__(self):
+ return str(self.test)
+
+class DebugRunner(DocTestRunner):
+ r"""Run doc tests but raise an exception as soon as there is a failure.
+
+ If an unexpected exception occurs, an UnexpectedException is raised.
+ It contains the test, the example, and the original exception:
+
+ >>> runner = DebugRunner(verbose=False)
+ >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+ ... {}, 'foo', 'foo.py', 0)
+ >>> try:
+ ... runner.run(test)
+ ... except UnexpectedException as f:
+ ... failure = f
+
+ >>> failure.test is test
+ True
+
+ >>> failure.example.want
+ '42\n'
+
+ >>> exc_info = failure.exc_info
+ >>> raise exc_info[1] # Already has the traceback
+ Traceback (most recent call last):
+ ...
+ KeyError
+
+ We wrap the original exception to give the calling application
+ access to the test and example information.
+
+ If the output doesn't match, then a DocTestFailure is raised:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 1
+ ... >>> x
+ ... 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> try:
+ ... runner.run(test)
+ ... except DocTestFailure as f:
+ ... failure = f
+
+ DocTestFailure objects provide access to the test:
+
+ >>> failure.test is test
+ True
+
+ As well as to the example:
+
+ >>> failure.example.want
+ '2\n'
+
+ and the actual output:
+
+ >>> failure.got
+ '1\n'
+
+ If a failure or error occurs, the globals are left intact:
+
+ >>> del test.globs['__builtins__']
+ >>> test.globs
+ {'x': 1}
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 2
+ ... >>> raise KeyError
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> runner.run(test)
+ Traceback (most recent call last):
+ ...
+ doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
+
+ >>> del test.globs['__builtins__']
+ >>> test.globs
+ {'x': 2}
+
+ But the globals are cleared if there is no error:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> runner.run(test)
+ TestResults(failed=0, attempted=1)
+
+ >>> test.globs
+ {}
+
+ """
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ r = DocTestRunner.run(self, test, compileflags, out, False)
+ if clear_globs:
+ test.globs.clear()
+ return r
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ raise UnexpectedException(test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, report=True,
+ optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See help(doctest) for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ SKIP
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return TestResults(runner.failures, runner.tries)
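+
+ # Illustrative usage sketch (not executed here): the common pattern is
+ #     if __name__ == "__main__":
+ #         import doctest
+ #         doctest.testmod()
+ # which runs every docstring example reachable from the calling module
+ # and returns a TestResults(failed, attempted) named tuple.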
+
+def testfile(filename, module_relative=True, name=None, package=None,
+ globs=None, verbose=None, report=True, optionflags=0,
+ extraglobs=None, raise_on_error=False, parser=DocTestParser(),
+ encoding=None):
+ """
+ Test examples in the given file. Return (#failures, #tests).
+
+ Optional keyword arg "module_relative" specifies how filenames
+ should be interpreted:
+
+ - If "module_relative" is True (the default), then "filename"
+ specifies a module-relative path. By default, this path is
+ relative to the calling module's directory; but if the
+ "package" argument is specified, then it is relative to that
+ package. To ensure os-independence, "filename" should use
+ "/" characters to separate path segments, and should not
+ be an absolute path (i.e., it may not begin with "/").
+
+ - If "module_relative" is False, then "filename" specifies an
+ os-specific path. The path may be absolute or relative (to
+ the current working directory).
+
+ Optional keyword arg "name" gives the name of the test; by default
+ use the file's basename.
+
+ Optional keyword argument "package" is a Python package or the
+ name of a Python package whose directory should be used as the
+ base directory for a module relative filename. If no package is
+ specified, then the calling module's directory is used as the base
+ directory for module relative filenames. It is an error to
+ specify "package" if "module_relative" is False.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use {}. A copy of this dict
+ is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. Possible values (see the docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ SKIP
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Optional keyword arg "parser" specifies a DocTestParser (or
+ subclass) that should be used to extract tests from the files.
+
+ Optional keyword arg "encoding" specifies an encoding that should
+ be used to convert the file to unicode.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path
+ text, filename = _load_testfile(filename, package, module_relative,
+ encoding or "utf-8")
+
+ # If no name was given, then use the file's name.
+ if name is None:
+ name = os.path.basename(filename)
+
+ # Assemble the globals.
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+ if '__name__' not in globs:
+ globs['__name__'] = '__main__'
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ # Read the file, convert it to a test, and run it.
+ test = parser.get_doctest(text, globs, name, filename, 0)
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return TestResults(runner.failures, runner.tries)
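+
+ # Illustrative usage sketch (hypothetical file "example.txt"; not
+ # executed): doctest.testfile("example.txt", optionflags=ELLIPSIS)
+ # treats the whole file as a single DocTest named after its basename
+ # and returns TestResults(failed, attempted).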
+
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+ compileflags=None, optionflags=0):
+ """
+ Test examples in the given object's docstring (`f`), using `globs`
+ as globals. Optional argument `name` is used in failure messages.
+ If the optional argument `verbose` is true, then generate output
+ even if there are no failures.
+
+ `compileflags` gives the set of flags that should be used by the
+ Python compiler when running the examples. If not specified, then
+ it will default to the set of future-import flags that apply to
+ `globs`.
+
+ Optional keyword arg `optionflags` specifies options for the
+ testing and output. See the documentation for `testmod` for more
+ information.
+ """
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(verbose=verbose, recurse=False)
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+ for test in finder.find(f, name, globs=globs):
+ runner.run(test, compileflags=compileflags)
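+
+ # Illustrative sketch (hypothetical function `f`; not executed):
+ #     run_docstring_examples(f, globals(), verbose=True, name="f")
+ # runs only f's own docstring examples (recurse=False above) and reports
+ # through a default DocTestRunner.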
+
+######################################################################
+## 7. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+ """Sets the unittest option flags.
+
+ The old flag is returned so that a runner could restore the old
+ value if it wished to:
+
+ >>> import doctest
+ >>> old = doctest._unittest_reportflags
+ >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE) == old
+ True
+
+ >>> doctest._unittest_reportflags == (REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE)
+ True
+
+ Only reporting flags can be set:
+
+ >>> doctest.set_unittest_reportflags(ELLIPSIS)
+ Traceback (most recent call last):
+ ...
+ ValueError: ('Only reporting flags allowed', 8)
+
+ >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE)
+ True
+ """
+ global _unittest_reportflags
+
+ if (flags & REPORTING_FLAGS) != flags:
+ raise ValueError("Only reporting flags allowed", flags)
+ old = _unittest_reportflags
+ _unittest_reportflags = flags
+ return old
+
+
+class DocTestCase(unittest.TestCase):
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None):
+
+ unittest.TestCase.__init__(self)
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ def setUp(self):
+ test = self._dt_test
+
+ if self._dt_setUp is not None:
+ self._dt_setUp(test)
+
+ def tearDown(self):
+ test = self._dt_test
+
+ if self._dt_tearDown is not None:
+ self._dt_tearDown(test)
+
+ test.globs.clear()
+
+ def runTest(self):
+ test = self._dt_test
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ runner = DocTestRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False)
+
+ try:
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(
+ test, out=new.write, clear_globs=False)
+ finally:
+ sys.stdout = old
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def format_failure(self, err):
+ test = self._dt_test
+ if test.lineno is None:
+ lineno = 'unknown line number'
+ else:
+ lineno = '%s' % test.lineno
+ lname = '.'.join(test.name.split('.')[-1:])
+ return ('Failed doctest test for %s\n'
+ ' File "%s", line %s, in %s\n\n%s'
+ % (test.name, test.filename, lineno, lname, err)
+ )
+
+ def debug(self):
+ r"""Run the test case without results and without catching exceptions
+
+ The unit test framework includes a debug method on test cases
+ and test suites to support post-mortem debugging. The test code
+ is run in such a way that errors are not caught. This way a
+ caller can catch the errors and initiate post-mortem debugging.
+
+ The DocTestCase provides a debug method that raises
+ UnexpectedException errors if there is an unexpected
+ exception:
+
+ >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+ ... {}, 'foo', 'foo.py', 0)
+ >>> case = DocTestCase(test)
+ >>> try:
+ ... case.debug()
+ ... except UnexpectedException as f:
+ ... failure = f
+
+ The UnexpectedException contains the test, the example, and
+ the original exception:
+
+ >>> failure.test is test
+ True
+
+ >>> failure.example.want
+ '42\n'
+
+ >>> exc_info = failure.exc_info
+ >>> raise exc_info[1] # Already has the traceback
+ Traceback (most recent call last):
+ ...
+ KeyError
+
+ If the output doesn't match, then a DocTestFailure is raised:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 1
+ ... >>> x
+ ... 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+ >>> case = DocTestCase(test)
+
+ >>> try:
+ ... case.debug()
+ ... except DocTestFailure as f:
+ ... failure = f
+
+ DocTestFailure objects provide access to the test:
+
+ >>> failure.test is test
+ True
+
+ As well as to the example:
+
+ >>> failure.example.want
+ '2\n'
+
+ and the actual output:
+
+ >>> failure.got
+ '1\n'
+
+ """
+
+ self.setUp()
+ runner = DebugRunner(optionflags=self._dt_optionflags,
+ checker=self._dt_checker, verbose=False)
+ runner.run(self._dt_test, clear_globs=False)
+ self.tearDown()
+
+ def id(self):
+ return self._dt_test.name
+
+ def __eq__(self, other):
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return self._dt_test == other._dt_test and \
+ self._dt_optionflags == other._dt_optionflags and \
+ self._dt_setUp == other._dt_setUp and \
+ self._dt_tearDown == other._dt_tearDown and \
+ self._dt_checker == other._dt_checker
+
+ def __hash__(self):
+ return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
+ self._dt_checker))
+
+ def __repr__(self):
+ name = self._dt_test.name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return "Doctest: " + self._dt_test.name
+
+class SkipDocTestCase(DocTestCase):
+ def __init__(self, module):
+ self.module = module
+ DocTestCase.__init__(self, None)
+
+ def setUp(self):
+ self.skipTest("DocTestSuite will not work with -O2 and above")
+
+ def test_skip(self):
+ pass
+
+ def shortDescription(self):
+ return "Skipping tests from %s" % self.module.__name__
+
+ __str__ = shortDescription
+
+
+class _DocTestSuite(unittest.TestSuite):
+
+ def _removeTestAtIndex(self, index):
+ pass
+
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+ """
+ Convert doctest tests for a module to a unittest test suite.
+
+ This converts each documentation string in a module that
+ contains doctest tests to a unittest test case. If any of the
+ tests in a doc string fail, then the test case fails. An exception
+ is raised showing the name of the file containing the test and a
+ (sometimes approximate) line number.
+
+ The `module` argument provides the module to be tested. The argument
+ can be either a module or a module name.
+
+ If no argument is given, the calling module is used.
+
+ A number of options may be provided as keyword arguments:
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+ """
+
+ if test_finder is None:
+ test_finder = DocTestFinder()
+
+ module = _normalize_module(module)
+ tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+
+ if not tests and sys.flags.optimize >= 2:
+ # Skip doctests when running with -O2
+ suite = _DocTestSuite()
+ suite.addTest(SkipDocTestCase(module))
+ return suite
+
+ tests.sort()
+ suite = _DocTestSuite()
+
+ for test in tests:
+ if len(test.examples) == 0:
+ continue
+ if not test.filename:
+ filename = module.__file__
+ if filename[-4:] == ".pyc":
+ filename = filename[:-1]
+ test.filename = filename
+ suite.addTest(DocTestCase(test, **options))
+
+ return suite
+
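+# Usage sketch for DocTestSuite above (illustrative; ``mypkg.calc`` is a
+# hypothetical module name). The usual way to hook a module's doctests into
+# unittest discovery is the load_tests protocol:
+#
+#     import doctest
+#
+#     def load_tests(loader, tests, ignore):
+#         tests.addTests(doctest.DocTestSuite('mypkg.calc'))
+#         return tests
+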
+class DocFileCase(DocTestCase):
+
+ def id(self):
+ return '_'.join(self._dt_test.name.split('.'))
+
+ def __repr__(self):
+ return self._dt_test.filename
+ __str__ = __repr__
+
+ def format_failure(self, err):
+ return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
+ % (self._dt_test.name, self._dt_test.filename, err)
+ )
+
+def DocFileTest(path, module_relative=True, package=None,
+ globs=None, parser=DocTestParser(),
+ encoding=None, **options):
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path.
+ doc, path = _load_testfile(path, package, module_relative,
+ encoding or "utf-8")
+
+ if "__file__" not in globs:
+ globs["__file__"] = path
+
+ # Find the file and read it.
+ name = os.path.basename(path)
+
+ # Convert it to a test, and wrap it in a DocFileCase.
+ test = parser.get_doctest(doc, globs, name, path, 0)
+ return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+
+ encoding
+ An encoding that will be used to convert the files to unicode.
+ """
+ suite = _DocTestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
+
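+# Usage sketch for DocFileSuite above (illustrative; the file names are
+# hypothetical):
+#
+#     import doctest, unittest
+#
+#     suite = doctest.DocFileSuite('docs/usage.txt', 'docs/api.txt',
+#                                  optionflags=doctest.ELLIPSIS)
+#     unittest.TextTestRunner().run(suite)
+#
+# With the default module_relative=True the paths are resolved relative to
+# the calling module's directory (or to ``package`` if it is given).
+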
+######################################################################
+## 8. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ r"""Extract script from text with examples.
+
+ Converts text with examples to a Python script. Example input is
+ converted to regular code. Example output and all other words
+ are converted to comments:
+
+ >>> text = '''
+ ... Here are examples of simple math.
+ ...
+ ... Python has super accurate integer addition
+ ...
+ ... >>> 2 + 2
+ ... 5
+ ...
+ ... And very friendly error messages:
+ ...
+ ... >>> 1/0
+ ... To Infinity
+ ... And
+ ... Beyond
+ ...
+ ... You can use logic if you want:
+ ...
+ ... >>> if 0:
+ ... ... blah
+ ... ... blah
+ ... ...
+ ...
+ ... Ho hum
+ ... '''
+
+ >>> print(script_from_examples(text))
+ # Here are examples of simple math.
+ #
+ # Python has super accurate integer addition
+ #
+ 2 + 2
+ # Expected:
+ ## 5
+ #
+ # And very friendly error messages:
+ #
+ 1/0
+ # Expected:
+ ## To Infinity
+ ## And
+ ## Beyond
+ #
+ # You can use logic if you want:
+ #
+ if 0:
+ blah
+ blah
+ #
+ # Ho hum
+
+ """
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+ return '\n'.join(output) + '\n'
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+ """Debug a single doctest docstring, in argument `src`'"""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ exec(src, globs, globs)
+ except:
+ print(sys.exc_info()[1])
+ p = pdb.Pdb(nosigint=True)
+ p.reset()
+ p.interaction(None, sys.exc_info()[2])
+ else:
+ pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
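+# Usage sketch for debug() above (illustrative; ``mymod`` and ``mymod.f`` are
+# hypothetical names):
+#
+#     import doctest
+#     doctest.debug('mymod', 'mymod.f')           # run the examples under pdb
+#     doctest.debug('mymod', 'mymod.f', pm=True)  # pdb only if an example raises
+#
+# ``name`` is the dotted name of the object whose docstring holds the tests,
+# as reported by DocTestFinder.
+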
+######################################################################
+## 9. Example Usage
+######################################################################
+class _TestClass:
+ """
+ A pointless class, for sanity-checking of docstring testing.
+
+ Methods:
+ square()
+ get()
+
+ >>> _TestClass(13).get() + _TestClass(-12).get()
+ 1
+ >>> hex(_TestClass(13).square().get())
+ '0xa9'
+ """
+
+ def __init__(self, val):
+ """val -> _TestClass object with associated value val.
+
+ >>> t = _TestClass(123)
+ >>> print(t.get())
+ 123
+ """
+
+ self.val = val
+
+ def square(self):
+ """square() -> square TestClass's associated value
+
+ >>> _TestClass(13).square().get()
+ 169
+ """
+
+ self.val = self.val ** 2
+ return self
+
+ def get(self):
+ """get() -> return TestClass's associated value.
+
+ >>> x = _TestClass(-42)
+ >>> print(x.get())
+ -42
+ """
+
+ return self.val
+
+__test__ = {"_TestClass": _TestClass,
+ "string": r"""
+ Example of a string object, searched as-is.
+ >>> x = 1; y = 2
+ >>> x + y, x * y
+ (3, 2)
+ """,
+
+ "bool-int equivalence": r"""
+ In 2.2, boolean expressions displayed
+ 0 or 1. By default, we still accept
+ them. This can be disabled by passing
+ DONT_ACCEPT_TRUE_FOR_1 to the new
+ optionflags argument.
+ >>> 4 == 4
+ 1
+ >>> 4 == 4
+ True
+ >>> 4 > 4
+ 0
+ >>> 4 > 4
+ False
+ """,
+
+ "blank lines": r"""
+ Blank lines can be marked with <BLANKLINE>:
+ >>> print('foo\n\nbar\n')
+ foo
+ <BLANKLINE>
+ bar
+ <BLANKLINE>
+ """,
+
+ "ellipsis": r"""
+ If the ellipsis flag is used, then '...' can be used to
+ elide substrings in the desired output:
+ >>> print(list(range(1000))) #doctest: +ELLIPSIS
+ [0, 1, 2, ..., 999]
+ """,
+
+ "whitespace normalization": r"""
+ If the whitespace normalization flag is used, then
+ differences in whitespace are ignored.
+ >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29]
+ """,
+ }
+
+
+def _test():
+ import argparse
+
+ parser = argparse.ArgumentParser(description="doctest runner")
+ parser.add_argument('-v', '--verbose', action='store_true', default=False,
+ help='print very verbose output for all tests')
+ parser.add_argument('-o', '--option', action='append',
+ choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
+ help=('specify a doctest option flag to apply'
+ ' to the test run; may be specified more'
+ ' than once to apply multiple options'))
+ parser.add_argument('-f', '--fail-fast', action='store_true',
+ help=('stop running tests after first failure (this'
+ ' is a shorthand for -o FAIL_FAST, and is'
+ ' in addition to any other -o options)'))
+ parser.add_argument('file', nargs='+',
+ help='file containing the tests to run')
+ args = parser.parse_args()
+ testfiles = args.file
+ # Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
+ # but since we are using argparse we are passing it manually now.
+ verbose = args.verbose
+ options = 0
+ for option in args.option:
+ options |= OPTIONFLAGS_BY_NAME[option]
+ if args.fail_fast:
+ options |= FAIL_FAST
+ for filename in testfiles:
+ if filename.endswith(".py"):
+ # It is a module -- insert its dir into sys.path and try to
+ # import it. If it is part of a package, that possibly
+ # won't work because of package imports.
+ dirname, filename = os.path.split(filename)
+ sys.path.insert(0, dirname)
+ m = __import__(filename[:-3])
+ del sys.path[0]
+ failures, _ = testmod(m, verbose=verbose, optionflags=options)
+ else:
+ failures, _ = testfile(filename, module_relative=False,
+ verbose=verbose, optionflags=options)
+ if failures:
+ return 1
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(_test())
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dummy_threading.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dummy_threading.py
new file mode 100644
index 00000000..1bb7eee3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/dummy_threading.py
@@ -0,0 +1,78 @@
+"""Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
+
+The module ``_dummy_threading`` is added to ``sys.modules`` in order
+to not have ``threading`` considered imported. Had ``threading`` been
+directly imported it would have made all subsequent imports succeed
+regardless of whether ``_thread`` was available which is not desired.
+
+"""
+from sys import modules as sys_modules
+
+import _dummy_thread
+
+# Declaring now so as to not have to nest ``try``s to get proper clean-up.
+holding_thread = False
+holding_threading = False
+holding__threading_local = False
+
+try:
+ # Could have checked if ``_thread`` was not in sys.modules and gone
+ # a different route, but decided to mirror technique used with
+ # ``threading`` below.
+ if '_thread' in sys_modules:
+ held_thread = sys_modules['_thread']
+ holding_thread = True
+ # Must have some module named ``_thread`` that implements its API
+ # in order to initially import ``threading``.
+ sys_modules['_thread'] = sys_modules['_dummy_thread']
+
+ if 'threading' in sys_modules:
+ # If ``threading`` is already imported, might as well prevent
+ # trying to import it more than needed by saving it if it is
+ # already imported before deleting it.
+ held_threading = sys_modules['threading']
+ holding_threading = True
+ del sys_modules['threading']
+
+ if '_threading_local' in sys_modules:
+ # If ``_threading_local`` is already imported, might as well prevent
+ # trying to import it more than needed by saving it if it is
+ # already imported before deleting it.
+ held__threading_local = sys_modules['_threading_local']
+ holding__threading_local = True
+ del sys_modules['_threading_local']
+
+ import threading
+ # Need a copy of the code kept somewhere...
+ sys_modules['_dummy_threading'] = sys_modules['threading']
+ del sys_modules['threading']
+ sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
+ del sys_modules['_threading_local']
+ from _dummy_threading import *
+ from _dummy_threading import __all__
+
+finally:
+ # Put back ``threading`` if we overwrote earlier
+
+ if holding_threading:
+ sys_modules['threading'] = held_threading
+ del held_threading
+ del holding_threading
+
+ # Put back ``_threading_local`` if we overwrote earlier
+
+ if holding__threading_local:
+ sys_modules['_threading_local'] = held__threading_local
+ del held__threading_local
+ del holding__threading_local
+
+ # Put back ``_thread`` if we overwrote, else del the entry we made
+ if holding_thread:
+ sys_modules['_thread'] = held_thread
+ del held_thread
+ else:
+ del sys_modules['_thread']
+ del holding_thread
+
+ del _dummy_thread
+ del sys_modules
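+
+# Usage sketch (illustrative): the documented idiom is to fall back to this
+# module when the real ``threading`` cannot be imported; "threads" then run
+# serially in the calling thread:
+#
+#     try:
+#         import threading
+#     except ImportError:
+#         import dummy_threading as threading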
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__init__.py
new file mode 100644
index 00000000..fae87243
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__init__.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""A package for parsing, handling, and generating email messages."""
+
+__all__ = [
+ 'base64mime',
+ 'charset',
+ 'encoders',
+ 'errors',
+ 'feedparser',
+ 'generator',
+ 'header',
+ 'iterators',
+ 'message',
+ 'message_from_file',
+ 'message_from_binary_file',
+ 'message_from_string',
+ 'message_from_bytes',
+ 'mime',
+ 'parser',
+ 'quoprimime',
+ 'utils',
+ ]
+
+
+
+# Some convenience routines. Don't import Parser and Message as side-effects
+# of importing email since those cascadingly import most of the rest of the
+# email package.
+def message_from_string(s, *args, **kws):
+ """Parse a string into a Message object model.
+
+ Optional _class and strict are passed to the Parser constructor.
+ """
+ from email.parser import Parser
+ return Parser(*args, **kws).parsestr(s)
+
+def message_from_bytes(s, *args, **kws):
+ """Parse a bytes string into a Message object model.
+
+ Optional _class and strict are passed to the Parser constructor.
+ """
+ from email.parser import BytesParser
+ return BytesParser(*args, **kws).parsebytes(s)
+
+def message_from_file(fp, *args, **kws):
+ """Read a file and parse its contents into a Message object model.
+
+ Optional _class and strict are passed to the Parser constructor.
+ """
+ from email.parser import Parser
+ return Parser(*args, **kws).parse(fp)
+
+def message_from_binary_file(fp, *args, **kws):
+ """Read a binary file and parse its contents into a Message object model.
+
+ Optional _class and strict are passed to the Parser constructor.
+ """
+ from email.parser import BytesParser
+ return BytesParser(*args, **kws).parse(fp)
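+
+# Usage sketch (illustrative; ``msg.eml`` is a hypothetical file name):
+#
+#     import email
+#     with open('msg.eml', 'rb') as fp:
+#         msg = email.message_from_binary_file(fp)
+#     print(msg['Subject'])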
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/__init__.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..4c3b1e4b
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_encoded_words.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_encoded_words.cpython-37.pyc
new file mode 100644
index 00000000..d05a31ce
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_encoded_words.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_parseaddr.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_parseaddr.cpython-37.pyc
new file mode 100644
index 00000000..9a6eca24
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_parseaddr.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_policybase.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_policybase.cpython-37.pyc
new file mode 100644
index 00000000..586606d8
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/_policybase.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/base64mime.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/base64mime.cpython-37.pyc
new file mode 100644
index 00000000..ed4f9759
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/base64mime.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/charset.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/charset.cpython-37.pyc
new file mode 100644
index 00000000..1302ebc4
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/charset.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/encoders.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/encoders.cpython-37.pyc
new file mode 100644
index 00000000..0be76db6
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/encoders.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/errors.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/errors.cpython-37.pyc
new file mode 100644
index 00000000..1acb541f
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/errors.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/feedparser.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/feedparser.cpython-37.pyc
new file mode 100644
index 00000000..1fa2a404
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/feedparser.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/generator.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/generator.cpython-37.pyc
new file mode 100644
index 00000000..3ab330ba
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/generator.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/header.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/header.cpython-37.pyc
new file mode 100644
index 00000000..ee8cab81
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/header.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/iterators.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/iterators.cpython-37.pyc
new file mode 100644
index 00000000..b2f0487e
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/iterators.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/message.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/message.cpython-37.pyc
new file mode 100644
index 00000000..50d0f987
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/message.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/parser.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/parser.cpython-37.pyc
new file mode 100644
index 00000000..81a540fe
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/parser.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/quoprimime.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/quoprimime.cpython-37.pyc
new file mode 100644
index 00000000..391d11c8
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/quoprimime.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/utils.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 00000000..65fa97f9
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/__pycache__/utils.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_encoded_words.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_encoded_words.py
new file mode 100644
index 00000000..295ae7eb
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_encoded_words.py
@@ -0,0 +1,233 @@
+""" Routines for manipulating RFC2047 encoded words.
+
+This is currently a package-private API, but will be considered for promotion
+to a public API if there is demand.
+
+"""
+
+# An encoded word looks like this:
+#
+# =?charset[*lang]?cte?encoded_string?=
+#
+# For more information about charset, see the charset module. Here it is one
+# of the preferred MIME charset names (hopefully; you never know when parsing).
+# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case). In
+# theory other letters could be used for other encodings, but in practice this
+# (almost?) never happens. There could be a public API for adding entries
+# to the CTE tables, but YAGNI for now. 'q' is Quoted Printable, 'b' is
+# Base64. The meaning of encoded_string should be obvious. 'lang' is optional
+# as indicated by the brackets (they are not part of the syntax) but is almost
+# never encountered in practice.
+#
+# The general interface for a CTE decoder is that it takes the encoded_string
+# as its argument, and returns a tuple (cte_decoded_string, defects). The
+# cte_decoded_string is the original binary that was encoded using the
+# specified cte. 'defects' is a list of MessageDefect instances indicating any
+# problems encountered during conversion. 'charset' and 'lang' are the
+# corresponding strings extracted from the EW, case preserved.
+#
+# The general interface for a CTE encoder is that it takes a binary sequence
+# as input and returns the cte_encoded_string, which is an ascii-only string.
+#
+# Each decoder must also supply a length function that takes the binary
+# sequence as its argument and returns the length of the resulting encoded
+# string.
+#
+# The main API functions for the module are decode, which calls the decoder
+# referenced by the cte specifier, and encode, which adds the appropriate
+# RFC 2047 "chrome" to the encoded string, and can optionally automatically
+# select the shortest possible encoding. See their docstrings below for
+# details.
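+#
+# A concrete example (charset utf-8, 'q' CTE, no language tag):
+#
+#     =?utf-8?q?caf=C3=A9?=
+#
+# decodes to the string 'café'.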
+
+import re
+import base64
+import binascii
+import functools
+from string import ascii_letters, digits
+from email import errors
+
+__all__ = ['decode_q',
+ 'encode_q',
+ 'decode_b',
+ 'encode_b',
+ 'len_q',
+ 'len_b',
+ 'decode',
+ 'encode',
+ ]
+
+#
+# Quoted Printable
+#
+
+# regex based decoder.
+_q_byte_subber = functools.partial(re.compile(br'=([a-fA-F0-9]{2})').sub,
+ lambda m: bytes.fromhex(m.group(1).decode()))
+
+def decode_q(encoded):
+ encoded = encoded.replace(b'_', b' ')
+ return _q_byte_subber(encoded), []
+
+
+# dict mapping bytes to their encoded form
+class _QByteMap(dict):
+
+ safe = b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')
+
+ def __missing__(self, key):
+ if key in self.safe:
+ self[key] = chr(key)
+ else:
+ self[key] = "={:02X}".format(key)
+ return self[key]
+
+_q_byte_map = _QByteMap()
+
+# In headers spaces are mapped to '_'.
+_q_byte_map[ord(' ')] = '_'
+
+def encode_q(bstring):
+ return ''.join(_q_byte_map[x] for x in bstring)
+
+def len_q(bstring):
+ return sum(len(_q_byte_map[x]) for x in bstring)
+
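+# Illustrative sketch of the resulting mapping: safe bytes pass through,
+# spaces become '_', and everything else becomes an =XX escape.
+#
+#     encode_q(b'hello world')    # -> 'hello_world'
+#     encode_q(b'caf\xc3\xa9')    # -> 'caf=C3=A9'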
+
+#
+# Base64
+#
+
+def decode_b(encoded):
+ # First try encoding with validate=True, fixing the padding if needed.
+ # This will succeed only if encoded includes no invalid characters.
+ pad_err = len(encoded) % 4
+ missing_padding = b'==='[:4-pad_err] if pad_err else b''
+ try:
+ return (
+ base64.b64decode(encoded + missing_padding, validate=True),
+ [errors.InvalidBase64PaddingDefect()] if pad_err else [],
+ )
+ except binascii.Error:
+ # Since we had correct padding, this is likely an invalid char error.
+ #
+ # The non-alphabet characters are ignored as far as padding
+ # goes, but we don't know how many there are. So try without adding
+ # padding to see if it works.
+ try:
+ return (
+ base64.b64decode(encoded, validate=False),
+ [errors.InvalidBase64CharactersDefect()],
+ )
+ except binascii.Error:
+ # Add as much padding as could possibly be necessary (extra padding
+ # is ignored).
+ try:
+ return (
+ base64.b64decode(encoded + b'==', validate=False),
+ [errors.InvalidBase64CharactersDefect(),
+ errors.InvalidBase64PaddingDefect()],
+ )
+ except binascii.Error:
+ # This only happens when the encoded string's length is 1 more
+ # than a multiple of 4, which is invalid.
+ #
+ # bpo-27397: Just return the encoded string since there's no
+ # way to decode.
+ return encoded, [errors.InvalidBase64LengthDefect()]
+
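+# Illustrative sketch for decode_b above: well-formed input decodes cleanly,
+# while missing padding is repaired and reported as a defect.
+#
+#     decode_b(b'aGVsbG8=')    # -> (b'hello', [])
+#     decode_b(b'aGVsbG8')     # -> (b'hello', [InvalidBase64PaddingDefect()])
+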
+def encode_b(bstring):
+ return base64.b64encode(bstring).decode('ascii')
+
+def len_b(bstring):
+ groups_of_3, leftover = divmod(len(bstring), 3)
+ # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+ return groups_of_3 * 4 + (4 if leftover else 0)
+
+
+_cte_decoders = {
+ 'q': decode_q,
+ 'b': decode_b,
+ }
+
+def decode(ew):
+ """Decode encoded word and return (string, charset, lang, defects) tuple.
+
+ An RFC 2047/2243 encoded word has the form:
+
+ =?charset*lang?cte?encoded_string?=
+
+ where '*lang' may be omitted but the other parts may not be.
+
+ This function expects exactly such a string (that is, it does not check the
+ syntax and may raise errors if the string is not well formed), and returns
+ the encoded_string decoded first from its Content Transfer Encoding and
+ then from the resulting bytes into unicode using the specified charset. If
+ the cte-decoded string does not successfully decode using the specified
+ character set, a defect is added to the defects list and the unknown octets
+ are replaced by the unicode 'unknown' character \\uFFFD.
+
+ The specified charset and language are returned. The default for language,
+ which is rarely if ever encountered, is the empty string.
+
+ """
+ _, charset, cte, cte_string, _ = ew.split('?')
+ charset, _, lang = charset.partition('*')
+ cte = cte.lower()
+ # Recover the original bytes and do CTE decoding.
+ bstring = cte_string.encode('ascii', 'surrogateescape')
+ bstring, defects = _cte_decoders[cte](bstring)
+ # Turn the CTE decoded bytes into unicode.
+ try:
+ string = bstring.decode(charset)
+ except UnicodeError:
+ defects.append(errors.UndecodableBytesDefect("Encoded word "
+ "contains bytes not decodable using {} charset".format(charset)))
+ string = bstring.decode(charset, 'surrogateescape')
+ except LookupError:
+ string = bstring.decode('ascii', 'surrogateescape')
+ if charset.lower() != 'unknown-8bit':
+ defects.append(errors.CharsetError("Unknown charset {} "
+ "in encoded word; decoded as unknown bytes".format(charset)))
+ return string, charset, lang, defects
+
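+# Illustrative round-trip sketch for decode() above (package-private API):
+#
+#     decode('=?utf-8?q?caf=C3=A9?=')
+#     # -> ('café', 'utf-8', '', [])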
+
+_cte_encoders = {
+ 'q': encode_q,
+ 'b': encode_b,
+ }
+
+_cte_encode_length = {
+ 'q': len_q,
+ 'b': len_b,
+ }
+
+def encode(string, charset='utf-8', encoding=None, lang=''):
+ """Encode string using the CTE encoding that produces the shorter result.
+
+ Produces an RFC 2047/2243 encoded word of the form:
+
+ =?charset*lang?cte?encoded_string?=
+
+ where '*lang' is omitted unless the 'lang' parameter is given a value.
+ Optional argument charset (defaults to utf-8) specifies the charset to use
+ to encode the string to binary before CTE encoding it. Optional argument
+ 'encoding' is the cte specifier for the encoding that should be used ('q'
+ or 'b'); if it is None (the default) the encoding which produces the
+ shortest encoded sequence is used, except that 'q' is preferred if it is up
+ to five characters longer. Optional argument 'lang' (default '') gives the
+ RFC 2243 language string to specify in the encoded word.
+
+ """
+ if charset == 'unknown-8bit':
+ bstring = string.encode('ascii', 'surrogateescape')
+ else:
+ bstring = string.encode(charset)
+ if encoding is None:
+ qlen = _cte_encode_length['q'](bstring)
+ blen = _cte_encode_length['b'](bstring)
+ # Bias toward q. 5 is arbitrary.
+ encoding = 'q' if qlen - blen < 5 else 'b'
+ encoded = _cte_encoders[encoding](bstring)
+ if lang:
+ lang = '*' + lang
+ return "=?{}{}?{}?{}?=".format(charset, lang, encoding, encoded)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_header_value_parser.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_header_value_parser.py
new file mode 100644
index 00000000..416da1a8
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_header_value_parser.py
@@ -0,0 +1,2816 @@
+"""Header value parser implementing various email-related RFC parsing rules.
+
+The parsing methods defined in this module implement various email-related
+parsing rules. Principal among them is RFC 5322, which is the follow-on
+to RFC 2822 and is primarily a clarification of it. It also implements
+RFC 2047 encoded word decoding.
+
+RFC 5322 goes to considerable trouble to maintain backward compatibility with
+RFC 822 in the parse phase, while cleaning up the structure on the generation
+phase. This parser supports correct RFC 5322 generation by tagging white space
+as folding white space only when folding is allowed in the non-obsolete rule
+sets. Actually, the parser is even more generous when accepting input than RFC
+5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
+Where possible, deviations from the standard are annotated on the 'defects'
+attribute of tokens that deviate.
+
+The general structure of the parser follows RFC 5322, and uses its terminology
+where there is a direct correspondence. Where the implementation requires a
+somewhat different structure than that used by the formal grammar, new terms
+that mimic the closest existing terms are used. Thus, it really helps to have
+a copy of RFC 5322 handy when studying this code.
+
+Input to the parser is a string that has already been unfolded according to
+RFC 5322 rules. According to the RFC this unfolding is the very first step, and
+this parser leaves the unfolding step to a higher level message parser, which
+will have already detected the line breaks that need unfolding while
+determining the beginning and end of each header.
+
+The output of the parser is a TokenList object, which is a list subclass. A
+TokenList is a recursive data structure. The terminal nodes of the structure
+are Terminal objects, which are subclasses of str. These do not correspond
+directly to terminal objects in the formal grammar, but are instead more
+practical higher level combinations of true terminals.
+
+All TokenList and Terminal objects have a 'value' attribute, which produces the
+semantically meaningful value of that part of the parse subtree. The value of
+all whitespace tokens (no matter how many sub-tokens they may contain) is a
+single space, as per the RFC rules. This includes 'CFWS', which is herein
+included in the general class of whitespace tokens. There is one exception to
+the rule that whitespace tokens are collapsed into single spaces in values: in
+the value of a 'bare-quoted-string' (a quoted-string with no leading or
+trailing whitespace), any whitespace that appeared between the quotation marks
+is preserved in the returned value. Note that in all Terminal strings quoted
+pairs are turned into their unquoted values.
+
+All TokenList and Terminal objects also have a string value, which attempts to
+be a "canonical" representation of the RFC-compliant form of the substring that
+produced the parsed subtree, including minimal use of quoted pair quoting.
+Whitespace runs are not collapsed.
+
+Comment tokens also have a 'content' attribute providing the string found
+between the parens (including any nested comments) with whitespace preserved.
+
+All TokenList and Terminal objects have a 'defects' attribute which is a
+possibly empty list all of the defects found while creating the token. Defects
+may appear on any token in the tree, and a composite list of all defects in the
+subtree is available through the 'all_defects' attribute of any node. (For
+Terminal nodes x.defects == x.all_defects.)
+
+Each object in a parse tree is called a 'token', and each has a 'token_type'
+attribute that gives the name from the RFC 5322 grammar that it represents.
+Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
+may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
+It is returned in place of lists of (ctext/quoted-pair) and
+(qtext/quoted-pair).
+
+XXX: provide complete list of token types.
+"""
+
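+# Illustrative sketch of the parse-tree interface described above (this is a
+# private API and the exact tree shape is an implementation detail):
+#
+#     from email._header_value_parser import get_unstructured
+#     tokens = get_unstructured('Hello =?utf-8?q?caf=C3=A9?= world')
+#     tokens.value      # -> 'Hello café world'
+#     tokens.pprint()   # dumps the nested UnstructuredTokenList/Terminal tree
+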
+import re
+import urllib # For urllib.parse.unquote
+from string import hexdigits
+from collections import OrderedDict
+from operator import itemgetter
+from email import _encoded_words as _ew
+from email import errors
+from email import utils
+
+#
+# Useful constants and functions
+#
+
+WSP = set(' \t')
+CFWS_LEADER = WSP | set('(')
+SPECIALS = set(r'()<>@,:;.\"[]')
+ATOM_ENDS = SPECIALS | WSP
+DOT_ATOM_ENDS = ATOM_ENDS - set('.')
+# '.', '"', and '(' do not end phrases in order to support obs-phrase
+PHRASE_ENDS = SPECIALS - set('."(')
+TSPECIALS = (SPECIALS | set('/?=')) - set('.')
+TOKEN_ENDS = TSPECIALS | WSP
+ASPECIALS = TSPECIALS | set("*'%")
+ATTRIBUTE_ENDS = ASPECIALS | WSP
+EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
+
+def quote_string(value):
+ return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
+
+#
+# TokenList and its subclasses
+#
+
+class TokenList(list):
+
+ token_type = None
+ syntactic_break = True
+ ew_combine_allowed = True
+
+ def __init__(self, *args, **kw):
+ super().__init__(*args, **kw)
+ self.defects = []
+
+ def __str__(self):
+ return ''.join(str(x) for x in self)
+
+ def __repr__(self):
+ return '{}({})'.format(self.__class__.__name__,
+ super().__repr__())
+
+ @property
+ def value(self):
+ return ''.join(x.value for x in self if x.value)
+
+ @property
+ def all_defects(self):
+ return sum((x.all_defects for x in self), self.defects)
+
+ def startswith_fws(self):
+ return self[0].startswith_fws()
+
+ @property
+ def as_ew_allowed(self):
+ """True if all top level tokens of this part may be RFC2047 encoded."""
+ return all(part.as_ew_allowed for part in self)
+
+ @property
+ def comments(self):
+ comments = []
+ for token in self:
+ comments.extend(token.comments)
+ return comments
+
+ def fold(self, *, policy):
+ return _refold_parse_tree(self, policy=policy)
+
+ def pprint(self, indent=''):
+ print(self.ppstr(indent=indent))
+
+ def ppstr(self, indent=''):
+ return '\n'.join(self._pp(indent=indent))
+
+ def _pp(self, indent=''):
+ yield '{}{}/{}('.format(
+ indent,
+ self.__class__.__name__,
+ self.token_type)
+ for token in self:
+ if not hasattr(token, '_pp'):
+ yield (indent + ' !! invalid element in token '
+ 'list: {!r}'.format(token))
+ else:
+ yield from token._pp(indent+' ')
+ if self.defects:
+ extra = ' Defects: {}'.format(self.defects)
+ else:
+ extra = ''
+ yield '{}){}'.format(indent, extra)
+
+
+class WhiteSpaceTokenList(TokenList):
+
+ @property
+ def value(self):
+ return ' '
+
+ @property
+ def comments(self):
+ return [x.content for x in self if x.token_type=='comment']
+
+
+class UnstructuredTokenList(TokenList):
+
+ token_type = 'unstructured'
+
+
+class Phrase(TokenList):
+
+ token_type = 'phrase'
+
+class Word(TokenList):
+
+ token_type = 'word'
+
+
+class CFWSList(WhiteSpaceTokenList):
+
+ token_type = 'cfws'
+
+
+class Atom(TokenList):
+
+ token_type = 'atom'
+
+
+class Token(TokenList):
+
+ token_type = 'token'
+ encode_as_ew = False
+
+
+class EncodedWord(TokenList):
+
+ token_type = 'encoded-word'
+ cte = None
+ charset = None
+ lang = None
+
+
+class QuotedString(TokenList):
+
+ token_type = 'quoted-string'
+
+ @property
+ def content(self):
+ for x in self:
+ if x.token_type == 'bare-quoted-string':
+ return x.value
+
+ @property
+ def quoted_value(self):
+ res = []
+ for x in self:
+ if x.token_type == 'bare-quoted-string':
+ res.append(str(x))
+ else:
+ res.append(x.value)
+ return ''.join(res)
+
+ @property
+ def stripped_value(self):
+ for token in self:
+ if token.token_type == 'bare-quoted-string':
+ return token.value
+
+
+class BareQuotedString(QuotedString):
+
+ token_type = 'bare-quoted-string'
+
+ def __str__(self):
+ return quote_string(''.join(str(x) for x in self))
+
+ @property
+ def value(self):
+ return ''.join(str(x) for x in self)
+
+
+class Comment(WhiteSpaceTokenList):
+
+ token_type = 'comment'
+
+ def __str__(self):
+ return ''.join(sum([
+ ["("],
+ [self.quote(x) for x in self],
+ [")"],
+ ], []))
+
+ def quote(self, value):
+ if value.token_type == 'comment':
+ return str(value)
+ return str(value).replace('\\', '\\\\').replace(
+ '(', r'\(').replace(
+ ')', r'\)')
+
+ @property
+ def content(self):
+ return ''.join(str(x) for x in self)
+
+ @property
+ def comments(self):
+ return [self.content]
+
+class AddressList(TokenList):
+
+ token_type = 'address-list'
+
+ @property
+ def addresses(self):
+ return [x for x in self if x.token_type=='address']
+
+ @property
+ def mailboxes(self):
+ return sum((x.mailboxes
+ for x in self if x.token_type=='address'), [])
+
+ @property
+ def all_mailboxes(self):
+ return sum((x.all_mailboxes
+ for x in self if x.token_type=='address'), [])
+
+
+class Address(TokenList):
+
+ token_type = 'address'
+
+ @property
+ def display_name(self):
+ if self[0].token_type == 'group':
+ return self[0].display_name
+
+ @property
+ def mailboxes(self):
+ if self[0].token_type == 'mailbox':
+ return [self[0]]
+ elif self[0].token_type == 'invalid-mailbox':
+ return []
+ return self[0].mailboxes
+
+ @property
+ def all_mailboxes(self):
+ if self[0].token_type == 'mailbox':
+ return [self[0]]
+ elif self[0].token_type == 'invalid-mailbox':
+ return [self[0]]
+ return self[0].all_mailboxes
+
+class MailboxList(TokenList):
+
+ token_type = 'mailbox-list'
+
+ @property
+ def mailboxes(self):
+ return [x for x in self if x.token_type=='mailbox']
+
+ @property
+ def all_mailboxes(self):
+ return [x for x in self
+ if x.token_type in ('mailbox', 'invalid-mailbox')]
+
+
+class GroupList(TokenList):
+
+ token_type = 'group-list'
+
+ @property
+ def mailboxes(self):
+ if not self or self[0].token_type != 'mailbox-list':
+ return []
+ return self[0].mailboxes
+
+ @property
+ def all_mailboxes(self):
+ if not self or self[0].token_type != 'mailbox-list':
+ return []
+ return self[0].all_mailboxes
+
+
+class Group(TokenList):
+
+ token_type = "group"
+
+ @property
+ def mailboxes(self):
+ if self[2].token_type != 'group-list':
+ return []
+ return self[2].mailboxes
+
+ @property
+ def all_mailboxes(self):
+ if self[2].token_type != 'group-list':
+ return []
+ return self[2].all_mailboxes
+
+ @property
+ def display_name(self):
+ return self[0].display_name
+
+
+class NameAddr(TokenList):
+
+ token_type = 'name-addr'
+
+ @property
+ def display_name(self):
+ if len(self) == 1:
+ return None
+ return self[0].display_name
+
+ @property
+ def local_part(self):
+ return self[-1].local_part
+
+ @property
+ def domain(self):
+ return self[-1].domain
+
+ @property
+ def route(self):
+ return self[-1].route
+
+ @property
+ def addr_spec(self):
+ return self[-1].addr_spec
+
+
+class AngleAddr(TokenList):
+
+ token_type = 'angle-addr'
+
+ @property
+ def local_part(self):
+ for x in self:
+ if x.token_type == 'addr-spec':
+ return x.local_part
+
+ @property
+ def domain(self):
+ for x in self:
+ if x.token_type == 'addr-spec':
+ return x.domain
+
+ @property
+ def route(self):
+ for x in self:
+ if x.token_type == 'obs-route':
+ return x.domains
+
+ @property
+ def addr_spec(self):
+ for x in self:
+ if x.token_type == 'addr-spec':
+ if x.local_part:
+ return x.addr_spec
+ else:
+ return quote_string(x.local_part) + x.addr_spec
+ else:
+ return '<>'
+
+
+class ObsRoute(TokenList):
+
+ token_type = 'obs-route'
+
+ @property
+ def domains(self):
+ return [x.domain for x in self if x.token_type == 'domain']
+
+
+class Mailbox(TokenList):
+
+ token_type = 'mailbox'
+
+ @property
+ def display_name(self):
+ if self[0].token_type == 'name-addr':
+ return self[0].display_name
+
+ @property
+ def local_part(self):
+ return self[0].local_part
+
+ @property
+ def domain(self):
+ return self[0].domain
+
+ @property
+ def route(self):
+ if self[0].token_type == 'name-addr':
+ return self[0].route
+
+ @property
+ def addr_spec(self):
+ return self[0].addr_spec
+
+
+class InvalidMailbox(TokenList):
+
+ token_type = 'invalid-mailbox'
+
+ @property
+ def display_name(self):
+ return None
+
+ local_part = domain = route = addr_spec = display_name
+
+
+class Domain(TokenList):
+
+ token_type = 'domain'
+ as_ew_allowed = False
+
+ @property
+ def domain(self):
+ return ''.join(super().value.split())
+
+
+class DotAtom(TokenList):
+
+ token_type = 'dot-atom'
+
+
+class DotAtomText(TokenList):
+
+ token_type = 'dot-atom-text'
+ as_ew_allowed = True
+
+
+class AddrSpec(TokenList):
+
+ token_type = 'addr-spec'
+ as_ew_allowed = False
+
+ @property
+ def local_part(self):
+ return self[0].local_part
+
+ @property
+ def domain(self):
+ if len(self) < 3:
+ return None
+ return self[-1].domain
+
+ @property
+ def value(self):
+ if len(self) < 3:
+ return self[0].value
+ return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
+
+ @property
+ def addr_spec(self):
+ nameset = set(self.local_part)
+ if len(nameset) > len(nameset-DOT_ATOM_ENDS):
+ lp = quote_string(self.local_part)
+ else:
+ lp = self.local_part
+ if self.domain is not None:
+ return lp + '@' + self.domain
+ return lp
+
+
+class ObsLocalPart(TokenList):
+
+ token_type = 'obs-local-part'
+ as_ew_allowed = False
+
+
+class DisplayName(Phrase):
+
+ token_type = 'display-name'
+ ew_combine_allowed = False
+
+ @property
+ def display_name(self):
+ res = TokenList(self)
+ if res[0].token_type == 'cfws':
+ res.pop(0)
+ else:
+ if res[0][0].token_type == 'cfws':
+ res[0] = TokenList(res[0][1:])
+ if res[-1].token_type == 'cfws':
+ res.pop()
+ else:
+ if res[-1][-1].token_type == 'cfws':
+ res[-1] = TokenList(res[-1][:-1])
+ return res.value
+
+ @property
+ def value(self):
+ quote = False
+ if self.defects:
+ quote = True
+ else:
+ for x in self:
+ if x.token_type == 'quoted-string':
+ quote = True
+ if quote:
+ pre = post = ''
+ if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
+ pre = ' '
+ if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
+ post = ' '
+ return pre+quote_string(self.display_name)+post
+ else:
+ return super().value
+
+
+class LocalPart(TokenList):
+
+ token_type = 'local-part'
+ as_ew_allowed = False
+
+ @property
+ def value(self):
+ if self[0].token_type == "quoted-string":
+ return self[0].quoted_value
+ else:
+ return self[0].value
+
+ @property
+ def local_part(self):
+ # Strip whitespace from front, back, and around dots.
+ res = [DOT]
+ last = DOT
+ last_is_tl = False
+ for tok in self[0] + [DOT]:
+ if tok.token_type == 'cfws':
+ continue
+ if (last_is_tl and tok.token_type == 'dot' and
+ last[-1].token_type == 'cfws'):
+ res[-1] = TokenList(last[:-1])
+ is_tl = isinstance(tok, TokenList)
+ if (is_tl and last.token_type == 'dot' and
+ tok[0].token_type == 'cfws'):
+ res.append(TokenList(tok[1:]))
+ else:
+ res.append(tok)
+ last = res[-1]
+ last_is_tl = is_tl
+ res = TokenList(res[1:-1])
+ return res.value
+
+
+class DomainLiteral(TokenList):
+
+ token_type = 'domain-literal'
+ as_ew_allowed = False
+
+ @property
+ def domain(self):
+ return ''.join(super().value.split())
+
+ @property
+ def ip(self):
+ for x in self:
+ if x.token_type == 'ptext':
+ return x.value
+
+
+class MIMEVersion(TokenList):
+
+ token_type = 'mime-version'
+ major = None
+ minor = None
+
+
+class Parameter(TokenList):
+
+ token_type = 'parameter'
+ sectioned = False
+ extended = False
+ charset = 'us-ascii'
+
+ @property
+ def section_number(self):
+ # Because the first token, the attribute (name) eats CFWS, the second
+ # token is always the section if there is one.
+ return self[1].number if self.sectioned else 0
+
+ @property
+ def param_value(self):
+ # This is part of the "handle quoted extended parameters" hack.
+ for token in self:
+ if token.token_type == 'value':
+ return token.stripped_value
+ if token.token_type == 'quoted-string':
+ for token in token:
+ if token.token_type == 'bare-quoted-string':
+ for token in token:
+ if token.token_type == 'value':
+ return token.stripped_value
+ return ''
+
+
+class InvalidParameter(Parameter):
+
+ token_type = 'invalid-parameter'
+
+
+class Attribute(TokenList):
+
+ token_type = 'attribute'
+
+ @property
+ def stripped_value(self):
+ for token in self:
+ if token.token_type.endswith('attrtext'):
+ return token.value
+
+class Section(TokenList):
+
+ token_type = 'section'
+ number = None
+
+
+class Value(TokenList):
+
+ token_type = 'value'
+
+ @property
+ def stripped_value(self):
+ token = self[0]
+ if token.token_type == 'cfws':
+ token = self[1]
+ if token.token_type.endswith(
+ ('quoted-string', 'attribute', 'extended-attribute')):
+ return token.stripped_value
+ return self.value
+
+
+class MimeParameters(TokenList):
+
+ token_type = 'mime-parameters'
+ syntactic_break = False
+
+ @property
+ def params(self):
+ # The RFC specifically states that the ordering of parameters is not
+ # guaranteed and may be reordered by the transport layer. So we have
+ # to assume the RFC 2231 pieces can come in any order. However, we
+ # output them in the order that we first see a given name, which gives
+ # us a stable __str__.
+ params = OrderedDict()
+ for token in self:
+ if not token.token_type.endswith('parameter'):
+ continue
+ if token[0].token_type != 'attribute':
+ continue
+ name = token[0].value.strip()
+ if name not in params:
+ params[name] = []
+ params[name].append((token.section_number, token))
+ for name, parts in params.items():
+ parts = sorted(parts, key=itemgetter(0))
+ first_param = parts[0][1]
+ charset = first_param.charset
+ # Our arbitrary error recovery is to ignore duplicate parameters,
+ # to use appearance order if there are duplicate rfc 2231 parts,
+ # and to ignore gaps. This mimics the error recovery of get_param.
+ if not first_param.extended and len(parts) > 1:
+ if parts[1][0] == 0:
+ parts[1][1].defects.append(errors.InvalidHeaderDefect(
+ 'duplicate parameter name; duplicate(s) ignored'))
+ parts = parts[:1]
+ # Else assume the *0* was missing...note that this is different
+ # from get_param, but we registered a defect for this earlier.
+ value_parts = []
+ i = 0
+ for section_number, param in parts:
+ if section_number != i:
+ # We could get fancier here and look for a complete
+ # duplicate extended parameter and ignore the second one
+ # seen. But we're not doing that. The old code didn't.
+ if not param.extended:
+ param.defects.append(errors.InvalidHeaderDefect(
+ 'duplicate parameter name; duplicate ignored'))
+ continue
+ else:
+ param.defects.append(errors.InvalidHeaderDefect(
+ "inconsistent RFC2231 parameter numbering"))
+ i += 1
+ value = param.param_value
+ if param.extended:
+ try:
+ value = urllib.parse.unquote_to_bytes(value)
+ except UnicodeEncodeError:
+ # source had surrogate escaped bytes. What we do now
+ # is a bit of an open question. I'm not sure this is
+ # the best choice, but it is what the old algorithm did
+ value = urllib.parse.unquote(value, encoding='latin-1')
+ else:
+ try:
+ value = value.decode(charset, 'surrogateescape')
+ except LookupError:
+ # XXX: there should really be a custom defect for
+ # unknown character set to make it easy to find,
+ # because otherwise unknown charset is a silent
+ # failure.
+ value = value.decode('us-ascii', 'surrogateescape')
+ if utils._has_surrogates(value):
+ param.defects.append(errors.UndecodableBytesDefect())
+ value_parts.append(value)
+ value = ''.join(value_parts)
+ yield name, value
+
+ def __str__(self):
+ params = []
+ for name, value in self.params:
+ if value:
+ params.append('{}={}'.format(name, quote_string(value)))
+ else:
+ params.append(name)
+ params = '; '.join(params)
+ return ' ' + params if params else ''
+
+
+class ParameterizedHeaderValue(TokenList):
+
+ # Set this false so that the value doesn't wind up on a new line even
+ # if it and the parameters would fit there but not on the first line.
+ syntactic_break = False
+
+ @property
+ def params(self):
+ for token in reversed(self):
+ if token.token_type == 'mime-parameters':
+ return token.params
+ return {}
+
+
+class ContentType(ParameterizedHeaderValue):
+
+ token_type = 'content-type'
+ as_ew_allowed = False
+ maintype = 'text'
+ subtype = 'plain'
+
+
+class ContentDisposition(ParameterizedHeaderValue):
+
+ token_type = 'content-disposition'
+ as_ew_allowed = False
+ content_disposition = None
+
+
+class ContentTransferEncoding(TokenList):
+
+ token_type = 'content-transfer-encoding'
+ as_ew_allowed = False
+ cte = '7bit'
+
+
+class HeaderLabel(TokenList):
+
+ token_type = 'header-label'
+ as_ew_allowed = False
+
+
+class Header(TokenList):
+
+ token_type = 'header'
+
+
+#
+# Terminal classes and instances
+#
+
+class Terminal(str):
+
+ as_ew_allowed = True
+ ew_combine_allowed = True
+ syntactic_break = True
+
+ def __new__(cls, value, token_type):
+ self = super().__new__(cls, value)
+ self.token_type = token_type
+ self.defects = []
+ return self
+
+ def __repr__(self):
+ return "{}({})".format(self.__class__.__name__, super().__repr__())
+
+ def pprint(self):
+ print(self.__class__.__name__ + '/' + self.token_type)
+
+ @property
+ def all_defects(self):
+ return list(self.defects)
+
+ def _pp(self, indent=''):
+ return ["{}{}/{}({}){}".format(
+ indent,
+ self.__class__.__name__,
+ self.token_type,
+ super().__repr__(),
+ '' if not self.defects else ' {}'.format(self.defects),
+ )]
+
+ def pop_trailing_ws(self):
+ # This terminates the recursion.
+ return None
+
+ @property
+ def comments(self):
+ return []
+
+ def __getnewargs__(self):
+ return(str(self), self.token_type)
+
+
+class WhiteSpaceTerminal(Terminal):
+
+ @property
+ def value(self):
+ return ' '
+
+ def startswith_fws(self):
+ return True
+
+
+class ValueTerminal(Terminal):
+
+ @property
+ def value(self):
+ return self
+
+ def startswith_fws(self):
+ return False
+
+
+class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
+
+ @property
+ def value(self):
+ return ''
+
+ def __str__(self):
+ return ''
+
+
+# XXX these need to become classes and used as instances so
+# that a program can't change them in a parse tree and screw
+# up other parse trees. Maybe should have tests for that, too.
+DOT = ValueTerminal('.', 'dot')
+ListSeparator = ValueTerminal(',', 'list-separator')
+RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
+
+#
+# Parser
+#
+
+# Parse strings according to RFC822/2047/2822/5322 rules.
+#
+# This is a stateless parser. Each get_XXX function accepts a string and
+# returns either a Terminal or a TokenList representing the RFC object named
+# by the method and a string containing the remaining unparsed characters
+# from the input. Thus a parser method consumes the next syntactic construct
+# of a given type and returns a token representing the construct plus the
+# unparsed remainder of the input string.
+#
+# For example, if the first element of a structured header is a 'phrase',
+# then:
+#
+# phrase, value = get_phrase(value)
+#
+# returns the complete phrase from the start of the string value, plus any
+# characters left in the string after the phrase is removed.
+
+_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
+_non_atom_end_matcher = re.compile(r"[^{}]+".format(
+ re.escape(''.join(ATOM_ENDS)))).match
+_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
+_non_token_end_matcher = re.compile(r"[^{}]+".format(
+ re.escape(''.join(TOKEN_ENDS)))).match
+_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
+ re.escape(''.join(ATTRIBUTE_ENDS)))).match
+_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
+ re.escape(''.join(EXTENDED_ATTRIBUTE_ENDS)))).match
+
+def _validate_xtext(xtext):
+ """If input token contains ASCII non-printables, register a defect."""
+
+ non_printables = _non_printable_finder(xtext)
+ if non_printables:
+ xtext.defects.append(errors.NonPrintableDefect(non_printables))
+ if utils._has_surrogates(xtext):
+ xtext.defects.append(errors.UndecodableBytesDefect(
+ "Non-ASCII characters found in header token"))
+
+def _get_ptext_to_endchars(value, endchars):
+ """Scan printables/quoted-pairs until endchars and return unquoted ptext.
+
+ This function turns a run of qcontent, ccontent-without-comments, or
+ dtext-with-quoted-printables into a single string by unquoting any
+ quoted printables. It returns the string, the remaining value, and
+ a flag that is True iff there were any quoted printables decoded.
+
+ """
+ fragment, *remainder = _wsp_splitter(value, 1)
+ vchars = []
+ escape = False
+ had_qp = False
+ for pos in range(len(fragment)):
+ if fragment[pos] == '\\':
+ if escape:
+ escape = False
+ had_qp = True
+ else:
+ escape = True
+ continue
+ if escape:
+ escape = False
+ elif fragment[pos] in endchars:
+ break
+ vchars.append(fragment[pos])
+ else:
+ pos = pos + 1
+ return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
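+
+ # A small worked example of the helper above (values traced by hand from
+ # the loop; not a doctest):
+ #
+ #     ptext, rest, had_qp = _get_ptext_to_endchars(r'foo\"bar" baz', '"')
+ #     # ptext == 'foo"bar'   (the quoted pair is unquoted)
+ #     # rest == '" baz'      (scanning stopped at the unescaped DQUOTE)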
+
+def get_fws(value):
+ """FWS = 1*WSP
+
+ This isn't the RFC definition. We're using fws to represent tokens where
+ folding can be done, but when we are parsing the *un*folding has already
+ been done so we don't need to watch out for CRLF.
+
+ """
+ newvalue = value.lstrip()
+ fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
+ return fws, newvalue
+
+def get_encoded_word(value):
+ """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
+
+ """
+ ew = EncodedWord()
+ if not value.startswith('=?'):
+ raise errors.HeaderParseError(
+ "expected encoded word but found {}".format(value))
+ tok, *remainder = value[2:].split('?=', 1)
+ if tok == value[2:]:
+ raise errors.HeaderParseError(
+ "expected encoded word but found {}".format(value))
+ remstr = ''.join(remainder)
+ if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
+ # The ? after the CTE was followed by an encoded word escape (=XX).
+ rest, *remainder = remstr.split('?=', 1)
+ tok = tok + '?=' + rest
+ if len(tok.split()) > 1:
+ ew.defects.append(errors.InvalidHeaderDefect(
+ "whitespace inside encoded word"))
+ ew.cte = value
+ value = ''.join(remainder)
+ try:
+ text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
+ except ValueError:
+ raise errors.HeaderParseError(
+ "encoded word format invalid: '{}'".format(ew.cte))
+ ew.charset = charset
+ ew.lang = lang
+ ew.defects.extend(defects)
+ while text:
+ if text[0] in WSP:
+ token, text = get_fws(text)
+ ew.append(token)
+ continue
+ chars, *remainder = _wsp_splitter(text, 1)
+ vtext = ValueTerminal(chars, 'vtext')
+ _validate_xtext(vtext)
+ ew.append(vtext)
+ text = ''.join(remainder)
+ return ew, value
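+
+ # Rough usage sketch (assumes the usual _ew.decode result for a simple
+ # q-encoded word; defect details omitted):
+ #
+ #     ew, rest = get_encoded_word('=?utf-8?q?hello?= world')
+ #     # str(ew) == 'hello', ew.charset == 'utf-8', rest == ' world'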
+
+def get_unstructured(value):
+ """unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
+ obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
+ obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
+
+ obs-NO-WS-CTL is control characters except WSP/CR/LF.
+
+ So, basically, we have printable runs, plus control characters or nulls in
+ the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
+ obsolete syntax in its specification, but requires whitespace on either
+ side of the encoded words, I can see no reason to need to separate the
+ non-printable-non-whitespace from the printable runs if they occur, so we
+ parse this into xtext tokens separated by WSP tokens.
+
+ Because an 'unstructured' value must by definition constitute the entire
+ value, this 'get' routine does not return a remaining value, only the
+ parsed TokenList.
+
+ """
+ # XXX: but what about bare CR and LF? They might signal the start or
+ # end of an encoded word. YAGNI for now, since our current parsers
+ # will never send us strings with bare CR or LF.
+
+ unstructured = UnstructuredTokenList()
+ while value:
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ unstructured.append(token)
+ continue
+ if value.startswith('=?'):
+ try:
+ token, value = get_encoded_word(value)
+ except errors.HeaderParseError:
+ # XXX: Need to figure out how to register defects when
+ # appropriate here.
+ pass
+ else:
+ have_ws = True
+ if len(unstructured) > 0:
+ if unstructured[-1].token_type != 'fws':
+ unstructured.defects.append(errors.InvalidHeaderDefect(
+ "missing whitespace before encoded word"))
+ have_ws = False
+ if have_ws and len(unstructured) > 1:
+ if unstructured[-2].token_type == 'encoded-word':
+ unstructured[-1] = EWWhiteSpaceTerminal(
+ unstructured[-1], 'fws')
+ unstructured.append(token)
+ continue
+ tok, *remainder = _wsp_splitter(value, 1)
+ vtext = ValueTerminal(tok, 'vtext')
+ _validate_xtext(vtext)
+ unstructured.append(vtext)
+ value = ''.join(remainder)
+ return unstructured
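+
+ # Illustrative sketch: whitespace is kept as fws tokens and encoded words
+ # are decoded in place, so roughly:
+ #
+ #     str(get_unstructured('Hello =?utf-8?q?world?=')) == 'Hello world'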
+
+def get_qp_ctext(value):
+ r"""ctext =
+
+ This is not the RFC ctext, since we are handling nested comments in comment
+ and unquoting quoted-pairs here. We allow anything except the '()'
+ characters, but if we find any ASCII other than the RFC defined printable
+ ASCII, a NonPrintableDefect is added to the token's defects list. Since
+ quoted pairs are converted to their unquoted values, what is returned is
+ a 'ptext' token. In this case it is a WhiteSpaceTerminal, so its value
+ is ' '.
+
+ """
+ ptext, value, _ = _get_ptext_to_endchars(value, '()')
+ ptext = WhiteSpaceTerminal(ptext, 'ptext')
+ _validate_xtext(ptext)
+ return ptext, value
+
+def get_qcontent(value):
+ """qcontent = qtext / quoted-pair
+
+ We allow anything except the DQUOTE character, but if we find any ASCII
+ other than the RFC defined printable ASCII, a NonPrintableDefect is
+ added to the token's defects list. Any quoted pairs are converted to their
+ unquoted values, so what is returned is a 'ptext' token. In this case it
+ is a ValueTerminal.
+
+ """
+ ptext, value, _ = _get_ptext_to_endchars(value, '"')
+ ptext = ValueTerminal(ptext, 'ptext')
+ _validate_xtext(ptext)
+ return ptext, value
+
+def get_atext(value):
+ """atext =
+
+ We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
+ the token's defects list if we find non-atext characters.
+ """
+ m = _non_atom_end_matcher(value)
+ if not m:
+ raise errors.HeaderParseError(
+ "expected atext but found '{}'".format(value))
+ atext = m.group()
+ value = value[len(atext):]
+ atext = ValueTerminal(atext, 'atext')
+ _validate_xtext(atext)
+ return atext, value
+
+def get_bare_quoted_string(value):
+ """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
+
+ A quoted-string without the leading or trailing white space. Its
+ value is the text between the quote marks, with whitespace
+ preserved and quoted pairs decoded.
+ """
+ if value[0] != '"':
+ raise errors.HeaderParseError(
+ "expected '\"' but found '{}'".format(value))
+ bare_quoted_string = BareQuotedString()
+ value = value[1:]
+ if value and value[0] == '"':
+ token, value = get_qcontent(value)
+ bare_quoted_string.append(token)
+ while value and value[0] != '"':
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ elif value[:2] == '=?':
+ try:
+ token, value = get_encoded_word(value)
+ bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
+ "encoded word inside quoted string"))
+ except errors.HeaderParseError:
+ token, value = get_qcontent(value)
+ else:
+ token, value = get_qcontent(value)
+ bare_quoted_string.append(token)
+ if not value:
+ bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
+ "end of header inside quoted string"))
+ return bare_quoted_string, value
+ return bare_quoted_string, value[1:]
+
+def get_comment(value):
+ """comment = "(" *([FWS] ccontent) [FWS] ")"
+ ccontent = ctext / quoted-pair / comment
+
+ We handle nested comments here, and quoted-pair in our qp-ctext routine.
+ """
+ if value and value[0] != '(':
+ raise errors.HeaderParseError(
+ "expected '(' but found '{}'".format(value))
+ comment = Comment()
+ value = value[1:]
+ while value and value[0] != ")":
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ elif value[0] == '(':
+ token, value = get_comment(value)
+ else:
+ token, value = get_qp_ctext(value)
+ comment.append(token)
+ if not value:
+ comment.defects.append(errors.InvalidHeaderDefect(
+ "end of header inside comment"))
+ return comment, value
+ return comment, value[1:]
+
+def get_cfws(value):
+ """CFWS = (1*([FWS] comment) [FWS]) / FWS
+
+ """
+ cfws = CFWSList()
+ while value and value[0] in CFWS_LEADER:
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ else:
+ token, value = get_comment(value)
+ cfws.append(token)
+ return cfws, value
+
+def get_quoted_string(value):
+ """quoted-string = [CFWS] [CFWS]
+
+ 'bare-quoted-string' is an intermediate class defined by this
+ parser and not by the RFC grammar. It is the quoted string
+ without any attached CFWS.
+ """
+ quoted_string = QuotedString()
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ quoted_string.append(token)
+ token, value = get_bare_quoted_string(value)
+ quoted_string.append(token)
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ quoted_string.append(token)
+ return quoted_string, value
+
+def get_atom(value):
+ """atom = [CFWS] 1*atext [CFWS]
+
+ An atom could be an rfc2047 encoded word.
+ """
+ atom = Atom()
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ atom.append(token)
+ if value and value[0] in ATOM_ENDS:
+ raise errors.HeaderParseError(
+ "expected atom but found '{}'".format(value))
+ if value.startswith('=?'):
+ try:
+ token, value = get_encoded_word(value)
+ except errors.HeaderParseError:
+ # XXX: need to figure out how to register defects when
+ # appropriate here.
+ token, value = get_atext(value)
+ else:
+ token, value = get_atext(value)
+ atom.append(token)
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ atom.append(token)
+ return atom, value
+
+def get_dot_atom_text(value):
+ """ dot-text = 1*atext *("." 1*atext)
+
+ """
+ dot_atom_text = DotAtomText()
+ if not value or value[0] in ATOM_ENDS:
+ raise errors.HeaderParseError("expected atom at a start of "
+ "dot-atom-text but found '{}'".format(value))
+ while value and value[0] not in ATOM_ENDS:
+ token, value = get_atext(value)
+ dot_atom_text.append(token)
+ if value and value[0] == '.':
+ dot_atom_text.append(DOT)
+ value = value[1:]
+ if dot_atom_text[-1] is DOT:
+ raise errors.HeaderParseError("expected atom at end of dot-atom-text "
+ "but found '{}'".format('.'+value))
+ return dot_atom_text, value
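+
+ # For example (illustrative):
+ #
+ #     dot_atom_text, rest = get_dot_atom_text('example.com>')
+ #     # str(dot_atom_text) == 'example.com', rest == '>'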
+
+def get_dot_atom(value):
+ """ dot-atom = [CFWS] dot-atom-text [CFWS]
+
+ Any place we can have a dot atom, we could instead have an rfc2047 encoded
+ word.
+ """
+ dot_atom = DotAtom()
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ dot_atom.append(token)
+ if value.startswith('=?'):
+ try:
+ token, value = get_encoded_word(value)
+ except errors.HeaderParseError:
+ # XXX: need to figure out how to register defects when
+ # appropriate here.
+ token, value = get_dot_atom_text(value)
+ else:
+ token, value = get_dot_atom_text(value)
+ dot_atom.append(token)
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ dot_atom.append(token)
+ return dot_atom, value
+
+def get_word(value):
+ """word = atom / quoted-string
+
+ Either atom or quoted-string may start with CFWS. We have to peel off this
+ CFWS first to determine which type of word to parse. Afterward we splice
+ the leading CFWS, if any, into the parsed sub-token.
+
+ If neither an atom or a quoted-string is found before the next special, a
+ HeaderParseError is raised.
+
+ The token returned is either an Atom or a QuotedString, as appropriate.
+ This means the 'word' level of the formal grammar is not represented in the
+ parse tree; this is because having that extra layer when manipulating the
+ parse tree is more confusing than it is helpful.
+
+ """
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ else:
+ leader = None
+ if value[0]=='"':
+ token, value = get_quoted_string(value)
+ elif value[0] in SPECIALS:
+ raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
+ "but found '{}'".format(value))
+ else:
+ token, value = get_atom(value)
+ if leader is not None:
+ token[:0] = [leader]
+ return token, value
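+
+ # For example (illustrative; the trailing blank is kept as the atom's
+ # trailing CFWS):
+ #
+ #     word, rest = get_word('bob smith')
+ #     # word is an Atom, str(word) == 'bob ', rest == 'smith'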
+
+def get_phrase(value):
+ """ phrase = 1*word / obs-phrase
+ obs-phrase = word *(word / "." / CFWS)
+
+ This means a phrase can be a sequence of words, periods, and CFWS in any
+ order as long as it starts with at least one word. If anything other than
+ words is detected, an ObsoleteHeaderDefect is added to the token's defect
+ list. We also accept a phrase that starts with CFWS followed by a dot;
+ this is registered as an InvalidHeaderDefect, since it is not supported by
+ even the obsolete grammar.
+
+ """
+ phrase = Phrase()
+ try:
+ token, value = get_word(value)
+ phrase.append(token)
+ except errors.HeaderParseError:
+ phrase.defects.append(errors.InvalidHeaderDefect(
+ "phrase does not start with word"))
+ while value and value[0] not in PHRASE_ENDS:
+ if value[0]=='.':
+ phrase.append(DOT)
+ phrase.defects.append(errors.ObsoleteHeaderDefect(
+ "period in 'phrase'"))
+ value = value[1:]
+ else:
+ try:
+ token, value = get_word(value)
+ except errors.HeaderParseError:
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ phrase.defects.append(errors.ObsoleteHeaderDefect(
+ "comment found without atom"))
+ else:
+ raise
+ phrase.append(token)
+ return phrase, value
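+
+ # For example (illustrative):
+ #
+ #     phrase, rest = get_phrase('Foo Bar <foo@example.com>')
+ #     # str(phrase) == 'Foo Bar ', rest == '<foo@example.com>'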
+
+def get_local_part(value):
+ """ local-part = dot-atom / quoted-string / obs-local-part
+
+ """
+ local_part = LocalPart()
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value:
+ raise errors.HeaderParseError(
+ "expected local-part but found '{}'".format(value))
+ try:
+ token, value = get_dot_atom(value)
+ except errors.HeaderParseError:
+ try:
+ token, value = get_word(value)
+ except errors.HeaderParseError:
+ if value[0] != '\\' and value[0] in PHRASE_ENDS:
+ raise
+ token = TokenList()
+ if leader is not None:
+ token[:0] = [leader]
+ local_part.append(token)
+ if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
+ obs_local_part, value = get_obs_local_part(str(local_part) + value)
+ if obs_local_part.token_type == 'invalid-obs-local-part':
+ local_part.defects.append(errors.InvalidHeaderDefect(
+ "local-part is not dot-atom, quoted-string, or obs-local-part"))
+ else:
+ local_part.defects.append(errors.ObsoleteHeaderDefect(
+ "local-part is not a dot-atom (contains CFWS)"))
+ local_part[0] = obs_local_part
+ try:
+ local_part.value.encode('ascii')
+ except UnicodeEncodeError:
+ local_part.defects.append(errors.NonASCIILocalPartDefect(
+ "local-part contains non-ASCII characters)"))
+ return local_part, value
+
+def get_obs_local_part(value):
+ """ obs-local-part = word *("." word)
+ """
+ obs_local_part = ObsLocalPart()
+ last_non_ws_was_dot = False
+ while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
+ if value[0] == '.':
+ if last_non_ws_was_dot:
+ obs_local_part.defects.append(errors.InvalidHeaderDefect(
+ "invalid repeated '.'"))
+ obs_local_part.append(DOT)
+ last_non_ws_was_dot = True
+ value = value[1:]
+ continue
+ elif value[0]=='\\':
+ obs_local_part.append(ValueTerminal(value[0],
+ 'misplaced-special'))
+ value = value[1:]
+ obs_local_part.defects.append(errors.InvalidHeaderDefect(
+ "'\\' character outside of quoted-string/ccontent"))
+ last_non_ws_was_dot = False
+ continue
+ if obs_local_part and obs_local_part[-1].token_type != 'dot':
+ obs_local_part.defects.append(errors.InvalidHeaderDefect(
+ "missing '.' between words"))
+ try:
+ token, value = get_word(value)
+ last_non_ws_was_dot = False
+ except errors.HeaderParseError:
+ if value[0] not in CFWS_LEADER:
+ raise
+ token, value = get_cfws(value)
+ obs_local_part.append(token)
+ if (obs_local_part[0].token_type == 'dot' or
+ obs_local_part[0].token_type=='cfws' and
+ obs_local_part[1].token_type=='dot'):
+ obs_local_part.defects.append(errors.InvalidHeaderDefect(
+ "Invalid leading '.' in local part"))
+ if (obs_local_part[-1].token_type == 'dot' or
+ obs_local_part[-1].token_type=='cfws' and
+ obs_local_part[-2].token_type=='dot'):
+ obs_local_part.defects.append(errors.InvalidHeaderDefect(
+ "Invalid trailing '.' in local part"))
+ if obs_local_part.defects:
+ obs_local_part.token_type = 'invalid-obs-local-part'
+ return obs_local_part, value
+
+def get_dtext(value):
+ r""" dtext = / obs-dtext
+ obs-dtext = obs-NO-WS-CTL / quoted-pair
+
+ We allow anything except the excluded characters, but if we find any
+ ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
+ added to the token's defects list. Quoted pairs are converted to their
+ unquoted values, so what is returned is a ptext token, in this case a
+ ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
+ added to the returned token's defect list.
+
+ """
+ ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
+ ptext = ValueTerminal(ptext, 'ptext')
+ if had_qp:
+ ptext.defects.append(errors.ObsoleteHeaderDefect(
+ "quoted printable found in domain-literal"))
+ _validate_xtext(ptext)
+ return ptext, value
+
+def _check_for_early_dl_end(value, domain_literal):
+ if value:
+ return False
+ domain_literal.defects.append(errors.InvalidHeaderDefect(
+ "end of input inside domain-literal"))
+ domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
+ return True
+
+def get_domain_literal(value):
+ """ domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
+
+ """
+ domain_literal = DomainLiteral()
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ domain_literal.append(token)
+ if not value:
+ raise errors.HeaderParseError("expected domain-literal")
+ if value[0] != '[':
+ raise errors.HeaderParseError("expected '[' at start of domain-literal "
+ "but found '{}'".format(value))
+ value = value[1:]
+ if _check_for_early_dl_end(value, domain_literal):
+ return domain_literal, value
+ domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ domain_literal.append(token)
+ token, value = get_dtext(value)
+ domain_literal.append(token)
+ if _check_for_early_dl_end(value, domain_literal):
+ return domain_literal, value
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ domain_literal.append(token)
+ if _check_for_early_dl_end(value, domain_literal):
+ return domain_literal, value
+ if value[0] != ']':
+ raise errors.HeaderParseError("expected ']' at end of domain-literal "
+ "but found '{}'".format(value))
+ domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
+ value = value[1:]
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ domain_literal.append(token)
+ return domain_literal, value
+
+def get_domain(value):
+ """ domain = dot-atom / domain-literal / obs-domain
+ obs-domain = atom *("." atom))
+
+ """
+ domain = Domain()
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value:
+ raise errors.HeaderParseError(
+ "expected domain but found '{}'".format(value))
+ if value[0] == '[':
+ token, value = get_domain_literal(value)
+ if leader is not None:
+ token[:0] = [leader]
+ domain.append(token)
+ return domain, value
+ try:
+ token, value = get_dot_atom(value)
+ except errors.HeaderParseError:
+ token, value = get_atom(value)
+ if leader is not None:
+ token[:0] = [leader]
+ domain.append(token)
+ if value and value[0] == '.':
+ domain.defects.append(errors.ObsoleteHeaderDefect(
+ "domain is not a dot-atom (contains CFWS)"))
+ if domain[0].token_type == 'dot-atom':
+ domain[:] = domain[0]
+ while value and value[0] == '.':
+ domain.append(DOT)
+ token, value = get_atom(value[1:])
+ domain.append(token)
+ return domain, value
+
+def get_addr_spec(value):
+ """ addr-spec = local-part "@" domain
+
+ """
+ addr_spec = AddrSpec()
+ token, value = get_local_part(value)
+ addr_spec.append(token)
+ if not value or value[0] != '@':
+ addr_spec.defects.append(errors.InvalidHeaderDefect(
+ "add-spec local part with no domain"))
+ return addr_spec, value
+ addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
+ token, value = get_domain(value[1:])
+ addr_spec.append(token)
+ return addr_spec, value
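+
+ # For example (illustrative; example.com is just a placeholder):
+ #
+ #     addr_spec, rest = get_addr_spec('dinsdale@example.com')
+ #     # str(addr_spec) == 'dinsdale@example.com', rest == ''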
+
+def get_obs_route(value):
+ """ obs-route = obs-domain-list ":"
+ obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
+
+ Returns an obs-route token with the appropriate sub-tokens (that is,
+ there is no obs-domain-list in the parse tree).
+ """
+ obs_route = ObsRoute()
+ while value and (value[0]==',' or value[0] in CFWS_LEADER):
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ obs_route.append(token)
+ elif value[0] == ',':
+ obs_route.append(ListSeparator)
+ value = value[1:]
+ if not value or value[0] != '@':
+ raise errors.HeaderParseError(
+ "expected obs-route domain but found '{}'".format(value))
+ obs_route.append(RouteComponentMarker)
+ token, value = get_domain(value[1:])
+ obs_route.append(token)
+ while value and value[0]==',':
+ obs_route.append(ListSeparator)
+ value = value[1:]
+ if not value:
+ break
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ obs_route.append(token)
+ if value[0] == '@':
+ obs_route.append(RouteComponentMarker)
+ token, value = get_domain(value[1:])
+ obs_route.append(token)
+ if not value:
+ raise errors.HeaderParseError("end of header while parsing obs-route")
+ if value[0] != ':':
+ raise errors.HeaderParseError( "expected ':' marking end of "
+ "obs-route but found '{}'".format(value))
+ obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
+ return obs_route, value[1:]
+
+def get_angle_addr(value):
+ """ angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
+ obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
+
+ """
+ angle_addr = AngleAddr()
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ angle_addr.append(token)
+ if not value or value[0] != '<':
+ raise errors.HeaderParseError(
+ "expected angle-addr but found '{}'".format(value))
+ angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
+ value = value[1:]
+ # Although it is not legal per RFC5322, SMTP uses '<>' in certain
+ # circumstances.
+ if value[0] == '>':
+ angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
+ angle_addr.defects.append(errors.InvalidHeaderDefect(
+ "null addr-spec in angle-addr"))
+ value = value[1:]
+ return angle_addr, value
+ try:
+ token, value = get_addr_spec(value)
+ except errors.HeaderParseError:
+ try:
+ token, value = get_obs_route(value)
+ angle_addr.defects.append(errors.ObsoleteHeaderDefect(
+ "obsolete route specification in angle-addr"))
+ except errors.HeaderParseError:
+ raise errors.HeaderParseError(
+ "expected addr-spec or obs-route but found '{}'".format(value))
+ angle_addr.append(token)
+ token, value = get_addr_spec(value)
+ angle_addr.append(token)
+ if value and value[0] == '>':
+ value = value[1:]
+ else:
+ angle_addr.defects.append(errors.InvalidHeaderDefect(
+ "missing trailing '>' on angle-addr"))
+ angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ angle_addr.append(token)
+ return angle_addr, value
+
+def get_display_name(value):
+ """ display-name = phrase
+
+ Because this is simply a name-rule, we don't return a display-name
+ token containing a phrase, but rather a display-name token with
+ the content of the phrase.
+
+ """
+ display_name = DisplayName()
+ token, value = get_phrase(value)
+ display_name.extend(token[:])
+ display_name.defects = token.defects[:]
+ return display_name, value
+
+
+def get_name_addr(value):
+ """ name-addr = [display-name] angle-addr
+
+ """
+ name_addr = NameAddr()
+ # Both the optional display name and the angle-addr can start with cfws.
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value:
+ raise errors.HeaderParseError(
+ "expected name-addr but found '{}'".format(leader))
+ if value[0] != '<':
+ if value[0] in PHRASE_ENDS:
+ raise errors.HeaderParseError(
+ "expected name-addr but found '{}'".format(value))
+ token, value = get_display_name(value)
+ if not value:
+ raise errors.HeaderParseError(
+ "expected name-addr but found '{}'".format(token))
+ if leader is not None:
+ token[0][:0] = [leader]
+ leader = None
+ name_addr.append(token)
+ token, value = get_angle_addr(value)
+ if leader is not None:
+ token[:0] = [leader]
+ name_addr.append(token)
+ return name_addr, value
+
+def get_mailbox(value):
+ """ mailbox = name-addr / addr-spec
+
+ """
+ # The only way to figure out if we are dealing with a name-addr or an
+ # addr-spec is to try parsing each one.
+ mailbox = Mailbox()
+ try:
+ token, value = get_name_addr(value)
+ except errors.HeaderParseError:
+ try:
+ token, value = get_addr_spec(value)
+ except errors.HeaderParseError:
+ raise errors.HeaderParseError(
+ "expected mailbox but found '{}'".format(value))
+ if any(isinstance(x, errors.InvalidHeaderDefect)
+ for x in token.all_defects):
+ mailbox.token_type = 'invalid-mailbox'
+ mailbox.append(token)
+ return mailbox, value
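+
+ # For example (illustrative):
+ #
+ #     mailbox, rest = get_mailbox('Fred Bloggs <fred@example.com>')
+ #     # str(mailbox) == 'Fred Bloggs <fred@example.com>', rest == ''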
+
+def get_invalid_mailbox(value, endchars):
+ """ Read everything up to one of the chars in endchars.
+
+ This is outside the formal grammar. The InvalidMailbox TokenList that is
+ returned acts like a Mailbox, but the data attributes are None.
+
+ """
+ invalid_mailbox = InvalidMailbox()
+ while value and value[0] not in endchars:
+ if value[0] in PHRASE_ENDS:
+ invalid_mailbox.append(ValueTerminal(value[0],
+ 'misplaced-special'))
+ value = value[1:]
+ else:
+ token, value = get_phrase(value)
+ invalid_mailbox.append(token)
+ return invalid_mailbox, value
+
+def get_mailbox_list(value):
+ """ mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
+ obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
+
+ For this routine we go outside the formal grammar in order to improve error
+ handling. We recognize the end of the mailbox list only at the end of the
+ value or at a ';' (the group terminator). This is so that we can turn
+ invalid mailboxes into InvalidMailbox tokens and continue parsing any
+ remaining valid mailboxes. We also allow all mailbox entries to be null,
+ and this condition is handled appropriately at a higher level.
+
+ """
+ mailbox_list = MailboxList()
+ while value and value[0] != ';':
+ try:
+ token, value = get_mailbox(value)
+ mailbox_list.append(token)
+ except errors.HeaderParseError:
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value or value[0] in ',;':
+ mailbox_list.append(leader)
+ mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
+ "empty element in mailbox-list"))
+ else:
+ token, value = get_invalid_mailbox(value, ',;')
+ if leader is not None:
+ token[:0] = [leader]
+ mailbox_list.append(token)
+ mailbox_list.defects.append(errors.InvalidHeaderDefect(
+ "invalid mailbox in mailbox-list"))
+ elif value[0] == ',':
+ mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
+ "empty element in mailbox-list"))
+ else:
+ token, value = get_invalid_mailbox(value, ',;')
+ if leader is not None:
+ token[:0] = [leader]
+ mailbox_list.append(token)
+ mailbox_list.defects.append(errors.InvalidHeaderDefect(
+ "invalid mailbox in mailbox-list"))
+ if value and value[0] not in ',;':
+ # Crap after mailbox; treat it as an invalid mailbox.
+ # The mailbox info will still be available.
+ mailbox = mailbox_list[-1]
+ mailbox.token_type = 'invalid-mailbox'
+ token, value = get_invalid_mailbox(value, ',;')
+ mailbox.extend(token)
+ mailbox_list.defects.append(errors.InvalidHeaderDefect(
+ "invalid mailbox in mailbox-list"))
+ if value and value[0] == ',':
+ mailbox_list.append(ListSeparator)
+ value = value[1:]
+ return mailbox_list, value
+
+
+def get_group_list(value):
+ """ group-list = mailbox-list / CFWS / obs-group-list
+ obs-group-list = 1*([CFWS] ",") [CFWS]
+
+ """
+ group_list = GroupList()
+ if not value:
+ group_list.defects.append(errors.InvalidHeaderDefect(
+ "end of header before group-list"))
+ return group_list, value
+ leader = None
+ if value and value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value:
+ # This should never happen in email parsing, since CFWS-only is a
+ # legal alternative to group-list in a group, which is the only
+ # place group-list appears.
+ group_list.defects.append(errors.InvalidHeaderDefect(
+ "end of header in group-list"))
+ group_list.append(leader)
+ return group_list, value
+ if value[0] == ';':
+ group_list.append(leader)
+ return group_list, value
+ token, value = get_mailbox_list(value)
+ if len(token.all_mailboxes)==0:
+ if leader is not None:
+ group_list.append(leader)
+ group_list.extend(token)
+ group_list.defects.append(errors.ObsoleteHeaderDefect(
+ "group-list with empty entries"))
+ return group_list, value
+ if leader is not None:
+ token[:0] = [leader]
+ group_list.append(token)
+ return group_list, value
+
+def get_group(value):
+ """ group = display-name ":" [group-list] ";" [CFWS]
+
+ """
+ group = Group()
+ token, value = get_display_name(value)
+ if not value or value[0] != ':':
+ raise errors.HeaderParseError("expected ':' at end of group "
+ "display name but found '{}'".format(value))
+ group.append(token)
+ group.append(ValueTerminal(':', 'group-display-name-terminator'))
+ value = value[1:]
+ if value and value[0] == ';':
+ group.append(ValueTerminal(';', 'group-terminator'))
+ return group, value[1:]
+ token, value = get_group_list(value)
+ group.append(token)
+ if not value:
+ group.defects.append(errors.InvalidHeaderDefect(
+ "end of header in group"))
+ elif value[0] != ';':
+ raise errors.HeaderParseError(
+ "expected ';' at end of group but found {}".format(value))
+ group.append(ValueTerminal(';', 'group-terminator'))
+ value = value[1:]
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ group.append(token)
+ return group, value
+
+def get_address(value):
+ """ address = mailbox / group
+
+ Note that counter-intuitively, an address can be either a single address or
+ a list of addresses (a group). This is why the returned Address object has
+ a 'mailboxes' attribute which treats a single address as a list of length
+ one. When you need to differentiate between to two cases, extract the single
+ element, which is either a mailbox or a group token.
+
+ """
+ # The formal grammar isn't very helpful when parsing an address. mailbox
+ # and group, especially when allowing for obsolete forms, start off very
+ # similarly. It is only when you reach one of @, <, or : that you know
+ # what you've got. So, we try each one in turn, starting with the more
+ # likely of the two. We could perhaps make this more efficient by looking
+ # for a phrase and then branching based on the next character, but that
+ # would be a premature optimization.
+ address = Address()
+ try:
+ token, value = get_group(value)
+ except errors.HeaderParseError:
+ try:
+ token, value = get_mailbox(value)
+ except errors.HeaderParseError:
+ raise errors.HeaderParseError(
+ "expected address but found '{}'".format(value))
+ address.append(token)
+ return address, value
+
+def get_address_list(value):
+ """ address_list = (address *("," address)) / obs-addr-list
+ obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
+
+ We depart from the formal grammar here by continuing to parse until the end
+ of the input, assuming the input to be entirely composed of an
+ address-list. This is always true in email parsing, and allows us
+ to skip invalid addresses to parse additional valid ones.
+
+ """
+ address_list = AddressList()
+ while value:
+ try:
+ token, value = get_address(value)
+ address_list.append(token)
+ except errors.HeaderParseError as err:
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value or value[0] == ',':
+ address_list.append(leader)
+ address_list.defects.append(errors.ObsoleteHeaderDefect(
+ "address-list entry with no content"))
+ else:
+ token, value = get_invalid_mailbox(value, ',')
+ if leader is not None:
+ token[:0] = [leader]
+ address_list.append(Address([token]))
+ address_list.defects.append(errors.InvalidHeaderDefect(
+ "invalid address in address-list"))
+ elif value[0] == ',':
+ address_list.defects.append(errors.ObsoleteHeaderDefect(
+ "empty element in address-list"))
+ else:
+ token, value = get_invalid_mailbox(value, ',')
+ if leader is not None:
+ token[:0] = [leader]
+ address_list.append(Address([token]))
+ address_list.defects.append(errors.InvalidHeaderDefect(
+ "invalid address in address-list"))
+ if value and value[0] != ',':
+ # Crap after address; treat it as an invalid mailbox.
+ # The mailbox info will still be available.
+ mailbox = address_list[-1][0]
+ mailbox.token_type = 'invalid-mailbox'
+ token, value = get_invalid_mailbox(value, ',')
+ mailbox.extend(token)
+ address_list.defects.append(errors.InvalidHeaderDefect(
+ "invalid address in address-list"))
+ if value: # Must be a , at this point.
+ address_list.append(ValueTerminal(',', 'list-separator'))
+ value = value[1:]
+ return address_list, value
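+
+ # Illustrative sketch: one bare addr-spec and one name-addr, joined by a
+ # list-separator token:
+ #
+ #     address_list, rest = get_address_list(
+ #         'foo@example.com, Bar <bar@example.com>')
+ #     # str(address_list) == 'foo@example.com, Bar <bar@example.com>'
+ #     # rest == ''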
+
+#
+# XXX: As I begin to add additional header parsers, I'm realizing we probably
+# have two level of parser routines: the get_XXX methods that get a token in
+# the grammar, and parse_XXX methods that parse an entire field value. So
+# get_address_list above should really be a parse_ method, as probably should
+# be get_unstructured.
+#
+
+def parse_mime_version(value):
+ """ mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
+
+ """
+ # The [CFWS] is implicit in the RFC 2045 BNF.
+ # XXX: This routine is a bit verbose, should factor out a get_int method.
+ mime_version = MIMEVersion()
+ if not value:
+ mime_version.defects.append(errors.HeaderMissingRequiredValue(
+ "Missing MIME version number (eg: 1.0)"))
+ return mime_version
+ if value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ mime_version.append(token)
+ if not value:
+ mime_version.defects.append(errors.HeaderMissingRequiredValue(
+ "Expected MIME version number but found only CFWS"))
+ digits = ''
+ while value and value[0] != '.' and value[0] not in CFWS_LEADER:
+ digits += value[0]
+ value = value[1:]
+ if not digits.isdigit():
+ mime_version.defects.append(errors.InvalidHeaderDefect(
+ "Expected MIME major version number but found {!r}".format(digits)))
+ mime_version.append(ValueTerminal(digits, 'xtext'))
+ else:
+ mime_version.major = int(digits)
+ mime_version.append(ValueTerminal(digits, 'digits'))
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ mime_version.append(token)
+ if not value or value[0] != '.':
+ if mime_version.major is not None:
+ mime_version.defects.append(errors.InvalidHeaderDefect(
+ "Incomplete MIME version; found only major number"))
+ if value:
+ mime_version.append(ValueTerminal(value, 'xtext'))
+ return mime_version
+ mime_version.append(ValueTerminal('.', 'version-separator'))
+ value = value[1:]
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ mime_version.append(token)
+ if not value:
+ if mime_version.major is not None:
+ mime_version.defects.append(errors.InvalidHeaderDefect(
+ "Incomplete MIME version; found only major number"))
+ return mime_version
+ digits = ''
+ while value and value[0] not in CFWS_LEADER:
+ digits += value[0]
+ value = value[1:]
+ if not digits.isdigit():
+ mime_version.defects.append(errors.InvalidHeaderDefect(
+ "Expected MIME minor version number but found {!r}".format(digits)))
+ mime_version.append(ValueTerminal(digits, 'xtext'))
+ else:
+ mime_version.minor = int(digits)
+ mime_version.append(ValueTerminal(digits, 'digits'))
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ mime_version.append(token)
+ if value:
+ mime_version.defects.append(errors.InvalidHeaderDefect(
+ "Excess non-CFWS text after MIME version"))
+ mime_version.append(ValueTerminal(value, 'xtext'))
+ return mime_version
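+
+ # For example (illustrative):
+ #
+ #     mime_version = parse_mime_version('1.0')
+ #     # mime_version.major == 1, mime_version.minor == 0
+ #     # str(mime_version) == '1.0'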
+
+def get_invalid_parameter(value):
+ """ Read everything up to the next ';'.
+
+ This is outside the formal grammar. The InvalidParameter TokenList that is
+ returned acts like a Parameter, but the data attributes are None.
+
+ """
+ invalid_parameter = InvalidParameter()
+ while value and value[0] != ';':
+ if value[0] in PHRASE_ENDS:
+ invalid_parameter.append(ValueTerminal(value[0],
+ 'misplaced-special'))
+ value = value[1:]
+ else:
+ token, value = get_phrase(value)
+ invalid_parameter.append(token)
+ return invalid_parameter, value
+
+def get_ttext(value):
+ """ttext =
+
+ We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
+ defects list if we find non-ttext characters. We also register defects for
+ *any* non-printables even though the RFC doesn't exclude all of them,
+ because we follow the spirit of RFC 5322.
+
+ """
+ m = _non_token_end_matcher(value)
+ if not m:
+ raise errors.HeaderParseError(
+ "expected ttext but found '{}'".format(value))
+ ttext = m.group()
+ value = value[len(ttext):]
+ ttext = ValueTerminal(ttext, 'ttext')
+ _validate_xtext(ttext)
+ return ttext, value
+
+def get_token(value):
+ """token = [CFWS] 1*ttext [CFWS]
+
+ The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
+ tspecials. We also exclude tabs even though the RFC doesn't.
+
+ The RFC implies the CFWS but is not explicit about it in the BNF.
+
+ """
+ mtoken = Token()
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ mtoken.append(token)
+ if value and value[0] in TOKEN_ENDS:
+ raise errors.HeaderParseError(
+ "expected token but found '{}'".format(value))
+ token, value = get_ttext(value)
+ mtoken.append(token)
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ mtoken.append(token)
+ return mtoken, value
+
+def get_attrtext(value):
+ """attrtext = 1*(any non-ATTRIBUTE_ENDS character)
+
+ We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
+ token's defects list if we find non-attrtext characters. We also register
+ defects for *any* non-printables even though the RFC doesn't exclude all of
+ them, because we follow the spirit of RFC 5322.
+
+ """
+ m = _non_attribute_end_matcher(value)
+ if not m:
+ raise errors.HeaderParseError(
+ "expected attrtext but found {!r}".format(value))
+ attrtext = m.group()
+ value = value[len(attrtext):]
+ attrtext = ValueTerminal(attrtext, 'attrtext')
+ _validate_xtext(attrtext)
+ return attrtext, value
+
+def get_attribute(value):
+ """ [CFWS] 1*attrtext [CFWS]
+
+ This version of the BNF makes the CFWS explicit, and as usual we use a
+ value terminal for the actual run of characters. The RFC equivalent of
+ attrtext is the token characters, with the subtraction of '*', "'", and '%'.
+ We include tab in the excluded set just as we do for token.
+
+ """
+ attribute = Attribute()
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ attribute.append(token)
+ if value and value[0] in ATTRIBUTE_ENDS:
+ raise errors.HeaderParseError(
+ "expected token but found '{}'".format(value))
+ token, value = get_attrtext(value)
+ attribute.append(token)
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ attribute.append(token)
+ return attribute, value
+
+def get_extended_attrtext(value):
+ """attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
+
+ This is a special parsing routine so that we get a value that
+ includes % escapes as a single string (which we decode as a single
+ string later).
+
+ """
+ m = _non_extended_attribute_end_matcher(value)
+ if not m:
+ raise errors.HeaderParseError(
+ "expected extended attrtext but found {!r}".format(value))
+ attrtext = m.group()
+ value = value[len(attrtext):]
+ attrtext = ValueTerminal(attrtext, 'extended-attrtext')
+ _validate_xtext(attrtext)
+ return attrtext, value
+
+def get_extended_attribute(value):
+ """ [CFWS] 1*extended_attrtext [CFWS]
+
+ This is like the non-extended version except we allow % characters, so that
+ we can pick up an encoded value as a single string.
+
+ """
+ # XXX: should we have an ExtendedAttribute TokenList?
+ attribute = Attribute()
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ attribute.append(token)
+ if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
+ raise errors.HeaderParseError(
+ "expected token but found '{}'".format(value))
+ token, value = get_extended_attrtext(value)
+ attribute.append(token)
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ attribute.append(token)
+ return attribute, value
+
+def get_section(value):
+ """ '*' digits
+
+ The formal BNF is more complicated because leading 0s are not allowed. We
+ check for that and add a defect. We also assume no CFWS is allowed between
+ the '*' and the digits, though the RFC is not crystal clear on that.
+ The caller should already have dealt with leading CFWS.
+
+ """
+ section = Section()
+ if not value or value[0] != '*':
+ raise errors.HeaderParseError("Expected section but found {}".format(
+ value))
+ section.append(ValueTerminal('*', 'section-marker'))
+ value = value[1:]
+ if not value or not value[0].isdigit():
+ raise errors.HeaderParseError("Expected section number but "
+ "found {}".format(value))
+ digits = ''
+ while value and value[0].isdigit():
+ digits += value[0]
+ value = value[1:]
+ if digits[0] == '0' and digits != '0':
+ section.defects.append(errors.InvalidHeaderDefect(
+ "section number has an invalid leading 0"))
+ section.number = int(digits)
+ section.append(ValueTerminal(digits, 'digits'))
+ return section, value
+
+
+def get_value(value):
+ """ quoted-string / attribute
+
+ """
+ v = Value()
+ if not value:
+ raise errors.HeaderParseError("Expected value but found end of string")
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value:
+ raise errors.HeaderParseError("Expected value but found "
+ "only {}".format(leader))
+ if value[0] == '"':
+ token, value = get_quoted_string(value)
+ else:
+ token, value = get_extended_attribute(value)
+ if leader is not None:
+ token[:0] = [leader]
+ v.append(token)
+ return v, value
+
+def get_parameter(value):
+ """ attribute [section] ["*"] [CFWS] "=" value
+
+ The CFWS is implied by the RFC but not made explicit in the BNF. This
+ simplified form of the BNF from the RFC is made to conform with the RFC BNF
+ through some extra checks. We do it this way because it makes both error
+ recovery and working with the resulting parse tree easier.
+ """
+ # It is possible CFWS would also be implicitly allowed between the section
+ # and the 'extended-attribute' marker (the '*') , but we've never seen that
+ # in the wild and we will therefore ignore the possibility.
+ param = Parameter()
+ token, value = get_attribute(value)
+ param.append(token)
+ if not value or value[0] == ';':
+ param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
+ "name ({}) but no value".format(token)))
+ return param, value
+ if value[0] == '*':
+ try:
+ token, value = get_section(value)
+ param.sectioned = True
+ param.append(token)
+ except errors.HeaderParseError:
+ pass
+ if not value:
+ raise errors.HeaderParseError("Incomplete parameter")
+ if value[0] == '*':
+ param.append(ValueTerminal('*', 'extended-parameter-marker'))
+ value = value[1:]
+ param.extended = True
+ if value[0] != '=':
+ raise errors.HeaderParseError("Parameter not followed by '='")
+ param.append(ValueTerminal('=', 'parameter-separator'))
+ value = value[1:]
+ leader = None
+ if value and value[0] in CFWS_LEADER:
+ token, value = get_cfws(value)
+ param.append(token)
+ remainder = None
+ appendto = param
+ if param.extended and value and value[0] == '"':
+ # Now for some serious hackery to handle the common invalid case of
+ # double quotes around an extended value. We also accept (with defect)
+ # a value marked as encoded that isn't really.
+ qstring, remainder = get_quoted_string(value)
+ inner_value = qstring.stripped_value
+ semi_valid = False
+ if param.section_number == 0:
+ if inner_value and inner_value[0] == "'":
+ semi_valid = True
+ else:
+ token, rest = get_attrtext(inner_value)
+ if rest and rest[0] == "'":
+ semi_valid = True
+ else:
+ try:
+ token, rest = get_extended_attrtext(inner_value)
+ except errors.HeaderParseError:
+ pass
+ else:
+ if not rest:
+ semi_valid = True
+ if semi_valid:
+ param.defects.append(errors.InvalidHeaderDefect(
+ "Quoted string value for extended parameter is invalid"))
+ param.append(qstring)
+ for t in qstring:
+ if t.token_type == 'bare-quoted-string':
+ t[:] = []
+ appendto = t
+ break
+ value = inner_value
+ else:
+ remainder = None
+ param.defects.append(errors.InvalidHeaderDefect(
+ "Parameter marked as extended but appears to have a "
+ "quoted string value that is non-encoded"))
+ if value and value[0] == "'":
+ token = None
+ else:
+ token, value = get_value(value)
+ if not param.extended or param.section_number > 0:
+ if not value or value[0] != "'":
+ appendto.append(token)
+ if remainder is not None:
+ assert not value, value
+ value = remainder
+ return param, value
+ param.defects.append(errors.InvalidHeaderDefect(
+ "Apparent initial-extended-value but attribute "
+ "was not marked as extended or was not initial section"))
+ if not value:
+ # Assume the charset/lang is missing and the token is the value.
+ param.defects.append(errors.InvalidHeaderDefect(
+ "Missing required charset/lang delimiters"))
+ appendto.append(token)
+ if remainder is None:
+ return param, value
+ else:
+ if token is not None:
+ for t in token:
+ if t.token_type == 'extended-attrtext':
+ break
+ t.token_type = 'attrtext'
+ appendto.append(t)
+ param.charset = t.value
+ if value[0] != "'":
+ raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
+ "delimiter, but found {!r}".format(value))
+ appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
+ value = value[1:]
+ if value and value[0] != "'":
+ token, value = get_attrtext(value)
+ appendto.append(token)
+ param.lang = token.value
+ if not value or value[0] != "'":
+ raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
+ "delimiter, but found {}".format(value))
+ appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
+ value = value[1:]
+ if remainder is not None:
+ # Treat the rest of value as bare quoted string content.
+ v = Value()
+ while value:
+ if value[0] in WSP:
+ token, value = get_fws(value)
+ else:
+ token, value = get_qcontent(value)
+ v.append(token)
+ token = v
+ else:
+ token, value = get_value(value)
+ appendto.append(token)
+ if remainder is not None:
+ assert not value, value
+ value = remainder
+ return param, value
+
+def parse_mime_parameters(value):
+ """ parameter *( ";" parameter )
+
+ That BNF is meant to indicate this routine should only be called after
+ finding and handling the leading ';'. There is no corresponding rule in
+ the formal RFC grammar, but it is more convenient for us for the set of
+ parameters to be treated as its own TokenList.
+
+ This is a 'parse' routine because it consumes the remaining value, but it
+ would never be called to parse a full header. Instead it is called to
+ parse everything after the non-parameter value of a specific MIME header.
+
+ """
+ mime_parameters = MimeParameters()
+ while value:
+ try:
+ token, value = get_parameter(value)
+ mime_parameters.append(token)
+ except errors.HeaderParseError as err:
+ leader = None
+ if value[0] in CFWS_LEADER:
+ leader, value = get_cfws(value)
+ if not value:
+ mime_parameters.append(leader)
+ return mime_parameters
+ if value[0] == ';':
+ if leader is not None:
+ mime_parameters.append(leader)
+ mime_parameters.defects.append(errors.InvalidHeaderDefect(
+ "parameter entry with no content"))
+ else:
+ token, value = get_invalid_parameter(value)
+ if leader:
+ token[:0] = [leader]
+ mime_parameters.append(token)
+ mime_parameters.defects.append(errors.InvalidHeaderDefect(
+ "invalid parameter {!r}".format(token)))
+ if value and value[0] != ';':
+ # Junk after the otherwise valid parameter. Mark it as
+ # invalid, but it will have a value.
+ param = mime_parameters[-1]
+ param.token_type = 'invalid-parameter'
+ token, value = get_invalid_parameter(value)
+ param.extend(token)
+ mime_parameters.defects.append(errors.InvalidHeaderDefect(
+ "parameter with invalid trailing text {!r}".format(token)))
+ if value:
+ # Must be a ';' at this point.
+ mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
+ value = value[1:]
+ return mime_parameters
+
+def _find_mime_parameters(tokenlist, value):
+ """Do our best to find the parameters in an invalid MIME header
+
+ """
+ while value and value[0] != ';':
+ if value[0] in PHRASE_ENDS:
+ tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
+ value = value[1:]
+ else:
+ token, value = get_phrase(value)
+ tokenlist.append(token)
+ if not value:
+ return
+ tokenlist.append(ValueTerminal(';', 'parameter-separator'))
+ tokenlist.append(parse_mime_parameters(value[1:]))
+
+def parse_content_type_header(value):
+ """ maintype "/" subtype *( ";" parameter )
+
+ The maintype and subtype are tokens. Theoretically they could
+ be checked against the official IANA list + x-token, but we
+ don't do that.
+ """
+ ctype = ContentType()
+ recover = False
+ if not value:
+ ctype.defects.append(errors.HeaderMissingRequiredValue(
+ "Missing content type specification"))
+ return ctype
+ try:
+ token, value = get_token(value)
+ except errors.HeaderParseError:
+ ctype.defects.append(errors.InvalidHeaderDefect(
+ "Expected content maintype but found {!r}".format(value)))
+ _find_mime_parameters(ctype, value)
+ return ctype
+ ctype.append(token)
+ # XXX: If we really want to follow the formal grammar we should make
+ # maintype and subtype specialized TokenLists here. Probably not worth it.
+ if not value or value[0] != '/':
+ ctype.defects.append(errors.InvalidHeaderDefect(
+ "Invalid content type"))
+ if value:
+ _find_mime_parameters(ctype, value)
+ return ctype
+ ctype.maintype = token.value.strip().lower()
+ ctype.append(ValueTerminal('/', 'content-type-separator'))
+ value = value[1:]
+ try:
+ token, value = get_token(value)
+ except errors.HeaderParseError:
+ ctype.defects.append(errors.InvalidHeaderDefect(
+ "Expected content subtype but found {!r}".format(value)))
+ _find_mime_parameters(ctype, value)
+ return ctype
+ ctype.append(token)
+ ctype.subtype = token.value.strip().lower()
+ if not value:
+ return ctype
+ if value[0] != ';':
+ ctype.defects.append(errors.InvalidHeaderDefect(
+ "Only parameters are valid after content type, but "
+ "found {!r}".format(value)))
+ # The RFC requires that a syntactically invalid content-type be treated
+ # as text/plain. Perhaps we should postel this, but we should probably
+ # only do that if we were checking the subtype value against IANA.
+ del ctype.maintype, ctype.subtype
+ _find_mime_parameters(ctype, value)
+ return ctype
+ ctype.append(ValueTerminal(';', 'parameter-separator'))
+ ctype.append(parse_mime_parameters(value[1:]))
+ return ctype
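+
+ # Rough sketch (illustrative; assumes the params property yields decoded
+ # (name, value) pairs, as used by the folder below):
+ #
+ #     ctype = parse_content_type_header('text/plain; charset="utf-8"')
+ #     # ctype.maintype == 'text', ctype.subtype == 'plain'
+ #     # list(ctype.params) == [('charset', 'utf-8')]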
+
+def parse_content_disposition_header(value):
+ """ disposition-type *( ";" parameter )
+
+ """
+ disp_header = ContentDisposition()
+ if not value:
+ disp_header.defects.append(errors.HeaderMissingRequiredValue(
+ "Missing content disposition"))
+ return disp_header
+ try:
+ token, value = get_token(value)
+ except errors.HeaderParseError:
+ disp_header.defects.append(errors.InvalidHeaderDefect(
+ "Expected content disposition but found {!r}".format(value)))
+ _find_mime_parameters(disp_header, value)
+ return disp_header
+ disp_header.append(token)
+ disp_header.content_disposition = token.value.strip().lower()
+ if not value:
+ return disp_header
+ if value[0] != ';':
+ disp_header.defects.append(errors.InvalidHeaderDefect(
+ "Only parameters are valid after content disposition, but "
+ "found {!r}".format(value)))
+ _find_mime_parameters(disp_header, value)
+ return disp_header
+ disp_header.append(ValueTerminal(';', 'parameter-separator'))
+ disp_header.append(parse_mime_parameters(value[1:]))
+ return disp_header
+
+def parse_content_transfer_encoding_header(value):
+ """ mechanism
+
+ """
+ # We should probably validate the values, since the list is fixed.
+ cte_header = ContentTransferEncoding()
+ if not value:
+ cte_header.defects.append(errors.HeaderMissingRequiredValue(
+ "Missing content transfer encoding"))
+ return cte_header
+ try:
+ token, value = get_token(value)
+ except errors.HeaderParseError:
+ cte_header.defects.append(errors.InvalidHeaderDefect(
+ "Expected content transfer encoding but found {!r}".format(value)))
+ else:
+ cte_header.append(token)
+ cte_header.cte = token.value.strip().lower()
+ if not value:
+ return cte_header
+ while value:
+ cte_header.defects.append(errors.InvalidHeaderDefect(
+ "Extra text after content transfer encoding"))
+ if value[0] in PHRASE_ENDS:
+ cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
+ value = value[1:]
+ else:
+ token, value = get_phrase(value)
+ cte_header.append(token)
+ return cte_header
+
+
+#
+# Header folding
+#
+# Header folding is complex, with lots of rules and corner cases. The
+# following code does its best to obey the rules and handle the corner
+ # cases, but you can be sure there are a few bugs :)
+#
+# This folder generally canonicalizes as it goes, preferring the stringified
+# version of each token. The tokens contain information that supports the
+# folder, including which tokens can be encoded in which ways.
+#
+# Folded text is accumulated in a simple list of strings ('lines'), each
+# one of which should be less than policy.max_line_length ('maxlen').
+#
+
+def _steal_trailing_WSP_if_exists(lines):
+ wsp = ''
+ if lines and lines[-1] and lines[-1][-1] in WSP:
+ wsp = lines[-1][-1]
+ lines[-1] = lines[-1][:-1]
+ return wsp
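+
+ # For example (illustrative): the stolen blank becomes the leading FWS of
+ # the continuation line built by the caller:
+ #
+ #     lines = ['Subject: hello ']
+ #     _steal_trailing_WSP_if_exists(lines)   # returns ' '
+ #     # lines is now ['Subject: hello']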
+
+def _refold_parse_tree(parse_tree, *, policy):
+ """Return string of contents of parse_tree folded according to RFC rules.
+
+ """
+ # max_line_length 0/None means no limit, ie: infinitely long.
+ maxlen = policy.max_line_length or float("+inf")
+ encoding = 'utf-8' if policy.utf8 else 'us-ascii'
+ lines = ['']
+ last_ew = None
+ wrap_as_ew_blocked = 0
+ want_encoding = False
+ end_ew_not_allowed = Terminal('', 'wrap_as_ew_blocked')
+ parts = list(parse_tree)
+ while parts:
+ part = parts.pop(0)
+ if part is end_ew_not_allowed:
+ wrap_as_ew_blocked -= 1
+ continue
+ tstr = str(part)
+ try:
+ tstr.encode(encoding)
+ charset = encoding
+ except UnicodeEncodeError:
+ if any(isinstance(x, errors.UndecodableBytesDefect)
+ for x in part.all_defects):
+ charset = 'unknown-8bit'
+ else:
+ # If policy.utf8 is false this should really be taken from a
+ # 'charset' property on the policy.
+ charset = 'utf-8'
+ want_encoding = True
+ if part.token_type == 'mime-parameters':
+ # Mime parameter folding (using RFC2231) is extra special.
+ _fold_mime_parameters(part, lines, maxlen, encoding)
+ continue
+ if want_encoding and not wrap_as_ew_blocked:
+ if not part.as_ew_allowed:
+ want_encoding = False
+ last_ew = None
+ if part.syntactic_break:
+ encoded_part = part.fold(policy=policy)[:-1] # strip nl
+ if policy.linesep not in encoded_part:
+ # It fits on a single line
+ if len(encoded_part) > maxlen - len(lines[-1]):
+ # But not on this one, so start a new one.
+ newline = _steal_trailing_WSP_if_exists(lines)
+ # XXX what if encoded_part has no leading FWS?
+ lines.append(newline)
+ lines[-1] += encoded_part
+ continue
+ # Either this is not a major syntactic break, so we don't
+ # want it on a line by itself even if it fits, or it
+ # doesn't fit on a line by itself. Either way, fall through
+ # to unpacking the subparts and wrapping them.
+ if not hasattr(part, 'encode'):
+ # It's not a Terminal, do each piece individually.
+ parts = list(part) + parts
+ else:
+ # It's a terminal, wrap it as an encoded word, possibly
+ # combining it with previously encoded words if allowed.
+ last_ew = _fold_as_ew(tstr, lines, maxlen, last_ew,
+ part.ew_combine_allowed, charset)
+ want_encoding = False
+ continue
+ if len(tstr) <= maxlen - len(lines[-1]):
+ lines[-1] += tstr
+ continue
+ # This part is too long to fit. The RFC wants us to break at
+ # "major syntactic breaks", so unless we don't consider this
+ # to be one, check if it will fit on the next line by itself.
+ if (part.syntactic_break and
+ len(tstr) + 1 <= maxlen):
+ newline = _steal_trailing_WSP_if_exists(lines)
+ if newline or part.startswith_fws():
+ lines.append(newline + tstr)
+ continue
+ if not hasattr(part, 'encode'):
+ # It's not a terminal, try folding the subparts.
+ newparts = list(part)
+ if not part.as_ew_allowed:
+ wrap_as_ew_blocked += 1
+ newparts.append(end_ew_not_allowed)
+ parts = newparts + parts
+ continue
+ if part.as_ew_allowed and not wrap_as_ew_blocked:
+ # It doesn't need CTE encoding, but encode it anyway so we can
+ # wrap it.
+ parts.insert(0, part)
+ want_encoding = True
+ continue
+ # We can't figure out how to wrap it, so give up.
+ newline = _steal_trailing_WSP_if_exists(lines)
+ if newline or part.startswith_fws():
+ lines.append(newline + tstr)
+ else:
+ # We can't fold it onto the next line either...
+ lines[-1] += tstr
+ return policy.linesep.join(lines) + policy.linesep
+
+def _fold_as_ew(to_encode, lines, maxlen, last_ew, ew_combine_allowed, charset):
+ """Fold string to_encode into lines as encoded word, combining if allowed.
+ Return the new value for last_ew, or None if ew_combine_allowed is False.
+
+ If there is already an encoded word in the last line of lines (indicated by
+ a non-None value for last_ew) and ew_combine_allowed is true, decode the
+ existing ew, combine it with to_encode, and re-encode. Otherwise, encode
+ to_encode. In either case, split to_encode as necessary so that the
+ encoded segments fit within maxlen.
+
+ """
+ if last_ew is not None and ew_combine_allowed:
+ to_encode = str(
+ get_unstructured(lines[-1][last_ew:] + to_encode))
+ lines[-1] = lines[-1][:last_ew]
+ if to_encode[0] in WSP:
+ # We're joining this to non-encoded text, so don't encode
+ # the leading blank.
+ leading_wsp = to_encode[0]
+ to_encode = to_encode[1:]
+ if len(lines[-1]) == maxlen:
+ lines.append(_steal_trailing_WSP_if_exists(lines))
+ lines[-1] += leading_wsp
+ trailing_wsp = ''
+ if to_encode[-1] in WSP:
+ # Likewise for the trailing space.
+ trailing_wsp = to_encode[-1]
+ to_encode = to_encode[:-1]
+ new_last_ew = len(lines[-1]) if last_ew is None else last_ew
+ while to_encode:
+ remaining_space = maxlen - len(lines[-1])
+ # The RFC2047 chrome takes up 7 characters plus the length
+ # of the charset name.
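+ # (e.g. in "=?utf-8?q?text?=" the "=?", "?q?" and "?=" pieces total 7.)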
+ encode_as = 'utf-8' if charset == 'us-ascii' else charset
+ text_space = remaining_space - len(encode_as) - 7
+ if text_space <= 0:
+ lines.append(' ')
+ # XXX We'll get an infinite loop here if maxlen is <= 7
+ continue
+ first_part = to_encode[:text_space]
+ ew = _ew.encode(first_part, charset=encode_as)
+ excess = len(ew) - remaining_space
+ if excess > 0:
+ # encode always chooses the shortest encoding, so this
+ # is guaranteed to fit at this point.
+ first_part = first_part[:-excess]
+ ew = _ew.encode(first_part)
+ lines[-1] += ew
+ to_encode = to_encode[len(first_part):]
+ if to_encode:
+ lines.append(' ')
+ new_last_ew = len(lines[-1])
+ lines[-1] += trailing_wsp
+ return new_last_ew if ew_combine_allowed else None
+
+def _fold_mime_parameters(part, lines, maxlen, encoding):
+ """Fold TokenList 'part' into the 'lines' list as mime parameters.
+
+ Using the decoded list of parameters and values, format them according to
+ the RFC rules, including using RFC2231 encoding if the value cannot be
+ expressed in 'encoding' and/or the parameter+value is too long to fit
+ within 'maxlen'.
+
+ """
+ # Special case for RFC2231 encoding: start from decoded values and use
+ # RFC2231 encoding iff needed.
+ #
+ # Note that the 1 and 2s being added to the length calculations are
+ # accounting for the possibly-needed spaces and semicolons we'll be adding.
+ #
+ for name, value in part.params:
+ # XXX What if this ';' puts us over maxlen the first time through the
+ # loop? We should split the header value onto a newline in that case,
+ # but to do that we need to recognize the need earlier or reparse the
+ # header, so I'm going to ignore that bug for now. It'll only put us
+ # one character over.
+ if not lines[-1].rstrip().endswith(';'):
+ lines[-1] += ';'
+ charset = encoding
+ error_handler = 'strict'
+ try:
+ value.encode(encoding)
+ encoding_required = False
+ except UnicodeEncodeError:
+ encoding_required = True
+ if utils._has_surrogates(value):
+ charset = 'unknown-8bit'
+ error_handler = 'surrogateescape'
+ else:
+ charset = 'utf-8'
+ if encoding_required:
+ encoded_value = urllib.parse.quote(
+ value, safe='', errors=error_handler)
+ tstr = "{}*={}''{}".format(name, charset, encoded_value)
+ else:
+ tstr = '{}={}'.format(name, quote_string(value))
+ if len(lines[-1]) + len(tstr) + 1 < maxlen:
+ lines[-1] = lines[-1] + ' ' + tstr
+ continue
+ elif len(tstr) + 2 <= maxlen:
+ lines.append(' ' + tstr)
+ continue
+ # We need multiple sections. We are allowed to mix encoded and
+ # non-encoded sections, but we aren't going to. We'll encode them all.
+ section = 0
+ extra_chrome = charset + "''"
+ while value:
+ chrome_len = len(name) + len(str(section)) + 3 + len(extra_chrome)
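+ # The 3 covers the '*', '*', and '=' literals in a section of the form
+ # " name*N*=charset''percent-encoded-value" (the leading blank and the
+ # trailing ';' are accounted for separately below).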
+ if maxlen <= chrome_len + 3:
+ # We need room for the leading blank, the trailing semicolon,
+ # and at least one character of the value. If we don't
+ # have that, we'd be stuck, so in that case fall back to
+ # the RFC standard width.
+ maxlen = 78
+ splitpoint = maxchars = maxlen - chrome_len - 2
+ while True:
+ partial = value[:splitpoint]
+ encoded_value = urllib.parse.quote(
+ partial, safe='', errors=error_handler)
+ if len(encoded_value) <= maxchars:
+ break
+ splitpoint -= 1
+ lines.append(" {}*{}*={}{}".format(
+ name, section, extra_chrome, encoded_value))
+ extra_chrome = ''
+ section += 1
+ value = value[splitpoint:]
+ if value:
+ lines[-1] += ';'
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_parseaddr.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_parseaddr.py
new file mode 100644
index 00000000..cdfa3729
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_parseaddr.py
@@ -0,0 +1,540 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Contact: email-sig@python.org
+
+"""Email address parsing code.
+
+Lifted directly from rfc822.py. This should eventually be rewritten.
+"""
+
+__all__ = [
+ 'mktime_tz',
+ 'parsedate',
+ 'parsedate_tz',
+ 'quote',
+ ]
+
+import time, calendar
+
+SPACE = ' '
+EMPTYSTRING = ''
+COMMASPACE = ', '
+
+# Parse a date field
+_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
+ 'aug', 'sep', 'oct', 'nov', 'dec',
+ 'january', 'february', 'march', 'april', 'may', 'june', 'july',
+ 'august', 'september', 'october', 'november', 'december']
+
+_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
+
+# The timezone table does not include the military time zones defined
+# in RFC822, other than Z. According to RFC1123, the description in
+# RFC822 gets the signs wrong, so we can't rely on any such time
+# zones. RFC1123 recommends that numeric timezone indicators be used
+# instead of timezone names.
+
+_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
+ 'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
+ 'EST': -500, 'EDT': -400, # Eastern
+ 'CST': -600, 'CDT': -500, # Central
+ 'MST': -700, 'MDT': -600, # Mountain
+ 'PST': -800, 'PDT': -700 # Pacific
+ }
+
+
+def parsedate_tz(data):
+ """Convert a date string to a time tuple.
+
+ Accounts for military timezones.
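+
+ For example (illustrative):
+
+ >>> parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0500')
+ (2001, 11, 9, 1, 8, 47, 0, 1, -1, -18000)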
+ """
+ res = _parsedate_tz(data)
+ if not res:
+ return
+ if res[9] is None:
+ res[9] = 0
+ return tuple(res)
+
+def _parsedate_tz(data):
+ """Convert date to extended time tuple.
+
+ The last (additional) element is the time zone offset in seconds, except if
+ the timezone was specified as -0000. In that case the last element is
+ None. This indicates a UTC timestamp that explicitly declaims knowledge of
+ the source timezone, as opposed to a +0000 timestamp that indicates the
+ source timezone really was UTC.
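+
+ For example (illustrative), '-0500' yields an offset of -18000 seconds,
+ while '-0000' yields None:
+
+ >>> _parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0500')[-1]
+ -18000
+ >>> _parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000')[-1] is None
+ True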
+
+ """
+ if not data:
+ return
+ data = data.split()
+ # The FWS after the comma after the day-of-week is optional, so search and
+ # adjust for this.
+ if data[0].endswith(',') or data[0].lower() in _daynames:
+ # There's a dayname here. Skip it
+ del data[0]
+ else:
+ i = data[0].rfind(',')
+ if i >= 0:
+ data[0] = data[0][i+1:]
+ if len(data) == 3: # RFC 850 date, deprecated
+ stuff = data[0].split('-')
+ if len(stuff) == 3:
+ data = stuff + data[1:]
+ if len(data) == 4:
+ s = data[3]
+ i = s.find('+')
+ if i == -1:
+ i = s.find('-')
+ if i > 0:
+ data[3:] = [s[:i], s[i:]]
+ else:
+ data.append('') # Dummy tz
+ if len(data) < 5:
+ return None
+ data = data[:5]
+ [dd, mm, yy, tm, tz] = data
+ mm = mm.lower()
+ if mm not in _monthnames:
+ dd, mm = mm, dd.lower()
+ if mm not in _monthnames:
+ return None
+ mm = _monthnames.index(mm) + 1
+ if mm > 12:
+ mm -= 12
+ if dd[-1] == ',':
+ dd = dd[:-1]
+ i = yy.find(':')
+ if i > 0:
+ yy, tm = tm, yy
+ if yy[-1] == ',':
+ yy = yy[:-1]
+ if not yy[0].isdigit():
+ yy, tz = tz, yy
+ if tm[-1] == ',':
+ tm = tm[:-1]
+ tm = tm.split(':')
+ if len(tm) == 2:
+ [thh, tmm] = tm
+ tss = '0'
+ elif len(tm) == 3:
+ [thh, tmm, tss] = tm
+ elif len(tm) == 1 and '.' in tm[0]:
+ # Some non-compliant MUAs use '.' to separate time elements.
+ tm = tm[0].split('.')
+ if len(tm) == 2:
+ [thh, tmm] = tm
+ tss = 0
+ elif len(tm) == 3:
+ [thh, tmm, tss] = tm
+ else:
+ return None
+ try:
+ yy = int(yy)
+ dd = int(dd)
+ thh = int(thh)
+ tmm = int(tmm)
+ tss = int(tss)
+ except ValueError:
+ return None
+ # Check for a yy specified in two-digit format, then convert it to the
+ # appropriate four-digit format, according to the POSIX standard. RFC 822
+ # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
+ # mandates a 4-digit yy. For more information, see the documentation for
+ # the time module.
+ if yy < 100:
+ # The year is between 1969 and 1999 (inclusive).
+ if yy > 68:
+ yy += 1900
+ # The year is between 2000 and 2068 (inclusive).
+ else:
+ yy += 2000
+ tzoffset = None
+ tz = tz.upper()
+ if tz in _timezones:
+ tzoffset = _timezones[tz]
+ else:
+ try:
+ tzoffset = int(tz)
+ except ValueError:
+ pass
+ if tzoffset==0 and tz.startswith('-'):
+ tzoffset = None
+ # Convert a timezone offset into seconds; -0500 -> -18000
+ if tzoffset:
+ if tzoffset < 0:
+ tzsign = -1
+ tzoffset = -tzoffset
+ else:
+ tzsign = 1
+ tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
+ # Daylight Saving Time flag is set to -1, since DST is unknown.
+ return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
+
+
+def parsedate(data):
+ """Convert a time string to a time tuple."""
+ t = parsedate_tz(data)
+ if isinstance(t, tuple):
+ return t[:9]
+ else:
+ return t
+
+
+def mktime_tz(data):
+ """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
+ if data[9] is None:
+ # No zone info, so localtime is better assumption than GMT
+ return time.mktime(data[:8] + (-1,))
+ else:
+ t = calendar.timegm(data)
+ return t - data[9]
+
+
+def quote(str):
+ """Prepare string to be used in a quoted string.
+
+ Turns backslash and double quote characters into quoted pairs. These
+ are the only characters that need to be quoted inside a quoted string.
+ Does not add the surrounding double quotes.
+ """
+ return str.replace('\\', '\\\\').replace('"', '\\"')
+
+
+class AddrlistClass:
+ """Address parser class by Ben Escoto.
+
+ To understand what this class does, it helps to have a copy of RFC 2822 in
+ front of you.
+
+ Note: this class interface is deprecated and may be removed in the future.
+ Use email.utils.AddressList instead.
+ """
+
+ def __init__(self, field):
+ """Initialize a new instance.
+
+ `field' is an unparsed address header field, containing
+ one or more addresses.
+ """
+ self.specials = '()<>@,:;.\"[]'
+ self.pos = 0
+ self.LWS = ' \t'
+ self.CR = '\r\n'
+ self.FWS = self.LWS + self.CR
+ self.atomends = self.specials + self.LWS + self.CR
+ # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
+ # is obsolete syntax. RFC 2822 requires that we recognize obsolete
+ # syntax, so allow dots in phrases.
+ self.phraseends = self.atomends.replace('.', '')
+ self.field = field
+ self.commentlist = []
+
+ def gotonext(self):
+ """Skip white space and extract comments."""
+ wslist = []
+ while self.pos < len(self.field):
+ if self.field[self.pos] in self.LWS + '\n\r':
+ if self.field[self.pos] not in '\n\r':
+ wslist.append(self.field[self.pos])
+ self.pos += 1
+ elif self.field[self.pos] == '(':
+ self.commentlist.append(self.getcomment())
+ else:
+ break
+ return EMPTYSTRING.join(wslist)
+
+ def getaddrlist(self):
+ """Parse all addresses.
+
+ Returns a list containing all of the addresses.
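+
+ For example (illustrative; the addresses are made up):
+
+ >>> AddrlistClass('Foo Bar <foo@example.com>, baz@example.com').getaddrlist()
+ [('Foo Bar', 'foo@example.com'), ('', 'baz@example.com')]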
+ """
+ result = []
+ while self.pos < len(self.field):
+ ad = self.getaddress()
+ if ad:
+ result += ad
+ else:
+ result.append(('', ''))
+ return result
+
+ def getaddress(self):
+ """Parse the next address."""
+ self.commentlist = []
+ self.gotonext()
+
+ oldpos = self.pos
+ oldcl = self.commentlist
+ plist = self.getphraselist()
+
+ self.gotonext()
+ returnlist = []
+
+ if self.pos >= len(self.field):
+ # Bad email address technically, no domain.
+ if plist:
+ returnlist = [(SPACE.join(self.commentlist), plist[0])]
+
+ elif self.field[self.pos] in '.@':
+ # email address is just an addrspec
+ # this isn't very efficient since we start over
+ self.pos = oldpos
+ self.commentlist = oldcl
+ addrspec = self.getaddrspec()
+ returnlist = [(SPACE.join(self.commentlist), addrspec)]
+
+ elif self.field[self.pos] == ':':
+ # address is a group
+ returnlist = []
+
+ fieldlen = len(self.field)
+ self.pos += 1
+ while self.pos < len(self.field):
+ self.gotonext()
+ if self.pos < fieldlen and self.field[self.pos] == ';':
+ self.pos += 1
+ break
+ returnlist = returnlist + self.getaddress()
+
+ elif self.field[self.pos] == '<':
+ # Address is a phrase then a route addr
+ routeaddr = self.getrouteaddr()
+
+ if self.commentlist:
+ returnlist = [(SPACE.join(plist) + ' (' +
+ ' '.join(self.commentlist) + ')', routeaddr)]
+ else:
+ returnlist = [(SPACE.join(plist), routeaddr)]
+
+ else:
+ if plist:
+ returnlist = [(SPACE.join(self.commentlist), plist[0])]
+ elif self.field[self.pos] in self.specials:
+ self.pos += 1
+
+ self.gotonext()
+ if self.pos < len(self.field) and self.field[self.pos] == ',':
+ self.pos += 1
+ return returnlist
+
+ def getrouteaddr(self):
+ """Parse a route address (Return-path value).
+
+ This method just skips all the route stuff and returns the addrspec.
+ """
+ if self.field[self.pos] != '<':
+ return
+
+ expectroute = False
+ self.pos += 1
+ self.gotonext()
+ adlist = ''
+ while self.pos < len(self.field):
+ if expectroute:
+ self.getdomain()
+ expectroute = False
+ elif self.field[self.pos] == '>':
+ self.pos += 1
+ break
+ elif self.field[self.pos] == '@':
+ self.pos += 1
+ expectroute = True
+ elif self.field[self.pos] == ':':
+ self.pos += 1
+ else:
+ adlist = self.getaddrspec()
+ self.pos += 1
+ break
+ self.gotonext()
+
+ return adlist
+
+ def getaddrspec(self):
+ """Parse an RFC 2822 addr-spec."""
+ aslist = []
+
+ self.gotonext()
+ while self.pos < len(self.field):
+ preserve_ws = True
+ if self.field[self.pos] == '.':
+ if aslist and not aslist[-1].strip():
+ aslist.pop()
+ aslist.append('.')
+ self.pos += 1
+ preserve_ws = False
+ elif self.field[self.pos] == '"':
+ aslist.append('"%s"' % quote(self.getquote()))
+ elif self.field[self.pos] in self.atomends:
+ if aslist and not aslist[-1].strip():
+ aslist.pop()
+ break
+ else:
+ aslist.append(self.getatom())
+ ws = self.gotonext()
+ if preserve_ws and ws:
+ aslist.append(ws)
+
+ if self.pos >= len(self.field) or self.field[self.pos] != '@':
+ return EMPTYSTRING.join(aslist)
+
+ aslist.append('@')
+ self.pos += 1
+ self.gotonext()
+ return EMPTYSTRING.join(aslist) + self.getdomain()
+
+ def getdomain(self):
+ """Get the complete domain name from an address."""
+ sdlist = []
+ while self.pos < len(self.field):
+ if self.field[self.pos] in self.LWS:
+ self.pos += 1
+ elif self.field[self.pos] == '(':
+ self.commentlist.append(self.getcomment())
+ elif self.field[self.pos] == '[':
+ sdlist.append(self.getdomainliteral())
+ elif self.field[self.pos] == '.':
+ self.pos += 1
+ sdlist.append('.')
+ elif self.field[self.pos] in self.atomends:
+ break
+ else:
+ sdlist.append(self.getatom())
+ return EMPTYSTRING.join(sdlist)
+
+ def getdelimited(self, beginchar, endchars, allowcomments=True):
+ """Parse a header fragment delimited by special characters.
+
+ `beginchar' is the start character for the fragment.
+ If self is not looking at an instance of `beginchar' then
+ getdelimited returns the empty string.
+
+ `endchars' is a sequence of allowable end-delimiting characters.
+ Parsing stops when one of these is encountered.
+
+ If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
+ within the parsed fragment.
+ """
+ if self.field[self.pos] != beginchar:
+ return ''
+
+ slist = ['']
+ quote = False
+ self.pos += 1
+ while self.pos < len(self.field):
+ if quote:
+ slist.append(self.field[self.pos])
+ quote = False
+ elif self.field[self.pos] in endchars:
+ self.pos += 1
+ break
+ elif allowcomments and self.field[self.pos] == '(':
+ slist.append(self.getcomment())
+ continue # have already advanced pos from getcomment
+ elif self.field[self.pos] == '\\':
+ quote = True
+ else:
+ slist.append(self.field[self.pos])
+ self.pos += 1
+
+ return EMPTYSTRING.join(slist)
+
+ def getquote(self):
+ """Get a quote-delimited fragment from self's field."""
+ return self.getdelimited('"', '"\r', False)
+
+ def getcomment(self):
+ """Get a parenthesis-delimited fragment from self's field."""
+ return self.getdelimited('(', ')\r', True)
+
+ def getdomainliteral(self):
+ """Parse an RFC 2822 domain-literal."""
+ return '[%s]' % self.getdelimited('[', ']\r', False)
+
+ def getatom(self, atomends=None):
+ """Parse an RFC 2822 atom.
+
+ Optional atomends specifies a different set of end token delimiters
+ (the default is to use self.atomends). This is used e.g. in
+ getphraselist() since phrase endings must not include the `.' (which
+ is legal in phrases)."""
+ atomlist = ['']
+ if atomends is None:
+ atomends = self.atomends
+
+ while self.pos < len(self.field):
+ if self.field[self.pos] in atomends:
+ break
+ else:
+ atomlist.append(self.field[self.pos])
+ self.pos += 1
+
+ return EMPTYSTRING.join(atomlist)
+
+ def getphraselist(self):
+ """Parse a sequence of RFC 2822 phrases.
+
+ A phrase is a sequence of words, which are in turn either RFC 2822
+ atoms or quoted-strings. Phrases are canonicalized by squeezing all
+ runs of continuous whitespace into one space.
+ """
+ plist = []
+
+ while self.pos < len(self.field):
+ if self.field[self.pos] in self.FWS:
+ self.pos += 1
+ elif self.field[self.pos] == '"':
+ plist.append(self.getquote())
+ elif self.field[self.pos] == '(':
+ self.commentlist.append(self.getcomment())
+ elif self.field[self.pos] in self.phraseends:
+ break
+ else:
+ plist.append(self.getatom(self.phraseends))
+
+ return plist
+
+class AddressList(AddrlistClass):
+ """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
+ def __init__(self, field):
+ AddrlistClass.__init__(self, field)
+ if field:
+ self.addresslist = self.getaddrlist()
+ else:
+ self.addresslist = []
+
+ def __len__(self):
+ return len(self.addresslist)
+
+ def __add__(self, other):
+ # Set union
+ newaddr = AddressList(None)
+ newaddr.addresslist = self.addresslist[:]
+ for x in other.addresslist:
+ if x not in self.addresslist:
+ newaddr.addresslist.append(x)
+ return newaddr
+
+ def __iadd__(self, other):
+ # Set union, in-place
+ for x in other.addresslist:
+ if x not in self.addresslist:
+ self.addresslist.append(x)
+ return self
+
+ def __sub__(self, other):
+ # Set difference
+ newaddr = AddressList(None)
+ for x in self.addresslist:
+ if x not in other.addresslist:
+ newaddr.addresslist.append(x)
+ return newaddr
+
+ def __isub__(self, other):
+ # Set difference, in-place
+ for x in other.addresslist:
+ if x in self.addresslist:
+ self.addresslist.remove(x)
+ return self
+
+ def __getitem__(self, index):
+ # Make indexing, slices, and 'in' work
+ return self.addresslist[index]
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_policybase.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_policybase.py
new file mode 100644
index 00000000..c9cbadd2
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/_policybase.py
@@ -0,0 +1,374 @@
+"""Policy framework for the email package.
+
+Allows fine grained feature control of how the package parses and emits data.
+"""
+
+import abc
+from email import header
+from email import charset as _charset
+from email.utils import _has_surrogates
+
+__all__ = [
+ 'Policy',
+ 'Compat32',
+ 'compat32',
+ ]
+
+
+class _PolicyBase:
+
+ """Policy Object basic framework.
+
+ This class is useless unless subclassed. A subclass should define
+ class attributes with defaults for any values that are to be
+ managed by the Policy object. The constructor will then allow
+ non-default values to be set for these attributes at instance
+ creation time. The instance will be callable, taking these same
+ attributes as keyword arguments, and returning a new instance
+ identical to the called instance except for those values changed
+ by the keyword arguments. Instances may be added, yielding new
+ instances with any non-default values from the right hand
+ operand overriding those in the left hand operand. That is,
+
+ A + B == A(<non-default values of B>)
+
+ The repr of an instance can be used to reconstruct the object
+ if and only if the repr of the values can be used to reconstruct
+ those values.
+
+ """
+
+ def __init__(self, **kw):
+ """Create new Policy, possibly overriding some defaults.
+
+ See class docstring for a list of overridable attributes.
+
+ """
+ for name, value in kw.items():
+ if hasattr(self, name):
+ super(_PolicyBase, self).__setattr__(name, value)
+ else:
+ raise TypeError(
+ "{!r} is an invalid keyword argument for {}".format(
+ name, self.__class__.__name__))
+
+ def __repr__(self):
+ args = [ "{}={!r}".format(name, value)
+ for name, value in self.__dict__.items() ]
+ return "{}({})".format(self.__class__.__name__, ', '.join(args))
+
+ def clone(self, **kw):
+ """Return a new instance with specified attributes changed.
+
+ The new instance has the same attribute values as the current object,
+ except for the changes passed in as keyword arguments.
+
+ """
+ newpolicy = self.__class__.__new__(self.__class__)
+ for attr, value in self.__dict__.items():
+ object.__setattr__(newpolicy, attr, value)
+ for attr, value in kw.items():
+ if not hasattr(self, attr):
+ raise TypeError(
+ "{!r} is an invalid keyword argument for {}".format(
+ attr, self.__class__.__name__))
+ object.__setattr__(newpolicy, attr, value)
+ return newpolicy
+
+ def __setattr__(self, name, value):
+ if hasattr(self, name):
+ msg = "{!r} object attribute {!r} is read-only"
+ else:
+ msg = "{!r} object has no attribute {!r}"
+ raise AttributeError(msg.format(self.__class__.__name__, name))
+
+ def __add__(self, other):
+ """Non-default values from right operand override those from left.
+
+ The object returned is a new instance of the subclass.
+
+ """
+ return self.clone(**other.__dict__)
+
+
+def _append_doc(doc, added_doc):
+ doc = doc.rsplit('\n', 1)[0]
+ added_doc = added_doc.split('\n', 1)[1]
+ return doc + '\n' + added_doc
+
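+# A docstring that starts with '+' (see Compat32 below) is a continuation:
+# _extend_docstrings drops the '+' marker line and appends the remainder to
+# the docstring inherited from the corresponding base class attribute.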
+def _extend_docstrings(cls):
+ if cls.__doc__ and cls.__doc__.startswith('+'):
+ cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
+ for name, attr in cls.__dict__.items():
+ if attr.__doc__ and attr.__doc__.startswith('+'):
+ for c in (c for base in cls.__bases__ for c in base.mro()):
+ doc = getattr(getattr(c, name), '__doc__')
+ if doc:
+ attr.__doc__ = _append_doc(doc, attr.__doc__)
+ break
+ return cls
+
+
+class Policy(_PolicyBase, metaclass=abc.ABCMeta):
+
+ r"""Controls for how messages are interpreted and formatted.
+
+ Most of the classes and many of the methods in the email package accept
+ Policy objects as parameters. A Policy object contains a set of values and
+ functions that control how input is interpreted and how output is rendered.
+ For example, the parameter 'raise_on_defect' controls whether an RFC
+ violation results in an error being raised, while 'max_line_length'
+ controls the maximum length of output lines when a Message is serialized.
+
+ Any valid attribute may be overridden when a Policy is created by passing
+ it as a keyword argument to the constructor. Policy objects are immutable,
+ but a new Policy object can be created with only certain values changed by
+ calling the Policy instance with keyword arguments. Policy objects can
+ also be added, producing a new Policy object in which the non-default
+ attributes set in the right hand operand overwrite those specified in the
+ left operand.
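+
+ For example (an illustrative sketch using the compat32 instance defined at
+ the bottom of this module):
+
+ >>> lenient = compat32.clone(max_line_length=64)
+ >>> strict = compat32.clone(raise_on_defect=True)
+ >>> combined = lenient + strict
+ >>> (combined.max_line_length, combined.raise_on_defect)
+ (64, True)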
+
+ Settable attributes:
+
+ raise_on_defect -- If true, then defects should be raised as errors.
+ Default: False.
+
+ linesep -- string containing the value to use as separation
+ between output lines. Default '\n'.
+
+ cte_type -- Type of allowed content transfer encodings
+
+ 7bit -- ASCII only
+ 8bit -- Content-Transfer-Encoding: 8bit is allowed
+
+ Default: 8bit. Also controls the disposition of
+ (RFC invalid) binary data in headers; see the
+ documentation of the binary_fold method.
+
+ max_line_length -- maximum length of lines, excluding 'linesep',
+ during serialization. None or 0 means no line
+ wrapping is done. Default is 78.
+
+ mangle_from_ -- a flag that, when True, escapes From_ lines in the
+ body of the message by putting a `>' in front of
+ them. This is used when the message is being
+ serialized by a generator. Default: True.
+
+ message_factory -- the class to use to create new message objects.
+ If the value is None, the default is Message.
+
+ """
+
+ raise_on_defect = False
+ linesep = '\n'
+ cte_type = '8bit'
+ max_line_length = 78
+ mangle_from_ = False
+ message_factory = None
+
+ def handle_defect(self, obj, defect):
+ """Based on policy, either raise defect or call register_defect.
+
+ handle_defect(obj, defect)
+
+ defect should be a Defect subclass, but in any case must be an
+ Exception subclass. obj is the object on which the defect should be
+ registered if it is not raised. If raise_on_defect is True, the
+ defect is raised as an error, otherwise the object and the defect are
+ passed to register_defect.
+
+ This method is intended to be called by parsers that discover defects.
+ The email package parsers always call it with Defect instances.
+
+ """
+ if self.raise_on_defect:
+ raise defect
+ self.register_defect(obj, defect)
+
+ def register_defect(self, obj, defect):
+ """Record 'defect' on 'obj'.
+
+ Called by handle_defect if raise_on_defect is False. This method is
+ part of the Policy API so that Policy subclasses can implement custom
+ defect handling. The default implementation calls the append method of
+ the defects attribute of obj. The objects used by the email package by
+ default that get passed to this method will always have a defects
+ attribute with an append method.
+
+ """
+ obj.defects.append(defect)
+
+ def header_max_count(self, name):
+ """Return the maximum allowed number of headers named 'name'.
+
+ Called when a header is added to a Message object. If the returned
+ value is not 0 or None, and there are already a number of headers with
+ the name 'name' equal to the value returned, a ValueError is raised.
+
+ Because the default behavior of Message's __setitem__ is to append the
+ value to the list of headers, it is easy to create duplicate headers
+ without realizing it. This method allows certain headers to be limited
+ in the number of instances of that header that may be added to a
+ Message programmatically. (The limit is not observed by the parser,
+ which will faithfully produce as many headers as exist in the message
+ being parsed.)
+
+ The default implementation returns None for all header names.
+ """
+ return None
+
+ @abc.abstractmethod
+ def header_source_parse(self, sourcelines):
+ """Given a list of linesep terminated strings constituting the lines of
+ a single header, return the (name, value) tuple that should be stored
+ in the model. The input lines should retain their terminating linesep
+ characters. The lines passed in by the email package may contain
+ surrogateescaped binary data.
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def header_store_parse(self, name, value):
+ """Given the header name and the value provided by the application
+ program, return the (name, value) that should be stored in the model.
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def header_fetch_parse(self, name, value):
+ """Given the header name and the value from the model, return the value
+ to be returned to the application program that is requesting that
+ header. The value passed in by the email package may contain
+ surrogateescaped binary data if the lines were parsed by a BytesParser.
+ The returned value should not contain any surrogateescaped data.
+
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def fold(self, name, value):
+ """Given the header name and the value from the model, return a string
+ containing linesep characters that implement the folding of the header
+ according to the policy controls. The value passed in by the email
+ package may contain surrogateescaped binary data if the lines were
+ parsed by a BytesParser. The returned value should not contain any
+ surrogateescaped data.
+
+ """
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def fold_binary(self, name, value):
+ """Given the header name and the value from the model, return binary
+ data containing linesep characters that implement the folding of the
+ header according to the policy controls. The value passed in by the
+ email package may contain surrogateescaped binary data.
+
+ """
+ raise NotImplementedError
+
+
+@_extend_docstrings
+class Compat32(Policy):
+
+ """+
+ This particular policy is the backward compatibility Policy. It
+ replicates the behavior of the email package version 5.1.
+ """
+
+ mangle_from_ = True
+
+ def _sanitize_header(self, name, value):
+ # If the header value contains surrogates, return a Header using
+ # the unknown-8bit charset to encode the bytes as encoded words.
+ if not isinstance(value, str):
+ # Assume it is already a header object
+ return value
+ if _has_surrogates(value):
+ return header.Header(value, charset=_charset.UNKNOWN8BIT,
+ header_name=name)
+ else:
+ return value
+
+ def header_source_parse(self, sourcelines):
+ """+
+ The name is parsed as everything up to the ':' and returned unmodified.
+ The value is determined by stripping leading whitespace off the
+ remainder of the first line, joining all subsequent lines together, and
+ stripping any trailing carriage return or linefeed characters.
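+
+ For example (illustrative):
+
+ >>> compat32.header_source_parse(['Subject: hello world'])
+ ('Subject', 'hello world')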
+
+ """
+ name, value = sourcelines[0].split(':', 1)
+ value = value.lstrip(' \t') + ''.join(sourcelines[1:])
+ return (name, value.rstrip('\r\n'))
+
+ def header_store_parse(self, name, value):
+ """+
+ The name and value are returned unmodified.
+ """
+ return (name, value)
+
+ def header_fetch_parse(self, name, value):
+ """+
+ If the value contains binary data, it is converted into a Header object
+ using the unknown-8bit charset. Otherwise it is returned unmodified.
+ """
+ return self._sanitize_header(name, value)
+
+ def fold(self, name, value):
+ """+
+ Headers are folded using the Header folding algorithm, which preserves
+ existing line breaks in the value, and wraps each resulting line to the
+ max_line_length. Non-ASCII binary data are CTE encoded using the
+ unknown-8bit charset.
+
+ """
+ return self._fold(name, value, sanitize=True)
+
+ def fold_binary(self, name, value):
+ """+
+ Headers are folded using the Header folding algorithm, which preserves
+ existing line breaks in the value, and wraps each resulting line to the
+ max_line_length. If cte_type is 7bit, non-ascii binary data is CTE
+ encoded using the unknown-8bit charset. Otherwise the original source
+ header is used, with its existing line breaks and/or binary data.
+
+ """
+ folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
+ return folded.encode('ascii', 'surrogateescape')
+
+ def _fold(self, name, value, sanitize):
+ parts = []
+ parts.append('%s: ' % name)
+ if isinstance(value, str):
+ if _has_surrogates(value):
+ if sanitize:
+ h = header.Header(value,
+ charset=_charset.UNKNOWN8BIT,
+ header_name=name)
+ else:
+ # If we have raw 8bit data in a byte string, we have no idea
+ # what the encoding is. There is no safe way to split this
+ # string. If it's ascii-subset, then we could do a normal
+ # ascii split, but if it's multibyte then we could break the
+ # string. There's no way to know so the least harm seems to
+ # be to not split the string and risk it being too long.
+ parts.append(value)
+ h = None
+ else:
+ h = header.Header(value, header_name=name)
+ else:
+ # Assume it is a Header-like object.
+ h = value
+ if h is not None:
+ # The Header class interprets a value of None for maxlinelen as the
+ # default value of 78, as recommended by RFC 2822.
+ maxlinelen = 0
+ if self.max_line_length is not None:
+ maxlinelen = self.max_line_length
+ parts.append(h.encode(linesep=self.linesep, maxlinelen=maxlinelen))
+ parts.append(self.linesep)
+ return ''.join(parts)
+
+
+compat32 = Compat32()
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/architecture.rst b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/architecture.rst
new file mode 100644
index 00000000..fcd10bde
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/architecture.rst
@@ -0,0 +1,216 @@
+:mod:`email` Package Architecture
+=================================
+
+Overview
+--------
+
+The email package consists of three major components:
+
+ Model
+ An object structure that represents an email message, and provides an
+ API for creating, querying, and modifying a message.
+
+ Parser
+ Takes a sequence of characters or bytes and produces a model of the
+ email message represented by those characters or bytes.
+
+ Generator
+ Takes a model and turns it into a sequence of characters or bytes. The
+ sequence can either be intended for human consumption (a printable
+ unicode string) or bytes suitable for transmission over the wire. In
+ the latter case all data is properly encoded using the content transfer
+ encodings specified by the relevant RFCs.
+
+Conceptually the package is organized around the model. The model provides both
+"external" APIs intended for use by application programs using the library,
+and "internal" APIs intended for use by the Parser and Generator components.
+This division is intentionally a bit fuzzy; the API described by this
+documentation is all a public, stable API. This allows an application
+with special needs to implement its own parser and/or generator.
+
+In addition to the three major functional components, there is a fourth key
+component to the architecture:
+
+ Policy
+ An object that specifies various behavioral settings and carries
+ implementations of various behavior-controlling methods.
+
+The Policy framework provides a simple and convenient way to control the
+behavior of the library, making it possible for the library to be used in a
+very flexible fashion while leveraging the common code required to parse,
+represent, and generate message-like objects. For example, in addition to the
+default :rfc:`5322` email message policy, we also have a policy that manages
+HTTP headers in a fashion compliant with :rfc:`2616`. Individual policy
+controls, such as the maximum line length produced by the generator, can also
+be controlled individually to meet specialized application requirements.
+
+
+The Model
+---------
+
+The message model is implemented by the :class:`~email.message.Message` class.
+The model divides a message into the two fundamental parts discussed by the
+RFC: the header section and the body. The `Message` object acts as a
+pseudo-dictionary of named headers. Its dictionary interface provides
+convenient access to individual headers by name. However, all headers are kept
+internally in an ordered list, so that the information about the order of the
+headers in the original message is preserved.
+
+The `Message` object also has a `payload` that holds the body. A `payload` can
+be one of two things: data, or a list of `Message` objects. The latter is used
+to represent a multipart MIME message. Lists can be nested arbitrarily deeply
+in order to represent the message, with all terminal leaves having non-list
+data payloads.
+
+
+Message Lifecycle
+-----------------
+
+The general lifecycle of a message is:
+
+ Creation
+ A `Message` object can be created by a Parser, or it can be
+ instantiated as an empty message by an application.
+
+ Manipulation
+ The application may examine one or more headers, and/or the
+ payload, and it may modify one or more headers and/or
+ the payload. This may be done on the top level `Message`
+ object, or on any sub-object.
+
+ Finalization
+ The Model is converted into a unicode or binary stream,
+ or the model is discarded.
+
+
+
+Header Policy Control During Lifecycle
+--------------------------------------
+
+One of the major controls exerted by the Policy is the management of headers
+during the `Message` lifecycle. Most applications don't need to be aware of
+this.
+
+A header enters the model in one of two ways: via a Parser, or by being set to
+a specific value by an application program after the Model already exists.
+Similarly, a header exits the model in one of two ways: by being serialized by
+a Generator, or by being retrieved from a Model by an application program. The
+Policy object provides hooks for all four of these pathways.
+
+The model storage for headers is a list of (name, value) tuples.
+
+The Parser identifies headers during parsing, and passes them to the
+:meth:`~email.policy.Policy.header_source_parse` method of the Policy. The
+result of that method is the (name, value) tuple to be stored in the model.
+
+When an application program supplies a header value (for example, through the
+`Message` object `__setitem__` interface), the name and the value are passed to
+the :meth:`~email.policy.Policy.header_store_parse` method of the Policy, which
+returns the (name, value) tuple to be stored in the model.
+
+When an application program retrieves a header (through any of the dict or list
+interfaces of `Message`), the name and value are passed to the
+:meth:`~email.policy.Policy.header_fetch_parse` method of the Policy to
+obtain the value returned to the application.
+
+When a Generator requests a header during serialization, the name and value are
+passed to the :meth:`~email.policy.Policy.fold` method of the Policy, which
+returns a string containing line breaks in the appropriate places. The
+:attr:`~email.policy.Policy.cte_type` Policy control determines whether or
+not Content Transfer Encoding is performed on the data in the header. There is
+also a :meth:`~email.policy.Policy.binary_fold` method for use by generators
+that produce binary output, which returns the folded header as binary data,
+possibly folded at different places than the corresponding string would be.
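+
+A minimal sketch of the two application-facing pathways, using the default
+``compat32`` policy (illustrative only)::
+
+    >>> from email.message import Message
+    >>> m = Message()            # uses the compat32 policy by default
+    >>> m['Subject'] = 'hello'   # value passes through header_store_parse
+    >>> m['Subject']             # value passes through header_fetch_parse
+    'hello'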
+
+
+Handling Binary Data
+--------------------
+
+In an ideal world all message data would conform to the RFCs, meaning that the
+parser could decode the message into the idealized unicode message that the
+sender originally wrote. In the real world, the email package must also be
+able to deal with badly formatted messages, including messages containing
+non-ASCII characters that either have no indicated character set or are not
+valid characters in the indicated character set.
+
+Since email messages are *primarily* text data, and operations on message data
+are primarily text operations (except for binary payloads of course), the model
+stores all text data as unicode strings. Un-decodable binary inside text
+data is handled by using the `surrogateescape` error handler of the ASCII
+codec. As with the binary filenames the error handler was introduced to
+handle, this allows the email package to "carry" the binary data received
+during parsing along until the output stage, at which time it is regenerated
+in its original form.
+
+This carried binary data is almost entirely an implementation detail. The one
+place where it is visible in the API is in the "internal" API. A Parser must
+do the `surrogateescape` encoding of binary input data, and pass that data to
+the appropriate Policy method. The "internal" interface used by the Generator
+to access header values preserves the `surrogateescaped` bytes. All other
+interfaces convert the binary data either back into bytes or into a safe form
+(losing information in some cases).
+
+
+Backward Compatibility
+----------------------
+
+The :class:`~email.policy.Compat32` Policy provides backward
+compatibility with version 5.1 of the email package. It does this via the
+following implementation of the four Policy pathway methods described above,
+plus binary_fold:
+
+header_source_parse
+ Splits the first line on the colon to obtain the name, discards any spaces
+ after the colon, and joins the remainder of the line with all of the
+ remaining lines, preserving the linesep characters to obtain the value.
+ Trailing carriage return and/or linefeed characters are stripped from the
+ resulting value string.
+
+header_store_parse
+ Returns the name and value exactly as received from the application.
+
+header_fetch_parse
+ If the value contains any `surrogateescaped` binary data, return the value
+ as a :class:`~email.header.Header` object, using the character set
+ `unknown-8bit`. Otherwise just returns the value.
+
+fold
+ Uses :class:`~email.header.Header`'s folding to fold headers in the
+ same way the email5.1 generator did.
+
+binary_fold
+ Same as fold, but encodes to 'ascii'.
+
+
+New Algorithm
+-------------
+
+header_source_parse
+ Same as legacy behavior.
+
+header_store_parse
+ Same as legacy behavior.
+
+header_fetch_parse
+ If the value is already a header object, returns it. Otherwise, parses the
+ value using the new parser, and returns the resulting object as the value.
+ `surrogateescaped` bytes get turned into unicode unknown character code
+ points.
+
+fold
+ Uses the new header folding algorithm, respecting the policy settings.
+ surrogateescaped bytes are encoded using the ``unknown-8bit`` charset for
+ ``cte_type=7bit`` or ``8bit``. Returns a string.
+
+ At some point there will also be a ``cte_type=unicode``, and for that
+ policy fold will serialize the idealized unicode message with RFC-like
+ folding, converting any surrogateescaped bytes into the unicode
+ unknown character glyph.
+
+binary_fold
+ Uses the new header folding algorithm, respecting the policy settings.
+ surrogateescaped bytes are encoded using the `unknown-8bit` charset for
+ ``cte_type=7bit``, and get turned back into bytes for ``cte_type=8bit``.
+ Returns bytes.
+
+ At some point there will also be a ``cte_type=unicode``, and for that
+ policy binary_fold will serialize the message according to :rfc:`5335`.
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/base64mime.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/base64mime.py
new file mode 100644
index 00000000..17f0818f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/base64mime.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Author: Ben Gertzfield
+# Contact: email-sig@python.org
+
+"""Base64 content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode arbitrary 8-bit data using the "three 8-bit bytes in four 7-bit
+characters" encoding known as Base64.
+
+It is used in the MIME standards for email to attach images, audio, and text
+using some 8-bit character sets to messages.
+
+This module provides an interface to encode and decode both headers and bodies
+with Base64 encoding.
+
+RFC 2047 defines a method for including character set information in an
+`encoded-word' in a header. This method is commonly used for 8-bit real names
+in To:, From:, Cc:, etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character conversion
+necessary for proper internationalized headers; it only does dumb encoding and
+decoding. To deal with the various line wrapping issues, use the email.header
+module.
+"""
+
+__all__ = [
+ 'body_decode',
+ 'body_encode',
+ 'decode',
+ 'decodestring',
+ 'header_encode',
+ 'header_length',
+ ]
+
+
+from base64 import b64encode
+from binascii import b2a_base64, a2b_base64
+
+CRLF = '\r\n'
+NL = '\n'
+EMPTYSTRING = ''
+
+# See also Charset.py
+MISC_LEN = 7
+
+
+
+# Helpers
+def header_length(bytearray):
+ """Return the length of s when it is encoded with base64."""
+ groups_of_3, leftover = divmod(len(bytearray), 3)
+ # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+ n = groups_of_3 * 4
+ if leftover:
+ n += 4
+ return n
+
+
+
+def header_encode(header_bytes, charset='iso-8859-1'):
+ """Encode a single header line with Base64 encoding in a given charset.
+
+ charset names the character set to use to encode the header. It defaults
+ to iso-8859-1. Base64 encoding is defined in RFC 2045.
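+
+ For example (illustrative):
+
+ >>> header_encode('hello')
+ '=?iso-8859-1?b?aGVsbG8=?='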
+ """
+ if not header_bytes:
+ return ""
+ if isinstance(header_bytes, str):
+ header_bytes = header_bytes.encode(charset)
+ encoded = b64encode(header_bytes).decode("ascii")
+ return '=?%s?b?%s?=' % (charset, encoded)
+
+
+
+def body_encode(s, maxlinelen=76, eol=NL):
+ r"""Encode a string with base64.
+
+ Each line will be wrapped to at most maxlinelen characters (defaults to
+ 76 characters).
+
+ Each line of encoded text will end with eol, which defaults to "\n". Set
+ this to "\r\n" if you will be using the result of this function directly
+ in an email.
+ """
+ if not s:
+ return s
+
+ encvec = []
+ max_unencoded = maxlinelen * 3 // 4
+ for i in range(0, len(s), max_unencoded):
+ # BAW: should encode() inherit b2a_base64()'s dubious behavior in
+ # adding a newline to the encoded string?
+ enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
+ if enc.endswith(NL) and eol != NL:
+ enc = enc[:-1] + eol
+ encvec.append(enc)
+ return EMPTYSTRING.join(encvec)
+
+
+
+def decode(string):
+ """Decode a raw base64 string, returning a bytes object.
+
+ This function does not parse a full MIME header value encoded with
+ base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high
+ level email.header class for that functionality.
+ """
+ if not string:
+ return bytes()
+ elif isinstance(string, str):
+ return a2b_base64(string.encode('raw-unicode-escape'))
+ else:
+ return a2b_base64(string)
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+body_decode = decode
+decodestring = decode
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/charset.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/charset.py
new file mode 100644
index 00000000..ee564040
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/charset.py
@@ -0,0 +1,406 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Ben Gertzfield, Barry Warsaw
+# Contact: email-sig@python.org
+
+__all__ = [
+ 'Charset',
+ 'add_alias',
+ 'add_charset',
+ 'add_codec',
+ ]
+
+from functools import partial
+
+import email.base64mime
+import email.quoprimime
+
+from email import errors
+from email.encoders import encode_7or8bit
+
+
+
+# Flags for types of header encodings
+QP = 1 # Quoted-Printable
+BASE64 = 2 # Base64
+SHORTEST = 3 # the shorter of QP and base64, but only for headers
+
+# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
+RFC2047_CHROME_LEN = 7
+
+DEFAULT_CHARSET = 'us-ascii'
+UNKNOWN8BIT = 'unknown-8bit'
+EMPTYSTRING = ''
+
+
+
+# Defaults
+CHARSETS = {
+ # input header enc body enc output conv
+ 'iso-8859-1': (QP, QP, None),
+ 'iso-8859-2': (QP, QP, None),
+ 'iso-8859-3': (QP, QP, None),
+ 'iso-8859-4': (QP, QP, None),
+ # iso-8859-5 is Cyrillic, and not especially used
+ # iso-8859-6 is Arabic, also not particularly used
+ # iso-8859-7 is Greek, QP will not make it readable
+ # iso-8859-8 is Hebrew, QP will not make it readable
+ 'iso-8859-9': (QP, QP, None),
+ 'iso-8859-10': (QP, QP, None),
+ # iso-8859-11 is Thai, QP will not make it readable
+ 'iso-8859-13': (QP, QP, None),
+ 'iso-8859-14': (QP, QP, None),
+ 'iso-8859-15': (QP, QP, None),
+ 'iso-8859-16': (QP, QP, None),
+ 'windows-1252':(QP, QP, None),
+ 'viscii': (QP, QP, None),
+ 'us-ascii': (None, None, None),
+ 'big5': (BASE64, BASE64, None),
+ 'gb2312': (BASE64, BASE64, None),
+ 'euc-jp': (BASE64, None, 'iso-2022-jp'),
+ 'shift_jis': (BASE64, None, 'iso-2022-jp'),
+ 'iso-2022-jp': (BASE64, None, None),
+ 'koi8-r': (BASE64, BASE64, None),
+ 'utf-8': (SHORTEST, BASE64, 'utf-8'),
+ }
+
+# Aliases for other commonly-used names for character sets. Map
+# them to the real ones used in email.
+ALIASES = {
+ 'latin_1': 'iso-8859-1',
+ 'latin-1': 'iso-8859-1',
+ 'latin_2': 'iso-8859-2',
+ 'latin-2': 'iso-8859-2',
+ 'latin_3': 'iso-8859-3',
+ 'latin-3': 'iso-8859-3',
+ 'latin_4': 'iso-8859-4',
+ 'latin-4': 'iso-8859-4',
+ 'latin_5': 'iso-8859-9',
+ 'latin-5': 'iso-8859-9',
+ 'latin_6': 'iso-8859-10',
+ 'latin-6': 'iso-8859-10',
+ 'latin_7': 'iso-8859-13',
+ 'latin-7': 'iso-8859-13',
+ 'latin_8': 'iso-8859-14',
+ 'latin-8': 'iso-8859-14',
+ 'latin_9': 'iso-8859-15',
+ 'latin-9': 'iso-8859-15',
+ 'latin_10':'iso-8859-16',
+ 'latin-10':'iso-8859-16',
+ 'cp949': 'ks_c_5601-1987',
+ 'euc_jp': 'euc-jp',
+ 'euc_kr': 'euc-kr',
+ 'ascii': 'us-ascii',
+ }
+
+
+# Map charsets to their Unicode codec strings.
+CODEC_MAP = {
+ 'gb2312': 'eucgb2312_cn',
+ 'big5': 'big5_tw',
+ # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
+ # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
+ # Let that stuff pass through without conversion to/from Unicode.
+ 'us-ascii': None,
+ }
+
+
+
+# Convenience functions for extending the above mappings
+def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
+ """Add character set properties to the global registry.
+
+ charset is the input character set, and must be the canonical name of a
+ character set.
+
+ Optional header_enc and body_enc is either Charset.QP for
+ quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
+ the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
+ is only valid for header_enc. It describes how message headers and
+ message bodies in the input charset are to be encoded. Default is no
+ encoding.
+
+ Optional output_charset is the character set that the output should be
+ in. Conversions will proceed from input charset, to Unicode, to the
+ output charset when the method Charset.convert() is called. The default
+ is to output in the same character set as the input.
+
+ Both input_charset and output_charset must have Unicode codec entries in
+ the module's charset-to-codec mapping; use add_codec(charset, codecname)
+ to add codecs the module does not know about. See the codecs module's
+ documentation for more information.
+ """
+ if body_enc == SHORTEST:
+ raise ValueError('SHORTEST not allowed for body_enc')
+ CHARSETS[charset] = (header_enc, body_enc, output_charset)
+
+
+def add_alias(alias, canonical):
+ """Add a character set alias.
+
+ alias is the alias name, e.g. latin-1
+ canonical is the character set's canonical name, e.g. iso-8859-1
+ """
+ ALIASES[alias] = canonical
+
+
+def add_codec(charset, codecname):
+ """Add a codec that map characters in the given charset to/from Unicode.
+
+ charset is the canonical name of a character set. codecname is the name
+ of a Python codec, as appropriate for the second argument to the unicode()
+ built-in, or to the encode() method of a Unicode string.
+ """
+ CODEC_MAP[charset] = codecname
+
+
+
+# Convenience function for encoding strings, taking into account
+# that they might be unknown-8bit (ie: have surrogate-escaped bytes)
+def _encode(string, codec):
+ if codec == UNKNOWN8BIT:
+ return string.encode('ascii', 'surrogateescape')
+ else:
+ return string.encode(codec)
+
+
+
+class Charset:
+ """Map character sets to their email properties.
+
+ This class provides information about the requirements imposed on email
+ for a specific character set. It also provides convenience routines for
+ converting between character sets, given the availability of the
+ applicable codecs. Given a character set, it will do its best to provide
+ information on how to use that character set in an email in an
+ RFC-compliant way.
+
+ Certain character sets must be encoded with quoted-printable or base64
+ when used in email headers or bodies. Certain character sets must be
+ converted outright, and are not allowed in email. Instances of this
+ module expose the following information about a character set:
+
+ input_charset: The initial character set specified. Common aliases
+ are converted to their `official' email names (e.g. latin_1
+ is converted to iso-8859-1). Defaults to 7-bit us-ascii.
+
+ header_encoding: If the character set must be encoded before it can be
+ used in an email header, this attribute will be set to
+ Charset.QP (for quoted-printable), Charset.BASE64 (for
+ base64 encoding), or Charset.SHORTEST for the shortest of
+ QP or BASE64 encoding. Otherwise, it will be None.
+
+ body_encoding: Same as header_encoding, but describes the encoding for the
+ mail message's body, which indeed may be different than the
+ header encoding. Charset.SHORTEST is not allowed for
+ body_encoding.
+
+ output_charset: Some character sets must be converted before they can be
+ used in email headers or bodies. If the input_charset is
+ one of them, this attribute will contain the name of the
+ charset output will be converted to. Otherwise, it will
+ be None.
+
+ input_codec: The name of the Python codec used to convert the
+ input_charset to Unicode. If no conversion codec is
+ necessary, this attribute will be None.
+
+ output_codec: The name of the Python codec used to convert Unicode
+ to the output_charset. If no conversion codec is necessary,
+ this attribute will have the same value as the input_codec.
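+
+ For example (illustrative):
+
+ >>> Charset('latin_1')
+ iso-8859-1
+ >>> Charset('utf-8').get_body_encoding()
+ 'base64'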
+ """
+ def __init__(self, input_charset=DEFAULT_CHARSET):
+ # RFC 2046, §4.1.2 says charsets are not case sensitive. We coerce to
+ # unicode because its .lower() is locale insensitive. If the argument
+ # is already a unicode, we leave it at that, but ensure that the
+ # charset is ASCII, as the standard (RFC XXX) requires.
+ try:
+ if isinstance(input_charset, str):
+ input_charset.encode('ascii')
+ else:
+ input_charset = str(input_charset, 'ascii')
+ except UnicodeError:
+ raise errors.CharsetError(input_charset)
+ input_charset = input_charset.lower()
+ # Set the input charset after filtering through the aliases
+ self.input_charset = ALIASES.get(input_charset, input_charset)
+ # We can try to guess which encoding and conversion to use by the
+ # charset_map dictionary. Try that first, but let the user override
+ # it.
+ henc, benc, conv = CHARSETS.get(self.input_charset,
+ (SHORTEST, BASE64, None))
+ if not conv:
+ conv = self.input_charset
+ # Set the attributes, allowing the arguments to override the default.
+ self.header_encoding = henc
+ self.body_encoding = benc
+ self.output_charset = ALIASES.get(conv, conv)
+ # Now set the codecs. If one isn't defined for input_charset,
+ # guess and try a Unicode codec with the same name as input_codec.
+ self.input_codec = CODEC_MAP.get(self.input_charset,
+ self.input_charset)
+ self.output_codec = CODEC_MAP.get(self.output_charset,
+ self.output_charset)
+
+ def __str__(self):
+ return self.input_charset.lower()
+
+ __repr__ = __str__
+
+ def __eq__(self, other):
+ return str(self) == str(other).lower()
+
+ def get_body_encoding(self):
+ """Return the content-transfer-encoding used for body encoding.
+
+ This is either the string `quoted-printable' or `base64' depending on
+ the encoding used, or it is a function in which case you should call
+ the function with a single argument, the Message object being
+ encoded. The function should then set the Content-Transfer-Encoding
+ header itself to whatever is appropriate.
+
+ Returns "quoted-printable" if self.body_encoding is QP.
+ Returns "base64" if self.body_encoding is BASE64.
+ Returns conversion function otherwise.
+ """
+ assert self.body_encoding != SHORTEST
+ if self.body_encoding == QP:
+ return 'quoted-printable'
+ elif self.body_encoding == BASE64:
+ return 'base64'
+ else:
+ return encode_7or8bit
+
+ def get_output_charset(self):
+ """Return the output character set.
+
+ This is self.output_charset if that is not None, otherwise it is
+ self.input_charset.
+ """
+ return self.output_charset or self.input_charset
+
+ def header_encode(self, string):
+ """Header-encode a string by converting it first to bytes.
+
+ The type of encoding (base64 or quoted-printable) will be based on
+ this charset's `header_encoding`.
+
+ :param string: A unicode string for the header. It must be possible
+ to encode this string to bytes using the character set's
+ output codec.
+ :return: The encoded string, with RFC 2047 chrome.
+ """
+ codec = self.output_codec or 'us-ascii'
+ header_bytes = _encode(string, codec)
+ # 7bit/8bit encodings return the string unchanged (modulo conversions)
+ encoder_module = self._get_encoder(header_bytes)
+ if encoder_module is None:
+ return string
+ return encoder_module.header_encode(header_bytes, codec)
+
+ def header_encode_lines(self, string, maxlengths):
+ """Header-encode a string by converting it first to bytes.
+
+ This is similar to `header_encode()` except that the string is fit
+ into maximum line lengths as given by the argument.
+
+ :param string: A unicode string for the header. It must be possible
+ to encode this string to bytes using the character set's
+ output codec.
+ :param maxlengths: Maximum line length iterator. Each element
+ returned from this iterator will provide the next maximum line
+ length. This parameter is used as an argument to built-in next()
+ and should never be exhausted. The maximum line lengths should
+ not count the RFC 2047 chrome. These line lengths are only a
+ hint; the splitter does the best it can.
+ :return: Lines of encoded strings, each with RFC 2047 chrome.
+ """
+ # See which encoding we should use.
+ codec = self.output_codec or 'us-ascii'
+ header_bytes = _encode(string, codec)
+ encoder_module = self._get_encoder(header_bytes)
+ encoder = partial(encoder_module.header_encode, charset=codec)
+ # Calculate the number of characters that the RFC 2047 chrome will
+ # contribute to each line.
+ charset = self.get_output_charset()
+ extra = len(charset) + RFC2047_CHROME_LEN
+ # Now comes the hard part. We must encode bytes but we can't split on
+ # bytes because some character sets are variable length and each
+ # encoded word must stand on its own. So the problem is you have to
+ # encode to bytes to figure out this word's length, but you must split
+ # on characters. This causes two problems: first, we don't know how
+ # many octets a specific substring of unicode characters will get
+ # encoded to, and second, we don't know how many ASCII characters
+ # those octets will get encoded to. Unless we try it. Which seems
+ # inefficient. In the interest of being correct rather than fast (and
+ # in the hope that there will be few encoded headers in any such
+ # message), brute force it. :(
+ lines = []
+ current_line = []
+ maxlen = next(maxlengths) - extra
+ for character in string:
+ current_line.append(character)
+ this_line = EMPTYSTRING.join(current_line)
+ length = encoder_module.header_length(_encode(this_line, charset))
+ if length > maxlen:
+ # This last character doesn't fit so pop it off.
+ current_line.pop()
+ # Does nothing fit on the first line?
+ if not lines and not current_line:
+ lines.append(None)
+ else:
+ separator = (' ' if lines else '')
+ joined_line = EMPTYSTRING.join(current_line)
+ header_bytes = _encode(joined_line, codec)
+ lines.append(encoder(header_bytes))
+ current_line = [character]
+ maxlen = next(maxlengths) - extra
+ joined_line = EMPTYSTRING.join(current_line)
+ header_bytes = _encode(joined_line, codec)
+ lines.append(encoder(header_bytes))
+ return lines
+
+ def _get_encoder(self, header_bytes):
+ if self.header_encoding == BASE64:
+ return email.base64mime
+ elif self.header_encoding == QP:
+ return email.quoprimime
+ elif self.header_encoding == SHORTEST:
+ len64 = email.base64mime.header_length(header_bytes)
+ lenqp = email.quoprimime.header_length(header_bytes)
+ if len64 < lenqp:
+ return email.base64mime
+ else:
+ return email.quoprimime
+ else:
+ return None
+
+ def body_encode(self, string):
+ """Body-encode a string by converting it first to bytes.
+
+ The type of encoding (base64 or quoted-printable) will be based on
+ self.body_encoding. If body_encoding is None, we assume the
+ output charset is a 7bit encoding, so re-encoding the decoded
+ string using the ascii codec produces the correct string version
+ of the content.
+ """
+ if not string:
+ return string
+ if self.body_encoding is BASE64:
+ if isinstance(string, str):
+ string = string.encode(self.output_charset)
+ return email.base64mime.body_encode(string)
+ elif self.body_encoding is QP:
+ # quoprimime.body_encode takes a string, but operates on it as if
+ # it were a list of byte codes. For a (minimal) history on why
+ # this is so, see changeset 0cf700464177. To correctly encode a
+ # character set, then, we must turn it into pseudo bytes via the
+ # latin1 charset, which will encode any byte as a single code point
+ # between 0 and 255, which is what body_encode is expecting.
+ if isinstance(string, str):
+ string = string.encode(self.output_charset)
+ string = string.decode('latin1')
+ return email.quoprimime.body_encode(string)
+ else:
+ if isinstance(string, str):
+ string = string.encode(self.output_charset).decode('ascii')
+ return string
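
A minimal usage sketch of the Charset class above (illustrative only; the
sample charsets and strings are arbitrary, and the printed values assume the
default CHARSETS table):

    from email.charset import Charset

    utf8 = Charset('utf-8')
    print(utf8.get_body_encoding())            # 'base64'
    print(utf8.header_encode('Héllo Wörld'))   # an =?utf-8?...?= encoded word

    latin1 = Charset('iso-8859-1')
    print(latin1.get_body_encoding())          # 'quoted-printable'
    print(latin1.body_encode('Héllo'))         # 'H=E9llo'
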
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/contentmanager.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/contentmanager.py
new file mode 100644
index 00000000..b904ded9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/contentmanager.py
@@ -0,0 +1,250 @@
+import binascii
+import email.charset
+import email.message
+import email.errors
+from email import quoprimime
+
+class ContentManager:
+
+ def __init__(self):
+ self.get_handlers = {}
+ self.set_handlers = {}
+
+ def add_get_handler(self, key, handler):
+ self.get_handlers[key] = handler
+
+ def get_content(self, msg, *args, **kw):
+ content_type = msg.get_content_type()
+ if content_type in self.get_handlers:
+ return self.get_handlers[content_type](msg, *args, **kw)
+ maintype = msg.get_content_maintype()
+ if maintype in self.get_handlers:
+ return self.get_handlers[maintype](msg, *args, **kw)
+ if '' in self.get_handlers:
+ return self.get_handlers[''](msg, *args, **kw)
+ raise KeyError(content_type)
+
+ def add_set_handler(self, typekey, handler):
+ self.set_handlers[typekey] = handler
+
+ def set_content(self, msg, obj, *args, **kw):
+ if msg.get_content_maintype() == 'multipart':
+ # XXX: is this error a good idea or not? We can remove it later,
+ # but we can't add it later, so do it for now.
+ raise TypeError("set_content not valid on multipart")
+ handler = self._find_set_handler(msg, obj)
+ msg.clear_content()
+ handler(msg, obj, *args, **kw)
+
+ def _find_set_handler(self, msg, obj):
+ full_path_for_error = None
+ for typ in type(obj).__mro__:
+ if typ in self.set_handlers:
+ return self.set_handlers[typ]
+ qname = typ.__qualname__
+ modname = getattr(typ, '__module__', '')
+ full_path = '.'.join((modname, qname)) if modname else qname
+ if full_path_for_error is None:
+ full_path_for_error = full_path
+ if full_path in self.set_handlers:
+ return self.set_handlers[full_path]
+ if qname in self.set_handlers:
+ return self.set_handlers[qname]
+ name = typ.__name__
+ if name in self.set_handlers:
+ return self.set_handlers[name]
+ if None in self.set_handlers:
+ return self.set_handlers[None]
+ raise KeyError(full_path_for_error)
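
A small self-contained sketch of the lookup rules implemented above. The
handler names (get_plain, set_str) are invented for illustration; only the
ContentManager API itself comes from this module:

    from email.message import EmailMessage
    from email.contentmanager import ContentManager

    cm = ContentManager()

    def get_plain(msg):                          # hypothetical get handler
        return msg.get_payload(decode=True).decode('utf-8')

    cm.add_get_handler('text/plain', get_plain)  # exact content type wins first
    cm.add_get_handler('text', get_plain)        # then the maintype
    cm.add_get_handler('', get_plain)            # '' is the catch-all for get

    def set_str(msg, obj):                       # hypothetical set handler
        msg['Content-Type'] = 'text/plain'
        msg.set_payload(obj)

    cm.add_set_handler(str, set_str)   # keyed by type; qualname/name also work

    msg = EmailMessage()
    cm.set_content(msg, 'hello')       # dispatches along type(obj).__mro__
    print(cm.get_content(msg))         # 'hello'

For set_content the catch-all key is None rather than '', as the code above
shows.
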
+
+
+raw_data_manager = ContentManager()
+
+
+def get_text_content(msg, errors='replace'):
+ content = msg.get_payload(decode=True)
+ charset = msg.get_param('charset', 'ASCII')
+ return content.decode(charset, errors=errors)
+raw_data_manager.add_get_handler('text', get_text_content)
+
+
+def get_non_text_content(msg):
+ return msg.get_payload(decode=True)
+for maintype in 'audio image video application'.split():
+ raw_data_manager.add_get_handler(maintype, get_non_text_content)
+
+
+def get_message_content(msg):
+ return msg.get_payload(0)
+for subtype in 'rfc822 external-body'.split():
+ raw_data_manager.add_get_handler('message/'+subtype, get_message_content)
+
+
+def get_and_fixup_unknown_message_content(msg):
+ # If we don't understand a message subtype, we are supposed to treat it as
+ # if it were application/octet-stream, per
+ # tools.ietf.org/html/rfc2046#section-5.2.4. Feedparser doesn't do that,
+ # so do our best to fix things up. Note that it is *not* appropriate to
+ # model message/partial content as Message objects, so they are handled
+ # here as well. (How to reassemble them is out of scope for this comment :)
+ return bytes(msg.get_payload(0))
+raw_data_manager.add_get_handler('message',
+ get_and_fixup_unknown_message_content)
+
+
+def _prepare_set(msg, maintype, subtype, headers):
+ msg['Content-Type'] = '/'.join((maintype, subtype))
+ if headers:
+ if not hasattr(headers[0], 'name'):
+ mp = msg.policy
+ headers = [mp.header_factory(*mp.header_source_parse([header]))
+ for header in headers]
+ try:
+ for header in headers:
+ if header.defects:
+ raise header.defects[0]
+ msg[header.name] = header
+ except email.errors.HeaderDefect as exc:
+ raise ValueError("Invalid header: {}".format(
+ header.fold(policy=msg.policy))) from exc
+
+
+def _finalize_set(msg, disposition, filename, cid, params):
+ if disposition is None and filename is not None:
+ disposition = 'attachment'
+ if disposition is not None:
+ msg['Content-Disposition'] = disposition
+ if filename is not None:
+ msg.set_param('filename',
+ filename,
+ header='Content-Disposition',
+ replace=True)
+ if cid is not None:
+ msg['Content-ID'] = cid
+ if params is not None:
+ for key, value in params.items():
+ msg.set_param(key, value)
+
+
+# XXX: This is a cleaned-up version of base64mime.body_encode (including a bug
+# fix in the calculation of unencoded_bytes_per_line). It would be nice to
+# drop both this and quoprimime.body_encode in favor of enhanced binascii
+# routines that accepted a max_line_length parameter.
+def _encode_base64(data, max_line_length):
+ encoded_lines = []
+ unencoded_bytes_per_line = max_line_length // 4 * 3
+ for i in range(0, len(data), unencoded_bytes_per_line):
+ thisline = data[i:i+unencoded_bytes_per_line]
+ encoded_lines.append(binascii.b2a_base64(thisline).decode('ascii'))
+ return ''.join(encoded_lines)
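
The division above fixes the raw chunk size so that each encoded line stays
within max_line_length: 78 // 4 * 3 == 57 unencoded bytes per line. A quick
check, importing the private helper purely for illustration:

    from email.contentmanager import _encode_base64

    encoded = _encode_base64(b'x' * 120, max_line_length=78)
    print(encoded.count('\n'))                       # 3 lines (57 + 57 + 6 bytes)
    print(max(len(line) for line in encoded.splitlines()))   # 76, under the limit
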
+
+
+def _encode_text(string, charset, cte, policy):
+ lines = string.encode(charset).splitlines()
+ linesep = policy.linesep.encode('ascii')
+ def embedded_body(lines): return linesep.join(lines) + linesep
+ def normal_body(lines): return b'\n'.join(lines) + b'\n'
+ if cte is None:
+ # Use heuristics to decide on the "best" encoding.
+ try:
+ return '7bit', normal_body(lines).decode('ascii')
+ except UnicodeDecodeError:
+ pass
+ if (policy.cte_type == '8bit' and
+ max(len(x) for x in lines) <= policy.max_line_length):
+ return '8bit', normal_body(lines).decode('ascii', 'surrogateescape')
+ sniff = embedded_body(lines[:10])
+ sniff_qp = quoprimime.body_encode(sniff.decode('latin-1'),
+ policy.max_line_length)
+ sniff_base64 = binascii.b2a_base64(sniff)
+ # This is a little unfair to qp; it includes lineseps, base64 doesn't.
+ if len(sniff_qp) > len(sniff_base64):
+ cte = 'base64'
+ else:
+ cte = 'quoted-printable'
+ if len(lines) <= 10:
+ return cte, sniff_qp
+ if cte == '7bit':
+ data = normal_body(lines).decode('ascii')
+ elif cte == '8bit':
+ data = normal_body(lines).decode('ascii', 'surrogateescape')
+ elif cte == 'quoted-printable':
+ data = quoprimime.body_encode(normal_body(lines).decode('latin-1'),
+ policy.max_line_length)
+ elif cte == 'base64':
+ data = _encode_base64(embedded_body(lines), policy.max_line_length)
+ else:
+ raise ValueError("Unknown content transfer encoding {}".format(cte))
+ return cte, data
+
+
+def set_text_content(msg, string, subtype="plain", charset='utf-8', cte=None,
+ disposition=None, filename=None, cid=None,
+ params=None, headers=None):
+ _prepare_set(msg, 'text', subtype, headers)
+ cte, payload = _encode_text(string, charset, cte, msg.policy)
+ msg.set_payload(payload)
+ msg.set_param('charset',
+ email.charset.ALIASES.get(charset, charset),
+ replace=True)
+ msg['Content-Transfer-Encoding'] = cte
+ _finalize_set(msg, disposition, filename, cid, params)
+raw_data_manager.add_set_handler(str, set_text_content)
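
A sketch of how the heuristic in _encode_text surfaces through
EmailMessage.set_content, which routes to set_text_content via
raw_data_manager under the default policy. The cte_type='7bit' clone forces
the qp/base64 size comparison; with the stock policy (cte_type='8bit'),
short non-ASCII lines would simply be labelled 8bit. The sample strings are
arbitrary:

    from email.message import EmailMessage
    from email import policy

    seven_bit = policy.default.clone(cte_type='7bit')

    msg = EmailMessage(seven_bit)
    msg.set_content('plain ascii text\n')
    print(msg['Content-Transfer-Encoding'])    # 7bit

    msg.set_content('café au lait\n')          # a little non-ASCII: qp is smaller
    print(msg['Content-Transfer-Encoding'])    # quoted-printable

    msg.set_content('данные\n' * 5)            # mostly multi-byte: base64 wins
    print(msg['Content-Transfer-Encoding'])    # base64
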
+
+
+def set_message_content(msg, message, subtype="rfc822", cte=None,
+ disposition=None, filename=None, cid=None,
+ params=None, headers=None):
+ if subtype == 'partial':
+ raise ValueError("message/partial is not supported for Message objects")
+ if subtype == 'rfc822':
+ if cte not in (None, '7bit', '8bit', 'binary'):
+ # http://tools.ietf.org/html/rfc2046#section-5.2.1 mandate.
+ raise ValueError(
+ "message/rfc822 parts do not support cte={}".format(cte))
+ # 8bit will get coerced on serialization if policy.cte_type='7bit'. We
+ # may end up claiming 8bit when it isn't needed, but the only negative
+ # result of that should be a gateway that needs to coerce to 7bit
+ # having to look through the whole embedded message to discover whether
+ # or not it actually has to do anything.
+ cte = '8bit' if cte is None else cte
+ elif subtype == 'external-body':
+ if cte not in (None, '7bit'):
+ # http://tools.ietf.org/html/rfc2046#section-5.2.3 mandate.
+ raise ValueError(
+ "message/external-body parts do not support cte={}".format(cte))
+ cte = '7bit'
+ elif cte is None:
+ # http://tools.ietf.org/html/rfc2046#section-5.2.4 says all future
+ # subtypes should be restricted to 7bit, so assume that.
+ cte = '7bit'
+ _prepare_set(msg, 'message', subtype, headers)
+ msg.set_payload([message])
+ msg['Content-Transfer-Encoding'] = cte
+ _finalize_set(msg, disposition, filename, cid, params)
+raw_data_manager.add_set_handler(email.message.Message, set_message_content)
+
+
+def set_bytes_content(msg, data, maintype, subtype, cte='base64',
+ disposition=None, filename=None, cid=None,
+ params=None, headers=None):
+ _prepare_set(msg, maintype, subtype, headers)
+ if cte == 'base64':
+ data = _encode_base64(data, max_line_length=msg.policy.max_line_length)
+ elif cte == 'quoted-printable':
+ # XXX: quoprimime.body_encode won't encode newline characters in data,
+ # so we can't use it. This means max_line_length is ignored. Another
+ # bug to fix later. (Note: encoders.quopri is broken on line ends.)
+ data = binascii.b2a_qp(data, istext=False, header=False, quotetabs=True)
+ data = data.decode('ascii')
+ elif cte == '7bit':
+ # Make sure it really is only ASCII. The early warning here seems
+ # worth the overhead...if you care write your own content manager :).
+ data.encode('ascii')
+ elif cte in ('8bit', 'binary'):
+ data = data.decode('ascii', 'surrogateescape')
+ msg.set_payload(data)
+ msg['Content-Transfer-Encoding'] = cte
+ _finalize_set(msg, disposition, filename, cid, params)
+for typ in (bytes, bytearray, memoryview):
+ raw_data_manager.add_set_handler(typ, set_bytes_content)
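
Bytes-like payloads dispatch to set_bytes_content above; a brief sketch (the
byte string is a stand-in, not a real PNG):

    from email.message import EmailMessage

    msg = EmailMessage()
    msg.set_content(b'\x89PNG fake image data', maintype='image', subtype='png',
                    filename='pixel.png')
    print(msg['Content-Type'])                 # image/png
    print(msg['Content-Transfer-Encoding'])    # base64 (the default cte for bytes)
    print(msg['Content-Disposition'])          # attachment; filename="pixel.png"
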
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/encoders.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/encoders.py
new file mode 100644
index 00000000..0a66acb6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/encoders.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Encodings and related functions."""
+
+__all__ = [
+ 'encode_7or8bit',
+ 'encode_base64',
+ 'encode_noop',
+ 'encode_quopri',
+ ]
+
+
+from base64 import encodebytes as _bencode
+from quopri import encodestring as _encodestring
+
+
+
+def _qencode(s):
+ enc = _encodestring(s, quotetabs=True)
+ # Must encode spaces, which quopri.encodestring() doesn't do
+ return enc.replace(b' ', b'=20')
+
+
+def encode_base64(msg):
+ """Encode the message's payload in Base64.
+
+ Also, add an appropriate Content-Transfer-Encoding header.
+ """
+ orig = msg.get_payload(decode=True)
+ encdata = str(_bencode(orig), 'ascii')
+ msg.set_payload(encdata)
+ msg['Content-Transfer-Encoding'] = 'base64'
+
+
+
+def encode_quopri(msg):
+ """Encode the message's payload in quoted-printable.
+
+ Also, add an appropriate Content-Transfer-Encoding header.
+ """
+ orig = msg.get_payload(decode=True)
+ encdata = _qencode(orig)
+ msg.set_payload(encdata)
+ msg['Content-Transfer-Encoding'] = 'quoted-printable'
+
+
+
+def encode_7or8bit(msg):
+ """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
+ orig = msg.get_payload(decode=True)
+ if orig is None:
+ # There's no payload. For backwards compatibility we use 7bit
+ msg['Content-Transfer-Encoding'] = '7bit'
+ return
+ # We play a trick to make this go fast. If decoding from ASCII succeeds,
+ # we know the data must be 7bit, otherwise treat it as 8bit.
+ try:
+ orig.decode('ascii')
+ except UnicodeError:
+ msg['Content-Transfer-Encoding'] = '8bit'
+ else:
+ msg['Content-Transfer-Encoding'] = '7bit'
+
+
+
+def encode_noop(msg):
+ """Do nothing."""
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/errors.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/errors.py
new file mode 100644
index 00000000..d28a6800
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/errors.py
@@ -0,0 +1,110 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""email package exception classes."""
+
+
+class MessageError(Exception):
+ """Base class for errors in the email package."""
+
+
+class MessageParseError(MessageError):
+ """Base class for message parsing errors."""
+
+
+class HeaderParseError(MessageParseError):
+ """Error while parsing headers."""
+
+
+class BoundaryError(MessageParseError):
+ """Couldn't find terminating boundary."""
+
+
+class MultipartConversionError(MessageError, TypeError):
+ """Conversion to a multipart is prohibited."""
+
+
+class CharsetError(MessageError):
+ """An illegal charset was given."""
+
+
+# These are parsing defects which the parser was able to work around.
+class MessageDefect(ValueError):
+ """Base class for a message defect."""
+
+ def __init__(self, line=None):
+ if line is not None:
+ super().__init__(line)
+ self.line = line
+
+class NoBoundaryInMultipartDefect(MessageDefect):
+ """A message claimed to be a multipart but had no boundary parameter."""
+
+class StartBoundaryNotFoundDefect(MessageDefect):
+ """The claimed start boundary was never found."""
+
+class CloseBoundaryNotFoundDefect(MessageDefect):
+ """A start boundary was found, but not the corresponding close boundary."""
+
+class FirstHeaderLineIsContinuationDefect(MessageDefect):
+ """A message had a continuation line as its first header line."""
+
+class MisplacedEnvelopeHeaderDefect(MessageDefect):
+ """A 'Unix-from' header was found in the middle of a header block."""
+
+class MissingHeaderBodySeparatorDefect(MessageDefect):
+ """Found line with no leading whitespace and no colon before blank line."""
+# XXX: backward compatibility, just in case (it was never emitted).
+MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
+
+class MultipartInvariantViolationDefect(MessageDefect):
+ """A message claimed to be a multipart but no subparts were found."""
+
+class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
+ """An invalid content transfer encoding was set on the multipart itself."""
+
+class UndecodableBytesDefect(MessageDefect):
+ """Header contained bytes that could not be decoded"""
+
+class InvalidBase64PaddingDefect(MessageDefect):
+ """base64 encoded sequence had an incorrect length"""
+
+class InvalidBase64CharactersDefect(MessageDefect):
+ """base64 encoded sequence had characters not in base64 alphabet"""
+
+class InvalidBase64LengthDefect(MessageDefect):
+ """base64 encoded sequence had invalid length (1 mod 4)"""
+
+# These errors are specific to header parsing.
+
+class HeaderDefect(MessageDefect):
+ """Base class for a header defect."""
+
+ def __init__(self, *args, **kw):
+ super().__init__(*args, **kw)
+
+class InvalidHeaderDefect(HeaderDefect):
+ """Header is not valid, message gives details."""
+
+class HeaderMissingRequiredValue(HeaderDefect):
+ """A header that must have a value had none"""
+
+class NonPrintableDefect(HeaderDefect):
+ """ASCII characters outside the ascii-printable range found"""
+
+ def __init__(self, non_printables):
+ super().__init__(non_printables)
+ self.non_printables = non_printables
+
+ def __str__(self):
+ return ("the following ASCII non-printables found in header: "
+ "{}".format(self.non_printables))
+
+class ObsoleteHeaderDefect(HeaderDefect):
+ """Header uses syntax declared obsolete by RFC 5322"""
+
+class NonASCIILocalPartDefect(HeaderDefect):
+ """local_part contains non-ASCII characters"""
+ # This defect only occurs during unicode parsing, not when
+ # parsing messages decoded from binary.
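
A sketch of how these defect classes surface in practice: parsers append them
to msg.defects instead of raising, unless the policy's raise_on_defect says
otherwise. The malformed message below is arbitrary:

    from email import message_from_string, policy

    broken = (
        'Content-Type: multipart/mixed\n'   # multipart with no boundary param
        '\n'
        'body\n'
    )
    msg = message_from_string(broken, policy=policy.default)
    for defect in msg.defects:
        print(type(defect).__name__)
    # NoBoundaryInMultipartDefect
    # MultipartInvariantViolationDefect
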
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/feedparser.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/feedparser.py
new file mode 100644
index 00000000..7c07ca86
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/feedparser.py
@@ -0,0 +1,536 @@
+# Copyright (C) 2004-2006 Python Software Foundation
+# Authors: Baxter, Wouters and Warsaw
+# Contact: email-sig@python.org
+
+"""FeedParser - An email feed parser.
+
+The feed parser implements an interface for incrementally parsing an email
+message, line by line. This has advantages for certain applications, such as
+those reading email messages off a socket.
+
+FeedParser.feed() is the primary interface for pushing new data into the
+parser. It returns when there's nothing more it can do with the available
+data. When you have no more data to push into the parser, call .close().
+This completes the parsing and returns the root message object.
+
+The other advantage of this parser is that it will never raise a parsing
+exception. Instead, when it finds something unexpected, it adds a 'defect' to
+the current message. Defects are just instances that live on the message
+object's .defects attribute.
+"""
+
+__all__ = ['FeedParser', 'BytesFeedParser']
+
+import re
+
+from email import errors
+from email._policybase import compat32
+from collections import deque
+from io import StringIO
+
+NLCRE = re.compile(r'\r\n|\r|\n')
+NLCRE_bol = re.compile(r'(\r\n|\r|\n)')
+NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')
+NLCRE_crack = re.compile(r'(\r\n|\r|\n)')
+# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
+# except controls, SP, and ":".
+headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])')
+EMPTYSTRING = ''
+NL = '\n'
+
+NeedMoreData = object()
+
+
+
+class BufferedSubFile(object):
+ """A file-ish object that can have new data loaded into it.
+
+ You can also push and pop line-matching predicates onto a stack. When the
+ current predicate matches the current line, a false EOF response
+ (i.e. empty string) is returned instead. This lets the parser adhere to a
+ simple abstraction -- it parses until EOF closes the current message.
+ """
+ def __init__(self):
+ # Text stream of the last partial line pushed into this object.
+ # See issue 22233 for why this is a text stream and not a list.
+ self._partial = StringIO(newline='')
+ # A deque of full, pushed lines
+ self._lines = deque()
+ # The stack of false-EOF checking predicates.
+ self._eofstack = []
+ # A flag indicating whether the file has been closed or not.
+ self._closed = False
+
+ def push_eof_matcher(self, pred):
+ self._eofstack.append(pred)
+
+ def pop_eof_matcher(self):
+ return self._eofstack.pop()
+
+ def close(self):
+ # Don't forget any trailing partial line.
+ self._partial.seek(0)
+ self.pushlines(self._partial.readlines())
+ self._partial.seek(0)
+ self._partial.truncate()
+ self._closed = True
+
+ def readline(self):
+ if not self._lines:
+ if self._closed:
+ return ''
+ return NeedMoreData
+ # Pop the line off the stack and see if it matches the current
+ # false-EOF predicate.
+ line = self._lines.popleft()
+ # RFC 2046, section 5.1.2 requires us to recognize outer level
+ # boundaries at any level of inner nesting. Do this, but be sure it's
+ # in the order of most to least nested.
+ for ateof in reversed(self._eofstack):
+ if ateof(line):
+ # We're at the false EOF. But push the last line back first.
+ self._lines.appendleft(line)
+ return ''
+ return line
+
+ def unreadline(self, line):
+ # Let the consumer push a line back into the buffer.
+ assert line is not NeedMoreData
+ self._lines.appendleft(line)
+
+ def push(self, data):
+ """Push some new data into this object."""
+ self._partial.write(data)
+ if '\n' not in data and '\r' not in data:
+ # No new complete lines, wait for more.
+ return
+
+ # Crack into lines, preserving the linesep characters.
+ self._partial.seek(0)
+ parts = self._partial.readlines()
+ self._partial.seek(0)
+ self._partial.truncate()
+
+ # If the last element of the list does not end in a newline, then treat
+ # it as a partial line. We only check for '\n' here because a line
+ # ending with '\r' might be a line that was split in the middle of a
+ # '\r\n' sequence (see bugs 1555570 and 1721862).
+ if not parts[-1].endswith('\n'):
+ self._partial.write(parts.pop())
+ self.pushlines(parts)
+
+ def pushlines(self, lines):
+ self._lines.extend(lines)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ line = self.readline()
+ if line == '':
+ raise StopIteration
+ return line
+
+
+
+class FeedParser:
+ """A feed-style parser of email."""
+
+ def __init__(self, _factory=None, *, policy=compat32):
+ """_factory is called with no arguments to create a new message obj
+
+ The policy keyword specifies a policy object that controls a number of
+ aspects of the parser's operation. The default policy maintains
+ backward compatibility.
+
+ """
+ self.policy = policy
+ self._old_style_factory = False
+ if _factory is None:
+ if policy.message_factory is None:
+ from email.message import Message
+ self._factory = Message
+ else:
+ self._factory = policy.message_factory
+ else:
+ self._factory = _factory
+ try:
+ _factory(policy=self.policy)
+ except TypeError:
+ # Assume this is an old-style factory
+ self._old_style_factory = True
+ self._input = BufferedSubFile()
+ self._msgstack = []
+ self._parse = self._parsegen().__next__
+ self._cur = None
+ self._last = None
+ self._headersonly = False
+
+ # Non-public interface for supporting Parser's headersonly flag
+ def _set_headersonly(self):
+ self._headersonly = True
+
+ def feed(self, data):
+ """Push more data into the parser."""
+ self._input.push(data)
+ self._call_parse()
+
+ def _call_parse(self):
+ try:
+ self._parse()
+ except StopIteration:
+ pass
+
+ def close(self):
+ """Parse all remaining data and return the root message object."""
+ self._input.close()
+ self._call_parse()
+ root = self._pop_message()
+ assert not self._msgstack
+ # Look for final set of defects
+ if root.get_content_maintype() == 'multipart' \
+ and not root.is_multipart():
+ defect = errors.MultipartInvariantViolationDefect()
+ self.policy.handle_defect(root, defect)
+ return root
+
+ def _new_message(self):
+ if self._old_style_factory:
+ msg = self._factory()
+ else:
+ msg = self._factory(policy=self.policy)
+ if self._cur and self._cur.get_content_type() == 'multipart/digest':
+ msg.set_default_type('message/rfc822')
+ if self._msgstack:
+ self._msgstack[-1].attach(msg)
+ self._msgstack.append(msg)
+ self._cur = msg
+ self._last = msg
+
+ def _pop_message(self):
+ retval = self._msgstack.pop()
+ if self._msgstack:
+ self._cur = self._msgstack[-1]
+ else:
+ self._cur = None
+ return retval
+
+ def _parsegen(self):
+ # Create a new message and start by parsing headers.
+ self._new_message()
+ headers = []
+ # Collect the headers, searching for a line that doesn't match the RFC
+ # 2822 header or continuation pattern (including an empty line).
+ for line in self._input:
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ if not headerRE.match(line):
+ # If we saw the RFC defined header/body separator
+ # (i.e. newline), just throw it away. Otherwise the line is
+ # part of the body so push it back.
+ if not NLCRE.match(line):
+ defect = errors.MissingHeaderBodySeparatorDefect()
+ self.policy.handle_defect(self._cur, defect)
+ self._input.unreadline(line)
+ break
+ headers.append(line)
+ # Done with the headers, so parse them and figure out what we're
+ # supposed to see in the body of the message.
+ self._parse_headers(headers)
+ # Headers-only parsing is a backwards compatibility hack, which was
+ # necessary in the older parser, which could raise errors. All
+ # remaining lines in the input are thrown into the message body.
+ if self._headersonly:
+ lines = []
+ while True:
+ line = self._input.readline()
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ if line == '':
+ break
+ lines.append(line)
+ self._cur.set_payload(EMPTYSTRING.join(lines))
+ return
+ if self._cur.get_content_type() == 'message/delivery-status':
+ # message/delivery-status contains blocks of headers separated by
+ # a blank line. We'll represent each header block as a separate
+ # nested message object, but the processing is a bit different
+ # than standard message/* types because there is no body for the
+ # nested messages. A blank line separates the subparts.
+ while True:
+ self._input.push_eof_matcher(NLCRE.match)
+ for retval in self._parsegen():
+ if retval is NeedMoreData:
+ yield NeedMoreData
+ continue
+ break
+ msg = self._pop_message()
+ # We need to pop the EOF matcher in order to tell if we're at
+ # the end of the current file, not the end of the last block
+ # of message headers.
+ self._input.pop_eof_matcher()
+ # The input stream must be sitting at the newline or at the
+ # EOF. We want to see if we're at the end of this subpart, so
+ # first consume the blank line, then test the next line to see
+ # if we're at this subpart's EOF.
+ while True:
+ line = self._input.readline()
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ break
+ while True:
+ line = self._input.readline()
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ break
+ if line == '':
+ break
+ # Not at EOF so this is a line we're going to need.
+ self._input.unreadline(line)
+ return
+ if self._cur.get_content_maintype() == 'message':
+ # The message claims to be a message/* type, then what follows is
+ # another RFC 2822 message.
+ for retval in self._parsegen():
+ if retval is NeedMoreData:
+ yield NeedMoreData
+ continue
+ break
+ self._pop_message()
+ return
+ if self._cur.get_content_maintype() == 'multipart':
+ boundary = self._cur.get_boundary()
+ if boundary is None:
+ # The message /claims/ to be a multipart but it has not
+ # defined a boundary. That's a problem which we'll handle by
+ # reading everything until the EOF and marking the message as
+ # defective.
+ defect = errors.NoBoundaryInMultipartDefect()
+ self.policy.handle_defect(self._cur, defect)
+ lines = []
+ for line in self._input:
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ lines.append(line)
+ self._cur.set_payload(EMPTYSTRING.join(lines))
+ return
+ # Make sure a valid content type was specified per RFC 2045:6.4.
+ if (self._cur.get('content-transfer-encoding', '8bit').lower()
+ not in ('7bit', '8bit', 'binary')):
+ defect = errors.InvalidMultipartContentTransferEncodingDefect()
+ self.policy.handle_defect(self._cur, defect)
+ # Create a line match predicate which matches the inter-part
+ # boundary as well as the end-of-multipart boundary. Don't push
+ # this onto the input stream until we've scanned past the
+ # preamble.
+ separator = '--' + boundary
+ boundaryre = re.compile(
+ '(?P<sep>' + re.escape(separator) +
+ r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
+ capturing_preamble = True
+ preamble = []
+ linesep = False
+ close_boundary_seen = False
+ while True:
+ line = self._input.readline()
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ if line == '':
+ break
+ mo = boundaryre.match(line)
+ if mo:
+ # If we're looking at the end boundary, we're done with
+ # this multipart. If there was a newline at the end of
+ # the closing boundary, then we need to initialize the
+ # epilogue with the empty string (see below).
+ if mo.group('end'):
+ close_boundary_seen = True
+ linesep = mo.group('linesep')
+ break
+ # We saw an inter-part boundary. Were we in the preamble?
+ if capturing_preamble:
+ if preamble:
+ # According to RFC 2046, the last newline belongs
+ # to the boundary.
+ lastline = preamble[-1]
+ eolmo = NLCRE_eol.search(lastline)
+ if eolmo:
+ preamble[-1] = lastline[:-len(eolmo.group(0))]
+ self._cur.preamble = EMPTYSTRING.join(preamble)
+ capturing_preamble = False
+ self._input.unreadline(line)
+ continue
+ # We saw a boundary separating two parts. Consume any
+ # multiple boundary lines that may be following. Our
+ # interpretation of RFC 2046 BNF grammar does not produce
+ # body parts within such double boundaries.
+ while True:
+ line = self._input.readline()
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ mo = boundaryre.match(line)
+ if not mo:
+ self._input.unreadline(line)
+ break
+ # Recurse to parse this subpart; the input stream points
+ # at the subpart's first line.
+ self._input.push_eof_matcher(boundaryre.match)
+ for retval in self._parsegen():
+ if retval is NeedMoreData:
+ yield NeedMoreData
+ continue
+ break
+ # Because of RFC 2046, the newline preceding the boundary
+ # separator actually belongs to the boundary, not the
+ # previous subpart's payload (or epilogue if the previous
+ # part is a multipart).
+ if self._last.get_content_maintype() == 'multipart':
+ epilogue = self._last.epilogue
+ if epilogue == '':
+ self._last.epilogue = None
+ elif epilogue is not None:
+ mo = NLCRE_eol.search(epilogue)
+ if mo:
+ end = len(mo.group(0))
+ self._last.epilogue = epilogue[:-end]
+ else:
+ payload = self._last._payload
+ if isinstance(payload, str):
+ mo = NLCRE_eol.search(payload)
+ if mo:
+ payload = payload[:-len(mo.group(0))]
+ self._last._payload = payload
+ self._input.pop_eof_matcher()
+ self._pop_message()
+ # Set the multipart up for newline cleansing, which will
+ # happen if we're in a nested multipart.
+ self._last = self._cur
+ else:
+ # I think we must be in the preamble
+ assert capturing_preamble
+ preamble.append(line)
+ # We've seen either the EOF or the end boundary. If we're still
+ # capturing the preamble, we never saw the start boundary. Note
+ # that as a defect and store the captured text as the payload.
+ if capturing_preamble:
+ defect = errors.StartBoundaryNotFoundDefect()
+ self.policy.handle_defect(self._cur, defect)
+ self._cur.set_payload(EMPTYSTRING.join(preamble))
+ epilogue = []
+ for line in self._input:
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ self._cur.epilogue = EMPTYSTRING.join(epilogue)
+ return
+ # If we're not processing the preamble, then we might have seen
+ # EOF without seeing that end boundary...that is also a defect.
+ if not close_boundary_seen:
+ defect = errors.CloseBoundaryNotFoundDefect()
+ self.policy.handle_defect(self._cur, defect)
+ return
+ # Everything from here to the EOF is epilogue. If the end boundary
+ # ended in a newline, we'll need to make sure the epilogue isn't
+ # None
+ if linesep:
+ epilogue = ['']
+ else:
+ epilogue = []
+ for line in self._input:
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ epilogue.append(line)
+ # Any CRLF at the front of the epilogue is not technically part of
+ # the epilogue. Also, watch out for an empty string epilogue,
+ # which means a single newline.
+ if epilogue:
+ firstline = epilogue[0]
+ bolmo = NLCRE_bol.match(firstline)
+ if bolmo:
+ epilogue[0] = firstline[len(bolmo.group(0)):]
+ self._cur.epilogue = EMPTYSTRING.join(epilogue)
+ return
+ # Otherwise, it's some non-multipart type, so the entire rest of the
+ # file contents becomes the payload.
+ lines = []
+ for line in self._input:
+ if line is NeedMoreData:
+ yield NeedMoreData
+ continue
+ lines.append(line)
+ self._cur.set_payload(EMPTYSTRING.join(lines))
+
+ def _parse_headers(self, lines):
+ # Passed a list of lines that make up the headers for the current msg
+ lastheader = ''
+ lastvalue = []
+ for lineno, line in enumerate(lines):
+ # Check for continuation
+ if line[0] in ' \t':
+ if not lastheader:
+ # The first line of the headers was a continuation. This
+ # is illegal, so let's note the defect, store the illegal
+ # line, and ignore it for purposes of headers.
+ defect = errors.FirstHeaderLineIsContinuationDefect(line)
+ self.policy.handle_defect(self._cur, defect)
+ continue
+ lastvalue.append(line)
+ continue
+ if lastheader:
+ self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
+ lastheader, lastvalue = '', []
+ # Check for envelope header, i.e. unix-from
+ if line.startswith('From '):
+ if lineno == 0:
+ # Strip off the trailing newline
+ mo = NLCRE_eol.search(line)
+ if mo:
+ line = line[:-len(mo.group(0))]
+ self._cur.set_unixfrom(line)
+ continue
+ elif lineno == len(lines) - 1:
+ # Something looking like a unix-from at the end - it's
+ # probably the first line of the body, so push back the
+ # line and stop.
+ self._input.unreadline(line)
+ return
+ else:
+ # Weirdly placed unix-from line. Note this as a defect
+ # and ignore it.
+ defect = errors.MisplacedEnvelopeHeaderDefect(line)
+ self._cur.defects.append(defect)
+ continue
+ # Split the line on the colon separating field name from value.
+ # There will always be a colon, because if there wasn't the part of
+ # the parser that calls us would have started parsing the body.
+ i = line.find(':')
+
+ # If the colon is on the start of the line the header is clearly
+ # malformed, but we might be able to salvage the rest of the
+ # message. Track the error but keep going.
+ if i == 0:
+ defect = errors.InvalidHeaderDefect("Missing header name.")
+ self._cur.defects.append(defect)
+ continue
+
+ assert i>0, "_parse_headers fed line with no : and no leading WS"
+ lastheader = line[:i]
+ lastvalue = [line]
+ # Done with all the lines, so handle the last header.
+ if lastheader:
+ self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
+
+
+class BytesFeedParser(FeedParser):
+ """Like FeedParser, but feed accepts bytes."""
+
+ def feed(self, data):
+ super().feed(data.decode('ascii', 'surrogateescape'))
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/generator.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/generator.py
new file mode 100644
index 00000000..ae670c23
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/generator.py
@@ -0,0 +1,508 @@
+# Copyright (C) 2001-2010 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Classes to generate plain text from a message object tree."""
+
+__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
+
+import re
+import sys
+import time
+import random
+
+from copy import deepcopy
+from io import StringIO, BytesIO
+from email.utils import _has_surrogates
+
+UNDERSCORE = '_'
+NL = '\n' # XXX: no longer used by the code below.
+
+NLCRE = re.compile(r'\r\n|\r|\n')
+fcre = re.compile(r'^From ', re.MULTILINE)
+
+
+
+class Generator:
+ """Generates output from a Message object tree.
+
+ This basic generator writes the message to the given file object as plain
+ text.
+ """
+ #
+ # Public interface
+ #
+
+ def __init__(self, outfp, mangle_from_=None, maxheaderlen=None, *,
+ policy=None):
+ """Create the generator for message flattening.
+
+ outfp is the output file-like object for writing the message to. It
+ must have a write() method.
+
+ Optional mangle_from_ is a flag that, when True (the default if policy
+ is not set), escapes From_ lines in the body of the message by putting
+ a `>' in front of them.
+
+ Optional maxheaderlen specifies the longest length for a non-continued
+ header. When a header line is longer (in characters, with tabs
+ expanded to 8 spaces) than maxheaderlen, the header will split as
+ defined in the Header class. Set maxheaderlen to zero to disable
+ header wrapping. The default is 78, as recommended (but not required)
+ by RFC 2822.
+
+ The policy keyword specifies a policy object that controls a number of
+ aspects of the generator's operation. If no policy is specified,
+ the policy associated with the Message object passed to the
+ flatten method is used.
+
+ """
+
+ if mangle_from_ is None:
+ mangle_from_ = True if policy is None else policy.mangle_from_
+ self._fp = outfp
+ self._mangle_from_ = mangle_from_
+ self.maxheaderlen = maxheaderlen
+ self.policy = policy
+
+ def write(self, s):
+ # Just delegate to the file object
+ self._fp.write(s)
+
+ def flatten(self, msg, unixfrom=False, linesep=None):
+ r"""Print the message object tree rooted at msg to the output file
+ specified when the Generator instance was created.
+
+ unixfrom is a flag that forces the printing of a Unix From_ delimiter
+ before the first object in the message tree. If the original message
+ has no From_ delimiter, a `standard' one is crafted. By default, this
+ is False to inhibit the printing of any From_ delimiter.
+
+ Note that for subobjects, no From_ line is printed.
+
+ linesep specifies the characters used to indicate a new line in
+ the output. The default value is determined by the policy specified
+ when the Generator instance was created or, if none was specified,
+ from the policy associated with the msg.
+
+ """
+ # We use the _XXX constants for operating on data that comes directly
+ # from the msg, and _encoded_XXX constants for operating on data that
+ # has already been converted (to bytes in the BytesGenerator) and
+ # inserted into a temporary buffer.
+ policy = msg.policy if self.policy is None else self.policy
+ if linesep is not None:
+ policy = policy.clone(linesep=linesep)
+ if self.maxheaderlen is not None:
+ policy = policy.clone(max_line_length=self.maxheaderlen)
+ self._NL = policy.linesep
+ self._encoded_NL = self._encode(self._NL)
+ self._EMPTY = ''
+ self._encoded_EMPTY = self._encode(self._EMPTY)
+ # Because we use clone (below) when we recursively process message
+ # subparts, and because clone uses the computed policy (not None),
+ # submessages will automatically get set to the computed policy when
+ # they are processed by this code.
+ old_gen_policy = self.policy
+ old_msg_policy = msg.policy
+ try:
+ self.policy = policy
+ msg.policy = policy
+ if unixfrom:
+ ufrom = msg.get_unixfrom()
+ if not ufrom:
+ ufrom = 'From nobody ' + time.ctime(time.time())
+ self.write(ufrom + self._NL)
+ self._write(msg)
+ finally:
+ self.policy = old_gen_policy
+ msg.policy = old_msg_policy
+
+ def clone(self, fp):
+ """Clone this generator with the exact same options."""
+ return self.__class__(fp,
+ self._mangle_from_,
+ None, # Use policy setting, which we've adjusted
+ policy=self.policy)
+
+ #
+ # Protected interface - undocumented ;/
+ #
+
+ # Note that we use 'self.write' when what we are writing is coming from
+ # the source, and self._fp.write when what we are writing is coming from a
+ # buffer (because the Bytes subclass has already had a chance to transform
+ # the data in its write method in that case). This is an entirely
+ # pragmatic split determined by experiment; we could be more general by
+ # always using write and having the Bytes subclass write method detect when
+ # it has already transformed the input; but, since this whole thing is a
+ # hack anyway this seems good enough.
+
+ def _new_buffer(self):
+ # BytesGenerator overrides this to return BytesIO.
+ return StringIO()
+
+ def _encode(self, s):
+ # BytesGenerator overrides this to encode strings to bytes.
+ return s
+
+ def _write_lines(self, lines):
+ # We have to transform the line endings.
+ if not lines:
+ return
+ lines = NLCRE.split(lines)
+ for line in lines[:-1]:
+ self.write(line)
+ self.write(self._NL)
+ if lines[-1]:
+ self.write(lines[-1])
+ # XXX logic tells me this else should be needed, but the tests fail
+ # with it and pass without it. (NLCRE.split ends with a blank element
+ # if and only if there was a trailing newline.)
+ #else:
+ # self.write(self._NL)
+
+ def _write(self, msg):
+ # We can't write the headers yet because of the following scenario:
+ # say a multipart message includes the boundary string somewhere in
+ # its body. We'd have to calculate the new boundary /before/ we write
+ # the headers so that we can write the correct Content-Type:
+ # parameter.
+ #
+ # The way we do this, so as to make the _handle_*() methods simpler,
+ # is to cache any subpart writes into a buffer. Then we write the
+ # headers and the buffer contents. That way, subpart handlers can
+ # Do The Right Thing, and can still modify the Content-Type: header if
+ # necessary.
+ oldfp = self._fp
+ try:
+ self._munge_cte = None
+ self._fp = sfp = self._new_buffer()
+ self._dispatch(msg)
+ finally:
+ self._fp = oldfp
+ munge_cte = self._munge_cte
+ del self._munge_cte
+ # If we munged the cte, copy the message again and re-fix the CTE.
+ if munge_cte:
+ msg = deepcopy(msg)
+ msg.replace_header('content-transfer-encoding', munge_cte[0])
+ msg.replace_header('content-type', munge_cte[1])
+ # Write the headers. First we see if the message object wants to
+ # handle that itself. If not, we'll do it generically.
+ meth = getattr(msg, '_write_headers', None)
+ if meth is None:
+ self._write_headers(msg)
+ else:
+ meth(self)
+ self._fp.write(sfp.getvalue())
+
+ def _dispatch(self, msg):
+ # Get the Content-Type: for the message, then try to dispatch to
+ # self._handle_<maintype>_<subtype>(). If there's no handler for the
+ # full MIME type, then dispatch to self._handle_<maintype>(). If
+ # that's missing too, then dispatch to self._writeBody().
+ main = msg.get_content_maintype()
+ sub = msg.get_content_subtype()
+ specific = UNDERSCORE.join((main, sub)).replace('-', '_')
+ meth = getattr(self, '_handle_' + specific, None)
+ if meth is None:
+ generic = main.replace('-', '_')
+ meth = getattr(self, '_handle_' + generic, None)
+ if meth is None:
+ meth = self._writeBody
+ meth(msg)
+
+ #
+ # Default handlers
+ #
+
+ def _write_headers(self, msg):
+ for h, v in msg.raw_items():
+ self.write(self.policy.fold(h, v))
+ # A blank line always separates headers from body
+ self.write(self._NL)
+
+ #
+ # Handlers for writing types and subtypes
+ #
+
+ def _handle_text(self, msg):
+ payload = msg.get_payload()
+ if payload is None:
+ return
+ if not isinstance(payload, str):
+ raise TypeError('string payload expected: %s' % type(payload))
+ if _has_surrogates(msg._payload):
+ charset = msg.get_param('charset')
+ if charset is not None:
+ # XXX: This copy stuff is an ugly hack to avoid modifying the
+ # existing message.
+ msg = deepcopy(msg)
+ del msg['content-transfer-encoding']
+ msg.set_payload(payload, charset)
+ payload = msg.get_payload()
+ self._munge_cte = (msg['content-transfer-encoding'],
+ msg['content-type'])
+ if self._mangle_from_:
+ payload = fcre.sub('>From ', payload)
+ self._write_lines(payload)
+
+ # Default body handler
+ _writeBody = _handle_text
+
+ def _handle_multipart(self, msg):
+ # The trick here is to write out each part separately, merge them all
+ # together, and then make sure that the boundary we've chosen isn't
+ # present in the payload.
+ msgtexts = []
+ subparts = msg.get_payload()
+ if subparts is None:
+ subparts = []
+ elif isinstance(subparts, str):
+ # e.g. a non-strict parse of a message with no starting boundary.
+ self.write(subparts)
+ return
+ elif not isinstance(subparts, list):
+ # Scalar payload
+ subparts = [subparts]
+ for part in subparts:
+ s = self._new_buffer()
+ g = self.clone(s)
+ g.flatten(part, unixfrom=False, linesep=self._NL)
+ msgtexts.append(s.getvalue())
+ # BAW: What about boundaries that are wrapped in double-quotes?
+ boundary = msg.get_boundary()
+ if not boundary:
+ # Create a boundary that doesn't appear in any of the
+ # message texts.
+ alltext = self._encoded_NL.join(msgtexts)
+ boundary = self._make_boundary(alltext)
+ msg.set_boundary(boundary)
+ # If there's a preamble, write it out, with a trailing CRLF
+ if msg.preamble is not None:
+ if self._mangle_from_:
+ preamble = fcre.sub('>From ', msg.preamble)
+ else:
+ preamble = msg.preamble
+ self._write_lines(preamble)
+ self.write(self._NL)
+ # dash-boundary transport-padding CRLF
+ self.write('--' + boundary + self._NL)
+ # body-part
+ if msgtexts:
+ self._fp.write(msgtexts.pop(0))
+ # *encapsulation
+ # --> delimiter transport-padding
+ # --> CRLF body-part
+ for body_part in msgtexts:
+ # delimiter transport-padding CRLF
+ self.write(self._NL + '--' + boundary + self._NL)
+ # body-part
+ self._fp.write(body_part)
+ # close-delimiter transport-padding
+ self.write(self._NL + '--' + boundary + '--' + self._NL)
+ if msg.epilogue is not None:
+ if self._mangle_from_:
+ epilogue = fcre.sub('>From ', msg.epilogue)
+ else:
+ epilogue = msg.epilogue
+ self._write_lines(epilogue)
+
+ def _handle_multipart_signed(self, msg):
+ # The contents of signed parts have to stay unmodified in order to keep
+ # the signature intact per RFC1847 2.1, so we disable header wrapping.
+ # RDM: This isn't enough to completely preserve the part, but it helps.
+ p = self.policy
+ self.policy = p.clone(max_line_length=0)
+ try:
+ self._handle_multipart(msg)
+ finally:
+ self.policy = p
+
+ def _handle_message_delivery_status(self, msg):
+ # We can't just write the headers directly to self's file object
+ # because this will leave an extra newline between the last header
+ # block and the boundary. Sigh.
+ blocks = []
+ for part in msg.get_payload():
+ s = self._new_buffer()
+ g = self.clone(s)
+ g.flatten(part, unixfrom=False, linesep=self._NL)
+ text = s.getvalue()
+ lines = text.split(self._encoded_NL)
+ # Strip off the unnecessary trailing empty line
+ if lines and lines[-1] == self._encoded_EMPTY:
+ blocks.append(self._encoded_NL.join(lines[:-1]))
+ else:
+ blocks.append(text)
+ # Now join all the blocks with an empty line. This has the lovely
+ # effect of separating each block with an empty line, but not adding
+ # an extra one after the last one.
+ self._fp.write(self._encoded_NL.join(blocks))
+
+ def _handle_message(self, msg):
+ s = self._new_buffer()
+ g = self.clone(s)
+ # The payload of a message/rfc822 part should be a multipart sequence
+ # of length 1. The zeroth element of the list should be the Message
+ # object for the subpart. Extract that object, stringify it, and
+ # write it out.
+ # Except, it turns out, when it's a string instead, which happens when
+ # and only when HeaderParser is used on a message of mime type
+ # message/rfc822. Such messages are generated by, for example,
+ # Groupwise when forwarding unadorned messages. (Issue 7970.) So
+ # in that case we just emit the string body.
+ payload = msg._payload
+ if isinstance(payload, list):
+ g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL)
+ payload = s.getvalue()
+ else:
+ payload = self._encode(payload)
+ self._fp.write(payload)
+
+ # This used to be a module level function; we use a classmethod for this
+ # and _compile_re so we can continue to provide the module level function
+ # for backward compatibility by doing
+ # _make_boundary = Generator._make_boundary
+ # at the end of the module. It *is* internal, so we could drop that...
+ @classmethod
+ def _make_boundary(cls, text=None):
+ # Craft a random boundary. If text is given, ensure that the chosen
+ # boundary doesn't appear in the text.
+ token = random.randrange(sys.maxsize)
+ boundary = ('=' * 15) + (_fmt % token) + '=='
+ if text is None:
+ return boundary
+ b = boundary
+ counter = 0
+ while True:
+ cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
+ if not cre.search(text):
+ break
+ b = boundary + '.' + str(counter)
+ counter += 1
+ return b
+
+ @classmethod
+ def _compile_re(cls, s, flags):
+ return re.compile(s, flags)
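
A short sketch of flatten() with a Unix From_ delimiter and From_ mangling;
the message contents are arbitrary:

    from io import StringIO
    from email.message import EmailMessage
    from email.generator import Generator

    msg = EmailMessage()
    msg['Subject'] = 'demo'
    msg.set_content('From the beginning\nplain body\n')

    buf = StringIO()
    Generator(buf, mangle_from_=True).flatten(msg, unixfrom=True)
    text = buf.getvalue()
    print(text.splitlines()[0])              # From nobody <current ctime>
    print('>From the beginning' in text)     # True: the body From_ was mangled
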
+
+
+class BytesGenerator(Generator):
+ """Generates a bytes version of a Message object tree.
+
+ Functionally identical to the base Generator except that the output is
+ bytes and not string. When surrogates were used in the input to encode
+ bytes, these are decoded back to bytes for output. If the policy has
+ cte_type set to 7bit, then the message is transformed such that the
+ non-ASCII bytes are properly content transfer encoded, using the charset
+ unknown-8bit.
+
+ The outfp object must accept bytes in its write method.
+ """
+
+ def write(self, s):
+ self._fp.write(s.encode('ascii', 'surrogateescape'))
+
+ def _new_buffer(self):
+ return BytesIO()
+
+ def _encode(self, s):
+ return s.encode('ascii')
+
+ def _write_headers(self, msg):
+ # This is almost the same as the string version, except for handling
+ # strings with 8bit bytes.
+ for h, v in msg.raw_items():
+ self._fp.write(self.policy.fold_binary(h, v))
+ # A blank line always separates headers from body
+ self.write(self._NL)
+
+ def _handle_text(self, msg):
+ # If the string has surrogates the original source was bytes, so
+ # just write it back out.
+ if msg._payload is None:
+ return
+ if _has_surrogates(msg._payload) and self.policy.cte_type != '7bit':
+ if self._mangle_from_:
+ msg._payload = fcre.sub(">From ", msg._payload)
+ self._write_lines(msg._payload)
+ else:
+ super(BytesGenerator,self)._handle_text(msg)
+
+ # Default body handler
+ _writeBody = _handle_text
+
+ @classmethod
+ def _compile_re(cls, s, flags):
+ return re.compile(s.encode('ascii'), flags)
+
+
+
+_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
+
+class DecodedGenerator(Generator):
+ """Generates a text representation of a message.
+
+ Like the Generator base class, except that non-text parts are substituted
+ with a format string representing the part.
+ """
+ def __init__(self, outfp, mangle_from_=None, maxheaderlen=None, fmt=None, *,
+ policy=None):
+ """Like Generator.__init__() except that an additional optional
+ argument is allowed.
+
+ Walks through all subparts of a message. If the subpart is of main
+ type `text', then it prints the decoded payload of the subpart.
+
+ Otherwise, fmt is a format string that is used instead of the message
+ payload. fmt is expanded with the following keywords (in
+ %(keyword)s format):
+
+ type : Full MIME type of the non-text part
+ maintype : Main MIME type of the non-text part
+ subtype : Sub-MIME type of the non-text part
+ filename : Filename of the non-text part
+ description: Description associated with the non-text part
+ encoding : Content transfer encoding of the non-text part
+
+ The default value for fmt is None, meaning
+
+ [Non-text (%(type)s) part of message omitted, filename %(filename)s]
+ """
+ Generator.__init__(self, outfp, mangle_from_, maxheaderlen,
+ policy=policy)
+ if fmt is None:
+ self._fmt = _FMT
+ else:
+ self._fmt = fmt
+
+ def _dispatch(self, msg):
+ for part in msg.walk():
+ maintype = part.get_content_maintype()
+ if maintype == 'text':
+ print(part.get_payload(decode=False), file=self)
+ elif maintype == 'multipart':
+ # Just skip this
+ pass
+ else:
+ print(self._fmt % {
+ 'type' : part.get_content_type(),
+ 'maintype' : part.get_content_maintype(),
+ 'subtype' : part.get_content_subtype(),
+ 'filename' : part.get_filename('[no filename]'),
+ 'description': part.get('Content-Description',
+ '[no description]'),
+ 'encoding' : part.get('Content-Transfer-Encoding',
+ '[no encoding]'),
+ }, file=self)
+
+
+
+# Helper used by Generator._make_boundary
+_width = len(repr(sys.maxsize-1))
+_fmt = '%%0%dd' % _width
+
+# Backward compatibility
+_make_boundary = Generator._make_boundary
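
A sketch of the BytesGenerator behaviour described in its docstring: with the
default policy (cte_type is '8bit') the non-ASCII bytes of a parsed message
should come back out unchanged via the surrogateescape round trip. The sample
message is arbitrary:

    from io import BytesIO
    from email import message_from_bytes, policy
    from email.generator import BytesGenerator

    raw = (b'Subject: caf\xc3\xa9\n'
           b'Content-Type: text/plain; charset="utf-8"\n'
           b'Content-Transfer-Encoding: 8bit\n'
           b'\n'
           b'caf\xc3\xa9 body\n')
    msg = message_from_bytes(raw, policy=policy.default)

    buf = BytesIO()
    BytesGenerator(buf, policy=policy.default).flatten(msg)
    print(buf.getvalue() == raw)             # True
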
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/header.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/header.py
new file mode 100644
index 00000000..7b30a039
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/header.py
@@ -0,0 +1,578 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Author: Ben Gertzfield, Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Header encoding and decoding functionality."""
+
+__all__ = [
+ 'Header',
+ 'decode_header',
+ 'make_header',
+ ]
+
+import re
+import binascii
+
+import email.quoprimime
+import email.base64mime
+
+from email.errors import HeaderParseError
+from email import charset as _charset
+Charset = _charset.Charset
+
+NL = '\n'
+SPACE = ' '
+BSPACE = b' '
+SPACE8 = ' ' * 8
+EMPTYSTRING = ''
+MAXLINELEN = 78
+FWS = ' \t'
+
+USASCII = Charset('us-ascii')
+UTF8 = Charset('utf-8')
+
+# Match encoded-word strings in the form =?charset?q?Hello_World?=
+ecre = re.compile(r'''
+ =\? # literal =?
+ (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
+ \? # literal ?
+ (?P<encoding>[qQbB]) # either a "q" or a "b", case insensitive
+ \? # literal ?
+ (?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
+ \?= # literal ?=
+ ''', re.VERBOSE | re.MULTILINE)
+
+# Field name regexp, including trailing colon, but not separating whitespace,
+# according to RFC 2822. Character range is from tilde to exclamation mark.
+# For use with .match()
+fcre = re.compile(r'[\041-\176]+:$')
+
+# Find a header embedded in a putative header value. Used to check for
+# header injection attack.
+_embedded_header = re.compile(r'\n[^ \t]+:')
+
+
+
+# Helpers
+_max_append = email.quoprimime._max_append
+
+
+
+def decode_header(header):
+ """Decode a message header value without converting charset.
+
+ Returns a list of (string, charset) pairs containing each of the decoded
+ parts of the header. Charset is None for non-encoded parts of the header,
+ otherwise a lower-case string containing the name of the character set
+ specified in the encoded string.
+
+ header may be a string that may or may not contain RFC2047 encoded words,
+ or it may be a Header object.
+
+ An email.errors.HeaderParseError may be raised when certain decoding error
+ occurs (e.g. a base64 decoding exception).
+ """
+ # If it is a Header object, we can just return the encoded chunks.
+ if hasattr(header, '_chunks'):
+ return [(_charset._encode(string, str(charset)), str(charset))
+ for string, charset in header._chunks]
+ # If no encoding, just return the header with no charset.
+ if not ecre.search(header):
+ return [(header, None)]
+ # First step is to parse all the encoded parts into triplets of the form
+ # (encoded_string, encoding, charset). For unencoded strings, the last
+ # two parts will be None.
+ words = []
+ for line in header.splitlines():
+ parts = ecre.split(line)
+ first = True
+ while parts:
+ unencoded = parts.pop(0)
+ if first:
+ unencoded = unencoded.lstrip()
+ first = False
+ if unencoded:
+ words.append((unencoded, None, None))
+ if parts:
+ charset = parts.pop(0).lower()
+ encoding = parts.pop(0).lower()
+ encoded = parts.pop(0)
+ words.append((encoded, encoding, charset))
+ # Now loop over words and remove words that consist of whitespace
+ # between two encoded strings.
+ droplist = []
+ for n, w in enumerate(words):
+ if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace():
+ droplist.append(n-1)
+ for d in reversed(droplist):
+ del words[d]
+
+ # The next step is to decode each encoded word by applying the reverse
+ # base64 or quopri transformation. decoded_words is now a list of the
+ # form (decoded_word, charset).
+ decoded_words = []
+ for encoded_string, encoding, charset in words:
+ if encoding is None:
+ # This is an unencoded word.
+ decoded_words.append((encoded_string, charset))
+ elif encoding == 'q':
+ word = email.quoprimime.header_decode(encoded_string)
+ decoded_words.append((word, charset))
+ elif encoding == 'b':
+ paderr = len(encoded_string) % 4 # Postel's law: add missing padding
+ if paderr:
+ encoded_string += '==='[:4 - paderr]
+ try:
+ word = email.base64mime.decode(encoded_string)
+ except binascii.Error:
+ raise HeaderParseError('Base64 decoding error')
+ else:
+ decoded_words.append((word, charset))
+ else:
+ raise AssertionError('Unexpected encoding: ' + encoding)
+ # Now convert all words to bytes and collapse consecutive runs of
+ # similarly encoded words.
+ collapsed = []
+ last_word = last_charset = None
+ for word, charset in decoded_words:
+ if isinstance(word, str):
+ word = bytes(word, 'raw-unicode-escape')
+ if last_word is None:
+ last_word = word
+ last_charset = charset
+ elif charset != last_charset:
+ collapsed.append((last_word, last_charset))
+ last_word = word
+ last_charset = charset
+ elif last_charset is None:
+ last_word += BSPACE + word
+ else:
+ last_word += word
+ collapsed.append((last_word, last_charset))
+ return collapsed
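+
+# Illustrative sketch of typical results, assuming an iso-8859-1 encoded word
+# and a plain (unencoded) value as inputs; encoded words come back as bytes,
+# unencoded parts as str with charset None:
+#
+#   >>> decode_header('=?iso-8859-1?q?p=F6stal?=')
+#   [(b'p\xf6stal', 'iso-8859-1')]
+#   >>> decode_header('plain text')
+#   [('plain text', None)]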
+
+
+
+def make_header(decoded_seq, maxlinelen=None, header_name=None,
+ continuation_ws=' '):
+ """Create a Header from a sequence of pairs as returned by decode_header()
+
+ decode_header() takes a header value string and returns a sequence of
+ pairs of the format (decoded_string, charset) where charset is the string
+ name of the character set.
+
+ This function takes one of those sequence of pairs and returns a Header
+ instance. Optional maxlinelen, header_name, and continuation_ws are as in
+ the Header constructor.
+ """
+ h = Header(maxlinelen=maxlinelen, header_name=header_name,
+ continuation_ws=continuation_ws)
+ for s, charset in decoded_seq:
+ # None means us-ascii but we can simply pass it on to h.append()
+ if charset is not None and not isinstance(charset, Charset):
+ charset = Charset(charset)
+ h.append(s, charset)
+ return h
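+
+# Illustrative round trip, assuming the iso-8859-1 encoded word shown above:
+#
+#   >>> str(make_header(decode_header('=?iso-8859-1?q?p=F6stal?=')))
+#   'pöstal'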
+
+
+
+class Header:
+ def __init__(self, s=None, charset=None,
+ maxlinelen=None, header_name=None,
+ continuation_ws=' ', errors='strict'):
+ """Create a MIME-compliant header that can contain many character sets.
+
+ Optional s is the initial header value. If None, the initial header
+ value is not set. You can later append to the header with .append()
+ method calls. s may be a byte string or a Unicode string, but see the
+ .append() documentation for semantics.
+
+ Optional charset serves two purposes: it has the same meaning as the
+ charset argument to the .append() method. It also sets the default
+ character set for all subsequent .append() calls that omit the charset
+ argument. If charset is not provided in the constructor, the us-ascii
+ charset is used both as s's initial charset and as the default for
+ subsequent .append() calls.
+
+ The maximum line length can be specified explicitly via maxlinelen. For
+ splitting the first line to a shorter value (to account for the field
+ header which isn't included in s, e.g. `Subject') pass in the name of
+ the field in header_name. The default maxlinelen is 78 as recommended
+ by RFC 2822.
+
+ continuation_ws must be RFC 2822 compliant folding whitespace (usually
+ either a space or a hard tab) which will be prepended to continuation
+ lines.
+
+ errors is passed through to the .append() call.
+ """
+ if charset is None:
+ charset = USASCII
+ elif not isinstance(charset, Charset):
+ charset = Charset(charset)
+ self._charset = charset
+ self._continuation_ws = continuation_ws
+ self._chunks = []
+ if s is not None:
+ self.append(s, charset, errors)
+ if maxlinelen is None:
+ maxlinelen = MAXLINELEN
+ self._maxlinelen = maxlinelen
+ if header_name is None:
+ self._headerlen = 0
+ else:
+ # Take the separating colon and space into account.
+ self._headerlen = len(header_name) + 2
+
+ def __str__(self):
+ """Return the string value of the header."""
+ self._normalize()
+ uchunks = []
+ lastcs = None
+ lastspace = None
+ for string, charset in self._chunks:
+ # We must preserve spaces between encoded and non-encoded word
+ # boundaries, which means for us we need to add a space when we go
+ # from a charset to None/us-ascii, or from None/us-ascii to a
+ # charset. Only do this for the second and subsequent chunks.
+ # Don't add a space if the None/us-ascii string already has
+ # a space (trailing or leading depending on transition)
+ nextcs = charset
+ if nextcs == _charset.UNKNOWN8BIT:
+ original_bytes = string.encode('ascii', 'surrogateescape')
+ string = original_bytes.decode('ascii', 'replace')
+ if uchunks:
+ hasspace = string and self._nonctext(string[0])
+ if lastcs not in (None, 'us-ascii'):
+ if nextcs in (None, 'us-ascii') and not hasspace:
+ uchunks.append(SPACE)
+ nextcs = None
+ elif nextcs not in (None, 'us-ascii') and not lastspace:
+ uchunks.append(SPACE)
+ lastspace = string and self._nonctext(string[-1])
+ lastcs = nextcs
+ uchunks.append(string)
+ return EMPTYSTRING.join(uchunks)
+
+ # Rich comparison operators for equality only. BAW: does it make sense to
+ # have or explicitly disable <, <=, >, >= operators?
+ def __eq__(self, other):
+ # other may be a Header or a string. Both are fine so coerce
+ # ourselves to a unicode (of the unencoded header value), swap the
+ # args and do another comparison.
+ return other == str(self)
+
+ def append(self, s, charset=None, errors='strict'):
+ """Append a string to the MIME header.
+
+ Optional charset, if given, should be a Charset instance or the name
+ of a character set (which will be converted to a Charset instance). A
+ value of None (the default) means that the charset given in the
+ constructor is used.
+
+ s may be a byte string or a Unicode string. If it is a byte string
+ (i.e. isinstance(s, str) is false), then charset is the encoding of
+ that byte string, and a UnicodeError will be raised if the string
+ cannot be decoded with that charset. If s is a Unicode string, then
+ charset is a hint specifying the character set of the characters in
+ the string. In either case, when producing an RFC 2822 compliant
+ header using RFC 2047 rules, the string will be encoded using the
+ output codec of the charset. If the string cannot be encoded to the
+ output codec, a UnicodeError will be raised.
+
+ Optional `errors' is passed as the errors argument to the decode
+ call if s is a byte string.
+ """
+ if charset is None:
+ charset = self._charset
+ elif not isinstance(charset, Charset):
+ charset = Charset(charset)
+ if not isinstance(s, str):
+ input_charset = charset.input_codec or 'us-ascii'
+ if input_charset == _charset.UNKNOWN8BIT:
+ s = s.decode('us-ascii', 'surrogateescape')
+ else:
+ s = s.decode(input_charset, errors)
+ # Ensure that the bytes we're storing can be decoded to the output
+ # character set, otherwise an early error is raised.
+ output_charset = charset.output_codec or 'us-ascii'
+ if output_charset != _charset.UNKNOWN8BIT:
+ try:
+ s.encode(output_charset, errors)
+ except UnicodeEncodeError:
+ if output_charset!='us-ascii':
+ raise
+ charset = UTF8
+ self._chunks.append((s, charset))
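+
+    # Illustrative sketch of append(), assuming Latin-1 bytes as input:
+    #
+    #   h = Header('Hello ')
+    #   h.append(b'p\xf6stal', 'iso-8859-1')   # bytes decoded with the charset
+    #   str(h)   ->  'Hello pöstal'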
+
+ def _nonctext(self, s):
+ """True if string s is not a ctext character of RFC822.
+ """
+ return s.isspace() or s in ('(', ')', '\\')
+
+ def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
+ r"""Encode a message header into an RFC-compliant format.
+
+ There are many issues involved in converting a given string for use in
+ an email header. Only certain character sets are readable in most
+ email clients, and as header strings can only contain a subset of
+ 7-bit ASCII, care must be taken to properly convert and encode (with
+ Base64 or quoted-printable) header strings. In addition, there is a
+ 75-character length limit on any given encoded header field, so
+ line-wrapping must be performed, even with double-byte character sets.
+
+ Optional maxlinelen specifies the maximum length of each generated
+ line, exclusive of the linesep string. Individual lines may be longer
+ than maxlinelen if a folding point cannot be found. The first line
+ will be shorter by the length of the header name plus ": " if a header
+ name was specified at Header construction time. The default value for
+ maxlinelen is determined at header construction time.
+
+ Optional splitchars is a string containing characters which should be
+ given extra weight by the splitting algorithm during normal header
+ wrapping. This is in very rough support of RFC 2822's `higher level
+ syntactic breaks': split points preceded by a splitchar are preferred
+ during line splitting, with the characters preferred in the order in
+ which they appear in the string. Space and tab may be included in the
+ string to indicate whether preference should be given to one over the
+ other as a split point when other split chars do not appear in the line
+ being split. Splitchars does not affect RFC 2047 encoded lines.
+
+ Optional linesep is a string to be used to separate the lines of
+ the value. The default value is the most useful for typical
+ Python applications, but it can be set to \r\n to produce RFC-compliant
+ line separators when needed.
+ """
+ self._normalize()
+ if maxlinelen is None:
+ maxlinelen = self._maxlinelen
+ # A maxlinelen of 0 means don't wrap. For all practical purposes,
+ # choosing a huge number here accomplishes that and makes the
+ # _ValueFormatter algorithm much simpler.
+ if maxlinelen == 0:
+ maxlinelen = 1000000
+ formatter = _ValueFormatter(self._headerlen, maxlinelen,
+ self._continuation_ws, splitchars)
+ lastcs = None
+ hasspace = lastspace = None
+ for string, charset in self._chunks:
+ if hasspace is not None:
+ hasspace = string and self._nonctext(string[0])
+ if lastcs not in (None, 'us-ascii'):
+ if not hasspace or charset not in (None, 'us-ascii'):
+ formatter.add_transition()
+ elif charset not in (None, 'us-ascii') and not lastspace:
+ formatter.add_transition()
+ lastspace = string and self._nonctext(string[-1])
+ lastcs = charset
+ hasspace = False
+ lines = string.splitlines()
+ if lines:
+ formatter.feed('', lines[0], charset)
+ else:
+ formatter.feed('', '', charset)
+ for line in lines[1:]:
+ formatter.newline()
+ if charset.header_encoding is not None:
+ formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
+ charset)
+ else:
+ sline = line.lstrip()
+ fws = line[:len(line)-len(sline)]
+ formatter.feed(fws, sline, charset)
+ if len(lines) > 1:
+ formatter.newline()
+ if self._chunks:
+ formatter.add_transition()
+ value = formatter._str(linesep)
+ if _embedded_header.search(value):
+ raise HeaderParseError("header value appears to contain "
+ "an embedded header: {!r}".format(value))
+ return value
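+
+    # Illustrative sketch of encode(), assuming a Latin-1 value: encoding
+    # Header('p\xf6stal', charset='iso-8859-1') would produce the RFC 2047
+    # form '=?iso-8859-1?q?p=F6stal?=', while a pure us-ascii value comes
+    # back unencoded (folded only if it exceeds maxlinelen).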
+
+ def _normalize(self):
+ # Step 1: Normalize the chunks so that all runs of identical charsets
+ # get collapsed into a single unicode string.
+ chunks = []
+ last_charset = None
+ last_chunk = []
+ for string, charset in self._chunks:
+ if charset == last_charset:
+ last_chunk.append(string)
+ else:
+ if last_charset is not None:
+ chunks.append((SPACE.join(last_chunk), last_charset))
+ last_chunk = [string]
+ last_charset = charset
+ if last_chunk:
+ chunks.append((SPACE.join(last_chunk), last_charset))
+ self._chunks = chunks
+
+
+
+class _ValueFormatter:
+ def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
+ self._maxlen = maxlen
+ self._continuation_ws = continuation_ws
+ self._continuation_ws_len = len(continuation_ws)
+ self._splitchars = splitchars
+ self._lines = []
+ self._current_line = _Accumulator(headerlen)
+
+ def _str(self, linesep):
+ self.newline()
+ return linesep.join(self._lines)
+
+ def __str__(self):
+ return self._str(NL)
+
+ def newline(self):
+ end_of_line = self._current_line.pop()
+ if end_of_line != (' ', ''):
+ self._current_line.push(*end_of_line)
+ if len(self._current_line) > 0:
+ if self._current_line.is_onlyws():
+ self._lines[-1] += str(self._current_line)
+ else:
+ self._lines.append(str(self._current_line))
+ self._current_line.reset()
+
+ def add_transition(self):
+ self._current_line.push(' ', '')
+
+ def feed(self, fws, string, charset):
+ # If the charset has no header encoding (i.e. it is an ASCII encoding)
+ # then we must split the header at the "highest level syntactic break"
+ # possible. Note that we don't have a lot of smarts about field
+ # syntax; we just try to break on semi-colons, then commas, then
+ # whitespace. Eventually, this should be pluggable.
+ if charset.header_encoding is None:
+ self._ascii_split(fws, string, self._splitchars)
+ return
+ # Otherwise, we're doing either a Base64 or a quoted-printable
+ # encoding which means we don't need to split the line on syntactic
+ # breaks. We can basically just find enough characters to fit on the
+ # current line, minus the RFC 2047 chrome. What makes this trickier
+ # though is that we have to split at octet boundaries, not character
+ # boundaries but it's only safe to split at character boundaries so at
+ # best we can only get close.
+ encoded_lines = charset.header_encode_lines(string, self._maxlengths())
+ # The first element extends the current line, but if it's None then
+ # nothing more fit on the current line so start a new line.
+ try:
+ first_line = encoded_lines.pop(0)
+ except IndexError:
+ # There are no encoded lines, so we're done.
+ return
+ if first_line is not None:
+ self._append_chunk(fws, first_line)
+ try:
+ last_line = encoded_lines.pop()
+ except IndexError:
+ # There was only one line.
+ return
+ self.newline()
+ self._current_line.push(self._continuation_ws, last_line)
+ # Everything else are full lines in themselves.
+ for line in encoded_lines:
+ self._lines.append(self._continuation_ws + line)
+
+ def _maxlengths(self):
+ # The first line's length.
+ yield self._maxlen - len(self._current_line)
+ while True:
+ yield self._maxlen - self._continuation_ws_len
+
+ def _ascii_split(self, fws, string, splitchars):
+ # The RFC 2822 header folding algorithm is simple in principle but
+ # complex in practice. Lines may be folded any place where "folding
+ # white space" appears by inserting a linesep character in front of the
+ # FWS. The complication is that not all spaces or tabs qualify as FWS,
+ # and we are also supposed to prefer to break at "higher level
+ # syntactic breaks". We can't do either of these without intimate
+ # knowledge of the structure of structured headers, which we don't have
+ # here. So the best we can do here is prefer to break at the specified
+ # splitchars, and hope that we don't choose any spaces or tabs that
+ # aren't legal FWS. (This is at least better than the old algorithm,
+ # where we would sometimes *introduce* FWS after a splitchar, or the
+ # algorithm before that, where we would turn all white space runs into
+ # single spaces or tabs.)
+ parts = re.split("(["+FWS+"]+)", fws+string)
+ if parts[0]:
+ parts[:0] = ['']
+ else:
+ parts.pop(0)
+ for fws, part in zip(*[iter(parts)]*2):
+ self._append_chunk(fws, part)
+
+ def _append_chunk(self, fws, string):
+ self._current_line.push(fws, string)
+ if len(self._current_line) > self._maxlen:
+ # Find the best split point, working backward from the end.
+ # There might be none, on a long first line.
+ for ch in self._splitchars:
+ for i in range(self._current_line.part_count()-1, 0, -1):
+ if ch.isspace():
+ fws = self._current_line[i][0]
+ if fws and fws[0]==ch:
+ break
+ prevpart = self._current_line[i-1][1]
+ if prevpart and prevpart[-1]==ch:
+ break
+ else:
+ continue
+ break
+ else:
+ fws, part = self._current_line.pop()
+ if self._current_line._initial_size > 0:
+ # There will be a header, so leave it on a line by itself.
+ self.newline()
+ if not fws:
+ # We don't use continuation_ws here because the whitespace
+ # after a header should always be a space.
+ fws = ' '
+ self._current_line.push(fws, part)
+ return
+ remainder = self._current_line.pop_from(i)
+ self._lines.append(str(self._current_line))
+ self._current_line.reset(remainder)
+
+
+class _Accumulator(list):
+
+ def __init__(self, initial_size=0):
+ self._initial_size = initial_size
+ super().__init__()
+
+ def push(self, fws, string):
+ self.append((fws, string))
+
+ def pop_from(self, i=0):
+ popped = self[i:]
+ self[i:] = []
+ return popped
+
+ def pop(self):
+ if self.part_count()==0:
+ return ('', '')
+ return super().pop()
+
+ def __len__(self):
+ return sum((len(fws)+len(part) for fws, part in self),
+ self._initial_size)
+
+ def __str__(self):
+ return EMPTYSTRING.join((EMPTYSTRING.join((fws, part))
+ for fws, part in self))
+
+ def reset(self, startval=None):
+ if startval is None:
+ startval = []
+ self[:] = startval
+ self._initial_size = 0
+
+ def is_onlyws(self):
+ return self._initial_size==0 and (not self or str(self).isspace())
+
+ def part_count(self):
+ return super().__len__()
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/headerregistry.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/headerregistry.py
new file mode 100644
index 00000000..00652049
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/headerregistry.py
@@ -0,0 +1,589 @@
+"""Representing and manipulating email headers via custom objects.
+
+This module provides an implementation of the HeaderRegistry API.
+The implementation is designed to flexibly follow RFC5322 rules.
+
+Eventually HeaderRegistry will be a public API, but it isn't yet,
+and will probably change some before that happens.
+
+"""
+from types import MappingProxyType
+
+from email import utils
+from email import errors
+from email import _header_value_parser as parser
+
+class Address:
+
+ def __init__(self, display_name='', username='', domain='', addr_spec=None):
+ """Create an object representing a full email address.
+
+ An address can have a 'display_name', a 'username', and a 'domain'. In
+ addition to specifying the username and domain separately, they may be
+ specified together by using the addr_spec keyword *instead of* the
+ username and domain keywords. If an addr_spec string is specified it
+ must be properly quoted according to RFC 5322 rules; an error will be
+ raised if it is not.
+
+ An Address object has display_name, username, domain, and addr_spec
+ attributes, all of which are read-only. The addr_spec and the string
+ value of the object are both quoted according to RFC5322 rules, but
+ without any Content Transfer Encoding.
+
+ """
+ # This clause with its potential 'raise' may only happen when an
+ # application program creates an Address object using an addr_spec
+ # keyword. The email library code itself must always supply username
+ # and domain.
+ if addr_spec is not None:
+ if username or domain:
+ raise TypeError("addrspec specified when username and/or "
+ "domain also specified")
+ a_s, rest = parser.get_addr_spec(addr_spec)
+ if rest:
+ raise ValueError("Invalid addr_spec; only '{}' "
+ "could be parsed from '{}'".format(
+ a_s, addr_spec))
+ if a_s.all_defects:
+ raise a_s.all_defects[0]
+ username = a_s.local_part
+ domain = a_s.domain
+ self._display_name = display_name
+ self._username = username
+ self._domain = domain
+
+ @property
+ def display_name(self):
+ return self._display_name
+
+ @property
+ def username(self):
+ return self._username
+
+ @property
+ def domain(self):
+ return self._domain
+
+ @property
+ def addr_spec(self):
+ """The addr_spec (username@domain) portion of the address, quoted
+ according to RFC 5322 rules, but with no Content Transfer Encoding.
+ """
+ nameset = set(self.username)
+ if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS):
+ lp = parser.quote_string(self.username)
+ else:
+ lp = self.username
+ if self.domain:
+ return lp + '@' + self.domain
+ if not lp:
+ return '<>'
+ return lp
+
+ def __repr__(self):
+ return "{}(display_name={!r}, username={!r}, domain={!r})".format(
+ self.__class__.__name__,
+ self.display_name, self.username, self.domain)
+
+ def __str__(self):
+ nameset = set(self.display_name)
+ if len(nameset) > len(nameset-parser.SPECIALS):
+ disp = parser.quote_string(self.display_name)
+ else:
+ disp = self.display_name
+ if disp:
+ addr_spec = '' if self.addr_spec=='<>' else self.addr_spec
+ return "{} <{}>".format(disp, addr_spec)
+ return self.addr_spec
+
+ def __eq__(self, other):
+ if type(other) != type(self):
+ return False
+ return (self.display_name == other.display_name and
+ self.username == other.username and
+ self.domain == other.domain)
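+
+    # Illustrative sketch, assuming a display name with no RFC 5322 specials:
+    #
+    #   addr = Address('Fred Flintstone', 'fred', 'example.com')
+    #   str(addr)        ->  'Fred Flintstone <fred@example.com>'
+    #   addr.addr_spec   ->  'fred@example.com'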
+
+
+class Group:
+
+ def __init__(self, display_name=None, addresses=None):
+ """Create an object representing an address group.
+
+ An address group consists of a display_name followed by colon and a
+ list of addresses (see Address) terminated by a semi-colon. The Group
+ is created by specifying a display_name and a possibly empty list of
+ Address objects. A Group can also be used to represent a single
+ address that is not in a group, which is convenient when manipulating
+ lists that are a combination of Groups and individual Addresses. In
+ this case the display_name should be set to None. In particular, the
+        string representation of a Group whose display_name is None is the
+        same as that of the Address object, provided there is exactly one
+        Address object in the addresses list.
+
+ """
+ self._display_name = display_name
+ self._addresses = tuple(addresses) if addresses else tuple()
+
+ @property
+ def display_name(self):
+ return self._display_name
+
+ @property
+ def addresses(self):
+ return self._addresses
+
+ def __repr__(self):
+ return "{}(display_name={!r}, addresses={!r}".format(
+ self.__class__.__name__,
+ self.display_name, self.addresses)
+
+ def __str__(self):
+ if self.display_name is None and len(self.addresses)==1:
+ return str(self.addresses[0])
+ disp = self.display_name
+ if disp is not None:
+ nameset = set(disp)
+ if len(nameset) > len(nameset-parser.SPECIALS):
+ disp = parser.quote_string(disp)
+ adrstr = ", ".join(str(x) for x in self.addresses)
+ adrstr = ' ' + adrstr if adrstr else adrstr
+ return "{}:{};".format(disp, adrstr)
+
+ def __eq__(self, other):
+ if type(other) != type(self):
+ return False
+ return (self.display_name == other.display_name and
+ self.addresses == other.addresses)
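+
+    # Illustrative sketch: str(Group('pals', [Address('', 'ann', 'example.com')]))
+    # would give 'pals: ann@example.com;', while Group(None, [addr]) prints the
+    # same as addr itself.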
+
+
+# Header Classes #
+
+class BaseHeader(str):
+
+ """Base class for message headers.
+
+ Implements generic behavior and provides tools for subclasses.
+
+ A subclass must define a classmethod named 'parse' that takes an unfolded
+ value string and a dictionary as its arguments. The dictionary will
+ contain one key, 'defects', initialized to an empty list. After the call
+    the dictionary must contain two additional keys: 'parse_tree', set to the
+ parse tree obtained from parsing the header, and 'decoded', set to the
+ string value of the idealized representation of the data from the value.
+ (That is, encoded words are decoded, and values that have canonical
+ representations are so represented.)
+
+ The defects key is intended to collect parsing defects, which the message
+ parser will subsequently dispose of as appropriate. The parser should not,
+ insofar as practical, raise any errors. Defects should be added to the
+ list instead. The standard header parsers register defects for RFC
+ compliance issues, for obsolete RFC syntax, and for unrecoverable parsing
+ errors.
+
+ The parse method may add additional keys to the dictionary. In this case
+ the subclass must define an 'init' method, which will be passed the
+ dictionary as its keyword arguments. The method should use (usually by
+ setting them as the value of similarly named attributes) and remove all the
+ extra keys added by its parse method, and then use super to call its parent
+ class with the remaining arguments and keywords.
+
+ The subclass should also make sure that a 'max_count' attribute is defined
+ that is either None or 1. XXX: need to better define this API.
+
+ """
+
+ def __new__(cls, name, value):
+ kwds = {'defects': []}
+ cls.parse(value, kwds)
+ if utils._has_surrogates(kwds['decoded']):
+ kwds['decoded'] = utils._sanitize(kwds['decoded'])
+ self = str.__new__(cls, kwds['decoded'])
+ del kwds['decoded']
+ self.init(name, **kwds)
+ return self
+
+ def init(self, name, *, parse_tree, defects):
+ self._name = name
+ self._parse_tree = parse_tree
+ self._defects = defects
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def defects(self):
+ return tuple(self._defects)
+
+ def __reduce__(self):
+ return (
+ _reconstruct_header,
+ (
+ self.__class__.__name__,
+ self.__class__.__bases__,
+ str(self),
+ ),
+ self.__dict__)
+
+ @classmethod
+ def _reconstruct(cls, value):
+ return str.__new__(cls, value)
+
+ def fold(self, *, policy):
+ """Fold header according to policy.
+
+ The parsed representation of the header is folded according to
+ RFC5322 rules, as modified by the policy. If the parse tree
+ contains surrogateescaped bytes, the bytes are CTE encoded using
+        the charset 'unknown-8bit'.
+
+ Any non-ASCII characters in the parse tree are CTE encoded using
+ charset utf-8. XXX: make this a policy setting.
+
+ The returned value is an ASCII-only string possibly containing linesep
+ characters, and ending with a linesep character. The string includes
+ the header name and the ': ' separator.
+
+ """
+        # At some point we need to put fws here if it was in the source.
+ header = parser.Header([
+ parser.HeaderLabel([
+ parser.ValueTerminal(self.name, 'header-name'),
+ parser.ValueTerminal(':', 'header-sep')]),
+ ])
+ if self._parse_tree:
+ header.append(
+ parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]))
+ header.append(self._parse_tree)
+ return header.fold(policy=policy)
+
+
+def _reconstruct_header(cls_name, bases, value):
+ return type(cls_name, bases, {})._reconstruct(value)
+
+
+class UnstructuredHeader:
+
+ max_count = None
+ value_parser = staticmethod(parser.get_unstructured)
+
+ @classmethod
+ def parse(cls, value, kwds):
+ kwds['parse_tree'] = cls.value_parser(value)
+ kwds['decoded'] = str(kwds['parse_tree'])
+
+
+class UniqueUnstructuredHeader(UnstructuredHeader):
+
+ max_count = 1
+
+
+class DateHeader:
+
+ """Header whose value consists of a single timestamp.
+
+ Provides an additional attribute, datetime, which is either an aware
+ datetime using a timezone, or a naive datetime if the timezone
+ in the input string is -0000. Also accepts a datetime as input.
+ The 'value' attribute is the normalized form of the timestamp,
+ which means it is the output of format_datetime on the datetime.
+ """
+
+ max_count = None
+
+ # This is used only for folding, not for creating 'decoded'.
+ value_parser = staticmethod(parser.get_unstructured)
+
+ @classmethod
+ def parse(cls, value, kwds):
+ if not value:
+ kwds['defects'].append(errors.HeaderMissingRequiredValue())
+ kwds['datetime'] = None
+ kwds['decoded'] = ''
+ kwds['parse_tree'] = parser.TokenList()
+ return
+ if isinstance(value, str):
+ value = utils.parsedate_to_datetime(value)
+ kwds['datetime'] = value
+ kwds['decoded'] = utils.format_datetime(kwds['datetime'])
+ kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
+
+ def init(self, *args, **kw):
+ self._datetime = kw.pop('datetime')
+ super().init(*args, **kw)
+
+ @property
+ def datetime(self):
+ return self._datetime
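+
+    # Illustrative sketch: a value such as 'Sun, 06 Nov 1994 08:49:37 -0500'
+    # is parsed into an aware datetime exposed on .datetime, and the stored
+    # header value is normalized through utils.format_datetime(); a datetime
+    # object may be passed in place of the string.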
+
+
+class UniqueDateHeader(DateHeader):
+
+ max_count = 1
+
+
+class AddressHeader:
+
+ max_count = None
+
+ @staticmethod
+ def value_parser(value):
+ address_list, value = parser.get_address_list(value)
+ assert not value, 'this should not happen'
+ return address_list
+
+ @classmethod
+ def parse(cls, value, kwds):
+ if isinstance(value, str):
+ # We are translating here from the RFC language (address/mailbox)
+ # to our API language (group/address).
+ kwds['parse_tree'] = address_list = cls.value_parser(value)
+ groups = []
+ for addr in address_list.addresses:
+ groups.append(Group(addr.display_name,
+ [Address(mb.display_name or '',
+ mb.local_part or '',
+ mb.domain or '')
+ for mb in addr.all_mailboxes]))
+ defects = list(address_list.all_defects)
+ else:
+ # Assume it is Address/Group stuff
+ if not hasattr(value, '__iter__'):
+ value = [value]
+ groups = [Group(None, [item]) if not hasattr(item, 'addresses')
+ else item
+ for item in value]
+ defects = []
+ kwds['groups'] = groups
+ kwds['defects'] = defects
+ kwds['decoded'] = ', '.join([str(item) for item in groups])
+ if 'parse_tree' not in kwds:
+ kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
+
+ def init(self, *args, **kw):
+ self._groups = tuple(kw.pop('groups'))
+ self._addresses = None
+ super().init(*args, **kw)
+
+ @property
+ def groups(self):
+ return self._groups
+
+ @property
+ def addresses(self):
+ if self._addresses is None:
+ self._addresses = tuple(address for group in self._groups
+ for address in group.addresses)
+ return self._addresses
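+
+    # Illustrative sketch: parsing 'Fred <fred@example.com>, pals: ann@example.com;'
+    # would yield two entries in .groups (a single-address Group with
+    # display_name None, plus the 'pals' group) and two Address objects in the
+    # flattened .addresses tuple.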
+
+
+class UniqueAddressHeader(AddressHeader):
+
+ max_count = 1
+
+
+class SingleAddressHeader(AddressHeader):
+
+ @property
+ def address(self):
+ if len(self.addresses)!=1:
+ raise ValueError(("value of single address header {} is not "
+ "a single address").format(self.name))
+ return self.addresses[0]
+
+
+class UniqueSingleAddressHeader(SingleAddressHeader):
+
+ max_count = 1
+
+
+class MIMEVersionHeader:
+
+ max_count = 1
+
+ value_parser = staticmethod(parser.parse_mime_version)
+
+ @classmethod
+ def parse(cls, value, kwds):
+ kwds['parse_tree'] = parse_tree = cls.value_parser(value)
+ kwds['decoded'] = str(parse_tree)
+ kwds['defects'].extend(parse_tree.all_defects)
+ kwds['major'] = None if parse_tree.minor is None else parse_tree.major
+ kwds['minor'] = parse_tree.minor
+ if parse_tree.minor is not None:
+ kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor'])
+ else:
+ kwds['version'] = None
+
+ def init(self, *args, **kw):
+ self._version = kw.pop('version')
+ self._major = kw.pop('major')
+ self._minor = kw.pop('minor')
+ super().init(*args, **kw)
+
+ @property
+ def major(self):
+ return self._major
+
+ @property
+ def minor(self):
+ return self._minor
+
+ @property
+ def version(self):
+ return self._version
+
+
+class ParameterizedMIMEHeader:
+
+    # Mixin that handles the params dict.  Must be subclassed, and a
+    # value_parser for the specific header must be provided.
+
+ max_count = 1
+
+ @classmethod
+ def parse(cls, value, kwds):
+ kwds['parse_tree'] = parse_tree = cls.value_parser(value)
+ kwds['decoded'] = str(parse_tree)
+ kwds['defects'].extend(parse_tree.all_defects)
+ if parse_tree.params is None:
+ kwds['params'] = {}
+ else:
+ # The MIME RFCs specify that parameter ordering is arbitrary.
+ kwds['params'] = {utils._sanitize(name).lower():
+ utils._sanitize(value)
+ for name, value in parse_tree.params}
+
+ def init(self, *args, **kw):
+ self._params = kw.pop('params')
+ super().init(*args, **kw)
+
+ @property
+ def params(self):
+ return MappingProxyType(self._params)
+
+
+class ContentTypeHeader(ParameterizedMIMEHeader):
+
+ value_parser = staticmethod(parser.parse_content_type_header)
+
+ def init(self, *args, **kw):
+ super().init(*args, **kw)
+ self._maintype = utils._sanitize(self._parse_tree.maintype)
+ self._subtype = utils._sanitize(self._parse_tree.subtype)
+
+ @property
+ def maintype(self):
+ return self._maintype
+
+ @property
+ def subtype(self):
+ return self._subtype
+
+ @property
+ def content_type(self):
+ return self.maintype + '/' + self.subtype
+
+
+class ContentDispositionHeader(ParameterizedMIMEHeader):
+
+ value_parser = staticmethod(parser.parse_content_disposition_header)
+
+ def init(self, *args, **kw):
+ super().init(*args, **kw)
+ cd = self._parse_tree.content_disposition
+ self._content_disposition = cd if cd is None else utils._sanitize(cd)
+
+ @property
+ def content_disposition(self):
+ return self._content_disposition
+
+
+class ContentTransferEncodingHeader:
+
+ max_count = 1
+
+ value_parser = staticmethod(parser.parse_content_transfer_encoding_header)
+
+ @classmethod
+ def parse(cls, value, kwds):
+ kwds['parse_tree'] = parse_tree = cls.value_parser(value)
+ kwds['decoded'] = str(parse_tree)
+ kwds['defects'].extend(parse_tree.all_defects)
+
+ def init(self, *args, **kw):
+ super().init(*args, **kw)
+ self._cte = utils._sanitize(self._parse_tree.cte)
+
+ @property
+ def cte(self):
+ return self._cte
+
+
+# The header factory #
+
+_default_header_map = {
+ 'subject': UniqueUnstructuredHeader,
+ 'date': UniqueDateHeader,
+ 'resent-date': DateHeader,
+ 'orig-date': UniqueDateHeader,
+ 'sender': UniqueSingleAddressHeader,
+ 'resent-sender': SingleAddressHeader,
+ 'to': UniqueAddressHeader,
+ 'resent-to': AddressHeader,
+ 'cc': UniqueAddressHeader,
+ 'resent-cc': AddressHeader,
+ 'bcc': UniqueAddressHeader,
+ 'resent-bcc': AddressHeader,
+ 'from': UniqueAddressHeader,
+ 'resent-from': AddressHeader,
+ 'reply-to': UniqueAddressHeader,
+ 'mime-version': MIMEVersionHeader,
+ 'content-type': ContentTypeHeader,
+ 'content-disposition': ContentDispositionHeader,
+ 'content-transfer-encoding': ContentTransferEncodingHeader,
+ }
+
+class HeaderRegistry:
+
+ """A header_factory and header registry."""
+
+ def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader,
+ use_default_map=True):
+ """Create a header_factory that works with the Policy API.
+
+ base_class is the class that will be the last class in the created
+ header class's __bases__ list. default_class is the class that will be
+ used if "name" (see __call__) does not appear in the registry.
+ use_default_map controls whether or not the default mapping of names to
+ specialized classes is copied in to the registry when the factory is
+ created. The default is True.
+
+ """
+ self.registry = {}
+ self.base_class = base_class
+ self.default_class = default_class
+ if use_default_map:
+ self.registry.update(_default_header_map)
+
+ def map_to_type(self, name, cls):
+ """Register cls as the specialized class for handling "name" headers.
+
+ """
+ self.registry[name.lower()] = cls
+
+ def __getitem__(self, name):
+ cls = self.registry.get(name.lower(), self.default_class)
+ return type('_'+cls.__name__, (cls, self.base_class), {})
+
+ def __call__(self, name, value):
+ """Create a header instance for header 'name' from 'value'.
+
+ Creates a header instance by creating a specialized class for parsing
+ and representing the specified header by combining the factory
+ base_class with a specialized class from the registry or the
+ default_class, and passing the name and value to the constructed
+ class's constructor.
+
+ """
+ return self[name](name, value)
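+
+    # Illustrative sketch, assuming the default registry contents:
+    #
+    #   registry = HeaderRegistry()
+    #   h = registry('Subject', 'Hello')
+    #   isinstance(h, str) and h.name == 'Subject'   ->  True
+    #   registry('To', 'ann@example.com').addresses  ->  one-element tuple of Address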
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/iterators.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/iterators.py
new file mode 100644
index 00000000..b5502ee9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/iterators.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Various types of useful iterators and generators."""
+
+__all__ = [
+ 'body_line_iterator',
+ 'typed_subpart_iterator',
+ 'walk',
+ # Do not include _structure() since it's part of the debugging API.
+ ]
+
+import sys
+from io import StringIO
+
+
+
+# This function will become a method of the Message class
+def walk(self):
+ """Walk over the message tree, yielding each subpart.
+
+ The walk is performed in depth-first order. This method is a
+ generator.
+ """
+ yield self
+ if self.is_multipart():
+ for subpart in self.get_payload():
+ yield from subpart.walk()
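+
+# Illustrative sketch: iterating a multipart message,
+#
+#   for part in msg.walk():
+#       print(part.get_content_type())
+#
+# would print the container's type (e.g. 'multipart/mixed') followed by each
+# subpart's content type, in depth-first order.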
+
+
+
+# These two functions are imported into the Iterators.py interface module.
+def body_line_iterator(msg, decode=False):
+ """Iterate over the parts, returning string payloads line-by-line.
+
+ Optional decode (default False) is passed through to .get_payload().
+ """
+ for subpart in msg.walk():
+ payload = subpart.get_payload(decode=decode)
+ if isinstance(payload, str):
+ yield from StringIO(payload)
+
+
+def typed_subpart_iterator(msg, maintype='text', subtype=None):
+ """Iterate over the subparts with a given MIME type.
+
+ Use `maintype' as the main MIME type to match against; this defaults to
+ "text". Optional `subtype' is the MIME subtype to match against; if
+ omitted, only the main type is matched.
+ """
+ for subpart in msg.walk():
+ if subpart.get_content_maintype() == maintype:
+ if subtype is None or subpart.get_content_subtype() == subtype:
+ yield subpart
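+
+# Illustrative sketch: typed_subpart_iterator(msg, 'text', 'plain') yields
+# only the text/plain parts, while body_line_iterator(msg) chains the string
+# payloads of every part line by line.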
+
+
+
+def _structure(msg, fp=None, level=0, include_default=False):
+ """A handy debugging aid"""
+ if fp is None:
+ fp = sys.stdout
+ tab = ' ' * (level * 4)
+ print(tab + msg.get_content_type(), end='', file=fp)
+ if include_default:
+ print(' [%s]' % msg.get_default_type(), file=fp)
+ else:
+ print(file=fp)
+ if msg.is_multipart():
+ for subpart in msg.get_payload():
+ _structure(subpart, fp, level+1, include_default)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/message.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/message.py
new file mode 100644
index 00000000..b6512f21
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/message.py
@@ -0,0 +1,1164 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Basic message object for the email package object model."""
+
+__all__ = ['Message', 'EmailMessage']
+
+import re
+import uu
+import quopri
+from io import BytesIO, StringIO
+
+# Intrapackage imports
+from email import utils
+from email import errors
+from email._policybase import Policy, compat32
+from email import charset as _charset
+from email._encoded_words import decode_b
+Charset = _charset.Charset
+
+SEMISPACE = '; '
+
+# Regular expression that matches `special' characters in parameters, the
+# existence of which force quoting of the parameter value.
+tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
+
+
+def _splitparam(param):
+ # Split header parameters. BAW: this may be too simple. It isn't
+ # strictly RFC 2045 (section 5.1) compliant, but it catches most headers
+ # found in the wild. We may eventually need a full fledged parser.
+ # RDM: we might have a Header here; for now just stringify it.
+ a, sep, b = str(param).partition(';')
+ if not sep:
+ return a.strip(), None
+ return a.strip(), b.strip()
+
+def _formatparam(param, value=None, quote=True):
+ """Convenience function to format and return a key=value pair.
+
+ This will quote the value if needed or if quote is true. If value is a
+ three tuple (charset, language, value), it will be encoded according
+ to RFC2231 rules. If it contains non-ascii characters it will likewise
+ be encoded according to RFC2231 rules, using the utf-8 charset and
+ a null language.
+ """
+ if value is not None and len(value) > 0:
+ # A tuple is used for RFC 2231 encoded parameter values where items
+ # are (charset, language, value). charset is a string, not a Charset
+ # instance. RFC 2231 encoded values are never quoted, per RFC.
+ if isinstance(value, tuple):
+ # Encode as per RFC 2231
+ param += '*'
+ value = utils.encode_rfc2231(value[2], value[0], value[1])
+ return '%s=%s' % (param, value)
+ else:
+ try:
+ value.encode('ascii')
+ except UnicodeEncodeError:
+ param += '*'
+ value = utils.encode_rfc2231(value, 'utf-8', '')
+ return '%s=%s' % (param, value)
+ # BAW: Please check this. I think that if quote is set it should
+ # force quoting even if not necessary.
+ if quote or tspecials.search(value):
+ return '%s="%s"' % (param, utils.quote(value))
+ else:
+ return '%s=%s' % (param, value)
+ else:
+ return param
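+
+# Illustrative sketch of the two helpers above:
+#
+#   _splitparam('text/plain; charset="utf-8"')
+#       ->  ('text/plain', 'charset="utf-8"')
+#   _formatparam('charset', 'utf-8')
+#       ->  'charset="utf-8"'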
+
+def _parseparam(s):
+ # RDM This might be a Header, so for now stringify it.
+ s = ';' + str(s)
+ plist = []
+ while s[:1] == ';':
+ s = s[1:]
+ end = s.find(';')
+ while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
+ end = s.find(';', end + 1)
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ if '=' in f:
+ i = f.index('=')
+ f = f[:i].strip().lower() + '=' + f[i+1:].strip()
+ plist.append(f.strip())
+ s = s[end:]
+ return plist
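+
+# Illustrative sketch: _parseparam('text/plain; charset="utf-8"') would return
+# ['text/plain', 'charset="utf-8"'], preserving the quoting of values.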
+
+
+def _unquotevalue(value):
+ # This is different than utils.collapse_rfc2231_value() because it doesn't
+ # try to convert the value to a unicode. Message.get_param() and
+ # Message.get_params() are both currently defined to return the tuple in
+ # the face of RFC 2231 parameters.
+ if isinstance(value, tuple):
+ return value[0], value[1], utils.unquote(value[2])
+ else:
+ return utils.unquote(value)
+
+
+
+class Message:
+ """Basic message object.
+
+ A message object is defined as something that has a bunch of RFC 2822
+ headers and a payload. It may optionally have an envelope header
+ (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
+ multipart or a message/rfc822), then the payload is a list of Message
+ objects, otherwise it is a string.
+
+ Message objects implement part of the `mapping' interface, which assumes
+ there is exactly one occurrence of the header per message. Some headers
+ do in fact appear multiple times (e.g. Received) and for those headers,
+ you must use the explicit API to set or get all the headers. Not all of
+ the mapping methods are implemented.
+ """
+ def __init__(self, policy=compat32):
+ self.policy = policy
+ self._headers = []
+ self._unixfrom = None
+ self._payload = None
+ self._charset = None
+ # Defaults for multipart messages
+ self.preamble = self.epilogue = None
+ self.defects = []
+ # Default content type
+ self._default_type = 'text/plain'
+
+ def __str__(self):
+ """Return the entire formatted message as a string.
+ """
+ return self.as_string()
+
+ def as_string(self, unixfrom=False, maxheaderlen=0, policy=None):
+ """Return the entire formatted message as a string.
+
+ Optional 'unixfrom', when true, means include the Unix From_ envelope
+ header. For backward compatibility reasons, if maxheaderlen is
+ not specified it defaults to 0, so you must override it explicitly
+ if you want a different maxheaderlen. 'policy' is passed to the
+        Generator instance used to serialize the message; if it is not
+ specified the policy associated with the message instance is used.
+
+ If the message object contains binary data that is not encoded
+ according to RFC standards, the non-compliant data will be replaced by
+ unicode "unknown character" code points.
+ """
+ from email.generator import Generator
+ policy = self.policy if policy is None else policy
+ fp = StringIO()
+ g = Generator(fp,
+ mangle_from_=False,
+ maxheaderlen=maxheaderlen,
+ policy=policy)
+ g.flatten(self, unixfrom=unixfrom)
+ return fp.getvalue()
+
+ def __bytes__(self):
+ """Return the entire formatted message as a bytes object.
+ """
+ return self.as_bytes()
+
+ def as_bytes(self, unixfrom=False, policy=None):
+ """Return the entire formatted message as a bytes object.
+
+ Optional 'unixfrom', when true, means include the Unix From_ envelope
+ header. 'policy' is passed to the BytesGenerator instance used to
+ serialize the message; if not specified the policy associated with
+ the message instance is used.
+ """
+ from email.generator import BytesGenerator
+ policy = self.policy if policy is None else policy
+ fp = BytesIO()
+ g = BytesGenerator(fp, mangle_from_=False, policy=policy)
+ g.flatten(self, unixfrom=unixfrom)
+ return fp.getvalue()
+
+ def is_multipart(self):
+ """Return True if the message consists of multiple parts."""
+ return isinstance(self._payload, list)
+
+ #
+ # Unix From_ line
+ #
+ def set_unixfrom(self, unixfrom):
+ self._unixfrom = unixfrom
+
+ def get_unixfrom(self):
+ return self._unixfrom
+
+ #
+ # Payload manipulation.
+ #
+ def attach(self, payload):
+ """Add the given payload to the current payload.
+
+ The current payload will always be a list of objects after this method
+ is called. If you want to set the payload to a scalar object, use
+ set_payload() instead.
+ """
+ if self._payload is None:
+ self._payload = [payload]
+ else:
+ try:
+ self._payload.append(payload)
+ except AttributeError:
+ raise TypeError("Attach is not valid on a message with a"
+ " non-multipart payload")
+
+ def get_payload(self, i=None, decode=False):
+ """Return a reference to the payload.
+
+ The payload will either be a list object or a string. If you mutate
+ the list object, you modify the message's payload in place. Optional
+ i returns that index into the payload.
+
+ Optional decode is a flag indicating whether the payload should be
+ decoded or not, according to the Content-Transfer-Encoding header
+ (default is False).
+
+ When True and the message is not a multipart, the payload will be
+ decoded if this header's value is `quoted-printable' or `base64'. If
+ some other encoding is used, or the header is missing, or if the
+ payload has bogus data (i.e. bogus base64 or uuencoded data), the
+ payload is returned as-is.
+
+ If the message is a multipart and the decode flag is True, then None
+ is returned.
+ """
+ # Here is the logic table for this code, based on the email5.0.0 code:
+ # i decode is_multipart result
+ # ------ ------ ------------ ------------------------------
+ # None True True None
+ # i True True None
+ # None False True _payload (a list)
+ # i False True _payload element i (a Message)
+ # i False False error (not a list)
+ # i True False error (not a list)
+ # None False False _payload
+ # None True False _payload decoded (bytes)
+ # Note that Barry planned to factor out the 'decode' case, but that
+ # isn't so easy now that we handle the 8 bit data, which needs to be
+ # converted in both the decode and non-decode path.
+ if self.is_multipart():
+ if decode:
+ return None
+ if i is None:
+ return self._payload
+ else:
+ return self._payload[i]
+        # For backward compatibility, use isinstance and this error message
+ # instead of the more logical is_multipart test.
+ if i is not None and not isinstance(self._payload, list):
+ raise TypeError('Expected list, got %s' % type(self._payload))
+ payload = self._payload
+ # cte might be a Header, so for now stringify it.
+ cte = str(self.get('content-transfer-encoding', '')).lower()
+ # payload may be bytes here.
+ if isinstance(payload, str):
+ if utils._has_surrogates(payload):
+ bpayload = payload.encode('ascii', 'surrogateescape')
+ if not decode:
+ try:
+ payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
+ except LookupError:
+ payload = bpayload.decode('ascii', 'replace')
+ elif decode:
+ try:
+ bpayload = payload.encode('ascii')
+ except UnicodeError:
+ # This won't happen for RFC compliant messages (messages
+ # containing only ASCII code points in the unicode input).
+ # If it does happen, turn the string into bytes in a way
+ # guaranteed not to fail.
+ bpayload = payload.encode('raw-unicode-escape')
+ if not decode:
+ return payload
+ if cte == 'quoted-printable':
+ return quopri.decodestring(bpayload)
+ elif cte == 'base64':
+ # XXX: this is a bit of a hack; decode_b should probably be factored
+ # out somewhere, but I haven't figured out where yet.
+ value, defects = decode_b(b''.join(bpayload.splitlines()))
+ for defect in defects:
+ self.policy.handle_defect(self, defect)
+ return value
+ elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
+ in_file = BytesIO(bpayload)
+ out_file = BytesIO()
+ try:
+ uu.decode(in_file, out_file, quiet=True)
+ return out_file.getvalue()
+ except uu.Error:
+ # Some decoding problem
+ return bpayload
+ if isinstance(payload, str):
+ return bpayload
+ return payload
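+
+    # Illustrative sketch: for a non-multipart part whose
+    # Content-Transfer-Encoding is base64, get_payload() returns the encoded
+    # text as a str while get_payload(decode=True) returns the decoded bytes;
+    # for a multipart, get_payload() returns the list of subparts and
+    # get_payload(decode=True) returns None.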
+
+ def set_payload(self, payload, charset=None):
+ """Set the payload to the given value.
+
+ Optional charset sets the message's default character set. See
+ set_charset() for details.
+ """
+ if hasattr(payload, 'encode'):
+ if charset is None:
+ self._payload = payload
+ return
+ if not isinstance(charset, Charset):
+ charset = Charset(charset)
+ payload = payload.encode(charset.output_charset)
+ if hasattr(payload, 'decode'):
+ self._payload = payload.decode('ascii', 'surrogateescape')
+ else:
+ self._payload = payload
+ if charset is not None:
+ self.set_charset(charset)
+
+ def set_charset(self, charset):
+ """Set the charset of the payload to a given character set.
+
+ charset can be a Charset instance, a string naming a character set, or
+ None. If it is a string it will be converted to a Charset instance.
+ If charset is None, the charset parameter will be removed from the
+ Content-Type field. Anything else will generate a TypeError.
+
+ The message will be assumed to be of type text/* encoded with
+ charset.input_charset. It will be converted to charset.output_charset
+ and encoded properly, if needed, when generating the plain text
+ representation of the message. MIME headers (MIME-Version,
+ Content-Type, Content-Transfer-Encoding) will be added as needed.
+ """
+ if charset is None:
+ self.del_param('charset')
+ self._charset = None
+ return
+ if not isinstance(charset, Charset):
+ charset = Charset(charset)
+ self._charset = charset
+ if 'MIME-Version' not in self:
+ self.add_header('MIME-Version', '1.0')
+ if 'Content-Type' not in self:
+ self.add_header('Content-Type', 'text/plain',
+ charset=charset.get_output_charset())
+ else:
+ self.set_param('charset', charset.get_output_charset())
+ if charset != charset.get_output_charset():
+ self._payload = charset.body_encode(self._payload)
+ if 'Content-Transfer-Encoding' not in self:
+ cte = charset.get_body_encoding()
+ try:
+ cte(self)
+ except TypeError:
+ # This 'if' is for backward compatibility, it allows unicode
+ # through even though that won't work correctly if the
+ # message is serialized.
+ payload = self._payload
+ if payload:
+ try:
+ payload = payload.encode('ascii', 'surrogateescape')
+ except UnicodeError:
+ payload = payload.encode(charset.output_charset)
+ self._payload = charset.body_encode(payload)
+ self.add_header('Content-Transfer-Encoding', cte)
+
+ def get_charset(self):
+ """Return the Charset instance associated with the message's payload.
+ """
+ return self._charset
+
+ #
+ # MAPPING INTERFACE (partial)
+ #
+ def __len__(self):
+ """Return the total number of headers, including duplicates."""
+ return len(self._headers)
+
+ def __getitem__(self, name):
+ """Get a header value.
+
+ Return None if the header is missing instead of raising an exception.
+
+ Note that if the header appeared multiple times, exactly which
+ occurrence gets returned is undefined. Use get_all() to get all
+ the values matching a header field name.
+ """
+ return self.get(name)
+
+ def __setitem__(self, name, val):
+ """Set the value of a header.
+
+ Note: this does not overwrite an existing header with the same field
+ name. Use __delitem__() first to delete any existing headers.
+ """
+ max_count = self.policy.header_max_count(name)
+ if max_count:
+ lname = name.lower()
+ found = 0
+ for k, v in self._headers:
+ if k.lower() == lname:
+ found += 1
+ if found >= max_count:
+ raise ValueError("There may be at most {} {} headers "
+ "in a message".format(max_count, name))
+ self._headers.append(self.policy.header_store_parse(name, val))
+
+ def __delitem__(self, name):
+ """Delete all occurrences of a header, if present.
+
+ Does not raise an exception if the header is missing.
+ """
+ name = name.lower()
+ newheaders = []
+ for k, v in self._headers:
+ if k.lower() != name:
+ newheaders.append((k, v))
+ self._headers = newheaders
+
+ def __contains__(self, name):
+ return name.lower() in [k.lower() for k, v in self._headers]
+
+ def __iter__(self):
+ for field, value in self._headers:
+ yield field
+
+ def keys(self):
+ """Return a list of all the message's header field names.
+
+ These will be sorted in the order they appeared in the original
+ message, or were added to the message, and may contain duplicates.
+ Any fields deleted and re-inserted are always appended to the header
+ list.
+ """
+ return [k for k, v in self._headers]
+
+ def values(self):
+ """Return a list of all the message's header values.
+
+ These will be sorted in the order they appeared in the original
+ message, or were added to the message, and may contain duplicates.
+ Any fields deleted and re-inserted are always appended to the header
+ list.
+ """
+ return [self.policy.header_fetch_parse(k, v)
+ for k, v in self._headers]
+
+ def items(self):
+ """Get all the message's header fields and values.
+
+ These will be sorted in the order they appeared in the original
+ message, or were added to the message, and may contain duplicates.
+ Any fields deleted and re-inserted are always appended to the header
+ list.
+ """
+ return [(k, self.policy.header_fetch_parse(k, v))
+ for k, v in self._headers]
+
+ def get(self, name, failobj=None):
+ """Get a header value.
+
+ Like __getitem__() but return failobj instead of None when the field
+ is missing.
+ """
+ name = name.lower()
+ for k, v in self._headers:
+ if k.lower() == name:
+ return self.policy.header_fetch_parse(k, v)
+ return failobj
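+
+    # Illustrative sketch of the mapping-style API, assuming a parsed message:
+    #
+    #   msg['Subject']            ->  a Subject value, or None if missing
+    #   'subject' in msg          ->  True; names compare case-insensitively
+    #   msg.get_all('Received')   ->  all Received values, in original order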
+
+ #
+ # "Internal" methods (public API, but only intended for use by a parser
+    # or generator, not normal application code.)
+ #
+
+ def set_raw(self, name, value):
+ """Store name and value in the model without modification.
+
+ This is an "internal" API, intended only for use by a parser.
+ """
+ self._headers.append((name, value))
+
+ def raw_items(self):
+ """Return the (name, value) header pairs without modification.
+
+ This is an "internal" API, intended only for use by a generator.
+ """
+ return iter(self._headers.copy())
+
+ #
+ # Additional useful stuff
+ #
+
+ def get_all(self, name, failobj=None):
+ """Return a list of all the values for the named field.
+
+ These will be sorted in the order they appeared in the original
+ message, and may contain duplicates. Any fields deleted and
+ re-inserted are always appended to the header list.
+
+ If no such fields exist, failobj is returned (defaults to None).
+ """
+ values = []
+ name = name.lower()
+ for k, v in self._headers:
+ if k.lower() == name:
+ values.append(self.policy.header_fetch_parse(k, v))
+ if not values:
+ return failobj
+ return values
+
+ def add_header(self, _name, _value, **_params):
+ """Extended header setting.
+
+ name is the header field to add. keyword arguments can be used to set
+ additional parameters for the header field, with underscores converted
+ to dashes. Normally the parameter will be added as key="value" unless
+ value is None, in which case only the key will be added. If a
+ parameter value contains non-ASCII characters it can be specified as a
+ three-tuple of (charset, language, value), in which case it will be
+ encoded according to RFC2231 rules. Otherwise it will be encoded using
+ the utf-8 charset and a language of ''.
+
+ Examples:
+
+ msg.add_header('content-disposition', 'attachment', filename='bud.gif')
+ msg.add_header('content-disposition', 'attachment',
+                       filename=('utf-8', '', 'Fußballer.ppt'))
+        msg.add_header('content-disposition', 'attachment',
+                       filename='Fußballer.ppt')
+ """
+ parts = []
+ for k, v in _params.items():
+ if v is None:
+ parts.append(k.replace('_', '-'))
+ else:
+ parts.append(_formatparam(k.replace('_', '-'), v))
+ if _value is not None:
+ parts.insert(0, _value)
+ self[_name] = SEMISPACE.join(parts)
+
+ def replace_header(self, _name, _value):
+ """Replace a header.
+
+ Replace the first matching header found in the message, retaining
+ header order and case. If no matching header was found, a KeyError is
+ raised.
+ """
+ _name = _name.lower()
+ for i, (k, v) in zip(range(len(self._headers)), self._headers):
+ if k.lower() == _name:
+ self._headers[i] = self.policy.header_store_parse(k, _value)
+ break
+ else:
+ raise KeyError(_name)
+
+ #
+ # Use these three methods instead of the three above.
+ #
+
+ def get_content_type(self):
+ """Return the message's content type.
+
+ The returned string is coerced to lower case and is of the form
+ `maintype/subtype'. If there was no Content-Type header in the
+ message, the default type as given by get_default_type() will be
+ returned. Since, according to RFC 2045, messages always have a default
+ type, this will always return a value.
+
+ RFC 2045 defines a message's default type to be text/plain unless it
+ appears inside a multipart/digest container, in which case it would be
+ message/rfc822.
+ """
+ missing = object()
+ value = self.get('content-type', missing)
+ if value is missing:
+ # This should have no parameters
+ return self.get_default_type()
+ ctype = _splitparam(value)[0].lower()
+ # RFC 2045, section 5.2 says if it's invalid, use text/plain
+ if ctype.count('/') != 1:
+ return 'text/plain'
+ return ctype
+
+ def get_content_maintype(self):
+ """Return the message's main content type.
+
+ This is the `maintype' part of the string returned by
+ get_content_type().
+ """
+ ctype = self.get_content_type()
+ return ctype.split('/')[0]
+
+ def get_content_subtype(self):
+ """Returns the message's sub-content type.
+
+ This is the `subtype' part of the string returned by
+ get_content_type().
+ """
+ ctype = self.get_content_type()
+ return ctype.split('/')[1]
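+
+ # Illustrative sketch of the three accessors above, assuming a message
+ # whose Content-Type header reads 'TEXT/HTML; charset="utf-8"':
+ #
+ #   >>> msg.get_content_type()
+ #   'text/html'
+ #   >>> msg.get_content_maintype()
+ #   'text'
+ #   >>> msg.get_content_subtype()
+ #   'html'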
+
+ def get_default_type(self):
+ """Return the `default' content type.
+
+ Most messages have a default content type of text/plain, except for
+ messages that are subparts of multipart/digest containers. Such
+ subparts have a default content type of message/rfc822.
+ """
+ return self._default_type
+
+ def set_default_type(self, ctype):
+ """Set the `default' content type.
+
+ ctype should be either "text/plain" or "message/rfc822", although this
+ is not enforced. The default content type is not stored in the
+ Content-Type header.
+ """
+ self._default_type = ctype
+
+ def _get_params_preserve(self, failobj, header):
+ # Like get_params() but preserves the quoting of values. BAW:
+ # should this be part of the public interface?
+ missing = object()
+ value = self.get(header, missing)
+ if value is missing:
+ return failobj
+ params = []
+ for p in _parseparam(value):
+ try:
+ name, val = p.split('=', 1)
+ name = name.strip()
+ val = val.strip()
+ except ValueError:
+ # Must have been a bare attribute
+ name = p.strip()
+ val = ''
+ params.append((name, val))
+ params = utils.decode_params(params)
+ return params
+
+ def get_params(self, failobj=None, header='content-type', unquote=True):
+ """Return the message's Content-Type parameters, as a list.
+
+ The elements of the returned list are 2-tuples of key/value pairs, as
+ split on the `=' sign. The left hand side of the `=' is the key,
+ while the right hand side is the value. If there is no `=' sign in
+ the parameter the value is the empty string. The value is as
+ described in the get_param() method.
+
+ Optional failobj is the object to return if there is no Content-Type
+ header. Optional header is the header to search instead of
+ Content-Type. If unquote is True, the value is unquoted.
+ """
+ missing = object()
+ params = self._get_params_preserve(missing, header)
+ if params is missing:
+ return failobj
+ if unquote:
+ return [(k, _unquotevalue(v)) for k, v in params]
+ else:
+ return params
+
+ def get_param(self, param, failobj=None, header='content-type',
+ unquote=True):
+ """Return the parameter value if found in the Content-Type header.
+
+ Optional failobj is the object to return if there is no Content-Type
+ header, or the Content-Type header has no such parameter. Optional
+ header is the header to search instead of Content-Type.
+
+ Parameter keys are always compared case insensitively. The return
+ value can either be a string, or a 3-tuple if the parameter was RFC
+ 2231 encoded. When it's a 3-tuple, the elements of the value are of
+ the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
+ LANGUAGE can be None, in which case you should consider VALUE to be
+ encoded in the us-ascii charset. You can usually ignore LANGUAGE.
+ The parameter value (either the returned string, or the VALUE item in
+ the 3-tuple) is always unquoted, unless unquote is set to False.
+
+ If your application doesn't care whether the parameter was RFC 2231
+ encoded, it can turn the return value into a string as follows:
+
+ rawparam = msg.get_param('foo')
+ param = email.utils.collapse_rfc2231_value(rawparam)
+
+ """
+ if header not in self:
+ return failobj
+ for k, v in self._get_params_preserve(failobj, header):
+ if k.lower() == param.lower():
+ if unquote:
+ return _unquotevalue(v)
+ else:
+ return v
+ return failobj
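+
+ # Illustrative sketch of get_param(), assuming a message whose Content-Type
+ # header reads 'text/plain; charset="ISO-8859-1"':
+ #
+ #   >>> msg.get_param('charset')
+ #   'ISO-8859-1'
+ #   >>> msg.get_param('charset', unquote=False)
+ #   '"ISO-8859-1"'
+ #   >>> msg.get_param('missing') is None
+ #   True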
+
+ def set_param(self, param, value, header='Content-Type', requote=True,
+ charset=None, language='', replace=False):
+ """Set a parameter in the Content-Type header.
+
+ If the parameter already exists in the header, its value will be
+ replaced with the new value.
+
+ If header is Content-Type and has not yet been defined for this
+ message, it will be set to "text/plain" and the new parameter and
+ value will be appended as per RFC 2045.
+
+ An alternate header can be specified in the header argument, and all
+ parameters will be quoted as necessary unless requote is False.
+
+ If charset is specified, the parameter will be encoded according to RFC
+ 2231. Optional language specifies the RFC 2231 language, defaulting
+ to the empty string. Both charset and language should be strings.
+ """
+ if not isinstance(value, tuple) and charset:
+ value = (charset, language, value)
+
+ if header not in self and header.lower() == 'content-type':
+ ctype = 'text/plain'
+ else:
+ ctype = self.get(header)
+ if not self.get_param(param, header=header):
+ if not ctype:
+ ctype = _formatparam(param, value, requote)
+ else:
+ ctype = SEMISPACE.join(
+ [ctype, _formatparam(param, value, requote)])
+ else:
+ ctype = ''
+ for old_param, old_value in self.get_params(header=header,
+ unquote=requote):
+ append_param = ''
+ if old_param.lower() == param.lower():
+ append_param = _formatparam(param, value, requote)
+ else:
+ append_param = _formatparam(old_param, old_value, requote)
+ if not ctype:
+ ctype = append_param
+ else:
+ ctype = SEMISPACE.join([ctype, append_param])
+ if ctype != self.get(header):
+ if replace:
+ self.replace_header(header, ctype)
+ else:
+ del self[header]
+ self[header] = ctype
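+
+ # Rough sketch of the expected behaviour of set_param() on a message that
+ # has no Content-Type header yet (illustrative only):
+ #
+ #   >>> msg = Message()
+ #   >>> msg.set_param('charset', 'utf-8')
+ #   >>> msg['Content-Type']
+ #   'text/plain; charset="utf-8"'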
+
+ def del_param(self, param, header='content-type', requote=True):
+ """Remove the given parameter completely from the Content-Type header.
+
+ The header will be re-written in place without the parameter or its
+ value. All values will be quoted as necessary unless requote is
+ False. Optional header specifies an alternative to the Content-Type
+ header.
+ """
+ if header not in self:
+ return
+ new_ctype = ''
+ for p, v in self.get_params(header=header, unquote=requote):
+ if p.lower() != param.lower():
+ if not new_ctype:
+ new_ctype = _formatparam(p, v, requote)
+ else:
+ new_ctype = SEMISPACE.join([new_ctype,
+ _formatparam(p, v, requote)])
+ if new_ctype != self.get(header):
+ del self[header]
+ self[header] = new_ctype
+
+ def set_type(self, type, header='Content-Type', requote=True):
+ """Set the main type and subtype for the Content-Type header.
+
+ type must be a string in the form "maintype/subtype", otherwise a
+ ValueError is raised.
+
+ This method replaces the Content-Type header, keeping all the
+ parameters in place. If requote is False, this leaves the existing
+ header's quoting as is. Otherwise, the parameters will be quoted (the
+ default).
+
+ An alternative header can be specified in the header argument. When
+ the Content-Type header is set, we'll always also add a MIME-Version
+ header.
+ """
+ # BAW: should we be strict?
+ if not type.count('/') == 1:
+ raise ValueError
+ # Set the Content-Type, you get a MIME-Version
+ if header.lower() == 'content-type':
+ del self['mime-version']
+ self['MIME-Version'] = '1.0'
+ if header not in self:
+ self[header] = type
+ return
+ params = self.get_params(header=header, unquote=requote)
+ del self[header]
+ self[header] = type
+ # Skip the first param; it's the old type.
+ for p, v in params[1:]:
+ self.set_param(p, v, header, requote)
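+
+ # Illustrative sketch of set_type() preserving parameters, assuming the
+ # current header is 'text/plain; charset="utf-8"':
+ #
+ #   >>> msg.set_type('text/html')
+ #   >>> msg['Content-Type']
+ #   'text/html; charset="utf-8"'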
+
+ def get_filename(self, failobj=None):
+ """Return the filename associated with the payload if present.
+
+ The filename is extracted from the Content-Disposition header's
+ `filename' parameter, and it is unquoted. If that header is missing
+ the `filename' parameter, this method falls back to looking for the
+ `name' parameter.
+ """
+ missing = object()
+ filename = self.get_param('filename', missing, 'content-disposition')
+ if filename is missing:
+ filename = self.get_param('name', missing, 'content-type')
+ if filename is missing:
+ return failobj
+ return utils.collapse_rfc2231_value(filename).strip()
+
+ def get_boundary(self, failobj=None):
+ """Return the boundary associated with the payload if present.
+
+ The boundary is extracted from the Content-Type header's `boundary'
+ parameter, and it is unquoted.
+ """
+ missing = object()
+ boundary = self.get_param('boundary', missing)
+ if boundary is missing:
+ return failobj
+ # RFC 2046 says that boundaries may begin but not end in w/s
+ return utils.collapse_rfc2231_value(boundary).rstrip()
+
+ def set_boundary(self, boundary):
+ """Set the boundary parameter in Content-Type to 'boundary'.
+
+ This is subtly different from deleting the Content-Type header and
+ adding a new one with a new boundary parameter via add_header(). The
+ main difference is that using the set_boundary() method preserves the
+ order of the Content-Type header in the original message.
+
+ HeaderParseError is raised if the message has no Content-Type header.
+ """
+ missing = object()
+ params = self._get_params_preserve(missing, 'content-type')
+ if params is missing:
+ # There was no Content-Type header, and we don't know what type
+ # to set it to, so raise an exception.
+ raise errors.HeaderParseError('No Content-Type header found')
+ newparams = []
+ foundp = False
+ for pk, pv in params:
+ if pk.lower() == 'boundary':
+ newparams.append(('boundary', '"%s"' % boundary))
+ foundp = True
+ else:
+ newparams.append((pk, pv))
+ if not foundp:
+ # The original Content-Type header had no boundary attribute.
+ # Tack one on the end. BAW: should we raise an exception
+ # instead???
+ newparams.append(('boundary', '"%s"' % boundary))
+ # Replace the existing Content-Type header with the new value
+ newheaders = []
+ for h, v in self._headers:
+ if h.lower() == 'content-type':
+ parts = []
+ for k, v in newparams:
+ if v == '':
+ parts.append(k)
+ else:
+ parts.append('%s=%s' % (k, v))
+ val = SEMISPACE.join(parts)
+ newheaders.append(self.policy.header_store_parse(h, val))
+
+ else:
+ newheaders.append((h, v))
+ self._headers = newheaders
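+
+ # Illustrative sketch of set_boundary() replacing the boundary in place
+ # (the original boundary value shown is assumed):
+ #
+ #   >>> msg.get_boundary()
+ #   '===============0123456789=='
+ #   >>> msg.set_boundary('NEW-BOUNDARY')
+ #   >>> msg.get_boundary()
+ #   'NEW-BOUNDARY'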
+
+ def get_content_charset(self, failobj=None):
+ """Return the charset parameter of the Content-Type header.
+
+ The returned string is always coerced to lower case. If there is no
+ Content-Type header, or if that header has no charset parameter,
+ failobj is returned.
+ """
+ missing = object()
+ charset = self.get_param('charset', missing)
+ if charset is missing:
+ return failobj
+ if isinstance(charset, tuple):
+ # RFC 2231 encoded, so decode it, and it better end up as ascii.
+ pcharset = charset[0] or 'us-ascii'
+ try:
+ # LookupError will be raised if the charset isn't known to
+ # Python. UnicodeError will be raised if the encoded text
+ # contains a character not in the charset.
+ as_bytes = charset[2].encode('raw-unicode-escape')
+ charset = str(as_bytes, pcharset)
+ except (LookupError, UnicodeError):
+ charset = charset[2]
+ # charset characters must be in us-ascii range
+ try:
+ charset.encode('us-ascii')
+ except UnicodeError:
+ return failobj
+ # RFC 2046, section 4.1.2 says charsets are not case sensitive
+ return charset.lower()
+
+ def get_charsets(self, failobj=None):
+ """Return a list containing the charset(s) used in this message.
+
+ The returned list of items describes the Content-Type headers'
+ charset parameter for this message and all the subparts in its
+ payload.
+
+ Each item will either be a string (the value of the charset parameter
+ in the Content-Type header of that part) or the value of the
+ 'failobj' parameter (defaults to None), if the part does not have a
+ main MIME type of "text", or the charset is not defined.
+
+ The list will contain one string for each part of the message, plus
+ one for the container message (i.e. self), so that a non-multipart
+ message will still return a list of length 1.
+ """
+ return [part.get_content_charset(failobj) for part in self.walk()]
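+
+ # Illustrative sketch, assuming a multipart container whose two text
+ # subparts declare utf-8 and iso-8859-1 charsets:
+ #
+ #   >>> msg.get_charsets()
+ #   [None, 'utf-8', 'iso-8859-1']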
+
+ def get_content_disposition(self):
+ """Return the message's content-disposition if it exists, or None.
+
+ The return value can be either 'inline', 'attachment' or None,
+ according to RFC 2183.
+ """
+ value = self.get('content-disposition')
+ if value is None:
+ return None
+ c_d = _splitparam(value)[0].lower()
+ return c_d
+
+ # I.e. def walk(self): ...
+ from email.iterators import walk
+
+
+class MIMEPart(Message):
+
+ def __init__(self, policy=None):
+ if policy is None:
+ from email.policy import default
+ policy = default
+ Message.__init__(self, policy)
+
+ def as_string(self, unixfrom=False, maxheaderlen=None, policy=None):
+ """Return the entire formatted message as a string.
+
+ Optional 'unixfrom', when true, means include the Unix From_ envelope
+ header. maxheaderlen is retained for backward compatibility with the
+ base Message class, but defaults to None, meaning that the policy value
+ for max_line_length controls the header maximum length. 'policy' is
+ passed to the Generator instance used to serialize the message; if it
+ is not specified the policy associated with the message instance is
+ used.
+ """
+ policy = self.policy if policy is None else policy
+ if maxheaderlen is None:
+ maxheaderlen = policy.max_line_length
+ return super().as_string(maxheaderlen=maxheaderlen, policy=policy)
+
+ def __str__(self):
+ return self.as_string(policy=self.policy.clone(utf8=True))
+
+ def is_attachment(self):
+ c_d = self.get('content-disposition')
+ return False if c_d is None else c_d.content_disposition == 'attachment'
+
+ def _find_body(self, part, preferencelist):
+ if part.is_attachment():
+ return
+ maintype, subtype = part.get_content_type().split('/')
+ if maintype == 'text':
+ if subtype in preferencelist:
+ yield (preferencelist.index(subtype), part)
+ return
+ if maintype != 'multipart':
+ return
+ if subtype != 'related':
+ for subpart in part.iter_parts():
+ yield from self._find_body(subpart, preferencelist)
+ return
+ if 'related' in preferencelist:
+ yield (preferencelist.index('related'), part)
+ candidate = None
+ start = part.get_param('start')
+ if start:
+ for subpart in part.iter_parts():
+ if subpart['content-id'] == start:
+ candidate = subpart
+ break
+ if candidate is None:
+ subparts = part.get_payload()
+ candidate = subparts[0] if subparts else None
+ if candidate is not None:
+ yield from self._find_body(candidate, preferencelist)
+
+ def get_body(self, preferencelist=('related', 'html', 'plain')):
+ """Return best candidate mime part for display as 'body' of message.
+
+ Do a depth first search, starting with self, looking for the first part
+ matching each of the items in preferencelist, and return the part
+ corresponding to the first item that has a match, or None if no items
+ have a match. If 'related' is not included in preferencelist, consider
+ the root part of any multipart/related encountered as a candidate
+ match. Ignore parts with 'Content-Disposition: attachment'.
+ """
+ best_prio = len(preferencelist)
+ body = None
+ for prio, part in self._find_body(self, preferencelist):
+ if prio < best_prio:
+ best_prio = prio
+ body = part
+ if prio == 0:
+ break
+ return body
+
+ _body_types = {('text', 'plain'),
+ ('text', 'html'),
+ ('multipart', 'related'),
+ ('multipart', 'alternative')}
+ def iter_attachments(self):
+ """Return an iterator over the non-main parts of a multipart.
+
+ Skip the first of each occurrence of text/plain, text/html,
+ multipart/related, or multipart/alternative in the multipart (unless
+ they have a 'Content-Disposition: attachment' header) and include all
+ remaining subparts in the returned iterator. When applied to a
+ multipart/related, return all parts except the root part. Return an
+ empty iterator when applied to a multipart/alternative or a
+ non-multipart.
+ """
+ maintype, subtype = self.get_content_type().split('/')
+ if maintype != 'multipart' or subtype == 'alternative':
+ return
+ parts = self.get_payload().copy()
+ if maintype == 'multipart' and subtype == 'related':
+ # For related, we treat everything but the root as an attachment.
+ # The root may be indicated by 'start'; if there's no start or we
+ # can't find the named start, treat the first subpart as the root.
+ start = self.get_param('start')
+ if start:
+ found = False
+ attachments = []
+ for part in parts:
+ if part.get('content-id') == start:
+ found = True
+ else:
+ attachments.append(part)
+ if found:
+ yield from attachments
+ return
+ parts.pop(0)
+ yield from parts
+ return
+ # Otherwise we more or less invert the remaining logic in get_body.
+ # This only really works in edge cases (ex: non-text related or
+ # alternatives) if the sending agent sets content-disposition.
+ seen = [] # Only skip the first example of each candidate type.
+ for part in parts:
+ maintype, subtype = part.get_content_type().split('/')
+ if ((maintype, subtype) in self._body_types and
+ not part.is_attachment() and subtype not in seen):
+ seen.append(subtype)
+ continue
+ yield part
+
+ def iter_parts(self):
+ """Return an iterator over all immediate subparts of a multipart.
+
+ Return an empty iterator for a non-multipart.
+ """
+ if self.get_content_maintype() == 'multipart':
+ yield from self.get_payload()
+
+ def get_content(self, *args, content_manager=None, **kw):
+ if content_manager is None:
+ content_manager = self.policy.content_manager
+ return content_manager.get_content(self, *args, **kw)
+
+ def set_content(self, *args, content_manager=None, **kw):
+ if content_manager is None:
+ content_manager = self.policy.content_manager
+ content_manager.set_content(self, *args, **kw)
+
+ def _make_multipart(self, subtype, disallowed_subtypes, boundary):
+ if self.get_content_maintype() == 'multipart':
+ existing_subtype = self.get_content_subtype()
+ disallowed_subtypes = disallowed_subtypes + (subtype,)
+ if existing_subtype in disallowed_subtypes:
+ raise ValueError("Cannot convert {} to {}".format(
+ existing_subtype, subtype))
+ keep_headers = []
+ part_headers = []
+ for name, value in self._headers:
+ if name.lower().startswith('content-'):
+ part_headers.append((name, value))
+ else:
+ keep_headers.append((name, value))
+ if part_headers:
+ # There is existing content, move it to the first subpart.
+ part = type(self)(policy=self.policy)
+ part._headers = part_headers
+ part._payload = self._payload
+ self._payload = [part]
+ else:
+ self._payload = []
+ self._headers = keep_headers
+ self['Content-Type'] = 'multipart/' + subtype
+ if boundary is not None:
+ self.set_param('boundary', boundary)
+
+ def make_related(self, boundary=None):
+ self._make_multipart('related', ('alternative', 'mixed'), boundary)
+
+ def make_alternative(self, boundary=None):
+ self._make_multipart('alternative', ('mixed',), boundary)
+
+ def make_mixed(self, boundary=None):
+ self._make_multipart('mixed', (), boundary)
+
+ def _add_multipart(self, _subtype, *args, _disp=None, **kw):
+ if (self.get_content_maintype() != 'multipart' or
+ self.get_content_subtype() != _subtype):
+ getattr(self, 'make_' + _subtype)()
+ part = type(self)(policy=self.policy)
+ part.set_content(*args, **kw)
+ if _disp and 'content-disposition' not in part:
+ part['Content-Disposition'] = _disp
+ self.attach(part)
+
+ def add_related(self, *args, **kw):
+ self._add_multipart('related', *args, _disp='inline', **kw)
+
+ def add_alternative(self, *args, **kw):
+ self._add_multipart('alternative', *args, **kw)
+
+ def add_attachment(self, *args, **kw):
+ self._add_multipart('mixed', *args, _disp='attachment', **kw)
+
+ def clear(self):
+ self._headers = []
+ self._payload = None
+
+ def clear_content(self):
+ self._headers = [(n, v) for n, v in self._headers
+ if not n.lower().startswith('content-')]
+ self._payload = None
+
+
+class EmailMessage(MIMEPart):
+
+ def set_content(self, *args, **kw):
+ super().set_content(*args, **kw)
+ if 'MIME-Version' not in self:
+ self['MIME-Version'] = '1.0'
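+
+
+# A rough usage sketch of the EmailMessage/MIMEPart API defined above
+# (illustrative only; the attachment bytes and filename are assumed):
+#
+#   >>> from email.message import EmailMessage
+#   >>> msg = EmailMessage()
+#   >>> msg['Subject'] = 'report'
+#   >>> msg.set_content('plain text body')
+#   >>> msg.add_attachment(b'fake pdf bytes', maintype='application',
+#   ...                    subtype='pdf', filename='report.pdf')
+#   >>> msg.get_content_type()
+#   'multipart/mixed'
+#   >>> msg.get_body().get_content_type()
+#   'text/plain'
+#   >>> [p.get_filename() for p in msg.iter_attachments()]
+#   ['report.pdf']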
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/application.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/application.py
new file mode 100644
index 00000000..6877e554
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/application.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Keith Dart
+# Contact: email-sig@python.org
+
+"""Class representing application/* type MIME documents."""
+
+__all__ = ["MIMEApplication"]
+
+from email import encoders
+from email.mime.nonmultipart import MIMENonMultipart
+
+
+class MIMEApplication(MIMENonMultipart):
+ """Class for generating application/* MIME documents."""
+
+ def __init__(self, _data, _subtype='octet-stream',
+ _encoder=encoders.encode_base64, *, policy=None, **_params):
+ """Create an application/* type MIME document.
+
+ _data is a string containing the raw application data.
+
+ _subtype is the MIME content type subtype, defaulting to
+ 'octet-stream'.
+
+ _encoder is a function which will perform the actual encoding for
+ transport of the application data, defaulting to base64 encoding.
+
+ Any additional keyword arguments are passed to the base class
+ constructor, which turns them into parameters on the Content-Type
+ header.
+ """
+ if _subtype is None:
+ raise TypeError('Invalid application MIME subtype')
+ MIMENonMultipart.__init__(self, 'application', _subtype, policy=policy,
+ **_params)
+ self.set_payload(_data)
+ _encoder(self)
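+
+
+# Illustrative usage (the payload bytes are assumed):
+#
+#   >>> from email.mime.application import MIMEApplication
+#   >>> part = MIMEApplication(b'fake pdf bytes', _subtype='pdf',
+#   ...                        Name='report.pdf')
+#   >>> part['Content-Type']
+#   'application/pdf; Name="report.pdf"'
+#   >>> part['Content-Transfer-Encoding']
+#   'base64'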
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/audio.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/audio.py
new file mode 100644
index 00000000..4bcd7b22
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/audio.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Anthony Baxter
+# Contact: email-sig@python.org
+
+"""Class representing audio/* type MIME documents."""
+
+__all__ = ['MIMEAudio']
+
+import sndhdr
+
+from io import BytesIO
+from email import encoders
+from email.mime.nonmultipart import MIMENonMultipart
+
+
+
+_sndhdr_MIMEmap = {'au' : 'basic',
+ 'wav' :'x-wav',
+ 'aiff':'x-aiff',
+ 'aifc':'x-aiff',
+ }
+
+# There are others in sndhdr that don't have MIME types. :(
+# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
+def _whatsnd(data):
+ """Try to identify a sound file type.
+
+ sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
+ we re-do it here. It would be easier to reverse engineer the Unix 'file'
+ command and use the standard 'magic' file, as shipped with a modern Unix.
+ """
+ hdr = data[:512]
+ fakefile = BytesIO(hdr)
+ for testfn in sndhdr.tests:
+ res = testfn(hdr, fakefile)
+ if res is not None:
+ return _sndhdr_MIMEmap.get(res[0])
+ return None
+
+
+
+class MIMEAudio(MIMENonMultipart):
+ """Class for generating audio/* MIME documents."""
+
+ def __init__(self, _audiodata, _subtype=None,
+ _encoder=encoders.encode_base64, *, policy=None, **_params):
+ """Create an audio/* type MIME document.
+
+ _audiodata is a string containing the raw audio data. If this data
+ can be decoded by the standard Python `sndhdr' module, then the
+ subtype will be automatically included in the Content-Type header.
+ Otherwise, you can specify the specific audio subtype via the
+ _subtype parameter. If _subtype is not given, and no subtype can be
+ guessed, a TypeError is raised.
+
+ _encoder is a function which will perform the actual encoding for
+ transport of the image data. It takes one argument, which is this
+ Image instance. It should use get_payload() and set_payload() to
+ change the payload to the encoded form. It should also add any
+ Content-Transfer-Encoding or other headers to the message as
+ necessary. The default encoding is Base64.
+
+ Any additional keyword arguments are passed to the base class
+ constructor, which turns them into parameters on the Content-Type
+ header.
+ """
+ if _subtype is None:
+ _subtype = _whatsnd(_audiodata)
+ if _subtype is None:
+ raise TypeError('Could not find audio MIME subtype')
+ MIMENonMultipart.__init__(self, 'audio', _subtype, policy=policy,
+ **_params)
+ self.set_payload(_audiodata)
+ _encoder(self)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/base.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/base.py
new file mode 100644
index 00000000..1a3f9b51
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/base.py
@@ -0,0 +1,30 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Base class for MIME specializations."""
+
+__all__ = ['MIMEBase']
+
+import email.policy
+
+from email import message
+
+
+
+class MIMEBase(message.Message):
+ """Base class for MIME specializations."""
+
+ def __init__(self, _maintype, _subtype, *, policy=None, **_params):
+ """This constructor adds a Content-Type: and a MIME-Version: header.
+
+ The Content-Type: header is taken from the _maintype and _subtype
+ arguments. Additional parameters for this header are taken from the
+ keyword arguments.
+ """
+ if policy is None:
+ policy = email.policy.compat32
+ message.Message.__init__(self, policy=policy)
+ ctype = '%s/%s' % (_maintype, _subtype)
+ self.add_header('Content-Type', ctype, **_params)
+ self['MIME-Version'] = '1.0'
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/image.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/image.py
new file mode 100644
index 00000000..92724643
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/image.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Class representing image/* type MIME documents."""
+
+__all__ = ['MIMEImage']
+
+import imghdr
+
+from email import encoders
+from email.mime.nonmultipart import MIMENonMultipart
+
+
+
+class MIMEImage(MIMENonMultipart):
+ """Class for generating image/* type MIME documents."""
+
+ def __init__(self, _imagedata, _subtype=None,
+ _encoder=encoders.encode_base64, *, policy=None, **_params):
+ """Create an image/* type MIME document.
+
+ _imagedata is a string containing the raw image data. If this data
+ can be decoded by the standard Python `imghdr' module, then the
+ subtype will be automatically included in the Content-Type header.
+ Otherwise, you can specify the specific image subtype via the _subtype
+ parameter.
+
+ _encoder is a function which will perform the actual encoding for
+ transport of the image data. It takes one argument, which is this
+ Image instance. It should use get_payload() and set_payload() to
+ change the payload to the encoded form. It should also add any
+ Content-Transfer-Encoding or other headers to the message as
+ necessary. The default encoding is Base64.
+
+ Any additional keyword arguments are passed to the base class
+ constructor, which turns them into parameters on the Content-Type
+ header.
+ """
+ if _subtype is None:
+ _subtype = imghdr.what(None, _imagedata)
+ if _subtype is None:
+ raise TypeError('Could not guess image MIME subtype')
+ MIMENonMultipart.__init__(self, 'image', _subtype, policy=policy,
+ **_params)
+ self.set_payload(_imagedata)
+ _encoder(self)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/message.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/message.py
new file mode 100644
index 00000000..07e4f2d1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/message.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Class representing message/* MIME documents."""
+
+__all__ = ['MIMEMessage']
+
+from email import message
+from email.mime.nonmultipart import MIMENonMultipart
+
+
+
+class MIMEMessage(MIMENonMultipart):
+ """Class representing message/* MIME documents."""
+
+ def __init__(self, _msg, _subtype='rfc822', *, policy=None):
+ """Create a message/* type MIME document.
+
+ _msg is a message object and must be an instance of Message, or a
+ derived class of Message, otherwise a TypeError is raised.
+
+ Optional _subtype defines the subtype of the contained message. The
+ default is "rfc822" (this is defined by the MIME standard, even though
+ the term "rfc822" is technically outdated by RFC 2822).
+ """
+ MIMENonMultipart.__init__(self, 'message', _subtype, policy=policy)
+ if not isinstance(_msg, message.Message):
+ raise TypeError('Argument is not an instance of Message')
+ # It's convenient to use this base class method. We need to do it
+ # this way or we'll get an exception
+ message.Message.attach(self, _msg)
+ # And be sure our default type is set correctly
+ self.set_default_type('message/rfc822')
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/multipart.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/multipart.py
new file mode 100644
index 00000000..2d3f2888
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/multipart.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2002-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Base class for MIME multipart/* type messages."""
+
+__all__ = ['MIMEMultipart']
+
+from email.mime.base import MIMEBase
+
+
+
+class MIMEMultipart(MIMEBase):
+ """Base class for MIME multipart/* type messages."""
+
+ def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
+ *, policy=None,
+ **_params):
+ """Creates a multipart/* type message.
+
+ By default, creates a multipart/mixed message, with proper
+ Content-Type and MIME-Version headers.
+
+ _subtype is the subtype of the multipart content type, defaulting to
+ `mixed'.
+
+ boundary is the multipart boundary string. By default it is
+ calculated as needed.
+
+ _subparts is a sequence of initial subparts for the payload. It
+ must be an iterable object, such as a list. You can always
+ attach new subparts to the message by using the attach() method.
+
+ Additional parameters for the Content-Type header are taken from the
+ keyword arguments (or passed into the _params argument).
+ """
+ MIMEBase.__init__(self, 'multipart', _subtype, policy=policy, **_params)
+
+ # Initialise _payload to an empty list as the Message superclass's
+ # implementation of is_multipart assumes that _payload is a list for
+ # multipart messages.
+ self._payload = []
+
+ if _subparts:
+ for p in _subparts:
+ self.attach(p)
+ if boundary:
+ self.set_boundary(boundary)
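+
+
+# Illustrative sketch: building a multipart/alternative message by hand
+# (the body strings are assumed):
+#
+#   >>> from email.mime.multipart import MIMEMultipart
+#   >>> from email.mime.text import MIMEText
+#   >>> msg = MIMEMultipart('alternative')
+#   >>> msg.attach(MIMEText('plain version'))
+#   >>> msg.attach(MIMEText('<p>html version</p>', 'html'))
+#   >>> msg.get_content_type()
+#   'multipart/alternative'
+#   >>> len(msg.get_payload())
+#   2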
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/nonmultipart.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/nonmultipart.py
new file mode 100644
index 00000000..e1f51968
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/nonmultipart.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2002-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Base class for MIME type messages that are not multipart."""
+
+__all__ = ['MIMENonMultipart']
+
+from email import errors
+from email.mime.base import MIMEBase
+
+
+
+class MIMENonMultipart(MIMEBase):
+ """Base class for MIME non-multipart type messages."""
+
+ def attach(self, payload):
+ # The public API prohibits attaching multiple subparts to MIMEBase
+ # derived subtypes since none of them are, by definition, of content
+ # type multipart/*
+ raise errors.MultipartConversionError(
+ 'Cannot attach additional subparts to non-multipart/*')
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/text.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/text.py
new file mode 100644
index 00000000..35b44238
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/mime/text.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Class representing text/* type MIME documents."""
+
+__all__ = ['MIMEText']
+
+from email.charset import Charset
+from email.mime.nonmultipart import MIMENonMultipart
+
+
+
+class MIMEText(MIMENonMultipart):
+ """Class for generating text/* type MIME documents."""
+
+ def __init__(self, _text, _subtype='plain', _charset=None, *, policy=None):
+ """Create a text/* type MIME document.
+
+ _text is the string for this message object.
+
+ _subtype is the MIME sub content type, defaulting to "plain".
+
+ _charset is the character set parameter added to the Content-Type
+ header. If not given, it defaults to "us-ascii" when _text contains only
+ ASCII characters, and to "utf-8" otherwise. Note that as a side-effect,
+ the Content-Transfer-Encoding header will also be set.
+ """
+
+ # If no _charset was specified, check to see if there are non-ascii
+ # characters present. If not, use 'us-ascii', otherwise use utf-8.
+ # XXX: This can be removed once #7304 is fixed.
+ if _charset is None:
+ try:
+ _text.encode('us-ascii')
+ _charset = 'us-ascii'
+ except UnicodeEncodeError:
+ _charset = 'utf-8'
+
+ MIMENonMultipart.__init__(self, 'text', _subtype, policy=policy,
+ **{'charset': str(_charset)})
+
+ self.set_payload(_text, _charset)
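+
+
+# Illustrative sketch of the charset guessing above:
+#
+#   >>> from email.mime.text import MIMEText
+#   >>> MIMEText('hello').get_content_charset()
+#   'us-ascii'
+#   >>> MIMEText('héllo').get_content_charset()
+#   'utf-8'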
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/parser.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/parser.py
new file mode 100644
index 00000000..555b1725
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/parser.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
+# Contact: email-sig@python.org
+
+"""A parser of RFC 2822 and MIME email messages."""
+
+__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser',
+ 'FeedParser', 'BytesFeedParser']
+
+from io import StringIO, TextIOWrapper
+
+from email.feedparser import FeedParser, BytesFeedParser
+from email._policybase import compat32
+
+
+
+class Parser:
+ def __init__(self, _class=None, *, policy=compat32):
+ """Parser of RFC 2822 and MIME email messages.
+
+ Creates an in-memory object tree representing the email message, which
+ can then be manipulated and turned over to a Generator to return the
+ textual representation of the message.
+
+ The string must be formatted as a block of RFC 2822 headers and header
+ continuation lines, optionally preceded by a `Unix-from' header. The
+ header block is terminated either by the end of the string or by a
+ blank line.
+
+ _class is the class to instantiate for new message objects when they
+ must be created. This class must have a constructor that can take
+ zero arguments. Default is Message.Message.
+
+ The policy keyword specifies a policy object that controls a number of
+ aspects of the parser's operation. The default policy maintains
+ backward compatibility.
+
+ """
+ self._class = _class
+ self.policy = policy
+
+ def parse(self, fp, headersonly=False):
+ """Create a message structure from the data in a file.
+
+ Reads all the data from the file and returns the root of the message
+ structure. Optional headersonly is a flag specifying whether to stop
+ parsing after reading the headers or not. The default is False,
+ meaning it parses the entire contents of the file.
+ """
+ feedparser = FeedParser(self._class, policy=self.policy)
+ if headersonly:
+ feedparser._set_headersonly()
+ while True:
+ data = fp.read(8192)
+ if not data:
+ break
+ feedparser.feed(data)
+ return feedparser.close()
+
+ def parsestr(self, text, headersonly=False):
+ """Create a message structure from a string.
+
+ Returns the root of the message structure. Optional headersonly is a
+ flag specifying whether to stop parsing after reading the headers or
+ not. The default is False, meaning it parses the entire contents of
+ the file.
+ """
+ return self.parse(StringIO(text), headersonly=headersonly)
+
+
+
+class HeaderParser(Parser):
+ def parse(self, fp, headersonly=True):
+ return Parser.parse(self, fp, True)
+
+ def parsestr(self, text, headersonly=True):
+ return Parser.parsestr(self, text, True)
+
+
+class BytesParser:
+
+ def __init__(self, *args, **kw):
+ """Parser of binary RFC 2822 and MIME email messages.
+
+ Creates an in-memory object tree representing the email message, which
+ can then be manipulated and turned over to a Generator to return the
+ textual representation of the message.
+
+ The input must be formatted as a block of RFC 2822 headers and header
+ continuation lines, optionally preceded by a `Unix-from' header. The
+ header block is terminated either by the end of the input or by a
+ blank line.
+
+ _class is the class to instantiate for new message objects when they
+ must be created. This class must have a constructor that can take
+ zero arguments. Default is Message.Message.
+ """
+ self.parser = Parser(*args, **kw)
+
+ def parse(self, fp, headersonly=False):
+ """Create a message structure from the data in a binary file.
+
+ Reads all the data from the file and returns the root of the message
+ structure. Optional headersonly is a flag specifying whether to stop
+ parsing after reading the headers or not. The default is False,
+ meaning it parses the entire contents of the file.
+ """
+ fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
+ try:
+ return self.parser.parse(fp, headersonly)
+ finally:
+ fp.detach()
+
+
+ def parsebytes(self, text, headersonly=False):
+ """Create a message structure from a byte string.
+
+ Returns the root of the message structure. Optional headersonly is a
+ flag specifying whether to stop parsing after reading the headers or
+ not. The default is False, meaning it parses the entire contents of
+ the file.
+ """
+ text = text.decode('ASCII', errors='surrogateescape')
+ return self.parser.parsestr(text, headersonly)
+
+
+class BytesHeaderParser(BytesParser):
+ def parse(self, fp, headersonly=True):
+ return BytesParser.parse(self, fp, headersonly=True)
+
+ def parsebytes(self, text, headersonly=True):
+ return BytesParser.parsebytes(self, text, headersonly=True)
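+
+
+# Illustrative sketch of the two entry points (the sample message text is
+# assumed):
+#
+#   >>> from email.parser import Parser, BytesParser
+#   >>> msg = Parser().parsestr('Subject: hi\n\nbody\n')
+#   >>> msg['Subject'], msg.get_payload()
+#   ('hi', 'body\n')
+#   >>> BytesParser().parsebytes(b'Subject: hi\n\nbody\n')['Subject']
+#   'hi'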
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/policy.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/policy.py
new file mode 100644
index 00000000..5131311a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/policy.py
@@ -0,0 +1,223 @@
+"""This will be the home for the policy that hooks in the new
+code that adds all the email6 features.
+"""
+
+import re
+from email._policybase import Policy, Compat32, compat32, _extend_docstrings
+from email.utils import _has_surrogates
+from email.headerregistry import HeaderRegistry as HeaderRegistry
+from email.contentmanager import raw_data_manager
+from email.message import EmailMessage
+
+__all__ = [
+ 'Compat32',
+ 'compat32',
+ 'Policy',
+ 'EmailPolicy',
+ 'default',
+ 'strict',
+ 'SMTP',
+ 'HTTP',
+ ]
+
+linesep_splitter = re.compile(r'\n|\r')
+
+@_extend_docstrings
+class EmailPolicy(Policy):
+
+ """+
+ PROVISIONAL
+
+ The API extensions enabled by this policy are currently provisional.
+ Refer to the documentation for details.
+
+ This policy adds new header parsing and folding algorithms. Instead of
+ simple strings, headers are custom objects with custom attributes
+ depending on the type of the field. The folding algorithm fully
+ implements RFCs 2047 and 5322.
+
+ In addition to the settable attributes listed above that apply to
+ all Policies, this policy adds the following additional attributes:
+
+ utf8 -- if False (the default) message headers will be
+ serialized as ASCII, using encoded words to encode
+ any non-ASCII characters in the source strings. If
+ True, the message headers will be serialized using
+ utf8 and will not contain encoded words (see RFC
+ 6532 for more on this serialization format).
+
+ refold_source -- if the value for a header in the Message object
+ came from the parsing of some source, this attribute
+ indicates whether or not a generator should refold
+ that value when transforming the message back into
+ stream form. The possible values are:
+
+ none -- all source values use original folding
+ long -- source values that have any line that is
+ longer than max_line_length will be
+ refolded
+ all -- all values are refolded.
+
+ The default is 'long'.
+
+ header_factory -- a callable that takes two arguments, 'name' and
+ 'value', where 'name' is a header field name and
+ 'value' is an unfolded header field value, and
+ returns a string-like object that represents that
+ header. A default header_factory is provided that
+ understands some of the RFC5322 header field types.
+ (Currently address fields and date fields have
+ special treatment, while all other fields are
+ treated as unstructured. This list will be
+ completed before the extension is marked stable.)
+
+ content_manager -- an object with at least two methods: get_content
+ and set_content. When the get_content or
+ set_content method of a Message object is called,
+ it calls the corresponding method of this object,
+ passing it the message object as its first argument,
+ and any arguments or keywords that were passed to
+ it as additional arguments. The default
+ content_manager is
+ :data:`~email.contentmanager.raw_data_manager`.
+
+ """
+
+ message_factory = EmailMessage
+ utf8 = False
+ refold_source = 'long'
+ header_factory = HeaderRegistry()
+ content_manager = raw_data_manager
+
+ def __init__(self, **kw):
+ # Ensure that each new instance gets a unique header factory
+ # (as opposed to clones, which share the factory).
+ if 'header_factory' not in kw:
+ object.__setattr__(self, 'header_factory', HeaderRegistry())
+ super().__init__(**kw)
+
+ def header_max_count(self, name):
+ """+
+ The implementation for this class returns the max_count attribute from
+ the specialized header class that would be used to construct a header
+ of type 'name'.
+ """
+ return self.header_factory[name].max_count
+
+ # The logic of the next three methods is chosen such that it is possible to
+ # switch a Message object between a Compat32 policy and a policy derived
+ # from this class and have the results stay consistent. This allows a
+ # Message object constructed with this policy to be passed to a library
+ # that only handles Compat32 objects, or to receive such an object and
+ # convert it to use the newer style by just changing its policy. It is
+ # also chosen because it postpones the relatively expensive full rfc5322
+ # parse until as late as possible when parsing from source, since in many
+ # applications only a few headers will actually be inspected.
+
+ def header_source_parse(self, sourcelines):
+ """+
+ The name is parsed as everything up to the ':' and returned unmodified.
+ The value is determined by stripping leading whitespace off the
+ remainder of the first line, joining all subsequent lines together, and
+ stripping any trailing carriage return or linefeed characters. (This
+ is the same as Compat32).
+
+ """
+ name, value = sourcelines[0].split(':', 1)
+ value = value.lstrip(' \t') + ''.join(sourcelines[1:])
+ return (name, value.rstrip('\r\n'))
+
+ def header_store_parse(self, name, value):
+ """+
+ The name is returned unchanged. If the input value has a 'name'
+ attribute and it matches the name ignoring case, the value is returned
+ unchanged. Otherwise the name and value are passed to header_factory
+ method, and the resulting custom header object is returned as the
+ value. In this case a ValueError is raised if the input value contains
+ CR or LF characters.
+
+ """
+ if hasattr(value, 'name') and value.name.lower() == name.lower():
+ return (name, value)
+ if isinstance(value, str) and len(value.splitlines())>1:
+ # XXX this error message isn't quite right when we use splitlines
+ # (see issue 22233), but I'm not sure what should happen here.
+ raise ValueError("Header values may not contain linefeed "
+ "or carriage return characters")
+ return (name, self.header_factory(name, value))
+
+ def header_fetch_parse(self, name, value):
+ """+
+ If the value has a 'name' attribute, it is returned unmodified.
+ Otherwise the name and the value with any linesep characters removed
+ are passed to the header_factory method, and the resulting custom
+ header object is returned. Any surrogateescaped bytes get turned
+ into the unicode unknown-character glyph.
+
+ """
+ if hasattr(value, 'name'):
+ return value
+ # We can't use splitlines here because it splits on more than \r and \n.
+ value = ''.join(linesep_splitter.split(value))
+ return self.header_factory(name, value)
+
+ def fold(self, name, value):
+ """+
+ Header folding is controlled by the refold_source policy setting. A
+ value is considered to be a 'source value' if and only if it does not
+ have a 'name' attribute (having a 'name' attribute means it is a header
+ object of some sort). If a source value needs to be refolded according
+ to the policy, it is converted into a custom header object by passing
+ the name and the value with any linesep characters removed to the
+ header_factory method. Folding of a custom header object is done by
+ calling its fold method with the current policy.
+
+ Source values are split into lines using splitlines. If the value is
+ not to be refolded, the lines are rejoined using the linesep from the
+ policy and returned. The exception is lines containing non-ascii
+ binary data. In that case the value is refolded regardless of the
+ refold_source setting, which causes the binary data to be CTE encoded
+ using the unknown-8bit charset.
+
+ """
+ return self._fold(name, value, refold_binary=True)
+
+ def fold_binary(self, name, value):
+ """+
+ The same as fold if cte_type is 7bit, except that the returned value is
+ bytes.
+
+ If cte_type is 8bit, non-ASCII binary data is converted back into
+ bytes. Headers with binary data are not refolded, regardless of the
+ refold_source setting, since there is no way to know whether the binary
+ data consists of single byte characters or multibyte characters.
+
+ If utf8 is true, headers are encoded to utf8, otherwise to ascii with
+ non-ASCII unicode rendered as encoded words.
+
+ """
+ folded = self._fold(name, value, refold_binary=self.cte_type=='7bit')
+ charset = 'utf8' if self.utf8 else 'ascii'
+ return folded.encode(charset, 'surrogateescape')
+
+ def _fold(self, name, value, refold_binary=False):
+ if hasattr(value, 'name'):
+ return value.fold(policy=self)
+ maxlen = self.max_line_length if self.max_line_length else float('inf')
+ lines = value.splitlines()
+ refold = (self.refold_source == 'all' or
+ self.refold_source == 'long' and
+ (lines and len(lines[0])+len(name)+2 > maxlen or
+ any(len(x) > maxlen for x in lines[1:])))
+ if refold or refold_binary and _has_surrogates(value):
+ return self.header_factory(name, ''.join(lines)).fold(policy=self)
+ return name + ': ' + self.linesep.join(lines) + self.linesep
+
+
+default = EmailPolicy()
+# Make the default policy use the class default header_factory
+del default.header_factory
+strict = default.clone(raise_on_defect=True)
+SMTP = default.clone(linesep='\r\n')
+HTTP = default.clone(linesep='\r\n', max_line_length=None)
+SMTPUTF8 = SMTP.clone(utf8=True)
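+
+
+# A rough sketch of the provisional API these policies enable (illustrative
+# only; the sample message text is assumed):
+#
+#   >>> from email import message_from_string, policy
+#   >>> msg = message_from_string('To: Ann <ann@example.com>\n\nhi\n',
+#   ...                           policy=policy.default)
+#   >>> type(msg).__name__
+#   'EmailMessage'
+#   >>> msg['To'].addresses[0].username
+#   'ann'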
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/quoprimime.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/quoprimime.py
new file mode 100644
index 00000000..94534f7e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/quoprimime.py
@@ -0,0 +1,299 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Ben Gertzfield
+# Contact: email-sig@python.org
+
+"""Quoted-printable content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
+safely encode text that is in a character set similar to the 7-bit US ASCII
+character set, but that includes some 8-bit characters that are normally not
+allowed in email bodies or headers.
+
+Quoted-printable is very space-inefficient for encoding binary files; use the
+email.base64mime module for that instead.
+
+This module provides an interface to encode and decode both headers and bodies
+with quoted-printable encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header. This method is commonly used for 8-bit real names
+in To:/From:/Cc: etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character
+conversion necessary for proper internationalized headers; it only
+does dumb encoding and decoding. To deal with the various line
+wrapping issues, use the email.header module.
+"""
+
+__all__ = [
+ 'body_decode',
+ 'body_encode',
+ 'body_length',
+ 'decode',
+ 'decodestring',
+ 'header_decode',
+ 'header_encode',
+ 'header_length',
+ 'quote',
+ 'unquote',
+ ]
+
+import re
+
+from string import ascii_letters, digits, hexdigits
+
+CRLF = '\r\n'
+NL = '\n'
+EMPTYSTRING = ''
+
+# Build a mapping of octets to the expansion of that octet. Since we're only
+# going to have 256 of these things, this isn't terribly inefficient
+# space-wise. Remember that headers and bodies have different sets of safe
+# characters. Initialize both maps with the full expansion, and then override
+# the safe bytes with the more compact form.
+_QUOPRI_MAP = ['=%02X' % c for c in range(256)]
+_QUOPRI_HEADER_MAP = _QUOPRI_MAP[:]
+_QUOPRI_BODY_MAP = _QUOPRI_MAP[:]
+
+# Safe header bytes which need no encoding.
+for c in b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'):
+ _QUOPRI_HEADER_MAP[c] = chr(c)
+# Headers have one other special encoding; spaces become underscores.
+_QUOPRI_HEADER_MAP[ord(' ')] = '_'
+
+# Safe body bytes which need no encoding.
+for c in (b' !"#$%&\'()*+,-./0123456789:;<>'
+ b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
+ b'abcdefghijklmnopqrstuvwxyz{|}~\t'):
+ _QUOPRI_BODY_MAP[c] = chr(c)
+
+
+
+# Helpers
+def header_check(octet):
+ """Return True if the octet should be escaped with header quopri."""
+ return chr(octet) != _QUOPRI_HEADER_MAP[octet]
+
+
+def body_check(octet):
+ """Return True if the octet should be escaped with body quopri."""
+ return chr(octet) != _QUOPRI_BODY_MAP[octet]
+
+
+def header_length(bytearray):
+ """Return a header quoted-printable encoding length.
+
+ Note that this does not include any RFC 2047 chrome added by
+ `header_encode()`.
+
+ :param bytearray: An array of bytes (a.k.a. octets).
+ :return: The length in bytes of the byte array when it is encoded with
+ quoted-printable for headers.
+ """
+ return sum(len(_QUOPRI_HEADER_MAP[octet]) for octet in bytearray)
+
+
+def body_length(bytearray):
+ """Return a body quoted-printable encoding length.
+
+ :param bytearray: An array of bytes (a.k.a. octets).
+ :return: The length in bytes of the byte array when it is encoded with
+ quoted-printable for bodies.
+ """
+ return sum(len(_QUOPRI_BODY_MAP[octet]) for octet in bytearray)
+
+
+def _max_append(L, s, maxlen, extra=''):
+ if not isinstance(s, str):
+ s = chr(s)
+ if not L:
+ L.append(s.lstrip())
+ elif len(L[-1]) + len(s) <= maxlen:
+ L[-1] += extra + s
+ else:
+ L.append(s.lstrip())
+
+
+def unquote(s):
+ """Turn a string in the form =AB to the ASCII character with value 0xab"""
+ return chr(int(s[1:3], 16))
+
+
+def quote(c):
+ return _QUOPRI_MAP[ord(c)]
+
+
+def header_encode(header_bytes, charset='iso-8859-1'):
+ """Encode a single header line with quoted-printable (like) encoding.
+
+ Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
+ used specifically for email header fields to allow charsets with mostly 7
+ bit characters (and some 8 bit) to remain more or less readable in non-RFC
+ 2045 aware mail clients.
+
+ charset names the character set to use in the RFC 2046 header. It
+ defaults to iso-8859-1.
+ """
+ # Return empty headers as an empty string.
+ if not header_bytes:
+ return ''
+ # Iterate over every byte, encoding if necessary.
+ encoded = header_bytes.decode('latin1').translate(_QUOPRI_HEADER_MAP)
+ # Now add the RFC chrome to each encoded chunk and glue the chunks
+ # together.
+ return '=?%s?q?%s?=' % (charset, encoded)
+
+
+_QUOPRI_BODY_ENCODE_MAP = _QUOPRI_BODY_MAP[:]
+for c in b'\r\n':
+ _QUOPRI_BODY_ENCODE_MAP[c] = chr(c)
+
+def body_encode(body, maxlinelen=76, eol=NL):
+ """Encode with quoted-printable, wrapping at maxlinelen characters.
+
+ Each line of encoded text will end with eol, which defaults to "\\n". Set
+ this to "\\r\\n" if you will be using the result of this function directly
+ in an email.
+
+ Each line will be wrapped at, at most, maxlinelen characters before the
+ eol string (maxlinelen defaults to 76 characters, the maximum value
+ permitted by RFC 2045). Long lines will have the 'soft line break'
+ quoted-printable character "=" appended to them, so the decoded text will
+ be identical to the original text.
+
+ The minimum maxlinelen is 4 to have room for a quoted character ("=XX")
+ followed by a soft line break. Smaller values will generate a
+ ValueError.
+
+ """
+
+ if maxlinelen < 4:
+ raise ValueError("maxlinelen must be at least 4")
+ if not body:
+ return body
+
+ # quote special characters
+ body = body.translate(_QUOPRI_BODY_ENCODE_MAP)
+
+ soft_break = '=' + eol
+ # leave space for the '=' at the end of a line
+ maxlinelen1 = maxlinelen - 1
+
+ encoded_body = []
+ append = encoded_body.append
+
+ for line in body.splitlines():
+ # break up the line into pieces no longer than maxlinelen - 1
+ start = 0
+ laststart = len(line) - 1 - maxlinelen
+ while start <= laststart:
+ stop = start + maxlinelen1
+ # make sure we don't break up an escape sequence
+ if line[stop - 2] == '=':
+ append(line[start:stop - 1])
+ start = stop - 2
+ elif line[stop - 1] == '=':
+ append(line[start:stop])
+ start = stop - 1
+ else:
+ append(line[start:stop] + '=')
+ start = stop
+
+ # handle rest of line, special case if line ends in whitespace
+ if line and line[-1] in ' \t':
+ room = start - laststart
+ if room >= 3:
+ # It's a whitespace character at end-of-line, and we have room
+ # for the three-character quoted encoding.
+ q = quote(line[-1])
+ elif room == 2:
+ # There's room for the whitespace character and a soft break.
+ q = line[-1] + soft_break
+ else:
+ # There's room only for a soft break. The quoted whitespace
+ # will be the only content on the subsequent line.
+ q = soft_break + quote(line[-1])
+ append(line[start:-1] + q)
+ else:
+ append(line[start:])
+
+ # add back final newline if present
+ if body[-1] in CRLF:
+ append('')
+
+ return eol.join(encoded_body)
+
+
+
+# BAW: I'm not sure if the intent was for the signature of this function to be
+# the same as base64MIME.decode() or not...
+def decode(encoded, eol=NL):
+ """Decode a quoted-printable string.
+
+ Lines are separated with eol, which defaults to \\n.
+ """
+ if not encoded:
+ return encoded
+ # BAW: see comment in encode() above. Again, we're building up the
+ # decoded string with string concatenation, which could be done much more
+ # efficiently.
+ decoded = ''
+
+ for line in encoded.splitlines():
+ line = line.rstrip()
+ if not line:
+ decoded += eol
+ continue
+
+ i = 0
+ n = len(line)
+ while i < n:
+ c = line[i]
+ if c != '=':
+ decoded += c
+ i += 1
+ # Otherwise, c == "=". Are we at the end of the line? If so, add
+ # a soft line break.
+ elif i+1 == n:
+ i += 1
+ continue
+ # Decode if in form =AB
+ elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
+ decoded += unquote(line[i:i+3])
+ i += 3
+ # Otherwise, not in form =AB, pass literally
+ else:
+ decoded += c
+ i += 1
+
+ if i == n:
+ decoded += eol
+ # Special case if original string did not end with eol
+ if encoded[-1] not in '\r\n' and decoded.endswith(eol):
+ decoded = decoded[:-1]
+ return decoded
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+body_decode = decode
+decodestring = decode
+
+
+
+def _unquote_match(match):
+ """Turn a match in the form =AB to the ASCII character with value 0xab"""
+ s = match.group(0)
+ return unquote(s)
+
+
+# Header decoding is done a bit differently
+def header_decode(s):
+ """Decode a string encoded with RFC 2045 MIME header `Q' encoding.
+
+ This function does not parse a full MIME header value encoded with
+ quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use
+ the high level email.header class for that functionality.
+ """
+ s = s.replace('_', ' ')
+ return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, flags=re.ASCII)
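+
+# Illustrative usage (expected behaviour, not executed here):
+# >>> header_decode('Hello_World')
+# 'Hello World'
+# >>> header_decode('Caf=E9')
+# 'Café'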
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/utils.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/utils.py
new file mode 100644
index 00000000..858f620e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/email/utils.py
@@ -0,0 +1,376 @@
+# Copyright (C) 2001-2010 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Miscellaneous utilities."""
+
+__all__ = [
+ 'collapse_rfc2231_value',
+ 'decode_params',
+ 'decode_rfc2231',
+ 'encode_rfc2231',
+ 'formataddr',
+ 'formatdate',
+ 'format_datetime',
+ 'getaddresses',
+ 'make_msgid',
+ 'mktime_tz',
+ 'parseaddr',
+ 'parsedate',
+ 'parsedate_tz',
+ 'parsedate_to_datetime',
+ 'unquote',
+ ]
+
+import os
+import re
+import time
+import random
+import socket
+import datetime
+import urllib.parse
+
+from email._parseaddr import quote
+from email._parseaddr import AddressList as _AddressList
+from email._parseaddr import mktime_tz
+
+from email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
+
+# Intrapackage imports
+from email.charset import Charset
+
+COMMASPACE = ', '
+EMPTYSTRING = ''
+UEMPTYSTRING = ''
+CRLF = '\r\n'
+TICK = "'"
+
+specialsre = re.compile(r'[][\\()<>@,:;".]')
+escapesre = re.compile(r'[\\"]')
+
+def _has_surrogates(s):
+ """Return True if s contains surrogate-escaped binary data."""
+ # This check is based on the fact that unless there are surrogates, utf8
+ # (Python's default encoding) can encode any string. This is the fastest
+ # way to check for surrogates, see issue 11454 for timings.
+ try:
+ s.encode()
+ return False
+ except UnicodeEncodeError:
+ return True
+
+# How to deal with a string containing bytes before handing it to the
+# application through the 'normal' interface.
+def _sanitize(string):
+ # Turn any escaped bytes into unicode 'unknown' char. If the escaped
+ # bytes happen to be utf-8 they will instead get decoded, even if they
+ # were invalid in the charset the source was supposed to be in. This
+ # seems like it is not a bad thing; a defect was still registered.
+ original_bytes = string.encode('utf-8', 'surrogateescape')
+ return original_bytes.decode('utf-8', 'replace')
+
+
+
+# Helpers
+
+def formataddr(pair, charset='utf-8'):
+ """The inverse of parseaddr(), this takes a 2-tuple of the form
+ (realname, email_address) and returns the string value suitable
+ for an RFC 2822 From, To or Cc header.
+
+ If the first element of pair is false, then the second element is
+ returned unmodified.
+
+ Optional charset if given is the character set that is used to encode
+ realname in case realname is not ASCII safe. Can be an instance of str or
+ a Charset-like object which has a header_encode method. Default is
+ 'utf-8'.
+ """
+ name, address = pair
+ # The address MUST (per RFC) be ascii, so raise a UnicodeError if it isn't.
+ address.encode('ascii')
+ if name:
+ try:
+ name.encode('ascii')
+ except UnicodeEncodeError:
+ if isinstance(charset, str):
+ charset = Charset(charset)
+ encoded_name = charset.header_encode(name)
+ return "%s <%s>" % (encoded_name, address)
+ else:
+ quotes = ''
+ if specialsre.search(name):
+ quotes = '"'
+ name = escapesre.sub(r'\\\g<0>', name)
+ return '%s%s%s <%s>' % (quotes, name, quotes, address)
+ return address
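+
+# Illustrative usage (expected behaviour, not executed here):
+# >>> formataddr(('Jane Doe', 'jane@example.com'))
+# 'Jane Doe <jane@example.com>'
+# A non-ASCII realname is header-encoded via the given charset, giving
+# something of the form '=?utf-8?b?...?= <jane@example.com>'.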
+
+
+
+def getaddresses(fieldvalues):
+ """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
+ all = COMMASPACE.join(fieldvalues)
+ a = _AddressList(all)
+ return a.addresslist
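+
+# Illustrative usage (expected behaviour, not executed here):
+# >>> getaddresses(['Jane Doe <jane@example.com>', 'bob@example.com'])
+# [('Jane Doe', 'jane@example.com'), ('', 'bob@example.com')]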
+
+
+def _format_timetuple_and_zone(timetuple, zone):
+ return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
+ ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
+ timetuple[2],
+ ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
+ timetuple[0], timetuple[3], timetuple[4], timetuple[5],
+ zone)
+
+def formatdate(timeval=None, localtime=False, usegmt=False):
+ """Returns a date string as specified by RFC 2822, e.g.:
+
+ Fri, 09 Nov 2001 01:08:47 -0000
+
+ Optional timeval if given is a floating point time value as accepted by
+ gmtime() and localtime(), otherwise the current time is used.
+
+ Optional localtime is a flag that, when True, interprets timeval and
+ returns a date relative to the local timezone instead of UTC, properly
+ taking daylight saving time into account.
+
+ Optional argument usegmt means that the timezone is written out as
+ an ASCII string, not a numeric one (so "GMT" instead of "+0000"). This
+ is needed for HTTP, and is only used when localtime==False.
+ """
+ # Note: we cannot use strftime() because that honors the locale and RFC
+ # 2822 requires that day and month names be the English abbreviations.
+ if timeval is None:
+ timeval = time.time()
+ if localtime or usegmt:
+ dt = datetime.datetime.fromtimestamp(timeval, datetime.timezone.utc)
+ else:
+ dt = datetime.datetime.utcfromtimestamp(timeval)
+ if localtime:
+ dt = dt.astimezone()
+ usegmt = False
+ return format_datetime(dt, usegmt)
+
+def format_datetime(dt, usegmt=False):
+ """Turn a datetime into a date string as specified in RFC 2822.
+
+ If usegmt is True, dt must be an aware datetime with an offset of zero. In
+ this case 'GMT' will be rendered instead of the normal +0000 required by
+ RFC2822. This is to support HTTP headers involving date stamps.
+ """
+ now = dt.timetuple()
+ if usegmt:
+ if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
+ raise ValueError("usegmt option requires a UTC datetime")
+ zone = 'GMT'
+ elif dt.tzinfo is None:
+ zone = '-0000'
+ else:
+ zone = dt.strftime("%z")
+ return _format_timetuple_and_zone(now, zone)
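+
+# Illustrative usage (expected behaviour, not executed here):
+# >>> formatdate(0, usegmt=True)
+# 'Thu, 01 Jan 1970 00:00:00 GMT'
+# >>> format_datetime(datetime.datetime(2001, 11, 9, 1, 8, 47,
+# ...                                   tzinfo=datetime.timezone.utc))
+# 'Fri, 09 Nov 2001 01:08:47 +0000'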
+
+
+def make_msgid(idstring=None, domain=None):
+ """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
+
+ <142480216486.20800.16526388040877946887@nightshade.la.mastaler.com>
+
+ Optional idstring if given is a string used to strengthen the
+ uniqueness of the message id. Optional domain if given provides the
+ portion of the message id after the '@'. It defaults to the locally
+ defined hostname.
+ """
+ timeval = int(time.time()*100)
+ pid = os.getpid()
+ randint = random.getrandbits(64)
+ if idstring is None:
+ idstring = ''
+ else:
+ idstring = '.' + idstring
+ if domain is None:
+ domain = socket.getfqdn()
+ msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, domain)
+ return msgid
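+
+# Illustrative usage: the exact value depends on the clock, the PID, a random
+# number and the local hostname, so only the general shape is predictable,
+# e.g. (representative output only):
+# >>> make_msgid(domain='example.com')
+# '<154872968212.1234.567890@example.com>'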
+
+
+def parsedate_to_datetime(data):
+ *dtuple, tz = _parsedate_tz(data)
+ if tz is None:
+ return datetime.datetime(*dtuple[:6])
+ return datetime.datetime(*dtuple[:6],
+ tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
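+
+# Illustrative usage (expected behaviour, not executed here):
+# >>> parsedate_to_datetime('Fri, 09 Nov 2001 01:08:47 +0000')
+# ... an aware datetime equal to datetime(2001, 11, 9, 1, 8, 47) in UTC.
+# A date string without a timezone yields a naive datetime instead.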
+
+
+def parseaddr(addr):
+ """
+ Parse addr into its constituent realname and email address parts.
+
+ Return a tuple of realname and email address, unless the parse fails, in
+ which case return a 2-tuple of ('', '').
+ """
+ addrs = _AddressList(addr).addresslist
+ if not addrs:
+ return '', ''
+ return addrs[0]
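+
+# Illustrative usage (expected behaviour, not executed here):
+# >>> parseaddr('Jane Doe <jane@example.com>')
+# ('Jane Doe', 'jane@example.com')
+# >>> parseaddr('')
+# ('', '')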
+
+
+# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
+def unquote(str):
+ """Remove quotes from a string."""
+ if len(str) > 1:
+ if str.startswith('"') and str.endswith('"'):
+ return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
+ if str.startswith('<') and str.endswith('>'):
+ return str[1:-1]
+ return str
+
+
+
+# RFC2231-related functions - parameter encoding and decoding
+def decode_rfc2231(s):
+ """Decode string according to RFC 2231"""
+ parts = s.split(TICK, 2)
+ if len(parts) <= 2:
+ return None, None, s
+ return parts
+
+
+def encode_rfc2231(s, charset=None, language=None):
+ """Encode string according to RFC 2231.
+
+ If neither charset nor language is given, then s is returned as-is. If
+ charset is given but not language, the string is encoded using the empty
+ string for language.
+ """
+ s = urllib.parse.quote(s, safe='', encoding=charset or 'ascii')
+ if charset is None and language is None:
+ return s
+ if language is None:
+ language = ''
+ return "%s'%s'%s" % (charset, language, s)
+
+
+rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
+ re.ASCII)
+
+def decode_params(params):
+ """Decode parameters list according to RFC 2231.
+
+ params is a sequence of 2-tuples containing (param name, string value).
+ """
+ # Copy params so we don't mess with the original
+ params = params[:]
+ new_params = []
+ # Map parameter's name to a list of continuations. The values are a
+ # 3-tuple of the continuation number, the string value, and a flag
+ # specifying whether a particular segment is %-encoded.
+ rfc2231_params = {}
+ name, value = params.pop(0)
+ new_params.append((name, value))
+ while params:
+ name, value = params.pop(0)
+ if name.endswith('*'):
+ encoded = True
+ else:
+ encoded = False
+ value = unquote(value)
+ mo = rfc2231_continuation.match(name)
+ if mo:
+ name, num = mo.group('name', 'num')
+ if num is not None:
+ num = int(num)
+ rfc2231_params.setdefault(name, []).append((num, value, encoded))
+ else:
+ new_params.append((name, '"%s"' % quote(value)))
+ if rfc2231_params:
+ for name, continuations in rfc2231_params.items():
+ value = []
+ extended = False
+ # Sort by number
+ continuations.sort()
+ # And now append all values in numerical order, converting
+ # %-encodings for the encoded segments. If any of the
+ # continuation names ends in a *, then the entire string, after
+ # decoding segments and concatenating, must have the charset and
+ # language specifiers at the beginning of the string.
+ for num, s, encoded in continuations:
+ if encoded:
+ # Decode as "latin-1", so the characters in s directly
+ # represent the percent-encoded octet values.
+ # collapse_rfc2231_value treats this as an octet sequence.
+ s = urllib.parse.unquote(s, encoding="latin-1")
+ extended = True
+ value.append(s)
+ value = quote(EMPTYSTRING.join(value))
+ if extended:
+ charset, language, value = decode_rfc2231(value)
+ new_params.append((name, (charset, language, '"%s"' % value)))
+ else:
+ new_params.append((name, '"%s"' % value))
+ return new_params
+
+def collapse_rfc2231_value(value, errors='replace',
+ fallback_charset='us-ascii'):
+ if not isinstance(value, tuple) or len(value) != 3:
+ return unquote(value)
+ # While value comes to us as a unicode string, we need it to be a bytes
+ # object. We do not want the normal utf-8 encoding of bytes(); we want a
+ # straight interpretation of the string's code points as byte values.
+ charset, language, text = value
+ if charset is None:
+ # Issue 17369: if charset/lang is None, decode_rfc2231 couldn't parse
+ # the value, so use the fallback_charset.
+ charset = fallback_charset
+ rawbytes = bytes(text, 'raw-unicode-escape')
+ try:
+ return str(rawbytes, charset, errors)
+ except LookupError:
+ # charset is not a known codec.
+ return unquote(text)
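+
+# Illustrative usage (expected behaviour, not executed here): the 3-tuple
+# form comes from RFC 2231 extended parameters, with the text holding the
+# raw octets as latin-1 characters.
+# >>> collapse_rfc2231_value(('utf-8', '', 'caf\xc3\xa9'))
+# 'café'
+# >>> collapse_rfc2231_value('"plain"')
+# 'plain'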
+
+
+#
+# datetime doesn't provide a localtime function yet, so provide one. Code
+# adapted from the patch in issue 9527. This may not be perfect, but it is
+# better than not having it.
+#
+
+def localtime(dt=None, isdst=-1):
+ """Return local time as an aware datetime object.
+
+ If called without arguments, return current time. Otherwise *dt*
+ argument should be a datetime instance, and it is converted to the
+ local time zone according to the system time zone database. If *dt* is
+ naive (that is, dt.tzinfo is None), it is assumed to be in local time.
+ In this case, a positive or zero value for *isdst* causes localtime to
+ presume initially that summer time (for example, Daylight Saving Time)
+ is or is not (respectively) in effect for the specified time. A
+ negative value for *isdst* causes the localtime() function to attempt
+ to divine whether summer time is in effect for the specified time.
+
+ """
+ if dt is None:
+ return datetime.datetime.now(datetime.timezone.utc).astimezone()
+ if dt.tzinfo is not None:
+ return dt.astimezone()
+ # We have a naive datetime. Convert to a (localtime) timetuple and pass to
+ # system mktime together with the isdst hint. System mktime will return
+ # seconds since epoch.
+ tm = dt.timetuple()[:-1] + (isdst,)
+ seconds = time.mktime(tm)
+ localtm = time.localtime(seconds)
+ try:
+ delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
+ tz = datetime.timezone(delta, localtm.tm_zone)
+ except AttributeError:
+ # Compute UTC offset and compare with the value implied by tm_isdst.
+ # If the values match, use the zone name implied by tm_isdst.
+ delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
+ dst = time.daylight and localtm.tm_isdst > 0
+ gmtoff = -(time.altzone if dst else time.timezone)
+ if delta == datetime.timedelta(seconds=gmtoff):
+ tz = datetime.timezone(delta, time.tzname[dst])
+ else:
+ tz = datetime.timezone(delta)
+ return dt.replace(tzinfo=tz)
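+
+# Illustrative usage: the result depends on the system timezone database, so
+# only the general behaviour is sketched here.
+# >>> localtime()                                   # aware "now" in local time
+# >>> localtime(datetime.datetime(2015, 6, 1, 12))  # naive input, local tzinfo
+# Both return datetime objects whose tzinfo carries the local UTC offset.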
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__init__.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__init__.py
new file mode 100644
index 00000000..025b7a8d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__init__.py
@@ -0,0 +1,170 @@
+""" Standard "encodings" Package
+
+ Standard Python encoding modules are stored in this package
+ directory.
+
+ Codec modules must have names corresponding to normalized encoding
+ names as defined in the normalize_encoding() function below, e.g.
+ 'utf-8' must be implemented by the module 'utf_8.py'.
+
+ Each codec module must export the following interface:
+
+ * getregentry() -> codecs.CodecInfo object
+ The getregentry() API must return a CodecInfo object with encoder, decoder,
+ incrementalencoder, incrementaldecoder, streamwriter and streamreader
+ attributes which adhere to the Python Codec Interface Standard.
+
+ In addition, a module may optionally also define the following
+ APIs which are then used by the package's codec search function:
+
+ * getaliases() -> sequence of encoding name strings to use as aliases
+
+ Alias names returned by getaliases() must be normalized encoding
+ names as defined by normalize_encoding().
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import codecs
+import sys
+from . import aliases
+
+_cache = {}
+_unknown = '--unknown--'
+_import_tail = ['*']
+_aliases = aliases.aliases
+
+class CodecRegistryError(LookupError, SystemError):
+ pass
+
+def normalize_encoding(encoding):
+
+ """ Normalize an encoding name.
+
+ Normalization works as follows: all non-alphanumeric
+ characters except the dot used for Python package names are
+ collapsed and replaced with a single underscore, e.g. ' -;#'
+ becomes '_'. Leading and trailing underscores are removed.
+
+ Note that encoding names should be ASCII only; if they do use
+ non-ASCII characters, these must be Latin-1 compatible.
+
+ """
+ if isinstance(encoding, bytes):
+ encoding = str(encoding, "ascii")
+
+ chars = []
+ punct = False
+ for c in encoding:
+ if c.isalnum() or c == '.':
+ if punct and chars:
+ chars.append('_')
+ chars.append(c)
+ punct = False
+ else:
+ punct = True
+ return ''.join(chars)
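+
+# Illustrative behaviour (expected output, not executed here):
+# >>> normalize_encoding('Latin-1')
+# 'Latin_1'
+# >>> normalize_encoding('utf 8')
+# 'utf_8'
+# Case is preserved here; codecs.lookup() lower-cases the requested name
+# before it reaches search_function() below.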
+
+def search_function(encoding):
+
+ # Cache lookup
+ entry = _cache.get(encoding, _unknown)
+ if entry is not _unknown:
+ return entry
+
+ # Import the module:
+ #
+ # First try to find an alias for the normalized encoding
+ # name and lookup the module using the aliased name, then try to
+ # lookup the module using the standard import scheme, i.e. first
+ # try in the encodings package, then at top-level.
+ #
+ norm_encoding = normalize_encoding(encoding)
+ aliased_encoding = _aliases.get(norm_encoding) or \
+ _aliases.get(norm_encoding.replace('.', '_'))
+ if aliased_encoding is not None:
+ modnames = [aliased_encoding,
+ norm_encoding]
+ else:
+ modnames = [norm_encoding]
+ for modname in modnames:
+ if not modname or '.' in modname:
+ continue
+ try:
+ # Import is absolute to prevent the possibly malicious import of a
+ # module with side-effects that is not in the 'encodings' package.
+ mod = __import__('encodings.' + modname, fromlist=_import_tail,
+ level=0)
+ except ImportError:
+ # ImportError may occur because 'encodings.(modname)' does not exist,
+ # or because it imports a name that does not exist (see mbcs and oem)
+ pass
+ else:
+ break
+ else:
+ mod = None
+
+ try:
+ getregentry = mod.getregentry
+ except AttributeError:
+ # Not a codec module
+ mod = None
+
+ if mod is None:
+ # Cache misses
+ _cache[encoding] = None
+ return None
+
+ # Now ask the module for the registry entry
+ entry = getregentry()
+ if not isinstance(entry, codecs.CodecInfo):
+ if not 4 <= len(entry) <= 7:
+ raise CodecRegistryError('module "%s" (%s) failed to register'
+ % (mod.__name__, mod.__file__))
+ if not callable(entry[0]) or not callable(entry[1]) or \
+ (entry[2] is not None and not callable(entry[2])) or \
+ (entry[3] is not None and not callable(entry[3])) or \
+ (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
+ (len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
+ raise CodecRegistryError('incompatible codecs in module "%s" (%s)'
+ % (mod.__name__, mod.__file__))
+ if len(entry)<7 or entry[6] is None:
+ entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
+ entry = codecs.CodecInfo(*entry)
+
+ # Cache the codec registry entry
+ _cache[encoding] = entry
+
+ # Register its aliases (without overwriting previously registered
+ # aliases)
+ try:
+ codecaliases = mod.getaliases()
+ except AttributeError:
+ pass
+ else:
+ for alias in codecaliases:
+ if alias not in _aliases:
+ _aliases[alias] = modname
+
+ # Return the registry entry
+ return entry
+
+# Register the search_function in the Python codec registry
+codecs.register(search_function)
+
+if sys.platform == 'win32':
+ def _alias_mbcs(encoding):
+ try:
+ import _winapi
+ ansi_code_page = "cp%s" % _winapi.GetACP()
+ if encoding == ansi_code_page:
+ import encodings.mbcs
+ return encodings.mbcs.getregentry()
+ except ImportError:
+ # Imports may fail while we are shutting down
+ pass
+
+ codecs.register(_alias_mbcs)
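+
+# Illustrative effect of the registered search function (expected behaviour,
+# not executed here):
+# >>> import codecs
+# >>> codecs.lookup('windows-1252').name    # alias resolved via aliases.py
+# 'cp1252'
+# >>> codecs.lookup('UTF-8').name           # module encodings.utf_8
+# 'utf-8'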
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/__init__.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..ab1502ce
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/__init__.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/aliases.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/aliases.cpython-37.pyc
new file mode 100644
index 00000000..9fcf9e45
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/aliases.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/ascii.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/ascii.cpython-37.pyc
new file mode 100644
index 00000000..fa9adb59
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/ascii.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/big5.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/big5.cpython-37.pyc
new file mode 100644
index 00000000..dedc0c43
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/big5.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp1251.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp1251.cpython-37.pyc
new file mode 100644
index 00000000..9a70c0cc
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp1251.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp1252.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp1252.cpython-37.pyc
new file mode 100644
index 00000000..92a367ff
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp1252.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp437.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp437.cpython-37.pyc
new file mode 100644
index 00000000..198cc20d
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/cp437.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/idna.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/idna.cpython-37.pyc
new file mode 100644
index 00000000..861fe4ba
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/idna.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/latin_1.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/latin_1.cpython-37.pyc
new file mode 100644
index 00000000..f83b2d62
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/latin_1.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_16_be.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_16_be.cpython-37.pyc
new file mode 100644
index 00000000..bcba18b6
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_16_be.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_16_le.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_16_le.cpython-37.pyc
new file mode 100644
index 00000000..294c68d8
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_16_le.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_8.cpython-37.pyc b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_8.cpython-37.pyc
new file mode 100644
index 00000000..26533e3f
Binary files /dev/null and b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/__pycache__/utf_8.cpython-37.pyc differ
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/aliases.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/aliases.py
new file mode 100644
index 00000000..2e63c2f9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/aliases.py
@@ -0,0 +1,550 @@
+""" Encoding Aliases Support
+
+ This module is used by the encodings package search function to
+ map encodings names to module names.
+
+ Note that the search function normalizes the encoding names before
+ doing the lookup, so the mapping will have to map normalized
+ encoding names to module names.
+
+ Contents:
+
+ The following aliases dictionary contains mappings of all IANA
+ character set names for which the Python core library provides
+ codecs. In addition to these, a few Python specific codec
+ aliases have also been added.
+
+"""
+aliases = {
+
+ # Please keep this list sorted alphabetically by value !
+
+ # ascii codec
+ '646' : 'ascii',
+ 'ansi_x3.4_1968' : 'ascii',
+ 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
+ 'ansi_x3.4_1986' : 'ascii',
+ 'cp367' : 'ascii',
+ 'csascii' : 'ascii',
+ 'ibm367' : 'ascii',
+ 'iso646_us' : 'ascii',
+ 'iso_646.irv_1991' : 'ascii',
+ 'iso_ir_6' : 'ascii',
+ 'us' : 'ascii',
+ 'us_ascii' : 'ascii',
+
+ # base64_codec codec
+ 'base64' : 'base64_codec',
+ 'base_64' : 'base64_codec',
+
+ # big5 codec
+ 'big5_tw' : 'big5',
+ 'csbig5' : 'big5',
+
+ # big5hkscs codec
+ 'big5_hkscs' : 'big5hkscs',
+ 'hkscs' : 'big5hkscs',
+
+ # bz2_codec codec
+ 'bz2' : 'bz2_codec',
+
+ # cp037 codec
+ '037' : 'cp037',
+ 'csibm037' : 'cp037',
+ 'ebcdic_cp_ca' : 'cp037',
+ 'ebcdic_cp_nl' : 'cp037',
+ 'ebcdic_cp_us' : 'cp037',
+ 'ebcdic_cp_wt' : 'cp037',
+ 'ibm037' : 'cp037',
+ 'ibm039' : 'cp037',
+
+ # cp1026 codec
+ '1026' : 'cp1026',
+ 'csibm1026' : 'cp1026',
+ 'ibm1026' : 'cp1026',
+
+ # cp1125 codec
+ '1125' : 'cp1125',
+ 'ibm1125' : 'cp1125',
+ 'cp866u' : 'cp1125',
+ 'ruscii' : 'cp1125',
+
+ # cp1140 codec
+ '1140' : 'cp1140',
+ 'ibm1140' : 'cp1140',
+
+ # cp1250 codec
+ '1250' : 'cp1250',
+ 'windows_1250' : 'cp1250',
+
+ # cp1251 codec
+ '1251' : 'cp1251',
+ 'windows_1251' : 'cp1251',
+
+ # cp1252 codec
+ '1252' : 'cp1252',
+ 'windows_1252' : 'cp1252',
+
+ # cp1253 codec
+ '1253' : 'cp1253',
+ 'windows_1253' : 'cp1253',
+
+ # cp1254 codec
+ '1254' : 'cp1254',
+ 'windows_1254' : 'cp1254',
+
+ # cp1255 codec
+ '1255' : 'cp1255',
+ 'windows_1255' : 'cp1255',
+
+ # cp1256 codec
+ '1256' : 'cp1256',
+ 'windows_1256' : 'cp1256',
+
+ # cp1257 codec
+ '1257' : 'cp1257',
+ 'windows_1257' : 'cp1257',
+
+ # cp1258 codec
+ '1258' : 'cp1258',
+ 'windows_1258' : 'cp1258',
+
+ # cp273 codec
+ '273' : 'cp273',
+ 'ibm273' : 'cp273',
+ 'csibm273' : 'cp273',
+
+ # cp424 codec
+ '424' : 'cp424',
+ 'csibm424' : 'cp424',
+ 'ebcdic_cp_he' : 'cp424',
+ 'ibm424' : 'cp424',
+
+ # cp437 codec
+ '437' : 'cp437',
+ 'cspc8codepage437' : 'cp437',
+ 'ibm437' : 'cp437',
+
+ # cp500 codec
+ '500' : 'cp500',
+ 'csibm500' : 'cp500',
+ 'ebcdic_cp_be' : 'cp500',
+ 'ebcdic_cp_ch' : 'cp500',
+ 'ibm500' : 'cp500',
+
+ # cp775 codec
+ '775' : 'cp775',
+ 'cspc775baltic' : 'cp775',
+ 'ibm775' : 'cp775',
+
+ # cp850 codec
+ '850' : 'cp850',
+ 'cspc850multilingual' : 'cp850',
+ 'ibm850' : 'cp850',
+
+ # cp852 codec
+ '852' : 'cp852',
+ 'cspcp852' : 'cp852',
+ 'ibm852' : 'cp852',
+
+ # cp855 codec
+ '855' : 'cp855',
+ 'csibm855' : 'cp855',
+ 'ibm855' : 'cp855',
+
+ # cp857 codec
+ '857' : 'cp857',
+ 'csibm857' : 'cp857',
+ 'ibm857' : 'cp857',
+
+ # cp858 codec
+ '858' : 'cp858',
+ 'csibm858' : 'cp858',
+ 'ibm858' : 'cp858',
+
+ # cp860 codec
+ '860' : 'cp860',
+ 'csibm860' : 'cp860',
+ 'ibm860' : 'cp860',
+
+ # cp861 codec
+ '861' : 'cp861',
+ 'cp_is' : 'cp861',
+ 'csibm861' : 'cp861',
+ 'ibm861' : 'cp861',
+
+ # cp862 codec
+ '862' : 'cp862',
+ 'cspc862latinhebrew' : 'cp862',
+ 'ibm862' : 'cp862',
+
+ # cp863 codec
+ '863' : 'cp863',
+ 'csibm863' : 'cp863',
+ 'ibm863' : 'cp863',
+
+ # cp864 codec
+ '864' : 'cp864',
+ 'csibm864' : 'cp864',
+ 'ibm864' : 'cp864',
+
+ # cp865 codec
+ '865' : 'cp865',
+ 'csibm865' : 'cp865',
+ 'ibm865' : 'cp865',
+
+ # cp866 codec
+ '866' : 'cp866',
+ 'csibm866' : 'cp866',
+ 'ibm866' : 'cp866',
+
+ # cp869 codec
+ '869' : 'cp869',
+ 'cp_gr' : 'cp869',
+ 'csibm869' : 'cp869',
+ 'ibm869' : 'cp869',
+
+ # cp932 codec
+ '932' : 'cp932',
+ 'ms932' : 'cp932',
+ 'mskanji' : 'cp932',
+ 'ms_kanji' : 'cp932',
+
+ # cp949 codec
+ '949' : 'cp949',
+ 'ms949' : 'cp949',
+ 'uhc' : 'cp949',
+
+ # cp950 codec
+ '950' : 'cp950',
+ 'ms950' : 'cp950',
+
+ # euc_jis_2004 codec
+ 'jisx0213' : 'euc_jis_2004',
+ 'eucjis2004' : 'euc_jis_2004',
+ 'euc_jis2004' : 'euc_jis_2004',
+
+ # euc_jisx0213 codec
+ 'eucjisx0213' : 'euc_jisx0213',
+
+ # euc_jp codec
+ 'eucjp' : 'euc_jp',
+ 'ujis' : 'euc_jp',
+ 'u_jis' : 'euc_jp',
+
+ # euc_kr codec
+ 'euckr' : 'euc_kr',
+ 'korean' : 'euc_kr',
+ 'ksc5601' : 'euc_kr',
+ 'ks_c_5601' : 'euc_kr',
+ 'ks_c_5601_1987' : 'euc_kr',
+ 'ksx1001' : 'euc_kr',
+ 'ks_x_1001' : 'euc_kr',
+
+ # gb18030 codec
+ 'gb18030_2000' : 'gb18030',
+
+ # gb2312 codec
+ 'chinese' : 'gb2312',
+ 'csiso58gb231280' : 'gb2312',
+ 'euc_cn' : 'gb2312',
+ 'euccn' : 'gb2312',
+ 'eucgb2312_cn' : 'gb2312',
+ 'gb2312_1980' : 'gb2312',
+ 'gb2312_80' : 'gb2312',
+ 'iso_ir_58' : 'gb2312',
+
+ # gbk codec
+ '936' : 'gbk',
+ 'cp936' : 'gbk',
+ 'ms936' : 'gbk',
+
+ # hex_codec codec
+ 'hex' : 'hex_codec',
+
+ # hp_roman8 codec
+ 'roman8' : 'hp_roman8',
+ 'r8' : 'hp_roman8',
+ 'csHPRoman8' : 'hp_roman8',
+
+ # hz codec
+ 'hzgb' : 'hz',
+ 'hz_gb' : 'hz',
+ 'hz_gb_2312' : 'hz',
+
+ # iso2022_jp codec
+ 'csiso2022jp' : 'iso2022_jp',
+ 'iso2022jp' : 'iso2022_jp',
+ 'iso_2022_jp' : 'iso2022_jp',
+
+ # iso2022_jp_1 codec
+ 'iso2022jp_1' : 'iso2022_jp_1',
+ 'iso_2022_jp_1' : 'iso2022_jp_1',
+
+ # iso2022_jp_2 codec
+ 'iso2022jp_2' : 'iso2022_jp_2',
+ 'iso_2022_jp_2' : 'iso2022_jp_2',
+
+ # iso2022_jp_2004 codec
+ 'iso_2022_jp_2004' : 'iso2022_jp_2004',
+ 'iso2022jp_2004' : 'iso2022_jp_2004',
+
+ # iso2022_jp_3 codec
+ 'iso2022jp_3' : 'iso2022_jp_3',
+ 'iso_2022_jp_3' : 'iso2022_jp_3',
+
+ # iso2022_jp_ext codec
+ 'iso2022jp_ext' : 'iso2022_jp_ext',
+ 'iso_2022_jp_ext' : 'iso2022_jp_ext',
+
+ # iso2022_kr codec
+ 'csiso2022kr' : 'iso2022_kr',
+ 'iso2022kr' : 'iso2022_kr',
+ 'iso_2022_kr' : 'iso2022_kr',
+
+ # iso8859_10 codec
+ 'csisolatin6' : 'iso8859_10',
+ 'iso_8859_10' : 'iso8859_10',
+ 'iso_8859_10_1992' : 'iso8859_10',
+ 'iso_ir_157' : 'iso8859_10',
+ 'l6' : 'iso8859_10',
+ 'latin6' : 'iso8859_10',
+
+ # iso8859_11 codec
+ 'thai' : 'iso8859_11',
+ 'iso_8859_11' : 'iso8859_11',
+ 'iso_8859_11_2001' : 'iso8859_11',
+
+ # iso8859_13 codec
+ 'iso_8859_13' : 'iso8859_13',
+ 'l7' : 'iso8859_13',
+ 'latin7' : 'iso8859_13',
+
+ # iso8859_14 codec
+ 'iso_8859_14' : 'iso8859_14',
+ 'iso_8859_14_1998' : 'iso8859_14',
+ 'iso_celtic' : 'iso8859_14',
+ 'iso_ir_199' : 'iso8859_14',
+ 'l8' : 'iso8859_14',
+ 'latin8' : 'iso8859_14',
+
+ # iso8859_15 codec
+ 'iso_8859_15' : 'iso8859_15',
+ 'l9' : 'iso8859_15',
+ 'latin9' : 'iso8859_15',
+
+ # iso8859_16 codec
+ 'iso_8859_16' : 'iso8859_16',
+ 'iso_8859_16_2001' : 'iso8859_16',
+ 'iso_ir_226' : 'iso8859_16',
+ 'l10' : 'iso8859_16',
+ 'latin10' : 'iso8859_16',
+
+ # iso8859_2 codec
+ 'csisolatin2' : 'iso8859_2',
+ 'iso_8859_2' : 'iso8859_2',
+ 'iso_8859_2_1987' : 'iso8859_2',
+ 'iso_ir_101' : 'iso8859_2',
+ 'l2' : 'iso8859_2',
+ 'latin2' : 'iso8859_2',
+
+ # iso8859_3 codec
+ 'csisolatin3' : 'iso8859_3',
+ 'iso_8859_3' : 'iso8859_3',
+ 'iso_8859_3_1988' : 'iso8859_3',
+ 'iso_ir_109' : 'iso8859_3',
+ 'l3' : 'iso8859_3',
+ 'latin3' : 'iso8859_3',
+
+ # iso8859_4 codec
+ 'csisolatin4' : 'iso8859_4',
+ 'iso_8859_4' : 'iso8859_4',
+ 'iso_8859_4_1988' : 'iso8859_4',
+ 'iso_ir_110' : 'iso8859_4',
+ 'l4' : 'iso8859_4',
+ 'latin4' : 'iso8859_4',
+
+ # iso8859_5 codec
+ 'csisolatincyrillic' : 'iso8859_5',
+ 'cyrillic' : 'iso8859_5',
+ 'iso_8859_5' : 'iso8859_5',
+ 'iso_8859_5_1988' : 'iso8859_5',
+ 'iso_ir_144' : 'iso8859_5',
+
+ # iso8859_6 codec
+ 'arabic' : 'iso8859_6',
+ 'asmo_708' : 'iso8859_6',
+ 'csisolatinarabic' : 'iso8859_6',
+ 'ecma_114' : 'iso8859_6',
+ 'iso_8859_6' : 'iso8859_6',
+ 'iso_8859_6_1987' : 'iso8859_6',
+ 'iso_ir_127' : 'iso8859_6',
+
+ # iso8859_7 codec
+ 'csisolatingreek' : 'iso8859_7',
+ 'ecma_118' : 'iso8859_7',
+ 'elot_928' : 'iso8859_7',
+ 'greek' : 'iso8859_7',
+ 'greek8' : 'iso8859_7',
+ 'iso_8859_7' : 'iso8859_7',
+ 'iso_8859_7_1987' : 'iso8859_7',
+ 'iso_ir_126' : 'iso8859_7',
+
+ # iso8859_8 codec
+ 'csisolatinhebrew' : 'iso8859_8',
+ 'hebrew' : 'iso8859_8',
+ 'iso_8859_8' : 'iso8859_8',
+ 'iso_8859_8_1988' : 'iso8859_8',
+ 'iso_ir_138' : 'iso8859_8',
+
+ # iso8859_9 codec
+ 'csisolatin5' : 'iso8859_9',
+ 'iso_8859_9' : 'iso8859_9',
+ 'iso_8859_9_1989' : 'iso8859_9',
+ 'iso_ir_148' : 'iso8859_9',
+ 'l5' : 'iso8859_9',
+ 'latin5' : 'iso8859_9',
+
+ # johab codec
+ 'cp1361' : 'johab',
+ 'ms1361' : 'johab',
+
+ # koi8_r codec
+ 'cskoi8r' : 'koi8_r',
+
+ # kz1048 codec
+ 'kz_1048' : 'kz1048',
+ 'rk1048' : 'kz1048',
+ 'strk1048_2002' : 'kz1048',
+
+ # latin_1 codec
+ #
+ # Note that the latin_1 codec is implemented internally in C and a
+ # lot faster than the charmap codec iso8859_1 which uses the same
+ # encoding. This is why we discourage the use of the iso8859_1
+ # codec and alias it to latin_1 instead.
+ #
+ '8859' : 'latin_1',
+ 'cp819' : 'latin_1',
+ 'csisolatin1' : 'latin_1',
+ 'ibm819' : 'latin_1',
+ 'iso8859' : 'latin_1',
+ 'iso8859_1' : 'latin_1',
+ 'iso_8859_1' : 'latin_1',
+ 'iso_8859_1_1987' : 'latin_1',
+ 'iso_ir_100' : 'latin_1',
+ 'l1' : 'latin_1',
+ 'latin' : 'latin_1',
+ 'latin1' : 'latin_1',
+
+ # mac_cyrillic codec
+ 'maccyrillic' : 'mac_cyrillic',
+
+ # mac_greek codec
+ 'macgreek' : 'mac_greek',
+
+ # mac_iceland codec
+ 'maciceland' : 'mac_iceland',
+
+ # mac_latin2 codec
+ 'maccentraleurope' : 'mac_latin2',
+ 'maclatin2' : 'mac_latin2',
+
+ # mac_roman codec
+ 'macintosh' : 'mac_roman',
+ 'macroman' : 'mac_roman',
+
+ # mac_turkish codec
+ 'macturkish' : 'mac_turkish',
+
+ # mbcs codec
+ 'ansi' : 'mbcs',
+ 'dbcs' : 'mbcs',
+
+ # ptcp154 codec
+ 'csptcp154' : 'ptcp154',
+ 'pt154' : 'ptcp154',
+ 'cp154' : 'ptcp154',
+ 'cyrillic_asian' : 'ptcp154',
+
+ # quopri_codec codec
+ 'quopri' : 'quopri_codec',
+ 'quoted_printable' : 'quopri_codec',
+ 'quotedprintable' : 'quopri_codec',
+
+ # rot_13 codec
+ 'rot13' : 'rot_13',
+
+ # shift_jis codec
+ 'csshiftjis' : 'shift_jis',
+ 'shiftjis' : 'shift_jis',
+ 'sjis' : 'shift_jis',
+ 's_jis' : 'shift_jis',
+
+ # shift_jis_2004 codec
+ 'shiftjis2004' : 'shift_jis_2004',
+ 'sjis_2004' : 'shift_jis_2004',
+ 's_jis_2004' : 'shift_jis_2004',
+
+ # shift_jisx0213 codec
+ 'shiftjisx0213' : 'shift_jisx0213',
+ 'sjisx0213' : 'shift_jisx0213',
+ 's_jisx0213' : 'shift_jisx0213',
+
+ # tactis codec
+ 'tis260' : 'tactis',
+
+ # tis_620 codec
+ 'tis620' : 'tis_620',
+ 'tis_620_0' : 'tis_620',
+ 'tis_620_2529_0' : 'tis_620',
+ 'tis_620_2529_1' : 'tis_620',
+ 'iso_ir_166' : 'tis_620',
+
+ # utf_16 codec
+ 'u16' : 'utf_16',
+ 'utf16' : 'utf_16',
+
+ # utf_16_be codec
+ 'unicodebigunmarked' : 'utf_16_be',
+ 'utf_16be' : 'utf_16_be',
+
+ # utf_16_le codec
+ 'unicodelittleunmarked' : 'utf_16_le',
+ 'utf_16le' : 'utf_16_le',
+
+ # utf_32 codec
+ 'u32' : 'utf_32',
+ 'utf32' : 'utf_32',
+
+ # utf_32_be codec
+ 'utf_32be' : 'utf_32_be',
+
+ # utf_32_le codec
+ 'utf_32le' : 'utf_32_le',
+
+ # utf_7 codec
+ 'u7' : 'utf_7',
+ 'utf7' : 'utf_7',
+ 'unicode_1_1_utf_7' : 'utf_7',
+
+ # utf_8 codec
+ 'u8' : 'utf_8',
+ 'utf' : 'utf_8',
+ 'utf8' : 'utf_8',
+ 'utf8_ucs2' : 'utf_8',
+ 'utf8_ucs4' : 'utf_8',
+
+ # uu_codec codec
+ 'uu' : 'uu_codec',
+
+ # zlib_codec codec
+ 'zip' : 'zlib_codec',
+ 'zlib' : 'zlib_codec',
+
+ # temporary mac CJK aliases, will be replaced by proper codecs in 3.1
+ 'x_mac_japanese' : 'shift_jis',
+ 'x_mac_korean' : 'euc_kr',
+ 'x_mac_simp_chinese' : 'gb2312',
+ 'x_mac_trad_chinese' : 'big5',
+}
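+
+# Illustrative effect (expected behaviour, not executed here): these keys are
+# matched against the normalized name in encodings.__init__.search_function.
+# >>> import codecs
+# >>> codecs.lookup('ANSI_X3.4-1968').name
+# 'ascii'
+# >>> codecs.lookup('UTF8').name
+# 'utf-8'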
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/ascii.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/ascii.py
new file mode 100644
index 00000000..2033cde9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/ascii.py
@@ -0,0 +1,50 @@
+""" Python 'ascii' Codec
+
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ # Note: Binding these as C functions will result in the class not
+ # converting them to methods. This is intended.
+ encode = codecs.ascii_encode
+ decode = codecs.ascii_decode
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.ascii_encode(input, self.errors)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.ascii_decode(input, self.errors)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+class StreamConverter(StreamWriter,StreamReader):
+
+ encode = codecs.ascii_decode
+ decode = codecs.ascii_encode
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='ascii',
+ encode=Codec.encode,
+ decode=Codec.decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/base64_codec.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/base64_codec.py
new file mode 100644
index 00000000..8e7703b3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/base64_codec.py
@@ -0,0 +1,55 @@
+"""Python 'base64_codec' Codec - base64 content transfer encoding.
+
+This codec de/encodes from bytes to bytes.
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import base64
+
+### Codec APIs
+
+def base64_encode(input, errors='strict'):
+ assert errors == 'strict'
+ return (base64.encodebytes(input), len(input))
+
+def base64_decode(input, errors='strict'):
+ assert errors == 'strict'
+ return (base64.decodebytes(input), len(input))
+
+class Codec(codecs.Codec):
+ def encode(self, input, errors='strict'):
+ return base64_encode(input, errors)
+ def decode(self, input, errors='strict'):
+ return base64_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ assert self.errors == 'strict'
+ return base64.encodebytes(input)
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ assert self.errors == 'strict'
+ return base64.decodebytes(input)
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+ charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='base64',
+ encode=base64_encode,
+ decode=base64_decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ _is_text_encoding=False,
+ )
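+
+# Illustrative usage: because _is_text_encoding is False, this codec is not
+# reachable through str.encode(); it is used on bytes via the codecs
+# functions (expected behaviour, not executed here):
+# >>> import codecs
+# >>> codecs.encode(b'spam', 'base64')
+# b'c3BhbQ==\n'
+# >>> codecs.decode(b'c3BhbQ==\n', 'base64')
+# b'spam'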
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/big5.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/big5.py
new file mode 100644
index 00000000..7adeb0e1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/big5.py
@@ -0,0 +1,39 @@
+#
+# big5.py: Python Unicode Codec for BIG5
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_tw, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_tw.getcodec('big5')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='big5',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/big5hkscs.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/big5hkscs.py
new file mode 100644
index 00000000..350df37b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/big5hkscs.py
@@ -0,0 +1,39 @@
+#
+# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_hk, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_hk.getcodec('big5hkscs')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='big5hkscs',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/bz2_codec.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/bz2_codec.py
new file mode 100644
index 00000000..fd9495e3
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/bz2_codec.py
@@ -0,0 +1,78 @@
+"""Python 'bz2_codec' Codec - bz2 compression encoding.
+
+This codec de/encodes from bytes to bytes; it is not a text encoding, so it
+is used through codecs.encode()/codecs.decode() rather than str.encode().
+
+Adapted by Raymond Hettinger from zlib_codec.py which was written
+by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import bz2 # this codec needs the optional bz2 module !
+
+### Codec APIs
+
+def bz2_encode(input, errors='strict'):
+ assert errors == 'strict'
+ return (bz2.compress(input), len(input))
+
+def bz2_decode(input, errors='strict'):
+ assert errors == 'strict'
+ return (bz2.decompress(input), len(input))
+
+class Codec(codecs.Codec):
+ def encode(self, input, errors='strict'):
+ return bz2_encode(input, errors)
+ def decode(self, input, errors='strict'):
+ return bz2_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def __init__(self, errors='strict'):
+ assert errors == 'strict'
+ self.errors = errors
+ self.compressobj = bz2.BZ2Compressor()
+
+ def encode(self, input, final=False):
+ if final:
+ c = self.compressobj.compress(input)
+ return c + self.compressobj.flush()
+ else:
+ return self.compressobj.compress(input)
+
+ def reset(self):
+ self.compressobj = bz2.BZ2Compressor()
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def __init__(self, errors='strict'):
+ assert errors == 'strict'
+ self.errors = errors
+ self.decompressobj = bz2.BZ2Decompressor()
+
+ def decode(self, input, final=False):
+ try:
+ return self.decompressobj.decompress(input)
+ except EOFError:
+ return ''
+
+ def reset(self):
+ self.decompressobj = bz2.BZ2Decompressor()
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+ charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name="bz2",
+ encode=bz2_encode,
+ decode=bz2_decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ _is_text_encoding=False,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/charmap.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/charmap.py
new file mode 100644
index 00000000..81189b16
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/charmap.py
@@ -0,0 +1,69 @@
+""" Generic Python Character Mapping Codec.
+
+ Use this codec directly, supplying an explicit mapping, rather than
+ through the automatic conversion mechanisms of str.encode()/bytes.decode().
+
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+
+(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ # Note: Binding these as C functions will result in the class not
+ # converting them to methods. This is intended.
+ encode = codecs.charmap_encode
+ decode = codecs.charmap_decode
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def __init__(self, errors='strict', mapping=None):
+ codecs.IncrementalEncoder.__init__(self, errors)
+ self.mapping = mapping
+
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input, self.errors, self.mapping)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def __init__(self, errors='strict', mapping=None):
+ codecs.IncrementalDecoder.__init__(self, errors)
+ self.mapping = mapping
+
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input, self.errors, self.mapping)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+
+ def __init__(self,stream,errors='strict',mapping=None):
+ codecs.StreamWriter.__init__(self,stream,errors)
+ self.mapping = mapping
+
+ def encode(self,input,errors='strict'):
+ return Codec.encode(input,errors,self.mapping)
+
+class StreamReader(Codec,codecs.StreamReader):
+
+ def __init__(self,stream,errors='strict',mapping=None):
+ codecs.StreamReader.__init__(self,stream,errors)
+ self.mapping = mapping
+
+ def decode(self,input,errors='strict'):
+ return Codec.decode(input,errors,self.mapping)
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='charmap',
+ encode=Codec.encode,
+ decode=Codec.decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ )
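+
+# Illustrative usage of the underlying primitives (expected behaviour, not
+# executed here): the cp* modules below supply an explicit
+# decoding_table/encoding_table pair as the mapping argument.
+# >>> import codecs
+# >>> codecs.charmap_decode(b'\x00\x01', 'strict', 'ab')
+# ('ab', 2)
+# >>> codecs.charmap_encode('ab', 'strict', codecs.charmap_build('ab'))
+# (b'\x00\x01', 2)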
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp037.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp037.py
new file mode 100644
index 00000000..4edd708f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp037.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp037',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x9c' # 0x04 -> CONTROL
+ '\t' # 0x05 -> HORIZONTAL TABULATION
+ '\x86' # 0x06 -> CONTROL
+ '\x7f' # 0x07 -> DELETE
+ '\x97' # 0x08 -> CONTROL
+ '\x8d' # 0x09 -> CONTROL
+ '\x8e' # 0x0A -> CONTROL
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x9d' # 0x14 -> CONTROL
+ '\x85' # 0x15 -> CONTROL
+ '\x08' # 0x16 -> BACKSPACE
+ '\x87' # 0x17 -> CONTROL
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x92' # 0x1A -> CONTROL
+ '\x8f' # 0x1B -> CONTROL
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ '\x80' # 0x20 -> CONTROL
+ '\x81' # 0x21 -> CONTROL
+ '\x82' # 0x22 -> CONTROL
+ '\x83' # 0x23 -> CONTROL
+ '\x84' # 0x24 -> CONTROL
+ '\n' # 0x25 -> LINE FEED
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK
+ '\x1b' # 0x27 -> ESCAPE
+ '\x88' # 0x28 -> CONTROL
+ '\x89' # 0x29 -> CONTROL
+ '\x8a' # 0x2A -> CONTROL
+ '\x8b' # 0x2B -> CONTROL
+ '\x8c' # 0x2C -> CONTROL
+ '\x05' # 0x2D -> ENQUIRY
+ '\x06' # 0x2E -> ACKNOWLEDGE
+ '\x07' # 0x2F -> BELL
+ '\x90' # 0x30 -> CONTROL
+ '\x91' # 0x31 -> CONTROL
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE
+ '\x93' # 0x33 -> CONTROL
+ '\x94' # 0x34 -> CONTROL
+ '\x95' # 0x35 -> CONTROL
+ '\x96' # 0x36 -> CONTROL
+ '\x04' # 0x37 -> END OF TRANSMISSION
+ '\x98' # 0x38 -> CONTROL
+ '\x99' # 0x39 -> CONTROL
+ '\x9a' # 0x3A -> CONTROL
+ '\x9b' # 0x3B -> CONTROL
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
+ '\x9e' # 0x3E -> CONTROL
+ '\x1a' # 0x3F -> SUBSTITUTE
+ ' ' # 0x40 -> SPACE
+ '\xa0' # 0x41 -> NO-BREAK SPACE
+ '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
+ '\xa2' # 0x4A -> CENT SIGN
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '|' # 0x4F -> VERTICAL LINE
+ '&' # 0x50 -> AMPERSAND
+ '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
+ '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
+ '!' # 0x5A -> EXCLAMATION MARK
+ '$' # 0x5B -> DOLLAR SIGN
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '\xac' # 0x5F -> NOT SIGN
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xa6' # 0x6A -> BROKEN BAR
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
+ '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
+ '`' # 0x79 -> GRAVE ACCENT
+ ':' # 0x7A -> COLON
+ '#' # 0x7B -> NUMBER SIGN
+ '@' # 0x7C -> COMMERCIAL AT
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '"' # 0x7F -> QUOTATION MARK
+ '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
+ '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
+ '\xb1' # 0x8F -> PLUS-MINUS SIGN
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
+ '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
+ '\xb8' # 0x9D -> CEDILLA
+ '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
+ '\xa4' # 0x9F -> CURRENCY SIGN
+ '\xb5' # 0xA0 -> MICRO SIGN
+ '~' # 0xA1 -> TILDE
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
+ '\xbf' # 0xAB -> INVERTED QUESTION MARK
+ '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
+ '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
+ '\xae' # 0xAF -> REGISTERED SIGN
+ '^' # 0xB0 -> CIRCUMFLEX ACCENT
+ '\xa3' # 0xB1 -> POUND SIGN
+ '\xa5' # 0xB2 -> YEN SIGN
+ '\xb7' # 0xB3 -> MIDDLE DOT
+ '\xa9' # 0xB4 -> COPYRIGHT SIGN
+ '\xa7' # 0xB5 -> SECTION SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
+ '[' # 0xBA -> LEFT SQUARE BRACKET
+ ']' # 0xBB -> RIGHT SQUARE BRACKET
+ '\xaf' # 0xBC -> MACRON
+ '\xa8' # 0xBD -> DIAERESIS
+ '\xb4' # 0xBE -> ACUTE ACCENT
+ '\xd7' # 0xBF -> MULTIPLICATION SIGN
+ '{' # 0xC0 -> LEFT CURLY BRACKET
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
+ '}' # 0xD0 -> RIGHT CURLY BRACKET
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb9' # 0xDA -> SUPERSCRIPT ONE
+ '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
+ '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\\' # 0xE0 -> REVERSE SOLIDUS
+ '\xf7' # 0xE1 -> DIVISION SIGN
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\x9f' # 0xFF -> CONTROL
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1006.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1006.py
new file mode 100644
index 00000000..a1221c3e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1006.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1006',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\x80' # 0x80 ->
+ '\x81' # 0x81 ->
+ '\x82' # 0x82 ->
+ '\x83' # 0x83 ->
+ '\x84' # 0x84 ->
+ '\x85' # 0x85 ->
+ '\x86' # 0x86 ->
+ '\x87' # 0x87 ->
+ '\x88' # 0x88 ->
+ '\x89' # 0x89 ->
+ '\x8a' # 0x8A ->
+ '\x8b' # 0x8B ->
+ '\x8c' # 0x8C ->
+ '\x8d' # 0x8D ->
+ '\x8e' # 0x8E ->
+ '\x8f' # 0x8F ->
+ '\x90' # 0x90 ->
+ '\x91' # 0x91 ->
+ '\x92' # 0x92 ->
+ '\x93' # 0x93 ->
+ '\x94' # 0x94 ->
+ '\x95' # 0x95 ->
+ '\x96' # 0x96 ->
+ '\x97' # 0x97 ->
+ '\x98' # 0x98 ->
+ '\x99' # 0x99 ->
+ '\x9a' # 0x9A ->
+ '\x9b' # 0x9B ->
+ '\x9c' # 0x9C ->
+ '\x9d' # 0x9D ->
+ '\x9e' # 0x9E ->
+ '\x9f' # 0x9F ->
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
+ '\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
+ '\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
+ '\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
+ '\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
+ '\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
+ '\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
+ '\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
+ '\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
+ '\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
+ '\u060c' # 0xAB -> ARABIC COMMA
+ '\u061b' # 0xAC -> ARABIC SEMICOLON
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\u061f' # 0xAE -> ARABIC QUESTION MARK
+ '\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
+ '\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
+ '\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
+ '\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
+ '\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
+ '\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
+ '\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
+ '\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
+ '\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
+ '\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
+ '\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
+ '\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
+ '\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
+ '\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
+ '\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
+ '\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
+ '\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
+ '\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
+ '\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
+ '\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
+ '\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
+ '\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
+ '\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
+ '\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
+ '\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORM
+ '\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
+ '\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
+ '\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
+ '\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
+ '\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
+ '\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
+ '\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
+ '\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
+ '\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
+ '\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
+ '\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
+ '\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
+ '\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
+ '\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
+ '\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
+ '\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
+ '\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
+ '\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
+ '\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
+ '\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
+ '\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
+ '\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
+ '\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
+ '\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
+ '\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
+ '\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
+ '\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
+ '\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
+ '\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
+ '\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
+ '\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
+ '\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
+ '\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
+ '\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
+ '\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
+ '\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
+ '\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
+ '\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
+ '\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
+ '\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
+ '\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
+ '\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
+ '\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
+ '\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
+ '\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
+ '\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
+ '\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
+ '\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
+ '\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
+ '\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
+ '\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
+ '\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
+ '\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
+ '\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
+ '\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
+ '\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1026.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1026.py
new file mode 100644
index 00000000..46f71f74
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1026.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1026',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x9c' # 0x04 -> CONTROL
+ '\t' # 0x05 -> HORIZONTAL TABULATION
+ '\x86' # 0x06 -> CONTROL
+ '\x7f' # 0x07 -> DELETE
+ '\x97' # 0x08 -> CONTROL
+ '\x8d' # 0x09 -> CONTROL
+ '\x8e' # 0x0A -> CONTROL
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x9d' # 0x14 -> CONTROL
+ '\x85' # 0x15 -> CONTROL
+ '\x08' # 0x16 -> BACKSPACE
+ '\x87' # 0x17 -> CONTROL
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x92' # 0x1A -> CONTROL
+ '\x8f' # 0x1B -> CONTROL
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ '\x80' # 0x20 -> CONTROL
+ '\x81' # 0x21 -> CONTROL
+ '\x82' # 0x22 -> CONTROL
+ '\x83' # 0x23 -> CONTROL
+ '\x84' # 0x24 -> CONTROL
+ '\n' # 0x25 -> LINE FEED
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK
+ '\x1b' # 0x27 -> ESCAPE
+ '\x88' # 0x28 -> CONTROL
+ '\x89' # 0x29 -> CONTROL
+ '\x8a' # 0x2A -> CONTROL
+ '\x8b' # 0x2B -> CONTROL
+ '\x8c' # 0x2C -> CONTROL
+ '\x05' # 0x2D -> ENQUIRY
+ '\x06' # 0x2E -> ACKNOWLEDGE
+ '\x07' # 0x2F -> BELL
+ '\x90' # 0x30 -> CONTROL
+ '\x91' # 0x31 -> CONTROL
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE
+ '\x93' # 0x33 -> CONTROL
+ '\x94' # 0x34 -> CONTROL
+ '\x95' # 0x35 -> CONTROL
+ '\x96' # 0x36 -> CONTROL
+ '\x04' # 0x37 -> END OF TRANSMISSION
+ '\x98' # 0x38 -> CONTROL
+ '\x99' # 0x39 -> CONTROL
+ '\x9a' # 0x3A -> CONTROL
+ '\x9b' # 0x3B -> CONTROL
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
+ '\x9e' # 0x3E -> CONTROL
+ '\x1a' # 0x3F -> SUBSTITUTE
+ ' ' # 0x40 -> SPACE
+ '\xa0' # 0x41 -> NO-BREAK SPACE
+ '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '{' # 0x48 -> LEFT CURLY BRACKET
+ '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
+ '\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '!' # 0x4F -> EXCLAMATION MARK
+ '&' # 0x50 -> AMPERSAND
+ '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
+ '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
+ '\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
+ '\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '^' # 0x5F -> CIRCUMFLEX ACCENT
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '[' # 0x68 -> LEFT SQUARE BRACKET
+ '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
+ '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
+ ':' # 0x7A -> COLON
+ '\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '}' # 0x8C -> RIGHT CURLY BRACKET
+ '`' # 0x8D -> GRAVE ACCENT
+ '\xa6' # 0x8E -> BROKEN BAR
+ '\xb1' # 0x8F -> PLUS-MINUS SIGN
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
+ '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
+ '\xb8' # 0x9D -> CEDILLA
+ '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
+ '\xa4' # 0x9F -> CURRENCY SIGN
+ '\xb5' # 0xA0 -> MICRO SIGN
+ '\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
+ '\xbf' # 0xAB -> INVERTED QUESTION MARK
+ ']' # 0xAC -> RIGHT SQUARE BRACKET
+ '$' # 0xAD -> DOLLAR SIGN
+ '@' # 0xAE -> COMMERCIAL AT
+ '\xae' # 0xAF -> REGISTERED SIGN
+ '\xa2' # 0xB0 -> CENT SIGN
+ '\xa3' # 0xB1 -> POUND SIGN
+ '\xa5' # 0xB2 -> YEN SIGN
+ '\xb7' # 0xB3 -> MIDDLE DOT
+ '\xa9' # 0xB4 -> COPYRIGHT SIGN
+ '\xa7' # 0xB5 -> SECTION SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
+ '\xac' # 0xBA -> NOT SIGN
+ '|' # 0xBB -> VERTICAL LINE
+ '\xaf' # 0xBC -> MACRON
+ '\xa8' # 0xBD -> DIAERESIS
+ '\xb4' # 0xBE -> ACUTE ACCENT
+ '\xd7' # 0xBF -> MULTIPLICATION SIGN
+ '\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '~' # 0xCC -> TILDE
+ '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
+ '\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb9' # 0xDA -> SUPERSCRIPT ONE
+ '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\\' # 0xDC -> REVERSE SOLIDUS
+ '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
+ '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xf7' # 0xE1 -> DIVISION SIGN
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '#' # 0xEC -> NUMBER SIGN
+ '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '"' # 0xFC -> QUOTATION MARK
+ '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\x9f' # 0xFF -> CONTROL
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1125.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1125.py
new file mode 100644
index 00000000..b1fd69de
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1125.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec for CP1125
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1125',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
+ 0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
+ 0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
+ 0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
+ 0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
+ 0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
+ 0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
+ 0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
+ 0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
+ 0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
+ 0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
+ 0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
+ 0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
+ 0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
+ 0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
+ 0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
+ 0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
+ 0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
+ 0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
+ 0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
+ 0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
+ 0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
+ 0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
+ 0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
+ 0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
+ 0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
+ 0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
+ 0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
+ 0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
+ 0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
+ 0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
+ 0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
+ 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
+ 0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
+ 0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
+ 0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
+ 0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
+ 0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
+ 0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
+ 0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
+ 0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
+ 0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
+ 0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
+ 0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
+ 0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
+ 0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
+ 0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
+ 0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
+ 0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
+ 0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
+ 0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
+ 0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
+ 0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
+ 0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
+ 0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
+ 0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
+ 0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
+ 0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
+ 0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
+ 0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
+ 0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
+ 0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
+ 0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
+ 0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
+ 0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
+ 0x00f2: 0x0490, # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+ 0x00f3: 0x0491, # CYRILLIC SMALL LETTER GHE WITH UPTURN
+ 0x00f4: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ 0x00f5: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
+ 0x00f6: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x00f7: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x00f8: 0x0407, # CYRILLIC CAPITAL LETTER YI
+ 0x00f9: 0x0457, # CYRILLIC SMALL LETTER YI
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x2116, # NUMERO SIGN
+ 0x00fd: 0x00a4, # CURRENCY SIGN
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
+ '\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
+ '\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
+ '\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
+ '\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
+ '\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
+ '\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
+ '\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
+ '\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
+ '\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
+ '\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
+ '\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
+ '\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
+ '\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
+ '\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
+ '\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
+ '\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
+ '\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
+ '\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
+ '\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
+ '\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
+ '\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
+ '\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
+ '\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
+ '\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
+ '\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
+ '\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
+ '\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
+ '\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
+ '\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
+ '\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
+ '\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
+ '\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
+ '\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
+ '\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
+ '\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
+ '\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
+ '\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
+ '\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
+ '\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
+ '\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
+ '\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
+ '\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
+ '\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
+ '\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
+ '\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
+ '\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
+ '\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
+ '\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
+ '\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
+ '\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
+ '\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
+ '\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
+ '\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
+ '\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
+ '\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
+ '\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
+ '\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
+ '\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
+ '\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
+ '\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
+ '\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
+ '\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
+ '\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
+ '\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
+ '\u0490' # 0x00f2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+ '\u0491' # 0x00f3 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
+ '\u0404' # 0x00f4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ '\u0454' # 0x00f5 -> CYRILLIC SMALL LETTER UKRAINIAN IE
+ '\u0406' # 0x00f6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ '\u0456' # 0x00f7 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ '\u0407' # 0x00f8 -> CYRILLIC CAPITAL LETTER YI
+ '\u0457' # 0x00f9 -> CYRILLIC SMALL LETTER YI
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u2116' # 0x00fc -> NUMERO SIGN
+ '\xa4' # 0x00fd -> CURRENCY SIGN
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a4: 0x00fd, # CURRENCY SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
+ 0x0404: 0x00f4, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ 0x0406: 0x00f6, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x0407: 0x00f8, # CYRILLIC CAPITAL LETTER YI
+ 0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
+ 0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
+ 0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
+ 0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
+ 0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
+ 0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
+ 0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
+ 0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
+ 0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
+ 0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
+ 0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
+ 0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
+ 0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
+ 0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
+ 0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
+ 0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
+ 0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
+ 0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
+ 0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
+ 0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
+ 0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
+ 0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
+ 0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
+ 0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
+ 0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
+ 0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
+ 0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
+ 0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
+ 0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
+ 0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
+ 0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
+ 0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
+ 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
+ 0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
+ 0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
+ 0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
+ 0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
+ 0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
+ 0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
+ 0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
+ 0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
+ 0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
+ 0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
+ 0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
+ 0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
+ 0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
+ 0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
+ 0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
+ 0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
+ 0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
+ 0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
+ 0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
+ 0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
+ 0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
+ 0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
+ 0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
+ 0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
+ 0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
+ 0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
+ 0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
+ 0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
+ 0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
+ 0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
+ 0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
+ 0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
+ 0x0454: 0x00f5, # CYRILLIC SMALL LETTER UKRAINIAN IE
+ 0x0456: 0x00f7, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x0457: 0x00f9, # CYRILLIC SMALL LETTER YI
+ 0x0490: 0x00f2, # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+ 0x0491: 0x00f3, # CYRILLIC SMALL LETTER GHE WITH UPTURN
+ 0x2116: 0x00fc, # NUMERO SIGN
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1140.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1140.py
new file mode 100644
index 00000000..0a919d83
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1140.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1140',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x9c' # 0x04 -> CONTROL
+ '\t' # 0x05 -> HORIZONTAL TABULATION
+ '\x86' # 0x06 -> CONTROL
+ '\x7f' # 0x07 -> DELETE
+ '\x97' # 0x08 -> CONTROL
+ '\x8d' # 0x09 -> CONTROL
+ '\x8e' # 0x0A -> CONTROL
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x9d' # 0x14 -> CONTROL
+ '\x85' # 0x15 -> CONTROL
+ '\x08' # 0x16 -> BACKSPACE
+ '\x87' # 0x17 -> CONTROL
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x92' # 0x1A -> CONTROL
+ '\x8f' # 0x1B -> CONTROL
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ '\x80' # 0x20 -> CONTROL
+ '\x81' # 0x21 -> CONTROL
+ '\x82' # 0x22 -> CONTROL
+ '\x83' # 0x23 -> CONTROL
+ '\x84' # 0x24 -> CONTROL
+ '\n' # 0x25 -> LINE FEED
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK
+ '\x1b' # 0x27 -> ESCAPE
+ '\x88' # 0x28 -> CONTROL
+ '\x89' # 0x29 -> CONTROL
+ '\x8a' # 0x2A -> CONTROL
+ '\x8b' # 0x2B -> CONTROL
+ '\x8c' # 0x2C -> CONTROL
+ '\x05' # 0x2D -> ENQUIRY
+ '\x06' # 0x2E -> ACKNOWLEDGE
+ '\x07' # 0x2F -> BELL
+ '\x90' # 0x30 -> CONTROL
+ '\x91' # 0x31 -> CONTROL
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE
+ '\x93' # 0x33 -> CONTROL
+ '\x94' # 0x34 -> CONTROL
+ '\x95' # 0x35 -> CONTROL
+ '\x96' # 0x36 -> CONTROL
+ '\x04' # 0x37 -> END OF TRANSMISSION
+ '\x98' # 0x38 -> CONTROL
+ '\x99' # 0x39 -> CONTROL
+ '\x9a' # 0x3A -> CONTROL
+ '\x9b' # 0x3B -> CONTROL
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
+ '\x9e' # 0x3E -> CONTROL
+ '\x1a' # 0x3F -> SUBSTITUTE
+ ' ' # 0x40 -> SPACE
+ '\xa0' # 0x41 -> NO-BREAK SPACE
+ '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
+ '\xa2' # 0x4A -> CENT SIGN
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '|' # 0x4F -> VERTICAL LINE
+ '&' # 0x50 -> AMPERSAND
+ '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
+ '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
+ '!' # 0x5A -> EXCLAMATION MARK
+ '$' # 0x5B -> DOLLAR SIGN
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '\xac' # 0x5F -> NOT SIGN
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xa6' # 0x6A -> BROKEN BAR
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
+ '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
+ '`' # 0x79 -> GRAVE ACCENT
+ ':' # 0x7A -> COLON
+ '#' # 0x7B -> NUMBER SIGN
+ '@' # 0x7C -> COMMERCIAL AT
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '"' # 0x7F -> QUOTATION MARK
+ '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
+ '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
+ '\xb1' # 0x8F -> PLUS-MINUS SIGN
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
+ '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
+ '\xb8' # 0x9D -> CEDILLA
+ '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
+ '\u20ac' # 0x9F -> EURO SIGN
+ '\xb5' # 0xA0 -> MICRO SIGN
+ '~' # 0xA1 -> TILDE
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
+ '\xbf' # 0xAB -> INVERTED QUESTION MARK
+ '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
+ '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
+ '\xae' # 0xAF -> REGISTERED SIGN
+ '^' # 0xB0 -> CIRCUMFLEX ACCENT
+ '\xa3' # 0xB1 -> POUND SIGN
+ '\xa5' # 0xB2 -> YEN SIGN
+ '\xb7' # 0xB3 -> MIDDLE DOT
+ '\xa9' # 0xB4 -> COPYRIGHT SIGN
+ '\xa7' # 0xB5 -> SECTION SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
+ '[' # 0xBA -> LEFT SQUARE BRACKET
+ ']' # 0xBB -> RIGHT SQUARE BRACKET
+ '\xaf' # 0xBC -> MACRON
+ '\xa8' # 0xBD -> DIAERESIS
+ '\xb4' # 0xBE -> ACUTE ACCENT
+ '\xd7' # 0xBF -> MULTIPLICATION SIGN
+ '{' # 0xC0 -> LEFT CURLY BRACKET
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
+ '}' # 0xD0 -> RIGHT CURLY BRACKET
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb9' # 0xDA -> SUPERSCRIPT ONE
+ '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
+ '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\\' # 0xE0 -> REVERSE SOLIDUS
+ '\xf7' # 0xE1 -> DIVISION SIGN
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\x9f' # 0xFF -> CONTROL
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
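For reference, the generated module above is a thin wrapper around the charmap helpers it imports: decoding_table is a 256-character string indexed by byte value, and every class simply forwards to codecs.charmap_decode/charmap_encode with that table. A minimal sketch of exercising it directly, assuming the module is importable as encodings.cp1140 and using only byte values listed in the table above:

    import codecs
    from encodings import cp1140

    # EBCDIC bytes for "Hello", plus 0x9F, which cp1140 maps to the euro sign.
    raw = b'\xc8\x85\x93\x93\x96\x9f'
    text, consumed = codecs.charmap_decode(raw, 'strict', cp1140.decoding_table)
    assert text == 'Hello\u20ac' and consumed == len(raw)

    # The reverse direction goes through the encoding table built at import time.
    data, _ = codecs.charmap_encode(text, 'strict', cp1140.encoding_table)
    assert data == raw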
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1250.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1250.py
new file mode 100644
index 00000000..c2c83aaf
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1250.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1250',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\ufffe' # 0x83 -> UNDEFINED
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\ufffe' # 0x88 -> UNDEFINED
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
+ '\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
+ '\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
+ '\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\ufffe' # 0x98 -> UNDEFINED
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
+ '\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
+ '\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
+ '\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\u02c7' # 0xA1 -> CARON
+ '\u02d8' # 0xA2 -> BREVE
+ '\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\u02db' # 0xB2 -> OGONEK
+ '\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xb8' # 0xB8 -> CEDILLA
+ '\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
+ '\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
+ '\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
+ '\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
+ '\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
+ '\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
+ '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
+ '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
+ '\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
+ '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
+ '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
+ '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
+ '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
+ '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
+ '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
+ '\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
+ '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+ '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd7' # 0xD7 -> MULTIPLICATION SIGN
+ '\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
+ '\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
+ '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+ '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
+ '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
+ '\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
+ '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
+ '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
+ '\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
+ '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
+ '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
+ '\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
+ '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
+ '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
+ '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
+ '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
+ '\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
+ '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
+ '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf7' # 0xF7 -> DIVISION SIGN
+ '\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
+ '\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
+ '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
+ '\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
+ '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
+ '\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
+ '\u02d9' # 0xFF -> DOT ABOVE
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
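The "Encoding table" line above is the only derived state in the module: codecs.charmap_build inverts the 256-character decoding string into a lookup object usable by charmap_encode. A small round-trip sketch, assuming encodings.cp1250 is importable, with byte values read off the table above:

    import codecs
    from encodings import cp1250

    # 'Ł', 'ó' and 'ź' sit at 0xA3, 0xF3 and 0x9F in the decoding table above.
    data, consumed = codecs.charmap_encode('Łódź', 'strict', cp1250.encoding_table)
    assert data == b'\xa3\xf3d\x9f' and consumed == 4

    # Decoding with the same module's table restores the original text.
    assert codecs.charmap_decode(data, 'strict', cp1250.decoding_table)[0] == 'Łódź'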
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1251.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1251.py
new file mode 100644
index 00000000..22bc6600
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1251.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1251',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
+ '\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\u20ac' # 0x88 -> EURO SIGN
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
+ '\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
+ '\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
+ '\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
+ '\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\ufffe' # 0x98 -> UNDEFINED
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
+ '\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
+ '\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
+ '\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
+ '\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
+ '\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ '\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ '\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
+ '\u2116' # 0xB9 -> NUMERO SIGN
+ '\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
+ '\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
+ '\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
+ '\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
+ '\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
+ '\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
+ '\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
+ '\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
+ '\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
+ '\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
+ '\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
+ '\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
+ '\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
+ '\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
+ '\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
+ '\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
+ '\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
+ '\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
+ '\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
+ '\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
+ '\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
+ '\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
+ '\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
+ '\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
+ '\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
+ '\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
+ '\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
+ '\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
+ '\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
+ '\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
+ '\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
+ '\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
+ '\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
+ '\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
+ '\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
+ '\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
+ '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
+ '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
+ '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
+ '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
+ '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
+ '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
+ '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
+ '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
+ '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
+ '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
+ '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
+ '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
+ '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
+ '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
+ '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
+ '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
+ '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
+ '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
+ '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
+ '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
+ '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
+ '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
+ '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
+ '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
+ '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
+ '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
+ '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
+ '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
+ '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
+ '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
+ '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
+ '\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
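Every one of these modules exposes the same getregentry() hook; the encodings package's codec search function calls it and hands the returned CodecInfo to the registry, which is what makes the ordinary str/bytes methods work. A minimal sketch, assuming the interpreter running it ships this cp1251 module (byte values taken from the decoding table above):

    import codecs

    info = codecs.lookup('cp1251')    # resolved through encodings.cp1251.getregentry()
    assert info.name == 'cp1251'

    assert 'Привет'.encode('cp1251') == b'\xcf\xf0\xe8\xe2\xe5\xf2'
    assert b'\xcf\xf0\xe8\xe2\xe5\xf2'.decode('cp1251') == 'Привет'

    # The incremental decoder carries no state: a charmap maps one byte to one
    # character, so feeding the stream in arbitrary chunks yields the same text.
    dec = info.incrementaldecoder()
    assert dec.decode(b'\xcf\xf0') + dec.decode(b'\xe8\xe2\xe5\xf2', final=True) == 'Привет'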
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1252.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1252.py
new file mode 100644
index 00000000..c0e8088e
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1252.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1252',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
+ '\ufffe' # 0x8F -> UNDEFINED
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\u02dc' # 0x98 -> SMALL TILDE
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
+ '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
+ '\xa2' # 0xA2 -> CENT SIGN
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\xa5' # 0xA5 -> YEN SIGN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\xaf' # 0xAF -> MACRON
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xb8' # 0xB8 -> CEDILLA
+ '\xb9' # 0xB9 -> SUPERSCRIPT ONE
+ '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
+ '\xbf' # 0xBF -> INVERTED QUESTION MARK
+ '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
+ '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
+ '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd7' # 0xD7 -> MULTIPLICATION SIGN
+ '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
+ '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
+ '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe6' # 0xE6 -> LATIN SMALL LETTER AE
+ '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
+ '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
+ '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
+ '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
+ '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf7' # 0xF7 -> DIVISION SIGN
+ '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
+ '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
+ '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xfe' # 0xFE -> LATIN SMALL LETTER THORN
+ '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
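cp1252 agrees with latin-1 everywhere except 0x80-0x9F, where the C1 control block is replaced by the printable characters listed above (euro sign, curly quotes, dashes, Š/Œ/Ž and their lowercase forms), with five positions left UNDEFINED. A short sketch of that difference, assuming encodings.cp1252 is importable:

    import codecs
    from encodings import cp1252

    raw = b'\x93na\xefve\x94 \x96 \x80'
    text = codecs.charmap_decode(raw, 'strict', cp1252.decoding_table)[0]
    assert text == '\u201cna\xefve\u201d \u2013 \u20ac'   # “naïve” – €

    # latin-1 accepts the same bytes but yields C1 control characters instead.
    assert raw.decode('latin-1') == '\x93na\xefve\x94 \x96 \x80'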
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1253.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1253.py
new file mode 100644
index 00000000..ec9c0972
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1253.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1253',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\ufffe' # 0x88 -> UNDEFINED
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\ufffe' # 0x8A -> UNDEFINED
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\ufffe' # 0x8C -> UNDEFINED
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\ufffe' # 0x8E -> UNDEFINED
+ '\ufffe' # 0x8F -> UNDEFINED
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\ufffe' # 0x98 -> UNDEFINED
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\ufffe' # 0x9A -> UNDEFINED
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\ufffe' # 0x9C -> UNDEFINED
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\ufffe' # 0x9E -> UNDEFINED
+ '\ufffe' # 0x9F -> UNDEFINED
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
+ '\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\xa5' # 0xA5 -> YEN SIGN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\ufffe' # 0xAA -> UNDEFINED
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\u2015' # 0xAF -> HORIZONTAL BAR
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\u0384' # 0xB4 -> GREEK TONOS
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
+ '\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
+ '\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
+ '\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
+ '\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+ '\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
+ '\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
+ '\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
+ '\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
+ '\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
+ '\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
+ '\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
+ '\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
+ '\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
+ '\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
+ '\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
+ '\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
+ '\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
+ '\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
+ '\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
+ '\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
+ '\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
+ '\ufffe' # 0xD2 -> UNDEFINED
+ '\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
+ '\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
+ '\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
+ '\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
+ '\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
+ '\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
+ '\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
+ '\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ '\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ '\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
+ '\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
+ '\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
+ '\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
+ '\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+ '\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
+ '\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
+ '\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
+ '\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
+ '\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
+ '\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
+ '\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
+ '\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
+ '\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
+ '\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
+ '\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
+ '\u03bc' # 0xEC -> GREEK SMALL LETTER MU
+ '\u03bd' # 0xED -> GREEK SMALL LETTER NU
+ '\u03be' # 0xEE -> GREEK SMALL LETTER XI
+ '\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
+ '\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
+ '\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
+ '\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
+ '\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
+ '\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
+ '\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
+ '\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
+ '\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
+ '\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
+ '\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
+ '\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ '\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ '\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
+ '\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
+ '\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
+ '\ufffe' # 0xFF -> UNDEFINED
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
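cp1253 leaves several byte positions mapped to '\ufffe' UNDEFINED in the table above; charmap_decode treats that sentinel as "no mapping", so such bytes trigger the error handler instead of decoding. A sketch, assuming encodings.cp1253 is importable:

    import codecs
    from encodings import cp1253

    # 0xFF is one of the UNDEFINED positions in the decoding table above.
    try:
        codecs.charmap_decode(b'\xff', 'strict', cp1253.decoding_table)
    except UnicodeDecodeError:
        pass
    else:
        raise AssertionError('expected 0xFF to be rejected under strict')

    # Under 'replace' the undefined byte becomes U+FFFD, while defined bytes
    # such as 0xE1 (GREEK SMALL LETTER ALPHA) decode normally.
    text, _ = codecs.charmap_decode(b'\xe1\xff', 'replace', cp1253.decoding_table)
    assert text == '\u03b1\ufffd'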
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1254.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1254.py
new file mode 100644
index 00000000..4912327a
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1254.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1254',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\ufffe' # 0x8E -> UNDEFINED
+ '\ufffe' # 0x8F -> UNDEFINED
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\u02dc' # 0x98 -> SMALL TILDE
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\ufffe' # 0x9E -> UNDEFINED
+ '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
+ '\xa2' # 0xA2 -> CENT SIGN
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\xa5' # 0xA5 -> YEN SIGN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\xaf' # 0xAF -> MACRON
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xb8' # 0xB8 -> CEDILLA
+ '\xb9' # 0xB9 -> SUPERSCRIPT ONE
+ '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
+ '\xbf' # 0xBF -> INVERTED QUESTION MARK
+ '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
+ '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
+ '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd7' # 0xD7 -> MULTIPLICATION SIGN
+ '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
+ '\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
+ '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
+ '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe6' # 0xE6 -> LATIN SMALL LETTER AE
+ '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
+ '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
+ '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
+ '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
+ '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf7' # 0xF7 -> DIVISION SIGN
+ '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
+ '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
+ '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
+ '\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
+ '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
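
Each of these generated modules exposes its tables in two ways: directly, through encoding_table and decoding_table, and indirectly, through the codec registry, which imports encodings.cp1254 and calls getregentry() the first time the name 'cp1254' is looked up. A minimal round-trip sketch (for illustration only, not part of the patch), assuming a standard CPython 3.7 install where the module above is importable:

    import codecs
    import encodings.cp1254 as cp1254

    # Encode/decode directly against the generated tables ...
    raw, _ = codecs.charmap_encode('İstanbul', 'strict', cp1254.encoding_table)
    text, _ = codecs.charmap_decode(raw, 'strict', cp1254.decoding_table)
    assert raw == b'\xddstanbul'   # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
    assert text == 'İstanbul'

    # ... or go through the codec registry, which ends up calling getregentry() above.
    assert 'İstanbul'.encode('cp1254') == raw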
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1255.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1255.py
new file mode 100644
index 00000000..91ce26b9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1255.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1255',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\ufffe' # 0x8A -> UNDEFINED
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\ufffe' # 0x8C -> UNDEFINED
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\ufffe' # 0x8E -> UNDEFINED
+ '\ufffe' # 0x8F -> UNDEFINED
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\u02dc' # 0x98 -> SMALL TILDE
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\ufffe' # 0x9A -> UNDEFINED
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\ufffe' # 0x9C -> UNDEFINED
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\ufffe' # 0x9E -> UNDEFINED
+ '\ufffe' # 0x9F -> UNDEFINED
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
+ '\xa2' # 0xA2 -> CENT SIGN
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\u20aa' # 0xA4 -> NEW SHEQEL SIGN
+ '\xa5' # 0xA5 -> YEN SIGN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\xd7' # 0xAA -> MULTIPLICATION SIGN
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\xaf' # 0xAF -> MACRON
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xb8' # 0xB8 -> CEDILLA
+ '\xb9' # 0xB9 -> SUPERSCRIPT ONE
+ '\xf7' # 0xBA -> DIVISION SIGN
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
+ '\xbf' # 0xBF -> INVERTED QUESTION MARK
+ '\u05b0' # 0xC0 -> HEBREW POINT SHEVA
+ '\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
+ '\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
+ '\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
+ '\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
+ '\u05b5' # 0xC5 -> HEBREW POINT TSERE
+ '\u05b6' # 0xC6 -> HEBREW POINT SEGOL
+ '\u05b7' # 0xC7 -> HEBREW POINT PATAH
+ '\u05b8' # 0xC8 -> HEBREW POINT QAMATS
+ '\u05b9' # 0xC9 -> HEBREW POINT HOLAM
+ '\ufffe' # 0xCA -> UNDEFINED
+ '\u05bb' # 0xCB -> HEBREW POINT QUBUTS
+ '\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
+ '\u05bd' # 0xCD -> HEBREW POINT METEG
+ '\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
+ '\u05bf' # 0xCF -> HEBREW POINT RAFE
+ '\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
+ '\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
+ '\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
+ '\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
+ '\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
+ '\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
+ '\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
+ '\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
+ '\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
+ '\ufffe' # 0xD9 -> UNDEFINED
+ '\ufffe' # 0xDA -> UNDEFINED
+ '\ufffe' # 0xDB -> UNDEFINED
+ '\ufffe' # 0xDC -> UNDEFINED
+ '\ufffe' # 0xDD -> UNDEFINED
+ '\ufffe' # 0xDE -> UNDEFINED
+ '\ufffe' # 0xDF -> UNDEFINED
+ '\u05d0' # 0xE0 -> HEBREW LETTER ALEF
+ '\u05d1' # 0xE1 -> HEBREW LETTER BET
+ '\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
+ '\u05d3' # 0xE3 -> HEBREW LETTER DALET
+ '\u05d4' # 0xE4 -> HEBREW LETTER HE
+ '\u05d5' # 0xE5 -> HEBREW LETTER VAV
+ '\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
+ '\u05d7' # 0xE7 -> HEBREW LETTER HET
+ '\u05d8' # 0xE8 -> HEBREW LETTER TET
+ '\u05d9' # 0xE9 -> HEBREW LETTER YOD
+ '\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
+ '\u05db' # 0xEB -> HEBREW LETTER KAF
+ '\u05dc' # 0xEC -> HEBREW LETTER LAMED
+ '\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
+ '\u05de' # 0xEE -> HEBREW LETTER MEM
+ '\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
+ '\u05e0' # 0xF0 -> HEBREW LETTER NUN
+ '\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
+ '\u05e2' # 0xF2 -> HEBREW LETTER AYIN
+ '\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
+ '\u05e4' # 0xF4 -> HEBREW LETTER PE
+ '\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
+ '\u05e6' # 0xF6 -> HEBREW LETTER TSADI
+ '\u05e7' # 0xF7 -> HEBREW LETTER QOF
+ '\u05e8' # 0xF8 -> HEBREW LETTER RESH
+ '\u05e9' # 0xF9 -> HEBREW LETTER SHIN
+ '\u05ea' # 0xFA -> HEBREW LETTER TAV
+ '\ufffe' # 0xFB -> UNDEFINED
+ '\ufffe' # 0xFC -> UNDEFINED
+ '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
+ '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
+ '\ufffe' # 0xFF -> UNDEFINED
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
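
Positions marked UNDEFINED in the table above are stored as '\ufffe', which the charmap machinery treats as an unmapped byte: decoding such a byte under the default 'strict' handler raises UnicodeDecodeError, while the usual error handlers substitute instead. A small illustration with cp1255 (a sketch, assuming the codec is registered under its standard name):

    # 0xCA is listed as UNDEFINED in the cp1255 table above.
    try:
        b'\xca'.decode('cp1255')
    except UnicodeDecodeError:
        pass  # expected under the default errors='strict'

    # Other handlers degrade gracefully rather than raising.
    assert b'\xca'.decode('cp1255', errors='replace') == '\ufffd'

    # Defined positions decode normally.
    assert b'\x80\xe0'.decode('cp1255') == '\u20ac\u05d0'  # EURO SIGN, HEBREW LETTER ALEF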
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1256.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1256.py
new file mode 100644
index 00000000..fd6afab5
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1256.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1256 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1256.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1256',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\u067e' # 0x81 -> ARABIC LETTER PEH
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\u0679' # 0x8A -> ARABIC LETTER TTEH
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
+ '\u0686' # 0x8D -> ARABIC LETTER TCHEH
+ '\u0698' # 0x8E -> ARABIC LETTER JEH
+ '\u0688' # 0x8F -> ARABIC LETTER DDAL
+ '\u06af' # 0x90 -> ARABIC LETTER GAF
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\u06a9' # 0x98 -> ARABIC LETTER KEHEH
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\u0691' # 0x9A -> ARABIC LETTER RREH
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
+ '\u200c' # 0x9D -> ZERO WIDTH NON-JOINER
+ '\u200d' # 0x9E -> ZERO WIDTH JOINER
+ '\u06ba' # 0x9F -> ARABIC LETTER NOON GHUNNA
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\u060c' # 0xA1 -> ARABIC COMMA
+ '\xa2' # 0xA2 -> CENT SIGN
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\xa5' # 0xA5 -> YEN SIGN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\u06be' # 0xAA -> ARABIC LETTER HEH DOACHASHMEE
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\xaf' # 0xAF -> MACRON
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xb8' # 0xB8 -> CEDILLA
+ '\xb9' # 0xB9 -> SUPERSCRIPT ONE
+ '\u061b' # 0xBA -> ARABIC SEMICOLON
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
+ '\u061f' # 0xBF -> ARABIC QUESTION MARK
+ '\u06c1' # 0xC0 -> ARABIC LETTER HEH GOAL
+ '\u0621' # 0xC1 -> ARABIC LETTER HAMZA
+ '\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
+ '\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
+ '\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
+ '\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
+ '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
+ '\u0627' # 0xC7 -> ARABIC LETTER ALEF
+ '\u0628' # 0xC8 -> ARABIC LETTER BEH
+ '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
+ '\u062a' # 0xCA -> ARABIC LETTER TEH
+ '\u062b' # 0xCB -> ARABIC LETTER THEH
+ '\u062c' # 0xCC -> ARABIC LETTER JEEM
+ '\u062d' # 0xCD -> ARABIC LETTER HAH
+ '\u062e' # 0xCE -> ARABIC LETTER KHAH
+ '\u062f' # 0xCF -> ARABIC LETTER DAL
+ '\u0630' # 0xD0 -> ARABIC LETTER THAL
+ '\u0631' # 0xD1 -> ARABIC LETTER REH
+ '\u0632' # 0xD2 -> ARABIC LETTER ZAIN
+ '\u0633' # 0xD3 -> ARABIC LETTER SEEN
+ '\u0634' # 0xD4 -> ARABIC LETTER SHEEN
+ '\u0635' # 0xD5 -> ARABIC LETTER SAD
+ '\u0636' # 0xD6 -> ARABIC LETTER DAD
+ '\xd7' # 0xD7 -> MULTIPLICATION SIGN
+ '\u0637' # 0xD8 -> ARABIC LETTER TAH
+ '\u0638' # 0xD9 -> ARABIC LETTER ZAH
+ '\u0639' # 0xDA -> ARABIC LETTER AIN
+ '\u063a' # 0xDB -> ARABIC LETTER GHAIN
+ '\u0640' # 0xDC -> ARABIC TATWEEL
+ '\u0641' # 0xDD -> ARABIC LETTER FEH
+ '\u0642' # 0xDE -> ARABIC LETTER QAF
+ '\u0643' # 0xDF -> ARABIC LETTER KAF
+ '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
+ '\u0644' # 0xE1 -> ARABIC LETTER LAM
+ '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\u0645' # 0xE3 -> ARABIC LETTER MEEM
+ '\u0646' # 0xE4 -> ARABIC LETTER NOON
+ '\u0647' # 0xE5 -> ARABIC LETTER HEH
+ '\u0648' # 0xE6 -> ARABIC LETTER WAW
+ '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\u0649' # 0xEC -> ARABIC LETTER ALEF MAKSURA
+ '\u064a' # 0xED -> ARABIC LETTER YEH
+ '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\u064b' # 0xF0 -> ARABIC FATHATAN
+ '\u064c' # 0xF1 -> ARABIC DAMMATAN
+ '\u064d' # 0xF2 -> ARABIC KASRATAN
+ '\u064e' # 0xF3 -> ARABIC FATHA
+ '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\u064f' # 0xF5 -> ARABIC DAMMA
+ '\u0650' # 0xF6 -> ARABIC KASRA
+ '\xf7' # 0xF7 -> DIVISION SIGN
+ '\u0651' # 0xF8 -> ARABIC SHADDA
+ '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
+ '\u0652' # 0xFA -> ARABIC SUKUN
+ '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
+ '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
+ '\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
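
cp1256 is unusual among these code pages in that it interleaves Arabic letters with a subset of the Latin-1 accented letters in the 0xC0-0xFF range (compare 0xE0, 0xE2 and 0xE7-0xEB above with the Arabic positions around them), so Arabic and French text can share one byte stream. For illustration (outside the patch itself, assuming the codec is registered under the name 'cp1256'):

    # Arabic letters and the surviving Latin-1 accents live in the same 0xC0-0xFF range.
    assert 'سلام'.encode('cp1256') == b'\xd3\xe1\xc7\xe3'  # SEEN, LAM, ALEF, MEEM
    assert 'déjà'.encode('cp1256') == b'd\xe9j\xe0'        # 0xE9 and 0xE0 keep their Latin-1 meaning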
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1257.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1257.py
new file mode 100644
index 00000000..9ebc90d5
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1257.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1257',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\ufffe' # 0x83 -> UNDEFINED
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\ufffe' # 0x88 -> UNDEFINED
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\ufffe' # 0x8A -> UNDEFINED
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\ufffe' # 0x8C -> UNDEFINED
+ '\xa8' # 0x8D -> DIAERESIS
+ '\u02c7' # 0x8E -> CARON
+ '\xb8' # 0x8F -> CEDILLA
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\ufffe' # 0x98 -> UNDEFINED
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\ufffe' # 0x9A -> UNDEFINED
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\ufffe' # 0x9C -> UNDEFINED
+ '\xaf' # 0x9D -> MACRON
+ '\u02db' # 0x9E -> OGONEK
+ '\ufffe' # 0x9F -> UNDEFINED
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\ufffe' # 0xA1 -> UNDEFINED
+ '\xa2' # 0xA2 -> CENT SIGN
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\ufffe' # 0xA5 -> UNDEFINED
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
+ '\xb9' # 0xB9 -> SUPERSCRIPT ONE
+ '\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
+ '\xe6' # 0xBF -> LATIN SMALL LETTER AE
+ '\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
+ '\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
+ '\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
+ '\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
+ '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
+ '\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
+ '\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
+ '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
+ '\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
+ '\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
+ '\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
+ '\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
+ '\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
+ '\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
+ '\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
+ '\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
+ '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
+ '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd7' # 0xD7 -> MULTIPLICATION SIGN
+ '\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
+ '\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
+ '\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
+ '\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
+ '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ '\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
+ '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
+ '\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
+ '\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
+ '\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
+ '\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
+ '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
+ '\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
+ '\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
+ '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
+ '\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
+ '\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
+ '\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
+ '\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
+ '\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
+ '\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
+ '\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
+ '\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
+ '\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
+ '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
+ '\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
+ '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
+ '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf7' # 0xF7 -> DIVISION SIGN
+ '\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
+ '\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
+ '\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
+ '\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
+ '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
+ '\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
+ '\u02d9' # 0xFF -> DOT ABOVE
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1258.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1258.py
new file mode 100644
index 00000000..784378a8
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp1258.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp1258',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
+ '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
+ '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\u2020' # 0x86 -> DAGGER
+ '\u2021' # 0x87 -> DOUBLE DAGGER
+ '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
+ '\u2030' # 0x89 -> PER MILLE SIGN
+ '\ufffe' # 0x8A -> UNDEFINED
+ '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ '\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\ufffe' # 0x8E -> UNDEFINED
+ '\ufffe' # 0x8F -> UNDEFINED
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\u02dc' # 0x98 -> SMALL TILDE
+ '\u2122' # 0x99 -> TRADE MARK SIGN
+ '\ufffe' # 0x9A -> UNDEFINED
+ '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ '\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\ufffe' # 0x9E -> UNDEFINED
+ '\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
+ '\xa2' # 0xA2 -> CENT SIGN
+ '\xa3' # 0xA3 -> POUND SIGN
+ '\xa4' # 0xA4 -> CURRENCY SIGN
+ '\xa5' # 0xA5 -> YEN SIGN
+ '\xa6' # 0xA6 -> BROKEN BAR
+ '\xa7' # 0xA7 -> SECTION SIGN
+ '\xa8' # 0xA8 -> DIAERESIS
+ '\xa9' # 0xA9 -> COPYRIGHT SIGN
+ '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
+ '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xAC -> NOT SIGN
+ '\xad' # 0xAD -> SOFT HYPHEN
+ '\xae' # 0xAE -> REGISTERED SIGN
+ '\xaf' # 0xAF -> MACRON
+ '\xb0' # 0xB0 -> DEGREE SIGN
+ '\xb1' # 0xB1 -> PLUS-MINUS SIGN
+ '\xb2' # 0xB2 -> SUPERSCRIPT TWO
+ '\xb3' # 0xB3 -> SUPERSCRIPT THREE
+ '\xb4' # 0xB4 -> ACUTE ACCENT
+ '\xb5' # 0xB5 -> MICRO SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xb7' # 0xB7 -> MIDDLE DOT
+ '\xb8' # 0xB8 -> CEDILLA
+ '\xb9' # 0xB9 -> SUPERSCRIPT ONE
+ '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
+ '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
+ '\xbf' # 0xBF -> INVERTED QUESTION MARK
+ '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
+ '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
+ '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\u0300' # 0xCC -> COMBINING GRAVE ACCENT
+ '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
+ '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\u0309' # 0xD2 -> COMBINING HOOK ABOVE
+ '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
+ '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd7' # 0xD7 -> MULTIPLICATION SIGN
+ '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
+ '\u0303' # 0xDE -> COMBINING TILDE
+ '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
+ '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
+ '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe6' # 0xE6 -> LATIN SMALL LETTER AE
+ '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\u0301' # 0xEC -> COMBINING ACUTE ACCENT
+ '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
+ '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
+ '\u0323' # 0xF2 -> COMBINING DOT BELOW
+ '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
+ '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf7' # 0xF7 -> DIVISION SIGN
+ '\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
+ '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
+ '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
+ '\u20ab' # 0xFE -> DONG SIGN
+ '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
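
Unlike the other single-byte code pages in this patch, cp1258 encodes several Vietnamese diacritics as combining marks (0xCC, 0xD2, 0xDE, 0xEC and 0xF2 above), so decoded text is not guaranteed to be in NFC, and characters without a precomposed slot must be decomposed before encoding. A short sketch of both directions, assuming the standard unicodedata module:

    import unicodedata

    # 0xEC above is COMBINING ACUTE ACCENT, so 'a' + 0xEC decodes to 'a' followed by U+0301.
    decoded = b'a\xec'.decode('cp1258')
    assert decoded == 'a\u0301'
    assert unicodedata.normalize('NFC', decoded) == '\xe1'  # LATIN SMALL LETTER A WITH ACUTE

    # U+1EA1 ('a' with dot below) has no slot of its own in the table, so it is
    # decomposed to 'a' + COMBINING DOT BELOW (0xF2) before it can be encoded.
    assert unicodedata.normalize('NFD', '\u1ea1').encode('cp1258') == b'a\xf2'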
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp273.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp273.py
new file mode 100644
index 00000000..69c6d778
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp273.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp273 generated from 'python-mappings/CP273.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp273',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL (NUL)
+ '\x01' # 0x01 -> START OF HEADING (SOH)
+ '\x02' # 0x02 -> START OF TEXT (STX)
+ '\x03' # 0x03 -> END OF TEXT (ETX)
+ '\x9c' # 0x04 -> STRING TERMINATOR (ST)
+ '\t' # 0x05 -> CHARACTER TABULATION (HT)
+ '\x86' # 0x06 -> START OF SELECTED AREA (SSA)
+ '\x7f' # 0x07 -> DELETE (DEL)
+ '\x97' # 0x08 -> END OF GUARDED AREA (EPA)
+ '\x8d' # 0x09 -> REVERSE LINE FEED (RI)
+ '\x8e' # 0x0A -> SINGLE-SHIFT TWO (SS2)
+ '\x0b' # 0x0B -> LINE TABULATION (VT)
+ '\x0c' # 0x0C -> FORM FEED (FF)
+ '\r' # 0x0D -> CARRIAGE RETURN (CR)
+ '\x0e' # 0x0E -> SHIFT OUT (SO)
+ '\x0f' # 0x0F -> SHIFT IN (SI)
+ '\x10' # 0x10 -> DATALINK ESCAPE (DLE)
+ '\x11' # 0x11 -> DEVICE CONTROL ONE (DC1)
+ '\x12' # 0x12 -> DEVICE CONTROL TWO (DC2)
+ '\x13' # 0x13 -> DEVICE CONTROL THREE (DC3)
+ '\x9d' # 0x14 -> OPERATING SYSTEM COMMAND (OSC)
+ '\x85' # 0x15 -> NEXT LINE (NEL)
+ '\x08' # 0x16 -> BACKSPACE (BS)
+ '\x87' # 0x17 -> END OF SELECTED AREA (ESA)
+ '\x18' # 0x18 -> CANCEL (CAN)
+ '\x19' # 0x19 -> END OF MEDIUM (EM)
+ '\x92' # 0x1A -> PRIVATE USE TWO (PU2)
+ '\x8f' # 0x1B -> SINGLE-SHIFT THREE (SS3)
+ '\x1c' # 0x1C -> FILE SEPARATOR (IS4)
+ '\x1d' # 0x1D -> GROUP SEPARATOR (IS3)
+ '\x1e' # 0x1E -> RECORD SEPARATOR (IS2)
+ '\x1f' # 0x1F -> UNIT SEPARATOR (IS1)
+ '\x80' # 0x20 -> PADDING CHARACTER (PAD)
+ '\x81' # 0x21 -> HIGH OCTET PRESET (HOP)
+ '\x82' # 0x22 -> BREAK PERMITTED HERE (BPH)
+ '\x83' # 0x23 -> NO BREAK HERE (NBH)
+ '\x84' # 0x24 -> INDEX (IND)
+ '\n' # 0x25 -> LINE FEED (LF)
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK (ETB)
+ '\x1b' # 0x27 -> ESCAPE (ESC)
+ '\x88' # 0x28 -> CHARACTER TABULATION SET (HTS)
+ '\x89' # 0x29 -> CHARACTER TABULATION WITH JUSTIFICATION (HTJ)
+ '\x8a' # 0x2A -> LINE TABULATION SET (VTS)
+ '\x8b' # 0x2B -> PARTIAL LINE FORWARD (PLD)
+ '\x8c' # 0x2C -> PARTIAL LINE BACKWARD (PLU)
+ '\x05' # 0x2D -> ENQUIRY (ENQ)
+ '\x06' # 0x2E -> ACKNOWLEDGE (ACK)
+ '\x07' # 0x2F -> BELL (BEL)
+ '\x90' # 0x30 -> DEVICE CONTROL STRING (DCS)
+ '\x91' # 0x31 -> PRIVATE USE ONE (PU1)
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE (SYN)
+ '\x93' # 0x33 -> SET TRANSMIT STATE (STS)
+ '\x94' # 0x34 -> CANCEL CHARACTER (CCH)
+ '\x95' # 0x35 -> MESSAGE WAITING (MW)
+ '\x96' # 0x36 -> START OF GUARDED AREA (SPA)
+ '\x04' # 0x37 -> END OF TRANSMISSION (EOT)
+ '\x98' # 0x38 -> START OF STRING (SOS)
+ '\x99' # 0x39 -> SINGLE GRAPHIC CHARACTER INTRODUCER (SGCI)
+ '\x9a' # 0x3A -> SINGLE CHARACTER INTRODUCER (SCI)
+ '\x9b' # 0x3B -> CONTROL SEQUENCE INTRODUCER (CSI)
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR (DC4)
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE (NAK)
+ '\x9e' # 0x3E -> PRIVACY MESSAGE (PM)
+ '\x1a' # 0x3F -> SUBSTITUTE (SUB)
+ ' ' # 0x40 -> SPACE
+ '\xa0' # 0x41 -> NO-BREAK SPACE
+ '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '{' # 0x43 -> LEFT CURLY BRACKET
+ '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
+ '\xc4' # 0x4A -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '!' # 0x4F -> EXCLAMATION MARK
+ '&' # 0x50 -> AMPERSAND
+ '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
+ '~' # 0x59 -> TILDE
+ '\xdc' # 0x5A -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '$' # 0x5B -> DOLLAR SIGN
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '^' # 0x5F -> CIRCUMFLEX ACCENT
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '[' # 0x63 -> LEFT SQUARE BRACKET
+ '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xf6' # 0x6A -> LATIN SMALL LETTER O WITH DIAERESIS
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
+ '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
+ '`' # 0x79 -> GRAVE ACCENT
+ ':' # 0x7A -> COLON
+ '#' # 0x7B -> NUMBER SIGN
+ '\xa7' # 0x7C -> SECTION SIGN
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '"' # 0x7F -> QUOTATION MARK
+ '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (Icelandic)
+ '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (Icelandic)
+ '\xb1' # 0x8F -> PLUS-MINUS SIGN
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
+ '\xe6' # 0x9C -> LATIN SMALL LETTER AE
+ '\xb8' # 0x9D -> CEDILLA
+ '\xc6' # 0x9E -> LATIN CAPITAL LETTER AE
+ '\xa4' # 0x9F -> CURRENCY SIGN
+ '\xb5' # 0xA0 -> MICRO SIGN
+ '\xdf' # 0xA1 -> LATIN SMALL LETTER SHARP S (German)
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
+ '\xbf' # 0xAB -> INVERTED QUESTION MARK
+ '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (Icelandic)
+ '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (Icelandic)
+ '\xae' # 0xAF -> REGISTERED SIGN
+ '\xa2' # 0xB0 -> CENT SIGN
+ '\xa3' # 0xB1 -> POUND SIGN
+ '\xa5' # 0xB2 -> YEN SIGN
+ '\xb7' # 0xB3 -> MIDDLE DOT
+ '\xa9' # 0xB4 -> COPYRIGHT SIGN
+ '@' # 0xB5 -> COMMERCIAL AT
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
+ '\xac' # 0xBA -> NOT SIGN
+ '|' # 0xBB -> VERTICAL LINE
+ '\u203e' # 0xBC -> OVERLINE
+ '\xa8' # 0xBD -> DIAERESIS
+ '\xb4' # 0xBE -> ACUTE ACCENT
+ '\xd7' # 0xBF -> MULTIPLICATION SIGN
+ '\xe4' # 0xC0 -> LATIN SMALL LETTER A WITH DIAERESIS
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xa6' # 0xCC -> BROKEN BAR
+ '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
+ '\xfc' # 0xD0 -> LATIN SMALL LETTER U WITH DIAERESIS
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb9' # 0xDA -> SUPERSCRIPT ONE
+ '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '}' # 0xDC -> RIGHT CURLY BRACKET
+ '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
+ '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xd6' # 0xE0 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xf7' # 0xE1 -> DIVISION SIGN
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\\' # 0xEC -> REVERSE SOLIDUS
+ '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ ']' # 0xFC -> RIGHT SQUARE BRACKET
+ '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\x9f' # 0xFF -> APPLICATION PROGRAM COMMAND (APC)
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
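
Note (illustration only, not part of the patch): cp273.py is a generated single-byte EBCDIC charmap codec, so its behaviour is fully determined by the 256-entry decoding_table above. A minimal round-trip sketch, assuming a Python in which these encodings are installed under their usual names:

    # Byte 0x81 maps to 'a' and byte 0xC0 maps to 'ä' in the decoding_table above.
    assert "a".encode("cp273") == b"\x81"
    assert "ä".encode("cp273") == b"\xc0"
    # Decoding is the inverse lookup through the same table.
    assert b"\x81\xc0".decode("cp273") == "aä"
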
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp424.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp424.py
new file mode 100644
index 00000000..6753daf1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp424.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp424',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x9c' # 0x04 -> SELECT
+ '\t' # 0x05 -> HORIZONTAL TABULATION
+ '\x86' # 0x06 -> REQUIRED NEW LINE
+ '\x7f' # 0x07 -> DELETE
+ '\x97' # 0x08 -> GRAPHIC ESCAPE
+ '\x8d' # 0x09 -> SUPERSCRIPT
+ '\x8e' # 0x0A -> REPEAT
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
+ '\x85' # 0x15 -> NEW LINE
+ '\x08' # 0x16 -> BACKSPACE
+ '\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x92' # 0x1A -> UNIT BACK SPACE
+ '\x8f' # 0x1B -> CUSTOMER USE ONE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ '\x80' # 0x20 -> DIGIT SELECT
+ '\x81' # 0x21 -> START OF SIGNIFICANCE
+ '\x82' # 0x22 -> FIELD SEPARATOR
+ '\x83' # 0x23 -> WORD UNDERSCORE
+ '\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
+ '\n' # 0x25 -> LINE FEED
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK
+ '\x1b' # 0x27 -> ESCAPE
+ '\x88' # 0x28 -> SET ATTRIBUTE
+ '\x89' # 0x29 -> START FIELD EXTENDED
+ '\x8a' # 0x2A -> SET MODE OR SWITCH
+ '\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
+ '\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
+ '\x05' # 0x2D -> ENQUIRY
+ '\x06' # 0x2E -> ACKNOWLEDGE
+ '\x07' # 0x2F -> BELL
+ '\x90' # 0x30 ->
+ '\x91' # 0x31 ->
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE
+ '\x93' # 0x33 -> INDEX RETURN
+ '\x94' # 0x34 -> PRESENTATION POSITION
+ '\x95' # 0x35 -> TRANSPARENT
+ '\x96' # 0x36 -> NUMERIC BACKSPACE
+ '\x04' # 0x37 -> END OF TRANSMISSION
+ '\x98' # 0x38 -> SUBSCRIPT
+ '\x99' # 0x39 -> INDENT TABULATION
+ '\x9a' # 0x3A -> REVERSE FORM FEED
+ '\x9b' # 0x3B -> CUSTOMER USE THREE
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
+ '\x9e' # 0x3E ->
+ '\x1a' # 0x3F -> SUBSTITUTE
+ ' ' # 0x40 -> SPACE
+ '\u05d0' # 0x41 -> HEBREW LETTER ALEF
+ '\u05d1' # 0x42 -> HEBREW LETTER BET
+ '\u05d2' # 0x43 -> HEBREW LETTER GIMEL
+ '\u05d3' # 0x44 -> HEBREW LETTER DALET
+ '\u05d4' # 0x45 -> HEBREW LETTER HE
+ '\u05d5' # 0x46 -> HEBREW LETTER VAV
+ '\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
+ '\u05d7' # 0x48 -> HEBREW LETTER HET
+ '\u05d8' # 0x49 -> HEBREW LETTER TET
+ '\xa2' # 0x4A -> CENT SIGN
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '|' # 0x4F -> VERTICAL LINE
+ '&' # 0x50 -> AMPERSAND
+ '\u05d9' # 0x51 -> HEBREW LETTER YOD
+ '\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
+ '\u05db' # 0x53 -> HEBREW LETTER KAF
+ '\u05dc' # 0x54 -> HEBREW LETTER LAMED
+ '\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
+ '\u05de' # 0x56 -> HEBREW LETTER MEM
+ '\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
+ '\u05e0' # 0x58 -> HEBREW LETTER NUN
+ '\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
+ '!' # 0x5A -> EXCLAMATION MARK
+ '$' # 0x5B -> DOLLAR SIGN
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '\xac' # 0x5F -> NOT SIGN
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\u05e2' # 0x62 -> HEBREW LETTER AYIN
+ '\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
+ '\u05e4' # 0x64 -> HEBREW LETTER PE
+ '\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
+ '\u05e6' # 0x66 -> HEBREW LETTER TSADI
+ '\u05e7' # 0x67 -> HEBREW LETTER QOF
+ '\u05e8' # 0x68 -> HEBREW LETTER RESH
+ '\u05e9' # 0x69 -> HEBREW LETTER SHIN
+ '\xa6' # 0x6A -> BROKEN BAR
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\ufffe' # 0x70 -> UNDEFINED
+ '\u05ea' # 0x71 -> HEBREW LETTER TAV
+ '\ufffe' # 0x72 -> UNDEFINED
+ '\ufffe' # 0x73 -> UNDEFINED
+ '\xa0' # 0x74 -> NO-BREAK SPACE
+ '\ufffe' # 0x75 -> UNDEFINED
+ '\ufffe' # 0x76 -> UNDEFINED
+ '\ufffe' # 0x77 -> UNDEFINED
+ '\u2017' # 0x78 -> DOUBLE LOW LINE
+ '`' # 0x79 -> GRAVE ACCENT
+ ':' # 0x7A -> COLON
+ '#' # 0x7B -> NUMBER SIGN
+ '@' # 0x7C -> COMMERCIAL AT
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '"' # 0x7F -> QUOTATION MARK
+ '\ufffe' # 0x80 -> UNDEFINED
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\ufffe' # 0x8C -> UNDEFINED
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\ufffe' # 0x8E -> UNDEFINED
+ '\xb1' # 0x8F -> PLUS-MINUS SIGN
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\ufffe' # 0x9A -> UNDEFINED
+ '\ufffe' # 0x9B -> UNDEFINED
+ '\ufffe' # 0x9C -> UNDEFINED
+ '\xb8' # 0x9D -> CEDILLA
+ '\ufffe' # 0x9E -> UNDEFINED
+ '\xa4' # 0x9F -> CURRENCY SIGN
+ '\xb5' # 0xA0 -> MICRO SIGN
+ '~' # 0xA1 -> TILDE
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\ufffe' # 0xAA -> UNDEFINED
+ '\ufffe' # 0xAB -> UNDEFINED
+ '\ufffe' # 0xAC -> UNDEFINED
+ '\ufffe' # 0xAD -> UNDEFINED
+ '\ufffe' # 0xAE -> UNDEFINED
+ '\xae' # 0xAF -> REGISTERED SIGN
+ '^' # 0xB0 -> CIRCUMFLEX ACCENT
+ '\xa3' # 0xB1 -> POUND SIGN
+ '\xa5' # 0xB2 -> YEN SIGN
+ '\xb7' # 0xB3 -> MIDDLE DOT
+ '\xa9' # 0xB4 -> COPYRIGHT SIGN
+ '\xa7' # 0xB5 -> SECTION SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
+ '[' # 0xBA -> LEFT SQUARE BRACKET
+ ']' # 0xBB -> RIGHT SQUARE BRACKET
+ '\xaf' # 0xBC -> MACRON
+ '\xa8' # 0xBD -> DIAERESIS
+ '\xb4' # 0xBE -> ACUTE ACCENT
+ '\xd7' # 0xBF -> MULTIPLICATION SIGN
+ '{' # 0xC0 -> LEFT CURLY BRACKET
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\ufffe' # 0xCB -> UNDEFINED
+ '\ufffe' # 0xCC -> UNDEFINED
+ '\ufffe' # 0xCD -> UNDEFINED
+ '\ufffe' # 0xCE -> UNDEFINED
+ '\ufffe' # 0xCF -> UNDEFINED
+ '}' # 0xD0 -> RIGHT CURLY BRACKET
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb9' # 0xDA -> SUPERSCRIPT ONE
+ '\ufffe' # 0xDB -> UNDEFINED
+ '\ufffe' # 0xDC -> UNDEFINED
+ '\ufffe' # 0xDD -> UNDEFINED
+ '\ufffe' # 0xDE -> UNDEFINED
+ '\ufffe' # 0xDF -> UNDEFINED
+ '\\' # 0xE0 -> REVERSE SOLIDUS
+ '\xf7' # 0xE1 -> DIVISION SIGN
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\ufffe' # 0xEB -> UNDEFINED
+ '\ufffe' # 0xEC -> UNDEFINED
+ '\ufffe' # 0xED -> UNDEFINED
+ '\ufffe' # 0xEE -> UNDEFINED
+ '\ufffe' # 0xEF -> UNDEFINED
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\ufffe' # 0xFB -> UNDEFINED
+ '\ufffe' # 0xFC -> UNDEFINED
+ '\ufffe' # 0xFD -> UNDEFINED
+ '\ufffe' # 0xFE -> UNDEFINED
+ '\x9f' # 0xFF -> EIGHT ONES
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
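
Note (illustration only, not part of the patch): in cp424.py the '\ufffe' entries mark byte values the mapping leaves undefined; the charmap decoder treats them as errors rather than as literal U+FFFE. A small sketch of that behaviour, under the same assumption that the codec is registered under its usual name:

    # Hebrew letter alef sits at byte 0x41 in the decoding_table above.
    assert "\u05d0".encode("cp424") == b"\x41"

    # Byte 0x70 maps to '\ufffe' (UNDEFINED), so strict decoding fails...
    try:
        b"\x70".decode("cp424")
    except UnicodeDecodeError:
        pass
    else:
        raise AssertionError("expected an error for an undefined byte")

    # ...while the 'replace' error handler substitutes U+FFFD.
    assert b"\x70".decode("cp424", errors="replace") == "\ufffd"
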
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp437.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp437.py
new file mode 100644
index 00000000..b6c75e2c
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp437.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp437',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00a2, # CENT SIGN
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00a5, # YEN SIGN
+ 0x009e: 0x20a7, # PESETA SIGN
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x2310, # REVERSED NOT SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00ec: 0x221e, # INFINITY
+ 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00ef: 0x2229, # INTERSECTION
+ 0x00f0: 0x2261, # IDENTICAL TO
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x2320, # TOP HALF INTEGRAL
+ 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xa2' # 0x009b -> CENT SIGN
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xa5' # 0x009d -> YEN SIGN
+ '\u20a7' # 0x009e -> PESETA SIGN
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\u2310' # 0x00a9 -> REVERSED NOT SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
+ '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
+ '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
+ '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
+ '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
+ '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
+ '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
+ '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
+ '\u221e' # 0x00ec -> INFINITY
+ '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
+ '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
+ '\u2229' # 0x00ef -> INTERSECTION
+ '\u2261' # 0x00f0 -> IDENTICAL TO
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
+ '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a2: 0x009b, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a5: 0x009d, # YEN SIGN
+ 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
+ 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
+ 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
+ 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
+ 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
+ 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
+ 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
+ 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
+ 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
+ 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x20a7: 0x009e, # PESETA SIGN
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x221e: 0x00ec, # INFINITY
+ 0x2229: 0x00ef, # INTERSECTION
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2261: 0x00f0, # IDENTICAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2310: 0x00a9, # REVERSED NOT SIGN
+ 0x2320: 0x00f4, # TOP HALF INTEGRAL
+ 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
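
Note (illustration only, not part of the patch): unlike the table-based modules above, cp437.py keeps a dict encoding_map (alongside the legacy decoding_map) and its Codec.encode hands that dict straight to codecs.charmap_encode. A short sketch of both the high-level and the table-level paths, assuming a Python in which these modules are importable:

    import codecs
    from encodings import cp437  # the module added by the hunk above

    # High-level use through the registered codec name.
    assert "é".encode("cp437") == b"\x82"     # 0x82 -> LATIN SMALL LETTER E WITH ACUTE
    assert b"\x9b".decode("cp437") == "\xa2"  # 0x9B -> CENT SIGN

    # The same tables can be passed to the charmap primitives directly:
    # charmap_encode takes the dict encoding_map, charmap_decode the 256-character string.
    assert codecs.charmap_encode("é", "strict", cp437.encoding_map) == (b"\x82", 1)
    assert codecs.charmap_decode(b"\x82", "strict", cp437.decoding_table) == ("é", 1)
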
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp500.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp500.py
new file mode 100644
index 00000000..5f61535f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp500.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp500',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x9c' # 0x04 -> CONTROL
+ '\t' # 0x05 -> HORIZONTAL TABULATION
+ '\x86' # 0x06 -> CONTROL
+ '\x7f' # 0x07 -> DELETE
+ '\x97' # 0x08 -> CONTROL
+ '\x8d' # 0x09 -> CONTROL
+ '\x8e' # 0x0A -> CONTROL
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x9d' # 0x14 -> CONTROL
+ '\x85' # 0x15 -> CONTROL
+ '\x08' # 0x16 -> BACKSPACE
+ '\x87' # 0x17 -> CONTROL
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x92' # 0x1A -> CONTROL
+ '\x8f' # 0x1B -> CONTROL
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ '\x80' # 0x20 -> CONTROL
+ '\x81' # 0x21 -> CONTROL
+ '\x82' # 0x22 -> CONTROL
+ '\x83' # 0x23 -> CONTROL
+ '\x84' # 0x24 -> CONTROL
+ '\n' # 0x25 -> LINE FEED
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK
+ '\x1b' # 0x27 -> ESCAPE
+ '\x88' # 0x28 -> CONTROL
+ '\x89' # 0x29 -> CONTROL
+ '\x8a' # 0x2A -> CONTROL
+ '\x8b' # 0x2B -> CONTROL
+ '\x8c' # 0x2C -> CONTROL
+ '\x05' # 0x2D -> ENQUIRY
+ '\x06' # 0x2E -> ACKNOWLEDGE
+ '\x07' # 0x2F -> BELL
+ '\x90' # 0x30 -> CONTROL
+ '\x91' # 0x31 -> CONTROL
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE
+ '\x93' # 0x33 -> CONTROL
+ '\x94' # 0x34 -> CONTROL
+ '\x95' # 0x35 -> CONTROL
+ '\x96' # 0x36 -> CONTROL
+ '\x04' # 0x37 -> END OF TRANSMISSION
+ '\x98' # 0x38 -> CONTROL
+ '\x99' # 0x39 -> CONTROL
+ '\x9a' # 0x3A -> CONTROL
+ '\x9b' # 0x3B -> CONTROL
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
+ '\x9e' # 0x3E -> CONTROL
+ '\x1a' # 0x3F -> SUBSTITUTE
+ ' ' # 0x40 -> SPACE
+ '\xa0' # 0x41 -> NO-BREAK SPACE
+ '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
+ '[' # 0x4A -> LEFT SQUARE BRACKET
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '!' # 0x4F -> EXCLAMATION MARK
+ '&' # 0x50 -> AMPERSAND
+ '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
+ '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
+ '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
+ ']' # 0x5A -> RIGHT SQUARE BRACKET
+ '$' # 0x5B -> DOLLAR SIGN
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '^' # 0x5F -> CIRCUMFLEX ACCENT
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xa6' # 0x6A -> BROKEN BAR
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
+ '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
+ '`' # 0x79 -> GRAVE ACCENT
+ ':' # 0x7A -> COLON
+ '#' # 0x7B -> NUMBER SIGN
+ '@' # 0x7C -> COMMERCIAL AT
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '"' # 0x7F -> QUOTATION MARK
+ '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
+ '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
+ '\xb1' # 0x8F -> PLUS-MINUS SIGN
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
+ '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
+ '\xb8' # 0x9D -> CEDILLA
+ '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
+ '\xa4' # 0x9F -> CURRENCY SIGN
+ '\xb5' # 0xA0 -> MICRO SIGN
+ '~' # 0xA1 -> TILDE
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
+ '\xbf' # 0xAB -> INVERTED QUESTION MARK
+ '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
+ '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
+ '\xae' # 0xAF -> REGISTERED SIGN
+ '\xa2' # 0xB0 -> CENT SIGN
+ '\xa3' # 0xB1 -> POUND SIGN
+ '\xa5' # 0xB2 -> YEN SIGN
+ '\xb7' # 0xB3 -> MIDDLE DOT
+ '\xa9' # 0xB4 -> COPYRIGHT SIGN
+ '\xa7' # 0xB5 -> SECTION SIGN
+ '\xb6' # 0xB6 -> PILCROW SIGN
+ '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
+ '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
+ '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
+ '\xac' # 0xBA -> NOT SIGN
+ '|' # 0xBB -> VERTICAL LINE
+ '\xaf' # 0xBC -> MACRON
+ '\xa8' # 0xBD -> DIAERESIS
+ '\xb4' # 0xBE -> ACUTE ACCENT
+ '\xd7' # 0xBF -> MULTIPLICATION SIGN
+ '{' # 0xC0 -> LEFT CURLY BRACKET
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
+ '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
+ '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
+ '}' # 0xD0 -> RIGHT CURLY BRACKET
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb9' # 0xDA -> SUPERSCRIPT ONE
+ '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
+ '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
+ '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\\' # 0xE0 -> REVERSE SOLIDUS
+ '\xf7' # 0xE1 -> DIVISION SIGN
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\x9f' # 0xFF -> CONTROL
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
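
Every charmap codec added in this patch follows the same pattern: a 256-character decoding_table maps each byte value to one Unicode character, and codecs.charmap_build() derives the reverse table used for encoding, exactly as the line above does. A minimal sketch of that machinery, using throwaway names (demo_table, demo_encoding_table) rather than the vendored tables, which are not importable from a patch:

    import codecs

    # A toy table: identical to Latin-1 except that byte 0x50 decodes to
    # U+00E9 (LATIN SMALL LETTER E WITH ACUTE).
    demo_table = (''.join(chr(i) for i in range(0x50))
                  + '\xe9'
                  + ''.join(chr(i) for i in range(0x51, 0x100)))
    demo_encoding_table = codecs.charmap_build(demo_table)

    text, consumed = codecs.charmap_decode(b'\x50', 'strict', demo_table)
    assert text == '\xe9' and consumed == 1

    raw, _ = codecs.charmap_encode('\xe9', 'strict', demo_encoding_table)
    assert raw == b'\x50'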
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp65001.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp65001.py
new file mode 100644
index 00000000..95cb2aec
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp65001.py
@@ -0,0 +1,43 @@
+"""
+Code page 65001: Windows UTF-8 (CP_UTF8).
+"""
+
+import codecs
+import functools
+
+if not hasattr(codecs, 'code_page_encode'):
+ raise LookupError("cp65001 encoding is only available on Windows")
+
+### Codec APIs
+
+encode = functools.partial(codecs.code_page_encode, 65001)
+_decode = functools.partial(codecs.code_page_decode, 65001)
+
+def decode(input, errors='strict'):
+ return codecs.code_page_decode(65001, input, errors, True)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return encode(input, self.errors)[0]
+
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+ _buffer_decode = _decode
+
+class StreamWriter(codecs.StreamWriter):
+ encode = encode
+
+class StreamReader(codecs.StreamReader):
+ decode = _decode
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp65001',
+ encode=encode,
+ decode=decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
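
cp65001, by contrast, is not table-driven: it forwards to the Windows code-page converters and deliberately fails to import where codecs.code_page_encode is missing. A hedged usage sketch, guarded so it also runs on non-Windows interpreters:

    import codecs

    if hasattr(codecs, 'code_page_encode'):
        # The same call the codec's encode partial performs for CP_UTF8.
        raw, _ = codecs.code_page_encode(65001, 'h\xe9llo')
        assert raw.decode('cp65001') == 'h\xe9llo'
    else:
        print('cp65001 unavailable: codecs.code_page_encode requires Windows')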
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp720.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp720.py
new file mode 100644
index 00000000..96d60961
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp720.py
@@ -0,0 +1,309 @@
+"""Python Character Mapping Codec cp720 generated on Windows:
+Vista 6.0.6002 SP2 Multiprocessor Free with the command:
+ python Tools/unicode/genwincodec.py 720
+"""#"
+
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp720',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> CONTROL CHARACTER
+ '\x01' # 0x01 -> CONTROL CHARACTER
+ '\x02' # 0x02 -> CONTROL CHARACTER
+ '\x03' # 0x03 -> CONTROL CHARACTER
+ '\x04' # 0x04 -> CONTROL CHARACTER
+ '\x05' # 0x05 -> CONTROL CHARACTER
+ '\x06' # 0x06 -> CONTROL CHARACTER
+ '\x07' # 0x07 -> CONTROL CHARACTER
+ '\x08' # 0x08 -> CONTROL CHARACTER
+ '\t' # 0x09 -> CONTROL CHARACTER
+ '\n' # 0x0A -> CONTROL CHARACTER
+ '\x0b' # 0x0B -> CONTROL CHARACTER
+ '\x0c' # 0x0C -> CONTROL CHARACTER
+ '\r' # 0x0D -> CONTROL CHARACTER
+ '\x0e' # 0x0E -> CONTROL CHARACTER
+ '\x0f' # 0x0F -> CONTROL CHARACTER
+ '\x10' # 0x10 -> CONTROL CHARACTER
+ '\x11' # 0x11 -> CONTROL CHARACTER
+ '\x12' # 0x12 -> CONTROL CHARACTER
+ '\x13' # 0x13 -> CONTROL CHARACTER
+ '\x14' # 0x14 -> CONTROL CHARACTER
+ '\x15' # 0x15 -> CONTROL CHARACTER
+ '\x16' # 0x16 -> CONTROL CHARACTER
+ '\x17' # 0x17 -> CONTROL CHARACTER
+ '\x18' # 0x18 -> CONTROL CHARACTER
+ '\x19' # 0x19 -> CONTROL CHARACTER
+ '\x1a' # 0x1A -> CONTROL CHARACTER
+ '\x1b' # 0x1B -> CONTROL CHARACTER
+ '\x1c' # 0x1C -> CONTROL CHARACTER
+ '\x1d' # 0x1D -> CONTROL CHARACTER
+ '\x1e' # 0x1E -> CONTROL CHARACTER
+ '\x1f' # 0x1F -> CONTROL CHARACTER
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> CONTROL CHARACTER
+ '\x80' # 0x80 -> CONTROL CHARACTER
+ '\x81' # 0x81 -> CONTROL CHARACTER
+ '\xe9' # 0x82 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x83 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\x84' # 0x84 -> CONTROL CHARACTER
+ '\xe0' # 0x85 -> LATIN SMALL LETTER A WITH GRAVE
+ '\x86' # 0x86 -> CONTROL CHARACTER
+ '\xe7' # 0x87 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x88 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x89 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x8A -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x8B -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x8C -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\x8d' # 0x8D -> CONTROL CHARACTER
+ '\x8e' # 0x8E -> CONTROL CHARACTER
+ '\x8f' # 0x8F -> CONTROL CHARACTER
+ '\x90' # 0x90 -> CONTROL CHARACTER
+ '\u0651' # 0x91 -> ARABIC SHADDA
+ '\u0652' # 0x92 -> ARABIC SUKUN
+ '\xf4' # 0x93 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xa4' # 0x94 -> CURRENCY SIGN
+ '\u0640' # 0x95 -> ARABIC TATWEEL
+ '\xfb' # 0x96 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x97 -> LATIN SMALL LETTER U WITH GRAVE
+ '\u0621' # 0x98 -> ARABIC LETTER HAMZA
+ '\u0622' # 0x99 -> ARABIC LETTER ALEF WITH MADDA ABOVE
+ '\u0623' # 0x9A -> ARABIC LETTER ALEF WITH HAMZA ABOVE
+ '\u0624' # 0x9B -> ARABIC LETTER WAW WITH HAMZA ABOVE
+ '\xa3' # 0x9C -> POUND SIGN
+ '\u0625' # 0x9D -> ARABIC LETTER ALEF WITH HAMZA BELOW
+ '\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE
+ '\u0627' # 0x9F -> ARABIC LETTER ALEF
+ '\u0628' # 0xA0 -> ARABIC LETTER BEH
+ '\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA
+ '\u062a' # 0xA2 -> ARABIC LETTER TEH
+ '\u062b' # 0xA3 -> ARABIC LETTER THEH
+ '\u062c' # 0xA4 -> ARABIC LETTER JEEM
+ '\u062d' # 0xA5 -> ARABIC LETTER HAH
+ '\u062e' # 0xA6 -> ARABIC LETTER KHAH
+ '\u062f' # 0xA7 -> ARABIC LETTER DAL
+ '\u0630' # 0xA8 -> ARABIC LETTER THAL
+ '\u0631' # 0xA9 -> ARABIC LETTER REH
+ '\u0632' # 0xAA -> ARABIC LETTER ZAIN
+ '\u0633' # 0xAB -> ARABIC LETTER SEEN
+ '\u0634' # 0xAC -> ARABIC LETTER SHEEN
+ '\u0635' # 0xAD -> ARABIC LETTER SAD
+ '\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0xB0 -> LIGHT SHADE
+ '\u2592' # 0xB1 -> MEDIUM SHADE
+ '\u2593' # 0xB2 -> DARK SHADE
+ '\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0xB5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0xB6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0xB8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0xBD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0xBE -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0xC6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0xC7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0xCF -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0xD0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0xD1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0xD2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0xD3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0xD4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0xD5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0xD6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0xD7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0xD8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0xDB -> FULL BLOCK
+ '\u2584' # 0xDC -> LOWER HALF BLOCK
+ '\u258c' # 0xDD -> LEFT HALF BLOCK
+ '\u2590' # 0xDE -> RIGHT HALF BLOCK
+ '\u2580' # 0xDF -> UPPER HALF BLOCK
+ '\u0636' # 0xE0 -> ARABIC LETTER DAD
+ '\u0637' # 0xE1 -> ARABIC LETTER TAH
+ '\u0638' # 0xE2 -> ARABIC LETTER ZAH
+ '\u0639' # 0xE3 -> ARABIC LETTER AIN
+ '\u063a' # 0xE4 -> ARABIC LETTER GHAIN
+ '\u0641' # 0xE5 -> ARABIC LETTER FEH
+ '\xb5' # 0xE6 -> MICRO SIGN
+ '\u0642' # 0xE7 -> ARABIC LETTER QAF
+ '\u0643' # 0xE8 -> ARABIC LETTER KAF
+ '\u0644' # 0xE9 -> ARABIC LETTER LAM
+ '\u0645' # 0xEA -> ARABIC LETTER MEEM
+ '\u0646' # 0xEB -> ARABIC LETTER NOON
+ '\u0647' # 0xEC -> ARABIC LETTER HEH
+ '\u0648' # 0xED -> ARABIC LETTER WAW
+ '\u0649' # 0xEE -> ARABIC LETTER ALEF MAKSURA
+ '\u064a' # 0xEF -> ARABIC LETTER YEH
+ '\u2261' # 0xF0 -> IDENTICAL TO
+ '\u064b' # 0xF1 -> ARABIC FATHATAN
+ '\u064c' # 0xF2 -> ARABIC DAMMATAN
+ '\u064d' # 0xF3 -> ARABIC KASRATAN
+ '\u064e' # 0xF4 -> ARABIC FATHA
+ '\u064f' # 0xF5 -> ARABIC DAMMA
+ '\u0650' # 0xF6 -> ARABIC KASRA
+ '\u2248' # 0xF7 -> ALMOST EQUAL TO
+ '\xb0' # 0xF8 -> DEGREE SIGN
+ '\u2219' # 0xF9 -> BULLET OPERATOR
+ '\xb7' # 0xFA -> MIDDLE DOT
+ '\u221a' # 0xFB -> SQUARE ROOT
+ '\u207f' # 0xFC -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0xFD -> SUPERSCRIPT TWO
+ '\u25a0' # 0xFE -> BLACK SQUARE
+ '\xa0' # 0xFF -> NO-BREAK SPACE
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
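
Since cp720 is part of the interpreter's standard codec set, the table above is reachable through the ordinary str/bytes interface; the byte values in the round trip below come straight from the decoding_table entries in this file (0x9F is ARABIC LETTER ALEF, 0xA0 is ARABIC LETTER BEH).

    # Round-trip two bytes through the cp720 mapping defined above.
    text = b'\x9f\xa0'.decode('cp720')
    assert text == '\u0627\u0628'   # ALEF, BEH
    assert text.encode('cp720') == b'\x9f\xa0'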
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp737.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp737.py
new file mode 100644
index 00000000..9685bae7
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp737.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp737',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
+ 0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
+ 0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
+ 0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
+ 0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
+ 0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
+ 0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
+ 0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
+ 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
+ 0x008b: 0x039c, # GREEK CAPITAL LETTER MU
+ 0x008c: 0x039d, # GREEK CAPITAL LETTER NU
+ 0x008d: 0x039e, # GREEK CAPITAL LETTER XI
+ 0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
+ 0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
+ 0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
+ 0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
+ 0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
+ 0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
+ 0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
+ 0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x0099: 0x03b2, # GREEK SMALL LETTER BETA
+ 0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
+ 0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
+ 0x009e: 0x03b7, # GREEK SMALL LETTER ETA
+ 0x009f: 0x03b8, # GREEK SMALL LETTER THETA
+ 0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
+ 0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
+ 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
+ 0x00a3: 0x03bc, # GREEK SMALL LETTER MU
+ 0x00a4: 0x03bd, # GREEK SMALL LETTER NU
+ 0x00a5: 0x03be, # GREEK SMALL LETTER XI
+ 0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
+ 0x00a7: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
+ 0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
+ 0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
+ 0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
+ 0x00af: 0x03c8, # GREEK SMALL LETTER PSI
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
+ 0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
+ 0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
+ 0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
+ 0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ 0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
+ 0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
+ 0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
+ 0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ 0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
+ 0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
+ 0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
+ 0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
+ 0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
+ 0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
+ 0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
+ 0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ 0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
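
The dict-based decoding_map above is informational; Codec.decode() in this module uses the string-based decoding_table defined below, and the two describe the same byte-to-character mapping. A quick consistency check against the installed module (0x80 is GREEK CAPITAL LETTER ALPHA in both):

    from encodings import cp737

    # Both representations agree on byte 0x80 -> U+0391.
    assert cp737.decoding_map[0x80] == ord(cp737.decoding_table[0x80]) == 0x0391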
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
+ '\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
+ '\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
+ '\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
+ '\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
+ '\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
+ '\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
+ '\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
+ '\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
+ '\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
+ '\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
+ '\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
+ '\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
+ '\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
+ '\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
+ '\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
+ '\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
+ '\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
+ '\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
+ '\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
+ '\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
+ '\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
+ '\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
+ '\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
+ '\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
+ '\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
+ '\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
+ '\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
+ '\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
+ '\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
+ '\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
+ '\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
+ '\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
+ '\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
+ '\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
+ '\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
+ '\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
+ '\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
+ '\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
+ '\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
+ '\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
+ '\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
+ '\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
+ '\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
+ '\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
+ '\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
+ '\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
+ '\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
+ '\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
+ '\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
+ '\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
+ '\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ '\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
+ '\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
+ '\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
+ '\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ '\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
+ '\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
+ '\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
+ '\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
+ '\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
+ '\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
+ '\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
+ '\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ '\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
+ 0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
+ 0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
+ 0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
+ 0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
+ 0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
+ 0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
+ 0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
+ 0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
+ 0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
+ 0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
+ 0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
+ 0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
+ 0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
+ 0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
+ 0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
+ 0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
+ 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
+ 0x039c: 0x008b, # GREEK CAPITAL LETTER MU
+ 0x039d: 0x008c, # GREEK CAPITAL LETTER NU
+ 0x039e: 0x008d, # GREEK CAPITAL LETTER XI
+ 0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
+ 0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
+ 0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
+ 0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
+ 0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
+ 0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
+ 0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
+ 0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
+ 0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
+ 0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
+ 0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ 0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ 0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
+ 0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
+ 0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
+ 0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
+ 0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
+ 0x03b2: 0x0099, # GREEK SMALL LETTER BETA
+ 0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
+ 0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
+ 0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
+ 0x03b7: 0x009e, # GREEK SMALL LETTER ETA
+ 0x03b8: 0x009f, # GREEK SMALL LETTER THETA
+ 0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
+ 0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
+ 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
+ 0x03bc: 0x00a3, # GREEK SMALL LETTER MU
+ 0x03bd: 0x00a4, # GREEK SMALL LETTER NU
+ 0x03be: 0x00a5, # GREEK SMALL LETTER XI
+ 0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
+ 0x03c0: 0x00a7, # GREEK SMALL LETTER PI
+ 0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
+ 0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
+ 0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
+ 0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
+ 0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
+ 0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
+ 0x03c8: 0x00af, # GREEK SMALL LETTER PSI
+ 0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
+ 0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ 0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ 0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
+ 0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
+ 0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
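
Where cp720 derives its encoding table with codecs.charmap_build(), this gencodec-generated module ships the explicit encoding_map above and passes it to codecs.charmap_encode(); both forms are accepted by the charmap codec. The lookup below goes through getregentry() and exercises the 0x80 <-> GREEK CAPITAL LETTER ALPHA entry:

    import codecs

    info = codecs.lookup('cp737')
    assert info.name == 'cp737'
    assert info.decode(b'\x80')[0] == '\u0391'   # via decoding_table
    assert info.encode('\u0391')[0] == b'\x80'   # via encoding_map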
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp775.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp775.py
new file mode 100644
index 00000000..fe06e7bc
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp775.py
@@ -0,0 +1,697 @@
+""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp775',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
+ 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
+ 0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
+ 0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
+ 0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
+ 0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
+ 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
+ 0x0096: 0x00a2, # CENT SIGN
+ 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
+ 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x009e: 0x00d7, # MULTIPLICATION SIGN
+ 0x009f: 0x00a4, # CURRENCY SIGN
+ 0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
+ 0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ 0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
+ 0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
+ 0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
+ 0x00a7: 0x00a6, # BROKEN BAR
+ 0x00a8: 0x00a9, # COPYRIGHT SIGN
+ 0x00a9: 0x00ae, # REGISTERED SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
+ 0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
+ 0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
+ 0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
+ 0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
+ 0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
+ 0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
+ 0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
+ 0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
+ 0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
+ 0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
+ 0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
+ 0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
+ 0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
+ 0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
+ 0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
+ 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
+ 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
+ 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
+ 0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
+ 0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
+ 0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
+ 0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
+ 0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
+ 0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
+ 0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
+ 0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
+ 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
+ 0x00f4: 0x00b6, # PILCROW SIGN
+ 0x00f5: 0x00a7, # SECTION SIGN
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x00b9, # SUPERSCRIPT ONE
+ 0x00fc: 0x00b3, # SUPERSCRIPT THREE
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
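
cp775 (the Baltic Rim code page) is laid out the same way; the snippet below feeds its IncrementalDecoder two single-byte chunks, using the 0x80 (LATIN CAPITAL LETTER C WITH ACUTE) and 0x9B (LATIN SMALL LETTER O WITH STROKE) entries from the decoding_map above. The name dec is illustrative.

    import codecs

    # Stateless charmap decoding works chunk by chunk as well.
    dec = codecs.getincrementaldecoder('cp775')('strict')
    assert dec.decode(b'\x80') + dec.decode(b'\x9b', final=True) == '\u0106\xf8'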
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
+ '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
+ '\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
+ '\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
+ '\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
+ '\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
+ '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
+ '\xa2' # 0x0096 -> CENT SIGN
+ '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
+ '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xd7' # 0x009e -> MULTIPLICATION SIGN
+ '\xa4' # 0x009f -> CURRENCY SIGN
+ '\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
+ '\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ '\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
+ '\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
+ '\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
+ '\xa6' # 0x00a7 -> BROKEN BAR
+ '\xa9' # 0x00a8 -> COPYRIGHT SIGN
+ '\xae' # 0x00a9 -> REGISTERED SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
+ '\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
+ '\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
+ '\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
+ '\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
+ '\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
+ '\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
+ '\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
+ '\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
+ '\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
+ '\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
+ '\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
+ '\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
+ '\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
+ '\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
+ '\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
+ '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
+ '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
+ '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
+ '\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
+ '\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
+ '\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
+ '\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
+ '\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
+ '\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
+ '\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
+ '\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
+ '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
+ '\xb6' # 0x00f4 -> PILCROW SIGN
+ '\xa7' # 0x00f5 -> SECTION SIGN
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\xb9' # 0x00fb -> SUPERSCRIPT ONE
+ '\xb3' # 0x00fc -> SUPERSCRIPT THREE
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a2: 0x0096, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a4: 0x009f, # CURRENCY SIGN
+ 0x00a6: 0x00a7, # BROKEN BAR
+ 0x00a7: 0x00f5, # SECTION SIGN
+ 0x00a9: 0x00a8, # COPYRIGHT SIGN
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00ae: 0x00a9, # REGISTERED SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b3: 0x00fc, # SUPERSCRIPT THREE
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b6: 0x00f4, # PILCROW SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00b9: 0x00fb, # SUPERSCRIPT ONE
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d7: 0x009e, # MULTIPLICATION SIGN
+ 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
+ 0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
+ 0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
+ 0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
+ 0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
+ 0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
+ 0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
+ 0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
+ 0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
+ 0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
+ 0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
+ 0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
+ 0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
+ 0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
+ 0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
+ 0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
+ 0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
+ 0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
+ 0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
+ 0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
+ 0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
+ 0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
+ 0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
+ 0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
+ 0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
+ 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
+ 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
+ 0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
+ 0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
+ 0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
+ 0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
+ 0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
+ 0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
+ 0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
+ 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
+ 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
+ 0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
+ 0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
+ 0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
+ 0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
+ 0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
+ 0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
+ 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
+ 0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
+ 0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ 0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
+ 0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
+ 0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
+ 0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
+ 0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
+ 0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
+ 0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
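The file above is stock gencodec.py output, so the tables need no hand edits. As a sanity check only, here is a minimal sketch (not itself part of the patch) of how the decoding_table / encoding_map pair is exercised through the ordinary codecs machinery, assuming the bundled interpreter registers this module under its usual 'cp775' name:

    # Round-trip a few bytes through the cp775 charmap codec added above.
    raw = b'\x80\x96\xa3'                 # per the decoding table: 0x80 -> Ć, 0x96 -> ¢, 0xa3 -> Ż
    text = raw.decode('cp775')            # charmap_decode via decoding_table
    assert text == '\u0106\xa2\u017b'
    assert text.encode('cp775') == raw    # charmap_encode via encoding_map round-trips
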
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp850.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp850.py
new file mode 100644
index 00000000..f98aef99
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp850.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp850',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x009e: 0x00d7, # MULTIPLICATION SIGN
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x00ae, # REGISTERED SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00b8: 0x00a9, # COPYRIGHT SIGN
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x00a2, # CENT SIGN
+ 0x00be: 0x00a5, # YEN SIGN
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
+ 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x00a4, # CURRENCY SIGN
+ 0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
+ 0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
+ 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
+ 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x00a6, # BROKEN BAR
+ 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
+ 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
+ 0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
+ 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00ee: 0x00af, # MACRON
+ 0x00ef: 0x00b4, # ACUTE ACCENT
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2017, # DOUBLE LOW LINE
+ 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
+ 0x00f4: 0x00b6, # PILCROW SIGN
+ 0x00f5: 0x00a7, # SECTION SIGN
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x00b8, # CEDILLA
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x00a8, # DIAERESIS
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x00b9, # SUPERSCRIPT ONE
+ 0x00fc: 0x00b3, # SUPERSCRIPT THREE
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xd7' # 0x009e -> MULTIPLICATION SIGN
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\xae' # 0x00a9 -> REGISTERED SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xa9' # 0x00b8 -> COPYRIGHT SIGN
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\xa2' # 0x00bd -> CENT SIGN
+ '\xa5' # 0x00be -> YEN SIGN
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
+ '\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\xa4' # 0x00cf -> CURRENCY SIGN
+ '\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
+ '\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
+ '\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
+ '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\xa6' # 0x00dd -> BROKEN BAR
+ '\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
+ '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
+ '\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
+ '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xaf' # 0x00ee -> MACRON
+ '\xb4' # 0x00ef -> ACUTE ACCENT
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2017' # 0x00f2 -> DOUBLE LOW LINE
+ '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
+ '\xb6' # 0x00f4 -> PILCROW SIGN
+ '\xa7' # 0x00f5 -> SECTION SIGN
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\xb8' # 0x00f7 -> CEDILLA
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\xa8' # 0x00f9 -> DIAERESIS
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\xb9' # 0x00fb -> SUPERSCRIPT ONE
+ '\xb3' # 0x00fc -> SUPERSCRIPT THREE
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a2: 0x00bd, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a4: 0x00cf, # CURRENCY SIGN
+ 0x00a5: 0x00be, # YEN SIGN
+ 0x00a6: 0x00dd, # BROKEN BAR
+ 0x00a7: 0x00f5, # SECTION SIGN
+ 0x00a8: 0x00f9, # DIAERESIS
+ 0x00a9: 0x00b8, # COPYRIGHT SIGN
+ 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00ae: 0x00a9, # REGISTERED SIGN
+ 0x00af: 0x00ee, # MACRON
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b3: 0x00fc, # SUPERSCRIPT THREE
+ 0x00b4: 0x00ef, # ACUTE ACCENT
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b6: 0x00f4, # PILCROW SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00b8: 0x00f7, # CEDILLA
+ 0x00b9: 0x00fb, # SUPERSCRIPT ONE
+ 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d7: 0x009e, # MULTIPLICATION SIGN
+ 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
+ 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x2017: 0x00f2, # DOUBLE LOW LINE
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
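As with the previous file, cp850.py is unmodified generator output. A small hedged sketch (again not part of the patch) of the incremental path it defines: codecs.getincrementaldecoder() hands back the IncrementalDecoder class above, and each byte maps to exactly one character through decoding_table, so chunk boundaries are harmless:

    import codecs

    dec = codecs.getincrementaldecoder('cp850')()
    out = dec.decode(b'\x9c\xe1', final=False)   # 0x9c -> '£', 0xe1 -> 'ß'
    out += dec.decode(b'\xf8', final=True)       # 0xf8 -> '°'
    assert out == '\xa3\xdf\xb0'
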
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp852.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp852.py
new file mode 100644
index 00000000..34d8a0ea
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp852.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp852',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
+ 0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+ 0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
+ 0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
+ 0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
+ 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
+ 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
+ 0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
+ 0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
+ 0x009e: 0x00d7, # MULTIPLICATION SIGN
+ 0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
+ 0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
+ 0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
+ 0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
+ 0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
+ 0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
+ 0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
+ 0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
+ 0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ 0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
+ 0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x00a4, # CURRENCY SIGN
+ 0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
+ 0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
+ 0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
+ 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
+ 0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
+ 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
+ 0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
+ 0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
+ 0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
+ 0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
+ 0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
+ 0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
+ 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
+ 0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+ 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
+ 0x00ef: 0x00b4, # ACUTE ACCENT
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
+ 0x00f2: 0x02db, # OGONEK
+ 0x00f3: 0x02c7, # CARON
+ 0x00f4: 0x02d8, # BREVE
+ 0x00f5: 0x00a7, # SECTION SIGN
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x00b8, # CEDILLA
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x00a8, # DIAERESIS
+ 0x00fa: 0x02d9, # DOT ABOVE
+ 0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
+ 0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
+ 0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
+ '\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+ '\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
+ '\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
+ '\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
+ '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
+ '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
+ '\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
+ '\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
+ '\xd7' # 0x009e -> MULTIPLICATION SIGN
+ '\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
+ '\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
+ '\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
+ '\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
+ '\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
+ '\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
+ '\xac' # 0x00aa -> NOT SIGN
+ '\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
+ '\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
+ '\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
+ '\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ '\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
+ '\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\xa4' # 0x00cf -> CURRENCY SIGN
+ '\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
+ '\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
+ '\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
+ '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
+ '\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
+ '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
+ '\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
+ '\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
+ '\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
+ '\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
+ '\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
+ '\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
+ '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
+ '\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+ '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
+ '\xb4' # 0x00ef -> ACUTE ACCENT
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
+ '\u02db' # 0x00f2 -> OGONEK
+ '\u02c7' # 0x00f3 -> CARON
+ '\u02d8' # 0x00f4 -> BREVE
+ '\xa7' # 0x00f5 -> SECTION SIGN
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\xb8' # 0x00f7 -> CEDILLA
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\xa8' # 0x00f9 -> DIAERESIS
+ '\u02d9' # 0x00fa -> DOT ABOVE
+ '\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
+ '\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
+ '\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a4: 0x00cf, # CURRENCY SIGN
+ 0x00a7: 0x00f5, # SECTION SIGN
+ 0x00a8: 0x00f9, # DIAERESIS
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b4: 0x00ef, # ACUTE ACCENT
+ 0x00b8: 0x00f7, # CEDILLA
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d7: 0x009e, # MULTIPLICATION SIGN
+ 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
+ 0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
+ 0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
+ 0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
+ 0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
+ 0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
+ 0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
+ 0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
+ 0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
+ 0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
+ 0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
+ 0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
+ 0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
+ 0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
+ 0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
+ 0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
+ 0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
+ 0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
+ 0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
+ 0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
+ 0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
+ 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
+ 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
+ 0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
+ 0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
+ 0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
+ 0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+ 0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
+ 0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
+ 0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
+ 0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
+ 0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
+ 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
+ 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
+ 0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
+ 0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
+ 0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
+ 0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
+ 0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
+ 0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
+ 0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
+ 0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
+ 0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
+ 0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
+ 0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+ 0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
+ 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
+ 0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
+ 0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+ 0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
+ 0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
+ 0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
+ 0x02c7: 0x00f3, # CARON
+ 0x02d8: 0x00f4, # BREVE
+ 0x02d9: 0x00fa, # DOT ABOVE
+ 0x02db: 0x00f2, # OGONEK
+ 0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
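A sketch of how the tables just defined get consumed: Codec.encode() and Codec.decode() simply hand them to the codecs.charmap_encode()/charmap_decode() primitives. The snippet below is illustrative only and assumes the file above is importable as encodings.cp852 (its DOS Latin-2 table contents suggest CP852, though the file header sits outside this excerpt); any Central European sample covered by the table round-trips cleanly.

    # Illustrative sketch, not part of the patch: round-trip through the
    # charmap tables above using the same primitives Codec delegates to.
    import codecs
    from encodings import cp852  # assumed module name for the file above

    sample = "Žluťoučký kůň"  # every character appears in the table above
    encoded, _ = codecs.charmap_encode(sample, "strict", cp852.encoding_map)
    decoded, _ = codecs.charmap_decode(encoded, "strict", cp852.decoding_table)
    assert decoded == sample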
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp855.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp855.py
new file mode 100644
index 00000000..4fe92106
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp855.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp855',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
+ 0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
+ 0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
+ 0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
+ 0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
+ 0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
+ 0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
+ 0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ 0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
+ 0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
+ 0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
+ 0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
+ 0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
+ 0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
+ 0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
+ 0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
+ 0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
+ 0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
+ 0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
+ 0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
+ 0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
+ 0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
+ 0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
+ 0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
+ 0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
+ 0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
+ 0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
+ 0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
+ 0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
+ 0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
+ 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
+ 0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
+ 0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
+ 0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
+ 0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
+ 0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
+ 0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
+ 0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
+ 0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
+ 0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
+ 0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
+ 0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
+ 0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
+ 0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
+ 0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
+ 0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
+ 0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
+ 0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
+ 0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x00a4, # CURRENCY SIGN
+ 0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
+ 0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
+ 0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
+ 0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
+ 0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
+ 0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
+ 0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
+ 0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
+ 0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
+ 0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
+ 0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
+ 0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
+ 0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
+ 0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
+ 0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
+ 0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
+ 0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
+ 0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
+ 0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
+ 0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
+ 0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
+ 0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
+ 0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
+ 0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
+ 0x00ef: 0x2116, # NUMERO SIGN
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
+ 0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
+ 0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
+ 0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
+ 0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
+ 0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
+ 0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
+ 0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
+ 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
+ 0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
+ 0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
+ 0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
+ 0x00fd: 0x00a7, # SECTION SIGN
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
+ '\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
+ '\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
+ '\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
+ '\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
+ '\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
+ '\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
+ '\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ '\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
+ '\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
+ '\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ '\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ '\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
+ '\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
+ '\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
+ '\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
+ '\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
+ '\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
+ '\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
+ '\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
+ '\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
+ '\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
+ '\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
+ '\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
+ '\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
+ '\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
+ '\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
+ '\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
+ '\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
+ '\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
+ '\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
+ '\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
+ '\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
+ '\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
+ '\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
+ '\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
+ '\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
+ '\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
+ '\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
+ '\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
+ '\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
+ '\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
+ '\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
+ '\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
+ '\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
+ '\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
+ '\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
+ '\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
+ '\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
+ '\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
+ '\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\xa4' # 0x00cf -> CURRENCY SIGN
+ '\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
+ '\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
+ '\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
+ '\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
+ '\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
+ '\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
+ '\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
+ '\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
+ '\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
+ '\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
+ '\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
+ '\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
+ '\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
+ '\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
+ '\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
+ '\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
+ '\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
+ '\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
+ '\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
+ '\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
+ '\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
+ '\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
+ '\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
+ '\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
+ '\u2116' # 0x00ef -> NUMERO SIGN
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
+ '\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
+ '\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
+ '\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
+ '\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
+ '\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
+ '\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
+ '\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
+ '\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
+ '\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
+ '\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
+ '\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
+ '\xa7' # 0x00fd -> SECTION SIGN
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a4: 0x00cf, # CURRENCY SIGN
+ 0x00a7: 0x00fd, # SECTION SIGN
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
+ 0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
+ 0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
+ 0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ 0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
+ 0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
+ 0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
+ 0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
+ 0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
+ 0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
+ 0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
+ 0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
+ 0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
+ 0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
+ 0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
+ 0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
+ 0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
+ 0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
+ 0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
+ 0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
+ 0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
+ 0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
+ 0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
+ 0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
+ 0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
+ 0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
+ 0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
+ 0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
+ 0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
+ 0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
+ 0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
+ 0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
+ 0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
+ 0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
+ 0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
+ 0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
+ 0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
+ 0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
+ 0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
+ 0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
+ 0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
+ 0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
+ 0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
+ 0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
+ 0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
+ 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
+ 0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
+ 0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
+ 0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
+ 0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
+ 0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
+ 0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
+ 0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
+ 0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
+ 0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
+ 0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
+ 0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
+ 0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
+ 0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
+ 0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
+ 0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
+ 0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
+ 0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
+ 0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
+ 0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
+ 0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
+ 0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
+ 0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
+ 0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
+ 0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
+ 0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
+ 0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
+ 0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
+ 0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
+ 0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
+ 0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
+ 0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
+ 0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
+ 0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
+ 0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
+ 0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
+ 0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
+ 0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
+ 0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
+ 0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
+ 0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
+ 0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
+ 0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
+ 0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
+ 0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
+ 0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
+ 0x2116: 0x00ef, # NUMERO SIGN
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
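The getregentry() hook defined earlier in this file is what exposes the module to the codecs registry under the plain name 'cp855', and the IncrementalDecoder class is what makes chunked decoding work. A minimal sketch follows; the byte values are taken from the table above, and the registry lookup is standard codecs machinery rather than anything specific to this patch.

    # Illustrative sketch, not part of the patch: cp855 via the codec registry.
    import codecs

    info = codecs.lookup("cp855")        # CodecInfo produced by getregentry()
    assert info.name == "cp855"

    dec = info.incrementaldecoder()      # IncrementalDecoder with errors='strict'
    chunks = [b"\xa1\xa3", b"\xeb"]      # 0xA1 -> А, 0xA3 -> Б, 0xEB -> в
    text = "".join(dec.decode(c) for c in chunks) + dec.decode(b"", final=True)
    assert text == "АБв"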
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp856.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp856.py
new file mode 100644
index 00000000..cacbfb2f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp856.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp856',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u05d0' # 0x80 -> HEBREW LETTER ALEF
+ '\u05d1' # 0x81 -> HEBREW LETTER BET
+ '\u05d2' # 0x82 -> HEBREW LETTER GIMEL
+ '\u05d3' # 0x83 -> HEBREW LETTER DALET
+ '\u05d4' # 0x84 -> HEBREW LETTER HE
+ '\u05d5' # 0x85 -> HEBREW LETTER VAV
+ '\u05d6' # 0x86 -> HEBREW LETTER ZAYIN
+ '\u05d7' # 0x87 -> HEBREW LETTER HET
+ '\u05d8' # 0x88 -> HEBREW LETTER TET
+ '\u05d9' # 0x89 -> HEBREW LETTER YOD
+ '\u05da' # 0x8A -> HEBREW LETTER FINAL KAF
+ '\u05db' # 0x8B -> HEBREW LETTER KAF
+ '\u05dc' # 0x8C -> HEBREW LETTER LAMED
+ '\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM
+ '\u05de' # 0x8E -> HEBREW LETTER MEM
+ '\u05df' # 0x8F -> HEBREW LETTER FINAL NUN
+ '\u05e0' # 0x90 -> HEBREW LETTER NUN
+ '\u05e1' # 0x91 -> HEBREW LETTER SAMEKH
+ '\u05e2' # 0x92 -> HEBREW LETTER AYIN
+ '\u05e3' # 0x93 -> HEBREW LETTER FINAL PE
+ '\u05e4' # 0x94 -> HEBREW LETTER PE
+ '\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI
+ '\u05e6' # 0x96 -> HEBREW LETTER TSADI
+ '\u05e7' # 0x97 -> HEBREW LETTER QOF
+ '\u05e8' # 0x98 -> HEBREW LETTER RESH
+ '\u05e9' # 0x99 -> HEBREW LETTER SHIN
+ '\u05ea' # 0x9A -> HEBREW LETTER TAV
+ '\ufffe' # 0x9B -> UNDEFINED
+ '\xa3' # 0x9C -> POUND SIGN
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\xd7' # 0x9E -> MULTIPLICATION SIGN
+ '\ufffe' # 0x9F -> UNDEFINED
+ '\ufffe' # 0xA0 -> UNDEFINED
+ '\ufffe' # 0xA1 -> UNDEFINED
+ '\ufffe' # 0xA2 -> UNDEFINED
+ '\ufffe' # 0xA3 -> UNDEFINED
+ '\ufffe' # 0xA4 -> UNDEFINED
+ '\ufffe' # 0xA5 -> UNDEFINED
+ '\ufffe' # 0xA6 -> UNDEFINED
+ '\ufffe' # 0xA7 -> UNDEFINED
+ '\ufffe' # 0xA8 -> UNDEFINED
+ '\xae' # 0xA9 -> REGISTERED SIGN
+ '\xac' # 0xAA -> NOT SIGN
+ '\xbd' # 0xAB -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER
+ '\ufffe' # 0xAD -> UNDEFINED
+ '\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0xB0 -> LIGHT SHADE
+ '\u2592' # 0xB1 -> MEDIUM SHADE
+ '\u2593' # 0xB2 -> DARK SHADE
+ '\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\ufffe' # 0xB5 -> UNDEFINED
+ '\ufffe' # 0xB6 -> UNDEFINED
+ '\ufffe' # 0xB7 -> UNDEFINED
+ '\xa9' # 0xB8 -> COPYRIGHT SIGN
+ '\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\xa2' # 0xBD -> CENT SIGN
+ '\xa5' # 0xBE -> YEN SIGN
+ '\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\ufffe' # 0xC6 -> UNDEFINED
+ '\ufffe' # 0xC7 -> UNDEFINED
+ '\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\xa4' # 0xCF -> CURRENCY SIGN
+ '\ufffe' # 0xD0 -> UNDEFINED
+ '\ufffe' # 0xD1 -> UNDEFINED
+ '\ufffe' # 0xD2 -> UNDEFINED
+ '\ufffe' # 0xD3 -> UNDEFINED
+ '\ufffe' # 0xD4 -> UNDEFINED
+ '\ufffe' # 0xD5 -> UNDEFINED
+ '\ufffe' # 0xD6 -> UNDEFINED
+ '\ufffe' # 0xD7 -> UNDEFINED
+ '\ufffe' # 0xD8 -> UNDEFINED
+ '\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0xDB -> FULL BLOCK
+ '\u2584' # 0xDC -> LOWER HALF BLOCK
+ '\xa6' # 0xDD -> BROKEN BAR
+ '\ufffe' # 0xDE -> UNDEFINED
+ '\u2580' # 0xDF -> UPPER HALF BLOCK
+ '\ufffe' # 0xE0 -> UNDEFINED
+ '\ufffe' # 0xE1 -> UNDEFINED
+ '\ufffe' # 0xE2 -> UNDEFINED
+ '\ufffe' # 0xE3 -> UNDEFINED
+ '\ufffe' # 0xE4 -> UNDEFINED
+ '\ufffe' # 0xE5 -> UNDEFINED
+ '\xb5' # 0xE6 -> MICRO SIGN
+ '\ufffe' # 0xE7 -> UNDEFINED
+ '\ufffe' # 0xE8 -> UNDEFINED
+ '\ufffe' # 0xE9 -> UNDEFINED
+ '\ufffe' # 0xEA -> UNDEFINED
+ '\ufffe' # 0xEB -> UNDEFINED
+ '\ufffe' # 0xEC -> UNDEFINED
+ '\ufffe' # 0xED -> UNDEFINED
+ '\xaf' # 0xEE -> MACRON
+ '\xb4' # 0xEF -> ACUTE ACCENT
+ '\xad' # 0xF0 -> SOFT HYPHEN
+ '\xb1' # 0xF1 -> PLUS-MINUS SIGN
+ '\u2017' # 0xF2 -> DOUBLE LOW LINE
+ '\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS
+ '\xb6' # 0xF4 -> PILCROW SIGN
+ '\xa7' # 0xF5 -> SECTION SIGN
+ '\xf7' # 0xF6 -> DIVISION SIGN
+ '\xb8' # 0xF7 -> CEDILLA
+ '\xb0' # 0xF8 -> DEGREE SIGN
+ '\xa8' # 0xF9 -> DIAERESIS
+ '\xb7' # 0xFA -> MIDDLE DOT
+ '\xb9' # 0xFB -> SUPERSCRIPT ONE
+ '\xb3' # 0xFC -> SUPERSCRIPT THREE
+ '\xb2' # 0xFD -> SUPERSCRIPT TWO
+ '\u25a0' # 0xFE -> BLACK SQUARE
+ '\xa0' # 0xFF -> NO-BREAK SPACE
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp857.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp857.py
new file mode 100644
index 00000000..741b059b
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp857.py
@@ -0,0 +1,694 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp857',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
+ 0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
+ 0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x00ae, # REGISTERED SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00b8: 0x00a9, # COPYRIGHT SIGN
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x00a2, # CENT SIGN
+ 0x00be: 0x00a5, # YEN SIGN
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
+ 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x00a4, # CURRENCY SIGN
+ 0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00d5: None, # UNDEFINED
+ 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x00a6, # BROKEN BAR
+ 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
+ 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: None, # UNDEFINED
+ 0x00e8: 0x00d7, # MULTIPLICATION SIGN
+ 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x00ee: 0x00af, # MACRON
+ 0x00ef: 0x00b4, # ACUTE ACCENT
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: None, # UNDEFINED
+ 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
+ 0x00f4: 0x00b6, # PILCROW SIGN
+ 0x00f5: 0x00a7, # SECTION SIGN
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x00b8, # CEDILLA
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x00a8, # DIAERESIS
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x00b9, # SUPERSCRIPT ONE
+ 0x00fc: 0x00b3, # SUPERSCRIPT THREE
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
+ '\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
+ '\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
+ '\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\xae' # 0x00a9 -> REGISTERED SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xa9' # 0x00b8 -> COPYRIGHT SIGN
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\xa2' # 0x00bd -> CENT SIGN
+ '\xa5' # 0x00be -> YEN SIGN
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
+ '\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\xa4' # 0x00cf -> CURRENCY SIGN
+ '\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR
+ '\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR
+ '\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\ufffe' # 0x00d5 -> UNDEFINED
+ '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\xa6' # 0x00dd -> BROKEN BAR
+ '\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
+ '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\ufffe' # 0x00e7 -> UNDEFINED
+ '\xd7' # 0x00e8 -> MULTIPLICATION SIGN
+ '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
+ '\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xaf' # 0x00ee -> MACRON
+ '\xb4' # 0x00ef -> ACUTE ACCENT
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\ufffe' # 0x00f2 -> UNDEFINED
+ '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
+ '\xb6' # 0x00f4 -> PILCROW SIGN
+ '\xa7' # 0x00f5 -> SECTION SIGN
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\xb8' # 0x00f7 -> CEDILLA
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\xa8' # 0x00f9 -> DIAERESIS
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\xb9' # 0x00fb -> SUPERSCRIPT ONE
+ '\xb3' # 0x00fc -> SUPERSCRIPT THREE
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a2: 0x00bd, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a4: 0x00cf, # CURRENCY SIGN
+ 0x00a5: 0x00be, # YEN SIGN
+ 0x00a6: 0x00dd, # BROKEN BAR
+ 0x00a7: 0x00f5, # SECTION SIGN
+ 0x00a8: 0x00f9, # DIAERESIS
+ 0x00a9: 0x00b8, # COPYRIGHT SIGN
+ 0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00ae: 0x00a9, # REGISTERED SIGN
+ 0x00af: 0x00ee, # MACRON
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b3: 0x00fc, # SUPERSCRIPT THREE
+ 0x00b4: 0x00ef, # ACUTE ACCENT
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b6: 0x00f4, # PILCROW SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00b8: 0x00f7, # CEDILLA
+ 0x00b9: 0x00fb, # SUPERSCRIPT ONE
+ 0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d7: 0x00e8, # MULTIPLICATION SIGN
+ 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
+ 0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
+ 0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
+ 0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
+ 0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
+ 0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
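Aside (not part of the patch): unlike cp856, cp857 and cp858 keep the older dict-based decoding_map/encoding_map pair alongside the decoding_table string, and their encode path hands the explicit encoding_map dict to codecs.charmap_encode. Table slots marked None or '\ufffe' are undefined, so the strict error handler rejects them. A hedged sketch, again assuming the modules import from this tree:

    import codecs
    from encodings.cp857 import decoding_table, encoding_map

    # cp857 drives charmap_encode with the explicit dict rather than a
    # charmap_build table; the result is the same (bytes, length) pair.
    raw, _ = codecs.charmap_encode("Ğğ", "strict", encoding_map)
    assert raw == b"\xa6\xa7"      # G/g with breve -> 0xA6 / 0xA7

    # Byte 0xD5 is marked undefined ('\ufffe' in decoding_table), so strict
    # decoding refuses it while the 'replace' handler substitutes U+FFFD.
    try:
        codecs.charmap_decode(b"\xd5", "strict", decoding_table)
    except UnicodeDecodeError:
        pass
    assert codecs.charmap_decode(b"\xd5", "replace", decoding_table)[0] == "\ufffd"
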
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp858.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp858.py
new file mode 100644
index 00000000..7579f525
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp858.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec for CP858, modified from cp850.
+
+"""
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp858',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x009e: 0x00d7, # MULTIPLICATION SIGN
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x00ae, # REGISTERED SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00b8: 0x00a9, # COPYRIGHT SIGN
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x00a2, # CENT SIGN
+ 0x00be: 0x00a5, # YEN SIGN
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
+ 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x00a4, # CURRENCY SIGN
+ 0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
+ 0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
+ 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00d5: 0x20ac, # EURO SIGN
+ 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x00a6, # BROKEN BAR
+ 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
+ 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
+ 0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
+ 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00ee: 0x00af, # MACRON
+ 0x00ef: 0x00b4, # ACUTE ACCENT
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2017, # DOUBLE LOW LINE
+ 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
+ 0x00f4: 0x00b6, # PILCROW SIGN
+ 0x00f5: 0x00a7, # SECTION SIGN
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x00b8, # CEDILLA
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x00a8, # DIAERESIS
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x00b9, # SUPERSCRIPT ONE
+ 0x00fc: 0x00b3, # SUPERSCRIPT THREE
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
+ '\xd7' # 0x009e -> MULTIPLICATION SIGN
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\xae' # 0x00a9 -> REGISTERED SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xa9' # 0x00b8 -> COPYRIGHT SIGN
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\xa2' # 0x00bd -> CENT SIGN
+ '\xa5' # 0x00be -> YEN SIGN
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
+ '\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\xa4' # 0x00cf -> CURRENCY SIGN
+ '\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
+ '\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
+ '\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\u20ac' # 0x00d5 -> EURO SIGN
+ '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\xa6' # 0x00dd -> BROKEN BAR
+ '\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
+ '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
+ '\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
+ '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xaf' # 0x00ee -> MACRON
+ '\xb4' # 0x00ef -> ACUTE ACCENT
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2017' # 0x00f2 -> DOUBLE LOW LINE
+ '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
+ '\xb6' # 0x00f4 -> PILCROW SIGN
+ '\xa7' # 0x00f5 -> SECTION SIGN
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\xb8' # 0x00f7 -> CEDILLA
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\xa8' # 0x00f9 -> DIAERESIS
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\xb9' # 0x00fb -> SUPERSCRIPT ONE
+ '\xb3' # 0x00fc -> SUPERSCRIPT THREE
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a2: 0x00bd, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a4: 0x00cf, # CURRENCY SIGN
+ 0x00a5: 0x00be, # YEN SIGN
+ 0x00a6: 0x00dd, # BROKEN BAR
+ 0x00a7: 0x00f5, # SECTION SIGN
+ 0x00a8: 0x00f9, # DIAERESIS
+ 0x00a9: 0x00b8, # COPYRIGHT SIGN
+ 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00ae: 0x00a9, # REGISTERED SIGN
+ 0x00af: 0x00ee, # MACRON
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b3: 0x00fc, # SUPERSCRIPT THREE
+ 0x00b4: 0x00ef, # ACUTE ACCENT
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b6: 0x00f4, # PILCROW SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00b8: 0x00f7, # CEDILLA
+ 0x00b9: 0x00fb, # SUPERSCRIPT ONE
+ 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d7: 0x009e, # MULTIPLICATION SIGN
+ 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
+ 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x20ac: 0x00d5, # EURO SIGN
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x2017: 0x00f2, # DOUBLE LOW LINE
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp860.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp860.py
new file mode 100644
index 00000000..65903e74
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp860.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp860',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
+ 0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
+ 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
+ 0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00a2, # CENT SIGN
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x009e: 0x20a7, # PESETA SIGN
+ 0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00ec: 0x221e, # INFINITY
+ 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00ef: 0x2229, # INTERSECTION
+ 0x00f0: 0x2261, # IDENTICAL TO
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x2320, # TOP HALF INTEGRAL
+ 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
+ '\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE
+ '\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xc8' # 0x0092 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE
+ '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE
+ '\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xa2' # 0x009b -> CENT SIGN
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\u20a7' # 0x009e -> PESETA SIGN
+ '\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
+ '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
+ '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
+ '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
+ '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
+ '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
+ '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
+ '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
+ '\u221e' # 0x00ec -> INFINITY
+ '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
+ '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
+ '\u2229' # 0x00ef -> INTERSECTION
+ '\u2261' # 0x00f0 -> IDENTICAL TO
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
+ '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a2: 0x009b, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c0: 0x0091, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00c1: 0x0086, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00c2: 0x008f, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00c3: 0x008e, # LATIN CAPITAL LETTER A WITH TILDE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c8: 0x0092, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00ca: 0x0089, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00cc: 0x0098, # LATIN CAPITAL LETTER I WITH GRAVE
+ 0x00cd: 0x008b, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00d2: 0x00a9, # LATIN CAPITAL LETTER O WITH GRAVE
+ 0x00d3: 0x009f, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d4: 0x008c, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00d5: 0x0099, # LATIN CAPITAL LETTER O WITH TILDE
+ 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00da: 0x0096, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e3: 0x0084, # LATIN SMALL LETTER A WITH TILDE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f5: 0x0094, # LATIN SMALL LETTER O WITH TILDE
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
+ 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
+ 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
+ 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
+ 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
+ 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
+ 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
+ 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
+ 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
+ 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x20a7: 0x009e, # PESETA SIGN
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x221e: 0x00ec, # INFINITY
+ 0x2229: 0x00ef, # INTERSECTION
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2261: 0x00f0, # IDENTICAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2320: 0x00f4, # TOP HALF INTEGRAL
+ 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp861.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp861.py
new file mode 100644
index 00000000..860a05fa
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp861.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp861',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
+ 0x008c: 0x00f0, # LATIN SMALL LETTER ETH
+ 0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x00fe, # LATIN SMALL LETTER THORN
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x009e: 0x20a7, # PESETA SIGN
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x2310, # REVERSED NOT SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00ec: 0x221e, # INFINITY
+ 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00ef: 0x2229, # INTERSECTION
+ 0x00f0: 0x2261, # IDENTICAL TO
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x2320, # TOP HALF INTEGRAL
+ 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
+ '\xf0' # 0x008c -> LATIN SMALL LETTER ETH
+ '\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
+ '\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
+ '\u20a7' # 0x009e -> PESETA SIGN
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
+ '\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
+ '\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
+ '\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\u2310' # 0x00a9 -> REVERSED NOT SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
+ '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
+ '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
+ '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
+ '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
+ '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
+ '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
+ '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
+ '\u221e' # 0x00ec -> INFINITY
+ '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
+ '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
+ '\u2229' # 0x00ef -> INTERSECTION
+ '\u2261' # 0x00f0 -> IDENTICAL TO
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
+ '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
+ 0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
+ 0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
+ 0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00f0: 0x008c, # LATIN SMALL LETTER ETH
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
+ 0x00fe: 0x0095, # LATIN SMALL LETTER THORN
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
+ 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
+ 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
+ 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
+ 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
+ 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
+ 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
+ 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
+ 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
+ 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x20a7: 0x009e, # PESETA SIGN
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x221e: 0x00ec, # INFINITY
+ 0x2229: 0x00ef, # INTERSECTION
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2261: 0x00f0, # IDENTICAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2310: 0x00a9, # REVERSED NOT SIGN
+ 0x2320: 0x00f4, # TOP HALF INTEGRAL
+ 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
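
(Not part of the patch, just an aside on the structure these generated modules share: decoding_map is the sparse dict form of the byte-to-code-point mapping, decoding_table is the dense 256-character string that Codec.decode actually passes to codecs.charmap_decode, and encoding_map is the inverse mapping used by charmap_encode. A minimal consistency sketch, written here against the cp862 module added in the next hunk purely as an example; any of these generated code-page modules exposes the same three attributes.)

    import codecs
    from encodings import cp862  # assumption: any generated code-page module works the same way

    # The 256-character decoding_table and the decoding_map dict carry the same
    # information; the table is just the dense form used by charmap_decode.
    assert len(cp862.decoding_table) == 256
    assert cp862.decoding_map == {b: ord(ch) for b, ch in enumerate(cp862.decoding_table)}

    # encoding_map is the inverse mapping, as produced by codecs.make_encoding_map.
    assert cp862.encoding_map == codecs.make_encoding_map(cp862.decoding_map)
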
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp862.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp862.py
new file mode 100644
index 00000000..3df22f99
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp862.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp862',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x05d0, # HEBREW LETTER ALEF
+ 0x0081: 0x05d1, # HEBREW LETTER BET
+ 0x0082: 0x05d2, # HEBREW LETTER GIMEL
+ 0x0083: 0x05d3, # HEBREW LETTER DALET
+ 0x0084: 0x05d4, # HEBREW LETTER HE
+ 0x0085: 0x05d5, # HEBREW LETTER VAV
+ 0x0086: 0x05d6, # HEBREW LETTER ZAYIN
+ 0x0087: 0x05d7, # HEBREW LETTER HET
+ 0x0088: 0x05d8, # HEBREW LETTER TET
+ 0x0089: 0x05d9, # HEBREW LETTER YOD
+ 0x008a: 0x05da, # HEBREW LETTER FINAL KAF
+ 0x008b: 0x05db, # HEBREW LETTER KAF
+ 0x008c: 0x05dc, # HEBREW LETTER LAMED
+ 0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
+ 0x008e: 0x05de, # HEBREW LETTER MEM
+ 0x008f: 0x05df, # HEBREW LETTER FINAL NUN
+ 0x0090: 0x05e0, # HEBREW LETTER NUN
+ 0x0091: 0x05e1, # HEBREW LETTER SAMEKH
+ 0x0092: 0x05e2, # HEBREW LETTER AYIN
+ 0x0093: 0x05e3, # HEBREW LETTER FINAL PE
+ 0x0094: 0x05e4, # HEBREW LETTER PE
+ 0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
+ 0x0096: 0x05e6, # HEBREW LETTER TSADI
+ 0x0097: 0x05e7, # HEBREW LETTER QOF
+ 0x0098: 0x05e8, # HEBREW LETTER RESH
+ 0x0099: 0x05e9, # HEBREW LETTER SHIN
+ 0x009a: 0x05ea, # HEBREW LETTER TAV
+ 0x009b: 0x00a2, # CENT SIGN
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00a5, # YEN SIGN
+ 0x009e: 0x20a7, # PESETA SIGN
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x2310, # REVERSED NOT SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
+ 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00ec: 0x221e, # INFINITY
+ 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00ef: 0x2229, # INTERSECTION
+ 0x00f0: 0x2261, # IDENTICAL TO
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x2320, # TOP HALF INTEGRAL
+ 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\u05d0' # 0x0080 -> HEBREW LETTER ALEF
+ '\u05d1' # 0x0081 -> HEBREW LETTER BET
+ '\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
+ '\u05d3' # 0x0083 -> HEBREW LETTER DALET
+ '\u05d4' # 0x0084 -> HEBREW LETTER HE
+ '\u05d5' # 0x0085 -> HEBREW LETTER VAV
+ '\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
+ '\u05d7' # 0x0087 -> HEBREW LETTER HET
+ '\u05d8' # 0x0088 -> HEBREW LETTER TET
+ '\u05d9' # 0x0089 -> HEBREW LETTER YOD
+ '\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
+ '\u05db' # 0x008b -> HEBREW LETTER KAF
+ '\u05dc' # 0x008c -> HEBREW LETTER LAMED
+ '\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
+ '\u05de' # 0x008e -> HEBREW LETTER MEM
+ '\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
+ '\u05e0' # 0x0090 -> HEBREW LETTER NUN
+ '\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
+ '\u05e2' # 0x0092 -> HEBREW LETTER AYIN
+ '\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
+ '\u05e4' # 0x0094 -> HEBREW LETTER PE
+ '\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
+ '\u05e6' # 0x0096 -> HEBREW LETTER TSADI
+ '\u05e7' # 0x0097 -> HEBREW LETTER QOF
+ '\u05e8' # 0x0098 -> HEBREW LETTER RESH
+ '\u05e9' # 0x0099 -> HEBREW LETTER SHIN
+ '\u05ea' # 0x009a -> HEBREW LETTER TAV
+ '\xa2' # 0x009b -> CENT SIGN
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xa5' # 0x009d -> YEN SIGN
+ '\u20a7' # 0x009e -> PESETA SIGN
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\u2310' # 0x00a9 -> REVERSED NOT SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
+ '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
+ '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
+ '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
+ '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
+ '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
+ '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
+ '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
+ '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
+ '\u221e' # 0x00ec -> INFINITY
+ '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
+ '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
+ '\u2229' # 0x00ef -> INTERSECTION
+ '\u2261' # 0x00f0 -> IDENTICAL TO
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
+ '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a2: 0x009b, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a5: 0x009d, # YEN SIGN
+ 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
+ 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
+ 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
+ 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
+ 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
+ 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
+ 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
+ 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
+ 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
+ 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
+ 0x05d0: 0x0080, # HEBREW LETTER ALEF
+ 0x05d1: 0x0081, # HEBREW LETTER BET
+ 0x05d2: 0x0082, # HEBREW LETTER GIMEL
+ 0x05d3: 0x0083, # HEBREW LETTER DALET
+ 0x05d4: 0x0084, # HEBREW LETTER HE
+ 0x05d5: 0x0085, # HEBREW LETTER VAV
+ 0x05d6: 0x0086, # HEBREW LETTER ZAYIN
+ 0x05d7: 0x0087, # HEBREW LETTER HET
+ 0x05d8: 0x0088, # HEBREW LETTER TET
+ 0x05d9: 0x0089, # HEBREW LETTER YOD
+ 0x05da: 0x008a, # HEBREW LETTER FINAL KAF
+ 0x05db: 0x008b, # HEBREW LETTER KAF
+ 0x05dc: 0x008c, # HEBREW LETTER LAMED
+ 0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
+ 0x05de: 0x008e, # HEBREW LETTER MEM
+ 0x05df: 0x008f, # HEBREW LETTER FINAL NUN
+ 0x05e0: 0x0090, # HEBREW LETTER NUN
+ 0x05e1: 0x0091, # HEBREW LETTER SAMEKH
+ 0x05e2: 0x0092, # HEBREW LETTER AYIN
+ 0x05e3: 0x0093, # HEBREW LETTER FINAL PE
+ 0x05e4: 0x0094, # HEBREW LETTER PE
+ 0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
+ 0x05e6: 0x0096, # HEBREW LETTER TSADI
+ 0x05e7: 0x0097, # HEBREW LETTER QOF
+ 0x05e8: 0x0098, # HEBREW LETTER RESH
+ 0x05e9: 0x0099, # HEBREW LETTER SHIN
+ 0x05ea: 0x009a, # HEBREW LETTER TAV
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x20a7: 0x009e, # PESETA SIGN
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x221e: 0x00ec, # INFINITY
+ 0x2229: 0x00ef, # INTERSECTION
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2261: 0x00f0, # IDENTICAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2310: 0x00a9, # REVERSED NOT SIGN
+ 0x2320: 0x00f4, # TOP HALF INTEGRAL
+ 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
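
(Again not part of the patch, but a quick round-trip sketch showing how the cp862 codec above behaves once registered via getregentry(). The expected byte/character pairs are read straight from the tables in this hunk: 0x80 decodes to HEBREW LETTER ALEF and 0x9B to CENT SIGN, while ASCII bytes pass through unchanged.)

    # Decode a few CP862 bytes and encode them back.
    raw = bytes([0x80, 0x9B, 0x41])          # alef, cent sign, 'A'
    text = raw.decode("cp862")
    assert text == "\u05d0\xa2A"
    assert text.encode("cp862") == raw

    # Characters with no entry in encoding_map raise UnicodeEncodeError under
    # errors='strict'; with errors='replace' charmap_encode substitutes '?'.
    assert "\u20ac".encode("cp862", errors="replace") == b"?"
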
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp863.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp863.py
new file mode 100644
index 00000000..764180b6
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp863.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp863',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00b6, # PILCROW SIGN
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x2017, # DOUBLE LOW LINE
+ 0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x008f: 0x00a7, # SECTION SIGN
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x00a4, # CURRENCY SIGN
+ 0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00a2, # CENT SIGN
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00a6, # BROKEN BAR
+ 0x00a1: 0x00b4, # ACUTE ACCENT
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00a8, # DIAERESIS
+ 0x00a5: 0x00b8, # CEDILLA
+ 0x00a6: 0x00b3, # SUPERSCRIPT THREE
+ 0x00a7: 0x00af, # MACRON
+ 0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00a9: 0x2310, # REVERSED NOT SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00ec: 0x221e, # INFINITY
+ 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00ef: 0x2229, # INTERSECTION
+ 0x00f0: 0x2261, # IDENTICAL TO
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x2320, # TOP HALF INTEGRAL
+ 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xb6' # 0x0086 -> PILCROW SIGN
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\u2017' # 0x008d -> DOUBLE LOW LINE
+ '\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
+ '\xa7' # 0x008f -> SECTION SIGN
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
+ '\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
+ '\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xa4' # 0x0098 -> CURRENCY SIGN
+ '\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xa2' # 0x009b -> CENT SIGN
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
+ '\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xa6' # 0x00a0 -> BROKEN BAR
+ '\xb4' # 0x00a1 -> ACUTE ACCENT
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xa8' # 0x00a4 -> DIAERESIS
+ '\xb8' # 0x00a5 -> CEDILLA
+ '\xb3' # 0x00a6 -> SUPERSCRIPT THREE
+ '\xaf' # 0x00a7 -> MACRON
+ '\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ '\u2310' # 0x00a9 -> REVERSED NOT SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
+ '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
+ '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
+ '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
+ '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
+ '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
+ '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
+ '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
+ '\u221e' # 0x00ec -> INFINITY
+ '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
+ '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
+ '\u2229' # 0x00ef -> INTERSECTION
+ '\u2261' # 0x00f0 -> IDENTICAL TO
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
+ '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a2: 0x009b, # CENT SIGN
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a4: 0x0098, # CURRENCY SIGN
+ 0x00a6: 0x00a0, # BROKEN BAR
+ 0x00a7: 0x008f, # SECTION SIGN
+ 0x00a8: 0x00a4, # DIAERESIS
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00af: 0x00a7, # MACRON
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b3: 0x00a6, # SUPERSCRIPT THREE
+ 0x00b4: 0x00a1, # ACUTE ACCENT
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b6: 0x0086, # PILCROW SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00b8: 0x00a5, # CEDILLA
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
+ 0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
+ 0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ 0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
+ 0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ 0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
+ 0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
+ 0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
+ 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
+ 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
+ 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
+ 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
+ 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
+ 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
+ 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
+ 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
+ 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
+ 0x2017: 0x008d, # DOUBLE LOW LINE
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x221e: 0x00ec, # INFINITY
+ 0x2229: 0x00ef, # INTERSECTION
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2261: 0x00f0, # IDENTICAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2310: 0x00a9, # REVERSED NOT SIGN
+ 0x2320: 0x00f4, # TOP HALF INTEGRAL
+ 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
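The decoding_table and encoding_map that close above are meant to round-trip: a byte decodes to a character, and that character encodes back to a byte that decodes to the same character. A minimal consistency sketch under that assumption (the import path in the comment is inferred from the file paths in this diff, not stated in the module itself):

def charmap_roundtrip_ok(decoding_table, encoding_map):
    """Return True if every defined byte round-trips through the two tables."""
    for byte in range(256):
        char = decoding_table[byte]
        if char == '\ufffe':             # gencodec's placeholder for undefined slots
            continue
        back = encoding_map.get(ord(char))
        if back is None or decoding_table[back] != char:
            return False
    return True

# e.g. (assumed module path):
# from encodings.cp863 import decoding_table, encoding_map
# assert charmap_roundtrip_ok(decoding_table, encoding_map)
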
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp864.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp864.py
new file mode 100644
index 00000000..53df482d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp864.py
@@ -0,0 +1,690 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp864',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
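The classes and the getregentry() hook above are the standard gencodec boilerplate; once the module is importable from the encodings package, the codec is reachable through the normal codecs machinery by its registered name. A short usage sketch:

import codecs

info = codecs.lookup('cp864')           # resolved via getregentry() above
assert info.name == 'cp864'
enc = info.incrementalencoder()         # the IncrementalEncoder defined above
print(enc.encode('abc', final=True))    # -> b'abc'; the letter range is identity-mapped
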
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0025: 0x066a, # ARABIC PERCENT SIGN
+ 0x0080: 0x00b0, # DEGREE SIGN
+ 0x0081: 0x00b7, # MIDDLE DOT
+ 0x0082: 0x2219, # BULLET OPERATOR
+ 0x0083: 0x221a, # SQUARE ROOT
+ 0x0084: 0x2592, # MEDIUM SHADE
+ 0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
+ 0x0086: 0x2502, # FORMS LIGHT VERTICAL
+ 0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
+ 0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
+ 0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
+ 0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
+ 0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
+ 0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
+ 0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
+ 0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
+ 0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
+ 0x0090: 0x03b2, # GREEK SMALL BETA
+ 0x0091: 0x221e, # INFINITY
+ 0x0092: 0x03c6, # GREEK SMALL PHI
+ 0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
+ 0x0094: 0x00bd, # FRACTION 1/2
+ 0x0095: 0x00bc, # FRACTION 1/4
+ 0x0096: 0x2248, # ALMOST EQUAL TO
+ 0x0097: 0x00ab, # LEFT POINTING GUILLEMET
+ 0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
+ 0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
+ 0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
+ 0x009b: None, # UNDEFINED
+ 0x009c: None, # UNDEFINED
+ 0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
+ 0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
+ 0x009f: None, # UNDEFINED
+ 0x00a1: 0x00ad, # SOFT HYPHEN
+ 0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
+ 0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
+ 0x00a6: None, # UNDEFINED
+ 0x00a7: None, # UNDEFINED
+ 0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
+ 0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
+ 0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
+ 0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
+ 0x00ac: 0x060c, # ARABIC COMMA
+ 0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
+ 0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
+ 0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
+ 0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
+ 0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
+ 0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
+ 0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
+ 0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
+ 0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
+ 0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
+ 0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
+ 0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
+ 0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
+ 0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
+ 0x00bb: 0x061b, # ARABIC SEMICOLON
+ 0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
+ 0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
+ 0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
+ 0x00bf: 0x061f, # ARABIC QUESTION MARK
+ 0x00c0: 0x00a2, # CENT SIGN
+ 0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
+ 0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
+ 0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
+ 0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
+ 0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
+ 0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
+ 0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
+ 0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
+ 0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
+ 0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
+ 0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
+ 0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
+ 0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
+ 0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
+ 0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
+ 0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
+ 0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
+ 0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
+ 0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
+ 0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
+ 0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
+ 0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
+ 0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
+ 0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
+ 0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
+ 0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
+ 0x00db: 0x00a6, # BROKEN VERTICAL BAR
+ 0x00dc: 0x00ac, # NOT SIGN
+ 0x00dd: 0x00f7, # DIVISION SIGN
+ 0x00de: 0x00d7, # MULTIPLICATION SIGN
+ 0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
+ 0x00e0: 0x0640, # ARABIC TATWEEL
+ 0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
+ 0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
+ 0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
+ 0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
+ 0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
+ 0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
+ 0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
+ 0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
+ 0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
+ 0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
+ 0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
+ 0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
+ 0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
+ 0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
+ 0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
+ 0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
+ 0x00f1: 0x0651, # ARABIC SHADDAH
+ 0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
+ 0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
+ 0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
+ 0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
+ 0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
+ 0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
+ 0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
+ 0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
+ 0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
+ 0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
+ 0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
+ 0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: None, # UNDEFINED
+})
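The None entries in the update above mark byte values that CP864 leaves undefined (they become '\ufffe' in the decoding table that follows), so strict decoding rejects them rather than guessing. A small sketch of the two usual error modes, using 0x9B from the map above:

try:
    b'\x9b'.decode('cp864')                          # errors='strict' is the default
except UnicodeDecodeError as exc:
    print('undefined byte rejected:', exc.reason)

print(b'\x9b'.decode('cp864', errors='replace'))     # -> '\ufffd'
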
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '\u066a' # 0x0025 -> ARABIC PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xb0' # 0x0080 -> DEGREE SIGN
+ '\xb7' # 0x0081 -> MIDDLE DOT
+ '\u2219' # 0x0082 -> BULLET OPERATOR
+ '\u221a' # 0x0083 -> SQUARE ROOT
+ '\u2592' # 0x0084 -> MEDIUM SHADE
+ '\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL
+ '\u2502' # 0x0086 -> FORMS LIGHT VERTICAL
+ '\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL
+ '\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT
+ '\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT
+ '\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL
+ '\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT
+ '\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT
+ '\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT
+ '\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT
+ '\u03b2' # 0x0090 -> GREEK SMALL BETA
+ '\u221e' # 0x0091 -> INFINITY
+ '\u03c6' # 0x0092 -> GREEK SMALL PHI
+ '\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN
+ '\xbd' # 0x0094 -> FRACTION 1/2
+ '\xbc' # 0x0095 -> FRACTION 1/4
+ '\u2248' # 0x0096 -> ALMOST EQUAL TO
+ '\xab' # 0x0097 -> LEFT POINTING GUILLEMET
+ '\xbb' # 0x0098 -> RIGHT POINTING GUILLEMET
+ '\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
+ '\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
+ '\ufffe' # 0x009b -> UNDEFINED
+ '\ufffe' # 0x009c -> UNDEFINED
+ '\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
+ '\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM
+ '\ufffe' # 0x009f -> UNDEFINED
+ '\xa0' # 0x00a0 -> NON-BREAKING SPACE
+ '\xad' # 0x00a1 -> SOFT HYPHEN
+ '\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
+ '\xa3' # 0x00a3 -> POUND SIGN
+ '\xa4' # 0x00a4 -> CURRENCY SIGN
+ '\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
+ '\ufffe' # 0x00a6 -> UNDEFINED
+ '\ufffe' # 0x00a7 -> UNDEFINED
+ '\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM
+ '\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM
+ '\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM
+ '\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM
+ '\u060c' # 0x00ac -> ARABIC COMMA
+ '\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM
+ '\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM
+ '\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM
+ '\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO
+ '\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE
+ '\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO
+ '\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE
+ '\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR
+ '\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE
+ '\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX
+ '\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN
+ '\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT
+ '\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE
+ '\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM
+ '\u061b' # 0x00bb -> ARABIC SEMICOLON
+ '\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM
+ '\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM
+ '\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM
+ '\u061f' # 0x00bf -> ARABIC QUESTION MARK
+ '\xa2' # 0x00c0 -> CENT SIGN
+ '\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM
+ '\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
+ '\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
+ '\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
+ '\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM
+ '\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
+ '\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM
+ '\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM
+ '\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
+ '\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM
+ '\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM
+ '\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM
+ '\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM
+ '\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM
+ '\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM
+ '\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM
+ '\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM
+ '\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM
+ '\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM
+ '\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM
+ '\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM
+ '\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM
+ '\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM
+ '\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM
+ '\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM
+ '\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM
+ '\xa6' # 0x00db -> BROKEN VERTICAL BAR
+ '\xac' # 0x00dc -> NOT SIGN
+ '\xf7' # 0x00dd -> DIVISION SIGN
+ '\xd7' # 0x00de -> MULTIPLICATION SIGN
+ '\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM
+ '\u0640' # 0x00e0 -> ARABIC TATWEEL
+ '\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM
+ '\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM
+ '\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM
+ '\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM
+ '\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM
+ '\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM
+ '\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM
+ '\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM
+ '\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM
+ '\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM
+ '\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM
+ '\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM
+ '\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM
+ '\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM
+ '\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM
+ '\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM
+ '\u0651' # 0x00f1 -> ARABIC SHADDAH
+ '\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM
+ '\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM
+ '\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM
+ '\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM
+ '\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM
+ '\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM
+ '\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM
+ '\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
+ '\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
+ '\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM
+ '\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM
+ '\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\ufffe' # 0x00ff -> UNDEFINED
+)
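The finished table places the Arabic-Indic digits at 0xB0-0xB9, which makes for an easy spot check of the mapping (the inverse direction goes through the encoding map defined just below):

raw = bytes(range(0xb0, 0xba))                       # the ten digit positions above
assert raw.decode('cp864') == ''.join(chr(0x0660 + i) for i in range(10))
assert '\u0660\u0669'.encode('cp864') == b'\xb0\xb9'
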
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00a0, # NON-BREAKING SPACE
+ 0x00a2: 0x00c0, # CENT SIGN
+ 0x00a3: 0x00a3, # POUND SIGN
+ 0x00a4: 0x00a4, # CURRENCY SIGN
+ 0x00a6: 0x00db, # BROKEN VERTICAL BAR
+ 0x00ab: 0x0097, # LEFT POINTING GUILLEMET
+ 0x00ac: 0x00dc, # NOT SIGN
+ 0x00ad: 0x00a1, # SOFT HYPHEN
+ 0x00b0: 0x0080, # DEGREE SIGN
+ 0x00b1: 0x0093, # PLUS-OR-MINUS SIGN
+ 0x00b7: 0x0081, # MIDDLE DOT
+ 0x00bb: 0x0098, # RIGHT POINTING GUILLEMET
+ 0x00bc: 0x0095, # FRACTION 1/4
+ 0x00bd: 0x0094, # FRACTION 1/2
+ 0x00d7: 0x00de, # MULTIPLICATION SIGN
+ 0x00f7: 0x00dd, # DIVISION SIGN
+ 0x03b2: 0x0090, # GREEK SMALL BETA
+ 0x03c6: 0x0092, # GREEK SMALL PHI
+ 0x060c: 0x00ac, # ARABIC COMMA
+ 0x061b: 0x00bb, # ARABIC SEMICOLON
+ 0x061f: 0x00bf, # ARABIC QUESTION MARK
+ 0x0640: 0x00e0, # ARABIC TATWEEL
+ 0x0651: 0x00f1, # ARABIC SHADDAH
+ 0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO
+ 0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE
+ 0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO
+ 0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE
+ 0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR
+ 0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE
+ 0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX
+ 0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN
+ 0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT
+ 0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE
+ 0x066a: 0x0025, # ARABIC PERCENT SIGN
+ 0x2219: 0x0082, # BULLET OPERATOR
+ 0x221a: 0x0083, # SQUARE ROOT
+ 0x221e: 0x0091, # INFINITY
+ 0x2248: 0x0096, # ALMOST EQUAL TO
+ 0x2500: 0x0085, # FORMS LIGHT HORIZONTAL
+ 0x2502: 0x0086, # FORMS LIGHT VERTICAL
+ 0x250c: 0x008d, # FORMS LIGHT DOWN AND RIGHT
+ 0x2510: 0x008c, # FORMS LIGHT DOWN AND LEFT
+ 0x2514: 0x008e, # FORMS LIGHT UP AND RIGHT
+ 0x2518: 0x008f, # FORMS LIGHT UP AND LEFT
+ 0x251c: 0x008a, # FORMS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x0088, # FORMS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x0089, # FORMS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x008b, # FORMS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x0087, # FORMS LIGHT VERTICAL AND HORIZONTAL
+ 0x2592: 0x0084, # MEDIUM SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+ 0xfe7d: 0x00f0, # ARABIC SHADDA MEDIAL FORM
+ 0xfe80: 0x00c1, # ARABIC LETTER HAMZA ISOLATED FORM
+ 0xfe81: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
+ 0xfe82: 0x00a2, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
+ 0xfe83: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
+ 0xfe84: 0x00a5, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
+ 0xfe85: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
+ 0xfe8b: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
+ 0xfe8d: 0x00c7, # ARABIC LETTER ALEF ISOLATED FORM
+ 0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM
+ 0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM
+ 0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM
+ 0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
+ 0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM
+ 0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM
+ 0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM
+ 0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM
+ 0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM
+ 0xfe9f: 0x00cc, # ARABIC LETTER JEEM INITIAL FORM
+ 0xfea1: 0x00ae, # ARABIC LETTER HAH ISOLATED FORM
+ 0xfea3: 0x00cd, # ARABIC LETTER HAH INITIAL FORM
+ 0xfea5: 0x00af, # ARABIC LETTER KHAH ISOLATED FORM
+ 0xfea7: 0x00ce, # ARABIC LETTER KHAH INITIAL FORM
+ 0xfea9: 0x00cf, # ARABIC LETTER DAL ISOLATED FORM
+ 0xfeab: 0x00d0, # ARABIC LETTER THAL ISOLATED FORM
+ 0xfead: 0x00d1, # ARABIC LETTER REH ISOLATED FORM
+ 0xfeaf: 0x00d2, # ARABIC LETTER ZAIN ISOLATED FORM
+ 0xfeb1: 0x00bc, # ARABIC LETTER SEEN ISOLATED FORM
+ 0xfeb3: 0x00d3, # ARABIC LETTER SEEN INITIAL FORM
+ 0xfeb5: 0x00bd, # ARABIC LETTER SHEEN ISOLATED FORM
+ 0xfeb7: 0x00d4, # ARABIC LETTER SHEEN INITIAL FORM
+ 0xfeb9: 0x00be, # ARABIC LETTER SAD ISOLATED FORM
+ 0xfebb: 0x00d5, # ARABIC LETTER SAD INITIAL FORM
+ 0xfebd: 0x00eb, # ARABIC LETTER DAD ISOLATED FORM
+ 0xfebf: 0x00d6, # ARABIC LETTER DAD INITIAL FORM
+ 0xfec1: 0x00d7, # ARABIC LETTER TAH ISOLATED FORM
+ 0xfec5: 0x00d8, # ARABIC LETTER ZAH ISOLATED FORM
+ 0xfec9: 0x00df, # ARABIC LETTER AIN ISOLATED FORM
+ 0xfeca: 0x00c5, # ARABIC LETTER AIN FINAL FORM
+ 0xfecb: 0x00d9, # ARABIC LETTER AIN INITIAL FORM
+ 0xfecc: 0x00ec, # ARABIC LETTER AIN MEDIAL FORM
+ 0xfecd: 0x00ee, # ARABIC LETTER GHAIN ISOLATED FORM
+ 0xfece: 0x00ed, # ARABIC LETTER GHAIN FINAL FORM
+ 0xfecf: 0x00da, # ARABIC LETTER GHAIN INITIAL FORM
+ 0xfed0: 0x00f7, # ARABIC LETTER GHAIN MEDIAL FORM
+ 0xfed1: 0x00ba, # ARABIC LETTER FEH ISOLATED FORM
+ 0xfed3: 0x00e1, # ARABIC LETTER FEH INITIAL FORM
+ 0xfed5: 0x00f8, # ARABIC LETTER QAF ISOLATED FORM
+ 0xfed7: 0x00e2, # ARABIC LETTER QAF INITIAL FORM
+ 0xfed9: 0x00fc, # ARABIC LETTER KAF ISOLATED FORM
+ 0xfedb: 0x00e3, # ARABIC LETTER KAF INITIAL FORM
+ 0xfedd: 0x00fb, # ARABIC LETTER LAM ISOLATED FORM
+ 0xfedf: 0x00e4, # ARABIC LETTER LAM INITIAL FORM
+ 0xfee1: 0x00ef, # ARABIC LETTER MEEM ISOLATED FORM
+ 0xfee3: 0x00e5, # ARABIC LETTER MEEM INITIAL FORM
+ 0xfee5: 0x00f2, # ARABIC LETTER NOON ISOLATED FORM
+ 0xfee7: 0x00e6, # ARABIC LETTER NOON INITIAL FORM
+ 0xfee9: 0x00f3, # ARABIC LETTER HEH ISOLATED FORM
+ 0xfeeb: 0x00e7, # ARABIC LETTER HEH INITIAL FORM
+ 0xfeec: 0x00f4, # ARABIC LETTER HEH MEDIAL FORM
+ 0xfeed: 0x00e8, # ARABIC LETTER WAW ISOLATED FORM
+ 0xfeef: 0x00e9, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
+ 0xfef0: 0x00f5, # ARABIC LETTER ALEF MAKSURA FINAL FORM
+ 0xfef1: 0x00fd, # ARABIC LETTER YEH ISOLATED FORM
+ 0xfef2: 0x00f6, # ARABIC LETTER YEH FINAL FORM
+ 0xfef3: 0x00ea, # ARABIC LETTER YEH INITIAL FORM
+ 0xfef5: 0x00f9, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
+ 0xfef6: 0x00fa, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
+ 0xfef7: 0x0099, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
+ 0xfef8: 0x009a, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
+ 0xfefb: 0x009d, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
+ 0xfefc: 0x009e, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
+}
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp865.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp865.py
new file mode 100644
index 00000000..6726cf3f
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp865.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp865',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
+ 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
+ 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
+ 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
+ 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
+ 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
+ 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
+ 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
+ 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x009e: 0x20a7, # PESETA SIGN
+ 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
+ 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
+ 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
+ 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
+ 0x00a8: 0x00bf, # INVERTED QUESTION MARK
+ 0x00a9: 0x2310, # REVERSED NOT SIGN
+ 0x00aa: 0x00ac, # NOT SIGN
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
+ 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00a4, # CURRENCY SIGN
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
+ 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00e6: 0x00b5, # MICRO SIGN
+ 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00ec: 0x221e, # INFINITY
+ 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00ef: 0x2229, # INTERSECTION
+ 0x00f0: 0x2261, # IDENTICAL TO
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
+ 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
+ 0x00f4: 0x2320, # TOP HALF INTEGRAL
+ 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
+ 0x00f6: 0x00f7, # DIVISION SIGN
+ 0x00f7: 0x2248, # ALMOST EQUAL TO
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x00fd: 0x00b2, # SUPERSCRIPT TWO
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
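The incremental classes earlier in this module wrap the same charmap calls, which is convenient when bytes arrive in chunks; with a single-byte code page each chunk decodes independently of the previous one. A short sketch using values from the map above:

import codecs

dec = codecs.getincrementaldecoder('cp865')()
chunks = (b'\x91', b'\x9b', b'\xaf')                 # ae, o-with-stroke, currency sign per the map
out = ''.join(dec.decode(chunk) for chunk in chunks)
assert out == '\xe6\xf8\xa4'                         # U+00E6 U+00F8 U+00A4
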
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
+ '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
+ '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
+ '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
+ '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
+ '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
+ '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
+ '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
+ '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
+ '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
+ '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
+ '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
+ '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
+ '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
+ '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
+ '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
+ '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
+ '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
+ '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
+ '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
+ '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
+ '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
+ '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
+ '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
+ '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
+ '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
+ '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
+ '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
+ '\xa3' # 0x009c -> POUND SIGN
+ '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
+ '\u20a7' # 0x009e -> PESETA SIGN
+ '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
+ '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
+ '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
+ '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
+ '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
+ '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
+ '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
+ '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
+ '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
+ '\xbf' # 0x00a8 -> INVERTED QUESTION MARK
+ '\u2310' # 0x00a9 -> REVERSED NOT SIGN
+ '\xac' # 0x00aa -> NOT SIGN
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
+ '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xa4' # 0x00af -> CURRENCY SIGN
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
+ '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
+ '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
+ '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
+ '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
+ '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
+ '\xb5' # 0x00e6 -> MICRO SIGN
+ '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
+ '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
+ '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
+ '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
+ '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
+ '\u221e' # 0x00ec -> INFINITY
+ '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
+ '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
+ '\u2229' # 0x00ef -> INTERSECTION
+ '\u2261' # 0x00f0 -> IDENTICAL TO
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
+ '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
+ '\u2320' # 0x00f4 -> TOP HALF INTEGRAL
+ '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
+ '\xf7' # 0x00f6 -> DIVISION SIGN
+ '\u2248' # 0x00f7 -> ALMOST EQUAL TO
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
+ '\xb2' # 0x00fd -> SUPERSCRIPT TWO
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a4: 0x00af, # CURRENCY SIGN
+ 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x00aa, # NOT SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x00fd, # SUPERSCRIPT TWO
+ 0x00b5: 0x00e6, # MICRO SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
+ 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x00bf: 0x00a8, # INVERTED QUESTION MARK
+ 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
+ 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
+ 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
+ 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
+ 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
+ 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
+ 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
+ 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
+ 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
+ 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
+ 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
+ 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
+ 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
+ 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
+ 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
+ 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
+ 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
+ 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
+ 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
+ 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
+ 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
+ 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
+ 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
+ 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
+ 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
+ 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
+ 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
+ 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
+ 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
+ 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
+ 0x00f7: 0x00f6, # DIVISION SIGN
+ 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
+ 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
+ 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
+ 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
+ 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
+ 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
+ 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
+ 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
+ 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
+ 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
+ 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
+ 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
+ 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
+ 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
+ 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
+ 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
+ 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
+ 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
+ 0x20a7: 0x009e, # PESETA SIGN
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x221e: 0x00ec, # INFINITY
+ 0x2229: 0x00ef, # INTERSECTION
+ 0x2248: 0x00f7, # ALMOST EQUAL TO
+ 0x2261: 0x00f0, # IDENTICAL TO
+ 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
+ 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
+ 0x2310: 0x00a9, # REVERSED NOT SIGN
+ 0x2320: 0x00f4, # TOP HALF INTEGRAL
+ 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
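The encoding_map that closes each of these generated codec modules is simply the inverse of its decoding map, which is how gencodec.py derives it. A minimal illustrative sketch of that relationship, using only the codecs helpers these modules already import (this sketch is not part of the patch itself):

    import codecs

    # Build a decoding map the way the generated modules do, then invert it.
    decoding_map = codecs.make_identity_dict(range(256))
    decoding_map.update({0x0080: 0x00c7})                 # e.g. byte 0x80 -> LATIN CAPITAL LETTER C WITH CEDILLA
    encoding_map = codecs.make_encoding_map(decoding_map) # inverse map; duplicate targets become None
    assert encoding_map[0x00c7] == 0x0080
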
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp866.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp866.py
new file mode 100644
index 00000000..bec7ae39
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp866.py
@@ -0,0 +1,698 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp866',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
+ 0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
+ 0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
+ 0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
+ 0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
+ 0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
+ 0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
+ 0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
+ 0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
+ 0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
+ 0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
+ 0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
+ 0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
+ 0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
+ 0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
+ 0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
+ 0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
+ 0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
+ 0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
+ 0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
+ 0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
+ 0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
+ 0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
+ 0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
+ 0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
+ 0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
+ 0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
+ 0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
+ 0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
+ 0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
+ 0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
+ 0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
+ 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
+ 0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
+ 0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
+ 0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
+ 0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
+ 0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
+ 0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
+ 0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
+ 0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
+ 0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
+ 0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
+ 0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
+ 0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
+ 0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
+ 0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
+ 0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x258c, # LEFT HALF BLOCK
+ 0x00de: 0x2590, # RIGHT HALF BLOCK
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
+ 0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
+ 0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
+ 0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
+ 0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
+ 0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
+ 0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
+ 0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
+ 0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
+ 0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
+ 0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
+ 0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
+ 0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
+ 0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
+ 0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
+ 0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
+ 0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
+ 0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
+ 0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ 0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
+ 0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
+ 0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
+ 0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
+ 0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x2219, # BULLET OPERATOR
+ 0x00fa: 0x00b7, # MIDDLE DOT
+ 0x00fb: 0x221a, # SQUARE ROOT
+ 0x00fc: 0x2116, # NUMERO SIGN
+ 0x00fd: 0x00a4, # CURRENCY SIGN
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
+ '\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
+ '\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
+ '\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
+ '\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
+ '\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
+ '\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
+ '\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
+ '\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
+ '\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
+ '\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
+ '\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
+ '\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
+ '\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
+ '\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
+ '\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
+ '\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
+ '\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
+ '\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
+ '\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
+ '\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
+ '\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
+ '\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
+ '\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
+ '\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
+ '\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
+ '\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
+ '\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
+ '\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
+ '\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
+ '\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
+ '\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
+ '\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
+ '\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
+ '\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
+ '\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
+ '\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
+ '\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
+ '\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
+ '\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
+ '\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
+ '\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
+ '\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
+ '\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
+ '\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
+ '\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
+ '\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
+ '\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u258c' # 0x00dd -> LEFT HALF BLOCK
+ '\u2590' # 0x00de -> RIGHT HALF BLOCK
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
+ '\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
+ '\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
+ '\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
+ '\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
+ '\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
+ '\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
+ '\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
+ '\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
+ '\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
+ '\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
+ '\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
+ '\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
+ '\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
+ '\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
+ '\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
+ '\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
+ '\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
+ '\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ '\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
+ '\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
+ '\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
+ '\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
+ '\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\u2219' # 0x00f9 -> BULLET OPERATOR
+ '\xb7' # 0x00fa -> MIDDLE DOT
+ '\u221a' # 0x00fb -> SQUARE ROOT
+ '\u2116' # 0x00fc -> NUMERO SIGN
+ '\xa4' # 0x00fd -> CURRENCY SIGN
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a4: 0x00fd, # CURRENCY SIGN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b7: 0x00fa, # MIDDLE DOT
+ 0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
+ 0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+ 0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
+ 0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
+ 0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
+ 0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
+ 0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
+ 0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
+ 0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
+ 0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
+ 0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
+ 0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
+ 0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
+ 0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
+ 0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
+ 0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
+ 0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
+ 0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
+ 0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
+ 0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
+ 0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
+ 0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
+ 0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
+ 0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
+ 0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
+ 0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
+ 0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
+ 0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
+ 0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
+ 0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
+ 0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
+ 0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
+ 0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
+ 0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
+ 0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
+ 0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
+ 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
+ 0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
+ 0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
+ 0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
+ 0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
+ 0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
+ 0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
+ 0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
+ 0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
+ 0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
+ 0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
+ 0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
+ 0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
+ 0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
+ 0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
+ 0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
+ 0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
+ 0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
+ 0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
+ 0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
+ 0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
+ 0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
+ 0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
+ 0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
+ 0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
+ 0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
+ 0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
+ 0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
+ 0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
+ 0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
+ 0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
+ 0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
+ 0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
+ 0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
+ 0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
+ 0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
+ 0x2116: 0x00fc, # NUMERO SIGN
+ 0x2219: 0x00f9, # BULLET OPERATOR
+ 0x221a: 0x00fb, # SQUARE ROOT
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+ 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+ 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+ 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+ 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+ 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+ 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+ 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+ 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+ 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x258c: 0x00dd, # LEFT HALF BLOCK
+ 0x2590: 0x00de, # RIGHT HALF BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
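Once this cp866 module is importable under the encodings package, the standard codec machinery resolves it by name through getregentry(). A small usage sketch of the round trip its encoding_map and decoding_table provide (illustrative only, not part of the patch):

    import codecs

    text = "Привет"                       # Cyrillic sample covered by the table above
    data = text.encode("cp866")           # charmap_encode via encoding_map
    assert data == b"\x8f\xe0\xa8\xa2\xa5\xe2"
    assert data.decode("cp866") == text   # charmap_decode via decoding_table

    info = codecs.lookup("cp866")         # resolved through getregentry()
    print(info.name)                      # -> 'cp866'
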
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp869.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp869.py
new file mode 100644
index 00000000..8d8a29b1
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp869.py
@@ -0,0 +1,689 @@
+""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP869.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_map)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_map)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp869',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+### Decoding Map
+
+decoding_map = codecs.make_identity_dict(range(256))
+decoding_map.update({
+ 0x0080: None, # UNDEFINED
+ 0x0081: None, # UNDEFINED
+ 0x0082: None, # UNDEFINED
+ 0x0083: None, # UNDEFINED
+ 0x0084: None, # UNDEFINED
+ 0x0085: None, # UNDEFINED
+ 0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
+ 0x0087: None, # UNDEFINED
+ 0x0088: 0x00b7, # MIDDLE DOT
+ 0x0089: 0x00ac, # NOT SIGN
+ 0x008a: 0x00a6, # BROKEN BAR
+ 0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK
+ 0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK
+ 0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
+ 0x008e: 0x2015, # HORIZONTAL BAR
+ 0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
+ 0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
+ 0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ 0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
+ 0x0093: None, # UNDEFINED
+ 0x0094: None, # UNDEFINED
+ 0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
+ 0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ 0x0097: 0x00a9, # COPYRIGHT SIGN
+ 0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
+ 0x0099: 0x00b2, # SUPERSCRIPT TWO
+ 0x009a: 0x00b3, # SUPERSCRIPT THREE
+ 0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
+ 0x009c: 0x00a3, # POUND SIGN
+ 0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
+ 0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
+ 0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
+ 0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ 0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+ 0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
+ 0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
+ 0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA
+ 0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA
+ 0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA
+ 0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA
+ 0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON
+ 0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA
+ 0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA
+ 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
+ 0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA
+ 0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA
+ 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00b0: 0x2591, # LIGHT SHADE
+ 0x00b1: 0x2592, # MEDIUM SHADE
+ 0x00b2: 0x2593, # DARK SHADE
+ 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
+ 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA
+ 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA
+ 0x00b7: 0x039c, # GREEK CAPITAL LETTER MU
+ 0x00b8: 0x039d, # GREEK CAPITAL LETTER NU
+ 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x00bd: 0x039e, # GREEK CAPITAL LETTER XI
+ 0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON
+ 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI
+ 0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO
+ 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA
+ 0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU
+ 0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON
+ 0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI
+ 0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI
+ 0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI
+ 0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA
+ 0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA
+ 0x00d7: 0x03b2, # GREEK SMALL LETTER BETA
+ 0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA
+ 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x00db: 0x2588, # FULL BLOCK
+ 0x00dc: 0x2584, # LOWER HALF BLOCK
+ 0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA
+ 0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON
+ 0x00df: 0x2580, # UPPER HALF BLOCK
+ 0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA
+ 0x00e1: 0x03b7, # GREEK SMALL LETTER ETA
+ 0x00e2: 0x03b8, # GREEK SMALL LETTER THETA
+ 0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA
+ 0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA
+ 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA
+ 0x00e6: 0x03bc, # GREEK SMALL LETTER MU
+ 0x00e7: 0x03bd, # GREEK SMALL LETTER NU
+ 0x00e8: 0x03be, # GREEK SMALL LETTER XI
+ 0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON
+ 0x00ea: 0x03c0, # GREEK SMALL LETTER PI
+ 0x00eb: 0x03c1, # GREEK SMALL LETTER RHO
+ 0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA
+ 0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
+ 0x00ee: 0x03c4, # GREEK SMALL LETTER TAU
+ 0x00ef: 0x0384, # GREEK TONOS
+ 0x00f0: 0x00ad, # SOFT HYPHEN
+ 0x00f1: 0x00b1, # PLUS-MINUS SIGN
+ 0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON
+ 0x00f3: 0x03c6, # GREEK SMALL LETTER PHI
+ 0x00f4: 0x03c7, # GREEK SMALL LETTER CHI
+ 0x00f5: 0x00a7, # SECTION SIGN
+ 0x00f6: 0x03c8, # GREEK SMALL LETTER PSI
+ 0x00f7: 0x0385, # GREEK DIALYTIKA TONOS
+ 0x00f8: 0x00b0, # DEGREE SIGN
+ 0x00f9: 0x00a8, # DIAERESIS
+ 0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA
+ 0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ 0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+ 0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
+ 0x00fe: 0x25a0, # BLACK SQUARE
+ 0x00ff: 0x00a0, # NO-BREAK SPACE
+})
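Unlike cp866, cp869 leaves several byte values undefined (the None entries above, which appear as '\ufffe' in the decoding table below), so strict decoding of those bytes fails. A short sketch of that behaviour (illustrative only, not part of the patch):

    # Bytes such as 0x80 have no cp869 meaning, so 'strict' decoding raises.
    try:
        b"\x80".decode("cp869")
    except UnicodeDecodeError as exc:
        print("undefined cp869 byte:", exc.reason)

    print(b"\x80".decode("cp869", "replace"))   # -> '\ufffd' (REPLACEMENT CHARACTER)
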
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x0000 -> NULL
+ '\x01' # 0x0001 -> START OF HEADING
+ '\x02' # 0x0002 -> START OF TEXT
+ '\x03' # 0x0003 -> END OF TEXT
+ '\x04' # 0x0004 -> END OF TRANSMISSION
+ '\x05' # 0x0005 -> ENQUIRY
+ '\x06' # 0x0006 -> ACKNOWLEDGE
+ '\x07' # 0x0007 -> BELL
+ '\x08' # 0x0008 -> BACKSPACE
+ '\t' # 0x0009 -> HORIZONTAL TABULATION
+ '\n' # 0x000a -> LINE FEED
+ '\x0b' # 0x000b -> VERTICAL TABULATION
+ '\x0c' # 0x000c -> FORM FEED
+ '\r' # 0x000d -> CARRIAGE RETURN
+ '\x0e' # 0x000e -> SHIFT OUT
+ '\x0f' # 0x000f -> SHIFT IN
+ '\x10' # 0x0010 -> DATA LINK ESCAPE
+ '\x11' # 0x0011 -> DEVICE CONTROL ONE
+ '\x12' # 0x0012 -> DEVICE CONTROL TWO
+ '\x13' # 0x0013 -> DEVICE CONTROL THREE
+ '\x14' # 0x0014 -> DEVICE CONTROL FOUR
+ '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x0016 -> SYNCHRONOUS IDLE
+ '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x0018 -> CANCEL
+ '\x19' # 0x0019 -> END OF MEDIUM
+ '\x1a' # 0x001a -> SUBSTITUTE
+ '\x1b' # 0x001b -> ESCAPE
+ '\x1c' # 0x001c -> FILE SEPARATOR
+ '\x1d' # 0x001d -> GROUP SEPARATOR
+ '\x1e' # 0x001e -> RECORD SEPARATOR
+ '\x1f' # 0x001f -> UNIT SEPARATOR
+ ' ' # 0x0020 -> SPACE
+ '!' # 0x0021 -> EXCLAMATION MARK
+ '"' # 0x0022 -> QUOTATION MARK
+ '#' # 0x0023 -> NUMBER SIGN
+ '$' # 0x0024 -> DOLLAR SIGN
+ '%' # 0x0025 -> PERCENT SIGN
+ '&' # 0x0026 -> AMPERSAND
+ "'" # 0x0027 -> APOSTROPHE
+ '(' # 0x0028 -> LEFT PARENTHESIS
+ ')' # 0x0029 -> RIGHT PARENTHESIS
+ '*' # 0x002a -> ASTERISK
+ '+' # 0x002b -> PLUS SIGN
+ ',' # 0x002c -> COMMA
+ '-' # 0x002d -> HYPHEN-MINUS
+ '.' # 0x002e -> FULL STOP
+ '/' # 0x002f -> SOLIDUS
+ '0' # 0x0030 -> DIGIT ZERO
+ '1' # 0x0031 -> DIGIT ONE
+ '2' # 0x0032 -> DIGIT TWO
+ '3' # 0x0033 -> DIGIT THREE
+ '4' # 0x0034 -> DIGIT FOUR
+ '5' # 0x0035 -> DIGIT FIVE
+ '6' # 0x0036 -> DIGIT SIX
+ '7' # 0x0037 -> DIGIT SEVEN
+ '8' # 0x0038 -> DIGIT EIGHT
+ '9' # 0x0039 -> DIGIT NINE
+ ':' # 0x003a -> COLON
+ ';' # 0x003b -> SEMICOLON
+ '<' # 0x003c -> LESS-THAN SIGN
+ '=' # 0x003d -> EQUALS SIGN
+ '>' # 0x003e -> GREATER-THAN SIGN
+ '?' # 0x003f -> QUESTION MARK
+ '@' # 0x0040 -> COMMERCIAL AT
+ 'A' # 0x0041 -> LATIN CAPITAL LETTER A
+ 'B' # 0x0042 -> LATIN CAPITAL LETTER B
+ 'C' # 0x0043 -> LATIN CAPITAL LETTER C
+ 'D' # 0x0044 -> LATIN CAPITAL LETTER D
+ 'E' # 0x0045 -> LATIN CAPITAL LETTER E
+ 'F' # 0x0046 -> LATIN CAPITAL LETTER F
+ 'G' # 0x0047 -> LATIN CAPITAL LETTER G
+ 'H' # 0x0048 -> LATIN CAPITAL LETTER H
+ 'I' # 0x0049 -> LATIN CAPITAL LETTER I
+ 'J' # 0x004a -> LATIN CAPITAL LETTER J
+ 'K' # 0x004b -> LATIN CAPITAL LETTER K
+ 'L' # 0x004c -> LATIN CAPITAL LETTER L
+ 'M' # 0x004d -> LATIN CAPITAL LETTER M
+ 'N' # 0x004e -> LATIN CAPITAL LETTER N
+ 'O' # 0x004f -> LATIN CAPITAL LETTER O
+ 'P' # 0x0050 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x0052 -> LATIN CAPITAL LETTER R
+ 'S' # 0x0053 -> LATIN CAPITAL LETTER S
+ 'T' # 0x0054 -> LATIN CAPITAL LETTER T
+ 'U' # 0x0055 -> LATIN CAPITAL LETTER U
+ 'V' # 0x0056 -> LATIN CAPITAL LETTER V
+ 'W' # 0x0057 -> LATIN CAPITAL LETTER W
+ 'X' # 0x0058 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x005a -> LATIN CAPITAL LETTER Z
+ '[' # 0x005b -> LEFT SQUARE BRACKET
+ '\\' # 0x005c -> REVERSE SOLIDUS
+ ']' # 0x005d -> RIGHT SQUARE BRACKET
+ '^' # 0x005e -> CIRCUMFLEX ACCENT
+ '_' # 0x005f -> LOW LINE
+ '`' # 0x0060 -> GRAVE ACCENT
+ 'a' # 0x0061 -> LATIN SMALL LETTER A
+ 'b' # 0x0062 -> LATIN SMALL LETTER B
+ 'c' # 0x0063 -> LATIN SMALL LETTER C
+ 'd' # 0x0064 -> LATIN SMALL LETTER D
+ 'e' # 0x0065 -> LATIN SMALL LETTER E
+ 'f' # 0x0066 -> LATIN SMALL LETTER F
+ 'g' # 0x0067 -> LATIN SMALL LETTER G
+ 'h' # 0x0068 -> LATIN SMALL LETTER H
+ 'i' # 0x0069 -> LATIN SMALL LETTER I
+ 'j' # 0x006a -> LATIN SMALL LETTER J
+ 'k' # 0x006b -> LATIN SMALL LETTER K
+ 'l' # 0x006c -> LATIN SMALL LETTER L
+ 'm' # 0x006d -> LATIN SMALL LETTER M
+ 'n' # 0x006e -> LATIN SMALL LETTER N
+ 'o' # 0x006f -> LATIN SMALL LETTER O
+ 'p' # 0x0070 -> LATIN SMALL LETTER P
+ 'q' # 0x0071 -> LATIN SMALL LETTER Q
+ 'r' # 0x0072 -> LATIN SMALL LETTER R
+ 's' # 0x0073 -> LATIN SMALL LETTER S
+ 't' # 0x0074 -> LATIN SMALL LETTER T
+ 'u' # 0x0075 -> LATIN SMALL LETTER U
+ 'v' # 0x0076 -> LATIN SMALL LETTER V
+ 'w' # 0x0077 -> LATIN SMALL LETTER W
+ 'x' # 0x0078 -> LATIN SMALL LETTER X
+ 'y' # 0x0079 -> LATIN SMALL LETTER Y
+ 'z' # 0x007a -> LATIN SMALL LETTER Z
+ '{' # 0x007b -> LEFT CURLY BRACKET
+ '|' # 0x007c -> VERTICAL LINE
+ '}' # 0x007d -> RIGHT CURLY BRACKET
+ '~' # 0x007e -> TILDE
+ '\x7f' # 0x007f -> DELETE
+ '\ufffe' # 0x0080 -> UNDEFINED
+ '\ufffe' # 0x0081 -> UNDEFINED
+ '\ufffe' # 0x0082 -> UNDEFINED
+ '\ufffe' # 0x0083 -> UNDEFINED
+ '\ufffe' # 0x0084 -> UNDEFINED
+ '\ufffe' # 0x0085 -> UNDEFINED
+ '\u0386' # 0x0086 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
+ '\ufffe' # 0x0087 -> UNDEFINED
+ '\xb7' # 0x0088 -> MIDDLE DOT
+ '\xac' # 0x0089 -> NOT SIGN
+ '\xa6' # 0x008a -> BROKEN BAR
+ '\u2018' # 0x008b -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x008c -> RIGHT SINGLE QUOTATION MARK
+ '\u0388' # 0x008d -> GREEK CAPITAL LETTER EPSILON WITH TONOS
+ '\u2015' # 0x008e -> HORIZONTAL BAR
+ '\u0389' # 0x008f -> GREEK CAPITAL LETTER ETA WITH TONOS
+ '\u038a' # 0x0090 -> GREEK CAPITAL LETTER IOTA WITH TONOS
+ '\u03aa' # 0x0091 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ '\u038c' # 0x0092 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
+ '\ufffe' # 0x0093 -> UNDEFINED
+ '\ufffe' # 0x0094 -> UNDEFINED
+ '\u038e' # 0x0095 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
+ '\u03ab' # 0x0096 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ '\xa9' # 0x0097 -> COPYRIGHT SIGN
+ '\u038f' # 0x0098 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
+ '\xb2' # 0x0099 -> SUPERSCRIPT TWO
+ '\xb3' # 0x009a -> SUPERSCRIPT THREE
+ '\u03ac' # 0x009b -> GREEK SMALL LETTER ALPHA WITH TONOS
+ '\xa3' # 0x009c -> POUND SIGN
+ '\u03ad' # 0x009d -> GREEK SMALL LETTER EPSILON WITH TONOS
+ '\u03ae' # 0x009e -> GREEK SMALL LETTER ETA WITH TONOS
+ '\u03af' # 0x009f -> GREEK SMALL LETTER IOTA WITH TONOS
+ '\u03ca' # 0x00a0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ '\u0390' # 0x00a1 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+ '\u03cc' # 0x00a2 -> GREEK SMALL LETTER OMICRON WITH TONOS
+ '\u03cd' # 0x00a3 -> GREEK SMALL LETTER UPSILON WITH TONOS
+ '\u0391' # 0x00a4 -> GREEK CAPITAL LETTER ALPHA
+ '\u0392' # 0x00a5 -> GREEK CAPITAL LETTER BETA
+ '\u0393' # 0x00a6 -> GREEK CAPITAL LETTER GAMMA
+ '\u0394' # 0x00a7 -> GREEK CAPITAL LETTER DELTA
+ '\u0395' # 0x00a8 -> GREEK CAPITAL LETTER EPSILON
+ '\u0396' # 0x00a9 -> GREEK CAPITAL LETTER ZETA
+ '\u0397' # 0x00aa -> GREEK CAPITAL LETTER ETA
+ '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
+ '\u0398' # 0x00ac -> GREEK CAPITAL LETTER THETA
+ '\u0399' # 0x00ad -> GREEK CAPITAL LETTER IOTA
+ '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\u2591' # 0x00b0 -> LIGHT SHADE
+ '\u2592' # 0x00b1 -> MEDIUM SHADE
+ '\u2593' # 0x00b2 -> DARK SHADE
+ '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
+ '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ '\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA
+ '\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA
+ '\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU
+ '\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU
+ '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
+ '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
+ '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
+ '\u039e' # 0x00bd -> GREEK CAPITAL LETTER XI
+ '\u039f' # 0x00be -> GREEK CAPITAL LETTER OMICRON
+ '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
+ '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
+ '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
+ '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ '\u03a0' # 0x00c6 -> GREEK CAPITAL LETTER PI
+ '\u03a1' # 0x00c7 -> GREEK CAPITAL LETTER RHO
+ '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
+ '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
+ '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ '\u03a3' # 0x00cf -> GREEK CAPITAL LETTER SIGMA
+ '\u03a4' # 0x00d0 -> GREEK CAPITAL LETTER TAU
+ '\u03a5' # 0x00d1 -> GREEK CAPITAL LETTER UPSILON
+ '\u03a6' # 0x00d2 -> GREEK CAPITAL LETTER PHI
+ '\u03a7' # 0x00d3 -> GREEK CAPITAL LETTER CHI
+ '\u03a8' # 0x00d4 -> GREEK CAPITAL LETTER PSI
+ '\u03a9' # 0x00d5 -> GREEK CAPITAL LETTER OMEGA
+ '\u03b1' # 0x00d6 -> GREEK SMALL LETTER ALPHA
+ '\u03b2' # 0x00d7 -> GREEK SMALL LETTER BETA
+ '\u03b3' # 0x00d8 -> GREEK SMALL LETTER GAMMA
+ '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
+ '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
+ '\u2588' # 0x00db -> FULL BLOCK
+ '\u2584' # 0x00dc -> LOWER HALF BLOCK
+ '\u03b4' # 0x00dd -> GREEK SMALL LETTER DELTA
+ '\u03b5' # 0x00de -> GREEK SMALL LETTER EPSILON
+ '\u2580' # 0x00df -> UPPER HALF BLOCK
+ '\u03b6' # 0x00e0 -> GREEK SMALL LETTER ZETA
+ '\u03b7' # 0x00e1 -> GREEK SMALL LETTER ETA
+ '\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA
+ '\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA
+ '\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA
+ '\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA
+ '\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU
+ '\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU
+ '\u03be' # 0x00e8 -> GREEK SMALL LETTER XI
+ '\u03bf' # 0x00e9 -> GREEK SMALL LETTER OMICRON
+ '\u03c0' # 0x00ea -> GREEK SMALL LETTER PI
+ '\u03c1' # 0x00eb -> GREEK SMALL LETTER RHO
+ '\u03c3' # 0x00ec -> GREEK SMALL LETTER SIGMA
+ '\u03c2' # 0x00ed -> GREEK SMALL LETTER FINAL SIGMA
+ '\u03c4' # 0x00ee -> GREEK SMALL LETTER TAU
+ '\u0384' # 0x00ef -> GREEK TONOS
+ '\xad' # 0x00f0 -> SOFT HYPHEN
+ '\xb1' # 0x00f1 -> PLUS-MINUS SIGN
+ '\u03c5' # 0x00f2 -> GREEK SMALL LETTER UPSILON
+ '\u03c6' # 0x00f3 -> GREEK SMALL LETTER PHI
+ '\u03c7' # 0x00f4 -> GREEK SMALL LETTER CHI
+ '\xa7' # 0x00f5 -> SECTION SIGN
+ '\u03c8' # 0x00f6 -> GREEK SMALL LETTER PSI
+ '\u0385' # 0x00f7 -> GREEK DIALYTIKA TONOS
+ '\xb0' # 0x00f8 -> DEGREE SIGN
+ '\xa8' # 0x00f9 -> DIAERESIS
+ '\u03c9' # 0x00fa -> GREEK SMALL LETTER OMEGA
+ '\u03cb' # 0x00fb -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ '\u03b0' # 0x00fc -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+ '\u03ce' # 0x00fd -> GREEK SMALL LETTER OMEGA WITH TONOS
+ '\u25a0' # 0x00fe -> BLACK SQUARE
+ '\xa0' # 0x00ff -> NO-BREAK SPACE
+)
+
+### Encoding Map
+
+encoding_map = {
+ 0x0000: 0x0000, # NULL
+ 0x0001: 0x0001, # START OF HEADING
+ 0x0002: 0x0002, # START OF TEXT
+ 0x0003: 0x0003, # END OF TEXT
+ 0x0004: 0x0004, # END OF TRANSMISSION
+ 0x0005: 0x0005, # ENQUIRY
+ 0x0006: 0x0006, # ACKNOWLEDGE
+ 0x0007: 0x0007, # BELL
+ 0x0008: 0x0008, # BACKSPACE
+ 0x0009: 0x0009, # HORIZONTAL TABULATION
+ 0x000a: 0x000a, # LINE FEED
+ 0x000b: 0x000b, # VERTICAL TABULATION
+ 0x000c: 0x000c, # FORM FEED
+ 0x000d: 0x000d, # CARRIAGE RETURN
+ 0x000e: 0x000e, # SHIFT OUT
+ 0x000f: 0x000f, # SHIFT IN
+ 0x0010: 0x0010, # DATA LINK ESCAPE
+ 0x0011: 0x0011, # DEVICE CONTROL ONE
+ 0x0012: 0x0012, # DEVICE CONTROL TWO
+ 0x0013: 0x0013, # DEVICE CONTROL THREE
+ 0x0014: 0x0014, # DEVICE CONTROL FOUR
+ 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
+ 0x0016: 0x0016, # SYNCHRONOUS IDLE
+ 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
+ 0x0018: 0x0018, # CANCEL
+ 0x0019: 0x0019, # END OF MEDIUM
+ 0x001a: 0x001a, # SUBSTITUTE
+ 0x001b: 0x001b, # ESCAPE
+ 0x001c: 0x001c, # FILE SEPARATOR
+ 0x001d: 0x001d, # GROUP SEPARATOR
+ 0x001e: 0x001e, # RECORD SEPARATOR
+ 0x001f: 0x001f, # UNIT SEPARATOR
+ 0x0020: 0x0020, # SPACE
+ 0x0021: 0x0021, # EXCLAMATION MARK
+ 0x0022: 0x0022, # QUOTATION MARK
+ 0x0023: 0x0023, # NUMBER SIGN
+ 0x0024: 0x0024, # DOLLAR SIGN
+ 0x0025: 0x0025, # PERCENT SIGN
+ 0x0026: 0x0026, # AMPERSAND
+ 0x0027: 0x0027, # APOSTROPHE
+ 0x0028: 0x0028, # LEFT PARENTHESIS
+ 0x0029: 0x0029, # RIGHT PARENTHESIS
+ 0x002a: 0x002a, # ASTERISK
+ 0x002b: 0x002b, # PLUS SIGN
+ 0x002c: 0x002c, # COMMA
+ 0x002d: 0x002d, # HYPHEN-MINUS
+ 0x002e: 0x002e, # FULL STOP
+ 0x002f: 0x002f, # SOLIDUS
+ 0x0030: 0x0030, # DIGIT ZERO
+ 0x0031: 0x0031, # DIGIT ONE
+ 0x0032: 0x0032, # DIGIT TWO
+ 0x0033: 0x0033, # DIGIT THREE
+ 0x0034: 0x0034, # DIGIT FOUR
+ 0x0035: 0x0035, # DIGIT FIVE
+ 0x0036: 0x0036, # DIGIT SIX
+ 0x0037: 0x0037, # DIGIT SEVEN
+ 0x0038: 0x0038, # DIGIT EIGHT
+ 0x0039: 0x0039, # DIGIT NINE
+ 0x003a: 0x003a, # COLON
+ 0x003b: 0x003b, # SEMICOLON
+ 0x003c: 0x003c, # LESS-THAN SIGN
+ 0x003d: 0x003d, # EQUALS SIGN
+ 0x003e: 0x003e, # GREATER-THAN SIGN
+ 0x003f: 0x003f, # QUESTION MARK
+ 0x0040: 0x0040, # COMMERCIAL AT
+ 0x0041: 0x0041, # LATIN CAPITAL LETTER A
+ 0x0042: 0x0042, # LATIN CAPITAL LETTER B
+ 0x0043: 0x0043, # LATIN CAPITAL LETTER C
+ 0x0044: 0x0044, # LATIN CAPITAL LETTER D
+ 0x0045: 0x0045, # LATIN CAPITAL LETTER E
+ 0x0046: 0x0046, # LATIN CAPITAL LETTER F
+ 0x0047: 0x0047, # LATIN CAPITAL LETTER G
+ 0x0048: 0x0048, # LATIN CAPITAL LETTER H
+ 0x0049: 0x0049, # LATIN CAPITAL LETTER I
+ 0x004a: 0x004a, # LATIN CAPITAL LETTER J
+ 0x004b: 0x004b, # LATIN CAPITAL LETTER K
+ 0x004c: 0x004c, # LATIN CAPITAL LETTER L
+ 0x004d: 0x004d, # LATIN CAPITAL LETTER M
+ 0x004e: 0x004e, # LATIN CAPITAL LETTER N
+ 0x004f: 0x004f, # LATIN CAPITAL LETTER O
+ 0x0050: 0x0050, # LATIN CAPITAL LETTER P
+ 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
+ 0x0052: 0x0052, # LATIN CAPITAL LETTER R
+ 0x0053: 0x0053, # LATIN CAPITAL LETTER S
+ 0x0054: 0x0054, # LATIN CAPITAL LETTER T
+ 0x0055: 0x0055, # LATIN CAPITAL LETTER U
+ 0x0056: 0x0056, # LATIN CAPITAL LETTER V
+ 0x0057: 0x0057, # LATIN CAPITAL LETTER W
+ 0x0058: 0x0058, # LATIN CAPITAL LETTER X
+ 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
+ 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
+ 0x005b: 0x005b, # LEFT SQUARE BRACKET
+ 0x005c: 0x005c, # REVERSE SOLIDUS
+ 0x005d: 0x005d, # RIGHT SQUARE BRACKET
+ 0x005e: 0x005e, # CIRCUMFLEX ACCENT
+ 0x005f: 0x005f, # LOW LINE
+ 0x0060: 0x0060, # GRAVE ACCENT
+ 0x0061: 0x0061, # LATIN SMALL LETTER A
+ 0x0062: 0x0062, # LATIN SMALL LETTER B
+ 0x0063: 0x0063, # LATIN SMALL LETTER C
+ 0x0064: 0x0064, # LATIN SMALL LETTER D
+ 0x0065: 0x0065, # LATIN SMALL LETTER E
+ 0x0066: 0x0066, # LATIN SMALL LETTER F
+ 0x0067: 0x0067, # LATIN SMALL LETTER G
+ 0x0068: 0x0068, # LATIN SMALL LETTER H
+ 0x0069: 0x0069, # LATIN SMALL LETTER I
+ 0x006a: 0x006a, # LATIN SMALL LETTER J
+ 0x006b: 0x006b, # LATIN SMALL LETTER K
+ 0x006c: 0x006c, # LATIN SMALL LETTER L
+ 0x006d: 0x006d, # LATIN SMALL LETTER M
+ 0x006e: 0x006e, # LATIN SMALL LETTER N
+ 0x006f: 0x006f, # LATIN SMALL LETTER O
+ 0x0070: 0x0070, # LATIN SMALL LETTER P
+ 0x0071: 0x0071, # LATIN SMALL LETTER Q
+ 0x0072: 0x0072, # LATIN SMALL LETTER R
+ 0x0073: 0x0073, # LATIN SMALL LETTER S
+ 0x0074: 0x0074, # LATIN SMALL LETTER T
+ 0x0075: 0x0075, # LATIN SMALL LETTER U
+ 0x0076: 0x0076, # LATIN SMALL LETTER V
+ 0x0077: 0x0077, # LATIN SMALL LETTER W
+ 0x0078: 0x0078, # LATIN SMALL LETTER X
+ 0x0079: 0x0079, # LATIN SMALL LETTER Y
+ 0x007a: 0x007a, # LATIN SMALL LETTER Z
+ 0x007b: 0x007b, # LEFT CURLY BRACKET
+ 0x007c: 0x007c, # VERTICAL LINE
+ 0x007d: 0x007d, # RIGHT CURLY BRACKET
+ 0x007e: 0x007e, # TILDE
+ 0x007f: 0x007f, # DELETE
+ 0x00a0: 0x00ff, # NO-BREAK SPACE
+ 0x00a3: 0x009c, # POUND SIGN
+ 0x00a6: 0x008a, # BROKEN BAR
+ 0x00a7: 0x00f5, # SECTION SIGN
+ 0x00a8: 0x00f9, # DIAERESIS
+ 0x00a9: 0x0097, # COPYRIGHT SIGN
+ 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00ac: 0x0089, # NOT SIGN
+ 0x00ad: 0x00f0, # SOFT HYPHEN
+ 0x00b0: 0x00f8, # DEGREE SIGN
+ 0x00b1: 0x00f1, # PLUS-MINUS SIGN
+ 0x00b2: 0x0099, # SUPERSCRIPT TWO
+ 0x00b3: 0x009a, # SUPERSCRIPT THREE
+ 0x00b7: 0x0088, # MIDDLE DOT
+ 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
+ 0x0384: 0x00ef, # GREEK TONOS
+ 0x0385: 0x00f7, # GREEK DIALYTIKA TONOS
+ 0x0386: 0x0086, # GREEK CAPITAL LETTER ALPHA WITH TONOS
+ 0x0388: 0x008d, # GREEK CAPITAL LETTER EPSILON WITH TONOS
+ 0x0389: 0x008f, # GREEK CAPITAL LETTER ETA WITH TONOS
+ 0x038a: 0x0090, # GREEK CAPITAL LETTER IOTA WITH TONOS
+ 0x038c: 0x0092, # GREEK CAPITAL LETTER OMICRON WITH TONOS
+ 0x038e: 0x0095, # GREEK CAPITAL LETTER UPSILON WITH TONOS
+ 0x038f: 0x0098, # GREEK CAPITAL LETTER OMEGA WITH TONOS
+ 0x0390: 0x00a1, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+ 0x0391: 0x00a4, # GREEK CAPITAL LETTER ALPHA
+ 0x0392: 0x00a5, # GREEK CAPITAL LETTER BETA
+ 0x0393: 0x00a6, # GREEK CAPITAL LETTER GAMMA
+ 0x0394: 0x00a7, # GREEK CAPITAL LETTER DELTA
+ 0x0395: 0x00a8, # GREEK CAPITAL LETTER EPSILON
+ 0x0396: 0x00a9, # GREEK CAPITAL LETTER ZETA
+ 0x0397: 0x00aa, # GREEK CAPITAL LETTER ETA
+ 0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA
+ 0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA
+ 0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA
+ 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA
+ 0x039c: 0x00b7, # GREEK CAPITAL LETTER MU
+ 0x039d: 0x00b8, # GREEK CAPITAL LETTER NU
+ 0x039e: 0x00bd, # GREEK CAPITAL LETTER XI
+ 0x039f: 0x00be, # GREEK CAPITAL LETTER OMICRON
+ 0x03a0: 0x00c6, # GREEK CAPITAL LETTER PI
+ 0x03a1: 0x00c7, # GREEK CAPITAL LETTER RHO
+ 0x03a3: 0x00cf, # GREEK CAPITAL LETTER SIGMA
+ 0x03a4: 0x00d0, # GREEK CAPITAL LETTER TAU
+ 0x03a5: 0x00d1, # GREEK CAPITAL LETTER UPSILON
+ 0x03a6: 0x00d2, # GREEK CAPITAL LETTER PHI
+ 0x03a7: 0x00d3, # GREEK CAPITAL LETTER CHI
+ 0x03a8: 0x00d4, # GREEK CAPITAL LETTER PSI
+ 0x03a9: 0x00d5, # GREEK CAPITAL LETTER OMEGA
+ 0x03aa: 0x0091, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ 0x03ab: 0x0096, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ 0x03ac: 0x009b, # GREEK SMALL LETTER ALPHA WITH TONOS
+ 0x03ad: 0x009d, # GREEK SMALL LETTER EPSILON WITH TONOS
+ 0x03ae: 0x009e, # GREEK SMALL LETTER ETA WITH TONOS
+ 0x03af: 0x009f, # GREEK SMALL LETTER IOTA WITH TONOS
+ 0x03b0: 0x00fc, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+ 0x03b1: 0x00d6, # GREEK SMALL LETTER ALPHA
+ 0x03b2: 0x00d7, # GREEK SMALL LETTER BETA
+ 0x03b3: 0x00d8, # GREEK SMALL LETTER GAMMA
+ 0x03b4: 0x00dd, # GREEK SMALL LETTER DELTA
+ 0x03b5: 0x00de, # GREEK SMALL LETTER EPSILON
+ 0x03b6: 0x00e0, # GREEK SMALL LETTER ZETA
+ 0x03b7: 0x00e1, # GREEK SMALL LETTER ETA
+ 0x03b8: 0x00e2, # GREEK SMALL LETTER THETA
+ 0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA
+ 0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA
+ 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA
+ 0x03bc: 0x00e6, # GREEK SMALL LETTER MU
+ 0x03bd: 0x00e7, # GREEK SMALL LETTER NU
+ 0x03be: 0x00e8, # GREEK SMALL LETTER XI
+ 0x03bf: 0x00e9, # GREEK SMALL LETTER OMICRON
+ 0x03c0: 0x00ea, # GREEK SMALL LETTER PI
+ 0x03c1: 0x00eb, # GREEK SMALL LETTER RHO
+ 0x03c2: 0x00ed, # GREEK SMALL LETTER FINAL SIGMA
+ 0x03c3: 0x00ec, # GREEK SMALL LETTER SIGMA
+ 0x03c4: 0x00ee, # GREEK SMALL LETTER TAU
+ 0x03c5: 0x00f2, # GREEK SMALL LETTER UPSILON
+ 0x03c6: 0x00f3, # GREEK SMALL LETTER PHI
+ 0x03c7: 0x00f4, # GREEK SMALL LETTER CHI
+ 0x03c8: 0x00f6, # GREEK SMALL LETTER PSI
+ 0x03c9: 0x00fa, # GREEK SMALL LETTER OMEGA
+ 0x03ca: 0x00a0, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ 0x03cb: 0x00fb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ 0x03cc: 0x00a2, # GREEK SMALL LETTER OMICRON WITH TONOS
+ 0x03cd: 0x00a3, # GREEK SMALL LETTER UPSILON WITH TONOS
+ 0x03ce: 0x00fd, # GREEK SMALL LETTER OMEGA WITH TONOS
+ 0x2015: 0x008e, # HORIZONTAL BAR
+ 0x2018: 0x008b, # LEFT SINGLE QUOTATION MARK
+ 0x2019: 0x008c, # RIGHT SINGLE QUOTATION MARK
+ 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
+ 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
+ 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
+ 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
+ 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
+ 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
+ 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+ 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
+ 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+ 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
+ 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+ 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
+ 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
+ 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
+ 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
+ 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
+ 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
+ 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+ 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+ 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+ 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+ 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+ 0x2580: 0x00df, # UPPER HALF BLOCK
+ 0x2584: 0x00dc, # LOWER HALF BLOCK
+ 0x2588: 0x00db, # FULL BLOCK
+ 0x2591: 0x00b0, # LIGHT SHADE
+ 0x2592: 0x00b1, # MEDIUM SHADE
+ 0x2593: 0x00b2, # DARK SHADE
+ 0x25a0: 0x00fe, # BLACK SQUARE
+}
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp874.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp874.py
new file mode 100644
index 00000000..59bfcbc9
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp874.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp874',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\u20ac' # 0x80 -> EURO SIGN
+ '\ufffe' # 0x81 -> UNDEFINED
+ '\ufffe' # 0x82 -> UNDEFINED
+ '\ufffe' # 0x83 -> UNDEFINED
+ '\ufffe' # 0x84 -> UNDEFINED
+ '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ '\ufffe' # 0x86 -> UNDEFINED
+ '\ufffe' # 0x87 -> UNDEFINED
+ '\ufffe' # 0x88 -> UNDEFINED
+ '\ufffe' # 0x89 -> UNDEFINED
+ '\ufffe' # 0x8A -> UNDEFINED
+ '\ufffe' # 0x8B -> UNDEFINED
+ '\ufffe' # 0x8C -> UNDEFINED
+ '\ufffe' # 0x8D -> UNDEFINED
+ '\ufffe' # 0x8E -> UNDEFINED
+ '\ufffe' # 0x8F -> UNDEFINED
+ '\ufffe' # 0x90 -> UNDEFINED
+ '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ '\u2022' # 0x95 -> BULLET
+ '\u2013' # 0x96 -> EN DASH
+ '\u2014' # 0x97 -> EM DASH
+ '\ufffe' # 0x98 -> UNDEFINED
+ '\ufffe' # 0x99 -> UNDEFINED
+ '\ufffe' # 0x9A -> UNDEFINED
+ '\ufffe' # 0x9B -> UNDEFINED
+ '\ufffe' # 0x9C -> UNDEFINED
+ '\ufffe' # 0x9D -> UNDEFINED
+ '\ufffe' # 0x9E -> UNDEFINED
+ '\ufffe' # 0x9F -> UNDEFINED
+ '\xa0' # 0xA0 -> NO-BREAK SPACE
+ '\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
+ '\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
+ '\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
+ '\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
+ '\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
+ '\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
+ '\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
+ '\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
+ '\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
+ '\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
+ '\u0e0b' # 0xAB -> THAI CHARACTER SO SO
+ '\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
+ '\u0e0d' # 0xAD -> THAI CHARACTER YO YING
+ '\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
+ '\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
+ '\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
+ '\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
+ '\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
+ '\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
+ '\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
+ '\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
+ '\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
+ '\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
+ '\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
+ '\u0e19' # 0xB9 -> THAI CHARACTER NO NU
+ '\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
+ '\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
+ '\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
+ '\u0e1d' # 0xBD -> THAI CHARACTER FO FA
+ '\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
+ '\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
+ '\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
+ '\u0e21' # 0xC1 -> THAI CHARACTER MO MA
+ '\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
+ '\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
+ '\u0e24' # 0xC4 -> THAI CHARACTER RU
+ '\u0e25' # 0xC5 -> THAI CHARACTER LO LING
+ '\u0e26' # 0xC6 -> THAI CHARACTER LU
+ '\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
+ '\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
+ '\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
+ '\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
+ '\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
+ '\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
+ '\u0e2d' # 0xCD -> THAI CHARACTER O ANG
+ '\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
+ '\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
+ '\u0e30' # 0xD0 -> THAI CHARACTER SARA A
+ '\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
+ '\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
+ '\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
+ '\u0e34' # 0xD4 -> THAI CHARACTER SARA I
+ '\u0e35' # 0xD5 -> THAI CHARACTER SARA II
+ '\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
+ '\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
+ '\u0e38' # 0xD8 -> THAI CHARACTER SARA U
+ '\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
+ '\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
+ '\ufffe' # 0xDB -> UNDEFINED
+ '\ufffe' # 0xDC -> UNDEFINED
+ '\ufffe' # 0xDD -> UNDEFINED
+ '\ufffe' # 0xDE -> UNDEFINED
+ '\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
+ '\u0e40' # 0xE0 -> THAI CHARACTER SARA E
+ '\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
+ '\u0e42' # 0xE2 -> THAI CHARACTER SARA O
+ '\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
+ '\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
+ '\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
+ '\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
+ '\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
+ '\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
+ '\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
+ '\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
+ '\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
+ '\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
+ '\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
+ '\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
+ '\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
+ '\u0e50' # 0xF0 -> THAI DIGIT ZERO
+ '\u0e51' # 0xF1 -> THAI DIGIT ONE
+ '\u0e52' # 0xF2 -> THAI DIGIT TWO
+ '\u0e53' # 0xF3 -> THAI DIGIT THREE
+ '\u0e54' # 0xF4 -> THAI DIGIT FOUR
+ '\u0e55' # 0xF5 -> THAI DIGIT FIVE
+ '\u0e56' # 0xF6 -> THAI DIGIT SIX
+ '\u0e57' # 0xF7 -> THAI DIGIT SEVEN
+ '\u0e58' # 0xF8 -> THAI DIGIT EIGHT
+ '\u0e59' # 0xF9 -> THAI DIGIT NINE
+ '\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
+ '\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
+ '\ufffe' # 0xFC -> UNDEFINED
+ '\ufffe' # 0xFD -> UNDEFINED
+ '\ufffe' # 0xFE -> UNDEFINED
+ '\ufffe' # 0xFF -> UNDEFINED
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
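A quick sanity check of the cp874 tables above, once the codec is importable (illustrative snippet, not part of the patch; the byte/code-point pair comes straight from the decoding table):

    # 0xA1 decodes to U+0E01 THAI CHARACTER KO KAI per the table above
    assert b'\xa1'.decode('cp874') == '\u0e01'
    # and the encoding_table built by charmap_build round-trips it
    assert '\u0e01'.encode('cp874') == b'\xa1'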
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp875.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp875.py
new file mode 100644
index 00000000..c25a5a43
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp875.py
@@ -0,0 +1,307 @@
+""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp875',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x9c' # 0x04 -> CONTROL
+ '\t' # 0x05 -> HORIZONTAL TABULATION
+ '\x86' # 0x06 -> CONTROL
+ '\x7f' # 0x07 -> DELETE
+ '\x97' # 0x08 -> CONTROL
+ '\x8d' # 0x09 -> CONTROL
+ '\x8e' # 0x0A -> CONTROL
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x9d' # 0x14 -> CONTROL
+ '\x85' # 0x15 -> CONTROL
+ '\x08' # 0x16 -> BACKSPACE
+ '\x87' # 0x17 -> CONTROL
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x92' # 0x1A -> CONTROL
+ '\x8f' # 0x1B -> CONTROL
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ '\x80' # 0x20 -> CONTROL
+ '\x81' # 0x21 -> CONTROL
+ '\x82' # 0x22 -> CONTROL
+ '\x83' # 0x23 -> CONTROL
+ '\x84' # 0x24 -> CONTROL
+ '\n' # 0x25 -> LINE FEED
+ '\x17' # 0x26 -> END OF TRANSMISSION BLOCK
+ '\x1b' # 0x27 -> ESCAPE
+ '\x88' # 0x28 -> CONTROL
+ '\x89' # 0x29 -> CONTROL
+ '\x8a' # 0x2A -> CONTROL
+ '\x8b' # 0x2B -> CONTROL
+ '\x8c' # 0x2C -> CONTROL
+ '\x05' # 0x2D -> ENQUIRY
+ '\x06' # 0x2E -> ACKNOWLEDGE
+ '\x07' # 0x2F -> BELL
+ '\x90' # 0x30 -> CONTROL
+ '\x91' # 0x31 -> CONTROL
+ '\x16' # 0x32 -> SYNCHRONOUS IDLE
+ '\x93' # 0x33 -> CONTROL
+ '\x94' # 0x34 -> CONTROL
+ '\x95' # 0x35 -> CONTROL
+ '\x96' # 0x36 -> CONTROL
+ '\x04' # 0x37 -> END OF TRANSMISSION
+ '\x98' # 0x38 -> CONTROL
+ '\x99' # 0x39 -> CONTROL
+ '\x9a' # 0x3A -> CONTROL
+ '\x9b' # 0x3B -> CONTROL
+ '\x14' # 0x3C -> DEVICE CONTROL FOUR
+ '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
+ '\x9e' # 0x3E -> CONTROL
+ '\x1a' # 0x3F -> SUBSTITUTE
+ ' ' # 0x40 -> SPACE
+ '\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
+ '\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
+ '\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
+ '\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
+ '\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
+ '\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
+ '\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
+ '\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
+ '\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
+ '[' # 0x4A -> LEFT SQUARE BRACKET
+ '.' # 0x4B -> FULL STOP
+ '<' # 0x4C -> LESS-THAN SIGN
+ '(' # 0x4D -> LEFT PARENTHESIS
+ '+' # 0x4E -> PLUS SIGN
+ '!' # 0x4F -> EXCLAMATION MARK
+ '&' # 0x50 -> AMPERSAND
+ '\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
+ '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
+ '\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
+ '\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
+ '\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
+ '\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
+ '\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
+ '\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
+ '\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
+ ']' # 0x5A -> RIGHT SQUARE BRACKET
+ '$' # 0x5B -> DOLLAR SIGN
+ '*' # 0x5C -> ASTERISK
+ ')' # 0x5D -> RIGHT PARENTHESIS
+ ';' # 0x5E -> SEMICOLON
+ '^' # 0x5F -> CIRCUMFLEX ACCENT
+ '-' # 0x60 -> HYPHEN-MINUS
+ '/' # 0x61 -> SOLIDUS
+ '\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
+ '\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
+ '\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
+ '\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
+ '\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
+ '\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
+ '\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+ '\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+ '|' # 0x6A -> VERTICAL LINE
+ ',' # 0x6B -> COMMA
+ '%' # 0x6C -> PERCENT SIGN
+ '_' # 0x6D -> LOW LINE
+ '>' # 0x6E -> GREATER-THAN SIGN
+ '?' # 0x6F -> QUESTION MARK
+ '\xa8' # 0x70 -> DIAERESIS
+ '\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
+ '\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
+ '\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
+ '\xa0' # 0x74 -> NO-BREAK SPACE
+ '\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
+ '\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
+ '\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
+ '\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
+ '`' # 0x79 -> GRAVE ACCENT
+ ':' # 0x7A -> COLON
+ '#' # 0x7B -> NUMBER SIGN
+ '@' # 0x7C -> COMMERCIAL AT
+ "'" # 0x7D -> APOSTROPHE
+ '=' # 0x7E -> EQUALS SIGN
+ '"' # 0x7F -> QUOTATION MARK
+ '\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
+ 'a' # 0x81 -> LATIN SMALL LETTER A
+ 'b' # 0x82 -> LATIN SMALL LETTER B
+ 'c' # 0x83 -> LATIN SMALL LETTER C
+ 'd' # 0x84 -> LATIN SMALL LETTER D
+ 'e' # 0x85 -> LATIN SMALL LETTER E
+ 'f' # 0x86 -> LATIN SMALL LETTER F
+ 'g' # 0x87 -> LATIN SMALL LETTER G
+ 'h' # 0x88 -> LATIN SMALL LETTER H
+ 'i' # 0x89 -> LATIN SMALL LETTER I
+ '\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
+ '\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
+ '\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
+ '\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
+ '\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
+ '\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
+ '\xb0' # 0x90 -> DEGREE SIGN
+ 'j' # 0x91 -> LATIN SMALL LETTER J
+ 'k' # 0x92 -> LATIN SMALL LETTER K
+ 'l' # 0x93 -> LATIN SMALL LETTER L
+ 'm' # 0x94 -> LATIN SMALL LETTER M
+ 'n' # 0x95 -> LATIN SMALL LETTER N
+ 'o' # 0x96 -> LATIN SMALL LETTER O
+ 'p' # 0x97 -> LATIN SMALL LETTER P
+ 'q' # 0x98 -> LATIN SMALL LETTER Q
+ 'r' # 0x99 -> LATIN SMALL LETTER R
+ '\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
+ '\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
+ '\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
+ '\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
+ '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
+ '\u03bc' # 0x9F -> GREEK SMALL LETTER MU
+ '\xb4' # 0xA0 -> ACUTE ACCENT
+ '~' # 0xA1 -> TILDE
+ 's' # 0xA2 -> LATIN SMALL LETTER S
+ 't' # 0xA3 -> LATIN SMALL LETTER T
+ 'u' # 0xA4 -> LATIN SMALL LETTER U
+ 'v' # 0xA5 -> LATIN SMALL LETTER V
+ 'w' # 0xA6 -> LATIN SMALL LETTER W
+ 'x' # 0xA7 -> LATIN SMALL LETTER X
+ 'y' # 0xA8 -> LATIN SMALL LETTER Y
+ 'z' # 0xA9 -> LATIN SMALL LETTER Z
+ '\u03bd' # 0xAA -> GREEK SMALL LETTER NU
+ '\u03be' # 0xAB -> GREEK SMALL LETTER XI
+ '\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
+ '\u03c0' # 0xAD -> GREEK SMALL LETTER PI
+ '\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
+ '\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
+ '\xa3' # 0xB0 -> POUND SIGN
+ '\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
+ '\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
+ '\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
+ '\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
+ '\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
+ '\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
+ '\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
+ '\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+ '\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
+ '\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
+ '\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
+ '\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
+ '\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
+ '\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
+ '\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
+ '{' # 0xC0 -> LEFT CURLY BRACKET
+ 'A' # 0xC1 -> LATIN CAPITAL LETTER A
+ 'B' # 0xC2 -> LATIN CAPITAL LETTER B
+ 'C' # 0xC3 -> LATIN CAPITAL LETTER C
+ 'D' # 0xC4 -> LATIN CAPITAL LETTER D
+ 'E' # 0xC5 -> LATIN CAPITAL LETTER E
+ 'F' # 0xC6 -> LATIN CAPITAL LETTER F
+ 'G' # 0xC7 -> LATIN CAPITAL LETTER G
+ 'H' # 0xC8 -> LATIN CAPITAL LETTER H
+ 'I' # 0xC9 -> LATIN CAPITAL LETTER I
+ '\xad' # 0xCA -> SOFT HYPHEN
+ '\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
+ '\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+ '\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+ '\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
+ '\u2015' # 0xCF -> HORIZONTAL BAR
+ '}' # 0xD0 -> RIGHT CURLY BRACKET
+ 'J' # 0xD1 -> LATIN CAPITAL LETTER J
+ 'K' # 0xD2 -> LATIN CAPITAL LETTER K
+ 'L' # 0xD3 -> LATIN CAPITAL LETTER L
+ 'M' # 0xD4 -> LATIN CAPITAL LETTER M
+ 'N' # 0xD5 -> LATIN CAPITAL LETTER N
+ 'O' # 0xD6 -> LATIN CAPITAL LETTER O
+ 'P' # 0xD7 -> LATIN CAPITAL LETTER P
+ 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
+ 'R' # 0xD9 -> LATIN CAPITAL LETTER R
+ '\xb1' # 0xDA -> PLUS-MINUS SIGN
+ '\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
+ '\x1a' # 0xDC -> SUBSTITUTE
+ '\u0387' # 0xDD -> GREEK ANO TELEIA
+ '\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
+ '\xa6' # 0xDF -> BROKEN BAR
+ '\\' # 0xE0 -> REVERSE SOLIDUS
+ '\x1a' # 0xE1 -> SUBSTITUTE
+ 'S' # 0xE2 -> LATIN CAPITAL LETTER S
+ 'T' # 0xE3 -> LATIN CAPITAL LETTER T
+ 'U' # 0xE4 -> LATIN CAPITAL LETTER U
+ 'V' # 0xE5 -> LATIN CAPITAL LETTER V
+ 'W' # 0xE6 -> LATIN CAPITAL LETTER W
+ 'X' # 0xE7 -> LATIN CAPITAL LETTER X
+ 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
+ '\xb2' # 0xEA -> SUPERSCRIPT TWO
+ '\xa7' # 0xEB -> SECTION SIGN
+ '\x1a' # 0xEC -> SUBSTITUTE
+ '\x1a' # 0xED -> SUBSTITUTE
+ '\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\xac' # 0xEF -> NOT SIGN
+ '0' # 0xF0 -> DIGIT ZERO
+ '1' # 0xF1 -> DIGIT ONE
+ '2' # 0xF2 -> DIGIT TWO
+ '3' # 0xF3 -> DIGIT THREE
+ '4' # 0xF4 -> DIGIT FOUR
+ '5' # 0xF5 -> DIGIT FIVE
+ '6' # 0xF6 -> DIGIT SIX
+ '7' # 0xF7 -> DIGIT SEVEN
+ '8' # 0xF8 -> DIGIT EIGHT
+ '9' # 0xF9 -> DIGIT NINE
+ '\xb3' # 0xFA -> SUPERSCRIPT THREE
+ '\xa9' # 0xFB -> COPYRIGHT SIGN
+ '\x1a' # 0xFC -> SUBSTITUTE
+ '\x1a' # 0xFD -> SUBSTITUTE
+ '\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+ '\x9f' # 0xFF -> CONTROL
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
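cp875 is an EBCDIC layout, so ASCII letters sit in the 0xC1-0xE9 range rather than at their ASCII code points; a minimal check against the decoding table above (illustrative only):

    # per the table: 0xC1 -> LATIN CAPITAL LETTER A, 0x41 -> GREEK CAPITAL LETTER ALPHA
    assert 'A'.encode('cp875') == b'\xc1'
    assert b'\x41'.decode('cp875') == '\u0391'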
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp932.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp932.py
new file mode 100644
index 00000000..e01f59b7
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp932.py
@@ -0,0 +1,39 @@
+#
+# cp932.py: Python Unicode Codec for CP932
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_jp, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_jp.getcodec('cp932')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp932',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
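Unlike the charmap codecs above, cp932 and the East Asian codecs that follow are thin wrappers that re-export a codec object from a C extension (_codecs_jp here); a hedged round-trip sketch with an arbitrary sample string:

    # any text representable in CP932 (a Shift JIS superset) round-trips
    text = 'テスト'
    assert text.encode('cp932').decode('cp932') == text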
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp949.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp949.py
new file mode 100644
index 00000000..627c8712
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp949.py
@@ -0,0 +1,39 @@
+#
+# cp949.py: Python Unicode Codec for CP949
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_kr, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_kr.getcodec('cp949')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp949',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp950.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp950.py
new file mode 100644
index 00000000..39eec5ed
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/cp950.py
@@ -0,0 +1,39 @@
+#
+# cp950.py: Python Unicode Codec for CP950
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_tw, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_tw.getcodec('cp950')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp950',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jis_2004.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jis_2004.py
new file mode 100644
index 00000000..72b87aea
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jis_2004.py
@@ -0,0 +1,39 @@
+#
+# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_jp, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_jp.getcodec('euc_jis_2004')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='euc_jis_2004',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jisx0213.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jisx0213.py
new file mode 100644
index 00000000..cc47d041
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jisx0213.py
@@ -0,0 +1,39 @@
+#
+# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_jp, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_jp.getcodec('euc_jisx0213')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='euc_jisx0213',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jp.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jp.py
new file mode 100644
index 00000000..7bcbe414
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_jp.py
@@ -0,0 +1,39 @@
+#
+# euc_jp.py: Python Unicode Codec for EUC_JP
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_jp, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_jp.getcodec('euc_jp')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='euc_jp',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_kr.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_kr.py
new file mode 100644
index 00000000..c1fb1260
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/euc_kr.py
@@ -0,0 +1,39 @@
+#
+# euc_kr.py: Python Unicode Codec for EUC_KR
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_kr, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_kr.getcodec('euc_kr')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='euc_kr',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gb18030.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gb18030.py
new file mode 100644
index 00000000..34fb6c36
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gb18030.py
@@ -0,0 +1,39 @@
+#
+# gb18030.py: Python Unicode Codec for GB18030
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_cn, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_cn.getcodec('gb18030')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='gb18030',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gb2312.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gb2312.py
new file mode 100644
index 00000000..3c3b837d
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gb2312.py
@@ -0,0 +1,39 @@
+#
+# gb2312.py: Python Unicode Codec for GB2312
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_cn, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_cn.getcodec('gb2312')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='gb2312',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gbk.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gbk.py
new file mode 100644
index 00000000..1b45db89
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/gbk.py
@@ -0,0 +1,39 @@
+#
+# gbk.py: Python Unicode Codec for GBK
+#
+# Written by Hye-Shik Chang
+#
+
+import _codecs_cn, codecs
+import _multibytecodec as mbc
+
+codec = _codecs_cn.getcodec('gbk')
+
+class Codec(codecs.Codec):
+ encode = codec.encode
+ decode = codec.decode
+
+class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
+ codecs.IncrementalEncoder):
+ codec = codec
+
+class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
+ codecs.IncrementalDecoder):
+ codec = codec
+
+class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
+ codec = codec
+
+class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
+ codec = codec
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='gbk',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/hex_codec.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/hex_codec.py
new file mode 100644
index 00000000..9fb10728
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/hex_codec.py
@@ -0,0 +1,55 @@
+"""Python 'hex_codec' Codec - 2-digit hex content transfer encoding.
+
+This codec de/encodes from bytes to bytes.
+
+Written by Marc-Andre Lemburg (mal@lemburg.com).
+"""
+
+import codecs
+import binascii
+
+### Codec APIs
+
+def hex_encode(input, errors='strict'):
+ assert errors == 'strict'
+ return (binascii.b2a_hex(input), len(input))
+
+def hex_decode(input, errors='strict'):
+ assert errors == 'strict'
+ return (binascii.a2b_hex(input), len(input))
+
+class Codec(codecs.Codec):
+ def encode(self, input, errors='strict'):
+ return hex_encode(input, errors)
+ def decode(self, input, errors='strict'):
+ return hex_decode(input, errors)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ assert self.errors == 'strict'
+ return binascii.b2a_hex(input)
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ assert self.errors == 'strict'
+ return binascii.a2b_hex(input)
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ charbuffertype = bytes
+
+class StreamReader(Codec, codecs.StreamReader):
+ charbuffertype = bytes
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='hex',
+ encode=hex_encode,
+ decode=hex_decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ _is_text_encoding=False,
+ )
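As the docstring says, hex_codec transforms bytes to bytes (and _is_text_encoding=False keeps it out of str.encode / bytes.decode), so it is reached via codecs.encode and codecs.decode:

    import codecs
    assert codecs.encode(b'\x00\xff', 'hex') == b'00ff'
    assert codecs.decode(b'00ff', 'hex') == b'\x00\xff'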
diff --git a/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/hp_roman8.py b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/hp_roman8.py
new file mode 100644
index 00000000..58de1033
--- /dev/null
+++ b/Resources/WPy64-3720/python-3.7.2.amd64/Lib/encodings/hp_roman8.py
@@ -0,0 +1,314 @@
+""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
+
+ Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
+
+ Original source: LaserJet IIP Printer User's Manual HP part no
+ 33471-90901, Hewlett-Packard, June 1989.
+
+ (Used with permission)
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self, input, errors='strict'):
+        return codecs.charmap_encode(input, errors, encoding_table)
+
+    def decode(self, input, errors='strict'):
+        return codecs.charmap_decode(input, errors, decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    pass
+
+class StreamReader(Codec, codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='hp-roman8',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ )
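A short usage sketch of the 'hp-roman8' charmap codec defined above (illustrative, not part of the patched file; assumes a standard Python 3 interpreter):

    import codecs

    # The first 128 positions of decoding_table are plain ASCII, so ASCII
    # bytes round-trip unchanged.
    assert codecs.decode(b"HP Roman-8", "hp-roman8") == "HP Roman-8"
    assert "HP Roman-8".encode("hp-roman8") == b"HP Roman-8"

    # Single-byte codec: the incremental decoder works over any chunking.
    dec = codecs.getincrementaldecoder("hp-roman8")()
    assert dec.decode(b"ab") + dec.decode(b"c", final=True) == "abc"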
+
+
+### Decoding Table
+
+decoding_table = (
+ '\x00' # 0x00 -> NULL
+ '\x01' # 0x01 -> START OF HEADING
+ '\x02' # 0x02 -> START OF TEXT
+ '\x03' # 0x03 -> END OF TEXT
+ '\x04' # 0x04 -> END OF TRANSMISSION
+ '\x05' # 0x05 -> ENQUIRY
+ '\x06' # 0x06 -> ACKNOWLEDGE
+ '\x07' # 0x07 -> BELL
+ '\x08' # 0x08 -> BACKSPACE
+ '\t' # 0x09 -> HORIZONTAL TABULATION
+ '\n' # 0x0A -> LINE FEED
+ '\x0b' # 0x0B -> VERTICAL TABULATION
+ '\x0c' # 0x0C -> FORM FEED
+ '\r' # 0x0D -> CARRIAGE RETURN
+ '\x0e' # 0x0E -> SHIFT OUT
+ '\x0f' # 0x0F -> SHIFT IN
+ '\x10' # 0x10 -> DATA LINK ESCAPE
+ '\x11' # 0x11 -> DEVICE CONTROL ONE
+ '\x12' # 0x12 -> DEVICE CONTROL TWO
+ '\x13' # 0x13 -> DEVICE CONTROL THREE
+ '\x14' # 0x14 -> DEVICE CONTROL FOUR
+ '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ '\x16' # 0x16 -> SYNCHRONOUS IDLE
+ '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ '\x18' # 0x18 -> CANCEL
+ '\x19' # 0x19 -> END OF MEDIUM
+ '\x1a' # 0x1A -> SUBSTITUTE
+ '\x1b' # 0x1B -> ESCAPE
+ '\x1c' # 0x1C -> FILE SEPARATOR
+ '\x1d' # 0x1D -> GROUP SEPARATOR
+ '\x1e' # 0x1E -> RECORD SEPARATOR
+ '\x1f' # 0x1F -> UNIT SEPARATOR
+ ' ' # 0x20 -> SPACE
+ '!' # 0x21 -> EXCLAMATION MARK
+ '"' # 0x22 -> QUOTATION MARK
+ '#' # 0x23 -> NUMBER SIGN
+ '$' # 0x24 -> DOLLAR SIGN
+ '%' # 0x25 -> PERCENT SIGN
+ '&' # 0x26 -> AMPERSAND
+ "'" # 0x27 -> APOSTROPHE
+ '(' # 0x28 -> LEFT PARENTHESIS
+ ')' # 0x29 -> RIGHT PARENTHESIS
+ '*' # 0x2A -> ASTERISK
+ '+' # 0x2B -> PLUS SIGN
+ ',' # 0x2C -> COMMA
+ '-' # 0x2D -> HYPHEN-MINUS
+ '.' # 0x2E -> FULL STOP
+ '/' # 0x2F -> SOLIDUS
+ '0' # 0x30 -> DIGIT ZERO
+ '1' # 0x31 -> DIGIT ONE
+ '2' # 0x32 -> DIGIT TWO
+ '3' # 0x33 -> DIGIT THREE
+ '4' # 0x34 -> DIGIT FOUR
+ '5' # 0x35 -> DIGIT FIVE
+ '6' # 0x36 -> DIGIT SIX
+ '7' # 0x37 -> DIGIT SEVEN
+ '8' # 0x38 -> DIGIT EIGHT
+ '9' # 0x39 -> DIGIT NINE
+ ':' # 0x3A -> COLON
+ ';' # 0x3B -> SEMICOLON
+ '<' # 0x3C -> LESS-THAN SIGN
+ '=' # 0x3D -> EQUALS SIGN
+ '>' # 0x3E -> GREATER-THAN SIGN
+ '?' # 0x3F -> QUESTION MARK
+ '@' # 0x40 -> COMMERCIAL AT
+ 'A' # 0x41 -> LATIN CAPITAL LETTER A
+ 'B' # 0x42 -> LATIN CAPITAL LETTER B
+ 'C' # 0x43 -> LATIN CAPITAL LETTER C
+ 'D' # 0x44 -> LATIN CAPITAL LETTER D
+ 'E' # 0x45 -> LATIN CAPITAL LETTER E
+ 'F' # 0x46 -> LATIN CAPITAL LETTER F
+ 'G' # 0x47 -> LATIN CAPITAL LETTER G
+ 'H' # 0x48 -> LATIN CAPITAL LETTER H
+ 'I' # 0x49 -> LATIN CAPITAL LETTER I
+ 'J' # 0x4A -> LATIN CAPITAL LETTER J
+ 'K' # 0x4B -> LATIN CAPITAL LETTER K
+ 'L' # 0x4C -> LATIN CAPITAL LETTER L
+ 'M' # 0x4D -> LATIN CAPITAL LETTER M
+ 'N' # 0x4E -> LATIN CAPITAL LETTER N
+ 'O' # 0x4F -> LATIN CAPITAL LETTER O
+ 'P' # 0x50 -> LATIN CAPITAL LETTER P
+ 'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ 'R' # 0x52 -> LATIN CAPITAL LETTER R
+ 'S' # 0x53 -> LATIN CAPITAL LETTER S
+ 'T' # 0x54 -> LATIN CAPITAL LETTER T
+ 'U' # 0x55 -> LATIN CAPITAL LETTER U
+ 'V' # 0x56 -> LATIN CAPITAL LETTER V
+ 'W' # 0x57 -> LATIN CAPITAL LETTER W
+ 'X' # 0x58 -> LATIN CAPITAL LETTER X
+ 'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ 'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ '[' # 0x5B -> LEFT SQUARE BRACKET
+ '\\' # 0x5C -> REVERSE SOLIDUS
+ ']' # 0x5D -> RIGHT SQUARE BRACKET
+ '^' # 0x5E -> CIRCUMFLEX ACCENT
+ '_' # 0x5F -> LOW LINE
+ '`' # 0x60 -> GRAVE ACCENT
+ 'a' # 0x61 -> LATIN SMALL LETTER A
+ 'b' # 0x62 -> LATIN SMALL LETTER B
+ 'c' # 0x63 -> LATIN SMALL LETTER C
+ 'd' # 0x64 -> LATIN SMALL LETTER D
+ 'e' # 0x65 -> LATIN SMALL LETTER E
+ 'f' # 0x66 -> LATIN SMALL LETTER F
+ 'g' # 0x67 -> LATIN SMALL LETTER G
+ 'h' # 0x68 -> LATIN SMALL LETTER H
+ 'i' # 0x69 -> LATIN SMALL LETTER I
+ 'j' # 0x6A -> LATIN SMALL LETTER J
+ 'k' # 0x6B -> LATIN SMALL LETTER K
+ 'l' # 0x6C -> LATIN SMALL LETTER L
+ 'm' # 0x6D -> LATIN SMALL LETTER M
+ 'n' # 0x6E -> LATIN SMALL LETTER N
+ 'o' # 0x6F -> LATIN SMALL LETTER O
+ 'p' # 0x70 -> LATIN SMALL LETTER P
+ 'q' # 0x71 -> LATIN SMALL LETTER Q
+ 'r' # 0x72 -> LATIN SMALL LETTER R
+ 's' # 0x73 -> LATIN SMALL LETTER S
+ 't' # 0x74 -> LATIN SMALL LETTER T
+ 'u' # 0x75 -> LATIN SMALL LETTER U
+ 'v' # 0x76 -> LATIN SMALL LETTER V
+ 'w' # 0x77 -> LATIN SMALL LETTER W
+ 'x' # 0x78 -> LATIN SMALL LETTER X
+ 'y' # 0x79 -> LATIN SMALL LETTER Y
+ 'z' # 0x7A -> LATIN SMALL LETTER Z
+ '{' # 0x7B -> LEFT CURLY BRACKET
+ '|' # 0x7C -> VERTICAL LINE
+ '}' # 0x7D -> RIGHT CURLY BRACKET
+ '~' # 0x7E -> TILDE
+ '\x7f' # 0x7F -> DELETE
+ '\x80' # 0x80 ->
+ '\x81' # 0x81 ->
+ '\x82' # 0x82 ->
+ '\x83' # 0x83 ->
+ '\x84' # 0x84 ->
+ '\x85' # 0x85 ->
+ '\x86' # 0x86 ->
+ '\x87' # 0x87 ->
+ '\x88' # 0x88 ->
+ '\x89' # 0x89 ->
+ '\x8a' # 0x8A ->
+ '\x8b' # 0x8B ->
+ '\x8c' # 0x8C ->
+ '\x8d' # 0x8D ->
+ '\x8e' # 0x8E ->
+ '\x8f' # 0x8F ->
+ '\x90' # 0x90 ->
+ '\x91' # 0x91 ->
+ '\x92' # 0x92 ->
+ '\x93' # 0x93 ->
+ '\x94' # 0x94 ->
+ '\x95' # 0x95 ->
+ '\x96' # 0x96 ->
+ '\x97' # 0x97 ->
+ '\x98' # 0x98 ->
+ '\x99' # 0x99 ->
+ '\x9a' # 0x9A ->
+ '\x9b' # 0x9B ->
+ '\x9c' # 0x9C ->
+ '\x9d' # 0x9D ->
+ '\x9e' # 0x9E ->
+ '\x9f' # 0x9F ->