text stringlengths 4 1.02M | meta dict |
|---|---|
""" Logging Configuration """
from __future__ import absolute_import
import logging
def setup():
    """Install the pipe-delimited log format and enable DEBUG on 'nel'."""
    logging.basicConfig(format='%(asctime)s|%(levelname)s|%(module)s|%(message)s')
    logging.getLogger('nel').setLevel(logging.DEBUG)
def getLogger():
    """Return the shared 'nel' logger instance."""
    log = logging.getLogger('nel')
    return log
setup() | {
"content_hash": "2cc36bad105578efe3442719c0c4ca35",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.6820987654320988,
"repo_name": "wikilinks/sift",
"id": "8329c390ac4ecfb452fc989994723ea9a31245fb",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sift/logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "15787"
},
{
"name": "Python",
"bytes": "51495"
},
{
"name": "Shell",
"bytes": "1737"
}
],
"symlink_target": ""
} |
import socket
import os
import argparse
from os.path import isdir
class HTTPServer():
    """Minimal single-threaded HTTP file server rooted at the process cwd.

    Serves files relative to os.getcwd(); error pages are looked up in an
    ``errors/`` directory next to this source file.
    """

    BUFFER_SIZE = 1024*8  # maximum bytes read from a connection per request

    def __init__(self, port=80, verbose=False):
        """Create and bind a listening TCP socket.

        port    -- TCP port to bind (0 lets the OS pick an ephemeral port).
        verbose -- print connection/request information when True.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # BUG FIX: previously bound to the module-level ``args.port``,
        # silently ignoring the ``port`` argument (and raising NameError
        # when the class was used as a library).
        sock.bind(("", port))
        sock.listen(1)
        self.sock = sock
        self.conn = None
        self.addr = None
        self.verbose = verbose
        self.cwd = os.getcwd()
        self.serve_path = os.path.dirname(os.path.realpath(__file__))
        if self.verbose:
            print("ready")

    def run(self):
        """Accept and serve connections one at a time until Ctrl-C."""
        try:
            while True:
                conn, addr = self.sock.accept()
                self.conn = conn
                self.addr = addr
                if self.verbose:
                    print("\nconnection: " + addr[0])
                data = conn.recv(HTTPServer.BUFFER_SIZE)
                if not data:
                    conn.close()
                    continue
                request = HTTPRequest(data)
                if request.error_code is not None:
                    # BUG FIX: HTTPRequest exposes error_code/error_message;
                    # the attributes errno/errstr used before do not exist.
                    self.handle_error(request.error_code, request.error_message)
                else:
                    if self.verbose:
                        print(request.path)
                    self.handle_request(request)
                conn.close()
        except KeyboardInterrupt:
            if self.verbose:
                print("\nCaught keyboard interrupt. Shutting down.")
            if self.conn is not None:
                self.conn.close()
            self.sock.close()

    def handle_request(self, request):
        """Send the file named by request.path, or a 404 error response."""
        request.file = None
        if isdir(self.cwd + request.path):
            # Directory: try index.html then index.htm.
            for index in ("/index.html", "/index.htm"):
                try:
                    path = self.cwd + request.path + index
                    request.file = open(path, "rb")
                    request.fileSize = os.stat(path).st_size
                    break
                except IOError as e:
                    # BUG FIX: IOError has no ``errstr``; use ``strerror``.
                    if request.verbose:
                        print(str(e.errno) + " " + str(e.strerror))
            if request.file is None:
                # should generate index here but 404 for now
                self.handle_error(404, "File not found.")
                # BUG FIX: previously fell through and tried to open the
                # directory itself, producing a second error response.
                return
        else:
            try:
                path = self.cwd + request.path
                request.file = open(path, "rb")
                request.fileSize = os.stat(path).st_size
            except IOError as e:
                # BUG FIX: referenced the undefined name ``f`` here.
                if request.verbose:
                    print(str(e.errno) + " " + str(e.strerror))
                self.handle_error(404, "File not found.")
                return
        header = self.generate_response_header(
            200, "OK", "text/html", request.fileSize)
        self.conn.send(bytes(header, "utf-8"))
        self.conn.send(request.file.read())
        request.file.close()  # avoid leaking the file descriptor

    def handle_error(self, errno, errstr):
        """Send an error response, using an HTML error page when available."""
        body = bytes(errstr, "utf-8")  # fallback body when no page exists
        size = len(body)
        fd = None
        try:
            path = self.serve_path + "/errors/" + str(errno) + ".html"
            fd = open(path, "rb")
            size = os.stat(path).st_size
        except IOError:
            print("Error opening error")
            try:
                path = self.serve_path + "/errors/500.html"
                fd = open(path, "rb")
                size = os.stat(path).st_size
            except IOError:
                # BUG FIX: previously left ``fd`` bound to the message
                # string and crashed calling .read() on it below.
                fd = None
        header = self.generate_response_header(
            errno, errstr, "text/html", size)
        self.conn.send(bytes(header, "utf-8"))
        self.conn.send(fd.read() if fd is not None else body)
        if fd is not None:
            fd.close()

    def generate_response_header(self, code, message, type, length):
        """Build a minimal HTTP/1.1 response header string."""
        header = "HTTP/1.1 " + str(code) + " " + message + "\r\n"
        header += "Content-Type: " + type + "; charset=utf-8\r\n"
        header += "Content-Length: " + str(length) + "\r\n\r\n"
        return header
class HTTPRequest:
    """Parsed HTTP request; extracts the request path from raw bytes.

    After construction either ``path`` is set, or ``error_code`` /
    ``error_message`` describe why parsing failed.
    """

    def __init__(self, request, verbose=False):
        """Decode *request* (bytes) and parse its request line."""
        self.verbose = verbose
        # BUG FIX: "replace" keeps a malformed (non-UTF-8) request from
        # raising UnicodeDecodeError and killing the server loop.
        self.fullRequest = str(request, "utf-8", "replace")
        self.error_code = self.error_message = None
        self.parse_request()

    def parse_request(self):
        """Parse the request line; record a 400 error when malformed."""
        requestline = self.fullRequest.split("\r\n")[0]
        parts = requestline.split()
        if len(parts) < 2:
            # BUG FIX: previously raised IndexError on an empty/short
            # request line instead of reporting a client error.
            self.path = None
            self.send_error(400, "Bad request syntax.")
            return
        self.path = parts[1]

    def send_error(self, code, message):
        """Record an error for the server to report back to the client."""
        self.error_code = code
        self.error_message = message
if __name__ == "__main__":
    # Command-line entry point: parse options and start serving.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "port",
        type=int,
        nargs="?",
        default=8888,
        help="The port to run on. Defaults to 8888.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbosity.",
    )
    args = parser.parse_args()
    server = HTTPServer(port=args.port, verbose=args.verbose)
    server.run()
| {
"content_hash": "9e98b779df0f7c4bb60003ced1abfff8",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 70,
"avg_line_length": 34.74496644295302,
"alnum_prop": 0.5070504152984354,
"repo_name": "Squaar/webserver",
"id": "173105a3a471f0a71d37c3542626cea89372a353",
"size": "5197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywebserver/HTTPServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5525"
}
],
"symlink_target": ""
} |
"""The admin module."""
from . import views
| {
"content_hash": "92892a2c64f47763851f88b57833c7f4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 22,
"alnum_prop": 0.6590909090909091,
"repo_name": "codeforamerica/comport",
"id": "02dcde2ed06c251ef7e6ae7578a5a54b07aee387",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comport/admin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6889"
},
{
"name": "HTML",
"bytes": "73956"
},
{
"name": "JavaScript",
"bytes": "228515"
},
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "725626"
},
{
"name": "Ruby",
"bytes": "1030"
}
],
"symlink_target": ""
} |
import re
from thefuck.utils import for_app
from thefuck.system import open_command
@for_app('yarn', at_least=2)
def match(command):
    """True for ``yarn help <cmd>`` runs that print a documentation URL."""
    asked_for_help = command.script_parts[1] == 'help'
    return asked_for_help and 'for documentation about this command.' in command.stdout
def get_new_command(command):
    """Replace the failed command with one opening the printed docs URL."""
    pattern = r'Visit ([^ ]*) for documentation about this command.'
    url = re.findall(pattern, command.stdout)[0]
    return open_command(url)
| {
"content_hash": "005dc7228fe61e84cd52ebf14cd665ac",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 25.352941176470587,
"alnum_prop": 0.6705336426914154,
"repo_name": "mlk/thefuck",
"id": "a4d8f931ece3c32847700187deb22cf8f208fb9a",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thefuck/rules/yarn_help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "391804"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
} |
import six
from django.template import TemplateSyntaxError
from django.template.loader import render_to_string
from ttag import core, args
class TemplateTagOptions(core.Options):
    """Tag options that inject an optional template-name argument."""

    def __init__(self, meta, *args, **kwargs):
        super(TemplateTagOptions, self).__init__(meta=meta, *args, **kwargs)
        # Name of the tag argument used to pass a template (default "using").
        self.template_name = getattr(meta, 'template_name', 'using')

    def post_process(self):
        super(TemplateTagOptions, self).post_process()
        positional = [name for name, arg in self.named_args.items()
                      if not arg.keyword]
        # A tag may not declare its own positional argument with the
        # reserved template name unless it was inherited from a parent.
        if (self.template_name in positional and
                self.template_name not in self.parent_args):
            raise TemplateSyntaxError(
                "%s can not explicitly define a named argument called %r" %
                (self.name, self.template_name))
        template_arg = args.Arg(required=False, named=True)
        template_arg.name = self.template_name
        self.named_args[self.template_name] = template_arg
class TemplateTagMetaclass(core.DeclarativeArgsMetaclass):
    # Use the template-aware options class when collecting declared args.
    options_class = TemplateTagOptions
@six.add_metaclass(TemplateTagMetaclass)
class TemplateTag(core.BaseTag):
    """Tag whose output is rendered through a template.

    The template name comes from the tag's optional template argument
    (configurable via ``Meta.template_name``), falling back to the
    :meth:`using` hook.
    """

    def render(self, context):
        data = self.resolve(context)
        fallback = self.using(data)
        template_name = data.get(self._meta.template_name, fallback)
        if not template_name:
            raise TemplateSyntaxError(
                "%s wasn't given a template to render with" % self._meta.name)
        extra_context = {
            'data': data,
            'output': self.output(data),
        }
        return render_to_string(template_name, extra_context, context)

    def using(self, data):
        """Hook returning a default template name.

        Subclasses must implement this when no template is supplied as a
        tag argument, e.g.::

            class RenderTag(TemplateTag):
                def using(self, data):
                    return 'templatetags/%s.html' % self._meta.name.lower()
        """
        return None
| {
"content_hash": "64d0f233bf2b7de53ae0467c3fff1a1f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 34.12068965517241,
"alnum_prop": 0.627589691763517,
"repo_name": "lincolnloop/django-ttag",
"id": "af7ee134a4e5711cdce216b7a0242faab2159ddc",
"size": "1979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ttag/helpers/template_tag.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "88"
},
{
"name": "Python",
"bytes": "57964"
}
],
"symlink_target": ""
} |
import csv
import pdb
import sys
import gzip
import os
from itertools import islice, chain
from collections import defaultdict
import collections.abc
from copy import copy
from sys import intern
from numbers import Integral
# Public API of this module.
__all__ = ["Entry", "Gr", "Variant", "bed", "appris", "gff3", "vcf", "depth_alt_depth_function", "chromosome_order", "gzopen"]
nucleotide = {"A": "R", "G": "R", "C": "Y", "T": "Y"}  # puRine (R): A, G; pYrimidine (Y): C, T
# Transcript accession prefixes whose trailing ".version" suffix is stripped.
ENSEMBL_REFSEQ_PREFIXES = set(["ENS", "NM_", "NR_", "XM_", "XR_"])
# Map common chromosome aliases (integers, bare numbers, lower/upper case
# letters, "chr23"-style names and RefSeq NC_ accessions) onto the canonical
# UCSC-style names chr1..chr22, chrX, chrY, chrM.  Names already in canonical
# form ("chr1", "chrX", ...) are deliberately NOT keys; Entry passes unknown
# names through unchanged.
standard_chrom = {}
for _num in range(1, 23):
    _name = "chr{}".format(_num)
    standard_chrom[_num] = _name
    standard_chrom[str(_num)] = _name
    standard_chrom["NC_{:06d}".format(_num)] = _name
for _num, _letter, _name in ((23, "x", "chrX"), (24, "y", "chrY"), (25, "m", "chrM")):
    standard_chrom[_num] = _name
    standard_chrom[str(_num)] = _name
    standard_chrom["chr{}".format(_num)] = _name
    standard_chrom[_letter] = _name
    standard_chrom["chr" + _letter] = _name
    standard_chrom[_letter.upper()] = _name
standard_chrom["NC_000023"] = "chrX"
standard_chrom["NC_000024"] = "chrY"
standard_chrom["NC_012920"] = "chrM"
del _num, _letter, _name
class AnnotatedString(str):
    """str subclass able to carry COSMIC annotation attributes."""
    __slots__ = ("transcript", "site", "histology", "cds")
def chromosome_order(key):
    """Sort key: numeric chromosomes first in numeric order, then others."""
    stripped = key[3:] if key.startswith("chr") else key
    if stripped.isnumeric():
        return ("", int(stripped))
    return (stripped, 0)
def gzopen(fn, *args, **kwargs):
    """Open *fn* with gzip.open when it ends in .gz, else the builtin open."""
    opener = gzip.open if fn.endswith(".gz") else open
    return opener(fn, *args, **kwargs)
def iterpath(paths):
    """Normalise *paths* to an iterable: a single path-like becomes a
    1-tuple, None becomes (), and any other iterable is returned as-is."""
    try:
        single = os.fspath(paths)
    except TypeError:
        return paths or ()
    return (single,)
class Entry(object):
    """A genomic interval: chromosome, 1-based inclusive start/stop,
    optional name and strand ("+", "-" or ".")."""
    # BUG FIX: was "__slots___" (three trailing underscores) — just an
    # ordinary class attribute, so slots were never actually in effect.
    __slots__ = ("chrom", "start", "stop", "name", "strand")
    def __init__(self, chrom, start, stop, name=".", strand="."):
        """Normalise chrom via standard_chrom and validate coordinates.

        Raises TypeError for non-integral start/stop and ValueError for an
        invalid strand.
        """
        self.chrom = standard_chrom.get(chrom, intern(chrom))
        if not isinstance(start, Integral):
            raise TypeError("Entry: start must be an integer")
        self.start = start
        if not isinstance(stop, Integral):
            raise TypeError("Entry: stop must be an integer")
        self.stop = stop
        self.name = name
        if strand not in ("+", "-", "."):
            raise ValueError("Entry: strand must be equal to '+', '-' or '.'")
        self.strand = intern(strand)
    def __repr__(self):
        return "{}({}, {}, {}, {}, {})".format(type(self).__name__, repr(self.chrom), repr(self.start), repr(self.stop), repr(self.name), repr(self.strand))
    def __str__(self):
        return "{}:{}-{}".format(self.chrom, self.start, self.stop)
    def __eq__(self, other):
        # BUG FIX: return NotImplemented for foreign types instead of
        # raising AttributeError on other._tuple.
        if not isinstance(other, Entry):
            return NotImplemented
        return self._tuple == other._tuple
    def __lt__(self, other):
        if not isinstance(other, Entry):
            return NotImplemented
        return self._tuple < other._tuple
    def __hash__(self):
        return hash(self._tuple)
    def __iter__(self):
        # A lone Entry iterates as a one-element sequence so it can be
        # passed anywhere an iterable of entries is expected.
        yield self
    @property
    def _tuple(self):
        # Canonical value used by equality, ordering and hashing.
        return (self.chrom, self.start, self.stop, self.name, self.strand)
class Variant(Entry):
    """A normalised small variant (snp/ins/del/delins).

    Shared leading/trailing bases are trimmed from ref/alt; an empty
    trimmed allele is represented as "-".
    """
    # BUG FIX: was "__slots___"; the typo meant the slot names were never
    # registered and attributes silently went into a per-instance dict.
    __slots__ = ("ref", "alt", "depth", "alt_depth")
    def __str__(self):
        return "{}:{} {}/{}".format(self.chrom, self.start, self.ref, self.alt)
    def __init__(self, chrom, pos, ref, alt, name=".", depth=None, alt_depth=None):
        """Normalise the allele pair and initialise the interval.

        Raises ValueError when ref == alt.
        """
        if ref == alt:
            raise ValueError("Alt allele cannot be equal to ref allele")
        # Trim bases shared at the start, advancing pos accordingly.
        while ref and alt and ref[0] == alt[0]:
            ref = ref[1:]
            alt = alt[1:]
            pos += 1
        # Trim bases shared at the end.
        while ref and alt and ref[-1] == alt[-1]:
            ref = ref[:-1]
            alt = alt[:-1]
        self.ref = ref or "-"
        self.alt = alt or "-"
        self.depth = depth
        self.alt_depth = alt_depth
        # Pure insertions span zero reference bases (stop = pos - 1).
        super().__init__(chrom, pos, pos-1 if self.ref=="-" else pos+len(ref)-1, name)
    @property
    def pos(self):
        """Alias for start (VCF-style position after normalisation)."""
        return self.start
    @property
    def vartype(self):
        """One of "ins", "del", "snp" or "delins"."""
        if self.ref == "-":
            return "ins"
        elif self.alt == "-":
            return "del"
        elif len(self.ref) == len(self.alt) == 1:
            return "snp"
        else:
            return "delins"
    @property
    def substitution(self):
        """"transition"/"transversion" for snps, else None."""
        try:
            return "transition" if nucleotide[self.ref]==nucleotide[self.alt] else "transversion"
        except KeyError:
            return None
    @property
    def vaf(self):
        """Variant allele fraction; raises if depth is missing or zero."""
        return float(self.alt_depth)/self.depth
    @property
    def zygosity(self):
        """Heuristic zygosity call from the allele fraction."""
        vaf = self.vaf
        if 0.95 <= vaf:
            zygosity = "hemizygous" if self.chrom in ("chrX", "chrY", "chrM") else "homozygous"
        elif 0.45 <= vaf <= 0.55:
            zygosity = "heterozygous"
        else:
            zygosity = "unknown"
        return zygosity
def bisect_left(a, start, lo, hi):
    """Return the leftmost index in the sorted GrList *a* whose entry could
    still overlap a region beginning at *start*.

    The search target is offset by ``a.maxlength`` (the longest entry in the
    list) so that no entry starting earlier but reaching *start* is skipped.
    ``lo`` and ``hi`` bound the slice searched.
    """
    target = start - a.maxlength + 1
    while lo < hi:
        mid = (lo + hi) // 2
        if a[mid].start < target:
            lo = mid + 1
        else:
            hi = mid
    return lo
class Iterate(object):
    """Stateful forward scanner over a sorted GrList.

    Successive yield_overlapping() calls must use non-decreasing start
    positions: the internal low-water mark only ever moves forward.
    """

    def __init__(self, a):
        self.a = a
        self.lena = len(a)
        self.lo = 0

    def yield_overlapping(self, start, stop):
        """Yield every entry overlapping the inclusive range [start, stop]."""
        # Advance past entries that cannot reach *start* any more.
        self.lo = bisect_left(self.a, start, self.lo, self.lena)
        idx = self.lo
        while idx < self.lena:
            entry = self.a[idx]
            if entry.start > stop:
                break
            if entry.stop >= start:
                yield entry
            idx += 1
class GrList(list):
    """Per-chromosome list of entries tracking the longest entry length and
    the furthest stop coordinate seen (both maintained by Gr.add)."""

    def __init__(self):
        super().__init__()
        self.maxlength = self.maxstop = 0
class Gr(collections.abc.Mapping):
    """Genomic ranges: a mapping of chromosome name -> sorted GrList of
    Entry objects, with interval-algebra operations (merge, overlap,
    coverage tests).  Entries must be kept sorted for the Iterate-based
    scans to be correct; sort() (or the context-manager protocol) does so.
    """
    def __init__(self, iterable=()):
        self._data = defaultdict(GrList)
        # The context manager re-sorts on exit, so bulk loading stays O(n log n).
        with self as gr:
            for entry in iterable:
                gr.add(entry)
    def __repr__(self):
        return "{}({})".format(type(self).__name__, ", ".join(repr(entry) for entry in self))
    def __len__(self):
        # Total number of entries across all chromosomes.
        return sum([len(chrom) for chrom in self._data.values()])
    def __iter__(self):
        # Iterate entries chromosome by chromosome in genome order.
        for key in sorted(self._data.keys(), key=chromosome_order):
            yield from self._data[key]
    def __getitem__(self, key):
        return self._data[key]
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        # Guarantee sorted order after a block of add() calls.
        self.sort()
    def keys(self):
        return self._data.keys()
    def add(self, entry):
        """Append *entry*, maintaining the per-chromosome maxlength/maxstop
        bookkeeping used by bisect_left.  Returns self for chaining."""
        grlist = self._data[entry.chrom]
        grlist.append(entry)
        length = entry.stop - entry.start + 1
        if length > grlist.maxlength:
            grlist.maxlength = length
        if entry.stop > grlist.maxstop:
            grlist.maxstop = entry.stop
        return self
    def sort(self):
        for chrom in self._data.values():
            chrom.sort()
    @property
    def merged(self):
        """New Gr with adjacent/overlapping entries merged (names/strands of
        merged runs are discarded)."""
        new = type(self)()
        for key, chrom in self._data.items():
            new.add(chrom[0])
            nchrom = new._data[key]
            nchrom.maxstop = chrom.maxstop
            for entry in islice(chrom, 1, len(chrom)):
                if entry.start-1 <= nchrom[-1].stop:
                    # Touching or overlapping: extend the previous entry.
                    if entry.stop > nchrom[-1].stop:
                        nchrom[-1] = Entry(key, nchrom[-1].start, entry.stop)
                        length = entry.stop - nchrom[-1].start + 1
                        if length > nchrom.maxlength:
                            nchrom.maxlength = length
                else:
                    # NOTE(review): this append bypasses add(), so
                    # nchrom.maxlength is not updated for disjoint entries —
                    # bisect_left may then understate the search window on
                    # the merged result.  Confirm and fix upstream.
                    nchrom.append(entry)
        return new
    def overlapped_by(self, other):
        """New Gr containing the portions of self's entries that lie inside
        entries of *other* (entries are clipped to the overlap)."""
        if isinstance(other, Entry): other = Gr(other)
        def a_overlapped_by_b(a, b):
            # Clip entry a to the extent of b (copy only when clipping).
            if b.start <= a.start and b.stop >= a.stop:
                entry = a
            else:
                entry = copy(a)
                if b.start > entry.start:
                    entry.start = b.start
                if b.stop < entry.stop:
                    entry.stop = b.stop
            return entry
        new = type(self)()
        for key, chrom in self._data.items():
            if key in other._data:
                iterateself = Iterate(chrom)
                iterateother = Iterate(other._data[key])
                for a in iterateself.yield_overlapping(other._data[key][0].start, other._data[key].maxstop):
                    entry = None
                    for b in iterateother.yield_overlapping(a.start, a.stop):
                        if entry is None:
                            entry = a_overlapped_by_b(a, b)
                        else:
                            if b.start <= entry.stop + 1:
                                # b extends the current clipped run.
                                if b.stop > entry.stop:
                                    if b.stop < a.stop:
                                        entry.stop = b.stop
                                        continue
                                    else:
                                        entry.stop = a.stop
                                        break
                            else:
                                # Gap: emit the finished run, start a new one.
                                new.add(entry)
                                entry = a_overlapped_by_b(a, b)
                        if entry.stop == a.stop:
                            break
                    if entry is not None:
                        new.add(entry)
        new.sort()
        return new
    def not_touched_by(self, other):
        """New Gr with self's entries that overlap nothing in *other*."""
        if isinstance(other, Entry): other = Gr(other)
        new = type(self)()
        for key, chrom in self._data.items():
            if key not in other._data:
                for a in chrom:
                    new.add(a)
            else:
                iterateother = Iterate(other._data[key])
                for a in chrom:
                    nottouching = True
                    for b in iterateother.yield_overlapping(a.start, a.stop):
                        nottouching = False
                        break
                    if nottouching:
                        new.add(a)
        return new
    def touched_by(self, other):
        """New Gr with self's entries that overlap anything in *other*."""
        if isinstance(other, Entry): other = Gr(other)
        new = type(self)()
        for key, chrom in self._data.items():
            if key in other._data:
                iterateself = Iterate(chrom)
                iterateother = Iterate(other._data[key])
                for a in iterateself.yield_overlapping(other._data[key][0].start, other._data[key].maxstop):
                    for b in iterateother.yield_overlapping(a.start, a.stop):
                        new.add(a)
                        break
        return new
    def covered_by(self, other):
        """New Gr with self's entries completely covered (no gaps) by the
        union of entries in *other*."""
        if isinstance(other, Entry): other = Gr(other)
        new = type(self)()
        for key, chrom in self._data.items():
            if key in other._data:
                iterateself = Iterate(chrom)
                iterateother = Iterate(other._data[key])
                for a in iterateself.yield_overlapping(other._data[key][0].start, other._data[key].maxstop):
                    laststop = a.start - 1
                    for b in iterateother.yield_overlapping(a.start, a.stop):
                        if b.start > laststop + 1:
                            break  # uncovered gap inside a
                        if b.stop > laststop:
                            laststop = b.stop
                    if laststop >= a.stop:
                        new.add(a)
        return new
    def combined_with(self, other):
        """New sorted Gr containing all entries of self and *other*."""
        new = type(self)()
        for entry in chain(self, other):
            new.add(entry)
        new.sort()
        return new
    @property
    def names(self):
        """Set of entry names across all chromosomes."""
        return set([entry.name for entry in self])
    @property
    def bases(self):
        """Total bases spanned (overlaps counted multiply; merge first
        for a non-redundant count)."""
        bases = 0
        for entry in self:
            bases += entry.stop - entry.start + 1
        return bases
def bed(paths):
    """Yield Entry objects from BED files (plain or gzipped).

    BED starts are 0-based half-open; they are converted to the 1-based
    inclusive coordinates Entry uses.  Name (column 4) and strand
    (column 6) are included when present.
    """
    for path in iterpath(paths):
        with gzopen(path, "rt") as handle:
            for line in handle:
                line = line.strip()
                if not line:
                    continue
                fields = line.split("\t")
                chrom = fields[0]
                start = int(fields[1]) + 1
                stop = int(fields[2])
                if len(fields) < 4:
                    yield Entry(chrom, start, stop)
                elif len(fields) < 6:
                    yield Entry(chrom, start, stop, fields[3])
                else:
                    yield Entry(chrom, start, stop, fields[3], fields[5])
def appris(paths):
    """Parse APPRIS annotation files into {transcript: score}.

    Score is 2 for principal transcripts, 1 for alternative ones.
    Transcript version suffixes are stripped.
    """
    score = {}
    for path in iterpath(paths):
        with gzopen(path, "rt") as handle:
            for line in handle:
                line = line.strip()
                if not line:
                    continue
                fields = line.split("\t")
                transcript = fields[2].split(".")[0]
                score[transcript] = 2 if fields[4].startswith("PRINCIPAL") else 1
    return score
def bases(components):
    """Total number of bases covered by an iterable of interval-like
    objects with 1-based inclusive start/stop attributes."""
    total = 0
    for component in components:
        total += component.stop - component.start + 1
    return total
# RefSeq accession prefix ranking: curated (NM/NR) outrank predicted (XM/XR).
REFSEQ = {"NM": 4, "NR": 3, "XM": 2, "XR": 1}
# Translation table deleting upper-case letters, "orf" and underscores,
# leaving the numeric portion of a transcript/gene identifier.
DELETE_NON_DIGIT = str.maketrans("", "", "ABCDEFGHIJKLMNOPQRSTUVWXYZorf_")
def cannonical(transcript_name, elements):
    """Sort key ranking how canonical a transcript is.

    Compares RefSeq prefix rank, then CDS / exon / transcript span sizes,
    and finally prefers the lower accession number (hence the negation).
    """
    refseq_rank = REFSEQ.get(transcript_name[:2], 0)
    accession_number = int(transcript_name.translate(DELETE_NON_DIGIT))
    return (refseq_rank,
            bases(elements.get("CDS", ())),
            bases(elements.get("exon", ())),
            bases(elements.get("transcript", ())),
            -accession_number)
# Column indices of a GFF3 row.
SEQID = 0
SOURCE = 1
TYPE = 2
START = 3
END = 4
SCORE = 5
STRAND = 6
PHASE = 7
ATTRIBUTES = 8
def cosmic(paths):
    """Yield Entry objects from COSMIC mutation export TSV files.

    The entry name is an AnnotatedString gene symbol carrying transcript,
    site, histology and cds annotations.  Rows without a genome position
    are skipped.
    """
    for path in iterpath(paths):
        with gzopen(path, "rt") as handle:
            for row in csv.DictReader(handle, delimiter="\t"):
                gene = AnnotatedString(intern(row["Gene name"]))
                if "_" in gene:
                    # Strip "_ENST..."-style suffixes from the symbol.
                    gene = AnnotatedString(gene.split("_")[0])
                gene.transcript = intern(row["Accession Number"].split(".")[0])
                gene.site = intern(row["Primary site"])
                gene.histology = intern(row["Primary histology"])
                gene.cds = row["Mutation CDS"]
                position = row["Mutation genome position"]
                try:
                    chrom, coordinates = position.split(":")
                    start, stop = coordinates.split("-")
                except ValueError:
                    continue  # If no position then ignore
                yield(Entry(chrom, int(start), int(stop), gene, row["Mutation strand"]))
def gff3(paths, what, names=None, principal=None, all_transcripts=False):
    """Yield Entry objects for the requested features of a GFF3 reference.

    paths           -- GFF3 file path(s) (plain or gzipped).
    what            -- "transcripts", "exons", "codingregions" or "codingexons".
    names           -- optional iterable of "GENE [TRANSCRIPT ...]" strings
                       restricting the output; without transcripts listed,
                       a single/canonical transcript is chosen per gene.
    principal       -- optional APPRIS file path(s) used to rank transcripts.
    all_transcripts -- emit every transcript of a gene instead of one.
    """
    if what not in ("transcripts", "exons", "codingregions", "codingexons"):
        raise ValueError(f"Invalid value for what: {what}")
    what = what[:-1] # remove trailing s
    codingregion = what == "codingregion"
    if what in ("codingregion", "codingexon"):
        what = "CDS"
    # needed: gene symbol -> explicitly requested transcripts (may be empty).
    needed = defaultdict(list)
    for name in names or ():
        splitname = name.split()
        if splitname:
            needed[splitname[0]].extend(splitname[1:])
    score = appris(principal)
    # matches: gene -> transcript -> feature type -> [Entry, ...]
    matches = defaultdict(lambda:defaultdict(lambda:defaultdict(list)))
    gene_name = ""
    for path in iterpath(paths):
        # Deliberately unbind gene_name so the first transcript row of each
        # file triggers the NameError handler below, re-detecting whether
        # this file uses "gene_name=" (Ensembl) or "gene=" (RefSeq).
        # NOTE(review): a file containing no transcript rows would leave
        # gene_name unbound and make this del raise NameError — confirm.
        del gene_name
        # These 3 variables are used for weeding out refseq duplicate genes
        gene_attr = ""
        refseq = True
        valid_gene = True
        with gzopen(path, "rt") as f_in:
            transcript = ""
            reader = csv.reader(f_in, delimiter="\t")
            for row in reader:
                if row[0].startswith("#"):
                    continue
                feature = row[TYPE]
                if feature == "gene":
                    # Remember the parent gene's attributes for the
                    # duplicate-gene check on its transcripts.
                    gene_attr = row[ATTRIBUTES]
                if (feature.endswith("transcript") or feature.endswith("RNA")):
                    transcript = ""
                    attributes = row[ATTRIBUTES]
                    try:
                        start = attributes.index(gene_name)
                    except NameError:
                        # First transcript row of this file: detect which
                        # attribute key carries the gene symbol.
                        if "gene_name=" in attributes:
                            gene_name = "gene_name="
                        elif "gene=" in attributes:
                            gene_name = "gene="
                        else:
                            raise ValueError("Gene name not found in transcript attributes")
                        start = attributes.index(gene_name)
                    except ValueError:
                        continue
                    start += len(gene_name)
                    try:
                        stop = attributes.index(";", start)
                    except ValueError:
                        stop = len(attributes)
                    gene = attributes[start:stop]
                    if not names or gene in needed:
                        # Remove refseq duplicate genes
                        if refseq:
                            name = ""
                            ident = ""
                            for attr in gene_attr.split(";"):
                                if attr.startswith("ID="):
                                    # [8:] presumably skips "ID=gene-" in
                                    # RefSeq files — TODO confirm.
                                    ident = attr[8:]
                                elif attr.startswith("Name="):
                                    name = attr[5:]
                            if ident and name:
                                # RefSeq marks duplicate placements with
                                # suffixed IDs; only ID == Name is primary.
                                valid_gene = (ident == name)
                            else:
                                # Not a RefSeq-style file; stop checking.
                                refseq = False
                        if valid_gene:
                            chrom = row[SEQID].split(".")[0]
                            chrom = standard_chrom.get(chrom, chrom)
                            strand = row[STRAND]
                            feature = "transcript"
                            start = attributes.index("transcript_id=") + 14
                            try:
                                stop = attributes.index(";", start)
                            except ValueError:
                                stop = len(attributes)
                            transcript = attributes[start:stop]
                            if transcript[:3] in ENSEMBL_REFSEQ_PREFIXES:
                                # Drop the ".version" suffix.
                                transcript = transcript.split(".")[0]
                        else:
                            print(f"Excluding duplicate gene {ident}", file=sys.stderr)
                if transcript and feature in ("transcript", "exon", "CDS"):
                    name = intern(f"{gene} {transcript}")
                    #if feature == "exon":
                        #name = AnnotatedString(name)
                        #attributes = row[ATTRIBUTES]
                        #start = attributes.index("exon_number=") + 12
                        #try:
                            #stop = attributes.index(";", start)
                        #except ValueError:
                            #stop = len(attributes)
                        #exon_number = attributes[start:stop]
                        #name.exon = exon_number
                    entry = Entry(chrom, int(row[START]), int(row[END]), name, strand)
                    matches[gene][transcript][feature].append(entry)
    for gene in sorted(set(needed) - set(matches)):
        print(f"WARNING: {gene} not found in reference file", file=sys.stderr)
    for gene, transcripts in sorted(matches.items()):
        selected = needed[gene]
        if not selected:
            # No explicit transcripts requested: pick one (or all).
            if len(transcripts) == 1 or all_transcripts:
                selected = transcripts.keys()
            else:
                scored = ([], [], []) # unrecognised, alternative, principal
                for transcript in transcripts:
                    scored[score.get(transcript, 0)].append(transcript)
                candidates = scored[2] or scored[1] or scored[0]
                if len(candidates) == 1:
                    selected = candidates
                else:
                    # Break ties with the cannonical() ranking key.
                    selected = sorted(candidates, key=lambda t:cannonical(t, transcripts[t]))[-1:]
        for transcript in selected:
            if transcript not in transcripts:
                print(f"WARNING: {transcript} not found in reference file", file=sys.stderr)
                continue
            features = transcripts[transcript]
            if codingregion and "CDS" in features:
                # Single entry spanning the first to last CDS segment.
                first = features["CDS"][0]
                last = features["CDS"][-1]
                yield Entry(first.chrom,
                            min(first.start, last.start),
                            max(first.stop, last.stop),
                            first.name,
                            first.strand)
            else:
                yield from features.get(what, ())
# Column indices of a VCF data row.
CHROM = 0
POS = 1
ID = 2
REF = 3
ALT = 4
QUAL = 5
FILTER = 6
INFO = 7
FORMAT = 8  # per-sample value columns start at FORMAT + 1
def vcf(paths, name="."):
    """Yield Variant objects from one or more VCF files.

    The depth/alt-depth extraction function is auto-detected from the first
    data row of each file (different callers store depths differently).
    Rows with zero alt depth or with ALT equal to REF or "." are skipped.
    """
    depth_alt_depth = None
    for path in iterpath(paths):
        # Unbind so the NameError below fires on the first data row of each
        # file, forcing per-file re-detection of the depth fields.
        # NOTE(review): a file with no data rows leaves depth_alt_depth
        # unbound, so this del would raise NameError on the next file —
        # confirm whether empty VCFs can occur.
        del depth_alt_depth
        with open(path, "rt") as f:
            for row in f:
                if not row.startswith("#"):
                    # Strip trailing newline/space/semicolons before splitting.
                    row = row.rstrip("\n ;").split("\t")
                    try:
                        dp, ad = depth_alt_depth(row)
                    except NameError:
                        depth_alt_depth = depth_alt_depth_function(row)
                        dp, ad = depth_alt_depth(row)
                    # ad may be None (no_dad): None != 0, so such rows pass.
                    if ad != 0 and row[ALT] not in (row[REF], "."):
                        yield Variant(row[CHROM], int(row[POS]), row[REF], row[ALT], name, dp, ad)
def vd_dp_dad(row):
    """(depth, alt depth) from VarDict INFO fields DP and VD."""
    info = infodict(row)
    depth = int(info["DP"])
    alt_depth = int(info["VD"])
    return (depth, alt_depth)
def ao_ro_dad(row):
    """(depth, alt depth) from INFO RO (ref) + AO (alt) observation counts."""
    info = infodict(row)
    alt_depth = int(info["AO"])
    depth = int(info["RO"]) + alt_depth
    return (depth, alt_depth)
def fao_fro_dad(row):
    """(depth, alt depth) from INFO FRO (flow ref) + FAO (flow alt) counts."""
    info = infodict(row)
    alt_depth = int(info["FAO"])
    depth = int(info["FRO"]) + alt_depth
    return (depth, alt_depth)
def ao_dp_dad(row):
    """(depth, alt depth) from INFO DP (total) and AO (alt) counts."""
    info = infodict(row)
    depth = int(info["DP"])
    alt_depth = int(info["AO"])
    return (depth, alt_depth)
def fao_fdp_dad(row):
    """(depth, alt depth) from INFO FDP (flow total) and FAO (flow alt)."""
    info = infodict(row)
    depth = int(info["FDP"])
    alt_depth = int(info["FAO"])
    return (depth, alt_depth)
def ad_dad(row):
    """(depth, alt depth) from an Illumina-style FORMAT AD = "ref,alt"."""
    ref_count, alt_count = formatdict(row)["AD"].split(",")
    alt_depth = int(alt_count)
    return (int(ref_count) + alt_depth, alt_depth)
def ad_rd_dad(row):
    """(depth, alt depth) where FORMAT AD is the alt depth and RD the ref."""
    fmt = formatdict(row)
    alt_depth = int(fmt["AD"])
    depth = int(fmt["RD"]) + alt_depth
    return (depth, alt_depth)
def dp4_format_dad(row):
    """(depth, alt depth) from FORMAT DP4 = "ref_fwd,ref_rev,alt_fwd,alt_rev"."""
    counts = [int(num) for num in formatdict(row)["DP4"].split(",")]
    alt_depth = counts[2] + counts[3]
    depth = counts[0] + counts[1] + alt_depth
    return (depth, alt_depth)
def dp4_info_dad(row):
    """(depth, alt depth) from INFO DP4 = "ref_fwd,ref_rev,alt_fwd,alt_rev"."""
    counts = [int(num) for num in infodict(row)["DP4"].split(",")]
    alt_depth = counts[2] + counts[3]
    depth = counts[0] + counts[1] + alt_depth
    return (depth, alt_depth)
def strelka_dad(row):
    """(depth, alt depth) from Strelka somatic SNV tier-1 base counts.

    https://github.com/Illumina/strelka/blob/v2.9.x/docs/userGuide/README.md#somatic
    """
    fmt = formatdict(row)
    alt = row[ALT]
    if "," in alt:
        raise RuntimeError("Multiple variants per row")
    alt_depth = int(fmt[alt + "U"].split(",")[0])
    depth = sum(int(fmt[key].split(",")[0]) for key in ("AU", "TU", "CU", "GU"))
    return (depth, alt_depth)
def tar_tir_dad(row):
    """(depth, alt depth) from Strelka somatic indel TAR/TIR tier-1 counts.

    https://github.com/Illumina/strelka/blob/v2.9.x/docs/userGuide/README.md#somatic
    """
    fmt = formatdict(row)
    # CONSISTENCY FIX: use the ALT column constant like every sibling
    # extractor instead of the magic index 4.
    alt = row[ALT]
    if "," in alt:
        raise RuntimeError("Multiple variants per row")
    ad = int(fmt["TIR"].split(",")[0])
    dp = int(fmt["TAR"].split(",")[0]) + ad
    return (dp, ad)
def no_dad(row):
    """Fallback when no recognised depth fields are present."""
    depth = alt_depth = None
    return (depth, alt_depth)
def infodict(row):
    """Parse the VCF INFO column into a dict.

    "KEY=VALUE" tokens map key to value; flag tokens (or tokens that do
    not split into exactly two parts) map the whole token to None.
    """
    parsed = {}
    for token in row[INFO].split(";"):
        try:
            key, value = token.split("=")
        except ValueError:
            key, value = token, None
        parsed[key] = value
    return parsed
def formatdict(row):
    """Zip the FORMAT keys with the first sample column's values."""
    keys = row[FORMAT].split(":")
    values = row[FORMAT + 1].split(":")
    return dict(zip(keys, values))
def depth_alt_depth_function(row):
    """Inspect one VCF data row and return the matching depth extractor.

    Will fall down if a vcf contains multiple variants on the same line.
    The candidate order encodes caller priority and must not be changed.
    """
    if "," in row[ALT]:
        raise RuntimeError("Multiple variants per row")
    info = infodict(row)
    fmt = formatdict(row) if len(row) > FORMAT + 1 else {}
    candidates = (
        (vd_dp_dad, "VD" in fmt and "DP" in info),  # VarDict
        (fao_fro_dad, "FRO" in info and "FAO" in info),
        (ao_ro_dad, "RO" in info and "AO" in info),
        (fao_fdp_dad, "FDP" in info and "FAO" in info),
        (ao_dp_dad, "DP" in info and "AO" in info),
        # In illumina vcfs AD holds "ref,alt" allelic depths; in some other
        # vcfs AD is the alt depth alone with RD holding the ref depth.
        (ad_dad, "," in fmt.get("AD", ())),
        (ad_rd_dad, "AD" in fmt and "RD" in fmt),
        (dp4_format_dad, "DP4" in fmt),
        (strelka_dad, all(key in fmt for key in ("GU", "CU", "AU", "TU"))),
        (tar_tir_dad, "TAR" in fmt and "TIR" in fmt),
        (dp4_info_dad, "DP4" in info),
    )
    for extractor, matched in candidates:
        if matched:
            return extractor
    return no_dad
| {
"content_hash": "cc8e7b96f968b811ed183f5c8086f4f3",
"timestamp": "",
"source": "github",
"line_count": 886,
"max_line_length": 156,
"avg_line_length": 31.2686230248307,
"alnum_prop": 0.4581648859370488,
"repo_name": "eawilson/CoverMi",
"id": "d26e5a3e319bef5f6559d4f235266eee566d1657",
"size": "27704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covermi/gr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88946"
}
],
"symlink_target": ""
} |
from staticflatpages.tests.tests import StaticFlatpageTests
from staticflatpages.tests.tests import StaticFlatpageUtilTests
from staticflatpages.tests.sitemaps import StaticFlatpageSitemapTest
| {
"content_hash": "ebc63f6ce81e5922df23f10a5cacb053",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 68,
"avg_line_length": 64.33333333333333,
"alnum_prop": 0.9067357512953368,
"repo_name": "bradmontgomery/django-staticflatpages",
"id": "a37b5d6096dfc3a779943f3e6585dd6b99f3c4a2",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "staticflatpages/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "678"
},
{
"name": "Python",
"bytes": "12950"
}
],
"symlink_target": ""
} |
from ..exceptions import AllocationException
class IBPException(AllocationException):
    """Generic exception for IBP related errors."""

    def __init__(self, *args, **kwargs):
        # Keep the raw IBP response (if supplied) for callers to inspect.
        response = kwargs.pop("response", None)
        self.ibpResponse = response
        super(IBPException, self).__init__(*args, **kwargs)
| {
"content_hash": "4454ff1fef998f0059f1fd566d54ed55",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 59,
"avg_line_length": 42.42857142857143,
"alnum_prop": 0.6801346801346801,
"repo_name": "periscope-ps/unis",
"id": "6e9b8ea7e2bd07dc4d06872cc020b7b5cecd74b2",
"size": "811",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/protocol/ibp/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2620"
},
{
"name": "JavaScript",
"bytes": "6176"
},
{
"name": "Python",
"bytes": "311941"
},
{
"name": "Shell",
"bytes": "11200"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2
# Lucas-Kanade sparse optical flow demo: track Shi-Tomasi corners through a
# GIF and write each annotated frame out as a PNG.
video_path = 'vid_7.gif'
cap = cv2.VideoCapture(video_path)
# params for ShiTomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors for the tracks
color = np.random.randint(0, 255, (100, 3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
if not ret:
    # BUG FIX: previously crashed inside cvtColor with an opaque error.
    raise IOError("Could not read first frame from " + video_path)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
count = 0
while True:
    print(count)  # BUG FIX: was a Python-2-only print statement
    count = count + 1
    ret, frame = cap.read()
    if not ret:
        break  # BUG FIX: previously crashed when the video ended early
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        # int() casts: newer OpenCV releases reject float point coordinates
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imwrite('../gifs/frame' + str(count) + '.png', img)
    k = cv2.waitKey(30) & 0xff
    if count == 33:
        break  # presumably the number of usable frames in the gif — TODO confirm
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
cap.release()  # release the capture handle when done
# cap.release() | {
"content_hash": "1a1edbc1ff50ce40934d602280da7868",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 89,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6119909502262444,
"repo_name": "AutonomyLab/deep_intent",
"id": "a574e1e85fa078f9feab8f279f5ec46e5471e9a0",
"size": "1768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/autoencoder_model/scripts/thesis_scripts/track.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1344228"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""A JSONRPC class with an IPC backend."""
from socket import socket, AF_UNIX, SOCK_STREAM, SHUT_RDWR
from rpctools.jsonrpc import JSONRPC, is_valid_json
RECV_CHUNK = 4096 # max number of bytes to read from connection at a time.
class IPCRPC(JSONRPC):
    """Sends JSON RPC over a Unix domain socket."""
    def __init__(self, address, verbose):
        """Create a connection for JSONRPC to `address`.

        Arguments:
        address -- The path to the Unix domain socket of an Ethereum
                   client. This is a file path, not a url!
        verbose -- Tells whether or not to print messages.
        """
        JSONRPC.__init__(self, verbose)
        self.connection = socket(AF_UNIX, SOCK_STREAM)
        self.connection.connect(address)

    def close(self):
        """Closes the connection."""
        self.connection.shutdown(SHUT_RDWR)
        self.connection.close()

    def _send(self, json):
        # Sends stringified JSONRPC messages through the Unix domain socket,
        # then accumulates response chunks until they form a complete JSON
        # document.
        self.connection.sendall(json)
        result = bytearray()
        while True:
            # A multi-byte UTF-8 character may be split across two recv()
            # chunks; a decode error therefore means "incomplete", not "bad".
            try:
                if is_valid_json(result.decode('utf8')):
                    break
            except UnicodeDecodeError:
                pass
            chunk = self.connection.recv(RECV_CHUNK)
            if not chunk:
                # Peer closed the socket before a complete JSON response
                # arrived; previously this case spun in an infinite loop.
                raise IOError('connection closed before a complete '
                              'JSON response was received')
            result.extend(chunk)
        return result
| {
"content_hash": "ce12cbc87839834f97c450e33df674f3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 36.18181818181818,
"alnum_prop": 0.6457286432160804,
"repo_name": "ChrisCalderon/PyRPCTools",
"id": "76e940f4dae44043b1dff54ad8f151cff528bd8b",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpctools/ipcrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17801"
}
],
"symlink_target": ""
} |
from kivy.properties import ObjectProperty
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.tabbedpanel import TabbedPanelContent, TabbedPanelHeader,\
TabbedPanel
from kivy.uix.sandbox import SandboxContent
class StatusNavBarButton(Button):
    '''StatusNavBarButton is a :class:`~kivy.uix.button.Button` representing
       one widget in the widget hierarchy of the currently selected widget.
    '''

    # The widget in the hierarchy that this navbar button represents.
    node = ObjectProperty()
class StatusNavBarSeparator(Label):
    '''StatusNavBarSeparator is a :class:`~kivy.uix.label.Label`
       used to separate two navbar buttons with a '>' character.
    '''
    pass
class StatusBar(BoxLayout):
    '''StatusBar used to display the widget hierarchy of the currently
       selected widget and to display messages.
    '''

    app = ObjectProperty()
    '''Reference to current app instance.
    :data:`app` is an
    :class:`~kivy.properties.ObjectProperty`
    '''

    navbar = ObjectProperty()
    '''To be used as parent of :class:`~designer.statusbar.StatusNavBarButton`
       and :class:`~designer.statusbar.StatusNavBarSeparator`.
    :data:`navbar` is an
    :class:`~kivy.properties.ObjectProperty`
    '''

    gridlayout = ObjectProperty()
    '''Parent of :data:`navbar`.
    :data:`gridlayout` is an
    :class:`~kivy.properties.ObjectProperty`
    '''

    playground = ObjectProperty()
    '''Instance of
    :data:`playground` is an
    :class:`~kivy.properties.ObjectProperty`
    '''

    def __init__(self, **kwargs):
        super(StatusBar, self).__init__(**kwargs)
        # Trigger coalesces rapid focus changes into one navbar rebuild.
        self.update_navbar = Clock.create_trigger(self._update_navbar)

    def show_message(self, message):
        '''Show a plain text message in the StatusBar, replacing the navbar.
        '''
        self.app.widget_focused = None
        # BUGFIX: original condition was `self.gridlayout.children or not
        # isinstance(...)`, which indexed children[0] when the layout was
        # empty and raised IndexError. Rebuild the label when the layout is
        # empty or currently showing the navbar instead of a Label.
        if (not self.gridlayout.children or not
                isinstance(self.gridlayout.children[0], Label)):
            # Create navbar again, as doing clear_widgets
            # will make its reference
            # count to 0 and it will be destroyed
            self.navbar = GridLayout(rows=1)
            self.gridlayout.clear_widgets()
            self.gridlayout.add_widget(Label(text=message))
        self.gridlayout.children[0].text = message

    def on_app(self, instance, app):
        # Rebuild the navbar whenever the focused widget changes.
        app.bind(widget_focused=self.update_navbar)

    def _update_navbar(self, *largs):
        '''To update navbar with the parents of currently selected Widget.
        '''
        # If a message Label is currently shown, swap the navbar back in.
        if self.gridlayout.children and\
                isinstance(self.gridlayout.children[0], Label):
            self.gridlayout.clear_widgets()
            self.gridlayout.add_widget(self.navbar)
        self.navbar.clear_widgets()
        wid = self.app.widget_focused
        if not wid:
            return
        # get parent list, until app.root.playground.root
        children = []
        while wid:
            if wid == self.playground.sandbox or\
                    wid == self.playground.sandbox.children[0]:
                break
            if isinstance(wid, TabbedPanelContent):
                # Show the current tab instead of the panel's content widget.
                _wid = wid
                wid = wid.parent.current_tab
                children.append(StatusNavBarButton(node=wid))
                wid = _wid.parent
            elif isinstance(wid, TabbedPanelHeader):
                children.append(StatusNavBarButton(node=wid))
                # Walk up to the enclosing TabbedPanel itself.
                _wid = wid
                while _wid and not isinstance(_wid, TabbedPanel):
                    _wid = _wid.parent
                wid = _wid
            children.append(StatusNavBarButton(node=wid))
            wid = wid.parent
        # Buttons were collected child-first; display them root-first with
        # '>' separators, marking the focused widget's button as 'down'.
        count = len(children)
        for index, child in enumerate(reversed(children)):
            self.navbar.add_widget(child)
            if index < count - 1:
                self.navbar.add_widget(StatusNavBarSeparator())
            else:
                child.state = 'down'
| {
"content_hash": "3903e432305f77bcc6895ec9e9ab1d53",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 31.688,
"alnum_prop": 0.6149962130775056,
"repo_name": "mohammadj22/kivy-designer",
"id": "5aebddceb982c87a266c7548d22c79838732842c",
"size": "3961",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "designer/statusbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "773"
},
{
"name": "Python",
"bytes": "331697"
}
],
"symlink_target": ""
} |
from typing import Optional, Any
import sys
import unittest
try:
from tools.lib.template_parser import (
TemplateParserException,
is_django_block_tag,
tokenize,
validate,
)
except ImportError:
print('ERROR!!! You need to run this via tools/test-tools.')
sys.exit(1)
class ParserTest(unittest.TestCase):
    """Tests for the template tokenizer/validator in tools.lib.template_parser."""
    def _assert_validate_error(self, error, fn=None, text=None, check_indent=True):
        # type: (str, Optional[str], Optional[str], bool) -> None
        # Helper: expect validate() to raise TemplateParserException whose
        # message matches the regex in `error`.
        with self.assertRaisesRegex(TemplateParserException, error):
            validate(fn=fn, text=text, check_indent=check_indent)
    def test_is_django_block_tag(self):
        # type: () -> None
        self.assertTrue(is_django_block_tag('block'))
        self.assertFalse(is_django_block_tag('not a django tag'))
    def test_validate_vanilla_html(self):
        # type: () -> None
        '''
        Verify that validate() does not raise errors for
        well-formed HTML.
        '''
        my_html = '''
            <table>
                <tr>
                    <td>foo</td>
                </tr>
            </table>'''
        validate(text=my_html)
    def test_validate_handlebars(self):
        # type: () -> None
        my_html = '''
            {{#with stream}}
                <p>{{stream}}</p>
            {{/with}}
            '''
        validate(text=my_html)
    def test_validate_comment(self):
        # type: () -> None
        my_html = '''
            <!---
                <h1>foo</h1>
            -->'''
        validate(text=my_html)
    def test_validate_django(self):
        # type: () -> None
        my_html = '''
            {% include "some_other.html" %}
            {% if foo %}
                <p>bar</p>
            {% endif %}
            '''
        validate(text=my_html)
        my_html = '''
            {% block "content" %}
                {% with className="class" %}
                    {% include 'foobar' %}
                {% endwith %}
            {% endblock %}
            '''
        validate(text=my_html)
    def test_validate_no_start_tag(self):
        # type: () -> None
        my_html = '''
            foo</p>
        '''
        self._assert_validate_error('No start tag', text=my_html)
    def test_validate_mismatched_tag(self):
        # type: () -> None
        my_html = '''
            <b>foo</i>
        '''
        self._assert_validate_error('Mismatched tag.', text=my_html)
    def test_validate_bad_indentation(self):
        # type: () -> None
        my_html = '''
            <p>
                foo
                </p>
        '''
        self._assert_validate_error('Bad indentation.', text=my_html, check_indent=True)
    def test_validate_state_depth(self):
        # type: () -> None
        my_html = '''
            <b>
        '''
        self._assert_validate_error('Missing end tag', text=my_html)
    def test_validate_incomplete_handlebars_tag_1(self):
        # type: () -> None
        my_html = '''
            {{# foo
        '''
        self._assert_validate_error('''Tag missing "}}" at Line 2 Col 13:"{{# foo
"''', text=my_html)
    def test_validate_incomplete_handlebars_tag_2(self):
        # type: () -> None
        my_html = '''
            {{# foo }
        '''
        self._assert_validate_error('Tag missing "}}" at Line 2 Col 13:"{{# foo }\n"', text=my_html)
    def test_validate_incomplete_django_tag_1(self):
        # type: () -> None
        my_html = '''
            {% foo
        '''
        self._assert_validate_error('''Tag missing "%}" at Line 2 Col 13:"{% foo
"''', text=my_html)
    def test_validate_incomplete_django_tag_2(self):
        # type: () -> None
        my_html = '''
            {% foo %
        '''
        self._assert_validate_error('Tag missing "%}" at Line 2 Col 13:"{% foo %\n"', text=my_html)
    def test_validate_incomplete_html_tag_1(self):
        # type: () -> None
        my_html = '''
            <b
        '''
        self._assert_validate_error('''Tag missing ">" at Line 2 Col 13:"<b
"''', text=my_html)
    def test_validate_incomplete_html_tag_2(self):
        # type: () -> None
        my_html = '''
            <a href="
        '''
        my_html1 = '''
            <a href=""
        '''
        self._assert_validate_error('''Tag missing ">" at Line 2 Col 13:"<a href=""
"''', text=my_html1)
        self._assert_validate_error('''Unbalanced Quotes at Line 2 Col 13:"<a href="
"''', text=my_html)
    def test_validate_empty_html_tag(self):
        # type: () -> None
        my_html = '''
            < >
        '''
        self._assert_validate_error('Tag name missing', text=my_html)
    def test_code_blocks(self):
        # type: () -> None
        # This is fine.
        my_html = '''
            <code>
                x = 5
                y = x + 1
            </code>'''
        validate(text=my_html)
        # This is also fine.
        my_html = "<code>process_widgets()</code>"
        validate(text=my_html)
        # This is illegal.
        my_html = '''
            <code>x =
                5</code>
            '''
        self._assert_validate_error('Code tag is split across two lines.', text=my_html)
    def test_anchor_blocks(self):
        # type: () -> None
        # This is allowed, although strange.
        my_html = '''
            <a hef="/some/url">
            Click here
            for more info.
            </a>'''
        validate(text=my_html)
        # This is fine.
        my_html = '<a href="/some/url">click here</a>'
        validate(text=my_html)
        # Even this is fine.
        my_html = '''
            <a class="twitter-timeline" href="https://twitter.com/ZulipStatus"
                data-widget-id="443457763394334720"
                data-screen-name="ZulipStatus"
                >@ZulipStatus on Twitter</a>.
            '''
        validate(text=my_html)
    def test_tokenize(self):
        # type: () -> None
        # Each case feeds one tag into tokenize() and checks the kind (and
        # tag name, where applicable) of the first token produced.
        tag = '<meta whatever>bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'html_special')
        tag = '<a>bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'html_start')
        self.assertEqual(token.tag, 'a')
        tag = '<br />bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'html_singleton')
        self.assertEqual(token.tag, 'br')
        tag = '</a>bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'html_end')
        self.assertEqual(token.tag, 'a')
        tag = '{{#with foo}}bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'handlebars_start')
        self.assertEqual(token.tag, 'with')
        tag = '{{/with}}bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'handlebars_end')
        self.assertEqual(token.tag, 'with')
        tag = '{% if foo %}bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'django_start')
        self.assertEqual(token.tag, 'if')
        tag = '{% endif %}bla'
        token = tokenize(tag)[0]
        self.assertEqual(token.kind, 'django_end')
        self.assertEqual(token.tag, 'if')
| {
"content_hash": "244e9834df246784a1a9557d558071c0",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 100,
"avg_line_length": 28.824701195219124,
"alnum_prop": 0.48680027643400137,
"repo_name": "amanharitsh123/zulip",
"id": "a16fc387e8443681c6d37de693d8b2c08608410d",
"size": "7235",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/tests/test_template_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432211"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "494378"
},
{
"name": "JavaScript",
"bytes": "2167185"
},
{
"name": "Nginx",
"bytes": "1485"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86921"
},
{
"name": "Python",
"bytes": "3792729"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "61752"
}
],
"symlink_target": ""
} |
"""
Certificate generation module.
"""
from OpenSSL import crypto
# Re-export the key-type constants so callers of this module need not
# import OpenSSL.crypto themselves.
TYPE_RSA = crypto.TYPE_RSA
TYPE_DSA = crypto.TYPE_DSA
def createKeyPair(type, bits):
    """
    Generate a fresh public/private key pair.

    Arguments: type - key type, one of TYPE_RSA or TYPE_DSA
               bits - key length in bits
    Returns:   a crypto.PKey object holding the new key pair
    """
    keypair = crypto.PKey()
    keypair.generate_key(type, bits)
    return keypair
def createCertRequest(pkey, digest="sha256", **name):
    """
    Create a certificate signing request.

    Arguments: pkey   - The key to associate with the request
               digest - Digest method used for signing, default sha256
               **name - Subject fields of the request; recognised keyword
                        arguments are C (country), ST (state/province),
                        L (locality), O (organization), OU (organizational
                        unit), CN (common name) and emailAddress.
    Returns:   The certificate request in an X509Req object
    """
    request = crypto.X509Req()
    subject = request.get_subject()
    for field, value in name.items():
        setattr(subject, field, value)
    request.set_pubkey(pkey)
    request.sign(pkey, digest)
    return request
def createCertificate(req, issuerCertKey, serial, validityPeriod, digest="sha256"):
    """
    Generate a certificate given a certificate request.

    Arguments: req            - Certificate request to use
               issuerCertKey  - Tuple (issuerCert, issuerKey) containing the
                                certificate and the private key of the issuer
               serial         - Serial number for the certificate
               validityPeriod - Tuple (notBefore, notAfter) of timestamps in
                                seconds relative to now delimiting when the
                                certificate starts and stops being valid
               digest         - Digest method to use for signing, default is
                                sha256
    Returns:   The signed certificate in an X509 object
    """
    # NOTE: the previous docstring documented issuerCert/issuerKey and
    # notBefore/notAfter as separate parameters, but the signature takes
    # them as two tuples; the docstring above now matches the signature.
    issuerCert, issuerKey = issuerCertKey
    notBefore, notAfter = validityPeriod
    cert = crypto.X509()
    cert.set_serial_number(serial)
    cert.gmtime_adj_notBefore(notBefore)
    cert.gmtime_adj_notAfter(notAfter)
    cert.set_issuer(issuerCert.get_subject())
    cert.set_subject(req.get_subject())
    cert.set_pubkey(req.get_pubkey())
    cert.sign(issuerKey, digest)
    return cert
| {
"content_hash": "c2a3a14a018e1966cae050b3d4426163",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 83,
"avg_line_length": 34.906666666666666,
"alnum_prop": 0.5935828877005348,
"repo_name": "mhils/pyopenssl",
"id": "da0624f74599290b5911c66cf62e62b20039dc61",
"size": "2736",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/certgen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "498443"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
} |
from csv import DictReader
from petsafeconfig import CSV_FILENAME
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class PetstreetmallComSpider(BaseSpider):
    """Price-monitoring spider for petstreetmall.com.

    Reads the product list for this retailer from the shared CSV file and,
    for each product URL, extracts the matching price by SKU from one of the
    three page layouts the site uses (single main product, main sub-products,
    secondary sub-products).
    """
    name = 'petstreetmall.com'
    allowed_domains = ['petstreetmall.com']
    start_urls = ()
    site_name_csv = 'petstreetmall.com'

    def start_requests(self):
        # Collect (SKU, URL, notes, product name) rows for this retailer.
        products = []
        with open(CSV_FILENAME, 'rb') as csv_file:
            csv_reader = DictReader(csv_file)
            for row in csv_reader:
                if row['Retailer'] == self.site_name_csv and row['Link'] != '':
                    products.append((row['SKU'].strip(), row['Link'].strip(),
                                     row['Notes'].strip(),
                                     row['Name of Product'].strip().decode('utf-8')))
        for sku, url, notes, name in products:
            yield Request(url, self.parse,
                          meta={'sku': sku, 'notes': notes, 'name': name},
                          dont_filter=True)

    def _load_item(self, response, hxs, name, price, sku):
        # Build the Product item; this was previously triplicated verbatim
        # across the three page-layout branches of parse().
        loader = ProductLoader(item=Product(), response=response, selector=hxs)
        loader.add_value('url', response.url)
        loader.add_value('name', name)
        loader.add_value('price', price)
        loader.add_value('sku', sku)
        return loader.load_item()

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        url = response.url
        sku = response.meta['sku']
        sec_sku = response.meta['notes']  # alternate SKU from the CSV 'Notes' column
        name = response.meta['name'].encode('ascii', 'ignore')

        main_product = hxs.select("//div[@id='Product-MainProduct']")
        main_products = hxs.select("//div[@id='Product-MainProductContainer']//div[@class='Product-SubProduct']")
        secondary_products = hxs.select("//div[@id='Product-SubProductContainer']//div[@class='Product-SubProduct']")

        main_product_sku = main_product.select("div[@id='Product-lblItem']/span[@id='lblItem']/text()").extract()
        if not main_product_sku:
            logging.error("NO MAIN SKU! %s" % url)
        else:
            main_product_sku = main_product_sku[0]
        if main_product_sku == sku or main_product_sku == sec_sku:
            # Layout 1: single main product.
            price = main_product.select(".//div[@class='Product-Price']/span[@id='lblClubPrice']/b/font/text()").re("\$(.*)")
            if not price:
                logging.error('ERROR!! NO PRICE!! %s "%s" "%s"' % (sku, name, url))
                return
            yield self._load_item(response, hxs, name, price[0].strip(), sku)
            return
        elif main_products:
            # Layout 2: sub-products inside the main product container.
            for sub_product in main_products:
                product_sku = sub_product.select("div[@class='Product-SubProductNumber']/font/text()").re("#(.+)")
                if not product_sku:
                    logging.error("NO MAIN SKU! %s" % url)
                else:
                    product_sku = product_sku[0]
                if product_sku == sku or product_sku == sec_sku:
                    price = sub_product.select(".//span[contains(@id, 'lblClubPrice')]/b/font/text()").re("\$(.*)")
                    if not price:
                        logging.error('ERROR!! NO SEC PRICE!! %s "%s" "%s"' % (sku, name, url))
                        return
                    yield self._load_item(response, hxs, name, price[0].strip(), sku)
                    return
        elif secondary_products:
            # Layout 3: sub-products inside the secondary product container.
            for sub_product in secondary_products:
                product_sku = sub_product.select("div[@class='Product-SubProductNumber']/text()").re("#(.+)")
                if not product_sku:
                    logging.error("NO SECONDARY SKU! %s" % url)
                else:
                    product_sku = product_sku[0]
                if product_sku == sku or product_sku == sec_sku:
                    price = sub_product.select(".//span[contains(@id, 'lblClubPrice2')]/b/font/text()").re("\$(.*)")
                    if not price:
                        logging.error('ERROR!! NO SEC PRICE!! %s "%s" "%s"' % (sku, name, url))
                        return
                    yield self._load_item(response, hxs, name, price[0].strip(), sku)
                    return
        else:
            logging.error("No products found!")
| {
"content_hash": "be3641f1100f5980bc1f842d2019b453",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 148,
"avg_line_length": 42.03361344537815,
"alnum_prop": 0.5281887245101959,
"repo_name": "ddy88958620/lib",
"id": "37a645c289e6906ab538b59230d264d2e38c959a",
"size": "5002",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/scrapy/petsafe/petstreetmallcom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import os, sys, cPickle
import wx # ensure this import works before starting the application
import matplotlib # ensure this import works before starting the application
# ensure pyeq2 can be imported before starting the application
if -1 != sys.path[0].find('pyeq2-master'):raise Exception('Please rename git checkout directory from "pyeq2-master" to "pyeq2"')
exampleFileDirectory = sys.path[0][:sys.path[0].rfind(os.sep)]
pyeq2IimportDirectory = os.path.join(os.path.join(exampleFileDirectory, '..'), '..')
if pyeq2IimportDirectory not in sys.path:
sys.path.append(pyeq2IimportDirectory)
import pyeq2
# local imports from application subdirectory
import guifiles.icon as icon
import guifiles.DataForControls as dfc
import guifiles.CustomDialogs as CustomDialogs
import guifiles.CustomEvents as CustomEvents
import guifiles.CustomThreads as CustomThreads
class ApplicationFrame(wx.Frame):
    """Main window: two data editors plus equation/target choices and fit buttons."""

    def __init__(self):
        wx.Frame.__init__(self, None, title="Example wxPython Curve And Surface Fitter",
                          size=(800,600))

        # wx converted an icon file to a Python file for embedding, see icon.py
        self.SetIcon(icon.icon.GetIcon())

        panel = wx.Panel(self)  # something to put the controls on

        # Header labels over the two editors; not referenced after layout.
        editorLabel2D = wx.StaticText(panel, -1, "--- 2D Data Text Editor ---")
        editorLabel3D = wx.StaticText(panel, -1, "--- 3D Data Text Editor ---")

        # Stored on self: other methods read the editor contents.
        self.text_2D = wx.TextCtrl(panel, -1, dfc.exampleText_2D,
                                   style=wx.TE_MULTILINE|wx.HSCROLL)
        self.text_3D = wx.TextCtrl(panel, -1, dfc.exampleText_3D,
                                   style=wx.TE_MULTILINE|wx.HSCROLL)

        # Stored on self: fit handlers read the selected fitting target.
        self.rbFittingTargetChoice_2D = wx.RadioBox(
            panel, -1, "Fitting Target 2D", wx.DefaultPosition, wx.DefaultSize,
            dfc.fittingTargetList, 1, wx.RA_SPECIFY_COLS
        )
        self.rbFittingTargetChoice_3D = wx.RadioBox(
            panel, -1, "Fitting Target 3D", wx.DefaultPosition, wx.DefaultSize,
            dfc.fittingTargetList, 1, wx.RA_SPECIFY_COLS
        )

        # Stored on self: fit handlers read the selected example equation.
        self.rbEqChoice_2D = wx.RadioBox(
            panel, -1, "Example 2D Equations", wx.DefaultPosition, wx.DefaultSize,
            dfc.exampleEquationList_2D, 1, wx.RA_SPECIFY_COLS
        )
        self.rbEqChoice_3D = wx.RadioBox(
            panel, -1, "Example 3D Equations", wx.DefaultPosition, wx.DefaultSize,
            dfc.exampleEquationList_3D, 1, wx.RA_SPECIFY_COLS
        )

        self.btnFit2D = wx.Button(panel, -1, "Fit 2D Text Data")
        self.btnFit3D = wx.Button(panel, -1, "Fit 3D Text Data")

        # Arrange everything in a 5x2 grid; the editor row stretches.
        grid = wx.FlexGridSizer(5, 2, 10, 20)
        grid.AddGrowableRow(1)
        grid.AddGrowableCol(0)
        grid.AddGrowableCol(1)
        grid.Add(editorLabel2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(editorLabel3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(self.text_2D, 0, wx.EXPAND)
        grid.Add(self.text_3D, 0, wx.EXPAND)
        grid.Add(self.rbEqChoice_2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(self.rbEqChoice_3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(self.rbFittingTargetChoice_2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(self.rbFittingTargetChoice_3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(self.btnFit2D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        grid.Add(self.btnFit3D, 0, wx.ALIGN_CENTER_HORIZONTAL)
        outerSizer = wx.BoxSizer()
        outerSizer.Add(grid, 1, wx.EXPAND|wx.ALL, 10)
        panel.SetSizer(outerSizer)

        # Center the application window on the user's display.
        self.Center()

        # Progress dialog; hidden unless a fit is running.
        self.statusBox = CustomDialogs.StatusDialog(self, '', "Status")

        # Wire the fit buttons to their handlers.
        self.Bind(wx.EVT_BUTTON, self.OnFit2D, self.btnFit2D)
        self.Bind(wx.EVT_BUTTON, self.OnFit3D, self.btnFit3D)

        # Worker thread results arrive through this custom event.
        CustomEvents.EVT_THREADSTATUS(self, self.OnThreadStatus)
        self.fittingWorkerThread = None

    def OnThreadStatus(self, event):
        """Handle status updates and the final result from the fitting thread."""
        if type(event.data) is str:  # strings are status updates
            self.statusBox.text.AppendText(event.data + "\n")
        else:  # worker thread completed; event.data is the fitted equation
            self.fittingWorkerThread = None
            with open("pickledEquationFile", "wb") as pickledEquationFile:
                cPickle.dump(event.data, pickledEquationFile)
            self.btnFit2D.Enable()
            self.btnFit3D.Enable()
            self.statusBox.Hide()
            # Launch the results dialog as a separate process.
            guifilesDirectory = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), 'guifiles')
            dialogScript = os.path.join(guifilesDirectory, 'CustomDialogs.py')
            os.popen(sys.executable + ' ' + dialogScript)

    def OnFit2D(self, evt):
        """Validate the 2D editor contents and start a fitting worker thread."""
        editorText = str(self.text_2D.GetValue())
        equationChoice = self.rbEqChoice_2D.GetStringSelection()
        targetChoice = self.rbFittingTargetChoice_2D.GetStringSelection()
        # the GUI's fitting target string contains what we need - extract it
        fittingTarget = targetChoice.split('(')[1].split(')')[0]

        if equationChoice == 'Linear Polynomial':
            self.equation = pyeq2.Models_2D.Polynomial.Linear(fittingTarget)
        elif equationChoice == 'Quadratic Polynomial':
            self.equation = pyeq2.Models_2D.Polynomial.Quadratic(fittingTarget)
        elif equationChoice == 'Cubic Polynomial':
            self.equation = pyeq2.Models_2D.Polynomial.Cubic(fittingTarget)
        elif equationChoice == 'Witch Of Maria Agnesi A':
            self.equation = pyeq2.Models_2D.Miscellaneous.WitchOfAgnesiA(fittingTarget)
        elif equationChoice == 'VanDeemter Chromatography':
            self.equation = pyeq2.Models_2D.Engineering.VanDeemterChromatography(fittingTarget)
        elif equationChoice == 'Gamma Ray Angular Distribution (degrees) B':
            self.equation = pyeq2.Models_2D.LegendrePolynomial.GammaRayAngularDistributionDegreesB(fittingTarget)
        elif equationChoice == 'Exponential With Offset':
            self.equation = pyeq2.Models_2D.Exponential.Exponential(fittingTarget, 'Offset')

        # convert text to numeric data checking for log of negative numbers, etc.
        try:
            pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(editorText, self.equation, False)
        except:
            wx.MessageBox(self.equation.reasonWhyDataRejected, "Error")
            return

        # check for number of coefficients > number of data points to be fitted
        coeffCount = len(self.equation.GetCoefficientDesignators())
        dataCount = len(self.equation.dataCache.allDataCacheDictionary['DependentData'])
        if coeffCount > dataCount:
            wx.MessageBox("This equation requires a minimum of " + str(coeffCount) + " data points, you have supplied " + repr(dataCount) + ".", "Error")
            return

        # Disable both fit buttons until the worker thread completes.
        self.btnFit2D.Disable()
        self.btnFit3D.Disable()
        self.statusBox.text.SetValue('')
        self.statusBox.Show()  # hidden by OnThreadStatus() when thread completes

        # the thread starts running on construction
        self.fittingWorkerThread = CustomThreads.FittingThread(self, self.equation)

    def OnFit3D(self, evt):
        """Validate the 3D editor contents and start a fitting worker thread."""
        editorText = str(self.text_3D.GetValue())
        equationChoice = self.rbEqChoice_3D.GetStringSelection()
        targetChoice = self.rbFittingTargetChoice_3D.GetStringSelection()
        # the GUI's fitting target string contains what we need - extract it
        fittingTarget = targetChoice.split('(')[1].split(')')[0]

        if equationChoice == 'Linear Polynomial':
            self.equation = pyeq2.Models_3D.Polynomial.Linear(fittingTarget)
        elif equationChoice == 'Full Quadratic Polynomial':
            self.equation = pyeq2.Models_3D.Polynomial.FullQuadratic(fittingTarget)
        elif equationChoice == 'Full Cubic Polynomial':
            self.equation = pyeq2.Models_3D.Polynomial.FullCubic(fittingTarget)
        elif equationChoice == 'Monkey Saddle A':
            self.equation = pyeq2.Models_3D.Miscellaneous.MonkeySaddleA(fittingTarget)
        elif equationChoice == 'Gaussian Curvature Of Whitneys Umbrella A':
            self.equation = pyeq2.Models_3D.Miscellaneous.GaussianCurvatureOfWhitneysUmbrellaA(fittingTarget)
        elif equationChoice == 'NIST Nelson Autolog':
            self.equation = pyeq2.Models_3D.NIST.NIST_NelsonAutolog(fittingTarget)
        elif equationChoice == 'Custom Polynomial One':
            self.equation = pyeq2.Models_3D.Polynomial.UserSelectablePolynomial(fittingTarget, "Default", 3, 1)

        # convert text to numeric data checking for log of negative numbers, etc.
        try:
            pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(editorText, self.equation, False)
        except:
            wx.MessageBox(self.equation.reasonWhyDataRejected, "Error")
            return

        # check for number of coefficients > number of data points to be fitted
        coeffCount = len(self.equation.GetCoefficientDesignators())
        dataCount = len(self.equation.dataCache.allDataCacheDictionary['DependentData'])
        if coeffCount > dataCount:
            wx.MessageBox("This equation requires a minimum of " + str(coeffCount) + " data points, you have supplied " + repr(dataCount) + ".", "Error")
            return

        # Disable both fit buttons until the worker thread completes.
        self.btnFit2D.Disable()
        self.btnFit3D.Disable()
        self.statusBox.text.SetValue('')
        self.statusBox.Show()  # hidden by OnThreadStatus() when thread completes

        # the thread starts running on construction
        self.fittingWorkerThread = CustomThreads.FittingThread(self, self.equation)
if __name__ == "__main__":
    # Build the wx application, show the main frame, and enter the event loop.
    application = wx.App()
    mainFrame = ApplicationFrame()
    mainFrame.Show()
    application.MainLoop()
| {
"content_hash": "8109fbf61872b03a7e753ad975b35fd0",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 153,
"avg_line_length": 47.849557522123895,
"alnum_prop": 0.6611799519141853,
"repo_name": "jamesrp/pyeq2",
"id": "6fec2a2f10d704d049dc4e9a3b0f51ead96fbd5b",
"size": "10814",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/GUI/wxPythonFit.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1882602"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
} |
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ReadingType(IdentifiedObject):
"""Type of data conveyed by a specific Reading.Type of data conveyed by a specific Reading.
"""
def __init__(self, multiplier="M", unit="N", kind="phaseAngle", reverseChronology=False, defaultQuality='', intervalLength=0.0, channelNumber=0, defaultValueDataType='', IntervalBlocks=None, dynamicConfiguration=None, Readings=None, Register=None, PendingCalculation=None, *args, **kw_args):
"""Initialises a new 'ReadingType' instance.
@param multiplier: Multiplier for 'unit'. Values are: "M", "G", "d", "micro", "c", "p", "n", "T", "k", "m", "none"
@param unit: Unit for the reading value. Values are: "N", "A", "rad", "VAh", "Pa", "J", "h", "Hz", "VArh", "ohm", "H", "m3", "deg", "V", "oC", "F", "Wh", "s", "g", "min", "S", "none", "W", "VAr", "m2", "m", "VA"
@param kind: Kind of reading. Values are: "phaseAngle", "volume", "frequency", "energy", "currentAngle", "powerFactor", "date", "other", "demand", "power", "pressure", "voltage", "voltageAngle", "time", "current"
@param reverseChronology: True for systems that must operate in 'reverse' chronological order.
@param defaultQuality: Characteristics of a data value conveyed by a specific Reading, which allow an application to understand how a specific Reading is to be interpreted.
@param intervalLength: (if incremental reading value) Length of increment interval.
@param channelNumber: Logical positioning of this measurement data.
@param defaultValueDataType: Numeric type to be expected for the associated IntervalBlock.value (e.g. unsignedInteger).
@param IntervalBlocks: All blocks containing interval reading values with this type information.
@param dynamicConfiguration: Demand configuration.
@param Readings: All reading values with this type information.
@param Register: Register displaying values with this type information.
@param PendingCalculation: Pending conversion that produced this reading type.
"""
#: Multiplier for 'unit'. Values are: "M", "G", "d", "micro", "c", "p", "n", "T", "k", "m", "none"
self.multiplier = multiplier
#: Unit for the reading value. Values are: "N", "A", "rad", "VAh", "Pa", "J", "h", "Hz", "VArh", "ohm", "H", "m3", "deg", "V", "oC", "F", "Wh", "s", "g", "min", "S", "none", "W", "VAr", "m2", "m", "VA"
self.unit = unit
#: Kind of reading. Values are: "phaseAngle", "volume", "frequency", "energy", "currentAngle", "powerFactor", "date", "other", "demand", "power", "pressure", "voltage", "voltageAngle", "time", "current"
self.kind = kind
#: True for systems that must operate in 'reverse' chronological order.
self.reverseChronology = reverseChronology
#: Characteristics of a data value conveyed by a specific Reading, which allow an application to understand how a specific Reading is to be interpreted.
self.defaultQuality = defaultQuality
#: (if incremental reading value) Length of increment interval.
self.intervalLength = intervalLength
#: Logical positioning of this measurement data.
self.channelNumber = channelNumber
#: Numeric type to be expected for the associated IntervalBlock.value (e.g. unsignedInteger).
self.defaultValueDataType = defaultValueDataType
self._IntervalBlocks = []
self.IntervalBlocks = [] if IntervalBlocks is None else IntervalBlocks
self.dynamicConfiguration = dynamicConfiguration
self._Readings = []
self.Readings = [] if Readings is None else Readings
self._Register = None
self.Register = Register
self._PendingCalculation = None
self.PendingCalculation = PendingCalculation
super(ReadingType, self).__init__(*args, **kw_args)
_attrs = ["multiplier", "unit", "kind", "reverseChronology", "defaultQuality", "intervalLength", "channelNumber", "defaultValueDataType"]
_attr_types = {"multiplier": str, "unit": str, "kind": str, "reverseChronology": bool, "defaultQuality": str, "intervalLength": float, "channelNumber": int, "defaultValueDataType": str}
_defaults = {"multiplier": "M", "unit": "N", "kind": "phaseAngle", "reverseChronology": False, "defaultQuality": '', "intervalLength": 0.0, "channelNumber": 0, "defaultValueDataType": ''}
_enums = {"multiplier": "UnitMultiplier", "unit": "UnitSymbol", "kind": "ReadingKind"}
_refs = ["IntervalBlocks", "dynamicConfiguration", "Readings", "Register", "PendingCalculation"]
_many_refs = ["IntervalBlocks", "Readings"]
    def getIntervalBlocks(self):
        """All blocks containing interval reading values with this type information.
        :returns: the list stored in ``self._IntervalBlocks``
        """
        return self._IntervalBlocks
def setIntervalBlocks(self, value):
for x in self._IntervalBlocks:
x.ReadingType = None
for y in value:
y._ReadingType = self
self._IntervalBlocks = value
IntervalBlocks = property(getIntervalBlocks, setIntervalBlocks)
    def addIntervalBlocks(self, *IntervalBlocks):
        """Attach each given block to this ReadingType.
        Assignment goes through each block's public ``ReadingType`` property
        (mirroring the pattern used by ``setIntervalBlocks``).
        """
        for obj in IntervalBlocks:
            obj.ReadingType = self
    def removeIntervalBlocks(self, *IntervalBlocks):
        """Detach each given block from this ReadingType via its public setter."""
        for obj in IntervalBlocks:
            obj.ReadingType = None
# Demand configuration.
dynamicConfiguration = None
    def getReadings(self):
        """All reading values with this type information.
        :returns: the list stored in ``self._Readings``
        """
        return self._Readings
def setReadings(self, value):
for x in self._Readings:
x.ReadingType = None
for y in value:
y._ReadingType = self
self._Readings = value
Readings = property(getReadings, setReadings)
    def addReadings(self, *Readings):
        """Attach each given reading to this ReadingType via its public setter."""
        for obj in Readings:
            obj.ReadingType = self
    def removeReadings(self, *Readings):
        """Detach each given reading from this ReadingType via its public setter."""
        for obj in Readings:
            obj.ReadingType = None
    def getRegister(self):
        """Register displaying values with this type information.
        :returns: the object stored in ``self._Register`` (or None)
        """
        return self._Register
    def setRegister(self, value):
        """Set the one-to-one ``Register`` association.
        Detaches any previously linked register, then points the new
        register back at this ReadingType.
        """
        if self._Register is not None:
            self._Register._ReadingType = None
        self._Register = value
        if self._Register is not None:
            # NOTE(review): generated relink pattern — clearing the public
            # property first presumably detaches the register from any other
            # owner before the private back-reference is set; confirm against
            # the Register class.
            self._Register.ReadingType = None
            self._Register._ReadingType = self
    Register = property(getRegister, setRegister)
    def getPendingCalculation(self):
        """Pending conversion that produced this reading type.
        :returns: the object stored in ``self._PendingCalculation`` (or None)
        """
        return self._PendingCalculation
    def setPendingCalculation(self, value):
        """Set the one-to-one ``PendingCalculation`` association.
        Detaches any previously linked calculation, then points the new
        calculation back at this ReadingType.
        """
        if self._PendingCalculation is not None:
            self._PendingCalculation._ReadingType = None
        self._PendingCalculation = value
        if self._PendingCalculation is not None:
            # NOTE(review): generated relink pattern — clearing the public
            # property first presumably detaches the calculation from any
            # other owner before the private back-reference is set; confirm
            # against the PendingCalculation class.
            self._PendingCalculation.ReadingType = None
            self._PendingCalculation._ReadingType = self
    PendingCalculation = property(getPendingCalculation, setPendingCalculation)
| {
"content_hash": "b86ea62f3c53b7f21283991b89a520eb",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 295,
"avg_line_length": 47.61073825503356,
"alnum_prop": 0.6537919368480406,
"repo_name": "rwl/PyCIM",
"id": "8fc3b367227e1d0dad6252a8cfac23f520c375b8",
"size": "8194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61968/Metering/ReadingType.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the ``seating_charts`` app.
    Auto-generated by Django ``makemigrations``. Creates the concrete models
    (Ethnicity, MealTime, Layout, PinnedStudent, SeatFiller, SeatingStudent,
    Table, TableAssignment) together with ``Historical*`` shadow tables whose
    ``history_id``/``history_date``/``history_type``/``history_user`` columns
    appear to follow the django-simple-history pattern — confirm.
    NOTE(review): ``waitor`` is a misspelling of "waiter", but it is the
    persisted column name; do not rename it inside this migration.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('academics', '0022_auto_20160203_1038'),
    ]
    operations = [
        # --- model (and historical shadow-model) creation ---
        migrations.CreateModel(
            name='Ethnicity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('ethnicity', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='HistoricalEthnicity',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('ethnicity', models.CharField(max_length=200)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical ethnicity',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalMealTime',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('name', models.CharField(max_length=200)),
                ('include_boarding_students', models.BooleanField(default=False)),
                ('include_day_students', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical meal time',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalPinnedStudent',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical pinned student',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalSeatFiller',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('description', models.CharField(blank=True, max_length=200)),
                ('seats', models.IntegerField()),
                ('display', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical seat filler',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalTable',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('description', models.CharField(max_length=200)),
                ('capacity', models.IntegerField()),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical table',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalTableAssignment',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('waitor', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical table assignment',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='Layout',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='MealTime',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('include_boarding_students', models.BooleanField(default=False)),
                ('include_day_students', models.BooleanField(default=False)),
                ('include_grades', models.ManyToManyField(to='academics.Grade')),
            ],
        ),
        migrations.CreateModel(
            name='PinnedStudent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('meal_time', models.ForeignKey(to='seating_charts.MealTime')),
            ],
        ),
        migrations.CreateModel(
            name='SeatFiller',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('description', models.CharField(blank=True, max_length=200)),
                ('seats', models.IntegerField()),
                ('display', models.BooleanField(default=False)),
                ('meal_time', models.ManyToManyField(to='seating_charts.MealTime')),
            ],
        ),
        migrations.CreateModel(
            name='SeatingStudent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('enrollment', models.ForeignKey(to='academics.Enrollment')),
                ('ethnicity', models.ForeignKey(null=True, to='seating_charts.Ethnicity')),
            ],
        ),
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('description', models.CharField(max_length=200)),
                ('capacity', models.IntegerField()),
                ('for_meals', models.ManyToManyField(to='seating_charts.MealTime')),
            ],
        ),
        migrations.CreateModel(
            name='TableAssignment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('waitor', models.BooleanField(default=False)),
                ('meal_time', models.ForeignKey(to='seating_charts.MealTime')),
                ('student', models.ForeignKey(to='seating_charts.SeatingStudent')),
                ('table', models.ForeignKey(to='seating_charts.Table')),
            ],
            options={
                'permissions': (('view', 'Can view table assignments'), ('edit', 'Can edit table assignments')),
            },
        ),
        # --- cross-model relations, added after creation to avoid forward
        # references between the models above ---
        migrations.AddField(
            model_name='seatfiller',
            name='table',
            field=models.ForeignKey(to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='pinnedstudent',
            name='student',
            field=models.ForeignKey(to='seating_charts.SeatingStudent'),
        ),
        migrations.AddField(
            model_name='pinnedstudent',
            name='table',
            field=models.ForeignKey(to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='layout',
            name='left_print',
            field=models.ForeignKey(related_name='+', to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='layout',
            name='right_print',
            field=models.ForeignKey(null=True, related_name='+', blank=True, to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='historicaltableassignment',
            name='meal_time',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='historicaltableassignment',
            name='student',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.SeatingStudent'),
        ),
        migrations.AddField(
            model_name='historicaltableassignment',
            name='table',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='historicalseatfiller',
            name='table',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='historicalpinnedstudent',
            name='meal_time',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='historicalpinnedstudent',
            name='student',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.SeatingStudent'),
        ),
        migrations.AddField(
            model_name='historicalpinnedstudent',
            name='table',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
        ),
        # --- composite uniqueness constraints ---
        migrations.AlterUniqueTogether(
            name='tableassignment',
            unique_together=set([('meal_time', 'student')]),
        ),
        migrations.AlterUniqueTogether(
            name='pinnedstudent',
            unique_together=set([('student', 'meal_time')]),
        ),
    ]
| {
"content_hash": "93db7efb014d865c2ddb3877056d1705",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 182,
"avg_line_length": 50.63921568627451,
"alnum_prop": 0.5575001936033455,
"repo_name": "rectory-school/rectory-apps",
"id": "12d796a49ed281bcd90924db24a6676777afabb5",
"size": "12937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seating_charts/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1150635"
},
{
"name": "HTML",
"bytes": "2337278"
},
{
"name": "JavaScript",
"bytes": "30707"
},
{
"name": "PHP",
"bytes": "51712"
},
{
"name": "Python",
"bytes": "455392"
},
{
"name": "Ruby",
"bytes": "524"
}
],
"symlink_target": ""
} |
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha2MachinePreferences(object):
    """Machine-type preferences for a KubeVirt VirtualMachine.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Fix/modernization: replaced the py2 ``six.iteritems`` shim with the
    # builtin ``dict.items`` and the ``map``/``lambda`` chains in ``to_dict``
    # with comprehensions; behavior and interface are unchanged.
    swagger_types = {
        'preferred_machine_type': 'str'
    }
    attribute_map = {
        'preferred_machine_type': 'preferredMachineType'
    }
    def __init__(self, preferred_machine_type=None):
        """
        V1alpha2MachinePreferences - a model defined in Swagger

        :param preferred_machine_type: optional preferred machine type (str).
        """
        self._preferred_machine_type = None
        if preferred_machine_type is not None:
            self.preferred_machine_type = preferred_machine_type
    @property
    def preferred_machine_type(self):
        """
        Gets the preferred_machine_type of this V1alpha2MachinePreferences.
        PreferredMachineType optionally defines the preferred machine type to use.

        :return: The preferred_machine_type of this V1alpha2MachinePreferences.
        :rtype: str
        """
        return self._preferred_machine_type
    @preferred_machine_type.setter
    def preferred_machine_type(self, preferred_machine_type):
        """
        Sets the preferred_machine_type of this V1alpha2MachinePreferences.
        PreferredMachineType optionally defines the preferred machine type to use.

        :param preferred_machine_type: The preferred_machine_type of this V1alpha2MachinePreferences.
        :type: str
        """
        self._preferred_machine_type = preferred_machine_type
    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested model values (objects exposing ``to_dict``).
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal (same type, same attributes).
        """
        if not isinstance(other, V1alpha2MachinePreferences):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
| {
"content_hash": "32b44216758234a56f30a6853f7dea03",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 101,
"avg_line_length": 28.4390243902439,
"alnum_prop": 0.5723270440251572,
"repo_name": "kubevirt/client-python",
"id": "94f99e8d93f1781f191e594ecbb7644325c7c442",
"size": "3515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubevirt/models/v1alpha2_machine_preferences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4224980"
},
{
"name": "Shell",
"bytes": "2209"
}
],
"symlink_target": ""
} |
"""HTTP utility code shared by clients and servers.
This module also defines the `HTTPServerRequest` class which is exposed
via `tornado.web.RequestHandler.request`.
"""
import calendar
import collections
import copy
import datetime
import email.utils
from functools import lru_cache
from http.client import responses
import http.cookies
import re
from ssl import SSLError
import time
import unicodedata
from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import ObjectDict, unicode_type
# responses is unused in this file, but we re-export it to other files.
# Reference it so pyflakes doesn't complain.
responses
import typing
from typing import (
Tuple,
Iterable,
List,
Mapping,
Iterator,
Dict,
Union,
Optional,
Awaitable,
Generator,
AnyStr,
)
if typing.TYPE_CHECKING:
from typing import Deque # noqa: F401
from asyncio import Future # noqa: F401
import unittest # noqa: F401
@lru_cache(1000)
def _normalize_header(name: str) -> str:
"""Map a header name to Http-Header-Case.
>>> _normalize_header("coNtent-TYPE")
'Content-Type'
"""
return "-".join([w.capitalize() for w in name.split("-")])
class HTTPHeaders(collections.abc.MutableMapping):
    """A dictionary that maintains ``Http-Header-Case`` for all keys.
    Supports multiple values per key via a pair of new methods,
    `add()` and `get_list()`. The regular dictionary interface
    returns a single value per key, with multiple values joined by a
    comma.
    >>> h = HTTPHeaders({"content-type": "text/html"})
    >>> list(h.keys())
    ['Content-Type']
    >>> h["Content-Type"]
    'text/html'
    >>> h.add("Set-Cookie", "A=B")
    >>> h.add("Set-Cookie", "C=D")
    >>> h["set-cookie"]
    'A=B,C=D'
    >>> h.get_list("set-cookie")
    ['A=B', 'C=D']
    >>> for (k,v) in sorted(h.get_all()):
    ...    print('%s: %s' % (k,v))
    ...
    Content-Type: text/html
    Set-Cookie: A=B
    Set-Cookie: C=D
    """
    # Typing overloads describing the accepted constructor forms; the real
    # implementation follows the last overload.
    @typing.overload
    def __init__(self, __arg: Mapping[str, List[str]]) -> None:
        pass
    @typing.overload # noqa: F811
    def __init__(self, __arg: Mapping[str, str]) -> None:
        pass
    @typing.overload # noqa: F811
    def __init__(self, *args: Tuple[str, str]) -> None:
        pass
    @typing.overload # noqa: F811
    def __init__(self, **kwargs: str) -> None:
        pass
    def __init__(self, *args: typing.Any, **kwargs: str) -> None: # noqa: F811
        # _dict holds the comma-joined single-value view; _as_list keeps every
        # value separately; _last_key tracks the most recent header for
        # multi-line continuations in parse_line().
        self._dict = {} # type: typing.Dict[str, str]
        self._as_list = {} # type: typing.Dict[str, typing.List[str]]
        self._last_key = None # type: Optional[str]
        if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], HTTPHeaders):
            # Copy constructor: preserves repeated headers via get_all().
            for k, v in args[0].get_all():
                self.add(k, v)
        else:
            # Dict-style initialization
            self.update(*args, **kwargs)
    # new public methods
    def add(self, name: str, value: str) -> None:
        """Adds a new value for the given key."""
        norm_name = _normalize_header(name)
        self._last_key = norm_name
        if norm_name in self:
            # Repeated header: comma-join in the single-value view and append
            # to the multi-value view.
            self._dict[norm_name] = (
                native_str(self[norm_name]) + "," + native_str(value)
            )
            self._as_list[norm_name].append(value)
        else:
            self[norm_name] = value
    def get_list(self, name: str) -> List[str]:
        """Returns all values for the given header as a list."""
        norm_name = _normalize_header(name)
        return self._as_list.get(norm_name, [])
    def get_all(self) -> Iterable[Tuple[str, str]]:
        """Returns an iterable of all (name, value) pairs.
        If a header has multiple values, multiple pairs will be
        returned with the same name.
        """
        for name, values in self._as_list.items():
            for value in values:
                yield (name, value)
    def parse_line(self, line: str) -> None:
        """Updates the dictionary with a single header line.
        >>> h = HTTPHeaders()
        >>> h.parse_line("Content-Type: text/html")
        >>> h.get('content-type')
        'text/html'
        """
        if line[0].isspace():
            # continuation of a multi-line header: append to the value of the
            # header recorded in _last_key.
            if self._last_key is None:
                raise HTTPInputError("first header line cannot start with whitespace")
            new_part = " " + line.lstrip()
            self._as_list[self._last_key][-1] += new_part
            self._dict[self._last_key] += new_part
        else:
            try:
                name, value = line.split(":", 1)
            except ValueError:
                raise HTTPInputError("no colon in header line")
            self.add(name, value.strip())
    @classmethod
    def parse(cls, headers: str) -> "HTTPHeaders":
        """Returns a dictionary from HTTP header text.
        >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
        >>> sorted(h.items())
        [('Content-Length', '42'), ('Content-Type', 'text/html')]
        .. versionchanged:: 5.1
           Raises `HTTPInputError` on malformed headers instead of a
           mix of `KeyError`, and `ValueError`.
        """
        h = cls()
        # RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line
        # terminator and ignore any preceding CR.
        for line in headers.split("\n"):
            if line.endswith("\r"):
                line = line[:-1]
            if line:
                h.parse_line(line)
        return h
    # MutableMapping abstract method implementations.
    def __setitem__(self, name: str, value: str) -> None:
        # Plain assignment replaces all existing values for the header.
        norm_name = _normalize_header(name)
        self._dict[norm_name] = value
        self._as_list[norm_name] = [value]
    def __getitem__(self, name: str) -> str:
        return self._dict[_normalize_header(name)]
    def __delitem__(self, name: str) -> None:
        norm_name = _normalize_header(name)
        del self._dict[norm_name]
        del self._as_list[norm_name]
    def __len__(self) -> int:
        return len(self._dict)
    def __iter__(self) -> Iterator[typing.Any]:
        return iter(self._dict)
    def copy(self) -> "HTTPHeaders":
        # defined in dict but not in MutableMapping.
        return HTTPHeaders(self)
    # Use our overridden copy method for the copy.copy module.
    # This makes shallow copies one level deeper, but preserves
    # the appearance that HTTPHeaders is a single container.
    __copy__ = copy
    def __str__(self) -> str:
        # Render as wire-format header lines ("Name: value\n" per value).
        lines = []
        for name, value in self.get_all():
            lines.append("%s: %s\n" % (name, value))
        return "".join(lines)
    __unicode__ = __str__
class HTTPServerRequest(object):
    """A single HTTP request.
    All attributes are type `str` unless otherwise noted.
    .. attribute:: method
       HTTP request method, e.g. "GET" or "POST"
    .. attribute:: uri
       The requested uri.
    .. attribute:: path
       The path portion of `uri`
    .. attribute:: query
       The query portion of `uri`
    .. attribute:: version
       HTTP version specified in request, e.g. "HTTP/1.1"
    .. attribute:: headers
       `.HTTPHeaders` dictionary-like object for request headers. Acts like
       a case-insensitive dictionary with additional methods for repeated
       headers.
    .. attribute:: body
       Request body, if present, as a byte string.
    .. attribute:: remote_ip
       Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
       will pass along the real IP address provided by a load balancer
       in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
    .. versionchanged:: 3.1
       The list format of ``X-Forwarded-For`` is now supported.
    .. attribute:: protocol
       The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
       is set, will pass along the protocol used by a load balancer if
       reported via an ``X-Scheme`` header.
    .. attribute:: host
       The requested hostname, usually taken from the ``Host`` header.
    .. attribute:: arguments
       GET/POST arguments are available in the arguments property, which
       maps arguments names to lists of values (to support multiple values
       for individual names). Names are of type `str`, while arguments
       are byte strings. Note that this is different from
       `.RequestHandler.get_argument`, which returns argument values as
       unicode strings.
    .. attribute:: query_arguments
       Same format as ``arguments``, but contains only arguments extracted
       from the query string.
       .. versionadded:: 3.2
    .. attribute:: body_arguments
       Same format as ``arguments``, but contains only arguments extracted
       from the request body.
       .. versionadded:: 3.2
    .. attribute:: files
       File uploads are available in the files property, which maps file
       names to lists of `.HTTPFile`.
    .. attribute:: connection
       An HTTP request is attached to a single HTTP connection, which can
       be accessed through the "connection" attribute. Since connections
       are typically kept open in HTTP/1.1, multiple requests can be handled
       sequentially on a single connection.
    .. versionchanged:: 4.0
       Moved from ``tornado.httpserver.HTTPRequest``.
    """
    path = None # type: str
    query = None # type: str
    # HACK: Used for stream_request_body
    _body_future = None # type: Future[None]
    def __init__(
        self,
        method: Optional[str] = None,
        uri: Optional[str] = None,
        version: str = "HTTP/1.0",
        headers: Optional[HTTPHeaders] = None,
        body: Optional[bytes] = None,
        host: Optional[str] = None,
        files: Optional[Dict[str, List["HTTPFile"]]] = None,
        connection: Optional["HTTPConnection"] = None,
        start_line: Optional["RequestStartLine"] = None,
        server_connection: Optional[object] = None,
    ) -> None:
        # A parsed start line overrides the individual method/uri/version args.
        if start_line is not None:
            method, uri, version = start_line
        self.method = method
        self.uri = uri
        self.version = version
        self.headers = headers or HTTPHeaders()
        self.body = body or b""
        # set remote IP and protocol from the connection's context, when one
        # is available; defaults to "http" otherwise.
        context = getattr(connection, "context", None)
        self.remote_ip = getattr(context, "remote_ip", None)
        self.protocol = getattr(context, "protocol", "http")
        # Fall back to the Host header, then localhost, for the host name.
        self.host = host or self.headers.get("Host") or "127.0.0.1"
        self.host_name = split_host_and_port(self.host.lower())[0]
        self.files = files or {}
        self.connection = connection
        self.server_connection = server_connection
        self._start_time = time.time()
        self._finish_time = None
        if uri is not None:
            # Split "path?query" and eagerly parse the query-string arguments.
            self.path, sep, self.query = uri.partition("?")
        self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
        self.query_arguments = copy.deepcopy(self.arguments)
        self.body_arguments = {} # type: Dict[str, List[bytes]]
    @property
    def cookies(self) -> Dict[str, http.cookies.Morsel]:
        """A dictionary of ``http.cookies.Morsel`` objects."""
        # Parsed lazily on first access and cached in self._cookies.
        if not hasattr(self, "_cookies"):
            self._cookies = (
                http.cookies.SimpleCookie()
            ) # type: http.cookies.SimpleCookie
            if "Cookie" in self.headers:
                try:
                    parsed = parse_cookie(self.headers["Cookie"])
                except Exception:
                    pass
                else:
                    for k, v in parsed.items():
                        try:
                            self._cookies[k] = v
                        except Exception:
                            # SimpleCookie imposes some restrictions on keys;
                            # parse_cookie does not. Discard any cookies
                            # with disallowed keys.
                            pass
        return self._cookies
    def full_url(self) -> str:
        """Reconstructs the full URL for this request."""
        # NOTE(review): assumes self.uri is not None — true for requests
        # parsed off the wire; confirm for hand-constructed instances.
        return self.protocol + "://" + self.host + self.uri
    def request_time(self) -> float:
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            # Request still in flight: report elapsed time so far.
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time
    def get_ssl_certificate(
        self, binary_form: bool = False
    ) -> Union[None, Dict, bytes]:
        """Returns the client's SSL certificate, if any.
        To use client certificates, the HTTPServer's
        `ssl.SSLContext.verify_mode` field must be set, e.g.::
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain("foo.crt", "foo.key")
            ssl_ctx.load_verify_locations("cacerts.pem")
            ssl_ctx.verify_mode = ssl.CERT_REQUIRED
            server = HTTPServer(app, ssl_options=ssl_ctx)
        By default, the return value is a dictionary (or None, if no
        client certificate is present). If ``binary_form`` is true, a
        DER-encoded form of the certificate is returned instead. See
        SSLSocket.getpeercert() in the standard library for more
        details.
        http://docs.python.org/library/ssl.html#sslsocket-objects
        """
        try:
            if self.connection is None:
                return None
            # TODO: add a method to HTTPConnection for this so it can work with HTTP/2
            return self.connection.stream.socket.getpeercert( # type: ignore
                binary_form=binary_form
            )
        except SSLError:
            return None
    def _parse_body(self) -> None:
        # Parse form/multipart body arguments, then merge them into the
        # combined self.arguments view.
        parse_body_arguments(
            self.headers.get("Content-Type", ""),
            self.body,
            self.body_arguments,
            self.files,
            self.headers,
        )
        for k, v in self.body_arguments.items():
            self.arguments.setdefault(k, []).extend(v)
    def __repr__(self) -> str:
        attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
        args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
        return "%s(%s)" % (self.__class__.__name__, args)
class HTTPInputError(Exception):
    """Raised when a malformed HTTP request or response arrives from a
    remote source.
    .. versionadded:: 4.0
    """
class HTTPOutputError(Exception):
    """Raised when HTTP output cannot be produced correctly.
    .. versionadded:: 4.0
    """
class HTTPServerConnectionDelegate(object):
    """Interface implemented by applications that accept requests from
    `.HTTPServer`.
    .. versionadded:: 4.0
    """
    def start_request(
        self, server_conn: object, request_conn: "HTTPConnection"
    ) -> "HTTPMessageDelegate":
        """Invoked by the server each time a new request begins.
        :arg server_conn: an opaque object representing the long-lived
            (e.g. tcp-level) connection.
        :arg request_conn: a `.HTTPConnection` object for a single
            request/response exchange.
        Subclasses must override this and return a `.HTTPMessageDelegate`.
        """
        raise NotImplementedError()
    def on_close(self, server_conn: object) -> None:
        """Invoked once a connection has been closed.
        :arg server_conn: a server connection previously passed to
            ``start_request``.
        """
        pass
class HTTPMessageDelegate(object):
    """Interface for handling an HTTP request or response.
    All methods are optional no-ops by default; override the ones you need.
    .. versionadded:: 4.0
    """
    # TODO: genericize this class to avoid exposing the Union.
    def headers_received(
        self,
        start_line: Union["RequestStartLine", "ResponseStartLine"],
        headers: HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        """Called once the HTTP headers have been received and parsed.
        :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
            depending on whether this is a client or server message.
        :arg headers: a `.HTTPHeaders` instance.
        Some `.HTTPConnection` methods can only be called during
        ``headers_received``.
        May return a `.Future`; if it does the body will not be read
        until it is done.
        """
        pass
    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        """Called for each chunk of body data received.
        May return a `.Future` for flow control.
        """
        pass
    def finish(self) -> None:
        """Called once, after the final chunk of data has been received."""
        pass
    def on_connection_close(self) -> None:
        """Called if the connection closes before the request finishes.
        If ``headers_received`` is called, either ``finish`` or
        ``on_connection_close`` will be called, but not both.
        """
        pass
class HTTPConnection(object):
    """Interface used by applications to write their responses.
    All methods must be overridden by concrete connection classes.
    .. versionadded:: 4.0
    """
    def write_headers(
        self,
        start_line: Union["RequestStartLine", "ResponseStartLine"],
        headers: HTTPHeaders,
        chunk: Optional[bytes] = None,
    ) -> "Future[None]":
        """Write an HTTP header block.
        :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
        :arg headers: a `.HTTPHeaders` instance.
        :arg chunk: the first (optional) chunk of data. This is an optimization
            so that small responses can be written in the same call as their
            headers.
        The ``version`` field of ``start_line`` is ignored.
        Returns a future for flow control.
        .. versionchanged:: 6.0
           The ``callback`` argument was removed.
        """
        raise NotImplementedError()
    def write(self, chunk: bytes) -> "Future[None]":
        """Write a chunk of body data.
        Returns a future for flow control.
        .. versionchanged:: 6.0
           The ``callback`` argument was removed.
        """
        raise NotImplementedError()
    def finish(self) -> None:
        """Signal that the last body data has been written."""
        raise NotImplementedError()
def url_concat(
    url: str,
    args: Union[
        None, Dict[str, str], List[Tuple[str, str]], Tuple[Tuple[str, str], ...]
    ],
) -> str:
    """Append query arguments to ``url``, merging with any existing ones.

    ``args`` may be either a dictionary or a list/tuple of key-value
    pairs (the latter allows for multiple values with the same key).
    ``None`` returns ``url`` unchanged.

    >>> url_concat("http://example.com/foo", dict(c="d"))
    'http://example.com/foo?c=d'
    >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
    'http://example.com/foo?a=b&c=d'
    >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
    'http://example.com/foo?a=b&c=d&c=d2'
    """
    if args is None:
        return url
    parsed = urlparse(url)
    # Start from the pairs already present so existing parameters survive.
    pairs = parse_qsl(parsed.query, keep_blank_values=True)
    if isinstance(args, dict):
        pairs.extend(args.items())
    elif isinstance(args, (list, tuple)):
        pairs.extend(args)
    else:
        raise TypeError(
            "'args' parameter should be dict, list or tuple. Not {0}".format(
                type(args)
            )
        )
    # Reassemble the URL with the merged query string in place.
    return urlunparse(parsed[:4] + (urlencode(pairs), parsed[5]))
class HTTPFile(ObjectDict):
    """Represents a file uploaded via a form.

    For backwards compatibility, its instance attributes are also
    accessible as dictionary keys.

    * ``filename``
    * ``body``
    * ``content_type``
    """
    pass
def _parse_request_range(
    range_header: str,
) -> Optional[Tuple[Optional[int], Optional[int]]]:
    """Parses a Range header.

    Returns either ``None`` or tuple ``(start, end)``.

    Note that while the HTTP headers use inclusive byte positions,
    this method returns indexes suitable for use in slices.

    >>> start, end = _parse_request_range("bytes=1-2")
    >>> start, end
    (1, 3)
    >>> [0, 1, 2, 3, 4][start:end]
    [1, 2]
    >>> _parse_request_range("bytes=6-")
    (6, None)
    >>> _parse_request_range("bytes=-6")
    (-6, None)
    >>> _parse_request_range("bytes=-0")
    (None, 0)
    >>> _parse_request_range("bytes=")
    (None, None)
    >>> _parse_request_range("foo=42")
    >>> _parse_request_range("bytes=1-2,6-10")

    Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed).

    See [0] for the details of the range header.

    [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
    """
    unit, _, value = range_header.partition("=")
    unit, value = unit.strip(), value.strip()
    # Only the "bytes" range unit is supported; others are ignored entirely.
    if unit != "bytes":
        return None
    start_b, _, end_b = value.partition("-")
    try:
        start = _int_or_none(start_b)
        end = _int_or_none(end_b)
    except ValueError:
        # Non-numeric positions make the whole header unparseable.
        return None
    if end is not None:
        if start is None:
            if end != 0:
                # Suffix range "bytes=-N": the last N bytes, i.e. slice [-N:].
                start = -end
                end = None
        else:
            # HTTP end positions are inclusive; slice ends are exclusive.
            end += 1
    return (start, end)
def _get_content_range(start: Optional[int], end: Optional[int], total: int) -> str:
"""Returns a suitable Content-Range header:
>>> print(_get_content_range(None, 1, 4))
bytes 0-0/4
>>> print(_get_content_range(1, 3, 4))
bytes 1-2/4
>>> print(_get_content_range(None, None, 4))
bytes 0-3/4
"""
start = start or 0
end = (end or total) - 1
return "bytes %s-%s/%s" % (start, end, total)
def _int_or_none(val: str) -> Optional[int]:
val = val.strip()
if val == "":
return None
return int(val)
def parse_body_arguments(
    content_type: str,
    body: bytes,
    arguments: Dict[str, List[bytes]],
    files: Dict[str, List[HTTPFile]],
    headers: Optional[HTTPHeaders] = None,
) -> None:
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``.  The ``content_type`` parameter should be
    a string and ``body`` should be a byte string.  The ``arguments``
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.  Unknown content types are ignored.
    """
    if content_type.startswith("application/x-www-form-urlencoded"):
        # A content-encoded (e.g. compressed) body cannot be parsed here;
        # warn and give up rather than raise.
        if headers and "Content-Encoding" in headers:
            gen_log.warning(
                "Unsupported Content-Encoding: %s", headers["Content-Encoding"]
            )
            return
        try:
            # real charset decoding will happen in RequestHandler.decode_argument()
            uri_arguments = parse_qs_bytes(body, keep_blank_values=True)
        except Exception as e:
            # Malformed bodies are logged and treated as empty.
            gen_log.warning("Invalid x-www-form-urlencoded body: %s", e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
    elif content_type.startswith("multipart/form-data"):
        if headers and "Content-Encoding" in headers:
            gen_log.warning(
                "Unsupported Content-Encoding: %s", headers["Content-Encoding"]
            )
            return
        try:
            # The boundary parameter lives in the Content-Type header itself,
            # e.g. 'multipart/form-data; boundary=xyz'.
            fields = content_type.split(";")
            for field in fields:
                k, sep, v = field.strip().partition("=")
                if k == "boundary" and v:
                    parse_multipart_form_data(utf8(v), body, arguments, files)
                    break
            else:
                raise ValueError("multipart boundary not found")
        except Exception as e:
            gen_log.warning("Invalid multipart/form-data: %s", e)
def parse_multipart_form_data(
    boundary: bytes,
    data: bytes,
    arguments: Dict[str, List[bytes]],
    files: Dict[str, List[HTTPFile]],
) -> None:
    """Parses a ``multipart/form-data`` body.

    The ``boundary`` and ``data`` parameters are both byte strings.
    The dictionaries given in the arguments and files parameters
    will be updated with the contents of the body.  Malformed parts
    are logged and skipped rather than raising.

    .. versionchanged:: 5.1

       Now recognizes non-ASCII filenames in RFC 2231/5987
       (``filename*=``) format.
    """
    # The standard allows for the boundary to be quoted in the header,
    # although it's rare (it happens at least for google app engine
    # xmpp). I think we're also supposed to handle backslash-escapes
    # here but I'll save that until we see a client that uses them
    # in the wild.
    if boundary.startswith(b'"') and boundary.endswith(b'"'):
        boundary = boundary[1:-1]
    final_boundary_index = data.rfind(b"--" + boundary + b"--")
    if final_boundary_index == -1:
        gen_log.warning("Invalid multipart/form-data: no final boundary")
        return
    # Everything after the final "--boundary--" marker is discarded.
    parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
    for part in parts:
        if not part:
            continue
        # Headers and value are separated by a blank line (CRLF CRLF).
        eoh = part.find(b"\r\n\r\n")
        if eoh == -1:
            gen_log.warning("multipart/form-data missing headers")
            continue
        headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
        disp_header = headers.get("Content-Disposition", "")
        disposition, disp_params = _parse_header(disp_header)
        if disposition != "form-data" or not part.endswith(b"\r\n"):
            gen_log.warning("Invalid multipart/form-data")
            continue
        # Strip the header block (+4 for CRLF CRLF) and the trailing CRLF.
        value = part[eoh + 4 : -2]
        if not disp_params.get("name"):
            gen_log.warning("multipart/form-data value missing name")
            continue
        name = disp_params["name"]
        if disp_params.get("filename"):
            # A filename parameter marks this part as a file upload.
            ctype = headers.get("Content-Type", "application/unknown")
            files.setdefault(name, []).append(
                HTTPFile(
                    filename=disp_params["filename"], body=value, content_type=ctype
                )
            )
        else:
            arguments.setdefault(name, []).append(value)
def format_timestamp(
    ts: Union[int, float, tuple, time.struct_time, datetime.datetime]
) -> str:
    """Format a timestamp as an HTTP-date string.

    The argument may be a numeric timestamp as returned by `time.time`,
    a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
    object.

    >>> format_timestamp(1359312200)
    'Sun, 27 Jan 2013 18:43:20 GMT'
    """
    # Normalize each accepted type to seconds since the epoch, then defer
    # to the stdlib for RFC 1123 formatting.
    if isinstance(ts, (int, float)):
        return email.utils.formatdate(ts, usegmt=True)
    if isinstance(ts, (tuple, time.struct_time)):
        return email.utils.formatdate(calendar.timegm(ts), usegmt=True)
    if isinstance(ts, datetime.datetime):
        return email.utils.formatdate(
            calendar.timegm(ts.utctimetuple()), usegmt=True
        )
    raise TypeError("unknown timestamp type: %r" % ts)
RequestStartLine = collections.namedtuple(
    "RequestStartLine", ["method", "path", "version"]
)

# Accept exactly "HTTP/1.<digit>"; anything else is a malformed version.
_http_version_re = re.compile(r"^HTTP/1\.[0-9]$")


def parse_request_start_line(line: str) -> RequestStartLine:
    """Parse an HTTP 1.x request line into a `RequestStartLine` namedtuple.

    :raises HTTPInputError: on a malformed request line or version.

    >>> parse_request_start_line("GET /foo HTTP/1.1")
    RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
    """
    pieces = line.split(" ")
    if len(pieces) != 3:
        # https://tools.ietf.org/html/rfc7230#section-3.1.1
        # invalid request-line SHOULD respond with a 400 (Bad Request)
        raise HTTPInputError("Malformed HTTP request line")
    method, path, version = pieces
    if _http_version_re.match(version) is None:
        raise HTTPInputError(
            "Malformed HTTP version in HTTP Request-Line: %r" % version
        )
    return RequestStartLine(method, path, version)
ResponseStartLine = collections.namedtuple(
    "ResponseStartLine", ["version", "code", "reason"]
)

# Match "HTTP/1.<digit> <status-code> <reason>".  The dot is escaped so
# that e.g. "HTTP/1x1" is rejected, consistent with _http_version_re above
# (the previous pattern's unescaped "." matched any character).
_http_response_line_re = re.compile(r"(HTTP/1\.[0-9]) ([0-9]+) ([^\r]*)")


def parse_response_start_line(line: str) -> ResponseStartLine:
    """Returns a (version, code, reason) tuple for an HTTP 1.x response line.

    The response is a `collections.namedtuple`.

    :raises HTTPInputError: if the line does not match the expected
        "HTTP/1.x <code> <reason>" format.

    >>> parse_response_start_line("HTTP/1.1 200 OK")
    ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
    """
    line = native_str(line)
    match = _http_response_line_re.match(line)
    if not match:
        raise HTTPInputError("Error parsing response start line")
    return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3))
# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
# It has also been modified to support valueless parameters as seen in
# websocket extension negotiations, and to support non-ascii values in
# RFC 2231/5987 format.
def _parseparam(s: str) -> Generator[str, None, None]:
while s[:1] == ";":
s = s[1:]
end = s.find(";")
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def _parse_header(line: str) -> Tuple[str, Dict[str, str]]:
    r"""Parse a Content-type like header.

    Return the main content-type and a dictionary of options.

    >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st"
    >>> ct, d = _parse_header(d)
    >>> ct
    'form-data'
    >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape')
    True
    >>> d['foo']
    'b\\a"r'
    """
    # The first quote-aware ";"-token is the main value (e.g. "form-data").
    parts = _parseparam(";" + line)
    key = next(parts)
    # decode_params treats first argument special, but we already stripped key
    params = [("Dummy", "value")]
    for p in parts:
        i = p.find("=")
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i + 1 :].strip()
            params.append((name, native_str(value)))
    # decode_params handles RFC 2231/5987 parameters (name*=charset''value).
    decoded_params = email.utils.decode_params(params)
    decoded_params.pop(0)  # get rid of the dummy again
    pdict = {}  # type: Dict[str, str]
    for name, decoded_value in decoded_params:
        value = email.utils.collapse_rfc2231_value(decoded_value)
        # Strip one level of surrounding double quotes from quoted values.
        if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
            value = value[1:-1]
        pdict[name] = value
    return key, pdict
def _encode_header(key: str, pdict: Dict[str, str]) -> str:
"""Inverse of _parse_header.
>>> _encode_header('permessage-deflate',
... {'client_max_window_bits': 15, 'client_no_context_takeover': None})
'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
"""
if not pdict:
return key
out = [key]
# Sort the parameters just to make it easy to test.
for k, v in sorted(pdict.items()):
if v is None:
out.append(k)
else:
# TODO: quote if necessary.
out.append("%s=%s" % (k, v))
return "; ".join(out)
def encode_username_password(
    username: Union[str, bytes], password: Union[str, bytes]
) -> bytes:
    """Encodes a username/password pair in the format used by HTTP auth.

    The return value is a byte string in the form ``username:password``.

    .. versionadded:: 5.1
    """
    # Normalize text inputs to NFC so visually-identical strings encode
    # identically; bytes inputs pass through untouched.
    if isinstance(username, unicode_type):
        username = unicodedata.normalize("NFC", username)
    if isinstance(password, unicode_type):
        password = unicodedata.normalize("NFC", password)
    return b":".join((utf8(username), utf8(password)))
def doctests():
    # type: () -> unittest.TestSuite
    """Return a `unittest.TestSuite` running this module's doctests."""
    import doctest
    return doctest.DocTestSuite()
# "host:port" — host is everything up to the last ":" followed by digits.
_netloc_re = re.compile(r"^(.+):(\d+)$")


def split_host_and_port(netloc: str) -> Tuple[str, Optional[int]]:
    """Returns ``(host, port)`` tuple from ``netloc``.

    Returned ``port`` will be ``None`` if not present.

    .. versionadded:: 4.1
    """
    m = _netloc_re.match(netloc)
    if m is None:
        return (netloc, None)
    return (m.group(1), int(m.group(2)))
def qs_to_qsl(qs: Dict[str, List[AnyStr]]) -> Iterable[Tuple[str, AnyStr]]:
    """Generator converting a result of ``parse_qs`` back to name-value pairs.

    Each key with N values yields N ``(key, value)`` tuples.

    .. versionadded:: 5.0
    """
    for name, values in qs.items():
        yield from ((name, value) for value in values)
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")  # octal escape, e.g. \012
_QuotePatt = re.compile(r"[\\].")  # backslash-escaped character, e.g. \"
_nulljoin = "".join
def _unquote_cookie(s: str) -> str:
    """Handle double quotes and escaping in cookie values.

    This method is copied verbatim from the Python 3.5 standard
    library (http.cookies._unquote) so we don't have to depend on
    non-public interfaces.
    """
    # If there aren't any doublequotes,
    # then there can't be any special characters. See RFC 2109.
    if s is None or len(s) < 2:
        return s
    if s[0] != '"' or s[-1] != '"':
        return s
    # We have to assume that we must decode this string.
    # Down to work.
    # Remove the "s
    s = s[1:-1]
    # Check for special sequences. Examples:
    # \012 --> \n
    # \" --> "
    #
    i = 0
    n = len(s)
    res = []
    while 0 <= i < n:
        # Find the next escape of each kind; whichever comes first wins.
        o_match = _OctalPatt.search(s, i)
        q_match = _QuotePatt.search(s, i)
        if not o_match and not q_match:  # Neither matched
            res.append(s[i:])
            break
        # else:
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):  # QuotePatt matched
            # Drop the backslash, keep the escaped character literally.
            res.append(s[i:k])
            res.append(s[k + 1])
            i = k + 2
        else:  # OctalPatt matched
            # Convert the three octal digits to the character they encode.
            res.append(s[i:j])
            res.append(chr(int(s[j + 1 : j + 4], 8)))
            i = j + 4
    return _nulljoin(res)
def parse_cookie(cookie: str) -> Dict[str, str]:
    """Parse a ``Cookie`` HTTP header into a dict of name/value pairs.

    This function attempts to mimic browser cookie parsing behavior;
    it specifically does not follow any of the cookie-related RFCs
    (because browsers don't either).

    The algorithm used is identical to that used by Django version 1.9.10.

    .. versionadded:: 4.4.2
    """
    result = {}  # type: Dict[str, str]
    for chunk in cookie.split(str(";")):
        name, sep, value = chunk.partition(str("="))
        if not sep:
            # Assume an empty name per
            # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
            name, value = str(""), chunk
        name, value = name.strip(), value.strip()
        if name or value:
            # unquote using Python's algorithm.
            result[name] = _unquote_cookie(value)
    return result
| {
"content_hash": "c41bff097e412161e2a6723be4a56ac9",
"timestamp": "",
"source": "github",
"line_count": 1118,
"max_line_length": 93,
"avg_line_length": 31.626118067978535,
"alnum_prop": 0.5878443350868262,
"repo_name": "bdarnell/tornado",
"id": "bd32cd0c49cc2e369eed7a5724a8f0402f3a227d",
"size": "35933",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tornado/httputil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1524"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1535018"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
} |
<<<<<<< HEAD
<<<<<<< HEAD
# Copyright (C) 2001-2010 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import re
import time
import base64
import unittest
import textwrap
from io import StringIO, BytesIO
from itertools import chain
from random import choice
import email
import email.policy
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator, BytesGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.support import unlink
from test.test_email import openfile, TestEmailBase
# These imports are documented to work, but we are testing them using a
# different path, so we import them here just to make sure they are importable.
from email.parser import FeedParser, BytesFeedParser
# String constants shared by the assertions below.
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
    # get_all(): every value of a repeated header; the default for a
    # header that is absent.
    def test_get_all(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_20.txt')
        eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
        eq(msg.get_all('xx', 'n/a'), 'n/a')
    # set_charset()/get_charset() round trip: implicit MIME headers are
    # added on set, cleared on set_charset(None), and existing headers
    # are preserved when a charset is added later.
    def test_getset_charset(self):
        eq = self.assertEqual
        msg = Message()
        eq(msg.get_charset(), None)
        charset = Charset('iso-8859-1')
        msg.set_charset(charset)
        eq(msg['mime-version'], '1.0')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
        eq(msg.get_param('charset'), 'iso-8859-1')
        eq(msg['content-transfer-encoding'], 'quoted-printable')
        eq(msg.get_charset().input_charset, 'iso-8859-1')
        # Remove the charset
        msg.set_charset(None)
        eq(msg.get_charset(), None)
        eq(msg['content-type'], 'text/plain')
        # Try adding a charset when there's already MIME headers present
        msg = Message()
        msg['MIME-Version'] = '2.0'
        msg['Content-Type'] = 'text/x-weird'
        msg['Content-Transfer-Encoding'] = 'quinted-puntable'
        msg.set_charset(charset)
        eq(msg['mime-version'], '2.0')
        eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
        eq(msg['content-transfer-encoding'], 'quinted-puntable')
    # set_charset() also accepts a charset name, not just a Charset object.
    def test_set_charset_from_string(self):
        eq = self.assertEqual
        msg = Message()
        msg.set_charset('us-ascii')
        eq(msg.get_charset().input_charset, 'us-ascii')
        eq(msg['content-type'], 'text/plain; charset="us-ascii"')
    # set_payload() with a Charset records that charset on the message.
    def test_set_payload_with_charset(self):
        msg = Message()
        charset = Charset('iso-8859-1')
        msg.set_payload('This is a string payload', charset)
        self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
    # A bytes payload with a utf-8 charset is base64-encoded automatically.
    def test_set_payload_with_8bit_data_and_charset(self):
        data = b'\xd0\x90\xd0\x91\xd0\x92'
        charset = Charset('utf-8')
        msg = Message()
        msg.set_payload(data, charset)
        self.assertEqual(msg['content-transfer-encoding'], 'base64')
        self.assertEqual(msg.get_payload(decode=True), data)
        self.assertEqual(msg.get_payload(), '0JDQkdCS\n')
    # With body_encoding disabled, a str payload is passed through as 8bit.
    def test_set_payload_with_non_ascii_and_charset_body_encoding_none(self):
        data = b'\xd0\x90\xd0\x91\xd0\x92'
        charset = Charset('utf-8')
        charset.body_encoding = None # Disable base64 encoding
        msg = Message()
        msg.set_payload(data.decode('utf-8'), charset)
        self.assertEqual(msg['content-transfer-encoding'], '8bit')
        self.assertEqual(msg.get_payload(decode=True), data)
    # With body_encoding disabled, a bytes payload is passed through as 8bit.
    def test_set_payload_with_8bit_data_and_charset_body_encoding_none(self):
        data = b'\xd0\x90\xd0\x91\xd0\x92'
        charset = Charset('utf-8')
        charset.body_encoding = None # Disable base64 encoding
        msg = Message()
        msg.set_payload(data, charset)
        self.assertEqual(msg['content-transfer-encoding'], '8bit')
        self.assertEqual(msg.get_payload(decode=True), data)
    # A list payload is stored and returned as-is.
    def test_set_payload_to_list(self):
        msg = Message()
        msg.set_payload([])
        self.assertEqual(msg.get_payload(), [])
    # attach() must raise TypeError when the payload is already a string
    # (i.e. the message is not really a multipart container).
    def test_attach_when_payload_is_string(self):
        msg = Message()
        msg['Content-Type'] = 'multipart/mixed'
        msg.set_payload('string payload')
        sub_msg = MIMEMessage(Message())
        self.assertRaisesRegex(TypeError, "[Aa]ttach.*non-multipart",
                               msg.attach, sub_msg)
    # get_charsets(): one entry per part, with the supplied failobj used
    # for parts that declare no charset.
    def test_get_charsets(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_08.txt')
        charsets = msg.get_charsets()
        eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
        msg = self._msgobj('msg_09.txt')
        charsets = msg.get_charsets('dingbat')
        eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
                      'koi8-r'])
        msg = self._msgobj('msg_12.txt')
        charsets = msg.get_charsets()
        eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
                      'iso-8859-3', 'us-ascii', 'koi8-r'])
    # get_filename() extracts the filename parameter from each subpart.
    def test_get_filename(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_04.txt')
        filenames = [p.get_filename() for p in msg.get_payload()]
        eq(filenames, ['msg.txt', 'msg.txt'])
        msg = self._msgobj('msg_07.txt')
        subpart = msg.get_payload(1)
        eq(subpart.get_filename(), 'dingusfish.gif')
    # get_filename() falls back to the Content-Type "name" parameter.
    def test_get_filename_with_name_parameter(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_44.txt')
        filenames = [p.get_filename() for p in msg.get_payload()]
        eq(filenames, ['msg.txt', 'msg.txt'])
    # get_boundary() returns the boundary parameter with quotes stripped.
    def test_get_boundary(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_07.txt')
        # No quotes!
        eq(msg.get_boundary(), 'BOUNDARY')
    # set_boundary() adds or rewrites the boundary parameter in place,
    # preserving the Content-Type header's position; it raises if the
    # message has no Content-Type header at all.
    def test_set_boundary(self):
        eq = self.assertEqual
        # This one has no existing boundary parameter, but the Content-Type:
        # header appears fifth.
        msg = self._msgobj('msg_01.txt')
        msg.set_boundary('BOUNDARY')
        header, value = msg.items()[4]
        eq(header.lower(), 'content-type')
        eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
        # This one has a Content-Type: header, with a boundary, stuck in the
        # middle of its headers.  Make sure the order is preserved; it should
        # be fifth.
        msg = self._msgobj('msg_04.txt')
        msg.set_boundary('BOUNDARY')
        header, value = msg.items()[4]
        eq(header.lower(), 'content-type')
        eq(value, 'multipart/mixed; boundary="BOUNDARY"')
        # And this one has no Content-Type: header at all.
        msg = self._msgobj('msg_03.txt')
        self.assertRaises(errors.HeaderParseError,
                          msg.set_boundary, 'BOUNDARY')
    # A boundary is synthesized lazily the first time the multipart
    # message is flattened to a string.
    def test_make_boundary(self):
        msg = MIMEMultipart('form-data')
        # Note that when the boundary gets created is an implementation
        # detail and might change.
        self.assertEqual(msg.items()[0][1], 'multipart/form-data')
        # Trigger creation of boundary
        msg.as_string()
        self.assertEqual(msg.items()[0][1][:33],
                         'multipart/form-data; boundary="==')
        # XXX: there ought to be tests of the uniqueness of the boundary, too.
def test_message_rfc822_only(self):
# Issue 7970: message/rfc822 not in multipart parsed by
# HeaderParser caused an exception when flattened.
with openfile('msg_46.txt') as fp:
msgdata = fp.read()
parser = HeaderParser()
msg = parser.parsestr(msgdata)
out = StringIO()
gen = Generator(out, True, 0)
gen.flatten(msg, False)
self.assertEqual(out.getvalue(), msgdata)
def test_byte_message_rfc822_only(self):
# Make sure new bytes header parser also passes this.
with openfile('msg_46.txt') as fp:
msgdata = fp.read().encode('ascii')
parser = email.parser.BytesHeaderParser()
msg = parser.parsebytes(msgdata)
out = BytesIO()
gen = email.generator.BytesGenerator(out)
gen.flatten(msg)
self.assertEqual(out.getvalue(), msgdata)
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
b'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
b'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
b'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
b'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
b'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), b'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), b'foo')
def test_get_payload_n_raises_on_non_multipart(self):
msg = Message()
self.assertRaises(TypeError, msg.get_payload, 1)
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
with openfile('msg_17.txt') as fp:
text = fp.read()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertIn('from', msg)
self.assertIn('From', msg)
self.assertIn('FROM', msg)
self.assertIn('to', msg)
self.assertIn('To', msg)
self.assertIn('TO', msg)
def test_as_string(self):
msg = self._msgobj('msg_01.txt')
with openfile('msg_01.txt') as fp:
text = fp.read()
self.assertEqual(text, str(msg))
fullrepr = msg.as_string(unixfrom=True)
lines = fullrepr.split('\n')
self.assertTrue(lines[0].startswith('From '))
self.assertEqual(text, NL.join(lines[1:]))
def test_as_string_policy(self):
msg = self._msgobj('msg_01.txt')
newpolicy = msg.policy.clone(linesep='\r\n')
fullrepr = msg.as_string(policy=newpolicy)
s = StringIO()
g = Generator(s, policy=newpolicy)
g.flatten(msg)
self.assertEqual(fullrepr, s.getvalue())
def test_as_bytes(self):
msg = self._msgobj('msg_01.txt')
with openfile('msg_01.txt') as fp:
data = fp.read().encode('ascii')
self.assertEqual(data, bytes(msg))
fullrepr = msg.as_bytes(unixfrom=True)
lines = fullrepr.split(b'\n')
self.assertTrue(lines[0].startswith(b'From '))
self.assertEqual(data, b'\n'.join(lines[1:]))
def test_as_bytes_policy(self):
msg = self._msgobj('msg_01.txt')
newpolicy = msg.policy.clone(linesep='\r\n')
fullrepr = msg.as_bytes(policy=newpolicy)
s = BytesIO()
g = BytesGenerator(s,policy=newpolicy)
g.flatten(msg)
self.assertEqual(fullrepr, s.getvalue())
# test_headerregistry.TestContentTypeHeader.bad_params
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
# test_headerregistry.TestContentTypeHeader.spaces_around_param_equals
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
# test_headerregistry.TestContentTypeHeader.spaces_around_semis
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
# test_headerregistry.TestContentTypeHeader.semis_inside_quotes
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
# test_headerregistry.TestContentTypeHeader.quotes_inside_rfc2231_value
def test_get_param_with_quotes(self):
msg = email.message_from_string(
'Content-Type: foo; bar*0="baz\\"foobar"; bar*1="\\"baz"')
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
msg = email.message_from_string(
"Content-Type: foo; bar*0=\"baz\\\"foobar\"; bar*1=\"\\\"baz\"")
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
def test_field_containment(self):
msg = email.message_from_string('Header: exists')
self.assertIn('header', msg)
self.assertIn('Header', msg)
self.assertIn('HEADER', msg)
self.assertNotIn('headerx', msg)
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_del_param_on_nonexistent_header(self):
msg = Message()
# Deleting param on empty msg should not raise exception.
msg.del_param('filename', 'content-disposition')
def test_del_nonexistent_param(self):
msg = Message()
msg.add_header('Content-Type', 'text/plain', charset='utf-8')
existing_header = msg['Content-Type']
msg.del_param('foobar', header='Content-Type')
self.assertEqual(msg['Content-Type'], existing_header)
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
    # Maintype of the implicit text/plain default.
    msg = Message()
    self.assertEqual(msg.get_content_maintype(), 'text')

def test_get_content_maintype_missing_with_default_type(self):
    # Maintype follows an explicitly set default type.
    msg = Message()
    msg.set_default_type('message/rfc822')
    self.assertEqual(msg.get_content_maintype(), 'message')

def test_get_content_maintype_from_message_implicit(self):
    # Fixture-driven checks; _msgobj is defined elsewhere in this package.
    msg = self._msgobj('msg_30.txt')
    self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')

def test_get_content_maintype_from_message_explicit(self):
    msg = self._msgobj('msg_28.txt')
    self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')

def test_get_content_maintype_from_message_text_plain_implicit(self):
    msg = self._msgobj('msg_03.txt')
    self.assertEqual(msg.get_content_maintype(), 'text')

def test_get_content_maintype_from_message_text_plain_explicit(self):
    msg = self._msgobj('msg_01.txt')
    self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
    # Subtype of the implicit text/plain default.
    msg = Message()
    self.assertEqual(msg.get_content_subtype(), 'plain')

def test_get_content_subtype_missing_with_default_type(self):
    # Subtype follows an explicitly set default type.
    msg = Message()
    msg.set_default_type('message/rfc822')
    self.assertEqual(msg.get_content_subtype(), 'rfc822')

def test_get_content_subtype_from_message_implicit(self):
    # Fixture-driven checks; _msgobj is defined elsewhere in this package.
    msg = self._msgobj('msg_30.txt')
    self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')

def test_get_content_subtype_from_message_explicit(self):
    msg = self._msgobj('msg_28.txt')
    self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')

def test_get_content_subtype_from_message_text_plain_implicit(self):
    msg = self._msgobj('msg_03.txt')
    self.assertEqual(msg.get_content_subtype(), 'plain')

def test_get_content_subtype_from_message_text_plain_explicit(self):
    msg = self._msgobj('msg_01.txt')
    self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
# test_defect_handling:test_invalid_chars_in_base64_payload
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
(b'\x03\x00\xe9\xd0\xfe\xff\xff.\x8b\xc0'
b'\xa1\x00p\xf6\xbf\xe9\x0f'))
self.assertIsInstance(msg.defects[0],
errors.InvalidBase64CharactersDefect)
def test_broken_unicode_payload(self):
# This test improves coverage but is not a compliance test.
# The behavior in this situation is currently undefined by the API.
x = 'this is a br\xf6ken thing to do'
msg = Message()
msg['content-type'] = 'text/plain'
msg['content-transfer-encoding'] = '8bit'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
bytes(x, 'raw-unicode-escape'))
def test_questionable_bytes_payload(self):
# This test improves coverage but is not a compliance test,
# since it involves poking inside the black box.
x = 'this is a quéstionable thing to do'.encode('utf-8')
msg = Message()
msg['content-type'] = 'text/plain; charset="utf-8"'
msg['content-transfer-encoding'] = '8bit'
msg._payload = x
self.assertEqual(msg.get_payload(decode=True), x)
# Issue 1078919
def test_ascii_add_header(self):
    # An ASCII filename is rendered as a plain quoted parameter.
    msg = Message()
    msg.add_header('Content-Disposition', 'attachment',
                   filename='bud.gif')
    self.assertEqual('attachment; filename="bud.gif"',
                     msg['Content-Disposition'])

def test_noascii_add_header(self):
    # A non-ASCII filename is RFC 2231-encoded, defaulting to utf-8.
    msg = Message()
    msg.add_header('Content-Disposition', 'attachment',
                   filename="Fußballer.ppt")
    self.assertEqual(
        'attachment; filename*=utf-8\'\'Fu%C3%9Fballer.ppt',
        msg['Content-Disposition'])

def test_nonascii_add_header_via_triple(self):
    # A (charset, language, value) triple selects the RFC 2231 charset.
    msg = Message()
    msg.add_header('Content-Disposition', 'attachment',
                   filename=('iso-8859-1', '', 'Fußballer.ppt'))
    self.assertEqual(
        'attachment; filename*=iso-8859-1\'\'Fu%DFballer.ppt',
        msg['Content-Disposition'])

def test_ascii_add_header_with_tspecial(self):
    # tspecials in an ASCII value force quoting, not RFC 2231 encoding.
    msg = Message()
    msg.add_header('Content-Disposition', 'attachment',
                   filename="windows [filename].ppt")
    self.assertEqual(
        'attachment; filename="windows [filename].ppt"',
        msg['Content-Disposition'])

def test_nonascii_add_header_with_tspecial(self):
    # Non-ASCII plus tspecials: everything goes through RFC 2231 encoding.
    msg = Message()
    msg.add_header('Content-Disposition', 'attachment',
                   filename="Fußballer [filename].ppt")
    self.assertEqual(
        "attachment; filename*=utf-8''Fu%C3%9Fballer%20%5Bfilename%5D.ppt",
        msg['Content-Disposition'])
def test_binary_quopri_payload(self):
    # decode=True decodes quoted-printable bodies to raw bytes
    # regardless of the declared charset.
    for charset in ('latin-1', 'ascii'):
        msg = Message()
        msg['content-type'] = 'text/plain; charset=%s' % charset
        msg['content-transfer-encoding'] = 'quoted-printable'
        msg.set_payload(b'foo=e6=96=87bar')
        self.assertEqual(
            msg.get_payload(decode=True),
            b'foo\xe6\x96\x87bar',
            'get_payload returns wrong result with charset %s.' % charset)

def test_binary_base64_payload(self):
    # Same as above for base64 bodies.
    for charset in ('latin-1', 'ascii'):
        msg = Message()
        msg['content-type'] = 'text/plain; charset=%s' % charset
        msg['content-transfer-encoding'] = 'base64'
        msg.set_payload(b'Zm9v5paHYmFy')
        self.assertEqual(
            msg.get_payload(decode=True),
            b'foo\xe6\x96\x87bar',
            'get_payload returns wrong result with charset %s.' % charset)

def test_binary_uuencode_payload(self):
    # All four historical spellings of the uuencode CTE must decode.
    for charset in ('latin-1', 'ascii'):
        for encoding in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            msg = Message()
            msg['content-type'] = 'text/plain; charset=%s' % charset
            msg['content-transfer-encoding'] = encoding
            msg.set_payload(b"begin 666 -\n)9F]OYI:'8F%R\n \nend\n")
            self.assertEqual(
                msg.get_payload(decode=True),
                b'foo\xe6\x96\x87bar',
                str(('get_payload returns wrong result ',
                     'with charset {0} and encoding {1}.')).\
                    format(charset, encoding))
def test_add_header_with_name_only_param(self):
    # A None parameter value produces a bare (valueless) parameter,
    # with underscores in the keyword mapped to dashes.
    msg = Message()
    msg.add_header('Content-Disposition', 'inline', foo_bar=None)
    self.assertEqual("inline; foo-bar", msg['Content-Disposition'])

def test_add_header_with_no_value(self):
    # A None header value is stored as the empty string.
    msg = Message()
    msg.add_header('X-Status', None)
    self.assertEqual('', msg['X-Status'])

# Issue 5871: reject an attempt to embed a header inside a header value
# (header injection attack).
def test_embeded_header_via_Header_rejected(self):
    # Serialization fails rather than emitting the injected header.
    msg = Message()
    msg['Dummy'] = Header('dummy\nX-Injected-Header: test')
    self.assertRaises(errors.HeaderParseError, msg.as_string)

def test_embeded_header_via_string_rejected(self):
    msg = Message()
    msg['Dummy'] = 'dummy\nX-Injected-Header: test'
    self.assertRaises(errors.HeaderParseError, msg.as_string)
# NOTE(review): the textwrap.dedent expected strings below originally
# contained indentation and a blank separator line between the headers and
# the body; that whitespace appears to have been lost in this copy of the
# file -- verify the exact bytes against upstream before editing.
def test_unicode_header_defaults_to_utf8_encoding(self):
    # Issue 14291: a non-ASCII header value is RFC 2047-encoded as utf-8
    # by default.
    m = MIMEText('abc\n')
    m['Subject'] = 'É test'
    self.assertEqual(str(m),textwrap.dedent("""\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: =?utf-8?q?=C3=89_test?=
abc
"""))

def test_unicode_body_defaults_to_utf8_encoding(self):
    # Issue 14291: a non-ASCII body selects utf-8 with base64 CTE by default.
    m = MIMEText('É testabc\n')
    self.assertEqual(str(m),textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
w4kgdGVzdGFiYwo=
"""))
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
    """Tests for Content-Transfer-Encoding selection by MIME classes."""
    # NOTE(review): blank lines and indentation inside the dedent expected
    # strings appear to have been lost in this copy -- verify against
    # upstream before editing the exact bytes.

    def test_EncodersEncode_base64(self):
        # Base64 body lines produced for binary data must fit in 76 columns.
        # openfile is a fixture helper defined elsewhere in this package.
        with openfile('PyBanner048.gif', 'rb') as fp:
            bindata = fp.read()
        mimed = email.mime.image.MIMEImage(bindata)
        base64ed = mimed.get_payload()
        # the transfer-encoded body lines should all be <=76 characters
        lines = base64ed.split('\n')
        self.assertLessEqual(max([ len(x) for x in lines ]), 76)

    def test_encode_empty_payload(self):
        # set_charset() on an empty payload still records a 7bit CTE.
        eq = self.assertEqual
        msg = Message()
        msg.set_charset('us-ascii')
        eq(msg['content-transfer-encoding'], '7bit')

    def test_default_cte(self):
        # MIMEText picks the CTE based on the data and charset.
        eq = self.assertEqual
        # 7bit data and the default us-ascii _charset
        msg = MIMEText('hello world')
        eq(msg['content-transfer-encoding'], '7bit')
        # Similar, but with 8bit data
        msg = MIMEText('hello \xf8 world')
        eq(msg['content-transfer-encoding'], 'base64')
        # And now with a different charset
        msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
        eq(msg['content-transfer-encoding'], 'quoted-printable')

    def test_encode7or8bit(self):
        # Make sure a charset whose input character set is 8bit but
        # whose output character set is 7bit gets a transfer-encoding
        # of 7bit.
        eq = self.assertEqual
        msg = MIMEText('文\n', _charset='euc-jp')
        eq(msg['content-transfer-encoding'], '7bit')
        eq(msg.as_string(), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/plain; charset="iso-2022-jp"
Content-Transfer-Encoding: 7bit
\x1b$BJ8\x1b(B
"""))

    def test_qp_encode_latin1(self):
        # latin-1 text is emitted quoted-printable.
        msg = MIMEText('\xe1\xf6\n', 'text', 'ISO-8859-1')
        self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
=E1=F6
"""))

    def test_qp_encode_non_latin1(self):
        # Issue 16948
        msg = MIMEText('\u017c\n', 'text', 'ISO-8859-2')
        self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-2"
Content-Transfer-Encoding: quoted-printable
=BF
"""))
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
    """Tests for folding/wrapping of long header values by Header and
    Generator, including RFC 2047 encoded-word splitting."""
    # NOTE(review): the folded expected strings below have significant
    # leading whitespace on continuation lines; leading spaces and blank
    # lines appear to have been stripped in this copy of the file --
    # verify the exact bytes against upstream before editing.

    maxDiff = None

    def test_split_long_continuation(self):
        # An unsplittable tab-continued line survives a flatten round-trip.
        eq = self.ndiffAssertEqual
        msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), """\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")

    def test_another_long_almost_unsplittable_header(self):
        eq = self.ndiffAssertEqual
        hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
        h = Header(hstr, continuation_ws='\t')
        eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
        h = Header(hstr.replace('\t', ' '))
        eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")

    def test_long_nonstring(self):
        # Appending parts in three different charsets produces a header
        # folded across multiple encoded-word lines.
        eq = self.ndiffAssertEqual
        g = Charset("iso-8859-1")
        cz = Charset("iso-8859-2")
        utf8 = Charset("utf-8")
        g_head = (b'Die Mieter treten hier ein werden mit einem Foerderband '
                  b'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
                  b'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
                  b'bef\xf6rdert. ')
        cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
                   b'd\xf9vtipu.. ')
        utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
                     '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
                     '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
                     '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
                     '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
                     'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
                     'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
                     '\u3044\u307e\u3059\u3002')
        h = Header(g_head, g, header_name='Subject')
        h.append(cz_head, cz)
        h.append(utf8_head, utf8)
        msg = Message()
        msg['Subject'] = h
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderb?=
=?iso-8859-1?q?and_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen?=
=?iso-8859-1?q?_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef?=
=?iso-8859-1?q?=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hrouti?=
=?iso-8859-2?q?ly_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC5LiA?=
=?utf-8?b?6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn44Gf44KJ?=
=?utf-8?b?44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFzIE51bnN0dWNr?=
=?utf-8?b?IGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5kIGRhcyBPZGVyIGRp?=
=?utf-8?b?ZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIDjgaPjgabjgYTjgb7jgZk=?=
=?utf-8?b?44CC?=
""")
        eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerde?=
=?iso-8859-1?q?rband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndis?=
=?iso-8859-1?q?chen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klinge?=
=?iso-8859-1?q?n_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se?=
=?iso-8859-2?q?_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb?=
=?utf-8?b?44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go?=
=?utf-8?b?44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBp?=
=?utf-8?b?c3QgZGFzIE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWlo?=
=?utf-8?b?ZXJodW5kIGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI0=?=
=?utf-8?b?44Go6KiA44Gj44Gm44GE44G+44GZ44CC?=""")

    def test_long_header_encode(self):
        eq = self.ndiffAssertEqual
        h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
                   'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
                   header_name='X-Foobar-Spoink-Defrobnit')
        eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')

    def test_long_header_encode_with_tab_continuation_is_just_a_hint(self):
        # continuation_ws is only a hint; existing spaces fold on spaces.
        eq = self.ndiffAssertEqual
        h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
                   'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
                   header_name='X-Foobar-Spoink-Defrobnit',
                   continuation_ws='\t')
        eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')

    def test_long_header_encode_with_tab_continuation(self):
        # A tab already present in the value is preserved as the fold point.
        eq = self.ndiffAssertEqual
        h = Header('wasnipoop; giraffes="very-long-necked-animals";\t'
                   'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
                   header_name='X-Foobar-Spoink-Defrobnit',
                   continuation_ws='\t')
        eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')

    def test_header_encode_with_different_output_charset(self):
        # euc-jp input encodes to its iso-2022-jp output charset.
        h = Header('文', 'euc-jp')
        self.assertEqual(h.encode(), "=?iso-2022-jp?b?GyRCSjgbKEI=?=")

    def test_long_header_encode_with_different_output_charset(self):
        h = Header(b'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4'
                   b'\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4'
                   b'\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4'
                   b'\xa4\xa4\xde\xa4\xb9'.decode('euc-jp'), 'euc-jp')
        res = """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKMnE8VCROPjUbKEI=?=
=?iso-2022-jp?b?GyRCRyckckJUJEMkRiQkJF4kORsoQg==?="""
        self.assertEqual(h.encode(), res)

    def test_header_splitter(self):
        eq = self.ndiffAssertEqual
        msg = MIMEText('')
        # It'd be great if we could use add_header() here, but that doesn't
        # guarantee an order of the parameters.
        msg['X-Foobar-Spoink-Defrobnit'] = (
            'wasnipoop; giraffes="very-long-necked-animals"; '
            'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')

    def test_no_semis_header_splitter(self):
        # With no semicolons the header folds on whitespace.
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'test@dom.ain'
        msg['References'] = SPACE.join('<%d@dom.ain>' % i for i in range(10))
        msg.set_payload('Test')
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
<5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
Test""")

    def test_last_split_chunk_does_not_fit(self):
        eq = self.ndiffAssertEqual
        h = Header('Subject: the first part of this is short, but_the_second'
                   '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
                   '_all_by_itself')
        eq(h.encode(), """\
Subject: the first part of this is short,
but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")

    def test_splittable_leading_char_followed_by_overlong_unsplitable(self):
        eq = self.ndiffAssertEqual
        h = Header(', but_the_second'
                   '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
                   '_all_by_itself')
        eq(h.encode(), """\
,
but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")

    def test_multiple_splittable_leading_char_followed_by_overlong_unsplitable(self):
        eq = self.ndiffAssertEqual
        h = Header(', , but_the_second'
                   '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
                   '_all_by_itself')
        eq(h.encode(), """\
, ,
but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")

    def test_trailing_splitable_on_overlong_unsplitable(self):
        eq = self.ndiffAssertEqual
        h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
                   'be_on_a_line_all_by_itself;')
        eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_should_"
           "be_on_a_line_all_by_itself;")

    def test_trailing_splitable_on_overlong_unsplitable_with_leading_splitable(self):
        eq = self.ndiffAssertEqual
        h = Header('; '
                   'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
                   'be_on_a_line_all_by_itself; ')
        eq(h.encode(), """\
;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)

    def test_long_header_with_multiple_sequential_split_chars(self):
        eq = self.ndiffAssertEqual
        h = Header('This is a long line that has two whitespaces in a row. '
                   'This used to cause truncation of the header when folded')
        eq(h.encode(), """\
This is a long line that has two whitespaces in a row. This used to cause
truncation of the header when folded""")

    def test_splitter_split_on_punctuation_only_if_fws_with_header(self):
        # Punctuation without following whitespace is not a fold point.
        eq = self.ndiffAssertEqual
        h = Header('thisverylongheaderhas;semicolons;and,commas,but'
                   'they;arenotlegal;fold,points')
        eq(h.encode(), "thisverylongheaderhas;semicolons;and,commas,butthey;"
           "arenotlegal;fold,points")

    def test_leading_splittable_in_the_middle_just_before_overlong_last_part(self):
        eq = self.ndiffAssertEqual
        h = Header('this is a test where we need to have more than one line '
                   'before; our final line that is just too big to fit;; '
                   'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
                   'be_on_a_line_all_by_itself;')
        eq(h.encode(), """\
this is a test where we need to have more than one line before;
our final line that is just too big to fit;;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself;""")

    def test_overlong_last_part_followed_by_split_point(self):
        eq = self.ndiffAssertEqual
        h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
                   'be_on_a_line_all_by_itself ')
        eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_"
           "should_be_on_a_line_all_by_itself ")

    def test_multiline_with_overlong_parts_separated_by_two_split_points(self):
        eq = self.ndiffAssertEqual
        h = Header('this_is_a__test_where_we_need_to_have_more_than_one_line_'
                   'before_our_final_line_; ; '
                   'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
                   'be_on_a_line_all_by_itself; ')
        eq(h.encode(), """\
this_is_a__test_where_we_need_to_have_more_than_one_line_before_our_final_line_;
;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)

    def test_multiline_with_overlong_last_part_followed_by_split_point(self):
        eq = self.ndiffAssertEqual
        h = Header('this is a test where we need to have more than one line '
                   'before our final line; ; '
                   'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
                   'be_on_a_line_all_by_itself; ')
        eq(h.encode(), """\
this is a test where we need to have more than one line before our final line;
;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)

    def test_long_header_with_whitespace_runs(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'test@dom.ain'
        msg['References'] = SPACE.join(['<foo@dom.ain> '] * 10)
        msg.set_payload('Test')
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain> <foo@dom.ain>\x20\x20
Test""")

    def test_long_run_with_semi_header_splitter(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'test@dom.ain'
        msg['References'] = SPACE.join(['<foo@dom.ain>'] * 10) + '; abc'
        msg.set_payload('Test')
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain>; abc
Test""")

    def test_splitter_split_on_punctuation_only_if_fws(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'test@dom.ain'
        msg['References'] = ('thisverylongheaderhas;semicolons;and,commas,but'
                             'they;arenotlegal;fold,points')
        msg.set_payload('Test')
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        # XXX the space after the header should not be there.
        eq(sfp.getvalue(), """\
From: test@dom.ain
References:\x20
thisverylongheaderhas;semicolons;and,commas,butthey;arenotlegal;fold,points
Test""")

    def test_no_split_long_header(self):
        eq = self.ndiffAssertEqual
        hstr = 'References: ' + 'x' * 80
        h = Header(hstr)
        # These come on two lines because Headers are really field value
        # classes and don't really know about their field names.
        eq(h.encode(), """\
References:
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
        h = Header('x' * 80)
        eq(h.encode(), 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')

    def test_splitting_multiple_long_lines(self):
        eq = self.ndiffAssertEqual
        hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
        h = Header(hstr, continuation_ws='\t')
        eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
for <mailman-admin@babylon.socal-raves.org>;
Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
for <mailman-admin@babylon.socal-raves.org>;
Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
for <mailman-admin@babylon.socal-raves.org>;
Sat, 2 Feb 2002 17:00:06 -0800 (PST)""")

    def test_splitting_first_line_only_is_long(self):
        eq = self.ndiffAssertEqual
        hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
        h = Header(hstr, maxlinelen=78, header_name='Received',
                   continuation_ws='\t')
        eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")

    def test_long_8bit_header(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        h = Header('Britische Regierung gibt', 'iso-8859-1',
                   header_name='Subject')
        h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
        eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
=?iso-8859-1?q?hore-Windkraftprojekte?=""")
        msg['Subject'] = h
        eq(msg.as_string(maxheaderlen=76), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
=?iso-8859-1?q?hore-Windkraftprojekte?=
""")
        # maxheaderlen=0 disables folding entirely.
        eq(msg.as_string(maxheaderlen=0), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")

    def test_long_8bit_header_no_charset(self):
        # Non-ASCII text assigned as a plain string defaults to utf-8.
        eq = self.ndiffAssertEqual
        msg = Message()
        header_string = ('Britische Regierung gibt gr\xfcnes Licht '
                         'f\xfcr Offshore-Windkraftprojekte '
                         '<a-very-long-address@example.com>')
        msg['Reply-To'] = header_string
        eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
=?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
""")
        msg = Message()
        msg['Reply-To'] = Header(header_string,
                                 header_name='Reply-To')
        eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
=?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
""")

    def test_long_to_header(self):
        eq = self.ndiffAssertEqual
        to = ('"Someone Test #A" <someone@eecs.umich.edu>,'
              '<someone@eecs.umich.edu>, '
              '"Someone Test #B" <someone@umich.edu>, '
              '"Someone Test #C" <someone@eecs.umich.edu>, '
              '"Someone Test #D" <someone@eecs.umich.edu>')
        msg = Message()
        msg['To'] = to
        eq(msg.as_string(maxheaderlen=78), '''\
To: "Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,
"Someone Test #B" <someone@umich.edu>,
"Someone Test #C" <someone@eecs.umich.edu>,
"Someone Test #D" <someone@eecs.umich.edu>
''')

    def test_long_line_after_append(self):
        eq = self.ndiffAssertEqual
        s = 'This is an example of string which has almost the limit of header length.'
        h = Header(s)
        h.append('Add another line.')
        eq(h.encode(maxlinelen=76), """\
This is an example of string which has almost the limit of header length.
Add another line.""")

    def test_shorter_line_with_append(self):
        eq = self.ndiffAssertEqual
        s = 'This is a shorter line.'
        h = Header(s)
        h.append('Add another sentence. (Surprise?)')
        eq(h.encode(),
           'This is a shorter line. Add another sentence. (Surprise?)')

    def test_long_field_name(self):
        eq = self.ndiffAssertEqual
        fn = 'X-Very-Very-Very-Long-Header-Name'
        gs = ('Die Mieter treten hier ein werden mit einem Foerderband '
              'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
              'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
              'bef\xf6rdert. ')
        h = Header(gs, 'iso-8859-1', header_name=fn)
        # BAW: this seems broken because the first line is too long
        eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_e?=
=?iso-8859-1?q?in_werden_mit_einem_Foerderband_komfortabel_den_Korridor_e?=
=?iso-8859-1?q?ntlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_ge?=
=?iso-8859-1?q?gen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")

    def test_long_received_header(self):
        h = ('from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) '
             'by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; '
             'Wed, 05 Mar 2003 18:10:18 -0700')
        msg = Message()
        msg['Received-1'] = Header(h, continuation_ws='\t')
        msg['Received-2'] = h
        # This should be splitting on spaces not semicolons.
        self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
""")

    def test_string_headerinst_eq(self):
        # A plain string and an equivalent Header instance fold the same.
        h = ('<15975.17901.207240.414604@sgigritzmann1.mathematik.'
             'tu-muenchen.de> (David Bremner\'s message of '
             '"Thu, 6 Mar 2003 13:58:21 +0100")')
        msg = Message()
        msg['Received-1'] = Header(h, header_name='Received-1',
                                   continuation_ws='\t')
        msg['Received-2'] = h
        # XXX The space after the ':' should not be there.
        self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1:\x20
<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
Received-2:\x20
<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
""")

    def test_long_unbreakable_lines_with_continuation(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
        msg['Face-1'] = t
        msg['Face-2'] = Header(t, header_name='Face-2')
        msg['Face-3'] = ' ' + t
        # XXX This splitting is all wrong. It the first value line should be
        # snug against the field name or the space after the header not there.
        eq(msg.as_string(maxheaderlen=78), """\
Face-1:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-3:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")

    def test_another_long_multiline_header(self):
        eq = self.ndiffAssertEqual
        m = ('Received: from siimage.com '
             '([172.25.1.3]) by zima.siliconimage.com with '
             'Microsoft SMTPSVC(5.0.2195.4905); '
             'Wed, 16 Oct 2002 07:41:11 -0700')
        msg = email.message_from_string(m)
        eq(msg.as_string(maxheaderlen=78), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')

    def test_long_lines_with_different_header(self):
        eq = self.ndiffAssertEqual
        h = ('List-Unsubscribe: '
             '<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,'
             ' <mailto:spamassassin-talk-request@lists.sourceforge.net'
             '?subject=unsubscribe>')
        msg = Message()
        msg['List'] = h
        msg['List'] = Header(h, header_name='List')
        eq(msg.as_string(maxheaderlen=78), """\
List: List-Unsubscribe:
<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
List: List-Unsubscribe:
<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
""")

    def test_long_rfc2047_header_with_embedded_fws(self):
        h = Header(textwrap.dedent("""\
We're going to pretend this header is in a non-ascii character set
\tto see if line wrapping with encoded words and embedded
folding white space works"""),
                   charset='utf-8',
                   header_name='Test')
        self.assertEqual(h.encode()+'\n', textwrap.dedent("""\
=?utf-8?q?We=27re_going_to_pretend_this_header_is_in_a_non-ascii_chara?=
=?utf-8?q?cter_set?=
=?utf-8?q?_to_see_if_line_wrapping_with_encoded_words_and_embedded?=
=?utf-8?q?_folding_white_space_works?=""")+'\n')
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
    """Test the Generator mangle_from_ option: body lines that begin with
    'From ' (the mbox message delimiter) get escaped to '>From '."""

    def setUp(self):
        # A simple message whose body starts with a 'From ' line.
        self.msg = Message()
        self.msg['From'] = 'aaa@bbb.org'
        self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")

    def test_mangled_from(self):
        s = StringIO()
        g = Generator(s, mangle_from_=True)
        g.flatten(self.msg)
        # The body's 'From ' line is escaped; headers are untouched.
        self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org

>From the desk of A.A.A.:
Blah blah blah
""")

    def test_dont_mangle_from(self):
        s = StringIO()
        g = Generator(s, mangle_from_=False)
        g.flatten(self.msg)
        # With mangling disabled, the body passes through verbatim.
        self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org

From the desk of A.A.A.:
Blah blah blah
""")

    def test_mangle_from_in_preamble_and_epilog(self):
        # 'From ' lines in a multipart's preamble and epilogue must be
        # mangled as well, giving exactly two '>From ' lines here.
        s = StringIO()
        g = Generator(s, mangle_from_=True)
        msg = email.message_from_string(textwrap.dedent("""\
            From: foo@bar.com
            Mime-Version: 1.0
            Content-Type: multipart/mixed; boundary=XXX

            From somewhere unknown

            --XXX
            Content-Type: text/plain

            foo

            --XXX--

            From somewhere unknowable
            """))
        g.flatten(msg)
        self.assertEqual(len([1 for x in s.getvalue().split('\n')
                                  if x.startswith('>From ')]), 2)

    def test_mangled_from_with_bad_bytes(self):
        # Mangling must also work in BytesGenerator when the 8bit body
        # contains non-ascii bytes.
        source = textwrap.dedent("""\
            Content-Type: text/plain; charset="utf-8"
            MIME-Version: 1.0
            Content-Transfer-Encoding: 8bit
            From: aaa@bbb.org

            """).encode('utf-8')
        msg = email.message_from_bytes(source + b'From R\xc3\xb6lli\n')
        b = BytesIO()
        g = BytesGenerator(b, mangle_from_=True)
        g.flatten(msg)
        self.assertEqual(b.getvalue(), source + b'>From R\xc3\xb6lli\n')
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
    """Tests for the email.mime.audio.MIMEAudio class."""

    def setUp(self):
        # audiotest.au is a Sun/NeXT audio fixture shipped with the test data.
        with openfile('audiotest.au', 'rb') as fp:
            self._audiodata = fp.read()
        self._au = MIMEAudio(self._audiodata)

    def test_guess_minor_type(self):
        # The audio subtype is sniffed from the data (.au -> audio/basic).
        self.assertEqual(self._au.get_content_type(), 'audio/basic')

    def test_encoding(self):
        # The payload is base64-encoded by default; decoding round-trips it.
        payload = self._au.get_payload()
        self.assertEqual(base64.decodebytes(bytes(payload, 'ascii')),
                         self._audiodata)

    def test_checkSetMinor(self):
        # An explicitly given subtype overrides the sniffed one.
        au = MIMEAudio(self._audiodata, 'fish')
        self.assertEqual(au.get_content_type(), 'audio/fish')

    def test_add_header(self):
        # add_header builds a parameterized Content-Disposition; the
        # parameter accessors must all see it.
        eq = self.assertEqual
        self._au.add_header('Content-Disposition', 'attachment',
                            filename='audiotest.au')
        eq(self._au['content-disposition'],
           'attachment; filename="audiotest.au"')
        eq(self._au.get_params(header='content-disposition'),
           [('attachment', ''), ('filename', 'audiotest.au')])
        eq(self._au.get_param('filename', header='content-disposition'),
           'audiotest.au')
        missing = []
        eq(self._au.get_param('attachment', header='content-disposition'), '')
        self.assertIs(self._au.get_param('foo', failobj=missing,
                                         header='content-disposition'), missing)
        # Try some missing stuff
        self.assertIs(self._au.get_param('foobar', missing), missing)
        self.assertIs(self._au.get_param('attachment', missing,
                                         header='foobar'), missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
    """Tests for the email.mime.image.MIMEImage class."""

    def setUp(self):
        # PyBanner048.gif is a GIF fixture shipped with the test data.
        with openfile('PyBanner048.gif', 'rb') as fp:
            self._imgdata = fp.read()
        self._im = MIMEImage(self._imgdata)

    def test_guess_minor_type(self):
        # The image subtype is sniffed from the data (GIF -> image/gif).
        self.assertEqual(self._im.get_content_type(), 'image/gif')

    def test_encoding(self):
        # The payload is base64-encoded by default; decoding round-trips it.
        payload = self._im.get_payload()
        self.assertEqual(base64.decodebytes(bytes(payload, 'ascii')),
                         self._imgdata)

    def test_checkSetMinor(self):
        # An explicitly given subtype overrides the sniffed one.
        im = MIMEImage(self._imgdata, 'fish')
        self.assertEqual(im.get_content_type(), 'image/fish')

    def test_add_header(self):
        # Mirrors TestMIMEAudio.test_add_header for the image class.
        eq = self.assertEqual
        self._im.add_header('Content-Disposition', 'attachment',
                            filename='dingusfish.gif')
        eq(self._im['content-disposition'],
           'attachment; filename="dingusfish.gif"')
        eq(self._im.get_params(header='content-disposition'),
           [('attachment', ''), ('filename', 'dingusfish.gif')])
        eq(self._im.get_param('filename', header='content-disposition'),
           'dingusfish.gif')
        missing = []
        eq(self._im.get_param('attachment', header='content-disposition'), '')
        self.assertIs(self._im.get_param('foo', failobj=missing,
                                         header='content-disposition'), missing)
        # Try some missing stuff
        self.assertIs(self._im.get_param('foobar', missing), missing)
        self.assertIs(self._im.get_param('attachment', missing,
                                        header='foobar'), missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
    """Tests for email.mime.application.MIMEApplication, including the
    behavior of the pluggable _encoder hooks on binary payloads."""

    def test_headers(self):
        eq = self.assertEqual
        msg = MIMEApplication(b'\xfa\xfb\xfc\xfd\xfe\xff')
        # Defaults: octet-stream content type, base64 transfer encoding.
        eq(msg.get_content_type(), 'application/octet-stream')
        eq(msg['content-transfer-encoding'], 'base64')

    def test_body(self):
        eq = self.assertEqual
        bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(bytesdata)
        # whitespace in the cte encoded block is RFC-irrelevant.
        eq(msg.get_payload().strip(), '+vv8/f7/')
        eq(msg.get_payload(decode=True), bytesdata)

    def test_binary_body_with_encode_7or8bit(self):
        # Issue 17171.
        bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(bytesdata, _encoder=encoders.encode_7or8bit)
        # Treated as a string, this will be invalid code points.
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
        self.assertEqual(msg.get_payload(decode=True), bytesdata)
        self.assertEqual(msg['Content-Transfer-Encoding'], '8bit')
        # The raw bytes must survive a serialize/reparse round trip.
        s = BytesIO()
        g = BytesGenerator(s)
        g.flatten(msg)
        wireform = s.getvalue()
        msg2 = email.message_from_bytes(wireform)
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
        self.assertEqual(msg2.get_payload(decode=True), bytesdata)
        self.assertEqual(msg2['Content-Transfer-Encoding'], '8bit')

    def test_binary_body_with_encode_noop(self):
        # Issue 16564: This does not produce an RFC valid message, since to be
        # valid it should have a CTE of binary.  But the below works in
        # Python2, and is documented as working this way.
        bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(bytesdata, _encoder=encoders.encode_noop)
        # Treated as a string, this will be invalid code points.
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
        self.assertEqual(msg.get_payload(decode=True), bytesdata)
        s = BytesIO()
        g = BytesGenerator(s)
        g.flatten(msg)
        wireform = s.getvalue()
        msg2 = email.message_from_bytes(wireform)
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
        self.assertEqual(msg2.get_payload(decode=True), bytesdata)

    def test_binary_body_with_encode_quopri(self):
        # Issue 14360.
        bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff '
        msg = MIMEApplication(bytesdata, _encoder=encoders.encode_quopri)
        # Note the trailing space is encoded as =20 per quoted-printable rules.
        self.assertEqual(msg.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
        self.assertEqual(msg.get_payload(decode=True), bytesdata)
        self.assertEqual(msg['Content-Transfer-Encoding'], 'quoted-printable')
        s = BytesIO()
        g = BytesGenerator(s)
        g.flatten(msg)
        wireform = s.getvalue()
        msg2 = email.message_from_bytes(wireform)
        self.assertEqual(msg.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
        self.assertEqual(msg2.get_payload(decode=True), bytesdata)
        self.assertEqual(msg2['Content-Transfer-Encoding'], 'quoted-printable')

    def test_binary_body_with_encode_base64(self):
        bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(bytesdata, _encoder=encoders.encode_base64)
        self.assertEqual(msg.get_payload(), '+vv8/f7/\n')
        self.assertEqual(msg.get_payload(decode=True), bytesdata)
        s = BytesIO()
        g = BytesGenerator(s)
        g.flatten(msg)
        wireform = s.getvalue()
        msg2 = email.message_from_bytes(wireform)
        self.assertEqual(msg.get_payload(), '+vv8/f7/\n')
        self.assertEqual(msg2.get_payload(decode=True), bytesdata)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
    """Tests for the email.mime.text.MIMEText class."""

    def setUp(self):
        self._msg = MIMEText('hello there')

    def test_types(self):
        # Pure-ascii input defaults to text/plain with us-ascii charset.
        eq = self.assertEqual
        eq(self._msg.get_content_type(), 'text/plain')
        eq(self._msg.get_param('charset'), 'us-ascii')
        missing = []
        self.assertIs(self._msg.get_param('foobar', missing), missing)
        self.assertIs(self._msg.get_param('charset', missing, header='foobar'),
                      missing)

    def test_payload(self):
        self.assertEqual(self._msg.get_payload(), 'hello there')
        self.assertFalse(self._msg.is_multipart())

    def test_charset(self):
        eq = self.assertEqual
        msg = MIMEText('hello there', _charset='us-ascii')
        eq(msg.get_charset().input_charset, 'us-ascii')
        eq(msg['content-type'], 'text/plain; charset="us-ascii"')

    def test_7bit_input(self):
        # Explicit us-ascii charset with 7bit-clean input.
        eq = self.assertEqual
        msg = MIMEText('hello there', _charset='us-ascii')
        eq(msg.get_charset().input_charset, 'us-ascii')
        eq(msg['content-type'], 'text/plain; charset="us-ascii"')

    def test_7bit_input_no_charset(self):
        # With no charset given, 7bit-clean input is detected as us-ascii.
        eq = self.assertEqual
        msg = MIMEText('hello there')
        eq(msg.get_charset(), 'us-ascii')
        eq(msg['content-type'], 'text/plain; charset="us-ascii"')
        self.assertIn('hello there', msg.as_string())

    def test_utf8_input(self):
        # Cyrillic input with an explicit utf-8 charset round-trips.
        teststr = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
        eq = self.assertEqual
        msg = MIMEText(teststr, _charset='utf-8')
        eq(msg.get_charset().output_charset, 'utf-8')
        eq(msg['content-type'], 'text/plain; charset="utf-8"')
        eq(msg.get_payload(decode=True), teststr.encode('utf-8'))

    @unittest.skip("can't fix because of backward compat in email5, "
                   "will fix in email6")
    def test_utf8_input_no_charset(self):
        teststr = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
        self.assertRaises(UnicodeEncodeError, MIMEText, teststr)
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
    """Exercise construction, generation and parsing of multipart/* messages,
    including preamble/epilogue handling and boundary edge cases."""

    def setUp(self):
        # Build a two-part multipart/mixed (text intro + GIF attachment)
        # with a fixed boundary so generated output is predictable.
        with openfile('PyBanner048.gif', 'rb') as fp:
            data = fp.read()
        container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
        image = MIMEImage(data, name='dingusfish.gif')
        image.add_header('content-disposition', 'attachment',
                         filename='dingusfish.gif')
        intro = MIMEText('''\
Hi there,

This is the dingus fish.
''')
        container.attach(intro)
        container.attach(image)
        container['From'] = 'Barry <barry@digicool.com>'
        container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
        container['Subject'] = 'Here is your dingus fish'

        # Use a fixed timestamp, formatted with the local timezone offset,
        # so the Date header is deterministic for this run.
        now = 987809702.54848599
        timetuple = time.localtime(now)
        if timetuple[-1] == 0:
            tzsecs = time.timezone
        else:
            tzsecs = time.altzone
        if tzsecs > 0:
            sign = '-'
        else:
            sign = '+'
        tzoffset = ' %s%04d' % (sign, tzsecs / 36)
        container['Date'] = time.strftime(
            '%a, %d %b %Y %H:%M:%S',
            time.localtime(now)) + tzoffset
        self._msg = container
        self._im = image
        self._txt = intro

    def test_hierarchy(self):
        # convenience
        eq = self.assertEqual
        raises = self.assertRaises
        # tests
        m = self._msg
        self.assertTrue(m.is_multipart())
        eq(m.get_content_type(), 'multipart/mixed')
        eq(len(m.get_payload()), 2)
        raises(IndexError, m.get_payload, 2)
        m0 = m.get_payload(0)
        m1 = m.get_payload(1)
        self.assertIs(m0, self._txt)
        self.assertIs(m1, self._im)
        eq(m.get_payload(), [m0, m1])
        self.assertFalse(m0.is_multipart())
        self.assertFalse(m1.is_multipart())

    def test_empty_multipart_idempotent(self):
        # Parsing then regenerating a part-less multipart must be a no-op.
        text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain


--BOUNDARY


--BOUNDARY--
"""
        msg = Parser().parsestr(text)
        self.ndiffAssertEqual(text, msg.as_string())

    def test_no_parts_in_a_multipart_with_none_epilogue(self):
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.set_boundary('BOUNDARY')
        self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY

--BOUNDARY--
''')

    def test_no_parts_in_a_multipart_with_empty_epilogue(self):
        # An empty-string preamble/epilogue produce extra blank lines
        # compared to the None case above.
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.preamble = ''
        outer.epilogue = ''
        outer.set_boundary('BOUNDARY')
        self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain


--BOUNDARY

--BOUNDARY--
''')

    def test_one_part_in_a_multipart(self):
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.set_boundary('BOUNDARY')
        msg = MIMEText('hello world')
        outer.attach(msg)
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_empty_preamble(self):
        # preamble == '' yields an empty preamble line before the boundary.
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.preamble = ''
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain


--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_none_preamble(self):
        # preamble is None -> no preamble section at all.
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.preamble = None
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_none_epilogue(self):
        # epilogue is None -> nothing follows the closing boundary.
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.epilogue = None
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
        # epilogue == '' -> closing boundary still ends with a newline only.
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.epilogue = ''
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
        # epilogue == '\n' -> an extra blank line after the closing boundary.
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.epilogue = '\n'
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--

''')

    def test_message_external_body(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_36.txt')
        eq(len(msg.get_payload()), 2)
        msg1 = msg.get_payload(1)
        eq(msg1.get_content_type(), 'multipart/alternative')
        eq(len(msg1.get_payload()), 2)
        for subpart in msg1.get_payload():
            eq(subpart.get_content_type(), 'message/external-body')
            eq(len(subpart.get_payload()), 1)
            subsubpart = subpart.get_payload(0)
            eq(subsubpart.get_content_type(), 'text/plain')

    def test_double_boundary(self):
        # msg_37.txt is a multipart that contains two dash-boundary's in a
        # row.  Our interpretation of RFC 2046 calls for ignoring the second
        # and subsequent boundaries.
        msg = self._msgobj('msg_37.txt')
        self.assertEqual(len(msg.get_payload()), 3)

    def test_nested_inner_contains_outer_boundary(self):
        eq = self.ndiffAssertEqual
        # msg_38.txt has an inner part that contains outer boundaries.  My
        # interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) say
        # these are illegal and should be interpreted as unterminated inner
        # parts.
        msg = self._msgobj('msg_38.txt')
        sfp = StringIO()
        iterators._structure(msg, sfp)
        eq(sfp.getvalue(), """\
multipart/mixed
    multipart/mixed
        multipart/alternative
            text/plain
        text/plain
    text/plain
text/plain
""")

    def test_nested_with_same_boundary(self):
        eq = self.ndiffAssertEqual
        # msg 39.txt is similarly evil in that it's got inner parts that use
        # the same boundary as outer parts.  Again, I believe the way this is
        # parsed is closest to the spirit of RFC 2046
        msg = self._msgobj('msg_39.txt')
        sfp = StringIO()
        iterators._structure(msg, sfp)
        eq(sfp.getvalue(), """\
multipart/mixed
    multipart/mixed
        multipart/alternative
    application/octet-stream
    application/octet-stream
text/plain
""")

    def test_boundary_in_non_multipart(self):
        # A boundary parameter on a non-multipart type is simply inert.
        msg = self._msgobj('msg_40.txt')
        self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"

----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit

<html></html>

----961284236552522269--
''')

    def test_boundary_with_leading_space(self):
        # Leading whitespace in the boundary value must be preserved.
        eq = self.assertEqual
        msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"

-- XXXX
Content-Type: text/plain


-- XXXX
Content-Type: text/plain

-- XXXX--
''')
        self.assertTrue(msg.is_multipart())
        eq(msg.get_boundary(), ' XXXX')
        eq(len(msg.get_payload()), 2)

    def test_boundary_without_trailing_newline(self):
        m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0

--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64

YXNkZg==
--===============0012394164==--""")
        self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
    """Messages that violate the RFCs must not crash the parser; instead it
    registers defect objects and makes a best-effort interpretation."""

    def test_parse_missing_minor_type(self):
        # A Content-Type with no subtype falls back to text/plain.
        eq = self.assertEqual
        msg = self._msgobj('msg_14.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')

    # test_defect_handling
    def test_same_boundary_inner_outer(self):
        msg = self._msgobj('msg_15.txt')
        # XXX We can probably eventually do better
        inner = msg.get_payload(0)
        self.assertTrue(hasattr(inner, 'defects'))
        self.assertEqual(len(inner.defects), 1)
        self.assertIsInstance(inner.defects[0],
                              errors.StartBoundaryNotFoundDefect)

    # test_defect_handling
    def test_multipart_no_boundary(self):
        # A multipart with no boundary parameter degrades to a string payload
        # and records two defects.
        msg = self._msgobj('msg_25.txt')
        self.assertIsInstance(msg.get_payload(), str)
        self.assertEqual(len(msg.defects), 2)
        self.assertIsInstance(msg.defects[0],
                              errors.NoBoundaryInMultipartDefect)
        self.assertIsInstance(msg.defects[1],
                              errors.MultipartInvariantViolationDefect)

    # Template message: '{}' is replaced by an (optional) extra header line
    # so the CTE tests below can vary the Content-Transfer-Encoding.
    multipart_msg = textwrap.dedent("""\
        Date: Wed, 14 Nov 2007 12:56:23 GMT
        From: foo@bar.invalid
        To: foo@bar.invalid
        Subject: Content-Transfer-Encoding: base64 and multipart
        MIME-Version: 1.0
        Content-Type: multipart/mixed;
            boundary="===============3344438784458119861=="{}

        --===============3344438784458119861==
        Content-Type: text/plain

        Test message

        --===============3344438784458119861==
        Content-Type: application/octet-stream
        Content-Transfer-Encoding: base64

        YWJj

        --===============3344438784458119861==--
        """)

    # test_defect_handling
    def test_multipart_invalid_cte(self):
        # base64 CTE on a multipart container is invalid per RFC 2045.
        msg = self._str_msg(
            self.multipart_msg.format("\nContent-Transfer-Encoding: base64"))
        self.assertEqual(len(msg.defects), 1)
        self.assertIsInstance(msg.defects[0],
            errors.InvalidMultipartContentTransferEncodingDefect)

    # test_defect_handling
    def test_multipart_no_cte_no_defect(self):
        msg = self._str_msg(self.multipart_msg.format(''))
        self.assertEqual(len(msg.defects), 0)

    # test_defect_handling
    def test_multipart_valid_cte_no_defect(self):
        # 7bit/8bit/binary (case-insensitive) are the only valid CTEs here.
        for cte in ('7bit', '8bit', 'BINary'):
            msg = self._str_msg(
                self.multipart_msg.format(
                    "\nContent-Transfer-Encoding: {}".format(cte)))
            self.assertEqual(len(msg.defects), 0)

    # test_headerregistry.TestContentTypeHeader invalid_1 and invalid_2.
    def test_invalid_content_type(self):
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        msg = Message()
        # RFC 2045, $5.2 says invalid yields text/plain
        msg['Content-Type'] = 'text'
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_content_type(), 'text/plain')
        # Clear the old value and try something /really/ invalid
        del msg['content-type']
        msg['Content-Type'] = 'foo'
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_content_type(), 'text/plain')
        # Still, make sure that the message is idempotently generated
        s = StringIO()
        g = Generator(s)
        g.flatten(msg)
        neq(s.getvalue(), 'Content-Type: foo\n\n')

    def test_no_start_boundary(self):
        # With no start boundary the whole body stays an opaque string.
        eq = self.ndiffAssertEqual
        msg = self._msgobj('msg_31.txt')
        eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain

message 1

--BOUNDARY
Content-Type: text/plain

message 2

--BOUNDARY--
""")

    def test_no_separating_blank_line(self):
        eq = self.ndiffAssertEqual
        msg = self._msgobj('msg_35.txt')
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting

counter to RFC 2822, there's no separating newline here
""")

    # test_defect_handling
    def test_lying_multipart(self):
        # Claims to be multipart but has no boundary -> two defects.
        msg = self._msgobj('msg_41.txt')
        self.assertTrue(hasattr(msg, 'defects'))
        self.assertEqual(len(msg.defects), 2)
        self.assertIsInstance(msg.defects[0],
                              errors.NoBoundaryInMultipartDefect)
        self.assertIsInstance(msg.defects[1],
                              errors.MultipartInvariantViolationDefect)

    # test_defect_handling
    def test_missing_start_boundary(self):
        outer = self._msgobj('msg_42.txt')
        # The message structure is:
        #
        # multipart/mixed
        #    text/plain
        #    message/rfc822
        #        multipart/mixed [*]
        #
        # [*] This message is missing its start boundary
        bad = outer.get_payload(1).get_payload(0)
        self.assertEqual(len(bad.defects), 1)
        self.assertIsInstance(bad.defects[0],
                              errors.StartBoundaryNotFoundDefect)

    # test_defect_handling
    def test_first_line_is_continuation_header(self):
        # A leading continuation line is recorded as a defect and skipped.
        eq = self.assertEqual
        m = ' Line 1\nSubject: test\n\nbody'
        msg = email.message_from_string(m)
        eq(msg.keys(), ['Subject'])
        eq(msg.get_payload(), 'body')
        eq(len(msg.defects), 1)
        self.assertDefectsEqual(msg.defects,
                                 [errors.FirstHeaderLineIsContinuationDefect])
        eq(msg.defects[0].line, ' Line 1\n')

    # test_defect_handling
    def test_missing_header_body_separator(self):
        # Our heuristic if we see a line that doesn't look like a header (no
        # leading whitespace but no ':') is to assume that the blank line that
        # separates the header from the body is missing, and to stop parsing
        # headers and start parsing the body.
        msg = self._str_msg('Subject: test\nnot a header\nTo: abc\n\nb\n')
        self.assertEqual(msg.keys(), ['Subject'])
        self.assertEqual(msg.get_payload(), 'not a header\nTo: abc\n\nb\n')
        self.assertDefectsEqual(msg.defects,
                                [errors.MissingHeaderBodySeparatorDefect])
# Test RFC 2047 header encoding and decoding
class TestRFC2047(TestEmailBase):
    """Test RFC 2047 encoded-word decoding (decode_header) and re-encoding
    (make_header / Header.encode), including the testcases listed at the
    end of RFC 2047 itself."""

    def test_rfc2047_multiline(self):
        # A folded header mixing encoded words and plain text; the
        # continuation line starts with folding whitespace.
        eq = self.assertEqual
        s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
 foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
        dh = decode_header(s)
        eq(dh, [
            (b'Re: ', None),
            (b'r\x8aksm\x9arg\x8cs', 'mac-iceland'),
            (b' baz foo bar ', None),
            (b'r\x8aksm\x9arg\x8cs', 'mac-iceland')])
        header = make_header(dh)
        eq(str(header),
           'Re: r\xe4ksm\xf6rg\xe5s baz foo bar r\xe4ksm\xf6rg\xe5s')
        self.ndiffAssertEqual(header.encode(maxlinelen=76), """\
Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar =?mac-iceland?q?r=8Aksm?=
 =?mac-iceland?q?=9Arg=8Cs?=""")

    def test_whitespace_keeper_unicode(self):
        # Whitespace between an encoded word and plain text is preserved.
        eq = self.assertEqual
        s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
        dh = decode_header(s)
        eq(dh, [(b'Andr\xe9', 'iso-8859-1'),
                (b' Pirard <pirard@dom.ain>', None)])
        header = str(make_header(dh))
        eq(header, 'Andr\xe9 Pirard <pirard@dom.ain>')

    def test_whitespace_keeper_unicode_2(self):
        eq = self.assertEqual
        s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
        dh = decode_header(s)
        eq(dh, [(b'The ', None), (b'quick brown fox', 'iso-8859-1'),
                (b' jumped over the ', None), (b'lazy dog', 'iso-8859-1')])
        hu = str(make_header(dh))
        eq(hu, 'The quick brown fox jumped over the lazy dog')

    def test_rfc2047_missing_whitespace(self):
        # Encoded words butted directly against plain text still decode.
        s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
        dh = decode_header(s)
        self.assertEqual(dh, [(b'Sm', None), (b'\xf6', 'iso-8859-1'),
                              (b'rg', None), (b'\xe5', 'iso-8859-1'),
                              (b'sbord', None)])

    def test_rfc2047_with_whitespace(self):
        s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
        dh = decode_header(s)
        self.assertEqual(dh, [(b'Sm ', None), (b'\xf6', 'iso-8859-1'),
                              (b' rg ', None), (b'\xe5', 'iso-8859-1'),
                              (b' sbord', None)])

    def test_rfc2047_B_bad_padding(self):
        # Missing/short base64 padding is repaired rather than rejected.
        s = '=?iso-8859-1?B?%s?='
        data = [                                # only test complete bytes
            ('dm==', b'v'), ('dm=', b'v'), ('dm', b'v'),
            ('dmk=', b'vi'), ('dmk', b'vi')
          ]
        for q, a in data:
            dh = decode_header(s % q)
            self.assertEqual(dh, [(a, 'iso-8859-1')])

    def test_rfc2047_Q_invalid_digits(self):
        # issue 10004.
        s = '=?iso-8659-1?Q?andr=e9=zz?='
        self.assertEqual(decode_header(s),
                        [(b'andr\xe9=zz', 'iso-8659-1')])

    def test_rfc2047_rfc2047_1(self):
        # 1st testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_2(self):
        # 2nd testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= b)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a', 'iso-8859-1'), (b' b)', None)])

    def test_rfc2047_rfc2047_3(self):
        # 3rd testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_4(self):
        # 4th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?=  =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_5a(self):
        # 5th testcase at end of rfc2047 newline is \r\n
        s = '(=?ISO-8859-1?Q?a?=\r\n    =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_5b(self):
        # 5th testcase at end of rfc2047 newline is \n
        s = '(=?ISO-8859-1?Q?a?=\n    =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_6(self):
        # 6th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a_b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a b', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_7(self):
        # 7th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-2?Q?_b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a', 'iso-8859-1'), (b' b', 'iso-8859-2'),
             (b')', None)])
        self.assertEqual(make_header(decode_header(s)).encode(), s.lower())
        self.assertEqual(str(make_header(decode_header(s))), '(a b)')

    def test_multiline_header(self):
        # A folded header whose first line is an encoded word.
        s = '=?windows-1252?q?=22M=FCller_T=22?=\r\n <T.Mueller@xxx.com>'
        self.assertEqual(decode_header(s),
            [(b'"M\xfcller T"', 'windows-1252'),
             (b'<T.Mueller@xxx.com>', None)])
        self.assertEqual(make_header(decode_header(s)).encode(),
                         ''.join(s.splitlines()))
        self.assertEqual(str(make_header(decode_header(s))),
                         '"Müller T" <T.Mueller@xxx.com>')
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
    def setUp(self):
        # msg_11.txt is a simple message/rfc822 fixture used by these tests.
        with openfile('msg_11.txt') as fp:
            self._text = fp.read()
    def test_type_error(self):
        # MIMEMessage requires a Message instance, not a plain string.
        self.assertRaises(TypeError, MIMEMessage, 'a plain string')
    def test_valid_argument(self):
        # Wrapping a Message yields a message/rfc822 whose payload is a
        # one-element list containing that very Message object.
        eq = self.assertEqual
        subject = 'A sub-message'
        m = Message()
        m['Subject'] = subject
        r = MIMEMessage(m)
        eq(r.get_content_type(), 'message/rfc822')
        payload = r.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        subpart = payload[0]
        self.assertIs(subpart, m)
        eq(subpart['subject'], subject)
    def test_bad_multipart(self):
        # A message/rfc822 may hold only one sub-message; attaching a
        # second raises MultipartConversionError.
        msg1 = Message()
        msg1['Subject'] = 'subpart 1'
        msg2 = Message()
        msg2['Subject'] = 'subpart 2'
        r = MIMEMessage(msg1)
        self.assertRaises(errors.MultipartConversionError, r.attach, msg2)
    def test_generate(self):
        # Flattening a message/rfc822 emits the enclosing headers followed
        # by the enclosed message's headers and body.
        # First craft the message to be encapsulated
        m = Message()
        m['Subject'] = 'An enclosed message'
        m.set_payload('Here is the body of the message.\n')
        r = MIMEMessage(m)
        r['Subject'] = 'The enclosing message'
        s = StringIO()
        g = Generator(s)
        g.flatten(r)
        self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message

Subject: An enclosed message

Here is the body of the message.
""")
    def test_parse_message_rfc822(self):
        # Parsing a message/rfc822 fixture yields a one-element list payload
        # containing the enclosed Message.
        eq = self.assertEqual
        msg = self._msgobj('msg_11.txt')
        eq(msg.get_content_type(), 'message/rfc822')
        payload = msg.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        submsg = payload[0]
        self.assertIsInstance(submsg, Message)
        eq(submsg['subject'], 'An enclosed message')
        eq(submsg.get_payload(), 'Here is the body of the message.\n')
    def test_dsn(self):
        eq = self.assertEqual
        # msg 16 is a Delivery Status Notification, see RFC 1894
        msg = self._msgobj('msg_16.txt')
        eq(msg.get_content_type(), 'multipart/report')
        self.assertTrue(msg.is_multipart())
        eq(len(msg.get_payload()), 3)
        # Subpart 1 is a text/plain, human readable section
        subpart = msg.get_payload(0)
        eq(subpart.get_content_type(), 'text/plain')
        eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:

  Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
  Date: Sun, 23 Sep 2001 20:10:55 -0700
  From: "Ian T. Henry" <henryi@oxy.edu>
  To: SoCal Raves <scr@socal-raves.org>
  Subject: [scr] yeah for Ians!!

Your message cannot be delivered to the following recipients:

  Recipient address: jangel1@cougar.noc.ucla.edu
  Reason: recipient reached disk quota

""")
        # Subpart 2 contains the machine parsable DSN information.  It
        # consists of two blocks of headers, represented by two nested Message
        # objects.
        subpart = msg.get_payload(1)
        eq(subpart.get_content_type(), 'message/delivery-status')
        eq(len(subpart.get_payload()), 2)
        # message/delivery-status should treat each block as a bunch of
        # headers, i.e. a bunch of Message objects.
        dsn1 = subpart.get_payload(0)
        self.assertIsInstance(dsn1, Message)
        eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
        eq(dsn1.get_param('dns', header='reporting-mta'), '')
        # Try a missing one <wink>
        eq(dsn1.get_param('nsd', header='reporting-mta'), None)
        dsn2 = subpart.get_payload(1)
        self.assertIsInstance(dsn2, Message)
        eq(dsn2['action'], 'failed')
        eq(dsn2.get_params(header='original-recipient'),
           [('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
        eq(dsn2.get_param('rfc822', header='final-recipient'), '')
        # Subpart 3 is the original message
        subpart = msg.get_payload(2)
        eq(subpart.get_content_type(), 'message/rfc822')
        payload = subpart.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        subsubpart = payload[0]
        self.assertIsInstance(subsubpart, Message)
        eq(subsubpart.get_content_type(), 'text/plain')
        eq(subsubpart['message-id'],
           '<002001c144a6$8752e060$56104586@oxy.edu>')
    def test_epilogue(self):
        # A multipart built with an explicit preamble and epilogue must
        # flatten to exactly the msg_21.txt fixture.
        eq = self.ndiffAssertEqual
        with openfile('msg_21.txt') as fp:
            text = fp.read()
        msg = Message()
        msg['From'] = 'aperson@dom.ain'
        msg['To'] = 'bperson@dom.ain'
        msg['Subject'] = 'Test'
        msg.preamble = 'MIME message'
        msg.epilogue = 'End of MIME message\n'
        msg1 = MIMEText('One')
        msg2 = MIMEText('Two')
        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
        msg.attach(msg1)
        msg.attach(msg2)
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), text)
    def test_no_nl_preamble(self):
        # A preamble without a trailing newline still appears immediately
        # before the first boundary line.
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'aperson@dom.ain'
        msg['To'] = 'bperson@dom.ain'
        msg['Subject'] = 'Test'
        msg.preamble = 'MIME message'
        msg.epilogue = ''
        msg1 = MIMEText('One')
        msg2 = MIMEText('Two')
        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
        msg.attach(msg1)
        msg.attach(msg2)
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"

MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

Two
--BOUNDARY--
""")
def test_default_type(self):
    # Parts inside a multipart/digest default to message/rfc822 even
    # without an explicit Content-Type header (parsed from msg_30.txt),
    # and their nested payloads default back to text/plain.
    eq = self.assertEqual
    with openfile('msg_30.txt') as fp:
        msg = email.message_from_file(fp)
    container1 = msg.get_payload(0)
    eq(container1.get_default_type(), 'message/rfc822')
    eq(container1.get_content_type(), 'message/rfc822')
    container2 = msg.get_payload(1)
    eq(container2.get_default_type(), 'message/rfc822')
    eq(container2.get_content_type(), 'message/rfc822')
    container1a = container1.get_payload(0)
    eq(container1a.get_default_type(), 'text/plain')
    eq(container1a.get_content_type(), 'text/plain')
    container2a = container2.get_payload(0)
    eq(container2a.get_default_type(), 'text/plain')
    eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
    # Same expectations as test_default_type, but msg_28.txt spells out
    # the container Content-Type explicitly; results must not differ.
    eq = self.assertEqual
    with openfile('msg_28.txt') as fp:
        msg = email.message_from_file(fp)
    container1 = msg.get_payload(0)
    eq(container1.get_default_type(), 'message/rfc822')
    eq(container1.get_content_type(), 'message/rfc822')
    container2 = msg.get_payload(1)
    eq(container2.get_default_type(), 'message/rfc822')
    eq(container2.get_content_type(), 'message/rfc822')
    container1a = container1.get_payload(0)
    eq(container1a.get_default_type(), 'text/plain')
    eq(container1a.get_content_type(), 'text/plain')
    container2a = container2.get_payload(0)
    eq(container2a.get_default_type(), 'text/plain')
    eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
    # Build (rather than parse) a multipart/digest and check that the
    # default content type survives deleting the explicit Content-Type
    # and MIME-Version headers from the subparts.
    eq = self.assertEqual
    # NOTE: despite the name, neq is ndiffAssertEqual -- an *equality*
    # assertion that shows a diff on failure.
    neq = self.ndiffAssertEqual
    # Set up container
    container = MIMEMultipart('digest', 'BOUNDARY')
    container.epilogue = ''
    # Set up subparts
    subpart1a = MIMEText('message 1\n')
    subpart2a = MIMEText('message 2\n')
    subpart1 = MIMEMessage(subpart1a)
    subpart2 = MIMEMessage(subpart2a)
    container.attach(subpart1)
    container.attach(subpart2)
    eq(subpart1.get_content_type(), 'message/rfc822')
    eq(subpart1.get_default_type(), 'message/rfc822')
    eq(subpart2.get_content_type(), 'message/rfc822')
    eq(subpart2.get_default_type(), 'message/rfc822')
    neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
    # Remove the explicit MIME headers; the computed content type must
    # still be message/rfc822 because of the digest default.
    del subpart1['content-type']
    del subpart1['mime-version']
    del subpart2['content-type']
    del subpart2['mime-version']
    eq(subpart1.get_content_type(), 'message/rfc822')
    eq(subpart1.get_default_type(), 'message/rfc822')
    eq(subpart2.get_content_type(), 'message/rfc822')
    eq(subpart2.get_default_type(), 'message/rfc822')
    neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
def test_mime_attachments_in_constructor(self):
    # Subparts passed via the MIMEMultipart(_subparts=...) constructor
    # argument end up attached, in order.
    eq = self.assertEqual
    text1 = MIMEText('')
    text2 = MIMEText('')
    msg = MIMEMultipart(_subparts=(text1, text2))
    eq(len(msg.get_payload()), 2)
    eq(msg.get_payload(0), text1)
    eq(msg.get_payload(1), text2)
def test_default_multipart_constructor(self):
    # A bare MIMEMultipart() is multipart by construction.
    msg = MIMEMultipart()
    self.assertTrue(msg.is_multipart())
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
    """parser -> model -> generator round-trips must reproduce the input."""

    # Line separator used in the expected payload values below.
    linesep = '\n'

    def _msgobj(self, filename):
        # Parse a test-data file; return both the Message and the raw text.
        with openfile(filename) as fp:
            data = fp.read()
        msg = email.message_from_string(data)
        return msg, data

    def _idempotent(self, msg, text, unixfrom=False):
        # Regenerate msg with header wrapping disabled (maxheaderlen=0)
        # and require byte-for-byte equality with the original text.
        eq = self.ndiffAssertEqual
        s = StringIO()
        g = Generator(s, maxheaderlen=0)
        g.flatten(msg, unixfrom=unixfrom)
        eq(text, s.getvalue())

    def test_parse_text_message(self):
        eq = self.assertEqual
        msg, text = self._msgobj('msg_01.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_params()[1], ('charset', 'us-ascii'))
        eq(msg.get_param('charset'), 'us-ascii')
        eq(msg.preamble, None)
        eq(msg.epilogue, None)
        self._idempotent(msg, text)

    def test_parse_untyped_message(self):
        # No Content-Type header at all: defaults to text/plain, no params.
        eq = self.assertEqual
        msg, text = self._msgobj('msg_03.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_params(), None)
        eq(msg.get_param('charset'), None)
        self._idempotent(msg, text)

    # The remaining tests each round-trip one representative fixture file.
    def test_simple_multipart(self):
        msg, text = self._msgobj('msg_04.txt')
        self._idempotent(msg, text)

    def test_MIME_digest(self):
        msg, text = self._msgobj('msg_02.txt')
        self._idempotent(msg, text)

    def test_long_header(self):
        msg, text = self._msgobj('msg_27.txt')
        self._idempotent(msg, text)

    def test_MIME_digest_with_part_headers(self):
        msg, text = self._msgobj('msg_28.txt')
        self._idempotent(msg, text)

    def test_mixed_with_image(self):
        msg, text = self._msgobj('msg_06.txt')
        self._idempotent(msg, text)

    def test_multipart_report(self):
        msg, text = self._msgobj('msg_05.txt')
        self._idempotent(msg, text)

    def test_dsn(self):
        msg, text = self._msgobj('msg_16.txt')
        self._idempotent(msg, text)

    def test_preamble_epilogue(self):
        msg, text = self._msgobj('msg_21.txt')
        self._idempotent(msg, text)

    def test_multipart_one_part(self):
        msg, text = self._msgobj('msg_23.txt')
        self._idempotent(msg, text)

    def test_multipart_no_parts(self):
        msg, text = self._msgobj('msg_24.txt')
        self._idempotent(msg, text)

    def test_no_start_boundary(self):
        msg, text = self._msgobj('msg_31.txt')
        self._idempotent(msg, text)

    def test_rfc2231_charset(self):
        msg, text = self._msgobj('msg_32.txt')
        self._idempotent(msg, text)

    def test_more_rfc2231_parameters(self):
        msg, text = self._msgobj('msg_33.txt')
        self._idempotent(msg, text)

    def test_text_plain_in_a_multipart_digest(self):
        msg, text = self._msgobj('msg_34.txt')
        self._idempotent(msg, text)

    def test_nested_multipart_mixeds(self):
        msg, text = self._msgobj('msg_12a.txt')
        self._idempotent(msg, text)

    def test_message_external_body_idempotent(self):
        msg, text = self._msgobj('msg_36.txt')
        self._idempotent(msg, text)

    def test_message_delivery_status(self):
        # unixfrom=True: the Unix-From envelope line must round-trip too.
        msg, text = self._msgobj('msg_43.txt')
        self._idempotent(msg, text, unixfrom=True)

    def test_message_signed_idempotent(self):
        msg, text = self._msgobj('msg_45.txt')
        self._idempotent(msg, text)

    def test_content_type(self):
        # Structural checks on a multipart/report DSN message.
        eq = self.assertEqual
        # Get a message object and reset the seek pointer for other tests
        msg, text = self._msgobj('msg_05.txt')
        eq(msg.get_content_type(), 'multipart/report')
        # Test the Content-Type: parameters
        params = {}
        for pk, pv in msg.get_params():
            params[pk] = pv
        eq(params['report-type'], 'delivery-status')
        eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
        eq(msg.preamble, 'This is a MIME-encapsulated message.' + self.linesep)
        eq(msg.epilogue, self.linesep)
        eq(len(msg.get_payload()), 3)
        # Make sure the subparts are what we expect
        msg1 = msg.get_payload(0)
        eq(msg1.get_content_type(), 'text/plain')
        eq(msg1.get_payload(), 'Yadda yadda yadda' + self.linesep)
        msg2 = msg.get_payload(1)
        eq(msg2.get_content_type(), 'text/plain')
        eq(msg2.get_payload(), 'Yadda yadda yadda' + self.linesep)
        msg3 = msg.get_payload(2)
        eq(msg3.get_content_type(), 'message/rfc822')
        self.assertIsInstance(msg3, Message)
        payload = msg3.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        msg4 = payload[0]
        self.assertIsInstance(msg4, Message)
        eq(msg4.get_payload(), 'Yadda yadda yadda' + self.linesep)

    def test_parser(self):
        eq = self.assertEqual
        msg, text = self._msgobj('msg_06.txt')
        # Check some of the outer headers
        eq(msg.get_content_type(), 'message/rfc822')
        # Make sure the payload is a list of exactly one sub-Message, and that
        # that submessage has a type of text/plain
        payload = msg.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        msg1 = payload[0]
        self.assertIsInstance(msg1, Message)
        eq(msg1.get_content_type(), 'text/plain')
        self.assertIsInstance(msg1.get_payload(), str)
        eq(msg1.get_payload(), self.linesep)
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
    """Grab-bag of package-level tests: the module factory functions,
    ``email.utils`` date/address helpers, Charset behavior, iterators,
    and Generator line-ending handling."""

    def test_message_from_string(self):
        with openfile('msg_01.txt') as fp:
            text = fp.read()
        msg = email.message_from_string(text)
        s = StringIO()
        # Don't wrap/continue long headers since we're trying to test
        # idempotency.
        g = Generator(s, maxheaderlen=0)
        g.flatten(msg)
        self.assertEqual(text, s.getvalue())

    def test_message_from_file(self):
        with openfile('msg_01.txt') as fp:
            text = fp.read()
            fp.seek(0)
            msg = email.message_from_file(fp)
        s = StringIO()
        # Don't wrap/continue long headers since we're trying to test
        # idempotency.
        g = Generator(s, maxheaderlen=0)
        g.flatten(msg)
        self.assertEqual(text, s.getvalue())

    def test_message_from_string_with_class(self):
        # The _class argument controls the factory used for every node
        # in the parse tree, not just the root.
        with openfile('msg_01.txt') as fp:
            text = fp.read()

        # Create a subclass
        class MyMessage(Message):
            pass

        msg = email.message_from_string(text, MyMessage)
        self.assertIsInstance(msg, MyMessage)
        # Try something more complicated
        with openfile('msg_02.txt') as fp:
            text = fp.read()
        msg = email.message_from_string(text, MyMessage)
        for subpart in msg.walk():
            self.assertIsInstance(subpart, MyMessage)

    def test_message_from_file_with_class(self):
        # Create a subclass
        class MyMessage(Message):
            pass

        with openfile('msg_01.txt') as fp:
            msg = email.message_from_file(fp, MyMessage)
        self.assertIsInstance(msg, MyMessage)
        # Try something more complicated
        with openfile('msg_02.txt') as fp:
            msg = email.message_from_file(fp, MyMessage)
        for subpart in msg.walk():
            self.assertIsInstance(subpart, MyMessage)

    def test_custom_message_does_not_require_arguments(self):
        # A Message subclass whose __init__ takes no extra arguments
        # must still be usable as a parser factory.
        class MyMessage(Message):
            def __init__(self):
                super().__init__()
        msg = self._str_msg("Subject: test\n\ntest", MyMessage)
        self.assertIsInstance(msg, MyMessage)

    def test__all__(self):
        # Pin the package's public API surface.
        module = __import__('email')
        self.assertEqual(sorted(module.__all__), [
            'base64mime', 'charset', 'encoders', 'errors', 'feedparser',
            'generator', 'header', 'iterators', 'message',
            'message_from_binary_file', 'message_from_bytes',
            'message_from_file', 'message_from_string', 'mime', 'parser',
            'quoprimime', 'utils',
            ])

    def test_formatdate(self):
        # Default formatdate() emits GMT, so compare against gmtime().
        now = time.time()
        self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
                         time.gmtime(now)[:6])

    def test_formatdate_localtime(self):
        now = time.time()
        self.assertEqual(
            utils.parsedate(utils.formatdate(now, localtime=True))[:6],
            time.localtime(now)[:6])

    def test_formatdate_usegmt(self):
        # usegmt=True spells the zone as literal "GMT" instead of "-0000".
        now = time.time()
        self.assertEqual(
            utils.formatdate(now, localtime=False),
            time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
        self.assertEqual(
            utils.formatdate(now, localtime=False, usegmt=True),
            time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))

    # parsedate and parsedate_tz will become deprecated interfaces someday
    def test_parsedate_returns_None_for_invalid_strings(self):
        self.assertIsNone(utils.parsedate(''))
        self.assertIsNone(utils.parsedate_tz(''))
        self.assertIsNone(utils.parsedate('0'))
        self.assertIsNone(utils.parsedate_tz('0'))
        self.assertIsNone(utils.parsedate('A Complete Waste of Time'))
        self.assertIsNone(utils.parsedate_tz('A Complete Waste of Time'))
        # Not a part of the spec, but this has historically worked:
        self.assertIsNone(utils.parsedate(None))
        self.assertIsNone(utils.parsedate_tz(None))

    def test_parsedate_compact(self):
        # The FWS after the comma is optional
        self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
                         utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))

    def test_parsedate_no_dayofweek(self):
        eq = self.assertEqual
        eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
           (2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))

    def test_parsedate_compact_no_dayofweek(self):
        eq = self.assertEqual
        eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
           (2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))

    def test_parsedate_no_space_before_positive_offset(self):
        self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26+0800'),
                         (2002, 4, 3, 14, 58, 26, 0, 1, -1, 28800))

    def test_parsedate_no_space_before_negative_offset(self):
        # Issue 1155362: we already handled '+' for this case.
        self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26-0800'),
                         (2002, 4, 3, 14, 58, 26, 0, 1, -1, -28800))

    def test_parsedate_accepts_time_with_dots(self):
        # Dotted time separators (13.47.26) are tolerated.
        eq = self.assertEqual
        eq(utils.parsedate_tz('5 Feb 2003 13.47.26 -0800'),
           (2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
        eq(utils.parsedate_tz('5 Feb 2003 13.47 -0800'),
           (2003, 2, 5, 13, 47, 0, 0, 1, -1, -28800))

    def test_parsedate_acceptable_to_time_functions(self):
        # The 9/10-tuples returned must interoperate with time.mktime,
        # time.localtime and time.strftime.
        eq = self.assertEqual
        timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
        t = int(time.mktime(timetup))
        eq(time.localtime(t)[:6], timetup[:6])
        eq(int(time.strftime('%Y', timetup)), 2003)
        timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
        t = int(time.mktime(timetup[:9]))
        eq(time.localtime(t)[:6], timetup[:6])
        eq(int(time.strftime('%Y', timetup[:9])), 2003)

    def test_mktime_tz(self):
        # The trailing tuple element is the UTC offset in seconds.
        self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
                                          -1, -1, -1, 0)), 0)
        self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
                                          -1, -1, -1, 1234)), -1234)

    def test_parsedate_y2k(self):
        """Test for parsing a date with a two-digit year.
        Parsing a date with a two-digit year should return the correct
        four-digit year. RFC822 allows two-digit years, but RFC2822 (which
        obsoletes RFC822) requires four-digit years.
        """
        self.assertEqual(utils.parsedate_tz('25 Feb 03 13:47:26 -0800'),
                         utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'))
        self.assertEqual(utils.parsedate_tz('25 Feb 71 13:47:26 -0800'),
                         utils.parsedate_tz('25 Feb 1971 13:47:26 -0800'))

    def test_parseaddr_empty(self):
        self.assertEqual(utils.parseaddr('<>'), ('', ''))
        self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')

    def test_noquote_dump(self):
        self.assertEqual(
            utils.formataddr(('A Silly Person', 'person@dom.ain')),
            'A Silly Person <person@dom.ain>')

    def test_escape_dump(self):
        # Display names containing specials get quoted on format and
        # unescaped on parse; the round-trip is lossless.
        self.assertEqual(
            utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
            r'"A (Very) Silly Person" <person@dom.ain>')
        self.assertEqual(
            utils.parseaddr(r'"A \(Very\) Silly Person" <person@dom.ain>'),
            ('A (Very) Silly Person', 'person@dom.ain'))
        a = r'A \(Special\) Person'
        b = 'person@dom.ain'
        self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))

    def test_escape_backslashes(self):
        self.assertEqual(
            utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
            r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
        a = r'Arthur \Backslash\ Foobar'
        b = 'person@dom.ain'
        self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))

    def test_quotes_unicode_names(self):
        # issue 1690608. email.utils.formataddr() should be rfc2047 aware.
        name = "H\u00e4ns W\u00fcrst"
        addr = 'person@dom.ain'
        utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
        latin1_quopri = "=?iso-8859-1?q?H=E4ns_W=FCrst?= <person@dom.ain>"
        self.assertEqual(utils.formataddr((name, addr)), utf8_base64)
        self.assertEqual(utils.formataddr((name, addr), 'iso-8859-1'),
                         latin1_quopri)

    def test_accepts_any_charset_like_object(self):
        # issue 1690608. email.utils.formataddr() should be rfc2047 aware.
        # Any object with a header_encode() method is accepted as charset.
        name = "H\u00e4ns W\u00fcrst"
        addr = 'person@dom.ain'
        utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
        foobar = "FOOBAR"

        class CharsetMock:
            def header_encode(self, string):
                return foobar

        mock = CharsetMock()
        mock_expected = "%s <%s>" % (foobar, addr)
        self.assertEqual(utils.formataddr((name, addr), mock), mock_expected)
        self.assertEqual(utils.formataddr((name, addr), Charset('utf-8')),
                         utf8_base64)

    def test_invalid_charset_like_object_raises_error(self):
        # issue 1690608. email.utils.formataddr() should be rfc2047 aware.
        name = "H\u00e4ns W\u00fcrst"
        addr = 'person@dom.ain'
        # A object without a header_encode method:
        bad_charset = object()
        self.assertRaises(AttributeError, utils.formataddr, (name, addr),
                          bad_charset)

    def test_unicode_address_raises_error(self):
        # issue 1690608. email.utils.formataddr() should be rfc2047 aware.
        # Non-ASCII in the *address* part is not encodable and must raise.
        addr = 'pers\u00f6n@dom.in'
        self.assertRaises(UnicodeError, utils.formataddr, (None, addr))
        self.assertRaises(UnicodeError, utils.formataddr, ("Name", addr))

    def test_name_with_dot(self):
        x = 'John X. Doe <jxd@example.com>'
        y = '"John X. Doe" <jxd@example.com>'
        a, b = ('John X. Doe', 'jxd@example.com')
        self.assertEqual(utils.parseaddr(x), (a, b))
        self.assertEqual(utils.parseaddr(y), (a, b))
        # formataddr() quotes the name if there's a dot in it
        self.assertEqual(utils.formataddr((a, b)), y)

    def test_parseaddr_preserves_quoted_pairs_in_addresses(self):
        # issue 10005. Note that in the third test the second pair of
        # backslashes is not actually a quoted pair because it is not inside a
        # comment or quoted string: the address being parsed has a quoted
        # string containing a quoted backslash, followed by 'example' and two
        # backslashes, followed by another quoted string containing a space and
        # the word 'example'. parseaddr copies those two backslashes
        # literally. Per rfc5322 this is not technically correct since a \ may
        # not appear in an address outside of a quoted string. It is probably
        # a sensible Postel interpretation, though.
        eq = self.assertEqual
        eq(utils.parseaddr('""example" example"@example.com'),
           ('', '""example" example"@example.com'))
        eq(utils.parseaddr('"\\"example\\" example"@example.com'),
           ('', '"\\"example\\" example"@example.com'))
        eq(utils.parseaddr('"\\\\"example\\\\" example"@example.com'),
           ('', '"\\\\"example\\\\" example"@example.com'))

    def test_parseaddr_preserves_spaces_in_local_part(self):
        # issue 9286. A normal RFC5322 local part should not contain any
        # folding white space, but legacy local parts can (they are a sequence
        # of atoms, not dotatoms). On the other hand we strip whitespace from
        # before the @ and around dots, on the assumption that the whitespace
        # around the punctuation is a mistake in what would otherwise be
        # an RFC5322 local part. Leading whitespace is, as usual, stripped
        # as well.
        # NOTE(review): the first two assertions below appear identical --
        # possibly whitespace was lost in transit (one case may have used a
        # doubled space); verify against the upstream source.
        self.assertEqual(('', "merwok wok@xample.com"),
                         utils.parseaddr("merwok wok@xample.com"))
        self.assertEqual(('', "merwok wok@xample.com"),
                         utils.parseaddr("merwok wok@xample.com"))
        self.assertEqual(('', "merwok wok@xample.com"),
                         utils.parseaddr(" merwok wok @xample.com"))
        self.assertEqual(('', 'merwok"wok" wok@xample.com'),
                         utils.parseaddr('merwok"wok" wok@xample.com'))
        self.assertEqual(('', 'merwok.wok.wok@xample.com'),
                         utils.parseaddr('merwok. wok . wok@xample.com'))

    def test_formataddr_does_not_quote_parens_in_quoted_string(self):
        addr = ("'foo@example.com' (foo@example.com)",
                'foo@example.com')
        addrstr = ('"\'foo@example.com\' '
                   '(foo@example.com)" <foo@example.com>')
        self.assertEqual(utils.parseaddr(addrstr), addr)
        self.assertEqual(utils.formataddr(addr), addrstr)

    def test_multiline_from_comment(self):
        # A display name folded across a continuation line is unfolded.
        x = """\
Foo
\tBar <foo@example.com>"""
        self.assertEqual(utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))

    def test_quote_dump(self):
        self.assertEqual(
            utils.formataddr(('A Silly; Person', 'person@dom.ain')),
            r'"A Silly; Person" <person@dom.ain>')

    def test_charset_richcomparisons(self):
        # Charset equality is case-insensitive on the charset name, in
        # both comparison directions.
        eq = self.assertEqual
        ne = self.assertNotEqual
        cset1 = Charset()
        cset2 = Charset()
        eq(cset1, 'us-ascii')
        eq(cset1, 'US-ASCII')
        eq(cset1, 'Us-AsCiI')
        eq('us-ascii', cset1)
        eq('US-ASCII', cset1)
        eq('Us-AsCiI', cset1)
        ne(cset1, 'usascii')
        ne(cset1, 'USASCII')
        ne(cset1, 'UsAsCiI')
        ne('usascii', cset1)
        ne('USASCII', cset1)
        ne('UsAsCiI', cset1)
        eq(cset1, cset2)
        eq(cset2, cset1)

    def test_getaddresses(self):
        eq = self.assertEqual
        eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
                               'Bud Person <bperson@dom.ain>']),
           [('Al Person', 'aperson@dom.ain'),
            ('Bud Person', 'bperson@dom.ain')])

    def test_getaddresses_nasty(self):
        # Malformed inputs degrade to empty/partial pairs instead of raising.
        eq = self.assertEqual
        eq(utils.getaddresses(['foo: ;']), [('', '')])
        eq(utils.getaddresses(
            ['[]*-- =~$']),
           [('', ''), ('', ''), ('', '*--')])
        eq(utils.getaddresses(
            ['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
           [('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])

    def test_getaddresses_embedded_comment(self):
        """Test proper handling of a nested comment"""
        eq = self.assertEqual
        addrs = utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
        eq(addrs[0][1], 'foo@bar.com')

    def test_utils_quote_unquote(self):
        # Backslashes and quotes in a filename parameter round-trip.
        eq = self.assertEqual
        msg = Message()
        msg.add_header('content-disposition', 'attachment',
                       filename='foo\\wacky"name')
        eq(msg.get_filename(), 'foo\\wacky"name')

    def test_get_body_encoding_with_bogus_charset(self):
        # Unknown charsets fall back to base64 body encoding.
        charset = Charset('not a charset')
        self.assertEqual(charset.get_body_encoding(), 'base64')

    def test_get_body_encoding_with_uppercase_charset(self):
        eq = self.assertEqual
        msg = Message()
        msg['Content-Type'] = 'text/plain; charset=UTF-8'
        eq(msg['content-type'], 'text/plain; charset=UTF-8')
        charsets = msg.get_charsets()
        eq(len(charsets), 1)
        eq(charsets[0], 'utf-8')
        charset = Charset(charsets[0])
        eq(charset.get_body_encoding(), 'base64')
        msg.set_payload(b'hello world', charset=charset)
        eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
        eq(msg.get_payload(decode=True), b'hello world')
        eq(msg['content-transfer-encoding'], 'base64')
        # Try another one
        msg = Message()
        msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
        charsets = msg.get_charsets()
        eq(len(charsets), 1)
        eq(charsets[0], 'us-ascii')
        charset = Charset(charsets[0])
        eq(charset.get_body_encoding(), encoders.encode_7or8bit)
        msg.set_payload('hello world', charset=charset)
        eq(msg.get_payload(), 'hello world')
        eq(msg['content-transfer-encoding'], '7bit')

    def test_charsets_case_insensitive(self):
        lc = Charset('us-ascii')
        uc = Charset('US-ASCII')
        self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())

    def test_partial_falls_inside_message_delivery_status(self):
        eq = self.ndiffAssertEqual
        # The Parser interface provides chunks of data to FeedParser in 8192
        # byte gulps. SF bug #1076485 found one of those chunks inside
        # message/delivery-status header block, which triggered an
        # unreadline() of NeedMoreData.
        msg = self._msgobj('msg_43.txt')
        sfp = StringIO()
        iterators._structure(msg, sfp)
        eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")

    def test_make_msgid_domain(self):
        self.assertEqual(
            email.utils.make_msgid(domain='testdomain-string')[-19:],
            '@testdomain-string>')

    def test_Generator_linend(self):
        # Issue 14645.
        # A \r\n source flattens to \n with the plain Generator.
        with openfile('msg_26.txt', newline='\n') as f:
            msgtxt = f.read()
        msgtxt_nl = msgtxt.replace('\r\n', '\n')
        msg = email.message_from_string(msgtxt)
        s = StringIO()
        g = email.generator.Generator(s)
        g.flatten(msg)
        self.assertEqual(s.getvalue(), msgtxt_nl)

    def test_BytesGenerator_linend(self):
        # Issue 14645.
        # linesep='\r\n' restores CRLF endings from an LF-normalized source.
        with openfile('msg_26.txt', newline='\n') as f:
            msgtxt = f.read()
        msgtxt_nl = msgtxt.replace('\r\n', '\n')
        msg = email.message_from_string(msgtxt_nl)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue().decode('ascii'), msgtxt)

    def test_BytesGenerator_linend_with_non_ascii(self):
        # Issue 14645.
        # Same as above but with a non-ASCII byte injected into the body.
        with openfile('msg_26.txt', 'rb') as f:
            msgtxt = f.read()
        msgtxt = msgtxt.replace(b'with attachment', b'fo\xf6')
        msgtxt_nl = msgtxt.replace(b'\r\n', b'\n')
        msg = email.message_from_bytes(msgtxt_nl)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue(), msgtxt)
# Test the iterator/generators
class TestIterators(TestEmailBase):
    """Tests for email.iterators and FeedParser's BufferedSubFile."""

    def test_body_line_iterator(self):
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        # First a simple non-multipart message
        msg = self._msgobj('msg_01.txt')
        it = iterators.body_line_iterator(msg)
        lines = list(it)
        eq(len(lines), 6)
        neq(EMPTYSTRING.join(lines), msg.get_payload())
        # Now a more complicated multipart
        msg = self._msgobj('msg_02.txt')
        it = iterators.body_line_iterator(msg)
        lines = list(it)
        eq(len(lines), 43)
        # msg_19.txt holds the expected flattened body of msg_02.txt.
        with openfile('msg_19.txt') as fp:
            neq(EMPTYSTRING.join(lines), fp.read())

    def test_typed_subpart_iterator(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_04.txt')
        it = iterators.typed_subpart_iterator(msg, 'text')
        lines = []
        subparts = 0
        for subpart in it:
            subparts += 1
            lines.append(subpart.get_payload())
        eq(subparts, 2)
        eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")

    def test_typed_subpart_iterator_default_type(self):
        # An untyped part matches ('text', 'plain') via the default type.
        eq = self.assertEqual
        msg = self._msgobj('msg_03.txt')
        it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
        lines = []
        subparts = 0
        for subpart in it:
            subparts += 1
            lines.append(subpart.get_payload())
        eq(subparts, 1)
        eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")

    def test_pushCR_LF(self):
        '''FeedParser BufferedSubFile.push() assumed it received complete
        line endings. A CR ending one push() followed by a LF starting
        the next push() added an empty line.
        '''
        # Each entry: (text to push, number of complete lines expected
        # to become readable after that push).
        imt = [
            ("a\r \n", 2),
            ("b", 0),
            ("c\n", 1),
            ("", 0),
            ("d\r\n", 1),
            ("e\r", 0),
            ("\nf", 1),
            ("\r\n", 1),
            ]
        from email.feedparser import BufferedSubFile, NeedMoreData
        bsf = BufferedSubFile()
        om = []
        nt = 0
        for il, n in imt:
            bsf.push(il)
            nt += n
            n1 = 0
            # Drain every complete line produced by this push.
            for ol in iter(bsf.readline, NeedMoreData):
                om.append(ol)
                n1 += 1
            self.assertEqual(n, n1)
        self.assertEqual(len(om), nt)
        # The concatenation of all output lines equals all pushed input.
        self.assertEqual(''.join([il for il, n in imt]), ''.join(om))

    def test_push_random(self):
        # Fuzz BufferedSubFile with random text pushed in small chunks;
        # the lines read back must equal splitlines() of the whole text.
        from email.feedparser import BufferedSubFile, NeedMoreData
        n = 10000
        chunksize = 5
        chars = 'abcd \t\r\n'
        s = ''.join(choice(chars) for i in range(n)) + '\n'
        target = s.splitlines(True)
        bsf = BufferedSubFile()
        lines = []
        for i in range(0, len(s), chunksize):
            chunk = s[i:i+chunksize]
            bsf.push(chunk)
            lines.extend(iter(bsf.readline, NeedMoreData))
        self.assertEqual(lines, target)
class TestFeedParsers(TestEmailBase):
    """Incremental-feed parsing: odd newlines, bad headers, long bodies."""

    def parse(self, chunks):
        # Feed each chunk to a FeedParser and return the parsed Message.
        from email.feedparser import FeedParser
        feedparser = FeedParser()
        for chunk in chunks:
            feedparser.feed(chunk)
        return feedparser.close()

    def test_empty_header_name_handled(self):
        # Issue 19996
        # A line starting with ':' (empty header name) is skipped without
        # breaking the headers around it.
        msg = self.parse("First: val\n: bad\nSecond: val")
        self.assertEqual(msg['First'], 'val')
        self.assertEqual(msg['Second'], 'val')

    def test_newlines(self):
        # \n, \r, and \r\n all terminate header lines; \x85 and \u2028
        # do NOT (they stay part of the header value).
        m = self.parse(['a:\nb:\rc:\r\nd:\n'])
        self.assertEqual(m.keys(), ['a', 'b', 'c', 'd'])
        m = self.parse(['a:\nb:\rc:\r\nd:'])
        self.assertEqual(m.keys(), ['a', 'b', 'c', 'd'])
        m = self.parse(['a:\rb', 'c:\n'])
        self.assertEqual(m.keys(), ['a', 'bc'])
        m = self.parse(['a:\r', 'b:\n'])
        self.assertEqual(m.keys(), ['a', 'b'])
        m = self.parse(['a:\r', '\nb:\n'])
        self.assertEqual(m.keys(), ['a', 'b'])
        m = self.parse(['a:\x85b:\u2028c:\n'])
        self.assertEqual(m.items(), [('a', '\x85'), ('b', '\u2028'), ('c', '')])
        m = self.parse(['a:\r', 'b:\x85', 'c:\n'])
        self.assertEqual(m.items(), [('a', ''), ('b', '\x85'), ('c', '')])

    def test_long_lines(self):
        # Expected peak memory use on 32-bit platform: 6*N*M bytes.
        M, N = 1000, 20000
        m = self.parse(['a:b\n\n'] + ['x'*M] * N)
        self.assertEqual(m.items(), [('a', 'b')])
        self.assertEqual(m.get_payload(), 'x'*M*N)
        m = self.parse(['a:b\r\r'] + ['x'*M] * N)
        self.assertEqual(m.items(), [('a', 'b')])
        self.assertEqual(m.get_payload(), 'x'*M*N)
        m = self.parse(['a:b\r\r'] + ['x'*M+'\x85'] * N)
        self.assertEqual(m.items(), [('a', 'b')])
        self.assertEqual(m.get_payload(), ('x'*M+'\x85')*N)
        m = self.parse(['a:\r', 'b: '] + ['x'*M] * N)
        self.assertEqual(m.items(), [('a', ''), ('b', 'x'*M*N)])
class TestParsers(TestEmailBase):
def test_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
with openfile('msg_02.txt') as fp:
msg = HeaderParser().parse(fp)
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertIsInstance(msg.get_payload(), str)
def test_bytes_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
with openfile('msg_02.txt', 'rb') as fp:
msg = email.parser.BytesHeaderParser().parse(fp)
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertIsInstance(msg.get_payload(), str)
self.assertIsInstance(msg.get_payload(decode=True), bytes)
def test_bytes_parser_does_not_close_file(self):
with openfile('msg_02.txt', 'rb') as fp:
email.parser.BytesParser().parse(fp)
self.assertFalse(fp.closed)
def test_bytes_parser_on_exception_does_not_close_file(self):
with openfile('msg_15.txt', 'rb') as fp:
bytesParser = email.parser.BytesParser
self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
bytesParser(policy=email.policy.strict).parse,
fp)
self.assertFalse(fp.closed)
def test_parser_does_not_close_file(self):
with openfile('msg_02.txt', 'r') as fp:
email.parser.Parser().parse(fp)
self.assertFalse(fp.closed)
def test_parser_on_exception_does_not_close_file(self):
with openfile('msg_15.txt', 'r') as fp:
parser = email.parser.Parser
self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
parser(policy=email.policy.strict).parse, fp)
self.assertFalse(fp.closed)
def test_whitespace_continuation(self):
eq = self.assertEqual
# This message contains a line after the Subject: header that has only
# whitespace, but it is not empty!
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_whitespace_continuation_last_header(self):
eq = self.assertEqual
# Like the previous test, but the subject line is the last
# header.
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_crlf_separation(self):
eq = self.assertEqual
with openfile('msg_26.txt', newline='\n') as fp:
msg = Parser().parse(fp)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'application/riscos')
def test_crlf_flatten(self):
# Using newline='\n' preserves the crlfs in this input file.
with openfile('msg_26.txt', newline='\n') as fp:
text = fp.read()
msg = email.message_from_string(text)
s = StringIO()
g = Generator(s)
g.flatten(msg, linesep='\r\n')
self.assertEqual(s.getvalue(), text)
maxDiff = None
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
with openfile('msg_28.txt') as fp:
msg = email.message_from_file(fp)
# Structure is:
# multipart/digest
# message/rfc822
# text/plain
# message/rfc822
# text/plain
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
    """Regression test from a bug report by Andrew McNamara: a message of
    just three header lines (one with an unterminated angle-addr) must
    still parse and expose the Date header."""
    header_lines = [
        'From: Andrew Person <aperson@dom.ain',
        'Subject: Test',
        'Date: Tue, 20 Aug 2002 16:43:45 +1000',
    ]
    parsed = email.message_from_string(NL.join(header_lines))
    self.assertEqual(parsed['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
def test_strip_line_feed_and_carriage_return_in_headers(self):
eq = self.assertEqual
# For [ 1002475 ] email message parser doesn't handle \r\n correctly
value1 = 'text'
value2 = 'more text'
m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
value1, value2)
msg = email.message_from_string(m)
eq(msg.get('Header'), value1)
eq(msg.get('Next-Header'), value2)
def test_rfc2822_header_syntax(self):
eq = self.assertEqual
m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg), 3)
eq(sorted(field for field in msg), ['!"#QUX;~', '>From', 'From'])
eq(msg.get_payload(), 'body')
def test_rfc2822_space_not_allowed_in_header(self):
eq = self.assertEqual
m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 0)
def test_rfc2822_one_character_header(self):
eq = self.assertEqual
m = 'A: first header\nB: second header\nCC: third header\n\nbody'
msg = email.message_from_string(m)
headers = msg.keys()
headers.sort()
eq(headers, ['A', 'B', 'CC'])
eq(msg.get_payload(), 'body')
def test_CRLFLF_at_end_of_part(self):
    # issue 5610: feedparser should not eat two chars from body part ending
    # with "\r\n\n".
    m = (
        "From: foo@bar.com\n"
        "To: baz\n"
        "Mime-Version: 1.0\n"
        "Content-Type: multipart/mixed; boundary=BOUNDARY\n"
        "\n"
        "--BOUNDARY\n"
        "Content-Type: text/plain\n"
        "\n"
        "body ending with CRLF newline\r\n"
        "\n"
        "--BOUNDARY--\n"
    )
    msg = email.message_from_string(m)
    # The \r\n must survive in the part payload; broken versions dropped
    # it along with the blank line preceding the closing boundary.
    self.assertTrue(msg.get_payload(0).get_payload().endswith('\r\n'))
class Test8BitBytesHandling(TestEmailBase):
    # In Python3 all input is string, but that doesn't work if the actual input
    # uses an 8bit transfer encoding.  To hack around that, in email 5.1 we
    # decode byte streams using the surrogateescape error handler, and
    # reconvert to binary at appropriate places if we detect surrogates.  This
    # doesn't allow us to transform headers with 8bit bytes (they get munged),
    # but it does allow us to parse and preserve them, and to decode body
    # parts that use an 8bit CTE.

    # Template message for the body-handling tests below.
    # NOTE(review): the blank header/body separator lines inside this and the
    # other message literals were restored by hand (lost in transit along
    # with all blank lines); confirm against upstream.
    bodytest_msg = textwrap.dedent("""\
        From: foo@bar.com
        To: baz
        Mime-Version: 1.0
        Content-Type: text/plain; charset={charset}
        Content-Transfer-Encoding: {cte}

        {bodyline}
        """)

    def test_known_8bit_CTE(self):
        # An 8bit body in a known charset decodes cleanly both as str and
        # (decode=True) as bytes.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='8bit',
                                     bodyline='pöstal').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), "pöstal\n")
        self.assertEqual(msg.get_payload(decode=True),
                         "pöstal\n".encode('utf-8'))

    def test_unknown_8bit_CTE(self):
        # With an unknown charset the str payload shows replacement
        # characters, but decode=True still yields the raw bytes.
        m = self.bodytest_msg.format(charset='notavalidcharset',
                                     cte='8bit',
                                     bodyline='pöstal').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), "p\uFFFD\uFFFDstal\n")
        self.assertEqual(msg.get_payload(decode=True),
                         "pöstal\n".encode('utf-8'))

    def test_8bit_in_quopri_body(self):
        # This is non-RFC compliant data...without 'decode' the library code
        # decodes the body using the charset from the headers, and because the
        # source byte really is utf-8 this works.  This is likely to fail
        # against real dirty data (ie: produce mojibake), but the data is
        # invalid anyway so it is as good a guess as any.  But this means that
        # this test just confirms the current behavior; that behavior is not
        # necessarily the best possible behavior.  With 'decode' it is
        # returning the raw bytes, so that test should be of correct behavior,
        # or at least produce the same result that email4 did.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='quoted-printable',
                                     bodyline='p=C3=B6stál').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), 'p=C3=B6stál\n')
        self.assertEqual(msg.get_payload(decode=True),
                         'pöstál\n'.encode('utf-8'))

    def test_invalid_8bit_in_non_8bit_cte_uses_replace(self):
        # This is similar to the previous test, but proves that if the 8bit
        # byte is undecodeable in the specified charset, it gets replaced
        # by the unicode 'unknown' character.  Again, this may or may not
        # be the ideal behavior.  Note that if decode=False none of the
        # decoders will get involved, so this is the only test we need
        # for this behavior.
        m = self.bodytest_msg.format(charset='ascii',
                                     cte='quoted-printable',
                                     bodyline='p=C3=B6stál').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), 'p=C3=B6st\uFFFD\uFFFDl\n')
        self.assertEqual(msg.get_payload(decode=True),
                         'pöstál\n'.encode('utf-8'))

    # test_defect_handling:test_invalid_chars_in_base64_payload
    def test_8bit_in_base64_body(self):
        # If we get 8bit bytes in a base64 body, we can just ignore them
        # as being outside the base64 alphabet and decode anyway.  But
        # we register a defect.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='base64',
                                     bodyline='cMO2c3RhbAá=').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(decode=True),
                         'pöstal'.encode('utf-8'))
        self.assertIsInstance(msg.defects[0],
                              errors.InvalidBase64CharactersDefect)

    def test_8bit_in_uuencode_body(self):
        # Sticking an 8bit byte in a uuencode block makes it undecodable by
        # normal means, so the block is returned undecoded, but as bytes.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='uuencode',
                                     bodyline='<,.V<W1A; á ').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(decode=True),
                         '<,.V<W1A; á \n'.encode('utf-8'))

    # Each entry pairs a raw (8bit) header line with the (field, value)
    # the library is expected to produce for it on 7bit-clean output.
    headertest_headers = (
        ('From: foo@bar.com', ('From', 'foo@bar.com')),
        ('To: báz', ('To', '=?unknown-8bit?q?b=C3=A1z?=')),
        ('Subject: Maintenant je vous présente mon collègue, le pouf célèbre\n'
            '\tJean de Baddie',
         ('Subject', '=?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
            'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=\n'
            ' =?unknown-8bit?q?_Jean_de_Baddie?=')),
        ('From: göst', ('From', '=?unknown-8bit?b?Z8O2c3Q=?=')),
        )
    # Note: deliberately no blank separator line before the body, so the
    # parser sees a missing header/body separator.
    headertest_msg = ('\n'.join([src for (src, _) in headertest_headers]) +
                      '\nYes, they are flying.\n').encode('utf-8')

    def test_get_8bit_header(self):
        # 8bit bytes in a header become replacement characters when the
        # header is converted to str.
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertEqual(str(msg.get('to')), 'b\uFFFD\uFFFDz')
        self.assertEqual(str(msg['to']), 'b\uFFFD\uFFFDz')

    def test_print_8bit_headers(self):
        # str(msg) re-encodes unknown-8bit headers as RFC 2047 words.
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertEqual(str(msg),
                         textwrap.dedent("""\
                             From: {}
                             To: {}
                             Subject: {}
                             From: {}

                             Yes, they are flying.
                             """).format(*[expected[1] for (_, expected) in
                                           self.headertest_headers]))

    def test_values_with_8bit_headers(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertListEqual([str(x) for x in msg.values()],
                             ['foo@bar.com',
                              'b\uFFFD\uFFFDz',
                              'Maintenant je vous pr\uFFFD\uFFFDsente mon '
                                  'coll\uFFFD\uFFFDgue, le pouf '
                                  'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
                                  '\tJean de Baddie',
                              "g\uFFFD\uFFFDst"])

    def test_items_with_8bit_headers(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertListEqual([(str(x), str(y)) for (x, y) in msg.items()],
                             [('From', 'foo@bar.com'),
                              ('To', 'b\uFFFD\uFFFDz'),
                              ('Subject', 'Maintenant je vous '
                                          'pr\uFFFD\uFFFDsente '
                                          'mon coll\uFFFD\uFFFDgue, le pouf '
                                          'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
                                          '\tJean de Baddie'),
                              ('From', 'g\uFFFD\uFFFDst')])

    def test_get_all_with_8bit_headers(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertListEqual([str(x) for x in msg.get_all('from')],
                             ['foo@bar.com',
                              'g\uFFFD\uFFFDst'])

    def test_get_content_type_with_8bit(self):
        # 8bit bytes inside the content type are replaced, not dropped.
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/pl\xA7in; charset=utf-8
            """).encode('latin-1'))
        self.assertEqual(msg.get_content_type(), "text/pl\uFFFDin")
        self.assertEqual(msg.get_content_maintype(), "text")
        self.assertEqual(msg.get_content_subtype(), "pl\uFFFDin")

    # test_headerregistry.TestContentTypeHeader.non_ascii_in_params
    def test_get_params_with_8bit(self):
        msg = email.message_from_bytes(
            'X-Header: foo=\xa7ne; b\xa7r=two; baz=three\n'.encode('latin-1'))
        self.assertEqual(msg.get_params(header='x-header'),
                         [('foo', '\uFFFDne'), ('b\uFFFDr', 'two'), ('baz', 'three')])
        self.assertEqual(msg.get_param('Foo', header='x-header'), '\uFFFdne')
        # XXX: someday you might be able to get 'b\xa7r', for now you can't.
        self.assertEqual(msg.get_param('b\xa7r', header='x-header'), None)

    # test_headerregistry.TestContentTypeHeader.non_ascii_in_rfc2231_value
    def test_get_rfc2231_params_with_8bit(self):
        # NOTE(review): leading space on the folded 'title*' line restored
        # by hand (required for header folding); confirm against upstream.
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/plain; charset=us-ascii;
             title*=us-ascii'en'This%20is%20not%20f\xa7n"""
            ).encode('latin-1'))
        self.assertEqual(msg.get_param('title'),
                         ('us-ascii', 'en', 'This is not f\uFFFDn'))

    def test_set_rfc2231_params_with_8bit(self):
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/plain; charset=us-ascii;
             title*=us-ascii'en'This%20is%20not%20f\xa7n"""
            ).encode('latin-1'))
        msg.set_param('title', 'test')
        self.assertEqual(msg.get_param('title'), 'test')

    def test_del_rfc2231_params_with_8bit(self):
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/plain; charset=us-ascii;
             title*=us-ascii'en'This%20is%20not%20f\xa7n"""
            ).encode('latin-1'))
        msg.del_param('title')
        self.assertEqual(msg.get_param('title'), None)
        self.assertEqual(msg.get_content_maintype(), 'text')

    def test_get_payload_with_8bit_cte_header(self):
        # An unintelligible CTE means the payload can't be decoded, so
        # decode=True falls back to the raw bytes.
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Transfer-Encoding: b\xa7se64
            Content-Type: text/plain; charset=latin-1

            payload
            """).encode('latin-1'))
        self.assertEqual(msg.get_payload(), 'payload\n')
        self.assertEqual(msg.get_payload(decode=True), b'payload\n')

    # A fully 8bit message (headers and body) for generator round-trips.
    non_latin_bin_msg = textwrap.dedent("""\
        From: foo@bar.com
        To: báz
        Subject: Maintenant je vous présente mon collègue, le pouf célèbre
        \tJean de Baddie
        Mime-Version: 1.0
        Content-Type: text/plain; charset="utf-8"
        Content-Transfer-Encoding: 8bit

        Да, они летят.
        """).encode('utf-8')

    def test_bytes_generator(self):
        # BytesGenerator reproduces the raw 8bit input byte for byte.
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = BytesIO()
        email.generator.BytesGenerator(out).flatten(msg)
        self.assertEqual(out.getvalue(), self.non_latin_bin_msg)

    def test_bytes_generator_handles_None_body(self):
        #Issue 11019
        msg = email.message.Message()
        out = BytesIO()
        email.generator.BytesGenerator(out).flatten(msg)
        self.assertEqual(out.getvalue(), b"\n")

    # The same message as it must appear after 7bit-clean re-encoding, with
    # the over-long Subject folded across three lines (continuations carry
    # one leading space).
    non_latin_bin_msg_as7bit_wrapped = textwrap.dedent("""\
        From: foo@bar.com
        To: =?unknown-8bit?q?b=C3=A1z?=
        Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_coll=C3=A8gue?=
         =?unknown-8bit?q?=2C_le_pouf_c=C3=A9l=C3=A8bre?=
         =?unknown-8bit?q?_Jean_de_Baddie?=
        Mime-Version: 1.0
        Content-Type: text/plain; charset="utf-8"
        Content-Transfer-Encoding: base64

        0JTQsCwg0L7QvdC4INC70LXRgtGP0YIuCg==
        """)

    def test_generator_handles_8bit(self):
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = StringIO()
        email.generator.Generator(out).flatten(msg)
        self.assertEqual(out.getvalue(), self.non_latin_bin_msg_as7bit_wrapped)

    def test_str_generator_should_not_mutate_msg_when_handling_8bit(self):
        # Flattening through the str Generator must not change what the
        # BytesGenerator subsequently produces for the same message.
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = BytesIO()
        BytesGenerator(out).flatten(msg)
        orig_value = out.getvalue()
        Generator(StringIO()).flatten(msg) # Should not mutate msg!
        out = BytesIO()
        BytesGenerator(out).flatten(msg)
        self.assertEqual(out.getvalue(), orig_value)

    def test_bytes_generator_with_unix_from(self):
        # The unixfrom contains a current date, so we can't check it
        # literally.  Just make sure the first word is 'From' and the
        # rest of the message matches the input.
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = BytesIO()
        email.generator.BytesGenerator(out).flatten(msg, unixfrom=True)
        lines = out.getvalue().split(b'\n')
        self.assertEqual(lines[0].split()[0], b'From')
        self.assertEqual(b'\n'.join(lines[1:]), self.non_latin_bin_msg)

    # Same 7bit form but with the Subject on a single (unwrapped) line,
    # replacing the first two Subject lines of the wrapped version; the
    # third (continuation) line is kept as-is.
    non_latin_bin_msg_as7bit = non_latin_bin_msg_as7bit_wrapped.split('\n')
    non_latin_bin_msg_as7bit[2:4] = [
        'Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
            'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=']
    non_latin_bin_msg_as7bit = '\n'.join(non_latin_bin_msg_as7bit)

    def test_message_from_binary_file(self):
        fn = 'test.msg'
        self.addCleanup(unlink, fn)
        with open(fn, 'wb') as testfile:
            testfile.write(self.non_latin_bin_msg)
        with open(fn, 'rb') as testfile:
            m = email.parser.BytesParser().parse(testfile)
        self.assertEqual(str(m), self.non_latin_bin_msg_as7bit)

    # A latin-1 8bit message and its expected quoted-printable 7bit form.
    latin_bin_msg = textwrap.dedent("""\
        From: foo@bar.com
        To: Dinsdale
        Subject: Nudge nudge, wink, wink
        Mime-Version: 1.0
        Content-Type: text/plain; charset="latin-1"
        Content-Transfer-Encoding: 8bit

        oh là là, know what I mean, know what I mean?
        """).encode('latin-1')

    latin_bin_msg_as7bit = textwrap.dedent("""\
        From: foo@bar.com
        To: Dinsdale
        Subject: Nudge nudge, wink, wink
        Mime-Version: 1.0
        Content-Type: text/plain; charset="iso-8859-1"
        Content-Transfer-Encoding: quoted-printable

        oh l=E0 l=E0, know what I mean, know what I mean?
        """)

    def test_string_generator_reencodes_to_quopri_when_appropriate(self):
        m = email.message_from_bytes(self.latin_bin_msg)
        self.assertEqual(str(m), self.latin_bin_msg_as7bit)

    def test_decoded_generator_emits_unicode_body(self):
        m = email.message_from_bytes(self.latin_bin_msg)
        out = StringIO()
        email.generator.DecodedGenerator(out).flatten(m)
        #DecodedHeader output contains an extra blank line compared
        #to the input message.  RDM: not sure if this is a bug or not,
        #but it is not specific to the 8bit->7bit conversion.
        self.assertEqual(out.getvalue(),
                         self.latin_bin_msg.decode('latin-1')+'\n')

    def test_bytes_feedparser(self):
        # Feeding the bytes in 10-byte chunks must give the same result
        # as parsing them all at once.
        bfp = email.feedparser.BytesFeedParser()
        for i in range(0, len(self.latin_bin_msg), 10):
            bfp.feed(self.latin_bin_msg[i:i+10])
        m = bfp.close()
        self.assertEqual(str(m), self.latin_bin_msg_as7bit)

    def test_crlf_flatten(self):
        # Bytes round-trip of a CRLF message (cf. the str version above).
        with openfile('msg_26.txt', 'rb') as fp:
            text = fp.read()
        msg = email.message_from_bytes(text)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue(), text)

    def test_8bit_multipart(self):
        # Issue 11605
        # NOTE(review): the blank separator lines and the leading whitespace
        # on the folded 'boundary=' header line were restored by hand (they
        # are required for MIME parsing); confirm the exact body text and
        # blank-line placement against upstream.
        source = textwrap.dedent("""\
            Date: Fri, 18 Mar 2011 17:15:43 +0100
            To: foo@example.com
            From: foodwatch-Newsletter <bar@example.com>
            Subject: Aktuelles zu Japan, Klonfleisch und Smiley-System
            Message-ID: <76a486bee62b0d200f33dc2ca08220ad@localhost.localdomain>
            MIME-Version: 1.0
            Content-Type: multipart/alternative;
             boundary="b1_76a486bee62b0d200f33dc2ca08220ad"

            --b1_76a486bee62b0d200f33dc2ca08220ad
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit

            Guten Tag, ,

            mit großer Betroffenheit verfolgen auch wir im foodwatch-Team die
            Nachrichten aus Japan.

            --b1_76a486bee62b0d200f33dc2ca08220ad
            Content-Type: text/html; charset="utf-8"
            Content-Transfer-Encoding: 8bit

            <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
            "http://www.w3.org/TR/html4/loose.dtd">
            <html lang="de">
            <head>
            <title>foodwatch - Newsletter</title>
            </head>
            <body>
            <p>mit großer Betroffenheit verfolgen auch wir im foodwatch-Team
            die Nachrichten aus Japan.</p>
            </body>
            </html>
            --b1_76a486bee62b0d200f33dc2ca08220ad--
            """).encode('utf-8')
        msg = email.message_from_bytes(source)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg)
        self.assertEqual(s.getvalue(), source)

    def test_bytes_generator_b_encoding_linesep(self):
        # Issue 14062: b encoding was tacking on an extra \n.
        m = Message()
        # This has enough non-ascii that it should always end up b encoded.
        m['Subject'] = Header('žluťoučký kůň')
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(m, linesep='\r\n')
        self.assertEqual(
            s.getvalue(),
            b'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')

    def test_generator_b_encoding_linesep(self):
        # Since this broke in ByteGenerator, test Generator for completeness.
        m = Message()
        # This has enough non-ascii that it should always end up b encoded.
        m['Subject'] = Header('žluťoučký kůň')
        s = StringIO()
        g = email.generator.Generator(s)
        g.flatten(m, linesep='\r\n')
        self.assertEqual(
            s.getvalue(),
            'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')

    # Show full diffs for the message comparisons in this class.
    maxDiff = None
class BaseTestBytesGeneratorIdempotent:
    # Mixin for the bytes-based idempotency tests: subclasses supply
    # linesep / blinesep / normalize_linesep_regex and inherit the actual
    # test methods from TestIdempotent, whose _msgobj/_idempotent hooks
    # are overridden here to work on bytes.

    maxDiff = None

    def _msgobj(self, filename):
        # Load the sample message as bytes, normalizing its line endings
        # to this class's convention before parsing.
        with openfile(filename, 'rb') as fp:
            data = fp.read()
        data = self.normalize_linesep_regex.sub(self.blinesep, data)
        msg = email.message_from_bytes(data)
        return msg, data

    def _idempotent(self, msg, data, unixfrom=False):
        # Flattening the parsed message with the same linesep must
        # reproduce the input bytes exactly.
        b = BytesIO()
        g = email.generator.BytesGenerator(b, maxheaderlen=0)
        g.flatten(msg, unixfrom=unixfrom, linesep=self.linesep)
        self.assertEqual(data, b.getvalue())
class TestBytesGeneratorIdempotentNL(BaseTestBytesGeneratorIdempotent,
                                     TestIdempotent):
    # Idempotency tests with \n line endings: any CRLF in the input
    # samples is normalized to bare LF first.
    linesep = '\n'
    blinesep = b'\n'
    normalize_linesep_regex = re.compile(br'\r\n')
class TestBytesGeneratorIdempotentCRLF(BaseTestBytesGeneratorIdempotent,
                                       TestIdempotent):
    # Idempotency tests with \r\n line endings: any bare LF in the input
    # samples (negative lookbehind for \r) is converted to CRLF first.
    linesep = '\r\n'
    blinesep = b'\r\n'
    normalize_linesep_regex = re.compile(br'(?<!\r)\n')
class TestBase64(unittest.TestCase):
    """Tests for the email.base64mime helper module."""

    def test_len(self):
        # header_length must agree with the length of an un-wrapped body
        # encoding of the same payload.
        self.assertEqual(
            base64mime.header_length('hello'),
            len(base64mime.body_encode(b'hello', eol='')))
        # Base64 emits 4 output characters per 3 input bytes, rounded up.
        for size in range(15):
            expected = (size + 2) // 3 * 4
            self.assertEqual(base64mime.header_length('x' * size), expected)

    def test_decode(self):
        self.assertEqual(base64mime.decode(''), b'')
        self.assertEqual(base64mime.decode('aGVsbG8='), b'hello')

    def test_encode(self):
        encode = base64mime.body_encode
        self.assertEqual(encode(b''), b'')
        self.assertEqual(encode(b'hello'), 'aGVsbG8=\n')
        # Test the binary flag
        self.assertEqual(encode(b'hello\n'), 'aGVsbG8K\n')
        # Test the maxlinelen arg
        self.assertEqual(encode(b'xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
        # Test the eol argument
        self.assertEqual(encode(b'xxxx ' * 20, maxlinelen=40, eol='\r\n'),
                         """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")

    def test_header_encode(self):
        he = base64mime.header_encode
        self.assertEqual(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
        self.assertEqual(he('hello\r\nworld'),
                         '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
        self.assertEqual(he('hello\nworld'),
                         '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
        # Test the charset option
        self.assertEqual(he('hello', charset='iso-8859-2'),
                         '=?iso-8859-2?b?aGVsbG8=?=')
        self.assertEqual(he('hello\nworld'),
                         '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
class TestQuopri(unittest.TestCase):
    """Tests for the email.quoprimime helper module."""

    def setUp(self):
        # Set of characters (as byte integers) that don't need to be encoded
        # in headers.
        self.hlit = list(chain(
            range(ord('a'), ord('z') + 1),
            range(ord('A'), ord('Z') + 1),
            range(ord('0'), ord('9') + 1),
            (c for c in b'!*+-/')))
        # Set of characters (as byte integers) that do need to be encoded in
        # headers.
        self.hnon = [c for c in range(256) if c not in self.hlit]
        assert len(self.hlit) + len(self.hnon) == 256
        # Set of characters (as byte integers) that don't need to be encoded
        # in bodies.
        self.blit = list(range(ord(' '), ord('~') + 1))
        self.blit.append(ord('\t'))
        self.blit.remove(ord('='))
        # Set of characters (as byte integers) that do need to be encoded in
        # bodies.
        self.bnon = [c for c in range(256) if c not in self.blit]
        assert len(self.blit) + len(self.bnon) == 256

    def test_quopri_header_check(self):
        for c in self.hlit:
            self.assertFalse(quoprimime.header_check(c),
                             'Should not be header quopri encoded: %s' % chr(c))
        for c in self.hnon:
            self.assertTrue(quoprimime.header_check(c),
                            'Should be header quopri encoded: %s' % chr(c))

    def test_quopri_body_check(self):
        for c in self.blit:
            self.assertFalse(quoprimime.body_check(c),
                             'Should not be body quopri encoded: %s' % chr(c))
        for c in self.bnon:
            self.assertTrue(quoprimime.body_check(c),
                            'Should be body quopri encoded: %s' % chr(c))

    def test_header_quopri_len(self):
        eq = self.assertEqual
        eq(quoprimime.header_length(b'hello'), 5)
        # RFC 2047 chrome is not included in header_length().
        eq(len(quoprimime.header_encode(b'hello', charset='xxx')),
           quoprimime.header_length(b'hello') +
           # =?xxx?q?...?= means 10 extra characters
           10)
        eq(quoprimime.header_length(b'h@e@l@l@o@'), 20)
        # RFC 2047 chrome is not included in header_length().
        eq(len(quoprimime.header_encode(b'h@e@l@l@o@', charset='xxx')),
           quoprimime.header_length(b'h@e@l@l@o@') +
           # =?xxx?q?...?= means 10 extra characters
           10)
        for c in self.hlit:
            eq(quoprimime.header_length(bytes([c])), 1,
               'expected length 1 for %r' % chr(c))
        for c in self.hnon:
            # Space is special; it's encoded to _
            if c == ord(' '):
                continue
            eq(quoprimime.header_length(bytes([c])), 3,
               'expected length 3 for %r' % chr(c))
        eq(quoprimime.header_length(b' '), 1)

    def test_body_quopri_len(self):
        eq = self.assertEqual
        for c in self.blit:
            eq(quoprimime.body_length(bytes([c])), 1)
        for c in self.bnon:
            eq(quoprimime.body_length(bytes([c])), 3)

    def test_quote_unquote_idempotent(self):
        # unquote(quote(c)) must round-trip every single character.
        for x in range(256):
            c = chr(x)
            self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)

    def _test_header_encode(self, header, expected_encoded_header, charset=None):
        # Helper: encode `header` (optionally with a charset) and compare.
        if charset is None:
            encoded_header = quoprimime.header_encode(header)
        else:
            encoded_header = quoprimime.header_encode(header, charset)
        self.assertEqual(encoded_header, expected_encoded_header)

    def test_header_encode_null(self):
        self._test_header_encode(b'', '')

    def test_header_encode_one_word(self):
        self._test_header_encode(b'hello', '=?iso-8859-1?q?hello?=')

    def test_header_encode_two_lines(self):
        self._test_header_encode(b'hello\nworld',
                                 '=?iso-8859-1?q?hello=0Aworld?=')

    def test_header_encode_non_ascii(self):
        self._test_header_encode(b'hello\xc7there',
                                 '=?iso-8859-1?q?hello=C7there?=')

    def test_header_encode_alt_charset(self):
        self._test_header_encode(b'hello', '=?iso-8859-2?q?hello?=',
                                 charset='iso-8859-2')

    def _test_header_decode(self, encoded_header, expected_decoded_header):
        # Helper: decode an RFC 2047 q-encoded header body and compare.
        decoded_header = quoprimime.header_decode(encoded_header)
        self.assertEqual(decoded_header, expected_decoded_header)

    def test_header_decode_null(self):
        self._test_header_decode('', '')

    def test_header_decode_one_word(self):
        self._test_header_decode('hello', 'hello')

    def test_header_decode_two_lines(self):
        self._test_header_decode('hello=0Aworld', 'hello\nworld')

    def test_header_decode_non_ascii(self):
        self._test_header_decode('hello=C7there', 'hello\xc7there')

    def test_header_decode_re_bug_18380(self):
        # Issue 18380: Call re.sub with a positional argument for flags in the wrong position
        self.assertEqual(quoprimime.header_decode('=30' * 257), '0' * 257)

    def _test_decode(self, encoded, expected_decoded, eol=None):
        # Helper: pass eol through only when given so decode()'s default
        # is also exercised.
        if eol is None:
            decoded = quoprimime.decode(encoded)
        else:
            decoded = quoprimime.decode(encoded, eol=eol)
        self.assertEqual(decoded, expected_decoded)

    def test_decode_null_word(self):
        self._test_decode('', '')

    def test_decode_null_line_null_word(self):
        self._test_decode('\r\n', '\n')

    def test_decode_one_word(self):
        self._test_decode('hello', 'hello')

    def test_decode_one_word_eol(self):
        self._test_decode('hello', 'hello', eol='X')

    def test_decode_one_line(self):
        self._test_decode('hello\r\n', 'hello\n')

    def test_decode_one_line_lf(self):
        self._test_decode('hello\n', 'hello\n')

    def test_decode_one_line_cr(self):
        self._test_decode('hello\r', 'hello\n')

    def test_decode_one_line_nl(self):
        self._test_decode('hello\n', 'helloX', eol='X')

    def test_decode_one_line_crnl(self):
        self._test_decode('hello\r\n', 'helloX', eol='X')

    def test_decode_one_line_one_word(self):
        self._test_decode('hello\r\nworld', 'hello\nworld')

    def test_decode_one_line_one_word_eol(self):
        self._test_decode('hello\r\nworld', 'helloXworld', eol='X')

    def test_decode_two_lines(self):
        self._test_decode('hello\r\nworld\r\n', 'hello\nworld\n')

    def test_decode_two_lines_eol(self):
        self._test_decode('hello\r\nworld\r\n', 'helloXworldX', eol='X')

    def test_decode_one_long_line(self):
        self._test_decode('Spam' * 250, 'Spam' * 250)

    def test_decode_one_space(self):
        # Trailing whitespace on a line is dropped on decode.
        self._test_decode(' ', '')

    def test_decode_multiple_spaces(self):
        self._test_decode(' ' * 5, '')

    def test_decode_one_line_trailing_spaces(self):
        self._test_decode('hello \r\n', 'hello\n')

    def test_decode_two_lines_trailing_spaces(self):
        self._test_decode('hello \r\nworld \r\n', 'hello\nworld\n')

    def test_decode_quoted_word(self):
        self._test_decode('=22quoted=20words=22', '"quoted words"')

    def test_decode_uppercase_quoting(self):
        self._test_decode('ab=CD=EF', 'ab\xcd\xef')

    def test_decode_lowercase_quoting(self):
        self._test_decode('ab=cd=ef', 'ab\xcd\xef')

    def test_decode_soft_line_break(self):
        # A '=' at end of line is a soft break: the lines are joined.
        self._test_decode('soft line=\r\nbreak', 'soft linebreak')

    def test_decode_false_quoting(self):
        # '=' not followed by two hex digits is passed through literally.
        self._test_decode('A=1,B=A ==> A+B==2', 'A=1,B=A ==> A+B==2')

    def _test_encode(self, body, expected_encoded_body, maxlinelen=None, eol=None):
        # Helper: pass maxlinelen/eol through only when given so the
        # defaults of body_encode itself are also exercised.
        kwargs = {}
        if maxlinelen is None:
            # Use body_encode's default.
            maxlinelen = 76
        else:
            kwargs['maxlinelen'] = maxlinelen
        if eol is None:
            # Use body_encode's default.
            eol = '\n'
        else:
            kwargs['eol'] = eol
        encoded_body = quoprimime.body_encode(body, **kwargs)
        self.assertEqual(encoded_body, expected_encoded_body)
        if eol == '\n' or eol == '\r\n':
            # We know how to split the result back into lines, so maxlinelen
            # can be checked.
            for line in encoded_body.splitlines():
                self.assertLessEqual(len(line), maxlinelen)

    def test_encode_null(self):
        self._test_encode('', '')

    def test_encode_null_lines(self):
        self._test_encode('\n\n', '\n\n')

    def test_encode_one_line(self):
        self._test_encode('hello\n', 'hello\n')

    def test_encode_one_line_crlf(self):
        self._test_encode('hello\r\n', 'hello\n')

    def test_encode_one_line_eol(self):
        self._test_encode('hello\n', 'hello\r\n', eol='\r\n')

    def test_encode_one_line_eol_after_non_ascii(self):
        # issue 20206; see changeset 0cf700464177 for why the encode/decode.
        self._test_encode('hello\u03c5\n'.encode('utf-8').decode('latin1'),
                          'hello=CF=85\r\n', eol='\r\n')

    def test_encode_one_space(self):
        # A trailing space must be protected as =20.
        self._test_encode(' ', '=20')

    def test_encode_one_line_one_space(self):
        self._test_encode(' \n', '=20\n')

    # XXX: body_encode() expect strings, but uses ord(char) from these strings
    # to index into a 256-entry list.  For code points above 255, this will fail.
    # Should there be a check for 8-bit only ord() values in body, or at least
    # a comment about the expected input?
    def test_encode_two_lines_one_space(self):
        self._test_encode(' \n \n', '=20\n=20\n')

    def test_encode_one_word_trailing_spaces(self):
        self._test_encode('hello   ', 'hello  =20')

    def test_encode_one_line_trailing_spaces(self):
        self._test_encode('hello   \n', 'hello  =20\n')

    def test_encode_one_word_trailing_tab(self):
        self._test_encode('hello  \t', 'hello  =09')

    def test_encode_one_line_trailing_tab(self):
        self._test_encode('hello  \t\n', 'hello  =09\n')

    def test_encode_trailing_space_before_maxlinelen(self):
        self._test_encode('abcd \n1234', 'abcd =\n\n1234', maxlinelen=6)

    def test_encode_trailing_space_at_maxlinelen(self):
        self._test_encode('abcd \n1234', 'abcd=\n=20\n1234', maxlinelen=5)

    def test_encode_trailing_space_beyond_maxlinelen(self):
        self._test_encode('abcd \n1234', 'abc=\nd=20\n1234', maxlinelen=4)

    def test_encode_whitespace_lines(self):
        self._test_encode(' \n' * 5, '=20\n' * 5)

    def test_encode_quoted_equals(self):
        self._test_encode('a = b', 'a =3D b')

    def test_encode_one_long_string(self):
        self._test_encode('x' * 100, 'x' * 75 + '=\n' + 'x' * 25)

    def test_encode_one_long_line(self):
        self._test_encode('x' * 100 + '\n', 'x' * 75 + '=\n' + 'x' * 25 + '\n')

    def test_encode_one_very_long_line(self):
        self._test_encode('x' * 200 + '\n',
                          2 * ('x' * 75 + '=\n') + 'x' * 50 + '\n')

    def test_encode_shortest_maxlinelen(self):
        self._test_encode('=' * 5, '=3D=\n' * 4 + '=3D', maxlinelen=4)

    def test_encode_maxlinelen_too_small(self):
        self.assertRaises(ValueError, self._test_encode, '', '', maxlinelen=3)

    def test_encode(self):
        eq = self.assertEqual
        eq(quoprimime.body_encode(''), '')
        eq(quoprimime.body_encode('hello'), 'hello')
        # Test the binary flag
        eq(quoprimime.body_encode('hello\r\nworld'), 'hello\nworld')
        # Test the maxlinelen arg.
        # NOTE(review): the leading space on the second expected line was
        # restored by hand (the soft break lands after 39 chars, just
        # before a space); confirm against upstream.
        eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
        # Test the eol argument
        eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'),
           """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
        eq(quoprimime.body_encode("""\
one line

two line"""), """\
one line

two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):

    def tearDown(self):
        # Remove the fake charset registered by test_body_encode so it
        # can't leak into other tests.
        from email import charset as CharsetModule
        try:
            del CharsetModule.CHARSETS['fake']
        except KeyError:
            pass

    def test_codec_encodeable(self):
        eq = self.assertEqual
        # Make sure us-ascii = no Unicode conversion
        c = Charset('us-ascii')
        eq(c.header_encode('Hello World!'), 'Hello World!')
        # Test 8-bit idempotency with us-ascii
        s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
        self.assertRaises(UnicodeError, c.header_encode, s)
        c = Charset('utf-8')
        eq(c.header_encode(s), '=?utf-8?b?wqTCosKkwqTCpMKmwqTCqMKkwqo=?=')

    def test_body_encode(self):
        eq = self.assertEqual
        # Try a charset with QP body encoding
        c = Charset('iso-8859-1')
        eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
        # Try a charset with Base64 body encoding
        c = Charset('utf-8')
        eq('aGVsbG8gd29ybGQ=\n', c.body_encode(b'hello world'))
        # Try a charset with None body encoding
        c = Charset('us-ascii')
        eq('hello world', c.body_encode('hello world'))
        # Try the convert argument, where input codec != output codec
        c = Charset('euc-jp')
        # With apologies to Tokio Kikuchi ;)
        # XXX FIXME
##         try:
##             eq('\x1b$B5FCO;~IW\x1b(B',
##                c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
##             eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
##                c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
##         except LookupError:
##             # We probably don't have the Japanese codecs installed
##             pass
        # Testing SF bug #625509, which we have to fake, since there are no
        # built-in encodings where the header encoding is QP but the body
        # encoding is not.
        from email import charset as CharsetModule
        CharsetModule.add_charset('fake', CharsetModule.QP, None, 'utf-8')
        c = Charset('fake')
        eq('hello world', c.body_encode('hello world'))

    def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        # A charset name that can't be encoded must raise CharsetError.
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
def test_simple(self):
    # Appending a chunk to a Header joins the chunks on encode().
    # NOTE(review): upstream expects two spaces before 'Goodbye' here (the
    # appended chunk already starts with one and encode() inserts another);
    # whitespace may have been collapsed in transit -- confirm.
    eq = self.ndiffAssertEqual
    h = Header('Hello World!')
    eq(h.encode(), 'Hello World!')
    h.append(' Goodbye World!')
    eq(h.encode(), 'Hello World! Goodbye World!')
def test_simple_surprise(self):
    # The "surprise": even without a leading space on the appended chunk,
    # encode() inserts one between chunks.
    eq = self.ndiffAssertEqual
    h = Header('Hello World!')
    eq(h.encode(), 'Hello World!')
    h.append('Goodbye World!')
    eq(h.encode(), 'Hello World! Goodbye World!')
def test_header_needs_no_decoding(self):
h = 'no decoding needed'
self.assertEqual(decode_header(h), [(h, None)])
def test_long(self):
    # A very long single-chunk header must be split at spaces so that
    # every encoded continuation line fits within maxlinelen.
    h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
           maxlinelen=76)
    for l in h.encode(splitchars=' ').split('\n '):
        self.assertLessEqual(len(l), 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = (b'Die Mieter treten hier ein werden mit einem '
b'Foerderband komfortabel den Korridor entlang, '
b'an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, '
b'gegen die rotierenden Klingen bef\xf6rdert. ')
cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
b'd\xf9vtipu.. ')
utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
'\u3044\u307e\u3059\u3002')
h = Header(g_head, g)
h.append(cz_head, cz)
h.append(utf8_head, utf8)
enc = h.encode(maxlinelen=76)
eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_kom?=
=?iso-8859-1?q?fortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wand?=
=?iso-8859-1?q?gem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6r?=
=?iso-8859-1?q?dert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
=?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
=?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
=?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
=?utf-8?b?IE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5k?=
=?utf-8?b?IGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIA=?=
=?utf-8?b?44Gj44Gm44GE44G+44GZ44CC?=""")
decoded = decode_header(enc)
eq(len(decoded), 3)
eq(decoded[0], (g_head, 'iso-8859-1'))
eq(decoded[1], (cz_head, 'iso-8859-2'))
eq(decoded[2], (utf8_head.encode('utf-8'), 'utf-8'))
ustr = str(h)
eq(ustr,
(b'Die Mieter treten hier ein werden mit einem Foerderband '
b'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
b'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
b'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
b'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
b'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
b'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
b'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
b'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
b'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
b'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
b'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
b'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
b'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
b'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82'
).decode('utf-8'))
# Test make_header()
newh = make_header(decode_header(enc))
eq(newh, h)
def test_empty_header_encode(self):
h = Header()
self.assertEqual(h.encode(), '')
def test_header_ctor_default_args(self):
eq = self.ndiffAssertEqual
h = Header()
eq(h, '')
h.append('foo', Charset('iso-8859-1'))
eq(h, 'foo')
def test_explicit_maxlinelen(self):
eq = self.ndiffAssertEqual
hstr = ('A very long line that must get split to something other '
'than at the 76th character boundary to test the non-default '
'behavior')
h = Header(hstr)
eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
character boundary to test the non-default behavior''')
eq(str(h), hstr)
h = Header(hstr, header_name='Subject')
eq(h.encode(), '''\
A very long line that must get split to something other than at the
76th character boundary to test the non-default behavior''')
eq(str(h), hstr)
h = Header(hstr, maxlinelen=1024, header_name='Subject')
eq(h.encode(), hstr)
eq(str(h), hstr)
def test_quopri_splittable(self):
eq = self.ndiffAssertEqual
h = Header(charset='iso-8859-1', maxlinelen=20)
x = 'xxxx ' * 20
h.append(x)
s = h.encode()
eq(s, """\
=?iso-8859-1?q?xxx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_?=""")
eq(x, str(make_header(decode_header(s))))
h = Header(charset='iso-8859-1', maxlinelen=40)
h.append('xxxx ' * 20)
s = h.encode()
eq(s, """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xxx?=
=?iso-8859-1?q?x_xxxx_xxxx_xxxx_xxxx_?=
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
=?iso-8859-1?q?_xxxx_xxxx_?=""")
eq(x, str(make_header(decode_header(s))))
def test_base64_splittable(self):
eq = self.ndiffAssertEqual
h = Header(charset='koi8-r', maxlinelen=20)
x = 'xxxx ' * 20
h.append(x)
s = h.encode()
eq(s, """\
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IA==?=""")
eq(x, str(make_header(decode_header(s))))
h = Header(charset='koi8-r', maxlinelen=40)
h.append(x)
s = h.encode()
eq(s, """\
=?koi8-r?b?eHh4eCB4eHh4IHh4eHggeHh4?=
=?koi8-r?b?eCB4eHh4IHh4eHggeHh4eCB4?=
=?koi8-r?b?eHh4IHh4eHggeHh4eCB4eHh4?=
=?koi8-r?b?IHh4eHggeHh4eCB4eHh4IHh4?=
=?koi8-r?b?eHggeHh4eCB4eHh4IHh4eHgg?=
=?koi8-r?b?eHh4eCB4eHh4IA==?=""")
eq(x, str(make_header(decode_header(s))))
def test_us_ascii_header(self):
eq = self.assertEqual
s = 'hello'
x = decode_header(s)
eq(x, [('hello', None)])
h = make_header(x)
eq(s, h.encode())
def test_string_charset(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
eq(h, 'hello')
## def test_unicode_error(self):
## raises = self.assertRaises
## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
## h = Header()
## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
def test_utf8_shortest(self):
eq = self.assertEqual
h = Header('p\xf6stal', 'utf-8')
eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
h = Header('\u83ca\u5730\u6642\u592b', 'utf-8')
eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
def test_bad_8bit_header(self):
raises = self.assertRaises
eq = self.assertEqual
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
raises(UnicodeError, Header, x)
h = Header()
raises(UnicodeError, h.append, x)
e = x.decode('utf-8', 'replace')
eq(str(Header(x, errors='replace')), e)
h.append(x, errors='replace')
eq(str(h), e)
def test_escaped_8bit_header(self):
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
e = x.decode('ascii', 'surrogateescape')
h = Header(e, charset=email.charset.UNKNOWN8BIT)
self.assertEqual(str(h),
'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])
def test_header_handles_binary_unknown8bit(self):
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
h = Header(x, charset=email.charset.UNKNOWN8BIT)
self.assertEqual(str(h),
'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])
def test_make_header_handles_binary_unknown8bit(self):
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
h = Header(x, charset=email.charset.UNKNOWN8BIT)
h2 = email.header.make_header(email.header.decode_header(h))
self.assertEqual(str(h2),
'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
self.assertEqual(email.header.decode_header(h2), [(x, 'unknown-8bit')])
def test_modify_returned_list_does_not_change_header(self):
h = Header('test')
chunks = email.header.decode_header(h)
chunks.append(('ascii', 'test2'))
self.assertEqual(str(h), 'test')
def test_encoded_adjacent_nonencoded(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
h.append('world')
s = h.encode()
eq(s, '=?iso-8859-1?q?hello?= world')
h = make_header(decode_header(s))
eq(h.encode(), s)
def test_whitespace_keeper(self):
eq = self.assertEqual
s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
parts = decode_header(s)
eq(parts, [(b'Subject: ', None), (b'\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), (b' zz.', None)])
hdr = make_header(parts)
eq(hdr.encode(),
'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
def test_broken_base64_header(self):
raises = self.assertRaises
s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
raises(errors.HeaderParseError, decode_header, s)
def test_shift_jis_charset(self):
h = Header('文', charset='shift_jis')
self.assertEqual(h.encode(), '=?iso-2022-jp?b?GyRCSjgbKEI=?=')
def test_flatten_header_with_no_value(self):
# Issue 11401 (regression from email 4.x) Note that the space after
# the header doesn't reflect the input, but this is also the way
# email 4.x behaved. At some point it would be nice to fix that.
msg = email.message_from_string("EmptyHeader:")
self.assertEqual(str(msg), "EmptyHeader: \n\n")
def test_encode_preserves_leading_ws_on_value(self):
msg = Message()
msg['SomeHeader'] = ' value with leading ws'
self.assertEqual(str(msg), "SomeHeader: value with leading ws\n\n")
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
    """Tests for RFC 2231 MIME parameter handling: charset/language
    tagging, percent-encoding, and multi-segment parameter values.

    NOTE(review): blank header/body separator lines inside the expected
    flattened-message literals were reconstructed from a
    whitespace-mangled source.
    """

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
    def test_get_param(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_29.txt')
        eq(msg.get_param('title'),
           ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
        eq(msg.get_param('title', unquote=False),
           ('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))

    def test_set_param(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii')
        # When no language is given, get_param() reports it as ''.
        eq(msg.get_param('title'),
           ('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii', language='en')
        eq(msg.get_param('title'),
           ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
        msg = self._msgobj('msg_01.txt')
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii', language='en')
        eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
 title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21

Hi,

Do you like this message?

-Me
""")

    def test_set_param_requote(self):
        msg = Message()
        msg.set_param('title', 'foo')
        self.assertEqual(msg['content-type'], 'text/plain; title="foo"')
        msg.set_param('title', 'bar', requote=False)
        self.assertEqual(msg['content-type'], 'text/plain; title=bar')
        # tspecial is still quoted.
        msg.set_param('title', "(bar)bell", requote=False)
        self.assertEqual(msg['content-type'], 'text/plain; title="(bar)bell"')

    def test_del_param(self):
        eq = self.ndiffAssertEqual
        msg = self._msgobj('msg_01.txt')
        msg.set_param('foo', 'bar', charset='us-ascii', language='en')
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii', language='en')
        msg.del_param('foo', header='Content-Type')
        eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
 title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21

Hi,

Do you like this message?

-Me
""")

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_charset
    # I changed the charset name, though, because the one in the file isn't
    # a legal charset name.  Should add a test for an illegal charset.
    def test_rfc2231_get_content_charset(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_32.txt')
        eq(msg.get_content_charset(), 'us-ascii')

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_double_quotes
    def test_rfc2231_parse_rfc_quoting(self):
        m = textwrap.dedent('''\
            Content-Disposition: inline;
            \tfilename*0*=''This%20is%20even%20more%20;
            \tfilename*1*=%2A%2A%2Afun%2A%2A%2A%20;
            \tfilename*2="is it not.pdf"

            ''')
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')
        self.assertEqual(m, msg.as_string())

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
    def test_rfc2231_parse_extra_quoting(self):
        m = textwrap.dedent('''\
            Content-Disposition: inline;
            \tfilename*0*="''This%20is%20even%20more%20";
            \tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
            \tfilename*2="is it not.pdf"

            ''')
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')
        self.assertEqual(m, msg.as_string())

    # test_headerregistry.TestContentTypeHeader.rfc2231_no_language_or_charset
    # but new test uses *0* because otherwise lang/charset is not valid.
    # test_headerregistry.TestContentTypeHeader.rfc2231_segmented_normal_values
    def test_rfc2231_no_language_or_charset(self):
        m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
        msg = email.message_from_string(m)
        param = msg.get_param('NAME')
        self.assertNotIsInstance(param, tuple)
        self.assertEqual(
            param,
            'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_charset
    def test_rfc2231_no_language_or_charset_in_filename(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')

    # Duplicate of previous test?
    def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')

    # test_headerregistry.TestContentTypeHeader.rfc2231_partly_encoded,
    # but the test below is wrong (the first part should be decoded).
    def test_rfc2231_partly_encoded(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            'This%20is%20even%20more%20***fun*** is it not.pdf')

    def test_rfc2231_partly_nonencoded(self):
        # No segment is extended (*N only), so nothing gets %-decoded.
        m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')

    def test_rfc2231_no_language_or_charset_in_boundary(self):
        m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_boundary(),
                         'This is even more ***fun*** is it not.pdf')

    def test_rfc2231_no_language_or_charset_in_charset(self):
        # This is a nonsensical charset value, but tests the code anyway
        m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_content_charset(),
                         'this is even more ***fun*** is it not.pdf')

    # test_headerregistry.TestContentTypeHeader.rfc2231_unknown_charset_treated_as_ascii
    def test_rfc2231_bad_encoding_in_filename(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')

    def test_rfc2231_bad_encoding_in_charset(self):
        m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
        msg = email.message_from_string(m)
        # This should return None because non-ascii characters in the charset
        # are not allowed.
        self.assertEqual(msg.get_content_charset(), None)

    def test_rfc2231_bad_character_in_charset(self):
        m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
        msg = email.message_from_string(m)
        # This should return None because non-ascii characters in the charset
        # are not allowed.
        self.assertEqual(msg.get_content_charset(), None)

    def test_rfc2231_bad_character_in_filename(self):
        # Undecodable bytes in the filename are replaced with U+FFFD.
        m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf\ufffd')

    def test_rfc2231_unknown_encoding(self):
        m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(), 'myfile.txt')

    def test_rfc2231_single_tick_in_filename_extended(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"
"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, None)
        eq(language, None)
        eq(s, "Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
    def test_rfc2231_single_tick_in_filename(self):
        m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
        msg = email.message_from_string(m)
        param = msg.get_param('name')
        self.assertNotIsInstance(param, tuple)
        self.assertEqual(param, "Frank's Document")

    def test_rfc2231_missing_tick(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%20broken";
'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            "'This is broken")

    def test_rfc2231_missing_tick_with_encoded_non_ascii(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%E2broken";
'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            "'This is\ufffdbroken")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_value_with_charset_and_lang
    def test_rfc2231_tick_attack_extended(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"
"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, 'us-ascii')
        eq(language, 'en-us')
        eq(s, "Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_non_encoded_value
    def test_rfc2231_tick_attack(self):
        m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"
"""
        msg = email.message_from_string(m)
        param = msg.get_param('name')
        self.assertNotIsInstance(param, tuple)
        self.assertEqual(param, "us-ascii'en-us'Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quotes_inside_quotes
    def test_rfc2231_no_extended_values(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"
"""
        msg = email.message_from_string(m)
        eq(msg.get_param('name'), "Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_then_unencoded_segments
    def test_rfc2231_encoded_then_unencoded_segments(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"
"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, 'us-ascii')
        eq(language, 'en-us')
        eq(s, 'My Document For You')

    # test_headerregistry.TestContentTypeHeader.rfc2231_unencoded_then_encoded_segments
    # test_headerregistry.TestContentTypeHeader.rfc2231_quoted_unencoded_then_encoded_segments
    def test_rfc2231_unencoded_then_encoded_segments(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"
"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, 'us-ascii')
        eq(language, 'en-us')
        eq(s, 'My Document For You')
# Tests to ensure that signed parts of an email are completely preserved, as
# required by RFC1847 section 2.1. Note that these are incomplete, because the
# email package does not currently always preserve the body. See issue 1670765.
class TestSigned(TestEmailBase):
    """Ensure the signed part of a message survives a parse/flatten
    round trip byte-for-byte, as required by RFC 1847 section 2.1.

    These tests are incomplete: the email package does not currently
    always preserve the body (see issue 1670765).
    """

    def _msg_and_obj(self, filename):
        # Return (raw text, parsed Message) for a test data file.
        with openfile(filename) as fp:
            raw = fp.read()
        return raw, email.message_from_string(raw)

    def _signed_parts_eq(self, original, result):
        # Compare the first MIME part of the two messages: the text
        # between the opening boundary line and the next occurrence of
        # the same boundary.
        import re
        first_part = re.compile(r'^--([^\n]+)\n(.*?)\n--\1$', re.S | re.M)
        expected = first_part.search(original).group(2)
        actual = first_part.search(result).group(2)
        self.assertEqual(actual, expected)

    def test_long_headers_as_string(self):
        raw, msg = self._msg_and_obj('msg_45.txt')
        self._signed_parts_eq(raw, msg.as_string())

    def test_long_headers_as_string_maxheaderlen(self):
        raw, msg = self._msg_and_obj('msg_45.txt')
        self._signed_parts_eq(raw, msg.as_string(maxheaderlen=60))

    def test_long_headers_flatten(self):
        raw, msg = self._msg_and_obj('msg_45.txt')
        buf = StringIO()
        Generator(buf).flatten(msg)
        self._signed_parts_eq(raw, buf.getvalue())
# Allow running this test module directly (outside the regrtest driver).
if __name__ == '__main__':
    unittest.main()
# ======= NOTE(review): unresolved merge-conflict separator.  Everything
# below this line is a second, duplicate copy of the test module (license
# header, imports, TestMessageAPI, ...) and must be reconciled with the
# copy above before this file can be imported -- a bare '=======' is a
# Python syntax error.
# Copyright (C) 2001-2010 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import re
import time
import base64
import unittest
import textwrap
from io import StringIO, BytesIO
from itertools import chain
from random import choice
import email
import email.policy
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator, BytesGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.support import unlink
from test.test_email import openfile, TestEmailBase
# These imports are documented to work, but we are testing them using a
# different path, so we import them here just to make sure they are importable.
from email.parser import FeedParser, BytesFeedParser
# Handy string constants shared by the tests below.
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
# Try adding a charset when there's already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_set_payload_with_8bit_data_and_charset(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
msg = Message()
msg.set_payload(data, charset)
self.assertEqual(msg['content-transfer-encoding'], 'base64')
self.assertEqual(msg.get_payload(decode=True), data)
self.assertEqual(msg.get_payload(), '0JDQkdCS\n')
def test_set_payload_with_non_ascii_and_charset_body_encoding_none(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
charset.body_encoding = None # Disable base64 encoding
msg = Message()
msg.set_payload(data.decode('utf-8'), charset)
self.assertEqual(msg['content-transfer-encoding'], '8bit')
self.assertEqual(msg.get_payload(decode=True), data)
def test_set_payload_with_8bit_data_and_charset_body_encoding_none(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
charset.body_encoding = None # Disable base64 encoding
msg = Message()
msg.set_payload(data, charset)
self.assertEqual(msg['content-transfer-encoding'], '8bit')
self.assertEqual(msg.get_payload(decode=True), data)
def test_set_payload_to_list(self):
msg = Message()
msg.set_payload([])
self.assertEqual(msg.get_payload(), [])
def test_attach_when_payload_is_string(self):
msg = Message()
msg['Content-Type'] = 'multipart/mixed'
msg.set_payload('string payload')
sub_msg = MIMEMessage(Message())
self.assertRaisesRegex(TypeError, "[Aa]ttach.*non-multipart",
msg.attach, sub_msg)
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_make_boundary(self):
msg = MIMEMultipart('form-data')
# Note that when the boundary gets created is an implementation
# detail and might change.
self.assertEqual(msg.items()[0][1], 'multipart/form-data')
# Trigger creation of boundary
msg.as_string()
self.assertEqual(msg.items()[0][1][:33],
'multipart/form-data; boundary="==')
# XXX: there ought to be tests of the uniqueness of the boundary, too.
def test_message_rfc822_only(self):
# Issue 7970: message/rfc822 not in multipart parsed by
# HeaderParser caused an exception when flattened.
with openfile('msg_46.txt') as fp:
msgdata = fp.read()
parser = HeaderParser()
msg = parser.parsestr(msgdata)
out = StringIO()
gen = Generator(out, True, 0)
gen.flatten(msg, False)
self.assertEqual(out.getvalue(), msgdata)
def test_byte_message_rfc822_only(self):
# Make sure new bytes header parser also passes this.
with openfile('msg_46.txt') as fp:
msgdata = fp.read().encode('ascii')
parser = email.parser.BytesHeaderParser()
msg = parser.parsebytes(msgdata)
out = BytesIO()
gen = email.generator.BytesGenerator(out)
gen.flatten(msg)
self.assertEqual(out.getvalue(), msgdata)
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
b'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
b'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
b'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
b'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
b'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), b'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), b'foo')
def test_get_payload_n_raises_on_non_multipart(self):
msg = Message()
self.assertRaises(TypeError, msg.get_payload, 1)
def test_decoded_generator(self):
    """DecodedGenerator output for msg_07.txt must equal msg_17.txt."""
    eq = self.assertEqual
    msg = self._msgobj('msg_07.txt')
    with openfile('msg_17.txt') as fp:
        text = fp.read()
    s = StringIO()
    g = DecodedGenerator(s)
    g.flatten(msg)
    eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertIn('from', msg)
self.assertIn('From', msg)
self.assertIn('FROM', msg)
self.assertIn('to', msg)
self.assertIn('To', msg)
self.assertIn('TO', msg)
def test_as_string(self):
    """str(msg) round-trips msg_01.txt; unixfrom=True adds a 'From ' line."""
    msg = self._msgobj('msg_01.txt')
    with openfile('msg_01.txt') as fp:
        text = fp.read()
    self.assertEqual(text, str(msg))
    fullrepr = msg.as_string(unixfrom=True)
    lines = fullrepr.split('\n')
    self.assertTrue(lines[0].startswith('From '))
    # Dropping the unixfrom envelope line restores the original text.
    self.assertEqual(text, NL.join(lines[1:]))
def test_as_string_policy(self):
    """as_string(policy=...) must match a Generator run with that policy."""
    msg = self._msgobj('msg_01.txt')
    newpolicy = msg.policy.clone(linesep='\r\n')
    fullrepr = msg.as_string(policy=newpolicy)
    s = StringIO()
    g = Generator(s, policy=newpolicy)
    g.flatten(msg)
    self.assertEqual(fullrepr, s.getvalue())
def test_as_bytes(self):
    """bytes(msg) round-trips msg_01.txt; unixfrom=True adds b'From '."""
    msg = self._msgobj('msg_01.txt')
    with openfile('msg_01.txt') as fp:
        data = fp.read().encode('ascii')
    self.assertEqual(data, bytes(msg))
    fullrepr = msg.as_bytes(unixfrom=True)
    lines = fullrepr.split(b'\n')
    self.assertTrue(lines[0].startswith(b'From '))
    # Dropping the unixfrom envelope line restores the original bytes.
    self.assertEqual(data, b'\n'.join(lines[1:]))
def test_as_bytes_policy(self):
    """as_bytes(policy=...) must match a BytesGenerator with that policy."""
    msg = self._msgobj('msg_01.txt')
    newpolicy = msg.policy.clone(linesep='\r\n')
    fullrepr = msg.as_bytes(policy=newpolicy)
    s = BytesIO()
    g = BytesGenerator(s, policy=newpolicy)
    g.flatten(msg)
    self.assertEqual(fullrepr, s.getvalue())
# test_headerregistry.TestContentTypeHeader.bad_params
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
# test_headerregistry.TestContentTypeHeader.spaces_around_param_equals
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
# test_headerregistry.TestContentTypeHeader.spaces_around_semis
def test_get_param_funky_continuation_lines(self):
    """RFC 2231 continued parameters split across lines must reassemble."""
    msg = self._msgobj('msg_22.txt')
    self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
# test_headerregistry.TestContentTypeHeader.semis_inside_quotes
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
# test_headerregistry.TestContentTypeHeader.quotes_inside_rfc2231_value
def test_get_param_with_quotes(self):
msg = email.message_from_string(
'Content-Type: foo; bar*0="baz\\"foobar"; bar*1="\\"baz"')
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
msg = email.message_from_string(
"Content-Type: foo; bar*0=\"baz\\\"foobar\"; bar*1=\"\\\"baz\"")
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
def test_field_containment(self):
msg = email.message_from_string('Header: exists')
self.assertIn('header', msg)
self.assertIn('Header', msg)
self.assertIn('HEADER', msg)
self.assertNotIn('headerx', msg)
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
    """del_param removes a parameter; re-adding appends it at the end."""
    eq = self.assertEqual
    msg = self._msgobj('msg_05.txt')
    eq(msg.get_params(),
       [('multipart/report', ''), ('report-type', 'delivery-status'),
        ('boundary', 'D1690A7AC1.996856090/mail.example.com')])
    old_val = msg.get_param("report-type")
    msg.del_param("report-type")
    eq(msg.get_params(),
       [('multipart/report', ''),
        ('boundary', 'D1690A7AC1.996856090/mail.example.com')])
    # Re-adding a deleted parameter places it at the end of the list.
    msg.set_param("report-type", old_val)
    eq(msg.get_params(),
       [('multipart/report', ''),
        ('boundary', 'D1690A7AC1.996856090/mail.example.com'),
        ('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_del_param_on_nonexistent_header(self):
msg = Message()
# Deleting param on empty msg should not raise exception.
msg.del_param('filename', 'content-disposition')
def test_del_nonexistent_param(self):
msg = Message()
msg.add_header('Content-Type', 'text/plain', charset='utf-8')
existing_header = msg['Content-Type']
msg.del_param('foobar', header='Content-Type')
self.assertEqual(msg['Content-Type'], existing_header)
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
    """The msg_30.txt subpart's type is message/rfc822 without an explicit header."""
    msg = self._msgobj('msg_30.txt')
    self.assertEqual(msg.get_payload(0).get_content_type(),
                     'message/rfc822')
def test_get_content_type_from_message_explicit(self):
    """The msg_28.txt subpart declares message/rfc822 explicitly."""
    msg = self._msgobj('msg_28.txt')
    self.assertEqual(msg.get_payload(0).get_content_type(),
                     'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
    """msg_03.txt has no explicit type; the default is text/plain."""
    msg = self._msgobj('msg_03.txt')
    self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
    """msg_01.txt declares text/plain explicitly."""
    msg = self._msgobj('msg_01.txt')
    self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
    """The msg_30.txt subpart's implicit maintype is 'message'."""
    msg = self._msgobj('msg_30.txt')
    self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
    """The msg_28.txt subpart's explicit maintype is 'message'."""
    msg = self._msgobj('msg_28.txt')
    self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
    """msg_03.txt's implicit maintype is 'text'."""
    msg = self._msgobj('msg_03.txt')
    self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
    """msg_01.txt's explicit maintype is 'text'."""
    msg = self._msgobj('msg_01.txt')
    self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
    """The msg_30.txt subpart's implicit subtype is 'rfc822'."""
    msg = self._msgobj('msg_30.txt')
    self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
    """The msg_28.txt subpart's explicit subtype is 'rfc822'."""
    msg = self._msgobj('msg_28.txt')
    self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
    """msg_03.txt's implicit subtype is 'plain'."""
    msg = self._msgobj('msg_03.txt')
    self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
    """msg_01.txt's explicit subtype is 'plain'."""
    msg = self._msgobj('msg_01.txt')
    self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
# test_defect_handling:test_invalid_chars_in_base64_payload
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
(b'\x03\x00\xe9\xd0\xfe\xff\xff.\x8b\xc0'
b'\xa1\x00p\xf6\xbf\xe9\x0f'))
self.assertIsInstance(msg.defects[0],
errors.InvalidBase64CharactersDefect)
def test_broken_unicode_payload(self):
# This test improves coverage but is not a compliance test.
# The behavior in this situation is currently undefined by the API.
x = 'this is a br\xf6ken thing to do'
msg = Message()
msg['content-type'] = 'text/plain'
msg['content-transfer-encoding'] = '8bit'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
bytes(x, 'raw-unicode-escape'))
def test_questionable_bytes_payload(self):
# This test improves coverage but is not a compliance test,
# since it involves poking inside the black box.
x = 'this is a quéstionable thing to do'.encode('utf-8')
msg = Message()
msg['content-type'] = 'text/plain; charset="utf-8"'
msg['content-transfer-encoding'] = '8bit'
msg._payload = x
self.assertEqual(msg.get_payload(decode=True), x)
# Issue 1078919
def test_ascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename='bud.gif')
self.assertEqual('attachment; filename="bud.gif"',
msg['Content-Disposition'])
def test_noascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="Fußballer.ppt")
self.assertEqual(
'attachment; filename*=utf-8\'\'Fu%C3%9Fballer.ppt',
msg['Content-Disposition'])
def test_nonascii_add_header_via_triple(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename=('iso-8859-1', '', 'Fußballer.ppt'))
self.assertEqual(
'attachment; filename*=iso-8859-1\'\'Fu%DFballer.ppt',
msg['Content-Disposition'])
def test_ascii_add_header_with_tspecial(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="windows [filename].ppt")
self.assertEqual(
'attachment; filename="windows [filename].ppt"',
msg['Content-Disposition'])
def test_nonascii_add_header_with_tspecial(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="Fußballer [filename].ppt")
self.assertEqual(
"attachment; filename*=utf-8''Fu%C3%9Fballer%20%5Bfilename%5D.ppt",
msg['Content-Disposition'])
def test_binary_quopri_payload(self):
for charset in ('latin-1', 'ascii'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = 'quoted-printable'
msg.set_payload(b'foo=e6=96=87bar')
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
'get_payload returns wrong result with charset %s.' % charset)
def test_binary_base64_payload(self):
for charset in ('latin-1', 'ascii'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(b'Zm9v5paHYmFy')
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
'get_payload returns wrong result with charset %s.' % charset)
def test_binary_uuencode_payload(self):
for charset in ('latin-1', 'ascii'):
for encoding in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = encoding
msg.set_payload(b"begin 666 -\n)9F]OYI:'8F%R\n \nend\n")
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
str(('get_payload returns wrong result ',
'with charset {0} and encoding {1}.')).\
format(charset, encoding))
def test_add_header_with_name_only_param(self):
msg = Message()
msg.add_header('Content-Disposition', 'inline', foo_bar=None)
self.assertEqual("inline; foo-bar", msg['Content-Disposition'])
def test_add_header_with_no_value(self):
msg = Message()
msg.add_header('X-Status', None)
self.assertEqual('', msg['X-Status'])
# Issue 5871: reject an attempt to embed a header inside a header value
# (header injection attack).
def test_embeded_header_via_Header_rejected(self):
msg = Message()
msg['Dummy'] = Header('dummy\nX-Injected-Header: test')
self.assertRaises(errors.HeaderParseError, msg.as_string)
def test_embeded_header_via_string_rejected(self):
msg = Message()
msg['Dummy'] = 'dummy\nX-Injected-Header: test'
self.assertRaises(errors.HeaderParseError, msg.as_string)
def test_unicode_header_defaults_to_utf8_encoding(self):
# Issue 14291
m = MIMEText('abc\n')
m['Subject'] = 'É test'
self.assertEqual(str(m),textwrap.dedent("""\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: =?utf-8?q?=C3=89_test?=
abc
"""))
def test_unicode_body_defaults_to_utf8_encoding(self):
# Issue 14291
m = MIMEText('É testabc\n')
self.assertEqual(str(m),textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
w4kgdGVzdGFiYwo=
"""))
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
def test_EncodersEncode_base64(self):
with openfile('PyBanner048.gif', 'rb') as fp:
bindata = fp.read()
mimed = email.mime.image.MIMEImage(bindata)
base64ed = mimed.get_payload()
# the transfer-encoded body lines should all be <=76 characters
lines = base64ed.split('\n')
self.assertLessEqual(max([ len(x) for x in lines ]), 76)
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
# 7bit data and the default us-ascii _charset
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], 'base64')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
def test_encode7or8bit(self):
# Make sure a charset whose input character set is 8bit but
# whose output character set is 7bit gets a transfer-encoding
# of 7bit.
eq = self.assertEqual
msg = MIMEText('文\n', _charset='euc-jp')
eq(msg['content-transfer-encoding'], '7bit')
eq(msg.as_string(), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/plain; charset="iso-2022-jp"
Content-Transfer-Encoding: 7bit
\x1b$BJ8\x1b(B
"""))
def test_qp_encode_latin1(self):
msg = MIMEText('\xe1\xf6\n', 'text', 'ISO-8859-1')
self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
=E1=F6
"""))
def test_qp_encode_non_latin1(self):
# Issue 16948
msg = MIMEText('\u017c\n', 'text', 'ISO-8859-2')
self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-2"
Content-Transfer-Encoding: quoted-printable
=BF
"""))
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
maxDiff = None
def test_split_long_continuation(self):
    """An overlong, unbreakable continuation line passes through unfolded."""
    eq = self.ndiffAssertEqual
    # NOTE(review): the blank header/body separator line appears to have
    # been lost from these literals in this copy — verify against upstream.
    msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    eq(sfp.getvalue(), """\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
def test_another_long_almost_unsplittable_header(self):
    """Unsplittable content folds identically with tab or space whitespace."""
    eq = self.ndiffAssertEqual
    hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
    h = Header(hstr, continuation_ws='\t')
    eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
    h = Header(hstr.replace('\t', ' '))
    # NOTE(review): leading continuation spaces may have been stripped
    # from this expected literal in this copy — verify against upstream.
    eq(h.encode(), """\
bug demonstration
 12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
 more text""")
def test_long_nonstring(self):
    """Mixed-charset Header parts must fold into correct RFC 2047 words."""
    eq = self.ndiffAssertEqual
    g = Charset("iso-8859-1")
    cz = Charset("iso-8859-2")
    utf8 = Charset("utf-8")
    g_head = (b'Die Mieter treten hier ein werden mit einem Foerderband '
              b'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
              b'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
              b'bef\xf6rdert. ')
    cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
               b'd\xf9vtipu.. ')
    utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
                 '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
                 '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
                 '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
                 '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
                 'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
                 'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
                 '\u3044\u307e\u3059\u3002')
    h = Header(g_head, g, header_name='Subject')
    h.append(cz_head, cz)
    h.append(utf8_head, utf8)
    msg = Message()
    msg['Subject'] = h
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # NOTE(review): leading continuation spaces and the trailing blank
    # header/body separator may have been stripped from these expected
    # literals in this copy — verify against upstream.
    eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderb?=
 =?iso-8859-1?q?and_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen?=
 =?iso-8859-1?q?_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef?=
 =?iso-8859-1?q?=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hrouti?=
 =?iso-8859-2?q?ly_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
 =?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC5LiA?=
 =?utf-8?b?6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn44Gf44KJ?=
 =?utf-8?b?44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFzIE51bnN0dWNr?=
 =?utf-8?b?IGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5kIGRhcyBPZGVyIGRp?=
 =?utf-8?b?ZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIDjgaPjgabjgYTjgb7jgZk=?=
 =?utf-8?b?44CC?=
""")
    eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerde?=
 =?iso-8859-1?q?rband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndis?=
 =?iso-8859-1?q?chen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klinge?=
 =?iso-8859-1?q?n_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se?=
 =?iso-8859-2?q?_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
 =?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb?=
 =?utf-8?b?44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go?=
 =?utf-8?b?44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBp?=
 =?utf-8?b?c3QgZGFzIE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWlo?=
 =?utf-8?b?ZXJodW5kIGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI0=?=
 =?utf-8?b?44Go6KiA44Gj44Gm44GE44G+44GZ44CC?=""")
def test_long_header_encode(self):
    """A long structured value folds at whitespace after semicolons."""
    eq = self.ndiffAssertEqual
    h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
               'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
               header_name='X-Foobar-Spoink-Defrobnit')
    # NOTE(review): the continuation line's leading space may have been
    # stripped from this expected literal — verify against upstream.
    eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
 spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation_is_just_a_hint(self):
    """continuation_ws is only a hint when the source has no tabs."""
    eq = self.ndiffAssertEqual
    h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
               'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
               header_name='X-Foobar-Spoink-Defrobnit',
               continuation_ws='\t')
    # NOTE(review): the continuation line's leading space may have been
    # stripped from this expected literal — verify against upstream.
    eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
 spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
    """A tab already in the source value is kept as the continuation ws."""
    eq = self.ndiffAssertEqual
    h = Header('wasnipoop; giraffes="very-long-necked-animals";\t'
               'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
               header_name='X-Foobar-Spoink-Defrobnit',
               continuation_ws='\t')
    eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_encode_with_different_output_charset(self):
h = Header('文', 'euc-jp')
self.assertEqual(h.encode(), "=?iso-2022-jp?b?GyRCSjgbKEI=?=")
def test_long_header_encode_with_different_output_charset(self):
    """Folding a long euc-jp header also uses the iso-2022-jp output codec."""
    h = Header(b'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4'
        b'\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4'
        b'\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4'
        b'\xa4\xa4\xde\xa4\xb9'.decode('euc-jp'), 'euc-jp')
    # NOTE(review): the second line of this expected value normally begins
    # with a continuation space; it may have been stripped in this copy.
    res = """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKMnE8VCROPjUbKEI=?=
 =?iso-2022-jp?b?GyRCRyckckJUJEMkRiQkJF4kORsoQg==?="""
    self.assertEqual(h.encode(), res)
def test_header_splitter(self):
    """Generator folds an overlong custom header at a semicolon."""
    eq = self.ndiffAssertEqual
    msg = MIMEText('')
    # It'd be great if we could use add_header() here, but that doesn't
    # guarantee an order of the parameters.
    msg['X-Foobar-Spoink-Defrobnit'] = (
        'wasnipoop; giraffes="very-long-necked-animals"; '
        'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # NOTE(review): leading continuation whitespace and the blank
    # header/body separator line may have been lost from this literal.
    eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
 spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')
def test_no_semis_header_splitter(self):
    """A References header with no semicolons folds at whitespace."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = SPACE.join('<%d@dom.ain>' % i for i in range(10))
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # NOTE(review): leading continuation whitespace and the blank
    # header/body separator line may have been lost from this literal.
    eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
 <5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
Test""")
def test_last_split_chunk_does_not_fit(self):
    """An unsplittable overlong last chunk lands on its own line."""
    eq = self.ndiffAssertEqual
    h = Header('Subject: the first part of this is short, but_the_second'
        '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
        '_all_by_itself')
    # NOTE(review): the continuation line's leading space may have been
    # stripped from this expected literal — verify against upstream.
    eq(h.encode(), """\
Subject: the first part of this is short,
 but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_splittable_leading_char_followed_by_overlong_unsplitable(self):
    """A lone splittable char before an overlong chunk folds after it."""
    eq = self.ndiffAssertEqual
    h = Header(', but_the_second'
        '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
        '_all_by_itself')
    eq(h.encode(), """\
,
 but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_multiple_splittable_leading_char_followed_by_overlong_unsplitable(self):
    """Multiple leading splittable chars stay together before the fold."""
    eq = self.ndiffAssertEqual
    h = Header(', , but_the_second'
        '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
        '_all_by_itself')
    eq(h.encode(), """\
, ,
 but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_trailing_splitable_on_overlong_unsplitable(self):
    """A trailing split point after an overlong chunk is left in place."""
    eq = self.ndiffAssertEqual
    h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
        'be_on_a_line_all_by_itself;')
    eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_should_"
        "be_on_a_line_all_by_itself;")
def test_trailing_splitable_on_overlong_unsplitable_with_leading_splitable(self):
    """Leading and trailing split points fold around the overlong chunk."""
    eq = self.ndiffAssertEqual
    h = Header('; '
        'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
        'be_on_a_line_all_by_itself; ')
    eq(h.encode(), """\
;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_long_header_with_multiple_sequential_split_chars(self):
    """Runs of consecutive whitespace must not truncate the folded header."""
    eq = self.ndiffAssertEqual
    h = Header('This is a long line that has two whitespaces  in a row.  '
        'This used to cause truncation of the header when folded')
    eq(h.encode(), """\
This is a long line that has two whitespaces  in a row.  This used to cause
 truncation of the header when folded""")
def test_splitter_split_on_punctuation_only_if_fws_with_header(self):
    """Punctuation without following whitespace is not a fold point."""
    eq = self.ndiffAssertEqual
    h = Header('thisverylongheaderhas;semicolons;and,commas,but'
        'they;arenotlegal;fold,points')
    eq(h.encode(), "thisverylongheaderhas;semicolons;and,commas,butthey;"
        "arenotlegal;fold,points")
def test_leading_splittable_in_the_middle_just_before_overlong_last_part(self):
    """Earlier fold points are still used before an overlong final chunk."""
    eq = self.ndiffAssertEqual
    h = Header('this is a test where we need to have more than one line '
        'before; our final line that is just too big to fit;; '
        'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
        'be_on_a_line_all_by_itself;')
    eq(h.encode(), """\
this is a test where we need to have more than one line before;
 our final line that is just too big to fit;;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself;""")
def test_overlong_last_part_followed_by_split_point(self):
    """A trailing space after a lone overlong chunk is preserved as-is."""
    eq = self.ndiffAssertEqual
    h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
        'be_on_a_line_all_by_itself ')
    eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_"
        "should_be_on_a_line_all_by_itself ")
def test_multiline_with_overlong_parts_separated_by_two_split_points(self):
    """Two adjacent split points each begin a new folded line."""
    eq = self.ndiffAssertEqual
    h = Header('this_is_a__test_where_we_need_to_have_more_than_one_line_'
        'before_our_final_line_; ; '
        'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
        'be_on_a_line_all_by_itself; ')
    eq(h.encode(), """\
this_is_a__test_where_we_need_to_have_more_than_one_line_before_our_final_line_;
 ;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_multiline_with_overlong_last_part_followed_by_split_point(self):
    """The final overlong chunk lands on its own folded line."""
    eq = self.ndiffAssertEqual
    h = Header('this is a test where we need to have more than one line '
        'before our final line; ; '
        'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
        'be_on_a_line_all_by_itself; ')
    eq(h.encode(), """\
this is a test where we need to have more than one line before our final line;
 ;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_long_header_with_whitespace_runs(self):
    """Trailing whitespace runs in the source header survive folding."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = SPACE.join(['<foo@dom.ain>  '] * 10)
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # NOTE(review): leading continuation whitespace and the blank
    # header/body separator line may have been lost from this literal.
    eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain>   <foo@dom.ain>   <foo@dom.ain>   <foo@dom.ain>
 <foo@dom.ain>   <foo@dom.ain>   <foo@dom.ain>   <foo@dom.ain>
 <foo@dom.ain>   <foo@dom.ain>\x20\x20
Test""")
def test_long_run_with_semi_header_splitter(self):
    """A long run of references folds at whitespace, then the '; abc' tail."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = SPACE.join(['<foo@dom.ain>'] * 10) + '; abc'
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # NOTE(review): leading continuation whitespace and the blank
    # header/body separator line may have been lost from this literal.
    eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
 <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
 <foo@dom.ain>; abc
Test""")
def test_splitter_split_on_punctuation_only_if_fws(self):
    """Punctuation without whitespace is not a fold point when flattening."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = ('thisverylongheaderhas;semicolons;and,commas,but'
        'they;arenotlegal;fold,points')
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # XXX the space after the header should not be there.
    # NOTE(review): the blank header/body separator line may have been
    # lost from this literal — verify against upstream.
    eq(sfp.getvalue(), """\
From: test@dom.ain
References:\x20
 thisverylongheaderhas;semicolons;and,commas,butthey;arenotlegal;fold,points
Test""")
def test_no_split_long_header(self):
    """An unsplittable 80-char run is emitted on one (overlong) line."""
    eq = self.ndiffAssertEqual
    hstr = 'References: ' + 'x' * 80
    h = Header(hstr)
    # These come on two lines because Headers are really field value
    # classes and don't really know about their field names.
    eq(h.encode(), """\
References:
 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
    h = Header('x' * 80)
    eq(h.encode(), 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
def test_splitting_multiple_long_lines(self):
    """Every long input line is folded at its semicolons independently."""
    eq = self.ndiffAssertEqual
    hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat,  2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat,  2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat,  2 Feb 2002 17:00:06 -0800 (PST)
"""
    h = Header(hstr, continuation_ws='\t')
    # NOTE(review): non-tab continuation lines below normally carry a
    # leading tab from continuation_ws; verify indentation vs upstream.
    eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat,  2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat,  2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
\tby babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
\tfor <mailman-admin@babylon.socal-raves.org>;
\tSat,  2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
    """Only the overlong first physical line of the value is re-folded."""
    eq = self.ndiffAssertEqual
    # Lines two through four already fit and keep their embedded tabs.
    hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
    h = Header(hstr, maxlinelen=78, header_name='Received',
               continuation_ws='\t')
    # NOTE(review): the 'helo=' continuation line should begin with
    # folding whitespace; it appears stripped in this copy.
    eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
    """RFC 2047 encoding and folding of a non-ASCII Subject header."""
    eq = self.ndiffAssertEqual
    msg = Message()
    h = Header('Britische Regierung gibt', 'iso-8859-1',
               header_name='Subject')
    h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
    # NOTE(review): continuation lines of the expected encoded output
    # should begin with folding whitespace, and the message renderings
    # should end with a blank header/body separator line; both appear
    # stripped in this copy.
    eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
=?iso-8859-1?q?hore-Windkraftprojekte?=""")
    msg['Subject'] = h
    eq(msg.as_string(maxheaderlen=76), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
=?iso-8859-1?q?hore-Windkraftprojekte?=
""")
    # With folding disabled the whole encoded word stays on one line.
    eq(msg.as_string(maxheaderlen=0), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")
def test_long_8bit_header_no_charset(self):
    """Without an explicit charset, utf-8 RFC 2047 encoding is used."""
    eq = self.ndiffAssertEqual
    msg = Message()
    header_string = ('Britische Regierung gibt gr\xfcnes Licht '
                     'f\xfcr Offshore-Windkraftprojekte '
                     '<a-very-long-address@example.com>')
    msg['Reply-To'] = header_string
    # NOTE(review): continuation-line folding whitespace and the blank
    # header/body separator appear stripped in this copy.
    eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
=?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
""")
    # Setting an explicit Header with a known field name must fold the
    # same way as the plain string above.
    msg = Message()
    msg['Reply-To'] = Header(header_string,
                             header_name='Reply-To')
    eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
=?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
""")
def test_long_to_header(self):
    """Fold a long To header at the commas between addresses."""
    eq = self.ndiffAssertEqual
    to = ('"Someone Test #A" <someone@eecs.umich.edu>,'
          '<someone@eecs.umich.edu>, '
          '"Someone Test #B" <someone@umich.edu>, '
          '"Someone Test #C" <someone@eecs.umich.edu>, '
          '"Someone Test #D" <someone@eecs.umich.edu>')
    msg = Message()
    msg['To'] = to
    # NOTE(review): leading folding whitespace on the continuation
    # lines and the blank header/body separator appear stripped in
    # this copy of the expected text.
    eq(msg.as_string(maxheaderlen=78), '''\
To: "Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,
"Someone Test #B" <someone@umich.edu>,
"Someone Test #C" <someone@eecs.umich.edu>,
"Someone Test #D" <someone@eecs.umich.edu>
''')
def test_long_line_after_append(self):
eq = self.ndiffAssertEqual
s = 'This is an example of string which has almost the limit of header length.'
h = Header(s)
h.append('Add another line.')
eq(h.encode(maxlinelen=76), """\
This is an example of string which has almost the limit of header length.
Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
    """A very long field name eats into the first line's length budget."""
    eq = self.ndiffAssertEqual
    fn = 'X-Very-Very-Very-Long-Header-Name'
    gs = ('Die Mieter treten hier ein werden mit einem Foerderband '
          'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
          'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
          'bef\xf6rdert. ')
    h = Header(gs, 'iso-8859-1', header_name=fn)
    # BAW: this seems broken because the first line is too long
    # NOTE(review): continuation lines of the expected output should
    # begin with folding whitespace; it appears stripped in this copy.
    eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_e?=
=?iso-8859-1?q?in_werden_mit_einem_Foerderband_komfortabel_den_Korridor_e?=
=?iso-8859-1?q?ntlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_ge?=
=?iso-8859-1?q?gen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
    """Received headers fold at whitespace regardless of Header vs str."""
    h = ('from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) '
         'by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; '
         'Wed, 05 Mar 2003 18:10:18 -0700')
    msg = Message()
    msg['Received-1'] = Header(h, continuation_ws='\t')
    msg['Received-2'] = h
    # This should be splitting on spaces not semicolons.
    # NOTE(review): continuation whitespace and the blank header/body
    # separator appear stripped in this copy of the expected text.
    self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
""")
def test_string_headerinst_eq(self):
    """A plain string and an equivalent Header instance fold the same."""
    h = ('<15975.17901.207240.414604@sgigritzmann1.mathematik.'
         'tu-muenchen.de> (David Bremner\'s message of '
         '"Thu, 6 Mar 2003 13:58:21 +0100")')
    msg = Message()
    msg['Received-1'] = Header(h, header_name='Received-1',
                               continuation_ws='\t')
    msg['Received-2'] = h
    # XXX The space after the ':' should not be there.
    # NOTE(review): continuation whitespace and the blank header/body
    # separator appear stripped in this copy of the expected text.
    self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1:\x20
<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
Received-2:\x20
<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
""")
def test_long_unbreakable_lines_with_continuation(self):
    """Base64-like unbreakable lines are kept intact when folding."""
    eq = self.ndiffAssertEqual
    msg = Message()
    t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
    msg['Face-1'] = t
    msg['Face-2'] = Header(t, header_name='Face-2')
    msg['Face-3'] = ' ' + t
    # XXX This splitting is all wrong: either the first value line should
    # be snug against the field name, or the space after the header
    # should not be there.
    # NOTE(review): continuation whitespace and the blank header/body
    # separator appear stripped in this copy of the expected text.
    eq(msg.as_string(maxheaderlen=78), """\
Face-1:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-3:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")
def test_another_long_multiline_header(self):
eq = self.ndiffAssertEqual
m = ('Received: from siimage.com '
'([172.25.1.3]) by zima.siliconimage.com with '
'Microsoft SMTPSVC(5.0.2195.4905); '
'Wed, 16 Oct 2002 07:41:11 -0700')
msg = email.message_from_string(m)
eq(msg.as_string(maxheaderlen=78), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')
def test_long_lines_with_different_header(self):
    """Folding with a short field name ('List') and a long value."""
    eq = self.ndiffAssertEqual
    h = ('List-Unsubscribe: '
         '<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,'
         ' <mailto:spamassassin-talk-request@lists.sourceforge.net'
         '?subject=unsubscribe>')
    msg = Message()
    # Set twice: once as a plain string, once as an explicit Header;
    # both instances must fold identically.
    msg['List'] = h
    msg['List'] = Header(h, header_name='List')
    # NOTE(review): continuation whitespace and the blank header/body
    # separator appear stripped in this copy of the expected text.
    eq(msg.as_string(maxheaderlen=78), """\
List: List-Unsubscribe:
<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
List: List-Unsubscribe:
<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
""")
def test_long_rfc2047_header_with_embedded_fws(self):
    """Embedded folding whitespace survives RFC 2047 encoding."""
    h = Header(textwrap.dedent("""\
We're going to pretend this header is in a non-ascii character set
\tto see if line wrapping with encoded words and embedded
folding white space works"""),
               charset='utf-8',
               header_name='Test')
    # NOTE(review): continuation lines of the expected encoded output
    # should begin with folding whitespace; it appears stripped in this
    # copy of the expected text.
    self.assertEqual(h.encode()+'\n', textwrap.dedent("""\
=?utf-8?q?We=27re_going_to_pretend_this_header_is_in_a_non-ascii_chara?=
=?utf-8?q?cter_set?=
=?utf-8?q?_to_see_if_line_wrapping_with_encoded_words_and_embedded?=
=?utf-8?q?_folding_white_space_works?=""")+'\n')
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
    """Verify Generator's mangle_from_ option escapes body 'From ' lines.

    Fix: the blank header/body separator lines inside the expected
    message literals had been lost in this copy; they are restored so
    the expected texts are valid RFC 2822 renderings again.
    """

    def setUp(self):
        # A simple message whose body starts with a 'From ' line.
        self.msg = Message()
        self.msg['From'] = 'aaa@bbb.org'
        self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")

    def test_mangled_from(self):
        """mangle_from_=True prefixes body 'From ' lines with '>'."""
        s = StringIO()
        g = Generator(s, mangle_from_=True)
        g.flatten(self.msg)
        self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org

>From the desk of A.A.A.:
Blah blah blah
""")

    def test_dont_mangle_from(self):
        """mangle_from_=False leaves body 'From ' lines untouched."""
        s = StringIO()
        g = Generator(s, mangle_from_=False)
        g.flatten(self.msg)
        self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org

From the desk of A.A.A.:
Blah blah blah
""")

    def test_mangle_from_in_preamble_and_epilog(self):
        """'From ' lines in the preamble and epilogue are mangled too."""
        s = StringIO()
        g = Generator(s, mangle_from_=True)
        msg = email.message_from_string(textwrap.dedent("""\
            From: foo@bar.com
            Mime-Version: 1.0
            Content-Type: multipart/mixed; boundary=XXX

            From somewhere unknown

            --XXX
            Content-Type: text/plain

            foo

            --XXX--

            From somewhere unknowable
            """))
        g.flatten(msg)
        # Exactly two lines get mangled: one in the preamble and one in
        # the epilogue; the 'From:' header itself is never touched.
        self.assertEqual(len([1 for x in s.getvalue().split('\n')
                              if x.startswith('>From ')]), 2)

    def test_mangled_from_with_bad_bytes(self):
        """Mangling also works for 8bit bodies flattened as bytes."""
        source = textwrap.dedent("""\
            Content-Type: text/plain; charset="utf-8"
            MIME-Version: 1.0
            Content-Transfer-Encoding: 8bit
            From: aaa@bbb.org

            """).encode('utf-8')
        msg = email.message_from_bytes(source + b'From R\xc3\xb6lli\n')
        b = BytesIO()
        g = BytesGenerator(b, mangle_from_=True)
        g.flatten(msg)
        self.assertEqual(b.getvalue(), source + b'>From R\xc3\xb6lli\n')
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
    """Exercise construction and parameter handling of MIMEAudio."""

    def setUp(self):
        # Sample .au data from the shared test-data directory.
        with openfile('audiotest.au', 'rb') as fp:
            self._audiodata = fp.read()
        self._au = MIMEAudio(self._audiodata)

    def test_guess_minor_type(self):
        # The audio subtype is sniffed from the payload bytes.
        self.assertEqual(self._au.get_content_type(), 'audio/basic')

    def test_encoding(self):
        # The payload is stored base64-encoded; decoding must
        # round-trip back to the original bytes.
        encoded = self._au.get_payload()
        self.assertEqual(base64.decodebytes(bytes(encoded, 'ascii')),
                         self._audiodata)

    def test_checkSetMinor(self):
        # An explicitly supplied subtype overrides sniffing.
        explicit = MIMEAudio(self._audiodata, 'fish')
        self.assertEqual(explicit.get_content_type(), 'audio/fish')

    def test_add_header(self):
        eq = self.assertEqual
        self._au.add_header('Content-Disposition', 'attachment',
                            filename='audiotest.au')
        eq(self._au['content-disposition'],
           'attachment; filename="audiotest.au"')
        eq(self._au.get_params(header='content-disposition'),
           [('attachment', ''), ('filename', 'audiotest.au')])
        eq(self._au.get_param('filename', header='content-disposition'),
           'audiotest.au')
        # A valueless parameter reads back as the empty string.
        missing = []
        eq(self._au.get_param('attachment', header='content-disposition'),
           '')
        self.assertIs(self._au.get_param('foo', failobj=missing,
                                         header='content-disposition'),
                      missing)
        # Missing parameters and missing headers fall back to failobj.
        self.assertIs(self._au.get_param('foobar', missing), missing)
        self.assertIs(self._au.get_param('attachment', missing,
                                         header='foobar'), missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
    """Exercise construction and parameter handling of MIMEImage."""

    def setUp(self):
        # Sample GIF data from the shared test-data directory.
        with openfile('PyBanner048.gif', 'rb') as fp:
            self._imgdata = fp.read()
        self._im = MIMEImage(self._imgdata)

    def test_guess_minor_type(self):
        # The image subtype is sniffed from the payload bytes.
        self.assertEqual(self._im.get_content_type(), 'image/gif')

    def test_encoding(self):
        # The payload is stored base64-encoded; decoding must
        # round-trip back to the original bytes.
        encoded = self._im.get_payload()
        self.assertEqual(base64.decodebytes(bytes(encoded, 'ascii')),
                         self._imgdata)

    def test_checkSetMinor(self):
        # An explicitly supplied subtype overrides sniffing.
        explicit = MIMEImage(self._imgdata, 'fish')
        self.assertEqual(explicit.get_content_type(), 'image/fish')

    def test_add_header(self):
        eq = self.assertEqual
        self._im.add_header('Content-Disposition', 'attachment',
                            filename='dingusfish.gif')
        eq(self._im['content-disposition'],
           'attachment; filename="dingusfish.gif"')
        eq(self._im.get_params(header='content-disposition'),
           [('attachment', ''), ('filename', 'dingusfish.gif')])
        eq(self._im.get_param('filename', header='content-disposition'),
           'dingusfish.gif')
        # A valueless parameter reads back as the empty string.
        missing = []
        eq(self._im.get_param('attachment', header='content-disposition'),
           '')
        self.assertIs(self._im.get_param('foo', failobj=missing,
                                         header='content-disposition'),
                      missing)
        # Missing parameters and missing headers fall back to failobj.
        self.assertIs(self._im.get_param('foobar', missing), missing)
        self.assertIs(self._im.get_param('attachment', missing,
                                         header='foobar'), missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
    """Exercise MIMEApplication with the default and explicit encoders."""

    def test_headers(self):
        message = MIMEApplication(b'\xfa\xfb\xfc\xfd\xfe\xff')
        # The default content type and transfer encoding apply.
        self.assertEqual(message.get_content_type(),
                         'application/octet-stream')
        self.assertEqual(message['content-transfer-encoding'], 'base64')

    def test_body(self):
        payload = b'\xfa\xfb\xfc\xfd\xfe\xff'
        message = MIMEApplication(payload)
        # whitespace in the cte encoded block is RFC-irrelevant.
        self.assertEqual(message.get_payload().strip(), '+vv8/f7/')
        self.assertEqual(message.get_payload(decode=True), payload)

    def test_binary_body_with_encode_7or8bit(self):
        # Issue 17171.
        payload = b'\xfa\xfb\xfc\xfd\xfe\xff'
        message = MIMEApplication(payload,
                                  _encoder=encoders.encode_7or8bit)
        # Treated as a string, this will be invalid code points.
        self.assertEqual(message.get_payload(), '\uFFFD' * len(payload))
        self.assertEqual(message.get_payload(decode=True), payload)
        self.assertEqual(message['Content-Transfer-Encoding'], '8bit')
        # Round-trip through the bytes generator and the parser.
        buf = BytesIO()
        BytesGenerator(buf).flatten(message)
        parsed = email.message_from_bytes(buf.getvalue())
        self.assertEqual(message.get_payload(), '\uFFFD' * len(payload))
        self.assertEqual(parsed.get_payload(decode=True), payload)
        self.assertEqual(parsed['Content-Transfer-Encoding'], '8bit')

    def test_binary_body_with_encode_noop(self):
        # Issue 16564: This does not produce an RFC valid message, since
        # to be valid it should have a CTE of binary. But the below works
        # in Python2, and is documented as working this way.
        payload = b'\xfa\xfb\xfc\xfd\xfe\xff'
        message = MIMEApplication(payload, _encoder=encoders.encode_noop)
        # Treated as a string, this will be invalid code points.
        self.assertEqual(message.get_payload(), '\uFFFD' * len(payload))
        self.assertEqual(message.get_payload(decode=True), payload)
        buf = BytesIO()
        BytesGenerator(buf).flatten(message)
        parsed = email.message_from_bytes(buf.getvalue())
        self.assertEqual(message.get_payload(), '\uFFFD' * len(payload))
        self.assertEqual(parsed.get_payload(decode=True), payload)

    def test_binary_body_with_encode_quopri(self):
        # Issue 14360.
        payload = b'\xfa\xfb\xfc\xfd\xfe\xff '
        message = MIMEApplication(payload,
                                  _encoder=encoders.encode_quopri)
        self.assertEqual(message.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
        self.assertEqual(message.get_payload(decode=True), payload)
        self.assertEqual(message['Content-Transfer-Encoding'],
                         'quoted-printable')
        buf = BytesIO()
        BytesGenerator(buf).flatten(message)
        parsed = email.message_from_bytes(buf.getvalue())
        self.assertEqual(message.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
        self.assertEqual(parsed.get_payload(decode=True), payload)
        self.assertEqual(parsed['Content-Transfer-Encoding'],
                         'quoted-printable')

    def test_binary_body_with_encode_base64(self):
        payload = b'\xfa\xfb\xfc\xfd\xfe\xff'
        message = MIMEApplication(payload,
                                  _encoder=encoders.encode_base64)
        self.assertEqual(message.get_payload(), '+vv8/f7/\n')
        self.assertEqual(message.get_payload(decode=True), payload)
        buf = BytesIO()
        BytesGenerator(buf).flatten(message)
        parsed = email.message_from_bytes(buf.getvalue())
        self.assertEqual(message.get_payload(), '+vv8/f7/\n')
        self.assertEqual(parsed.get_payload(decode=True), payload)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
    """Exercise charset handling in the MIMEText convenience class."""

    def setUp(self):
        self._msg = MIMEText('hello there')

    def test_types(self):
        # Plain ASCII input defaults to text/plain with us-ascii.
        self.assertEqual(self._msg.get_content_type(), 'text/plain')
        self.assertEqual(self._msg.get_param('charset'), 'us-ascii')
        # Missing parameters and headers fall back to the failobj.
        missing = []
        self.assertIs(self._msg.get_param('foobar', missing), missing)
        self.assertIs(self._msg.get_param('charset', missing,
                                          header='foobar'),
                      missing)

    def test_payload(self):
        self.assertEqual(self._msg.get_payload(), 'hello there')
        self.assertFalse(self._msg.is_multipart())

    def test_charset(self):
        message = MIMEText('hello there', _charset='us-ascii')
        self.assertEqual(message.get_charset().input_charset, 'us-ascii')
        self.assertEqual(message['content-type'],
                         'text/plain; charset="us-ascii"')

    def test_7bit_input(self):
        message = MIMEText('hello there', _charset='us-ascii')
        self.assertEqual(message.get_charset().input_charset, 'us-ascii')
        self.assertEqual(message['content-type'],
                         'text/plain; charset="us-ascii"')

    def test_7bit_input_no_charset(self):
        # With no explicit charset, 7bit input is detected as us-ascii.
        message = MIMEText('hello there')
        self.assertEqual(message.get_charset(), 'us-ascii')
        self.assertEqual(message['content-type'],
                         'text/plain; charset="us-ascii"')
        self.assertIn('hello there', message.as_string())

    def test_utf8_input(self):
        teststr = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
        message = MIMEText(teststr, _charset='utf-8')
        self.assertEqual(message.get_charset().output_charset, 'utf-8')
        self.assertEqual(message['content-type'],
                         'text/plain; charset="utf-8"')
        self.assertEqual(message.get_payload(decode=True),
                         teststr.encode('utf-8'))

    @unittest.skip("can't fix because of backward compat in email5, "
                   "will fix in email6")
    def test_utf8_input_no_charset(self):
        teststr = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
        self.assertRaises(UnicodeEncodeError, MIMEText, teststr)
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
    """Generation and parsing of multipart/* container messages.

    NOTE(review): blank separator lines and leading folding whitespace
    inside the triple-quoted expected texts appear to have been
    stripped in this copy of the file -- verify these literals against
    upstream before trusting the exact-match assertions below.
    """

    def setUp(self):
        # Two-part multipart/mixed: a text intro plus a GIF attachment,
        # with a fixed timestamp so the Date header is deterministic.
        with openfile('PyBanner048.gif', 'rb') as fp:
            data = fp.read()
        container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
        image = MIMEImage(data, name='dingusfish.gif')
        image.add_header('content-disposition', 'attachment',
                         filename='dingusfish.gif')
        intro = MIMEText('''\
Hi there,
This is the dingus fish.
''')
        container.attach(intro)
        container.attach(image)
        container['From'] = 'Barry <barry@digicool.com>'
        container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
        container['Subject'] = 'Here is your dingus fish'
        now = 987809702.54848599
        timetuple = time.localtime(now)
        # Pick the UTC offset according to whether DST was in effect at
        # the fixed timestamp.
        if timetuple[-1] == 0:
            tzsecs = time.timezone
        else:
            tzsecs = time.altzone
        if tzsecs > 0:
            sign = '-'
        else:
            sign = '+'
        # NOTE(review): tzsecs / 36 is float division; '%04d' truncates
        # the float, but integer division (tzsecs // 36) looks intended
        # -- confirm against upstream.
        tzoffset = ' %s%04d' % (sign, tzsecs / 36)
        container['Date'] = time.strftime(
            '%a, %d %b %Y %H:%M:%S',
            time.localtime(now)) + tzoffset
        self._msg = container
        self._im = image
        self._txt = intro

    def test_hierarchy(self):
        # convenience
        eq = self.assertEqual
        raises = self.assertRaises
        # tests
        m = self._msg
        self.assertTrue(m.is_multipart())
        eq(m.get_content_type(), 'multipart/mixed')
        eq(len(m.get_payload()), 2)
        raises(IndexError, m.get_payload, 2)
        m0 = m.get_payload(0)
        m1 = m.get_payload(1)
        self.assertIs(m0, self._txt)
        self.assertIs(m1, self._im)
        eq(m.get_payload(), [m0, m1])
        self.assertFalse(m0.is_multipart())
        self.assertFalse(m1.is_multipart())

    def test_empty_multipart_idempotent(self):
        # Parsing then regenerating an empty multipart must round-trip.
        text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
"""
        msg = Parser().parsestr(text)
        self.ndiffAssertEqual(text, msg.as_string())

    def test_no_parts_in_a_multipart_with_none_epilogue(self):
        # A multipart with no attached parts still renders its boundaries.
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.set_boundary('BOUNDARY')
        self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')

    def test_no_parts_in_a_multipart_with_empty_epilogue(self):
        # Empty-string preamble/epilogue differ from None when rendered.
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.preamble = ''
        outer.epilogue = ''
        outer.set_boundary('BOUNDARY')
        self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')

    def test_one_part_in_a_multipart(self):
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.set_boundary('BOUNDARY')
        msg = MIMEText('hello world')
        outer.attach(msg)
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_empty_preamble(self):
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.preamble = ''
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_none_preamble(self):
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.preamble = None
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_none_epilogue(self):
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.epilogue = None
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.epilogue = ''
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')

    def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
        # A newline epilogue adds a trailing newline after the closing
        # boundary.
        eq = self.ndiffAssertEqual
        outer = MIMEBase('multipart', 'mixed')
        outer['Subject'] = 'A subject'
        outer['To'] = 'aperson@dom.ain'
        outer['From'] = 'bperson@dom.ain'
        outer.epilogue = '\n'
        msg = MIMEText('hello world')
        outer.attach(msg)
        outer.set_boundary('BOUNDARY')
        eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')

    def test_message_external_body(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_36.txt')
        eq(len(msg.get_payload()), 2)
        msg1 = msg.get_payload(1)
        eq(msg1.get_content_type(), 'multipart/alternative')
        eq(len(msg1.get_payload()), 2)
        for subpart in msg1.get_payload():
            eq(subpart.get_content_type(), 'message/external-body')
            eq(len(subpart.get_payload()), 1)
            subsubpart = subpart.get_payload(0)
            eq(subsubpart.get_content_type(), 'text/plain')

    def test_double_boundary(self):
        # msg_37.txt is a multipart that contains two dash-boundary's in a
        # row. Our interpretation of RFC 2046 calls for ignoring the second
        # and subsequent boundaries.
        msg = self._msgobj('msg_37.txt')
        self.assertEqual(len(msg.get_payload()), 3)

    def test_nested_inner_contains_outer_boundary(self):
        eq = self.ndiffAssertEqual
        # msg_38.txt has an inner part that contains outer boundaries. My
        # interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) say
        # these are illegal and should be interpreted as unterminated inner
        # parts.
        msg = self._msgobj('msg_38.txt')
        sfp = StringIO()
        iterators._structure(msg, sfp)
        eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")

    def test_nested_with_same_boundary(self):
        eq = self.ndiffAssertEqual
        # msg 39.txt is similarly evil in that it's got inner parts that use
        # the same boundary as outer parts. Again, I believe the way this is
        # parsed is closest to the spirit of RFC 2046
        msg = self._msgobj('msg_39.txt')
        sfp = StringIO()
        iterators._structure(msg, sfp)
        eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")

    def test_boundary_in_non_multipart(self):
        # A boundary parameter on a non-multipart type is inert.
        msg = self._msgobj('msg_40.txt')
        self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"
----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit
<html></html>
----961284236552522269--
''')

    def test_boundary_with_leading_space(self):
        # Leading whitespace in the boundary parameter is significant.
        eq = self.assertEqual
        msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"
-- XXXX
Content-Type: text/plain
-- XXXX
Content-Type: text/plain
-- XXXX--
''')
        self.assertTrue(msg.is_multipart())
        eq(msg.get_boundary(), ' XXXX')
        eq(len(msg.get_payload()), 2)

    def test_boundary_without_trailing_newline(self):
        # The closing boundary may legally end the text with no newline.
        m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0
--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64
YXNkZg==
--===============0012394164==--""")
        self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
# test_defect_handling
def test_same_boundary_inner_outer(self):
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
self.assertTrue(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
self.assertIsInstance(inner.defects[0],
errors.StartBoundaryNotFoundDefect)
# test_defect_handling
def test_multipart_no_boundary(self):
msg = self._msgobj('msg_25.txt')
self.assertIsInstance(msg.get_payload(), str)
self.assertEqual(len(msg.defects), 2)
self.assertIsInstance(msg.defects[0],
errors.NoBoundaryInMultipartDefect)
self.assertIsInstance(msg.defects[1],
errors.MultipartInvariantViolationDefect)
multipart_msg = textwrap.dedent("""\
Date: Wed, 14 Nov 2007 12:56:23 GMT
From: foo@bar.invalid
To: foo@bar.invalid
Subject: Content-Transfer-Encoding: base64 and multipart
MIME-Version: 1.0
Content-Type: multipart/mixed;
boundary="===============3344438784458119861=="{}
--===============3344438784458119861==
Content-Type: text/plain
Test message
--===============3344438784458119861==
Content-Type: application/octet-stream
Content-Transfer-Encoding: base64
YWJj
--===============3344438784458119861==--
""")
# test_defect_handling
def test_multipart_invalid_cte(self):
msg = self._str_msg(
self.multipart_msg.format("\nContent-Transfer-Encoding: base64"))
self.assertEqual(len(msg.defects), 1)
self.assertIsInstance(msg.defects[0],
errors.InvalidMultipartContentTransferEncodingDefect)
# test_defect_handling
def test_multipart_no_cte_no_defect(self):
msg = self._str_msg(self.multipart_msg.format(''))
self.assertEqual(len(msg.defects), 0)
# test_defect_handling
def test_multipart_valid_cte_no_defect(self):
for cte in ('7bit', '8bit', 'BINary'):
msg = self._str_msg(
self.multipart_msg.format(
"\nContent-Transfer-Encoding: {}".format(cte)))
self.assertEqual(len(msg.defects), 0)
# test_headerregistry.TestContentTyopeHeader invalid_1 and invalid_2.
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = Message()
# RFC 2045, $5.2 says invalid yields text/plain
msg['Content-Type'] = 'text'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Clear the old value and try something /really/ invalid
del msg['content-type']
msg['Content-Type'] = 'foo'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Still, make sure that the message is idempotently generated
s = StringIO()
g = Generator(s)
g.flatten(msg)
neq(s.getvalue(), 'Content-Type: foo\n\n')
def test_no_start_boundary(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_31.txt')
eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain
message 1
--BOUNDARY
Content-Type: text/plain
message 2
--BOUNDARY--
""")
def test_no_separating_blank_line(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_35.txt')
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting
counter to RFC 2822, there's no separating newline here
""")
# test_defect_handling
def test_lying_multipart(self):
msg = self._msgobj('msg_41.txt')
self.assertTrue(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
self.assertIsInstance(msg.defects[0],
errors.NoBoundaryInMultipartDefect)
self.assertIsInstance(msg.defects[1],
errors.MultipartInvariantViolationDefect)
# test_defect_handling
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
# The message structure is:
#
# multipart/mixed
# text/plain
# message/rfc822
# multipart/mixed [*]
#
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
self.assertIsInstance(bad.defects[0],
errors.StartBoundaryNotFoundDefect)
# test_defect_handling
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
m = ' Line 1\nSubject: test\n\nbody'
msg = email.message_from_string(m)
eq(msg.keys(), ['Subject'])
eq(msg.get_payload(), 'body')
eq(len(msg.defects), 1)
self.assertDefectsEqual(msg.defects,
[errors.FirstHeaderLineIsContinuationDefect])
eq(msg.defects[0].line, ' Line 1\n')
# test_defect_handling
def test_missing_header_body_separator(self):
    # Parser heuristic: a line with no leading whitespace and no ':' is
    # taken to mean the blank header/body separator went missing, so
    # header parsing stops there and the rest becomes the body.
    msg = self._str_msg('Subject: test\nnot a header\nTo: abc\n\nb\n')
    self.assertEqual(msg.keys(), ['Subject'])
    self.assertEqual(msg.get_payload(), 'not a header\nTo: abc\n\nb\n')
    self.assertDefectsEqual(
        msg.defects, [errors.MissingHeaderBodySeparatorDefect])
# Test RFC 2047 header encoding and decoding
class TestRFC2047(TestEmailBase):
    """Tests for RFC 2047 encoded-word decoding (decode_header) and
    re-encoding (make_header / Header.encode)."""

    def test_rfc2047_multiline(self):
        # Encoded words split across a folded header line; decode_header
        # returns a list of (bytes, charset-or-None) chunks.
        eq = self.assertEqual
        s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
        dh = decode_header(s)
        eq(dh, [
            (b'Re: ', None),
            (b'r\x8aksm\x9arg\x8cs', 'mac-iceland'),
            (b' baz foo bar ', None),
            (b'r\x8aksm\x9arg\x8cs', 'mac-iceland')])
        header = make_header(dh)
        eq(str(header),
           'Re: r\xe4ksm\xf6rg\xe5s baz foo bar r\xe4ksm\xf6rg\xe5s')
        self.ndiffAssertEqual(header.encode(maxlinelen=76), """\
Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar =?mac-iceland?q?r=8Aksm?=
=?mac-iceland?q?=9Arg=8Cs?=""")

    def test_whitespace_keeper_unicode(self):
        # Whitespace between an encoded word and ordinary text is kept.
        eq = self.assertEqual
        s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
        dh = decode_header(s)
        eq(dh, [(b'Andr\xe9', 'iso-8859-1'),
                (b' Pirard <pirard@dom.ain>', None)])
        header = str(make_header(dh))
        eq(header, 'Andr\xe9 Pirard <pirard@dom.ain>')

    def test_whitespace_keeper_unicode_2(self):
        eq = self.assertEqual
        s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
        dh = decode_header(s)
        eq(dh, [(b'The ', None), (b'quick brown fox', 'iso-8859-1'),
                (b' jumped over the ', None), (b'lazy dog', 'iso-8859-1')])
        hu = str(make_header(dh))
        eq(hu, 'The quick brown fox jumped over the lazy dog')

    def test_rfc2047_missing_whitespace(self):
        # Adjacent encoded words with no separating whitespace still
        # decode; the unencoded runs between them are preserved.
        s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
        dh = decode_header(s)
        self.assertEqual(dh, [(b'Sm', None), (b'\xf6', 'iso-8859-1'),
                              (b'rg', None), (b'\xe5', 'iso-8859-1'),
                              (b'sbord', None)])

    def test_rfc2047_with_whitespace(self):
        s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
        dh = decode_header(s)
        self.assertEqual(dh, [(b'Sm ', None), (b'\xf6', 'iso-8859-1'),
                              (b' rg ', None), (b'\xe5', 'iso-8859-1'),
                              (b' sbord', None)])

    def test_rfc2047_B_bad_padding(self):
        # Base64 encoded words with wrong/missing '=' padding still decode.
        s = '=?iso-8859-1?B?%s?='
        data = [  # only test complete bytes
            ('dm==', b'v'), ('dm=', b'v'), ('dm', b'v'),
            ('dmk=', b'vi'), ('dmk', b'vi')
        ]
        for q, a in data:
            dh = decode_header(s % q)
            self.assertEqual(dh, [(a, 'iso-8859-1')])

    def test_rfc2047_Q_invalid_digits(self):
        # issue 10004.
        s = '=?iso-8659-1?Q?andr=e9=zz?='
        self.assertEqual(decode_header(s),
                         [(b'andr\xe9=zz', 'iso-8659-1')])

    def test_rfc2047_rfc2047_1(self):
        # 1st testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'a', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_2(self):
        # 2nd testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= b)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'a', 'iso-8859-1'), (b' b)', None)])

    def test_rfc2047_rfc2047_3(self):
        # 3rd testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_4(self):
        # 4th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?=  =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_5a(self):
        # 5th testcase at end of rfc2047 newline is \r\n
        s = '(=?ISO-8859-1?Q?a?=\r\n    =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_5b(self):
        # 5th testcase at end of rfc2047 newline is \n
        s = '(=?ISO-8859-1?Q?a?=\n    =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_6(self):
        # 6th testcase at end of rfc2047: '_' in Q-encoding means space.
        s = '(=?ISO-8859-1?Q?a_b?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'a b', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_7(self):
        # 7th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-2?Q?_b?=)'
        self.assertEqual(decode_header(s),
                         [(b'(', None), (b'a', 'iso-8859-1'), (b' b', 'iso-8859-2'),
                          (b')', None)])
        self.assertEqual(make_header(decode_header(s)).encode(), s.lower())
        self.assertEqual(str(make_header(decode_header(s))), '(a b)')

    def test_multiline_header(self):
        # Folded header with an encoded word on the first line: encode()
        # unfolds it, str() decodes it to the unicode text.
        s = '=?windows-1252?q?=22M=FCller_T=22?=\r\n <T.Mueller@xxx.com>'
        self.assertEqual(decode_header(s),
                         [(b'"M\xfcller T"', 'windows-1252'),
                          (b'<T.Mueller@xxx.com>', None)])
        self.assertEqual(make_header(decode_header(s)).encode(),
                         ''.join(s.splitlines()))
        self.assertEqual(str(make_header(decode_header(s))),
                         '"Müller T" <T.Mueller@xxx.com>')
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
    """Tests for the MIMEMessage class (message/rfc822 containers)."""

    def setUp(self):
        # Raw text of a simple message/rfc822 fixture, available to tests.
        with openfile('msg_11.txt') as fp:
            self._text = fp.read()

    def test_type_error(self):
        # MIMEMessage requires a Message instance, not a string.
        self.assertRaises(TypeError, MIMEMessage, 'a plain string')

    def test_valid_argument(self):
        # Wrapping a Message yields a message/rfc822 part whose payload
        # is a one-element list containing that very Message object.
        eq = self.assertEqual
        subject = 'A sub-message'
        m = Message()
        m['Subject'] = subject
        r = MIMEMessage(m)
        eq(r.get_content_type(), 'message/rfc822')
        payload = r.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        subpart = payload[0]
        self.assertIs(subpart, m)
        eq(subpart['subject'], subject)

    def test_bad_multipart(self):
        # message/rfc822 may hold exactly one submessage; attaching a
        # second raises MultipartConversionError.
        msg1 = Message()
        msg1['Subject'] = 'subpart 1'
        msg2 = Message()
        msg2['Subject'] = 'subpart 2'
        r = MIMEMessage(msg1)
        self.assertRaises(errors.MultipartConversionError, r.attach, msg2)

    def test_generate(self):
        # First craft the message to be encapsulated
        m = Message()
        m['Subject'] = 'An enclosed message'
        m.set_payload('Here is the body of the message.\n')
        r = MIMEMessage(m)
        r['Subject'] = 'The enclosing message'
        s = StringIO()
        g = Generator(s)
        g.flatten(r)
        self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message
Subject: An enclosed message
Here is the body of the message.
""")

    def test_parse_message_rfc822(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_11.txt')
        eq(msg.get_content_type(), 'message/rfc822')
        payload = msg.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        submsg = payload[0]
        self.assertIsInstance(submsg, Message)
        eq(submsg['subject'], 'An enclosed message')
        eq(submsg.get_payload(), 'Here is the body of the message.\n')

    def test_dsn(self):
        eq = self.assertEqual
        # msg 16 is a Delivery Status Notification, see RFC 1894
        msg = self._msgobj('msg_16.txt')
        eq(msg.get_content_type(), 'multipart/report')
        self.assertTrue(msg.is_multipart())
        eq(len(msg.get_payload()), 3)
        # Subpart 1 is a text/plain, human readable section
        subpart = msg.get_payload(0)
        eq(subpart.get_content_type(), 'text/plain')
        eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry" <henryi@oxy.edu>
To: SoCal Raves <scr@socal-raves.org>
Subject: [scr] yeah for Ians!!
Your message cannot be delivered to the following recipients:
Recipient address: jangel1@cougar.noc.ucla.edu
Reason: recipient reached disk quota
""")
        # Subpart 2 contains the machine parsable DSN information.  It
        # consists of two blocks of headers, represented by two nested Message
        # objects.
        subpart = msg.get_payload(1)
        eq(subpart.get_content_type(), 'message/delivery-status')
        eq(len(subpart.get_payload()), 2)
        # message/delivery-status should treat each block as a bunch of
        # headers, i.e. a bunch of Message objects.
        dsn1 = subpart.get_payload(0)
        self.assertIsInstance(dsn1, Message)
        eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
        eq(dsn1.get_param('dns', header='reporting-mta'), '')
        # Try a missing one <wink>
        eq(dsn1.get_param('nsd', header='reporting-mta'), None)
        dsn2 = subpart.get_payload(1)
        self.assertIsInstance(dsn2, Message)
        eq(dsn2['action'], 'failed')
        eq(dsn2.get_params(header='original-recipient'),
           [('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
        eq(dsn2.get_param('rfc822', header='final-recipient'), '')
        # Subpart 3 is the original message
        subpart = msg.get_payload(2)
        eq(subpart.get_content_type(), 'message/rfc822')
        payload = subpart.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        subsubpart = payload[0]
        self.assertIsInstance(subsubpart, Message)
        eq(subsubpart.get_content_type(), 'text/plain')
        eq(subsubpart['message-id'],
           '<002001c144a6$8752e060$56104586@oxy.edu>')

    def test_epilogue(self):
        # A hand-built multipart with preamble and epilogue must flatten
        # to exactly the fixture text.
        eq = self.ndiffAssertEqual
        with openfile('msg_21.txt') as fp:
            text = fp.read()
        msg = Message()
        msg['From'] = 'aperson@dom.ain'
        msg['To'] = 'bperson@dom.ain'
        msg['Subject'] = 'Test'
        msg.preamble = 'MIME message'
        msg.epilogue = 'End of MIME message\n'
        msg1 = MIMEText('One')
        msg2 = MIMEText('Two')
        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
        msg.attach(msg1)
        msg.attach(msg2)
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), text)

    def test_no_nl_preamble(self):
        # Preamble with no trailing newline and an empty epilogue.
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'aperson@dom.ain'
        msg['To'] = 'bperson@dom.ain'
        msg['Subject'] = 'Test'
        msg.preamble = 'MIME message'
        msg.epilogue = ''
        msg1 = MIMEText('One')
        msg2 = MIMEText('Two')
        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
        msg.attach(msg1)
        msg.attach(msg2)
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Two
--BOUNDARY--
""")

    def test_default_type(self):
        # Inside multipart/digest, parts default to message/rfc822.
        eq = self.assertEqual
        with openfile('msg_30.txt') as fp:
            msg = email.message_from_file(fp)
        container1 = msg.get_payload(0)
        eq(container1.get_default_type(), 'message/rfc822')
        eq(container1.get_content_type(), 'message/rfc822')
        container2 = msg.get_payload(1)
        eq(container2.get_default_type(), 'message/rfc822')
        eq(container2.get_content_type(), 'message/rfc822')
        container1a = container1.get_payload(0)
        eq(container1a.get_default_type(), 'text/plain')
        eq(container1a.get_content_type(), 'text/plain')
        container2a = container2.get_payload(0)
        eq(container2a.get_default_type(), 'text/plain')
        eq(container2a.get_content_type(), 'text/plain')

    def test_default_type_with_explicit_container_type(self):
        # Same as above, but the fixture spells out the container type.
        eq = self.assertEqual
        with openfile('msg_28.txt') as fp:
            msg = email.message_from_file(fp)
        container1 = msg.get_payload(0)
        eq(container1.get_default_type(), 'message/rfc822')
        eq(container1.get_content_type(), 'message/rfc822')
        container2 = msg.get_payload(1)
        eq(container2.get_default_type(), 'message/rfc822')
        eq(container2.get_content_type(), 'message/rfc822')
        container1a = container1.get_payload(0)
        eq(container1a.get_default_type(), 'text/plain')
        eq(container1a.get_content_type(), 'text/plain')
        container2a = container2.get_payload(0)
        eq(container2a.get_default_type(), 'text/plain')
        eq(container2a.get_content_type(), 'text/plain')

    def test_default_type_non_parsed(self):
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        # Set up container
        container = MIMEMultipart('digest', 'BOUNDARY')
        container.epilogue = ''
        # Set up subparts
        subpart1a = MIMEText('message 1\n')
        subpart2a = MIMEText('message 2\n')
        subpart1 = MIMEMessage(subpart1a)
        subpart2 = MIMEMessage(subpart2a)
        container.attach(subpart1)
        container.attach(subpart2)
        eq(subpart1.get_content_type(), 'message/rfc822')
        eq(subpart1.get_default_type(), 'message/rfc822')
        eq(subpart2.get_content_type(), 'message/rfc822')
        eq(subpart2.get_default_type(), 'message/rfc822')
        neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
        # With the explicit headers deleted, the default type still
        # reports message/rfc822 inside a digest.
        del subpart1['content-type']
        del subpart1['mime-version']
        del subpart2['content-type']
        del subpart2['mime-version']
        eq(subpart1.get_content_type(), 'message/rfc822')
        eq(subpart1.get_default_type(), 'message/rfc822')
        eq(subpart2.get_content_type(), 'message/rfc822')
        eq(subpart2.get_default_type(), 'message/rfc822')
        neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')

    def test_mime_attachments_in_constructor(self):
        # _subparts passed to the constructor are attached in order.
        eq = self.assertEqual
        text1 = MIMEText('')
        text2 = MIMEText('')
        msg = MIMEMultipart(_subparts=(text1, text2))
        eq(len(msg.get_payload()), 2)
        eq(msg.get_payload(0), text1)
        eq(msg.get_payload(1), text2)

    def test_default_multipart_constructor(self):
        msg = MIMEMultipart()
        self.assertTrue(msg.is_multipart())
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
    """Parse each fixture into a tree and regenerate it; the output must
    byte-for-byte match the input (modulo the Unix-From line)."""

    # Line separator expected in the regenerated text.
    linesep = '\n'

    def _msgobj(self, filename):
        # Return both the parsed Message and the raw fixture text so a
        # test can compare regenerated output against the original.
        with openfile(filename) as fp:
            data = fp.read()
        msg = email.message_from_string(data)
        return msg, data

    def _idempotent(self, msg, text, unixfrom=False):
        # Flatten with header wrapping disabled and require exact match.
        eq = self.ndiffAssertEqual
        s = StringIO()
        g = Generator(s, maxheaderlen=0)
        g.flatten(msg, unixfrom=unixfrom)
        eq(text, s.getvalue())

    def test_parse_text_message(self):
        eq = self.assertEqual
        msg, text = self._msgobj('msg_01.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_params()[1], ('charset', 'us-ascii'))
        eq(msg.get_param('charset'), 'us-ascii')
        eq(msg.preamble, None)
        eq(msg.epilogue, None)
        self._idempotent(msg, text)

    def test_parse_untyped_message(self):
        # No Content-Type header: params and charset report as absent.
        eq = self.assertEqual
        msg, text = self._msgobj('msg_03.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_params(), None)
        eq(msg.get_param('charset'), None)
        self._idempotent(msg, text)

    def test_simple_multipart(self):
        msg, text = self._msgobj('msg_04.txt')
        self._idempotent(msg, text)

    def test_MIME_digest(self):
        msg, text = self._msgobj('msg_02.txt')
        self._idempotent(msg, text)

    def test_long_header(self):
        msg, text = self._msgobj('msg_27.txt')
        self._idempotent(msg, text)

    def test_MIME_digest_with_part_headers(self):
        msg, text = self._msgobj('msg_28.txt')
        self._idempotent(msg, text)

    def test_mixed_with_image(self):
        msg, text = self._msgobj('msg_06.txt')
        self._idempotent(msg, text)

    def test_multipart_report(self):
        msg, text = self._msgobj('msg_05.txt')
        self._idempotent(msg, text)

    def test_dsn(self):
        msg, text = self._msgobj('msg_16.txt')
        self._idempotent(msg, text)

    def test_preamble_epilogue(self):
        msg, text = self._msgobj('msg_21.txt')
        self._idempotent(msg, text)

    def test_multipart_one_part(self):
        msg, text = self._msgobj('msg_23.txt')
        self._idempotent(msg, text)

    def test_multipart_no_parts(self):
        msg, text = self._msgobj('msg_24.txt')
        self._idempotent(msg, text)

    def test_no_start_boundary(self):
        msg, text = self._msgobj('msg_31.txt')
        self._idempotent(msg, text)

    def test_rfc2231_charset(self):
        msg, text = self._msgobj('msg_32.txt')
        self._idempotent(msg, text)

    def test_more_rfc2231_parameters(self):
        msg, text = self._msgobj('msg_33.txt')
        self._idempotent(msg, text)

    def test_text_plain_in_a_multipart_digest(self):
        msg, text = self._msgobj('msg_34.txt')
        self._idempotent(msg, text)

    def test_nested_multipart_mixeds(self):
        msg, text = self._msgobj('msg_12a.txt')
        self._idempotent(msg, text)

    def test_message_external_body_idempotent(self):
        msg, text = self._msgobj('msg_36.txt')
        self._idempotent(msg, text)

    def test_message_delivery_status(self):
        # The only case that also round-trips the Unix-From line.
        msg, text = self._msgobj('msg_43.txt')
        self._idempotent(msg, text, unixfrom=True)

    def test_message_signed_idempotent(self):
        msg, text = self._msgobj('msg_45.txt')
        self._idempotent(msg, text)

    def test_content_type(self):
        eq = self.assertEqual
        # Get a message object and reset the seek pointer for other tests
        msg, text = self._msgobj('msg_05.txt')
        eq(msg.get_content_type(), 'multipart/report')
        # Test the Content-Type: parameters
        params = {}
        for pk, pv in msg.get_params():
            params[pk] = pv
        eq(params['report-type'], 'delivery-status')
        eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
        eq(msg.preamble, 'This is a MIME-encapsulated message.' + self.linesep)
        eq(msg.epilogue, self.linesep)
        eq(len(msg.get_payload()), 3)
        # Make sure the subparts are what we expect
        msg1 = msg.get_payload(0)
        eq(msg1.get_content_type(), 'text/plain')
        eq(msg1.get_payload(), 'Yadda yadda yadda' + self.linesep)
        msg2 = msg.get_payload(1)
        eq(msg2.get_content_type(), 'text/plain')
        eq(msg2.get_payload(), 'Yadda yadda yadda' + self.linesep)
        msg3 = msg.get_payload(2)
        eq(msg3.get_content_type(), 'message/rfc822')
        self.assertIsInstance(msg3, Message)
        payload = msg3.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        msg4 = payload[0]
        self.assertIsInstance(msg4, Message)
        eq(msg4.get_payload(), 'Yadda yadda yadda' + self.linesep)

    def test_parser(self):
        eq = self.assertEqual
        msg, text = self._msgobj('msg_06.txt')
        # Check some of the outer headers
        eq(msg.get_content_type(), 'message/rfc822')
        # Make sure the payload is a list of exactly one sub-Message, and that
        # that submessage has a type of text/plain
        payload = msg.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        msg1 = payload[0]
        self.assertIsInstance(msg1, Message)
        eq(msg1.get_content_type(), 'text/plain')
        self.assertIsInstance(msg1.get_payload(), str)
        eq(msg1.get_payload(), self.linesep)
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
with openfile('msg_01.txt') as fp:
text = fp.read()
msg = email.message_from_string(text)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
with openfile('msg_01.txt') as fp:
text = fp.read()
fp.seek(0)
msg = email.message_from_file(fp)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_string_with_class(self):
with openfile('msg_01.txt') as fp:
text = fp.read()
# Create a subclass
class MyMessage(Message):
pass
msg = email.message_from_string(text, MyMessage)
self.assertIsInstance(msg, MyMessage)
# Try something more complicated
with openfile('msg_02.txt') as fp:
text = fp.read()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
self.assertIsInstance(subpart, MyMessage)
def test_message_from_file_with_class(self):
# Create a subclass
class MyMessage(Message):
pass
with openfile('msg_01.txt') as fp:
msg = email.message_from_file(fp, MyMessage)
self.assertIsInstance(msg, MyMessage)
# Try something more complicated
with openfile('msg_02.txt') as fp:
msg = email.message_from_file(fp, MyMessage)
for subpart in msg.walk():
self.assertIsInstance(subpart, MyMessage)
def test_custom_message_does_not_require_arguments(self):
class MyMessage(Message):
def __init__(self):
super().__init__()
msg = self._str_msg("Subject: test\n\ntest", MyMessage)
self.assertIsInstance(msg, MyMessage)
def test__all__(self):
module = __import__('email')
self.assertEqual(sorted(module.__all__), [
'base64mime', 'charset', 'encoders', 'errors', 'feedparser',
'generator', 'header', 'iterators', 'message',
'message_from_binary_file', 'message_from_bytes',
'message_from_file', 'message_from_string', 'mime', 'parser',
'quoprimime', 'utils',
])
def test_formatdate(self):
now = time.time()
self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
time.gmtime(now)[:6])
def test_formatdate_localtime(self):
now = time.time()
self.assertEqual(
utils.parsedate(utils.formatdate(now, localtime=True))[:6],
time.localtime(now)[:6])
def test_formatdate_usegmt(self):
now = time.time()
self.assertEqual(
utils.formatdate(now, localtime=False),
time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
self.assertEqual(
utils.formatdate(now, localtime=False, usegmt=True),
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
# parsedate and parsedate_tz will become deprecated interfaces someday
def test_parsedate_returns_None_for_invalid_strings(self):
self.assertIsNone(utils.parsedate(''))
self.assertIsNone(utils.parsedate_tz(''))
self.assertIsNone(utils.parsedate('0'))
self.assertIsNone(utils.parsedate_tz('0'))
self.assertIsNone(utils.parsedate('A Complete Waste of Time'))
self.assertIsNone(utils.parsedate_tz('A Complete Waste of Time'))
# Not a part of the spec but, but this has historically worked:
self.assertIsNone(utils.parsedate(None))
self.assertIsNone(utils.parsedate_tz(None))
def test_parsedate_compact(self):
# The FWS after the comma is optional
self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
(2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_no_space_before_positive_offset(self):
self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26+0800'),
(2002, 4, 3, 14, 58, 26, 0, 1, -1, 28800))
def test_parsedate_no_space_before_negative_offset(self):
# Issue 1155362: we already handled '+' for this case.
self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26-0800'),
(2002, 4, 3, 14, 58, 26, 0, 1, -1, -28800))
def test_parsedate_accepts_time_with_dots(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13.47.26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
eq(utils.parsedate_tz('5 Feb 2003 13.47 -0800'),
(2003, 2, 5, 13, 47, 0, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup)), 2003)
timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup[:9]))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_mktime_tz(self):
self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 0)), 0)
self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 1234)), -1234)
def test_parsedate_y2k(self):
"""Test for parsing a date with a two-digit year.
Parsing a date with a two-digit year should return the correct
four-digit year. RFC822 allows two-digit years, but RFC2822 (which
obsoletes RFC822) requires four-digit years.
"""
self.assertEqual(utils.parsedate_tz('25 Feb 03 13:47:26 -0800'),
utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'))
self.assertEqual(utils.parsedate_tz('25 Feb 71 13:47:26 -0800'),
utils.parsedate_tz('25 Feb 1971 13:47:26 -0800'))
def test_parseaddr_empty(self):
self.assertEqual(utils.parseaddr('<>'), ('', ''))
self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
def test_noquote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly Person', 'person@dom.ain')),
'A Silly Person <person@dom.ain>')
def test_escape_dump(self):
self.assertEqual(
utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
r'"A (Very) Silly Person" <person@dom.ain>')
self.assertEqual(
utils.parseaddr(r'"A \(Very\) Silly Person" <person@dom.ain>'),
('A (Very) Silly Person', 'person@dom.ain'))
a = r'A \(Special\) Person'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_escape_backslashes(self):
self.assertEqual(
utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
a = r'Arthur \Backslash\ Foobar'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_quotes_unicode_names(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
name = "H\u00e4ns W\u00fcrst"
addr = 'person@dom.ain'
utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
latin1_quopri = "=?iso-8859-1?q?H=E4ns_W=FCrst?= <person@dom.ain>"
self.assertEqual(utils.formataddr((name, addr)), utf8_base64)
self.assertEqual(utils.formataddr((name, addr), 'iso-8859-1'),
latin1_quopri)
def test_accepts_any_charset_like_object(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
name = "H\u00e4ns W\u00fcrst"
addr = 'person@dom.ain'
utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
foobar = "FOOBAR"
class CharsetMock:
def header_encode(self, string):
return foobar
mock = CharsetMock()
mock_expected = "%s <%s>" % (foobar, addr)
self.assertEqual(utils.formataddr((name, addr), mock), mock_expected)
self.assertEqual(utils.formataddr((name, addr), Charset('utf-8')),
utf8_base64)
def test_invalid_charset_like_object_raises_error(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
name = "H\u00e4ns W\u00fcrst"
addr = 'person@dom.ain'
# A object without a header_encode method:
bad_charset = object()
self.assertRaises(AttributeError, utils.formataddr, (name, addr),
bad_charset)
def test_unicode_address_raises_error(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
addr = 'pers\u00f6n@dom.in'
self.assertRaises(UnicodeError, utils.formataddr, (None, addr))
self.assertRaises(UnicodeError, utils.formataddr, ("Name", addr))
def test_name_with_dot(self):
x = 'John X. Doe <jxd@example.com>'
y = '"John X. Doe" <jxd@example.com>'
a, b = ('John X. Doe', 'jxd@example.com')
self.assertEqual(utils.parseaddr(x), (a, b))
self.assertEqual(utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(utils.formataddr((a, b)), y)
def test_parseaddr_preserves_quoted_pairs_in_addresses(self):
# issue 10005. Note that in the third test the second pair of
# backslashes is not actually a quoted pair because it is not inside a
# comment or quoted string: the address being parsed has a quoted
# string containing a quoted backslash, followed by 'example' and two
# backslashes, followed by another quoted string containing a space and
# the word 'example'. parseaddr copies those two backslashes
# literally. Per rfc5322 this is not technically correct since a \ may
# not appear in an address outside of a quoted string. It is probably
# a sensible Postel interpretation, though.
eq = self.assertEqual
eq(utils.parseaddr('""example" example"@example.com'),
('', '""example" example"@example.com'))
eq(utils.parseaddr('"\\"example\\" example"@example.com'),
('', '"\\"example\\" example"@example.com'))
eq(utils.parseaddr('"\\\\"example\\\\" example"@example.com'),
('', '"\\\\"example\\\\" example"@example.com'))
def test_parseaddr_preserves_spaces_in_local_part(self):
# issue 9286. A normal RFC5322 local part should not contain any
# folding white space, but legacy local parts can (they are a sequence
# of atoms, not dotatoms). On the other hand we strip whitespace from
# before the @ and around dots, on the assumption that the whitespace
# around the punctuation is a mistake in what would otherwise be
# an RFC5322 local part. Leading whitespace is, usual, stripped as well.
self.assertEqual(('', "merwok wok@xample.com"),
utils.parseaddr("merwok wok@xample.com"))
self.assertEqual(('', "merwok wok@xample.com"),
utils.parseaddr("merwok wok@xample.com"))
self.assertEqual(('', "merwok wok@xample.com"),
utils.parseaddr(" merwok wok @xample.com"))
self.assertEqual(('', 'merwok"wok" wok@xample.com'),
utils.parseaddr('merwok"wok" wok@xample.com'))
self.assertEqual(('', 'merwok.wok.wok@xample.com'),
utils.parseaddr('merwok. wok . wok@xample.com'))
def test_formataddr_does_not_quote_parens_in_quoted_string(self):
addr = ("'foo@example.com' (foo@example.com)",
'foo@example.com')
addrstr = ('"\'foo@example.com\' '
'(foo@example.com)" <foo@example.com>')
self.assertEqual(utils.parseaddr(addrstr), addr)
self.assertEqual(utils.formataddr(addr), addrstr)
def test_multiline_from_comment(self):
x = """\
Foo
\tBar <foo@example.com>"""
self.assertEqual(utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))
def test_quote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly; Person', 'person@dom.ain')),
r'"A Silly; Person" <person@dom.ain>')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.assertNotEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
eq = self.assertEqual
eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
'Bud Person <bperson@dom.ain>']),
[('Al Person', 'aperson@dom.ain'),
('Bud Person', 'bperson@dom.ain')])
def test_getaddresses_nasty(self):
eq = self.assertEqual
eq(utils.getaddresses(['foo: ;']), [('', '')])
eq(utils.getaddresses(
['[]*-- =~$']),
[('', ''), ('', ''), ('', '*--')])
eq(utils.getaddresses(
['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
[('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])
def test_getaddresses_embedded_comment(self):
"""Test proper handling of a nested comment"""
eq = self.assertEqual
addrs = utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
eq(addrs[0][1], 'foo@bar.com')
def test_utils_quote_unquote(self):
eq = self.assertEqual
msg = Message()
msg.add_header('content-disposition', 'attachment',
filename='foo\\wacky"name')
eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
charset = Charset('not a charset')
self.assertEqual(charset.get_body_encoding(), 'base64')
def test_get_body_encoding_with_uppercase_charset(self):
eq = self.assertEqual
msg = Message()
msg['Content-Type'] = 'text/plain; charset=UTF-8'
eq(msg['content-type'], 'text/plain; charset=UTF-8')
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'utf-8')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), 'base64')
msg.set_payload(b'hello world', charset=charset)
eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
eq(msg.get_payload(decode=True), b'hello world')
eq(msg['content-transfer-encoding'], 'base64')
# Try another one
msg = Message()
msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'us-ascii')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), encoders.encode_7or8bit)
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'hello world')
eq(msg['content-transfer-encoding'], '7bit')
def test_charsets_case_insensitive(self):
lc = Charset('us-ascii')
uc = Charset('US-ASCII')
self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
    def test_partial_falls_inside_message_delivery_status(self):
        eq = self.ndiffAssertEqual
        # The Parser interface provides chunks of data to FeedParser in 8192
        # byte gulps.  SF bug #1076485 found one of those chunks inside
        # message/delivery-status header block, which triggered an
        # unreadline() of NeedMoreData.
        msg = self._msgobj('msg_43.txt')
        sfp = StringIO()
        iterators._structure(msg, sfp)
        # Expected structure dump: a multipart/report whose delivery-status
        # part contains 26 text/plain sub-blocks.
        eq(sfp.getvalue(), """\
multipart/report
    text/plain
    message/delivery-status
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
        text/plain
    text/rfc822-headers
""")
def test_make_msgid_domain(self):
self.assertEqual(
email.utils.make_msgid(domain='testdomain-string')[-19:],
'@testdomain-string>')
    def test_Generator_linend(self):
        # Issue 14645: the text Generator's default line ending is \n even
        # when the parsed source used \r\n.
        with openfile('msg_26.txt', newline='\n') as f:
            msgtxt = f.read()
        msgtxt_nl = msgtxt.replace('\r\n', '\n')
        msg = email.message_from_string(msgtxt)
        s = StringIO()
        g = email.generator.Generator(s)
        g.flatten(msg)
        self.assertEqual(s.getvalue(), msgtxt_nl)
    def test_BytesGenerator_linend(self):
        # Issue 14645: BytesGenerator honours linesep='\r\n' even when the
        # message was parsed from \n-separated text.
        with openfile('msg_26.txt', newline='\n') as f:
            msgtxt = f.read()
        msgtxt_nl = msgtxt.replace('\r\n', '\n')
        msg = email.message_from_string(msgtxt_nl)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue().decode('ascii'), msgtxt)
    def test_BytesGenerator_linend_with_non_ascii(self):
        # Issue 14645: same as above, but with a non-ascii byte spliced into
        # the body so the 8bit (surrogateescape) path is exercised.
        with openfile('msg_26.txt', 'rb') as f:
            msgtxt = f.read()
        msgtxt = msgtxt.replace(b'with attachment', b'fo\xf6')
        msgtxt_nl = msgtxt.replace(b'\r\n', b'\n')
        msg = email.message_from_bytes(msgtxt_nl)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue(), msgtxt)
# Test the iterator/generators
class TestIterators(TestEmailBase):
    """Tests for email.iterators and the FeedParser line buffer."""

    def test_body_line_iterator(self):
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        # First a simple non-multipart message
        msg = self._msgobj('msg_01.txt')
        it = iterators.body_line_iterator(msg)
        lines = list(it)
        eq(len(lines), 6)
        neq(EMPTYSTRING.join(lines), msg.get_payload())
        # Now a more complicated multipart
        msg = self._msgobj('msg_02.txt')
        it = iterators.body_line_iterator(msg)
        lines = list(it)
        eq(len(lines), 43)
        with openfile('msg_19.txt') as fp:
            neq(EMPTYSTRING.join(lines), fp.read())

    def test_typed_subpart_iterator(self):
        # Both text/* subparts of msg_04 are visited, in order.
        eq = self.assertEqual
        msg = self._msgobj('msg_04.txt')
        it = iterators.typed_subpart_iterator(msg, 'text')
        lines = []
        subparts = 0
        for subpart in it:
            subparts += 1
            lines.append(subpart.get_payload())
        eq(subparts, 2)
        eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own

a simple kind of mirror
to reflect upon our own

""")

    def test_typed_subpart_iterator_default_type(self):
        # With an explicit subtype, only the matching text/plain part hits.
        eq = self.assertEqual
        msg = self._msgobj('msg_03.txt')
        it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
        lines = []
        subparts = 0
        for subpart in it:
            subparts += 1
            lines.append(subpart.get_payload())
        eq(subparts, 1)
        eq(EMPTYSTRING.join(lines), """\
Hi,

Do you like this message?

-Me
""")

    def test_pushCR_LF(self):
        '''FeedParser BufferedSubFile.push() assumed it received complete
        line endings.  A CR ending one push() followed by a LF starting
        the next push() added an empty line.
        '''
        # Each entry is (chunk to push, number of complete lines expected
        # to be readable immediately afterwards).
        imt = [
            ("a\r \n", 2),
            ("b", 0),
            ("c\n", 1),
            ("", 0),
            ("d\r\n", 1),
            ("e\r", 0),
            ("\nf", 1),
            ("\r\n", 1),
        ]
        from email.feedparser import BufferedSubFile, NeedMoreData
        bsf = BufferedSubFile()
        om = []
        nt = 0
        for il, n in imt:
            bsf.push(il)
            nt += n
            n1 = 0
            for ol in iter(bsf.readline, NeedMoreData):
                om.append(ol)
                n1 += 1
            self.assertEqual(n, n1)
        self.assertEqual(len(om), nt)
        # Reassembled output must equal the pushed input byte-for-byte.
        self.assertEqual(''.join([il for il, n in imt]), ''.join(om))

    def test_push_random(self):
        # Fuzz BufferedSubFile with random data pushed in 5-char slices;
        # the readline stream must match splitlines(True) of the whole.
        from email.feedparser import BufferedSubFile, NeedMoreData
        n = 10000
        chunksize = 5
        chars = 'abcd \t\r\n'
        s = ''.join(choice(chars) for i in range(n)) + '\n'
        target = s.splitlines(True)
        bsf = BufferedSubFile()
        lines = []
        for i in range(0, len(s), chunksize):
            chunk = s[i:i+chunksize]
            bsf.push(chunk)
            lines.extend(iter(bsf.readline, NeedMoreData))
        self.assertEqual(lines, target)
class TestFeedParsers(TestEmailBase):
    """Drive FeedParser directly with hand-chosen chunk boundaries."""

    def parse(self, chunks):
        # Feed each chunk separately so a split can land anywhere, then
        # close to obtain the finished message object.
        from email.feedparser import FeedParser
        parser = FeedParser()
        for piece in chunks:
            parser.feed(piece)
        return parser.close()

    def test_empty_header_name_handled(self):
        # Issue 19996: a header line with an empty name must not derail
        # parsing of the headers that follow it.
        msg = self.parse("First: val\n: bad\nSecond: val")
        for name in ('First', 'Second'):
            self.assertEqual(msg[name], 'val')

    def test_newlines(self):
        # \n, \r and \r\n must each terminate a header line, even when a
        # chunk boundary falls between the \r and the \n.
        for chunks, expected_keys in (
                (['a:\nb:\rc:\r\nd:\n'], ['a', 'b', 'c', 'd']),
                (['a:\nb:\rc:\r\nd:'], ['a', 'b', 'c', 'd']),
                (['a:\rb', 'c:\n'], ['a', 'bc']),
                (['a:\r', 'b:\n'], ['a', 'b']),
                (['a:\r', '\nb:\n'], ['a', 'b'])):
            self.assertEqual(self.parse(chunks).keys(), expected_keys)
        # NEL (\x85) and LINE SEPARATOR (\u2028) are NOT line terminators.
        m = self.parse(['a:\x85b:\u2028c:\n'])
        self.assertEqual(m.items(), [('a', '\x85'), ('b', '\u2028'), ('c', '')])
        m = self.parse(['a:\r', 'b:\x85', 'c:\n'])
        self.assertEqual(m.items(), [('a', ''), ('b', '\x85'), ('c', '')])

    def test_long_lines(self):
        # Expected peak memory use on 32-bit platform: 6*N*M bytes.
        chunk_len, nchunks = 1000, 20000
        filler = 'x' * chunk_len
        m = self.parse(['a:b\n\n'] + [filler] * nchunks)
        self.assertEqual(m.items(), [('a', 'b')])
        self.assertEqual(m.get_payload(), filler * nchunks)
        m = self.parse(['a:b\r\r'] + [filler] * nchunks)
        self.assertEqual(m.items(), [('a', 'b')])
        self.assertEqual(m.get_payload(), filler * nchunks)
        m = self.parse(['a:b\r\r'] + [filler + '\x85'] * nchunks)
        self.assertEqual(m.items(), [('a', 'b')])
        self.assertEqual(m.get_payload(), (filler + '\x85') * nchunks)
        m = self.parse(['a:\r', 'b: '] + [filler] * nchunks)
        self.assertEqual(m.items(), [('a', ''), ('b', filler * nchunks)])
class TestParsers(TestEmailBase):
    """Tests for the Parser/HeaderParser front ends in email.parser."""

    def test_header_parser(self):
        eq = self.assertEqual
        # Parse only the headers of a complex multipart MIME document
        with openfile('msg_02.txt') as fp:
            msg = HeaderParser().parse(fp)
        eq(msg['from'], 'ppp-request@zzz.org')
        eq(msg['to'], 'ppp@zzz.org')
        eq(msg.get_content_type(), 'multipart/mixed')
        # headersonly parsing leaves the body as one unparsed string.
        self.assertFalse(msg.is_multipart())
        self.assertIsInstance(msg.get_payload(), str)

    def test_bytes_header_parser(self):
        eq = self.assertEqual
        # Parse only the headers of a complex multipart MIME document
        with openfile('msg_02.txt', 'rb') as fp:
            msg = email.parser.BytesHeaderParser().parse(fp)
        eq(msg['from'], 'ppp-request@zzz.org')
        eq(msg['to'], 'ppp@zzz.org')
        eq(msg.get_content_type(), 'multipart/mixed')
        self.assertFalse(msg.is_multipart())
        self.assertIsInstance(msg.get_payload(), str)
        self.assertIsInstance(msg.get_payload(decode=True), bytes)

    def test_bytes_parser_does_not_close_file(self):
        # The parser must leave the caller's file object open.
        with openfile('msg_02.txt', 'rb') as fp:
            email.parser.BytesParser().parse(fp)
            self.assertFalse(fp.closed)

    def test_bytes_parser_on_exception_does_not_close_file(self):
        # Even when parsing raises, the caller's file stays open.
        with openfile('msg_15.txt', 'rb') as fp:
            bytesParser = email.parser.BytesParser
            self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
                              bytesParser(policy=email.policy.strict).parse,
                              fp)
            self.assertFalse(fp.closed)

    def test_parser_does_not_close_file(self):
        with openfile('msg_02.txt', 'r') as fp:
            email.parser.Parser().parse(fp)
            self.assertFalse(fp.closed)

    def test_parser_on_exception_does_not_close_file(self):
        with openfile('msg_15.txt', 'r') as fp:
            parser = email.parser.Parser
            self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
                              parser(policy=email.policy.strict).parse, fp)
            self.assertFalse(fp.closed)

    def test_whitespace_continuation(self):
        eq = self.assertEqual
        # This message contains a line after the Subject: header that has only
        # whitespace, but it is not empty!
        msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam

Here's the message body
""")
        eq(msg['subject'], 'the next line has a space on it\n ')
        eq(msg['message-id'], 'spam')
        eq(msg.get_payload(), "Here's the message body\n")

    def test_whitespace_continuation_last_header(self):
        eq = self.assertEqual
        # Like the previous test, but the subject line is the last
        # header.
        msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20

Here's the message body
""")
        eq(msg['subject'], 'the next line has a space on it\n ')
        eq(msg['message-id'], 'spam')
        eq(msg.get_payload(), "Here's the message body\n")

    def test_crlf_separation(self):
        # \r\n within part bodies is preserved when newline='\n' keeps the
        # file layer from translating it away.
        eq = self.assertEqual
        with openfile('msg_26.txt', newline='\n') as fp:
            msg = Parser().parse(fp)
        eq(len(msg.get_payload()), 2)
        part1 = msg.get_payload(0)
        eq(part1.get_content_type(), 'text/plain')
        eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
        part2 = msg.get_payload(1)
        eq(part2.get_content_type(), 'application/riscos')

    def test_crlf_flatten(self):
        # Using newline='\n' preserves the crlfs in this input file.
        with openfile('msg_26.txt', newline='\n') as fp:
            text = fp.read()
        msg = email.message_from_string(text)
        s = StringIO()
        g = Generator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue(), text)

    maxDiff = None

    def test_multipart_digest_with_extra_mime_headers(self):
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        with openfile('msg_28.txt') as fp:
            msg = email.message_from_file(fp)
        # Structure is:
        # multipart/digest
        #   message/rfc822
        #     text/plain
        #   message/rfc822
        #     text/plain
        eq(msg.is_multipart(), 1)
        eq(len(msg.get_payload()), 2)
        part1 = msg.get_payload(0)
        eq(part1.get_content_type(), 'message/rfc822')
        eq(part1.is_multipart(), 1)
        eq(len(part1.get_payload()), 1)
        part1a = part1.get_payload(0)
        eq(part1a.is_multipart(), 0)
        eq(part1a.get_content_type(), 'text/plain')
        neq(part1a.get_payload(), 'message 1\n')
        # next message/rfc822
        part2 = msg.get_payload(1)
        eq(part2.get_content_type(), 'message/rfc822')
        eq(part2.is_multipart(), 1)
        eq(len(part2.get_payload()), 1)
        part2a = part2.get_payload(0)
        eq(part2a.is_multipart(), 0)
        eq(part2a.get_content_type(), 'text/plain')
        neq(part2a.get_payload(), 'message 2\n')

    def test_three_lines(self):
        # A bug report by Andrew McNamara
        lines = ['From: Andrew Person <aperson@dom.ain',
                 'Subject: Test',
                 'Date: Tue, 20 Aug 2002 16:43:45 +1000']
        msg = email.message_from_string(NL.join(lines))
        self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')

    def test_strip_line_feed_and_carriage_return_in_headers(self):
        eq = self.assertEqual
        # For [ 1002475 ] email message parser doesn't handle \r\n correctly
        value1 = 'text'
        value2 = 'more text'
        m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
            value1, value2)
        msg = email.message_from_string(m)
        eq(msg.get('Header'), value1)
        eq(msg.get('Next-Header'), value2)

    def test_rfc2822_header_syntax(self):
        # Any printable-ASCII field name (sans colon/space) is legal.
        eq = self.assertEqual
        m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
        msg = email.message_from_string(m)
        eq(len(msg), 3)
        eq(sorted(field for field in msg), ['!"#QUX;~', '>From', 'From'])
        eq(msg.get_payload(), 'body')

    def test_rfc2822_space_not_allowed_in_header(self):
        # A space before the colon invalidates the whole header block.
        eq = self.assertEqual
        m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
        msg = email.message_from_string(m)
        eq(len(msg.keys()), 0)

    def test_rfc2822_one_character_header(self):
        eq = self.assertEqual
        m = 'A: first header\nB: second header\nCC: third header\n\nbody'
        msg = email.message_from_string(m)
        headers = msg.keys()
        headers.sort()
        eq(headers, ['A', 'B', 'CC'])
        eq(msg.get_payload(), 'body')

    def test_CRLFLF_at_end_of_part(self):
        # issue 5610: feedparser should not eat two chars from body part ending
        # with "\r\n\n".
        m = (
            "From: foo@bar.com\n"
            "To: baz\n"
            "Mime-Version: 1.0\n"
            "Content-Type: multipart/mixed; boundary=BOUNDARY\n"
            "\n"
            "--BOUNDARY\n"
            "Content-Type: text/plain\n"
            "\n"
            "body ending with CRLF newline\r\n"
            "\n"
            "--BOUNDARY--\n"
          )
        msg = email.message_from_string(m)
        self.assertTrue(msg.get_payload(0).get_payload().endswith('\r\n'))
class Test8BitBytesHandling(TestEmailBase):
    # In Python3 all input is string, but that doesn't work if the actual input
    # uses an 8bit transfer encoding.  To hack around that, in email 5.1 we
    # decode byte streams using the surrogateescape error handler, and
    # reconvert to binary at appropriate places if we detect surrogates.  This
    # doesn't allow us to transform headers with 8bit bytes (they get munged),
    # but it does allow us to parse and preserve them, and to decode body
    # parts that use an 8bit CTE.

    # Template message; format() fills in the charset, CTE, and body line.
    bodytest_msg = textwrap.dedent("""\
        From: foo@bar.com
        To: baz
        Mime-Version: 1.0
        Content-Type: text/plain; charset={charset}
        Content-Transfer-Encoding: {cte}

        {bodyline}
        """)

    def test_known_8bit_CTE(self):
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='8bit',
                                     bodyline='pöstal').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), "pöstal\n")
        self.assertEqual(msg.get_payload(decode=True),
                         "pöstal\n".encode('utf-8'))

    def test_unknown_8bit_CTE(self):
        # Unknown charset: undecodable bytes surface as U+FFFD replacements
        # in the string payload, but decode=True still returns the raw bytes.
        m = self.bodytest_msg.format(charset='notavalidcharset',
                                     cte='8bit',
                                     bodyline='pöstal').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), "p\uFFFD\uFFFDstal\n")
        self.assertEqual(msg.get_payload(decode=True),
                         "pöstal\n".encode('utf-8'))

    def test_8bit_in_quopri_body(self):
        # This is non-RFC compliant data...without 'decode' the library code
        # decodes the body using the charset from the headers, and because the
        # source byte really is utf-8 this works.  This is likely to fail
        # against real dirty data (ie: produce mojibake), but the data is
        # invalid anyway so it is as good a guess as any.  But this means that
        # this test just confirms the current behavior; that behavior is not
        # necessarily the best possible behavior.  With 'decode' it is
        # returning the raw bytes, so that test should be of correct behavior,
        # or at least produce the same result that email4 did.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='quoted-printable',
                                     bodyline='p=C3=B6stál').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), 'p=C3=B6stál\n')
        self.assertEqual(msg.get_payload(decode=True),
                         'pöstál\n'.encode('utf-8'))

    def test_invalid_8bit_in_non_8bit_cte_uses_replace(self):
        # This is similar to the previous test, but proves that if the 8bit
        # byte is undecodeable in the specified charset, it gets replaced
        # by the unicode 'unknown' character.  Again, this may or may not
        # be the ideal behavior.  Note that if decode=False none of the
        # decoders will get involved, so this is the only test we need
        # for this behavior.
        m = self.bodytest_msg.format(charset='ascii',
                                     cte='quoted-printable',
                                     bodyline='p=C3=B6stál').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(), 'p=C3=B6st\uFFFD\uFFFDl\n')
        self.assertEqual(msg.get_payload(decode=True),
                         'pöstál\n'.encode('utf-8'))

    # test_defect_handling:test_invalid_chars_in_base64_payload
    def test_8bit_in_base64_body(self):
        # If we get 8bit bytes in a base64 body, we can just ignore them
        # as being outside the base64 alphabet and decode anyway.  But
        # we register a defect.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='base64',
                                     bodyline='cMO2c3RhbAá=').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(decode=True),
                         'pöstal'.encode('utf-8'))
        self.assertIsInstance(msg.defects[0],
                              errors.InvalidBase64CharactersDefect)

    def test_8bit_in_uuencode_body(self):
        # Sticking an 8bit byte in a uuencode block makes it undecodable by
        # normal means, so the block is returned undecoded, but as bytes.
        m = self.bodytest_msg.format(charset='utf-8',
                                     cte='uuencode',
                                     bodyline='<,.V<W1A; á ').encode('utf-8')
        msg = email.message_from_bytes(m)
        self.assertEqual(msg.get_payload(decode=True),
                         '<,.V<W1A; á \n'.encode('utf-8'))

    # (raw header line, (expected name, expected RFC 2047 re-encoded value))
    headertest_headers = (
        ('From: foo@bar.com', ('From', 'foo@bar.com')),
        ('To: báz', ('To', '=?unknown-8bit?q?b=C3=A1z?=')),
        ('Subject: Maintenant je vous présente mon collègue, le pouf célèbre\n'
            '\tJean de Baddie',
            ('Subject', '=?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
                'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=\n'
                ' =?unknown-8bit?q?_Jean_de_Baddie?=')),
        ('From: göst', ('From', '=?unknown-8bit?b?Z8O2c3Q=?=')),
        )
    headertest_msg = ('\n'.join([src for (src, _) in headertest_headers]) +
                      '\nYes, they are flying.\n').encode('utf-8')

    def test_get_8bit_header(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertEqual(str(msg.get('to')), 'b\uFFFD\uFFFDz')
        self.assertEqual(str(msg['to']), 'b\uFFFD\uFFFDz')

    def test_print_8bit_headers(self):
        # str(msg) re-encodes the 8bit headers as unknown-8bit RFC 2047 words.
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertEqual(str(msg),
                         textwrap.dedent("""\
                             From: {}
                             To: {}
                             Subject: {}
                             From: {}

                             Yes, they are flying.
                             """).format(*[expected[1] for (_, expected) in
                                           self.headertest_headers]))

    def test_values_with_8bit_headers(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertListEqual([str(x) for x in msg.values()],
                             ['foo@bar.com',
                              'b\uFFFD\uFFFDz',
                              'Maintenant je vous pr\uFFFD\uFFFDsente mon '
                                  'coll\uFFFD\uFFFDgue, le pouf '
                                  'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
                                  '\tJean de Baddie',
                              "g\uFFFD\uFFFDst"])

    def test_items_with_8bit_headers(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertListEqual([(str(x), str(y)) for (x, y) in msg.items()],
                             [('From', 'foo@bar.com'),
                              ('To', 'b\uFFFD\uFFFDz'),
                              ('Subject', 'Maintenant je vous '
                                              'pr\uFFFD\uFFFDsente '
                                              'mon coll\uFFFD\uFFFDgue, le pouf '
                                              'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
                                              '\tJean de Baddie'),
                              ('From', 'g\uFFFD\uFFFDst')])

    def test_get_all_with_8bit_headers(self):
        msg = email.message_from_bytes(self.headertest_msg)
        self.assertListEqual([str(x) for x in msg.get_all('from')],
                             ['foo@bar.com',
                              'g\uFFFD\uFFFDst'])

    def test_get_content_type_with_8bit(self):
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/pl\xA7in; charset=utf-8
            """).encode('latin-1'))
        self.assertEqual(msg.get_content_type(), "text/pl\uFFFDin")
        self.assertEqual(msg.get_content_maintype(), "text")
        self.assertEqual(msg.get_content_subtype(), "pl\uFFFDin")

    # test_headerregistry.TestContentTypeHeader.non_ascii_in_params
    def test_get_params_with_8bit(self):
        msg = email.message_from_bytes(
            'X-Header: foo=\xa7ne; b\xa7r=two; baz=three\n'.encode('latin-1'))
        self.assertEqual(msg.get_params(header='x-header'),
                         [('foo', '\uFFFDne'), ('b\uFFFDr', 'two'), ('baz', 'three')])
        self.assertEqual(msg.get_param('Foo', header='x-header'), '\uFFFdne')
        # XXX: someday you might be able to get 'b\xa7r', for now you can't.
        self.assertEqual(msg.get_param('b\xa7r', header='x-header'), None)

    # test_headerregistry.TestContentTypeHeader.non_ascii_in_rfc2231_value
    def test_get_rfc2231_params_with_8bit(self):
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/plain; charset=us-ascii;
             title*=us-ascii'en'This%20is%20not%20f\xa7n"""
            ).encode('latin-1'))
        self.assertEqual(msg.get_param('title'),
                         ('us-ascii', 'en', 'This is not f\uFFFDn'))

    def test_set_rfc2231_params_with_8bit(self):
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/plain; charset=us-ascii;
             title*=us-ascii'en'This%20is%20not%20f\xa7n"""
            ).encode('latin-1'))
        msg.set_param('title', 'test')
        self.assertEqual(msg.get_param('title'), 'test')

    def test_del_rfc2231_params_with_8bit(self):
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Type: text/plain; charset=us-ascii;
             title*=us-ascii'en'This%20is%20not%20f\xa7n"""
            ).encode('latin-1'))
        msg.del_param('title')
        self.assertEqual(msg.get_param('title'), None)
        self.assertEqual(msg.get_content_maintype(), 'text')

    def test_get_payload_with_8bit_cte_header(self):
        # A garbled CTE header name means no decoder applies; the payload
        # comes back as-is (and as bytes with decode=True).
        msg = email.message_from_bytes(textwrap.dedent("""\
            Content-Transfer-Encoding: b\xa7se64
            Content-Type: text/plain; charset=latin-1

            payload
            """).encode('latin-1'))
        self.assertEqual(msg.get_payload(), 'payload\n')
        self.assertEqual(msg.get_payload(decode=True), b'payload\n')

    non_latin_bin_msg = textwrap.dedent("""\
        From: foo@bar.com
        To: báz
        Subject: Maintenant je vous présente mon collègue, le pouf célèbre
        \tJean de Baddie
        Mime-Version: 1.0
        Content-Type: text/plain; charset="utf-8"
        Content-Transfer-Encoding: 8bit

        Да, они летят.
        """).encode('utf-8')

    def test_bytes_generator(self):
        # BytesGenerator must round-trip 8bit input byte-for-byte.
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = BytesIO()
        email.generator.BytesGenerator(out).flatten(msg)
        self.assertEqual(out.getvalue(), self.non_latin_bin_msg)

    def test_bytes_generator_handles_None_body(self):
        # Issue 11019
        msg = email.message.Message()
        out = BytesIO()
        email.generator.BytesGenerator(out).flatten(msg)
        self.assertEqual(out.getvalue(), b"\n")

    non_latin_bin_msg_as7bit_wrapped = textwrap.dedent("""\
        From: foo@bar.com
        To: =?unknown-8bit?q?b=C3=A1z?=
        Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_coll=C3=A8gue?=
         =?unknown-8bit?q?=2C_le_pouf_c=C3=A9l=C3=A8bre?=
         =?unknown-8bit?q?_Jean_de_Baddie?=
        Mime-Version: 1.0
        Content-Type: text/plain; charset="utf-8"
        Content-Transfer-Encoding: base64

        0JTQsCwg0L7QvdC4INC70LXRgtGP0YIuCg==
        """)

    def test_generator_handles_8bit(self):
        # The text Generator must downgrade 8bit headers/bodies to 7bit.
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = StringIO()
        email.generator.Generator(out).flatten(msg)
        self.assertEqual(out.getvalue(), self.non_latin_bin_msg_as7bit_wrapped)

    def test_str_generator_should_not_mutate_msg_when_handling_8bit(self):
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = BytesIO()
        BytesGenerator(out).flatten(msg)
        orig_value = out.getvalue()
        Generator(StringIO()).flatten(msg)  # Should not mutate msg!
        out = BytesIO()
        BytesGenerator(out).flatten(msg)
        self.assertEqual(out.getvalue(), orig_value)

    def test_bytes_generator_with_unix_from(self):
        # The unixfrom contains a current date, so we can't check it
        # literally.  Just make sure the first word is 'From' and the
        # rest of the message matches the input.
        msg = email.message_from_bytes(self.non_latin_bin_msg)
        out = BytesIO()
        email.generator.BytesGenerator(out).flatten(msg, unixfrom=True)
        lines = out.getvalue().split(b'\n')
        self.assertEqual(lines[0].split()[0], b'From')
        self.assertEqual(b'\n'.join(lines[1:]), self.non_latin_bin_msg)

    # Same message with the folded Subject collapsed onto fewer lines.
    non_latin_bin_msg_as7bit = non_latin_bin_msg_as7bit_wrapped.split('\n')
    non_latin_bin_msg_as7bit[2:4] = [
        'Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
            'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=']
    non_latin_bin_msg_as7bit = '\n'.join(non_latin_bin_msg_as7bit)

    def test_message_from_binary_file(self):
        fn = 'test.msg'
        self.addCleanup(unlink, fn)
        with open(fn, 'wb') as testfile:
            testfile.write(self.non_latin_bin_msg)
        with open(fn, 'rb') as testfile:
            m = email.parser.BytesParser().parse(testfile)
        self.assertEqual(str(m), self.non_latin_bin_msg_as7bit)

    latin_bin_msg = textwrap.dedent("""\
        From: foo@bar.com
        To: Dinsdale
        Subject: Nudge nudge, wink, wink
        Mime-Version: 1.0
        Content-Type: text/plain; charset="latin-1"
        Content-Transfer-Encoding: 8bit

        oh là là, know what I mean, know what I mean?
        """).encode('latin-1')

    latin_bin_msg_as7bit = textwrap.dedent("""\
        From: foo@bar.com
        To: Dinsdale
        Subject: Nudge nudge, wink, wink
        Mime-Version: 1.0
        Content-Type: text/plain; charset="iso-8859-1"
        Content-Transfer-Encoding: quoted-printable

        oh l=E0 l=E0, know what I mean, know what I mean?
        """)

    def test_string_generator_reencodes_to_quopri_when_appropriate(self):
        m = email.message_from_bytes(self.latin_bin_msg)
        self.assertEqual(str(m), self.latin_bin_msg_as7bit)

    def test_decoded_generator_emits_unicode_body(self):
        m = email.message_from_bytes(self.latin_bin_msg)
        out = StringIO()
        email.generator.DecodedGenerator(out).flatten(m)
        #DecodedHeader output contains an extra blank line compared
        #to the input message.  RDM: not sure if this is a bug or not,
        #but it is not specific to the 8bit->7bit conversion.
        self.assertEqual(out.getvalue(),
                         self.latin_bin_msg.decode('latin-1')+'\n')

    def test_bytes_feedparser(self):
        # Feed the binary message in 10-byte slices; result matches a
        # whole-message parse.
        bfp = email.feedparser.BytesFeedParser()
        for i in range(0, len(self.latin_bin_msg), 10):
            bfp.feed(self.latin_bin_msg[i:i+10])
        m = bfp.close()
        self.assertEqual(str(m), self.latin_bin_msg_as7bit)

    def test_crlf_flatten(self):
        with openfile('msg_26.txt', 'rb') as fp:
            text = fp.read()
        msg = email.message_from_bytes(text)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue(), text)

    def test_8bit_multipart(self):
        # Issue 11605: 8bit parts of a multipart must survive a
        # parse/flatten round trip byte-for-byte.
        source = textwrap.dedent("""\
            Date: Fri, 18 Mar 2011 17:15:43 +0100
            To: foo@example.com
            From: foodwatch-Newsletter <bar@example.com>
            Subject: Aktuelles zu Japan, Klonfleisch und Smiley-System
            Message-ID: <76a486bee62b0d200f33dc2ca08220ad@localhost.localdomain>
            MIME-Version: 1.0
            Content-Type: multipart/alternative;
                    boundary="b1_76a486bee62b0d200f33dc2ca08220ad"

            --b1_76a486bee62b0d200f33dc2ca08220ad
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit

            Guten Tag, ,

            mit großer Betroffenheit verfolgen auch wir im foodwatch-Team die
            Nachrichten aus Japan.

            --b1_76a486bee62b0d200f33dc2ca08220ad
            Content-Type: text/html; charset="utf-8"
            Content-Transfer-Encoding: 8bit

            <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
                "http://www.w3.org/TR/html4/loose.dtd">
            <html lang="de">
            <head>
                <title>foodwatch - Newsletter</title>
            </head>
            <body>
            <p>mit großer Betroffenheit verfolgen auch wir im foodwatch-Team
            die Nachrichten aus Japan.</p>
            </body>
            </html>
            --b1_76a486bee62b0d200f33dc2ca08220ad--
            """).encode('utf-8')
        msg = email.message_from_bytes(source)
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(msg)
        self.assertEqual(s.getvalue(), source)

    def test_bytes_generator_b_encoding_linesep(self):
        # Issue 14062: b encoding was tacking on an extra \n.
        m = Message()
        # This has enough non-ascii that it should always end up b encoded.
        m['Subject'] = Header('žluťoučký kůň')
        s = BytesIO()
        g = email.generator.BytesGenerator(s)
        g.flatten(m, linesep='\r\n')
        self.assertEqual(
            s.getvalue(),
            b'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')

    def test_generator_b_encoding_linesep(self):
        # Since this broke in ByteGenerator, test Generator for completeness.
        m = Message()
        # This has enough non-ascii that it should always end up b encoded.
        m['Subject'] = Header('žluťoučký kůň')
        s = StringIO()
        g = email.generator.Generator(s)
        g.flatten(m, linesep='\r\n')
        self.assertEqual(
            s.getvalue(),
            'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')

    maxDiff = None
class BaseTestBytesGeneratorIdempotent:
    """Mixin: regenerating a parsed message must reproduce it byte-for-byte.

    Concrete subclasses supply linesep/blinesep and normalize_linesep_regex
    to pick the line-ending flavor under test.
    """

    maxDiff = None

    def _msgobj(self, filename):
        # Normalize the fixture's line endings to this subclass's flavor
        # before parsing, so generated output can be compared exactly.
        with openfile(filename, 'rb') as fp:
            raw = fp.read()
        normalized = self.normalize_linesep_regex.sub(self.blinesep, raw)
        return email.message_from_bytes(normalized), normalized

    def _idempotent(self, msg, data, unixfrom=False):
        buf = BytesIO()
        generator = email.generator.BytesGenerator(buf, maxheaderlen=0)
        generator.flatten(msg, unixfrom=unixfrom, linesep=self.linesep)
        self.assertEqual(data, buf.getvalue())
class TestBytesGeneratorIdempotentNL(BaseTestBytesGeneratorIdempotent,
                                     TestIdempotent):
    # \n-flavored idempotency: every \r\n in the fixtures is first
    # normalized to a bare \n.
    linesep = '\n'
    blinesep = b'\n'
    normalize_linesep_regex = re.compile(br'\r\n')
class TestBytesGeneratorIdempotentCRLF(BaseTestBytesGeneratorIdempotent,
                                       TestIdempotent):
    # \r\n-flavored idempotency: any \n NOT already preceded by \r is
    # first normalized to \r\n.
    linesep = '\r\n'
    blinesep = b'\r\n'
    normalize_linesep_regex = re.compile(br'(?<!\r)\n')
class TestBase64(unittest.TestCase):
    """Exercise the email.base64mime helpers directly."""

    def test_len(self):
        # header_length() must match the length of an actual body_encode()
        # of the same data (without the RFC 2047 chrome).
        self.assertEqual(base64mime.header_length('hello'),
                         len(base64mime.body_encode(b'hello', eol='')))
        for size in range(15):
            # base64 emits 4 output chars per 3 input bytes, rounded up.
            expected = 4 * ((size + 2) // 3)
            self.assertEqual(base64mime.header_length('x' * size), expected)

    def test_decode(self):
        self.assertEqual(base64mime.decode(''), b'')
        self.assertEqual(base64mime.decode('aGVsbG8='), b'hello')

    def test_encode(self):
        check = self.assertEqual
        check(base64mime.body_encode(b''), b'')
        check(base64mime.body_encode(b'hello'), 'aGVsbG8=\n')
        # Embedded newlines are just more bytes to encode (binary-safe).
        check(base64mime.body_encode(b'hello\n'), 'aGVsbG8K\n')
        # maxlinelen wraps the encoded output.
        check(base64mime.body_encode(b'xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
        # eol selects the line terminator.
        check(base64mime.body_encode(b'xxxx ' * 20, maxlinelen=40, eol='\r\n'),
              """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")

    def test_header_encode(self):
        he = base64mime.header_encode
        check = self.assertEqual
        check(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
        # Embedded line breaks are encoded, not folded.
        check(he('hello\r\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
        check(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
        # The charset option controls the advertised charset label.
        check(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
        check(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
class TestQuopri(unittest.TestCase):
def setUp(self):
# Set of characters (as byte integers) that don't need to be encoded
# in headers.
self.hlit = list(chain(
range(ord('a'), ord('z') + 1),
range(ord('A'), ord('Z') + 1),
range(ord('0'), ord('9') + 1),
(c for c in b'!*+-/')))
# Set of characters (as byte integers) that do need to be encoded in
# headers.
self.hnon = [c for c in range(256) if c not in self.hlit]
assert len(self.hlit) + len(self.hnon) == 256
# Set of characters (as byte integers) that don't need to be encoded
# in bodies.
self.blit = list(range(ord(' '), ord('~') + 1))
self.blit.append(ord('\t'))
self.blit.remove(ord('='))
# Set of characters (as byte integers) that do need to be encoded in
# bodies.
self.bnon = [c for c in range(256) if c not in self.blit]
assert len(self.blit) + len(self.bnon) == 256
def test_quopri_header_check(self):
for c in self.hlit:
self.assertFalse(quoprimime.header_check(c),
'Should not be header quopri encoded: %s' % chr(c))
for c in self.hnon:
self.assertTrue(quoprimime.header_check(c),
'Should be header quopri encoded: %s' % chr(c))
def test_quopri_body_check(self):
for c in self.blit:
self.assertFalse(quoprimime.body_check(c),
'Should not be body quopri encoded: %s' % chr(c))
for c in self.bnon:
self.assertTrue(quoprimime.body_check(c),
'Should be body quopri encoded: %s' % chr(c))
def test_header_quopri_len(self):
eq = self.assertEqual
eq(quoprimime.header_length(b'hello'), 5)
# RFC 2047 chrome is not included in header_length().
eq(len(quoprimime.header_encode(b'hello', charset='xxx')),
quoprimime.header_length(b'hello') +
# =?xxx?q?...?= means 10 extra characters
10)
eq(quoprimime.header_length(b'h@e@l@l@o@'), 20)
# RFC 2047 chrome is not included in header_length().
eq(len(quoprimime.header_encode(b'h@e@l@l@o@', charset='xxx')),
quoprimime.header_length(b'h@e@l@l@o@') +
# =?xxx?q?...?= means 10 extra characters
10)
for c in self.hlit:
eq(quoprimime.header_length(bytes([c])), 1,
'expected length 1 for %r' % chr(c))
for c in self.hnon:
# Space is special; it's encoded to _
if c == ord(' '):
continue
eq(quoprimime.header_length(bytes([c])), 3,
'expected length 3 for %r' % chr(c))
eq(quoprimime.header_length(b' '), 1)
def test_body_quopri_len(self):
eq = self.assertEqual
for c in self.blit:
eq(quoprimime.body_length(bytes([c])), 1)
for c in self.bnon:
eq(quoprimime.body_length(bytes([c])), 3)
def test_quote_unquote_idempotent(self):
for x in range(256):
c = chr(x)
self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)
def _test_header_encode(self, header, expected_encoded_header, charset=None):
if charset is None:
encoded_header = quoprimime.header_encode(header)
else:
encoded_header = quoprimime.header_encode(header, charset)
self.assertEqual(encoded_header, expected_encoded_header)
def test_header_encode_null(self):
self._test_header_encode(b'', '')
def test_header_encode_one_word(self):
self._test_header_encode(b'hello', '=?iso-8859-1?q?hello?=')
def test_header_encode_two_lines(self):
self._test_header_encode(b'hello\nworld',
'=?iso-8859-1?q?hello=0Aworld?=')
def test_header_encode_non_ascii(self):
self._test_header_encode(b'hello\xc7there',
'=?iso-8859-1?q?hello=C7there?=')
def test_header_encode_alt_charset(self):
self._test_header_encode(b'hello', '=?iso-8859-2?q?hello?=',
charset='iso-8859-2')
def _test_header_decode(self, encoded_header, expected_decoded_header):
    """quoprimime.header_decode(*encoded_header*) must yield the expected text."""
    self.assertEqual(quoprimime.header_decode(encoded_header),
                     expected_decoded_header)
# Inverse direction: quoted-printable header text back to the original.
def test_header_decode_null(self):
    self._test_header_decode('', '')

def test_header_decode_one_word(self):
    self._test_header_decode('hello', 'hello')

def test_header_decode_two_lines(self):
    self._test_header_decode('hello=0Aworld', 'hello\nworld')

def test_header_decode_non_ascii(self):
    self._test_header_decode('hello=C7there', 'hello\xc7there')

def test_header_decode_re_bug_18380(self):
    # Issue 18380: Call re.sub with a positional argument for flags in the wrong position
    self.assertEqual(quoprimime.header_decode('=30' * 257), '0' * 257)
def _test_decode(self, encoded, expected_decoded, eol=None):
    """quoprimime.decode(*encoded*) must produce *expected_decoded*.

    When *eol* is None, decode() is called without an eol argument so
    its default line ending is exercised.
    """
    call_kwargs = {} if eol is None else {'eol': eol}
    self.assertEqual(quoprimime.decode(encoded, **call_kwargs),
                     expected_decoded)
# Exhaustive decode() cases: EOL normalization (\r\n, \n, \r, custom eol),
# soft line breaks (trailing =), trailing-whitespace stripping, and =XX
# unquoting in upper and lower case.
def test_decode_null_word(self):
    self._test_decode('', '')

def test_decode_null_line_null_word(self):
    self._test_decode('\r\n', '\n')

def test_decode_one_word(self):
    self._test_decode('hello', 'hello')

def test_decode_one_word_eol(self):
    # No line ending in the input, so the custom eol never appears.
    self._test_decode('hello', 'hello', eol='X')

def test_decode_one_line(self):
    self._test_decode('hello\r\n', 'hello\n')

def test_decode_one_line_lf(self):
    self._test_decode('hello\n', 'hello\n')

def test_decode_one_line_cr(self):
    self._test_decode('hello\r', 'hello\n')

def test_decode_one_line_nl(self):
    self._test_decode('hello\n', 'helloX', eol='X')

def test_decode_one_line_crnl(self):
    self._test_decode('hello\r\n', 'helloX', eol='X')

def test_decode_one_line_one_word(self):
    self._test_decode('hello\r\nworld', 'hello\nworld')

def test_decode_one_line_one_word_eol(self):
    self._test_decode('hello\r\nworld', 'helloXworld', eol='X')

def test_decode_two_lines(self):
    self._test_decode('hello\r\nworld\r\n', 'hello\nworld\n')

def test_decode_two_lines_eol(self):
    self._test_decode('hello\r\nworld\r\n', 'helloXworldX', eol='X')

def test_decode_one_long_line(self):
    self._test_decode('Spam' * 250, 'Spam' * 250)

def test_decode_one_space(self):
    # Trailing whitespace on a line is dropped.
    self._test_decode(' ', '')

def test_decode_multiple_spaces(self):
    self._test_decode(' ' * 5, '')

def test_decode_one_line_trailing_spaces(self):
    self._test_decode('hello \r\n', 'hello\n')

def test_decode_two_lines_trailing_spaces(self):
    self._test_decode('hello \r\nworld \r\n', 'hello\nworld\n')

def test_decode_quoted_word(self):
    self._test_decode('=22quoted=20words=22', '"quoted words"')

def test_decode_uppercase_quoting(self):
    self._test_decode('ab=CD=EF', 'ab\xcd\xef')

def test_decode_lowercase_quoting(self):
    self._test_decode('ab=cd=ef', 'ab\xcd\xef')

def test_decode_soft_line_break(self):
    # A trailing '=' joins the next line without inserting an eol.
    self._test_decode('soft line=\r\nbreak', 'soft linebreak')

def test_decode_false_quoting(self):
    # '=' sequences that are not valid =XX escapes pass through verbatim.
    self._test_decode('A=1,B=A ==> A+B==2', 'A=1,B=A ==> A+B==2')
def _test_encode(self, body, expected_encoded_body, maxlinelen=None, eol=None):
    """body_encode(*body*) must equal *expected_encoded_body*.

    None for *maxlinelen*/*eol* means "use body_encode's default"; the
    argument is then omitted from the call so the default path is tested.
    For conventional EOLs the encoded result is additionally re-split to
    verify that no line exceeds *maxlinelen*.
    """
    call_kwargs = {}
    if maxlinelen is not None:
        call_kwargs['maxlinelen'] = maxlinelen
    else:
        maxlinelen = 76          # body_encode's default
    if eol is not None:
        call_kwargs['eol'] = eol
    else:
        eol = '\n'               # body_encode's default
    actual = quoprimime.body_encode(body, **call_kwargs)
    self.assertEqual(actual, expected_encoded_body)
    if eol in ('\n', '\r\n'):
        # We know how to split the result back into lines, so maxlinelen
        # can be checked.
        for encoded_line in actual.splitlines():
            self.assertLessEqual(len(encoded_line), maxlinelen)
# body_encode() cases: EOL handling, trailing whitespace quoting, soft
# line breaks at/around maxlinelen, and '=' quoting.
def test_encode_null(self):
    self._test_encode('', '')

def test_encode_null_lines(self):
    self._test_encode('\n\n', '\n\n')

def test_encode_one_line(self):
    self._test_encode('hello\n', 'hello\n')

def test_encode_one_line_crlf(self):
    # CRLF input is normalized to the default '\n' eol.
    self._test_encode('hello\r\n', 'hello\n')

def test_encode_one_line_eol(self):
    self._test_encode('hello\n', 'hello\r\n', eol='\r\n')

def test_encode_one_line_eol_after_non_ascii(self):
    # issue 20206; see changeset 0cf700464177 for why the encode/decode.
    self._test_encode('hello\u03c5\n'.encode('utf-8').decode('latin1'),
                      'hello=CF=85\r\n', eol='\r\n')

def test_encode_one_space(self):
    # Trailing space must be quoted so it survives transport.
    self._test_encode(' ', '=20')

def test_encode_one_line_one_space(self):
    self._test_encode(' \n', '=20\n')

# XXX: body_encode() expect strings, but uses ord(char) from these strings
# to index into a 256-entry list. For code points above 255, this will fail.
# Should there be a check for 8-bit only ord() values in body, or at least
# a comment about the expected input?
def test_encode_two_lines_one_space(self):
    self._test_encode(' \n \n', '=20\n=20\n')

def test_encode_one_word_trailing_spaces(self):
    # Only the final space of a trailing run is quoted.
    self._test_encode('hello ', 'hello =20')

def test_encode_one_line_trailing_spaces(self):
    self._test_encode('hello \n', 'hello =20\n')

def test_encode_one_word_trailing_tab(self):
    self._test_encode('hello \t', 'hello =09')

def test_encode_one_line_trailing_tab(self):
    self._test_encode('hello \t\n', 'hello =09\n')

def test_encode_trailing_space_before_maxlinelen(self):
    self._test_encode('abcd \n1234', 'abcd =\n\n1234', maxlinelen=6)

def test_encode_trailing_space_at_maxlinelen(self):
    self._test_encode('abcd \n1234', 'abcd=\n=20\n1234', maxlinelen=5)

def test_encode_trailing_space_beyond_maxlinelen(self):
    self._test_encode('abcd \n1234', 'abc=\nd=20\n1234', maxlinelen=4)

def test_encode_whitespace_lines(self):
    self._test_encode(' \n' * 5, '=20\n' * 5)

def test_encode_quoted_equals(self):
    self._test_encode('a = b', 'a =3D b')

def test_encode_one_long_string(self):
    self._test_encode('x' * 100, 'x' * 75 + '=\n' + 'x' * 25)

def test_encode_one_long_line(self):
    self._test_encode('x' * 100 + '\n', 'x' * 75 + '=\n' + 'x' * 25 + '\n')

def test_encode_one_very_long_line(self):
    self._test_encode('x' * 200 + '\n',
                      2 * ('x' * 75 + '=\n') + 'x' * 50 + '\n')

def test_encode_shortest_maxlinelen(self):
    self._test_encode('=' * 5, '=3D=\n' * 4 + '=3D', maxlinelen=4)

def test_encode_maxlinelen_too_small(self):
    # A quoted byte (=XX) plus the soft break '=' needs at least 4 columns.
    self.assertRaises(ValueError, self._test_encode, '', '', maxlinelen=3)

def test_encode(self):
    eq = self.assertEqual
    eq(quoprimime.body_encode(''), '')
    eq(quoprimime.body_encode('hello'), 'hello')
    # Test the binary flag
    eq(quoprimime.body_encode('hello\r\nworld'), 'hello\nworld')
    # Test the maxlinelen arg
    eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
    # Test the eol argument
    eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'),
       """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
    eq(quoprimime.body_encode("""\
one line

two line"""), """\
one line

two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):
    """Tests for email.charset.Charset header/body encoding selection."""

    def tearDown(self):
        # Drop the 'fake' charset that test_body_encode registers so it
        # cannot leak into other tests.
        from email import charset as CharsetModule
        try:
            del CharsetModule.CHARSETS['fake']
        except KeyError:
            pass

    def test_codec_encodeable(self):
        eq = self.assertEqual
        # Make sure us-ascii = no Unicode conversion
        c = Charset('us-ascii')
        eq(c.header_encode('Hello World!'), 'Hello World!')
        # Test 8-bit idempotency with us-ascii
        s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
        self.assertRaises(UnicodeError, c.header_encode, s)
        c = Charset('utf-8')
        eq(c.header_encode(s), '=?utf-8?b?wqTCosKkwqTCpMKmwqTCqMKkwqo=?=')

    def test_body_encode(self):
        eq = self.assertEqual
        # Try a charset with QP body encoding
        c = Charset('iso-8859-1')
        eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
        # Try a charset with Base64 body encoding
        c = Charset('utf-8')
        eq('aGVsbG8gd29ybGQ=\n', c.body_encode(b'hello world'))
        # Try a charset with None body encoding
        c = Charset('us-ascii')
        eq('hello world', c.body_encode('hello world'))
        # Try the convert argument, where input codec != output codec
        c = Charset('euc-jp')
        # With apologies to Tokio Kikuchi ;)
        # XXX FIXME
##        try:
##            eq('\x1b$B5FCO;~IW\x1b(B',
##               c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
##            eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
##               c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
##        except LookupError:
##            # We probably don't have the Japanese codecs installed
##            pass
        # Testing SF bug #625509, which we have to fake, since there are no
        # built-in encodings where the header encoding is QP but the body
        # encoding is not.
        from email import charset as CharsetModule
        CharsetModule.add_charset('fake', CharsetModule.QP, None, 'utf-8')
        c = Charset('fake')
        eq('hello world', c.body_encode('hello world'))

    def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        # Non-ascii charset names are rejected outright.
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
    """Tests for email.header.Header encoding, decoding and folding (RFC 2047)."""

    def test_simple(self):
        eq = self.ndiffAssertEqual
        h = Header('Hello World!')
        eq(h.encode(), 'Hello World!')
        h.append(' Goodbye World!')
        # Chunks are joined with a space; the appended chunk's own leading
        # space is kept, hence the double space.
        eq(h.encode(), 'Hello World!  Goodbye World!')

    def test_simple_surprise(self):
        eq = self.ndiffAssertEqual
        h = Header('Hello World!')
        eq(h.encode(), 'Hello World!')
        h.append('Goodbye World!')
        eq(h.encode(), 'Hello World! Goodbye World!')

    def test_header_needs_no_decoding(self):
        h = 'no decoding needed'
        self.assertEqual(decode_header(h), [(h, None)])

    def test_long(self):
        h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
                   maxlinelen=76)
        # Every folded line must respect maxlinelen.
        for l in h.encode(splitchars=' ').split('\n '):
            self.assertLessEqual(len(l), 76)

    def test_multilingual(self):
        eq = self.ndiffAssertEqual
        g = Charset("iso-8859-1")
        cz = Charset("iso-8859-2")
        utf8 = Charset("utf-8")
        g_head = (b'Die Mieter treten hier ein werden mit einem '
                  b'Foerderband komfortabel den Korridor entlang, '
                  b'an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, '
                  b'gegen die rotierenden Klingen bef\xf6rdert. ')
        cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
                   b'd\xf9vtipu.. ')
        utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
                     '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
                     '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
                     '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
                     '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
                     'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
                     'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
                     '\u3044\u307e\u3059\u3002')
        h = Header(g_head, g)
        h.append(cz_head, cz)
        h.append(utf8_head, utf8)
        enc = h.encode(maxlinelen=76)
        eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_kom?=
 =?iso-8859-1?q?fortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wand?=
 =?iso-8859-1?q?gem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6r?=
 =?iso-8859-1?q?dert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
 =?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
 =?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
 =?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
 =?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
 =?utf-8?b?IE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5k?=
 =?utf-8?b?IGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIA=?=
 =?utf-8?b?44Gj44Gm44GE44G+44GZ44CC?=""")
        decoded = decode_header(enc)
        eq(len(decoded), 3)
        eq(decoded[0], (g_head, 'iso-8859-1'))
        eq(decoded[1], (cz_head, 'iso-8859-2'))
        eq(decoded[2], (utf8_head.encode('utf-8'), 'utf-8'))
        ustr = str(h)
        eq(ustr,
           (b'Die Mieter treten hier ein werden mit einem Foerderband '
            b'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
            b'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
            b'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
            b'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
            b'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
            b'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
            b'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
            b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
            b'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
            b'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
            b'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
            b'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
            b'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
            b'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
            b'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82'
            ).decode('utf-8'))
        # Test make_header()
        newh = make_header(decode_header(enc))
        eq(newh, h)

    def test_empty_header_encode(self):
        h = Header()
        self.assertEqual(h.encode(), '')

    def test_header_ctor_default_args(self):
        eq = self.ndiffAssertEqual
        h = Header()
        eq(h, '')
        h.append('foo', Charset('iso-8859-1'))
        eq(h, 'foo')

    def test_explicit_maxlinelen(self):
        eq = self.ndiffAssertEqual
        hstr = ('A very long line that must get split to something other '
                'than at the 76th character boundary to test the non-default '
                'behavior')
        h = Header(hstr)
        eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
 character boundary to test the non-default behavior''')
        eq(str(h), hstr)
        # header_name shortens the first line by len('Subject: ').
        h = Header(hstr, header_name='Subject')
        eq(h.encode(), '''\
A very long line that must get split to something other than at the
 76th character boundary to test the non-default behavior''')
        eq(str(h), hstr)
        h = Header(hstr, maxlinelen=1024, header_name='Subject')
        eq(h.encode(), hstr)
        eq(str(h), hstr)

    def test_quopri_splittable(self):
        eq = self.ndiffAssertEqual
        h = Header(charset='iso-8859-1', maxlinelen=20)
        x = 'xxxx ' * 20
        h.append(x)
        s = h.encode()
        eq(s, """\
=?iso-8859-1?q?xxx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_x?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?x_?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?xx?=
 =?iso-8859-1?q?_?=""")
        # Folding must round-trip back to the original text.
        eq(x, str(make_header(decode_header(s))))
        h = Header(charset='iso-8859-1', maxlinelen=40)
        h.append('xxxx ' * 20)
        s = h.encode()
        eq(s, """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xxx?=
 =?iso-8859-1?q?x_xxxx_xxxx_xxxx_xxxx_?=
 =?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
 =?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
 =?iso-8859-1?q?_xxxx_xxxx_?=""")
        eq(x, str(make_header(decode_header(s))))

    def test_base64_splittable(self):
        eq = self.ndiffAssertEqual
        h = Header(charset='koi8-r', maxlinelen=20)
        x = 'xxxx ' * 20
        h.append(x)
        s = h.encode()
        eq(s, """\
=?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IHh4?=
 =?koi8-r?b?eHgg?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IHh4?=
 =?koi8-r?b?eHgg?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IHh4?=
 =?koi8-r?b?eHgg?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IHh4?=
 =?koi8-r?b?eHgg?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IHh4?=
 =?koi8-r?b?eHgg?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IHh4?=
 =?koi8-r?b?eHgg?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?eCB4?=
 =?koi8-r?b?eHh4?=
 =?koi8-r?b?IA==?=""")
        eq(x, str(make_header(decode_header(s))))
        h = Header(charset='koi8-r', maxlinelen=40)
        h.append(x)
        s = h.encode()
        eq(s, """\
=?koi8-r?b?eHh4eCB4eHh4IHh4eHggeHh4?=
 =?koi8-r?b?eCB4eHh4IHh4eHggeHh4eCB4?=
 =?koi8-r?b?eHh4IHh4eHggeHh4eCB4eHh4?=
 =?koi8-r?b?IHh4eHggeHh4eCB4eHh4IHh4?=
 =?koi8-r?b?eHggeHh4eCB4eHh4IHh4eHgg?=
 =?koi8-r?b?eHh4eCB4eHh4IA==?=""")
        eq(x, str(make_header(decode_header(s))))

    def test_us_ascii_header(self):
        eq = self.assertEqual
        s = 'hello'
        x = decode_header(s)
        eq(x, [('hello', None)])
        h = make_header(x)
        eq(s, h.encode())

    def test_string_charset(self):
        eq = self.assertEqual
        h = Header()
        h.append('hello', 'iso-8859-1')
        eq(h, 'hello')

##    def test_unicode_error(self):
##        raises = self.assertRaises
##        raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
##        raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
##        h = Header()
##        raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
##        raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
##        raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')

    def test_utf8_shortest(self):
        # utf-8 picks q or b encoding, whichever is shorter.
        eq = self.assertEqual
        h = Header('p\xf6stal', 'utf-8')
        eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
        h = Header('\u83ca\u5730\u6642\u592b', 'utf-8')
        eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')

    def test_bad_8bit_header(self):
        raises = self.assertRaises
        eq = self.assertEqual
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        raises(UnicodeError, Header, x)
        h = Header()
        raises(UnicodeError, h.append, x)
        e = x.decode('utf-8', 'replace')
        eq(str(Header(x, errors='replace')), e)
        h.append(x, errors='replace')
        eq(str(h), e)

    def test_escaped_8bit_header(self):
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        e = x.decode('ascii', 'surrogateescape')
        h = Header(e, charset=email.charset.UNKNOWN8BIT)
        self.assertEqual(str(h),
                         'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
        self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])

    def test_header_handles_binary_unknown8bit(self):
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        h = Header(x, charset=email.charset.UNKNOWN8BIT)
        self.assertEqual(str(h),
                         'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
        self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])

    def test_make_header_handles_binary_unknown8bit(self):
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        h = Header(x, charset=email.charset.UNKNOWN8BIT)
        h2 = email.header.make_header(email.header.decode_header(h))
        self.assertEqual(str(h2),
                         'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
        self.assertEqual(email.header.decode_header(h2), [(x, 'unknown-8bit')])

    def test_modify_returned_list_does_not_change_header(self):
        h = Header('test')
        chunks = email.header.decode_header(h)
        chunks.append(('ascii', 'test2'))
        self.assertEqual(str(h), 'test')

    def test_encoded_adjacent_nonencoded(self):
        eq = self.assertEqual
        h = Header()
        h.append('hello', 'iso-8859-1')
        h.append('world')
        s = h.encode()
        eq(s, '=?iso-8859-1?q?hello?= world')
        h = make_header(decode_header(s))
        eq(h.encode(), s)

    def test_whitespace_keeper(self):
        eq = self.assertEqual
        s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
        parts = decode_header(s)
        eq(parts, [(b'Subject: ', None), (b'\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), (b' zz.', None)])
        hdr = make_header(parts)
        eq(hdr.encode(),
           'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')

    def test_broken_base64_header(self):
        raises = self.assertRaises
        s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
        raises(errors.HeaderParseError, decode_header, s)

    def test_shift_jis_charset(self):
        h = Header('文', charset='shift_jis')
        self.assertEqual(h.encode(), '=?iso-2022-jp?b?GyRCSjgbKEI=?=')

    def test_flatten_header_with_no_value(self):
        # Issue 11401 (regression from email 4.x) Note that the space after
        # the header doesn't reflect the input, but this is also the way
        # email 4.x behaved. At some point it would be nice to fix that.
        msg = email.message_from_string("EmptyHeader:")
        self.assertEqual(str(msg), "EmptyHeader: \n\n")

    def test_encode_preserves_leading_ws_on_value(self):
        msg = Message()
        msg['SomeHeader'] = ' value with leading ws'
        self.assertEqual(str(msg), "SomeHeader: value with leading ws\n\n")
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
    """Tests for RFC 2231 parameter encoding, continuations and charsets."""

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
    def test_get_param(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_29.txt')
        eq(msg.get_param('title'),
           ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
        eq(msg.get_param('title', unquote=False),
           ('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))

    def test_set_param(self):
        eq = self.ndiffAssertEqual
        msg = Message()
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii')
        eq(msg.get_param('title'),
           ('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii', language='en')
        eq(msg.get_param('title'),
           ('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
        msg = self._msgobj('msg_01.txt')
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii', language='en')
        eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
 title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21


Hi,

Do you like this message?

-Me
""")

    def test_set_param_requote(self):
        msg = Message()
        msg.set_param('title', 'foo')
        self.assertEqual(msg['content-type'], 'text/plain; title="foo"')
        msg.set_param('title', 'bar', requote=False)
        self.assertEqual(msg['content-type'], 'text/plain; title=bar')
        # tspecial is still quoted.
        msg.set_param('title', "(bar)bell", requote=False)
        self.assertEqual(msg['content-type'], 'text/plain; title="(bar)bell"')

    def test_del_param(self):
        eq = self.ndiffAssertEqual
        msg = self._msgobj('msg_01.txt')
        msg.set_param('foo', 'bar', charset='us-ascii', language='en')
        msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
                      charset='us-ascii', language='en')
        msg.del_param('foo', header='Content-Type')
        eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
 title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21


Hi,

Do you like this message?

-Me
""")

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_charset
    # I changed the charset name, though, because the one in the file isn't
    # a legal charset name. Should add a test for an illegal charset.
    def test_rfc2231_get_content_charset(self):
        eq = self.assertEqual
        msg = self._msgobj('msg_32.txt')
        eq(msg.get_content_charset(), 'us-ascii')

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_double_quotes
    def test_rfc2231_parse_rfc_quoting(self):
        m = textwrap.dedent('''\
            Content-Disposition: inline;
            \tfilename*0*=''This%20is%20even%20more%20;
            \tfilename*1*=%2A%2A%2Afun%2A%2A%2A%20;
            \tfilename*2="is it not.pdf"

            ''')
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')
        self.assertEqual(m, msg.as_string())

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
    def test_rfc2231_parse_extra_quoting(self):
        m = textwrap.dedent('''\
            Content-Disposition: inline;
            \tfilename*0*="''This%20is%20even%20more%20";
            \tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
            \tfilename*2="is it not.pdf"

            ''')
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')
        self.assertEqual(m, msg.as_string())

    # test_headerregistry.TestContentTypeHeader.rfc2231_no_language_or_charset
    # but new test uses *0* because otherwise lang/charset is not valid.
    # test_headerregistry.TestContentTypeHeader.rfc2231_segmented_normal_values
    def test_rfc2231_no_language_or_charset(self):
        m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm

'''
        msg = email.message_from_string(m)
        param = msg.get_param('NAME')
        # Un-encoded continuations collapse to a plain string, not a tuple.
        self.assertNotIsInstance(param, tuple)
        self.assertEqual(
            param,
            'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_charset
    def test_rfc2231_no_language_or_charset_in_filename(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')

    # Duplicate of previous test?
    def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')

    # test_headerregistry.TestContentTypeHeader.rfc2231_partly_encoded,
    # but the test below is wrong (the first part should be decoded).
    def test_rfc2231_partly_encoded(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            'This%20is%20even%20more%20***fun*** is it not.pdf')

    def test_rfc2231_partly_nonencoded(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')

    def test_rfc2231_no_language_or_charset_in_boundary(self):
        m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_boundary(),
                         'This is even more ***fun*** is it not.pdf')

    def test_rfc2231_no_language_or_charset_in_charset(self):
        # This is a nonsensical charset value, but tests the code anyway
        m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_content_charset(),
                         'this is even more ***fun*** is it not.pdf')

    # test_headerregistry.TestContentTypeHeader.rfc2231_unknown_charset_treated_as_ascii
    def test_rfc2231_bad_encoding_in_filename(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"

'''
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf')

    def test_rfc2231_bad_encoding_in_charset(self):
        m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D

"""
        msg = email.message_from_string(m)
        # This should return None because non-ascii characters in the charset
        # are not allowed.
        self.assertEqual(msg.get_content_charset(), None)

    def test_rfc2231_bad_character_in_charset(self):
        m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D

"""
        msg = email.message_from_string(m)
        # This should return None because non-ascii characters in the charset
        # are not allowed.
        self.assertEqual(msg.get_content_charset(), None)

    def test_rfc2231_bad_character_in_filename(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"

'''
        msg = email.message_from_string(m)
        # The stray %E2 byte is replaced with U+FFFD.
        self.assertEqual(msg.get_filename(),
                         'This is even more ***fun*** is it not.pdf\ufffd')

    def test_rfc2231_unknown_encoding(self):
        m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt

"""
        msg = email.message_from_string(m)
        self.assertEqual(msg.get_filename(), 'myfile.txt')

    def test_rfc2231_single_tick_in_filename_extended(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"

"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, None)
        eq(language, None)
        eq(s, "Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
    def test_rfc2231_single_tick_in_filename(self):
        m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"

"""
        msg = email.message_from_string(m)
        param = msg.get_param('name')
        self.assertNotIsInstance(param, tuple)
        self.assertEqual(param, "Frank's Document")

    def test_rfc2231_missing_tick(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%20broken";
'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            "'This is broken")

    def test_rfc2231_missing_tick_with_encoded_non_ascii(self):
        m = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%E2broken";
'''
        msg = email.message_from_string(m)
        self.assertEqual(
            msg.get_filename(),
            "'This is\ufffdbroken")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_value_with_charset_and_lang
    def test_rfc2231_tick_attack_extended(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"

"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, 'us-ascii')
        eq(language, 'en-us')
        eq(s, "Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_non_encoded_value
    def test_rfc2231_tick_attack(self):
        m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"

"""
        msg = email.message_from_string(m)
        param = msg.get_param('name')
        self.assertNotIsInstance(param, tuple)
        # Un-starred segments keep the charset'lang' prefix verbatim.
        self.assertEqual(param, "us-ascii'en-us'Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_single_quotes_inside_quotes
    def test_rfc2231_no_extended_values(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"

"""
        msg = email.message_from_string(m)
        eq(msg.get_param('name'), "Frank's Document")

    # test_headerregistry.TestContentTypeHeader.rfc2231_encoded_then_unencoded_segments
    def test_rfc2231_encoded_then_unencoded_segments(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"

"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, 'us-ascii')
        eq(language, 'en-us')
        eq(s, 'My Document For You')

    # test_headerregistry.TestContentTypeHeader.rfc2231_unencoded_then_encoded_segments
    # test_headerregistry.TestContentTypeHeader.rfc2231_quoted_unencoded_then_encoded_segments
    def test_rfc2231_unencoded_then_encoded_segments(self):
        eq = self.assertEqual
        m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"

"""
        msg = email.message_from_string(m)
        charset, language, s = msg.get_param('name')
        eq(charset, 'us-ascii')
        eq(language, 'en-us')
        eq(s, 'My Document For You')
# Tests to ensure that signed parts of an email are completely preserved, as
# required by RFC1847 section 2.1. Note that these are incomplete, because the
# email package does not currently always preserve the body. See issue 1670765.
class TestSigned(TestEmailBase):
    """Check that the signed MIME part survives reserialization unchanged.

    RFC 1847 section 2.1 requires signed content to be preserved exactly;
    these tests compare the first MIME part of msg_45.txt before and after
    regeneration.  Coverage is incomplete because the email package does
    not currently always preserve the body -- see issue 1670765.
    """

    def _msg_and_obj(self, filename):
        # Return the raw text of a fixture message plus its parsed form.
        with openfile(filename) as fp:
            text = fp.read()
            parsed = email.message_from_string(text)
        return text, parsed

    def _signed_parts_eq(self, original, result):
        # Extract the first MIME part (between the boundary lines) of each
        # serialization and require them to match byte-for-byte.
        import re
        part_re = re.compile(r'^--([^\n]+)\n(.*?)\n--\1$', re.S | re.M)
        expected = part_re.search(original).group(2)
        actual = part_re.search(result).group(2)
        self.assertEqual(actual, expected)

    def test_long_headers_as_string(self):
        original, msg = self._msg_and_obj('msg_45.txt')
        self._signed_parts_eq(original, msg.as_string())

    def test_long_headers_as_string_maxheaderlen(self):
        original, msg = self._msg_and_obj('msg_45.txt')
        self._signed_parts_eq(original, msg.as_string(maxheaderlen=60))

    def test_long_headers_flatten(self):
        original, msg = self._msg_and_obj('msg_45.txt')
        buf = StringIO()
        Generator(buf).flatten(msg)
        self._signed_parts_eq(original, buf.getvalue())
# Allow running this test module directly (as well as via the test suite).
if __name__ == '__main__':
    unittest.main()
# NOTE(review): unresolved git merge-conflict markers (">>>>>>>" / "=======")
# removed here -- they made the file syntactically invalid.  Everything below
# this point is a second, duplicated copy of this test module that should be
# reconciled with the copy above.
# Copyright (C) 2001-2010 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import re
import time
import base64
import unittest
import textwrap
from io import StringIO, BytesIO
from itertools import chain
from random import choice
import email
import email.policy
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator, BytesGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.support import unlink
from test.test_email import openfile, TestEmailBase
# These imports are documented to work, but we are testing them using a
# different path, so we import them here just to make sure they are importable.
from email.parser import FeedParser, BytesFeedParser
# Handy string constants used throughout this test module.
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
# Try adding a charset when there's already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_set_payload_with_8bit_data_and_charset(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
msg = Message()
msg.set_payload(data, charset)
self.assertEqual(msg['content-transfer-encoding'], 'base64')
self.assertEqual(msg.get_payload(decode=True), data)
self.assertEqual(msg.get_payload(), '0JDQkdCS\n')
def test_set_payload_with_non_ascii_and_charset_body_encoding_none(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
charset.body_encoding = None # Disable base64 encoding
msg = Message()
msg.set_payload(data.decode('utf-8'), charset)
self.assertEqual(msg['content-transfer-encoding'], '8bit')
self.assertEqual(msg.get_payload(decode=True), data)
def test_set_payload_with_8bit_data_and_charset_body_encoding_none(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
charset.body_encoding = None # Disable base64 encoding
msg = Message()
msg.set_payload(data, charset)
self.assertEqual(msg['content-transfer-encoding'], '8bit')
self.assertEqual(msg.get_payload(decode=True), data)
def test_set_payload_to_list(self):
msg = Message()
msg.set_payload([])
self.assertEqual(msg.get_payload(), [])
def test_attach_when_payload_is_string(self):
msg = Message()
msg['Content-Type'] = 'multipart/mixed'
msg.set_payload('string payload')
sub_msg = MIMEMessage(Message())
self.assertRaisesRegex(TypeError, "[Aa]ttach.*non-multipart",
msg.attach, sub_msg)
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_make_boundary(self):
msg = MIMEMultipart('form-data')
# Note that when the boundary gets created is an implementation
# detail and might change.
self.assertEqual(msg.items()[0][1], 'multipart/form-data')
# Trigger creation of boundary
msg.as_string()
self.assertEqual(msg.items()[0][1][:33],
'multipart/form-data; boundary="==')
# XXX: there ought to be tests of the uniqueness of the boundary, too.
def test_message_rfc822_only(self):
# Issue 7970: message/rfc822 not in multipart parsed by
# HeaderParser caused an exception when flattened.
with openfile('msg_46.txt') as fp:
msgdata = fp.read()
parser = HeaderParser()
msg = parser.parsestr(msgdata)
out = StringIO()
gen = Generator(out, True, 0)
gen.flatten(msg, False)
self.assertEqual(out.getvalue(), msgdata)
def test_byte_message_rfc822_only(self):
# Make sure new bytes header parser also passes this.
with openfile('msg_46.txt') as fp:
msgdata = fp.read().encode('ascii')
parser = email.parser.BytesHeaderParser()
msg = parser.parsebytes(msgdata)
out = BytesIO()
gen = email.generator.BytesGenerator(out)
gen.flatten(msg)
self.assertEqual(out.getvalue(), msgdata)
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
b'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
b'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
b'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
b'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
b'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), b'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), b'foo')
def test_get_payload_n_raises_on_non_multipart(self):
msg = Message()
self.assertRaises(TypeError, msg.get_payload, 1)
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
with openfile('msg_17.txt') as fp:
text = fp.read()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertIn('from', msg)
self.assertIn('From', msg)
self.assertIn('FROM', msg)
self.assertIn('to', msg)
self.assertIn('To', msg)
self.assertIn('TO', msg)
def test_as_string(self):
msg = self._msgobj('msg_01.txt')
with openfile('msg_01.txt') as fp:
text = fp.read()
self.assertEqual(text, str(msg))
fullrepr = msg.as_string(unixfrom=True)
lines = fullrepr.split('\n')
self.assertTrue(lines[0].startswith('From '))
self.assertEqual(text, NL.join(lines[1:]))
def test_as_string_policy(self):
msg = self._msgobj('msg_01.txt')
newpolicy = msg.policy.clone(linesep='\r\n')
fullrepr = msg.as_string(policy=newpolicy)
s = StringIO()
g = Generator(s, policy=newpolicy)
g.flatten(msg)
self.assertEqual(fullrepr, s.getvalue())
def test_as_bytes(self):
msg = self._msgobj('msg_01.txt')
with openfile('msg_01.txt') as fp:
data = fp.read().encode('ascii')
self.assertEqual(data, bytes(msg))
fullrepr = msg.as_bytes(unixfrom=True)
lines = fullrepr.split(b'\n')
self.assertTrue(lines[0].startswith(b'From '))
self.assertEqual(data, b'\n'.join(lines[1:]))
def test_as_bytes_policy(self):
msg = self._msgobj('msg_01.txt')
newpolicy = msg.policy.clone(linesep='\r\n')
fullrepr = msg.as_bytes(policy=newpolicy)
s = BytesIO()
g = BytesGenerator(s,policy=newpolicy)
g.flatten(msg)
self.assertEqual(fullrepr, s.getvalue())
# test_headerregistry.TestContentTypeHeader.bad_params
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
# test_headerregistry.TestContentTypeHeader.spaces_around_param_equals
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
# test_headerregistry.TestContentTypeHeader.spaces_around_semis
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
# test_headerregistry.TestContentTypeHeader.semis_inside_quotes
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
# test_headerregistry.TestContentTypeHeader.quotes_inside_rfc2231_value
def test_get_param_with_quotes(self):
msg = email.message_from_string(
'Content-Type: foo; bar*0="baz\\"foobar"; bar*1="\\"baz"')
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
msg = email.message_from_string(
"Content-Type: foo; bar*0=\"baz\\\"foobar\"; bar*1=\"\\\"baz\"")
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
def test_field_containment(self):
msg = email.message_from_string('Header: exists')
self.assertIn('header', msg)
self.assertIn('Header', msg)
self.assertIn('HEADER', msg)
self.assertNotIn('headerx', msg)
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_del_param_on_nonexistent_header(self):
msg = Message()
# Deleting param on empty msg should not raise exception.
msg.del_param('filename', 'content-disposition')
def test_del_nonexistent_param(self):
msg = Message()
msg.add_header('Content-Type', 'text/plain', charset='utf-8')
existing_header = msg['Content-Type']
msg.del_param('foobar', header='Content-Type')
self.assertEqual(msg['Content-Type'], existing_header)
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
# test_defect_handling:test_invalid_chars_in_base64_payload
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
(b'\x03\x00\xe9\xd0\xfe\xff\xff.\x8b\xc0'
b'\xa1\x00p\xf6\xbf\xe9\x0f'))
self.assertIsInstance(msg.defects[0],
errors.InvalidBase64CharactersDefect)
def test_broken_unicode_payload(self):
# This test improves coverage but is not a compliance test.
# The behavior in this situation is currently undefined by the API.
x = 'this is a br\xf6ken thing to do'
msg = Message()
msg['content-type'] = 'text/plain'
msg['content-transfer-encoding'] = '8bit'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
bytes(x, 'raw-unicode-escape'))
def test_questionable_bytes_payload(self):
# This test improves coverage but is not a compliance test,
# since it involves poking inside the black box.
x = 'this is a quéstionable thing to do'.encode('utf-8')
msg = Message()
msg['content-type'] = 'text/plain; charset="utf-8"'
msg['content-transfer-encoding'] = '8bit'
msg._payload = x
self.assertEqual(msg.get_payload(decode=True), x)
# Issue 1078919
def test_ascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename='bud.gif')
self.assertEqual('attachment; filename="bud.gif"',
msg['Content-Disposition'])
def test_noascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="Fußballer.ppt")
self.assertEqual(
'attachment; filename*=utf-8\'\'Fu%C3%9Fballer.ppt',
msg['Content-Disposition'])
def test_nonascii_add_header_via_triple(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename=('iso-8859-1', '', 'Fußballer.ppt'))
self.assertEqual(
'attachment; filename*=iso-8859-1\'\'Fu%DFballer.ppt',
msg['Content-Disposition'])
def test_ascii_add_header_with_tspecial(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="windows [filename].ppt")
self.assertEqual(
'attachment; filename="windows [filename].ppt"',
msg['Content-Disposition'])
def test_nonascii_add_header_with_tspecial(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="Fußballer [filename].ppt")
self.assertEqual(
"attachment; filename*=utf-8''Fu%C3%9Fballer%20%5Bfilename%5D.ppt",
msg['Content-Disposition'])
def test_binary_quopri_payload(self):
for charset in ('latin-1', 'ascii'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = 'quoted-printable'
msg.set_payload(b'foo=e6=96=87bar')
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
'get_payload returns wrong result with charset %s.' % charset)
def test_binary_base64_payload(self):
for charset in ('latin-1', 'ascii'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(b'Zm9v5paHYmFy')
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
'get_payload returns wrong result with charset %s.' % charset)
def test_binary_uuencode_payload(self):
for charset in ('latin-1', 'ascii'):
for encoding in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = encoding
msg.set_payload(b"begin 666 -\n)9F]OYI:'8F%R\n \nend\n")
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
str(('get_payload returns wrong result ',
'with charset {0} and encoding {1}.')).\
format(charset, encoding))
def test_add_header_with_name_only_param(self):
msg = Message()
msg.add_header('Content-Disposition', 'inline', foo_bar=None)
self.assertEqual("inline; foo-bar", msg['Content-Disposition'])
def test_add_header_with_no_value(self):
msg = Message()
msg.add_header('X-Status', None)
self.assertEqual('', msg['X-Status'])
# Issue 5871: reject an attempt to embed a header inside a header value
# (header injection attack).
def test_embeded_header_via_Header_rejected(self):
msg = Message()
msg['Dummy'] = Header('dummy\nX-Injected-Header: test')
self.assertRaises(errors.HeaderParseError, msg.as_string)
def test_embeded_header_via_string_rejected(self):
msg = Message()
msg['Dummy'] = 'dummy\nX-Injected-Header: test'
self.assertRaises(errors.HeaderParseError, msg.as_string)
def test_unicode_header_defaults_to_utf8_encoding(self):
# Issue 14291
m = MIMEText('abc\n')
m['Subject'] = 'É test'
self.assertEqual(str(m),textwrap.dedent("""\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: =?utf-8?q?=C3=89_test?=
abc
"""))
def test_unicode_body_defaults_to_utf8_encoding(self):
# Issue 14291
m = MIMEText('É testabc\n')
self.assertEqual(str(m),textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
w4kgdGVzdGFiYwo=
"""))
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
def test_EncodersEncode_base64(self):
with openfile('PyBanner048.gif', 'rb') as fp:
bindata = fp.read()
mimed = email.mime.image.MIMEImage(bindata)
base64ed = mimed.get_payload()
# the transfer-encoded body lines should all be <=76 characters
lines = base64ed.split('\n')
self.assertLessEqual(max([ len(x) for x in lines ]), 76)
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
# 7bit data and the default us-ascii _charset
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], 'base64')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
def test_encode7or8bit(self):
# Make sure a charset whose input character set is 8bit but
# whose output character set is 7bit gets a transfer-encoding
# of 7bit.
eq = self.assertEqual
msg = MIMEText('文\n', _charset='euc-jp')
eq(msg['content-transfer-encoding'], '7bit')
eq(msg.as_string(), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/plain; charset="iso-2022-jp"
Content-Transfer-Encoding: 7bit
\x1b$BJ8\x1b(B
"""))
def test_qp_encode_latin1(self):
msg = MIMEText('\xe1\xf6\n', 'text', 'ISO-8859-1')
self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
=E1=F6
"""))
def test_qp_encode_non_latin1(self):
# Issue 16948
msg = MIMEText('\u017c\n', 'text', 'ISO-8859-2')
self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-2"
Content-Transfer-Encoding: quoted-printable
=BF
"""))
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
maxDiff = None
def test_split_long_continuation(self):
eq = self.ndiffAssertEqual
msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
def test_another_long_almost_unsplittable_header(self):
eq = self.ndiffAssertEqual
hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
h = Header(hstr.replace('\t', ' '))
eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")
def test_long_nonstring(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = (b'Die Mieter treten hier ein werden mit einem Foerderband '
b'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
b'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
b'bef\xf6rdert. ')
cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
b'd\xf9vtipu.. ')
utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
'\u3044\u307e\u3059\u3002')
h = Header(g_head, g, header_name='Subject')
h.append(cz_head, cz)
h.append(utf8_head, utf8)
msg = Message()
msg['Subject'] = h
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderb?=
=?iso-8859-1?q?and_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen?=
=?iso-8859-1?q?_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef?=
=?iso-8859-1?q?=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hrouti?=
=?iso-8859-2?q?ly_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC5LiA?=
=?utf-8?b?6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn44Gf44KJ?=
=?utf-8?b?44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFzIE51bnN0dWNr?=
=?utf-8?b?IGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5kIGRhcyBPZGVyIGRp?=
=?utf-8?b?ZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIDjgaPjgabjgYTjgb7jgZk=?=
=?utf-8?b?44CC?=
""")
eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerde?=
=?iso-8859-1?q?rband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndis?=
=?iso-8859-1?q?chen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klinge?=
=?iso-8859-1?q?n_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se?=
=?iso-8859-2?q?_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb?=
=?utf-8?b?44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go?=
=?utf-8?b?44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBp?=
=?utf-8?b?c3QgZGFzIE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWlo?=
=?utf-8?b?ZXJodW5kIGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI0=?=
=?utf-8?b?44Go6KiA44Gj44Gm44GE44G+44GZ44CC?=""")
def test_long_header_encode(self):
    """Header.encode() folds a long unstructured value at the FWS point."""
    eq = self.ndiffAssertEqual
    h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
               'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
               header_name='X-Foobar-Spoink-Defrobnit')
    # Continuation line is indented with a single space.
    eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
 spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation_is_just_a_hint(self):
    """continuation_ws is only a hint: a space fold point still uses a space."""
    eq = self.ndiffAssertEqual
    h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
               'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
               header_name='X-Foobar-Spoink-Defrobnit',
               continuation_ws='\t')
    eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
 spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
    """A tab embedded in the source value is preserved as the fold whitespace."""
    eq = self.ndiffAssertEqual
    h = Header('wasnipoop; giraffes="very-long-necked-animals";\t'
               'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
               header_name='X-Foobar-Spoink-Defrobnit',
               continuation_ws='\t')
    eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_encode_with_different_output_charset(self):
h = Header('文', 'euc-jp')
self.assertEqual(h.encode(), "=?iso-2022-jp?b?GyRCSjgbKEI=?=")
def test_long_header_encode_with_different_output_charset(self):
    """A long euc-jp value folds into multiple iso-2022-jp encoded words."""
    h = Header(b'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4'
               b'\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4'
               b'\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4'
               b'\xa4\xa4\xde\xa4\xb9'.decode('euc-jp'), 'euc-jp')
    res = """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKMnE8VCROPjUbKEI=?=
 =?iso-2022-jp?b?GyRCRyckckJUJEMkRiQkJF4kORsoQg==?="""
    self.assertEqual(h.encode(), res)
def test_header_splitter(self):
    """Generator folds an overlong custom header while flattening a message."""
    eq = self.ndiffAssertEqual
    msg = MIMEText('')
    # It'd be great if we could use add_header() here, but that doesn't
    # guarantee an order of the parameters.
    msg['X-Foobar-Spoink-Defrobnit'] = (
        'wasnipoop; giraffes="very-long-necked-animals"; '
        'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # Blank line is the header/body separator (body is empty here).
    eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
 spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"

''')
def test_no_semis_header_splitter(self):
    """A References header with no semicolons folds on spaces."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = SPACE.join('<%d@dom.ain>' % i for i in range(10))
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
 <5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>

Test""")
def test_last_split_chunk_does_not_fit(self):
    """An unsplittable final chunk goes on its own continuation line."""
    eq = self.ndiffAssertEqual
    h = Header('Subject: the first part of this is short, but_the_second'
               '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
               '_all_by_itself')
    eq(h.encode(), """\
Subject: the first part of this is short,
 but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_splittable_leading_char_followed_by_overlong_unsplitable(self):
    """A lone leading split char folds away from an overlong chunk."""
    eq = self.ndiffAssertEqual
    h = Header(', but_the_second'
               '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
               '_all_by_itself')
    eq(h.encode(), """\
,
 but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_multiple_splittable_leading_char_followed_by_overlong_unsplitable(self):
    """Multiple leading split chars stay together on the first line."""
    eq = self.ndiffAssertEqual
    h = Header(', , but_the_second'
               '_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
               '_all_by_itself')
    eq(h.encode(), """\
, ,
 but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_trailing_splitable_on_overlong_unsplitable(self):
    """An overlong chunk with only a trailing split char is not folded."""
    eq = self.ndiffAssertEqual
    h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
               'be_on_a_line_all_by_itself;')
    eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_should_"
                   "be_on_a_line_all_by_itself;")
def test_trailing_splitable_on_overlong_unsplitable_with_leading_splitable(self):
    """Leading split char folds; trailing split char stays on the chunk."""
    eq = self.ndiffAssertEqual
    h = Header('; '
               'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
               'be_on_a_line_all_by_itself; ')
    eq(h.encode(), """\
;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_long_header_with_multiple_sequential_split_chars(self):
    """Runs of whitespace must not truncate the header when folding."""
    eq = self.ndiffAssertEqual
    h = Header('This is a long line that has two whitespaces in a row. '
               'This used to cause truncation of the header when folded')
    eq(h.encode(), """\
This is a long line that has two whitespaces in a row. This used to cause
 truncation of the header when folded""")
def test_splitter_split_on_punctuation_only_if_fws_with_header(self):
    """Punctuation without following whitespace is not a legal fold point."""
    eq = self.ndiffAssertEqual
    h = Header('thisverylongheaderhas;semicolons;and,commas,but'
               'they;arenotlegal;fold,points')
    eq(h.encode(), "thisverylongheaderhas;semicolons;and,commas,butthey;"
                   "arenotlegal;fold,points")
def test_leading_splittable_in_the_middle_just_before_overlong_last_part(self):
    """Earlier fold points are still used before an overlong last chunk."""
    eq = self.ndiffAssertEqual
    h = Header('this is a test where we need to have more than one line '
               'before; our final line that is just too big to fit;; '
               'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
               'be_on_a_line_all_by_itself;')
    eq(h.encode(), """\
this is a test where we need to have more than one line before;
 our final line that is just too big to fit;;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself;""")
def test_overlong_last_part_followed_by_split_point(self):
    """Trailing whitespace after an overlong chunk does not force a fold."""
    eq = self.ndiffAssertEqual
    h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
               'be_on_a_line_all_by_itself ')
    eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_"
                   "should_be_on_a_line_all_by_itself ")
def test_multiline_with_overlong_parts_separated_by_two_split_points(self):
    """Each overlong chunk lands on its own line; the lone ';' folds alone."""
    eq = self.ndiffAssertEqual
    h = Header('this_is_a__test_where_we_need_to_have_more_than_one_line_'
               'before_our_final_line_; ; '
               'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
               'be_on_a_line_all_by_itself; ')
    eq(h.encode(), """\
this_is_a__test_where_we_need_to_have_more_than_one_line_before_our_final_line_;
 ;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_multiline_with_overlong_last_part_followed_by_split_point(self):
    """Same as above but with a splittable (spaced) first chunk."""
    eq = self.ndiffAssertEqual
    h = Header('this is a test where we need to have more than one line '
               'before our final line; ; '
               'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
               'be_on_a_line_all_by_itself; ')
    eq(h.encode(), """\
this is a test where we need to have more than one line before our final line;
 ;
 this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_long_header_with_whitespace_runs(self):
    """Trailing whitespace runs in the value survive folding (\\x20\\x20)."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = SPACE.join(['<foo@dom.ain> '] * 10)
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
 <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
 <foo@dom.ain> <foo@dom.ain>\x20\x20

Test""")
def test_long_run_with_semi_header_splitter(self):
    """A mostly-space-separated value folds on spaces, keeping '; abc' intact."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = SPACE.join(['<foo@dom.ain>'] * 10) + '; abc'
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
 <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
 <foo@dom.ain>; abc

Test""")
def test_splitter_split_on_punctuation_only_if_fws(self):
    """With no FWS at all, the whole value moves to a continuation line."""
    eq = self.ndiffAssertEqual
    msg = Message()
    msg['From'] = 'test@dom.ain'
    msg['References'] = ('thisverylongheaderhas;semicolons;and,commas,but'
                         'they;arenotlegal;fold,points')
    msg.set_payload('Test')
    sfp = StringIO()
    g = Generator(sfp)
    g.flatten(msg)
    # XXX the space after the header should not be there.
    eq(sfp.getvalue(), """\
From: test@dom.ain
References:\x20
 thisverylongheaderhas;semicolons;and,commas,butthey;arenotlegal;fold,points

Test""")
def test_no_split_long_header(self):
    """An 80-char unsplittable run is never broken mid-token."""
    eq = self.ndiffAssertEqual
    hstr = 'References: ' + 'x' * 80
    h = Header(hstr)
    # These come on two lines because Headers are really field value
    # classes and don't really know about their field names.
    eq(h.encode(), """\
References:
 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
    h = Header('x' * 80)
    eq(h.encode(), 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
def test_splitting_multiple_long_lines(self):
    """Each physical line of a multi-line value folds independently."""
    eq = self.ndiffAssertEqual
    hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
    h = Header(hstr, continuation_ws='\t')
    eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
 by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
 for <mailman-admin@babylon.socal-raves.org>;
 Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
 by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
 for <mailman-admin@babylon.socal-raves.org>;
 Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
 by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
 for <mailman-admin@babylon.socal-raves.org>;
 Sat, 2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
    """Only the overlong first line is folded; tab-continued lines pass through."""
    eq = self.ndiffAssertEqual
    hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
    h = Header(hstr, maxlinelen=78, header_name='Received',
               continuation_ws='\t')
    eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
 helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
    """Appended iso-8859-1 chunks fold as RFC 2047 encoded words."""
    eq = self.ndiffAssertEqual
    msg = Message()
    h = Header('Britische Regierung gibt', 'iso-8859-1',
               header_name='Subject')
    h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
    eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
 =?iso-8859-1?q?hore-Windkraftprojekte?=""")
    msg['Subject'] = h
    eq(msg.as_string(maxheaderlen=76), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
 =?iso-8859-1?q?hore-Windkraftprojekte?=

""")
    # maxheaderlen=0 disables folding entirely.
    eq(msg.as_string(maxheaderlen=0), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offshore-Windkraftprojekte?=

""")
def test_long_8bit_header_no_charset(self):
    """Without an explicit charset, 8-bit values are encoded as utf-8 words."""
    eq = self.ndiffAssertEqual
    msg = Message()
    header_string = ('Britische Regierung gibt gr\xfcnes Licht '
                     'f\xfcr Offshore-Windkraftprojekte '
                     '<a-very-long-address@example.com>')
    msg['Reply-To'] = header_string
    eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
 =?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=

""")
    msg = Message()
    msg['Reply-To'] = Header(header_string,
                             header_name='Reply-To')
    eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
 =?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=

""")
def test_long_to_header(self):
    """A long address list folds after address-separating commas."""
    eq = self.ndiffAssertEqual
    to = ('"Someone Test #A" <someone@eecs.umich.edu>,'
          '<someone@eecs.umich.edu>, '
          '"Someone Test #B" <someone@umich.edu>, '
          '"Someone Test #C" <someone@eecs.umich.edu>, '
          '"Someone Test #D" <someone@eecs.umich.edu>')
    msg = Message()
    msg['To'] = to
    eq(msg.as_string(maxheaderlen=78), '''\
To: "Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,
 "Someone Test #B" <someone@umich.edu>,
 "Someone Test #C" <someone@eecs.umich.edu>,
 "Someone Test #D" <someone@eecs.umich.edu>

''')
def test_long_line_after_append(self):
    """An appended chunk that would overflow goes on a continuation line."""
    eq = self.ndiffAssertEqual
    s = 'This is an example of string which has almost the limit of header length.'
    h = Header(s)
    h.append('Add another line.')
    eq(h.encode(maxlinelen=76), """\
This is an example of string which has almost the limit of header length.
 Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
    """A very long field name squeezes the first encoded word."""
    eq = self.ndiffAssertEqual
    fn = 'X-Very-Very-Very-Long-Header-Name'
    gs = ('Die Mieter treten hier ein werden mit einem Foerderband '
          'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
          'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
          'bef\xf6rdert. ')
    h = Header(gs, 'iso-8859-1', header_name=fn)
    # BAW: this seems broken because the first line is too long
    eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_e?=
 =?iso-8859-1?q?in_werden_mit_einem_Foerderband_komfortabel_den_Korridor_e?=
 =?iso-8859-1?q?ntlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_ge?=
 =?iso-8859-1?q?gen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
    """Received headers fold on spaces, not on the embedded semicolons."""
    h = ('from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) '
         'by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; '
         'Wed, 05 Mar 2003 18:10:18 -0700')
    msg = Message()
    msg['Received-1'] = Header(h, continuation_ws='\t')
    msg['Received-2'] = h
    # This should be splitting on spaces not semicolons.
    self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
 hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
 Wed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
 hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
 Wed, 05 Mar 2003 18:10:18 -0700

""")
def test_string_headerinst_eq(self):
    """A plain string and an equivalent Header instance fold identically."""
    h = ('<15975.17901.207240.414604@sgigritzmann1.mathematik.'
         'tu-muenchen.de> (David Bremner\'s message of '
         '"Thu, 6 Mar 2003 13:58:21 +0100")')
    msg = Message()
    msg['Received-1'] = Header(h, header_name='Received-1',
                               continuation_ws='\t')
    msg['Received-2'] = h
    # XXX The space after the ':' should not be there.
    self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1:\x20
 <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
 Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
Received-2:\x20
 <15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
 Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")

""")
def test_long_unbreakable_lines_with_continuation(self):
    """Base64-ish unbreakable lines are pushed to continuation lines."""
    eq = self.ndiffAssertEqual
    msg = Message()
    t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
    msg['Face-1'] = t
    msg['Face-2'] = Header(t, header_name='Face-2')
    msg['Face-3'] = ' ' + t
    # XXX This splitting is all wrong. It the first value line should be
    # snug against the field name or the space after the header not there.
    eq(msg.as_string(maxheaderlen=78), """\
Face-1:\x20
 iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
 locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2:\x20
 iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
 locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-3:\x20
 iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
 locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp

""")
def test_another_long_multiline_header(self):
    """A parsed long Received header re-folds on flattening."""
    eq = self.ndiffAssertEqual
    m = ('Received: from siimage.com '
         '([172.25.1.3]) by zima.siliconimage.com with '
         'Microsoft SMTPSVC(5.0.2195.4905); '
         'Wed, 16 Oct 2002 07:41:11 -0700')
    msg = email.message_from_string(m)
    eq(msg.as_string(maxheaderlen=78), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
 Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700

''')
def test_long_lines_with_different_header(self):
    """String and Header values of the same field fold the same way."""
    eq = self.ndiffAssertEqual
    h = ('List-Unsubscribe: '
         '<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,'
         ' <mailto:spamassassin-talk-request@lists.sourceforge.net'
         '?subject=unsubscribe>')
    msg = Message()
    msg['List'] = h
    msg['List'] = Header(h, header_name='List')
    eq(msg.as_string(maxheaderlen=78), """\
List: List-Unsubscribe:
 <http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
 <mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
List: List-Unsubscribe:
 <http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
 <mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>

""")
def test_long_rfc2047_header_with_embedded_fws(self):
    """Embedded folding whitespace is preserved across encoded-word wrapping."""
    h = Header(textwrap.dedent("""\
        We're going to pretend this header is in a non-ascii character set
        \tto see if line wrapping with encoded words and embedded
        folding white space works"""),
               charset='utf-8',
               header_name='Test')
    self.assertEqual(h.encode()+'\n', textwrap.dedent("""\
        =?utf-8?q?We=27re_going_to_pretend_this_header_is_in_a_non-ascii_chara?=
        =?utf-8?q?cter_set?=
        =?utf-8?q?_to_see_if_line_wrapping_with_encoded_words_and_embedded?=
        =?utf-8?q?_folding_white_space_works?=""")+'\n')
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
    """Exercise Generator's mangle_from_ option (unix-mbox '>From ' quoting)."""

    def setUp(self):
        # A simple message whose body starts with a "From " line.
        self.msg = Message()
        self.msg['From'] = 'aaa@bbb.org'
        self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")

    def test_mangled_from(self):
        """mangle_from_=True prefixes body 'From ' lines with '>'."""
        s = StringIO()
        g = Generator(s, mangle_from_=True)
        g.flatten(self.msg)
        self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org

>From the desk of A.A.A.:
Blah blah blah
""")

    def test_dont_mangle_from(self):
        """mangle_from_=False leaves body 'From ' lines untouched."""
        s = StringIO()
        g = Generator(s, mangle_from_=False)
        g.flatten(self.msg)
        self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org

From the desk of A.A.A.:
Blah blah blah
""")

    def test_mangle_from_in_preamble_and_epilog(self):
        """Mangling also applies to the multipart preamble and epilogue."""
        s = StringIO()
        g = Generator(s, mangle_from_=True)
        msg = email.message_from_string(textwrap.dedent("""\
            From: foo@bar.com
            Mime-Version: 1.0
            Content-Type: multipart/mixed; boundary=XXX

            From somewhere unknown

            --XXX
            Content-Type: text/plain

            foo

            --XXX--

            From somewhere unknowable
            """))
        g.flatten(msg)
        # Exactly the preamble and epilogue 'From ' lines get quoted.
        self.assertEqual(len([1 for x in s.getvalue().split('\n')
                                  if x.startswith('>From ')]), 2)

    def test_mangled_from_with_bad_bytes(self):
        """BytesGenerator mangles even when the body holds non-ascii bytes."""
        source = textwrap.dedent("""\
            Content-Type: text/plain; charset="utf-8"
            MIME-Version: 1.0
            Content-Transfer-Encoding: 8bit
            From: aaa@bbb.org

            """).encode('utf-8')
        msg = email.message_from_bytes(source + b'From R\xc3\xb6lli\n')
        b = BytesIO()
        g = BytesGenerator(b, mangle_from_=True)
        g.flatten(msg)
        self.assertEqual(b.getvalue(), source + b'>From R\xc3\xb6lli\n')
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
    """Tests for email.mime.audio.MIMEAudio using the audiotest.au fixture."""

    def setUp(self):
        # openfile() is the test-package helper that locates fixture files.
        with openfile('audiotest.au', 'rb') as fp:
            self._audiodata = fp.read()
        self._au = MIMEAudio(self._audiodata)

    def test_guess_minor_type(self):
        # The .au data should be sniffed as audio/basic.
        self.assertEqual(self._au.get_content_type(), 'audio/basic')

    def test_encoding(self):
        # The payload is base64 and round-trips back to the raw bytes.
        payload = self._au.get_payload()
        self.assertEqual(base64.decodebytes(bytes(payload, 'ascii')),
                         self._audiodata)

    def test_checkSetMinor(self):
        # An explicit subtype overrides sniffing.
        au = MIMEAudio(self._audiodata, 'fish')
        self.assertEqual(au.get_content_type(), 'audio/fish')

    def test_add_header(self):
        """add_header() with a keyword builds a quoted parameter."""
        eq = self.assertEqual
        self._au.add_header('Content-Disposition', 'attachment',
                            filename='audiotest.au')
        eq(self._au['content-disposition'],
           'attachment; filename="audiotest.au"')
        eq(self._au.get_params(header='content-disposition'),
           [('attachment', ''), ('filename', 'audiotest.au')])
        eq(self._au.get_param('filename', header='content-disposition'),
           'audiotest.au')
        missing = []
        eq(self._au.get_param('attachment', header='content-disposition'), '')
        self.assertIs(self._au.get_param('foo', failobj=missing,
                                         header='content-disposition'), missing)
        # Try some missing stuff
        self.assertIs(self._au.get_param('foobar', missing), missing)
        self.assertIs(self._au.get_param('attachment', missing,
                                         header='foobar'), missing)
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
    """Tests for email.mime.image.MIMEImage using the PyBanner048.gif fixture."""

    def setUp(self):
        # openfile() is the test-package helper that locates fixture files.
        with openfile('PyBanner048.gif', 'rb') as fp:
            self._imgdata = fp.read()
        self._im = MIMEImage(self._imgdata)

    def test_guess_minor_type(self):
        # The GIF header should be sniffed as image/gif.
        self.assertEqual(self._im.get_content_type(), 'image/gif')

    def test_encoding(self):
        # The payload is base64 and round-trips back to the raw bytes.
        payload = self._im.get_payload()
        self.assertEqual(base64.decodebytes(bytes(payload, 'ascii')),
                         self._imgdata)

    def test_checkSetMinor(self):
        # An explicit subtype overrides sniffing.
        im = MIMEImage(self._imgdata, 'fish')
        self.assertEqual(im.get_content_type(), 'image/fish')

    def test_add_header(self):
        """add_header() with a keyword builds a quoted parameter."""
        eq = self.assertEqual
        self._im.add_header('Content-Disposition', 'attachment',
                            filename='dingusfish.gif')
        eq(self._im['content-disposition'],
           'attachment; filename="dingusfish.gif"')
        eq(self._im.get_params(header='content-disposition'),
           [('attachment', ''), ('filename', 'dingusfish.gif')])
        eq(self._im.get_param('filename', header='content-disposition'),
           'dingusfish.gif')
        missing = []
        eq(self._im.get_param('attachment', header='content-disposition'), '')
        self.assertIs(self._im.get_param('foo', failobj=missing,
                                         header='content-disposition'), missing)
        # Try some missing stuff
        self.assertIs(self._im.get_param('foobar', missing), missing)
        self.assertIs(self._im.get_param('attachment', missing,
                                         header='foobar'), missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
    """Tests for email.mime.application.MIMEApplication and its _encoder hook."""

    @staticmethod
    def _roundtrip(msg):
        """Flatten *msg* to wire bytes and parse those bytes back."""
        sink = BytesIO()
        BytesGenerator(sink).flatten(msg)
        return email.message_from_bytes(sink.getvalue())

    def test_headers(self):
        """Binary data defaults to application/octet-stream with base64 CTE."""
        msg = MIMEApplication(b'\xfa\xfb\xfc\xfd\xfe\xff')
        self.assertEqual(msg.get_content_type(), 'application/octet-stream')
        self.assertEqual(msg['content-transfer-encoding'], 'base64')

    def test_body(self):
        payload_bytes = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(payload_bytes)
        # whitespace in the cte encoded block is RFC-irrelevant.
        self.assertEqual(msg.get_payload().strip(), '+vv8/f7/')
        self.assertEqual(msg.get_payload(decode=True), payload_bytes)

    def test_binary_body_with_encode_7or8bit(self):
        # Issue 17171.
        payload_bytes = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(payload_bytes, _encoder=encoders.encode_7or8bit)
        # Treated as a string, this will be invalid code points.
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(payload_bytes))
        self.assertEqual(msg.get_payload(decode=True), payload_bytes)
        self.assertEqual(msg['Content-Transfer-Encoding'], '8bit')
        reparsed = self._roundtrip(msg)
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(payload_bytes))
        self.assertEqual(reparsed.get_payload(decode=True), payload_bytes)
        self.assertEqual(reparsed['Content-Transfer-Encoding'], '8bit')

    def test_binary_body_with_encode_noop(self):
        # Issue 16564: This does not produce an RFC valid message, since to be
        # valid it should have a CTE of binary. But the below works in
        # Python2, and is documented as working this way.
        payload_bytes = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(payload_bytes, _encoder=encoders.encode_noop)
        # Treated as a string, this will be invalid code points.
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(payload_bytes))
        self.assertEqual(msg.get_payload(decode=True), payload_bytes)
        reparsed = self._roundtrip(msg)
        self.assertEqual(msg.get_payload(), '\uFFFD' * len(payload_bytes))
        self.assertEqual(reparsed.get_payload(decode=True), payload_bytes)

    def test_binary_body_with_encode_quopri(self):
        # Issue 14360.
        payload_bytes = b'\xfa\xfb\xfc\xfd\xfe\xff '
        msg = MIMEApplication(payload_bytes, _encoder=encoders.encode_quopri)
        self.assertEqual(msg.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
        self.assertEqual(msg.get_payload(decode=True), payload_bytes)
        self.assertEqual(msg['Content-Transfer-Encoding'], 'quoted-printable')
        reparsed = self._roundtrip(msg)
        self.assertEqual(msg.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
        self.assertEqual(reparsed.get_payload(decode=True), payload_bytes)
        self.assertEqual(reparsed['Content-Transfer-Encoding'],
                         'quoted-printable')

    def test_binary_body_with_encode_base64(self):
        payload_bytes = b'\xfa\xfb\xfc\xfd\xfe\xff'
        msg = MIMEApplication(payload_bytes, _encoder=encoders.encode_base64)
        self.assertEqual(msg.get_payload(), '+vv8/f7/\n')
        self.assertEqual(msg.get_payload(decode=True), payload_bytes)
        reparsed = self._roundtrip(msg)
        self.assertEqual(msg.get_payload(), '+vv8/f7/\n')
        self.assertEqual(reparsed.get_payload(decode=True), payload_bytes)
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
    """Tests for email.mime.text.MIMEText charset and payload handling."""

    def setUp(self):
        self._msg = MIMEText('hello there')

    def test_types(self):
        """ASCII input defaults to text/plain with a us-ascii charset param."""
        self.assertEqual(self._msg.get_content_type(), 'text/plain')
        self.assertEqual(self._msg.get_param('charset'), 'us-ascii')
        sentinel = []
        self.assertIs(self._msg.get_param('foobar', sentinel), sentinel)
        self.assertIs(
            self._msg.get_param('charset', sentinel, header='foobar'),
            sentinel)

    def test_payload(self):
        self.assertEqual(self._msg.get_payload(), 'hello there')
        self.assertFalse(self._msg.is_multipart())

    def test_charset(self):
        msg = MIMEText('hello there', _charset='us-ascii')
        self.assertEqual(msg.get_charset().input_charset, 'us-ascii')
        self.assertEqual(msg['content-type'],
                         'text/plain; charset="us-ascii"')

    def test_7bit_input(self):
        msg = MIMEText('hello there', _charset='us-ascii')
        self.assertEqual(msg.get_charset().input_charset, 'us-ascii')
        self.assertEqual(msg['content-type'],
                         'text/plain; charset="us-ascii"')

    def test_7bit_input_no_charset(self):
        msg = MIMEText('hello there')
        self.assertEqual(msg.get_charset(), 'us-ascii')
        self.assertEqual(msg['content-type'],
                         'text/plain; charset="us-ascii"')
        self.assertIn('hello there', msg.as_string())

    def test_utf8_input(self):
        cyrillic = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
        msg = MIMEText(cyrillic, _charset='utf-8')
        self.assertEqual(msg.get_charset().output_charset, 'utf-8')
        self.assertEqual(msg['content-type'], 'text/plain; charset="utf-8"')
        self.assertEqual(msg.get_payload(decode=True),
                         cyrillic.encode('utf-8'))

    @unittest.skip("can't fix because of backward compat in email5, "
                   "will fix in email6")
    def test_utf8_input_no_charset(self):
        cyrillic = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
        self.assertRaises(UnicodeEncodeError, MIMEText, cyrillic)
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
def setUp(self):
    """Build a two-part multipart/mixed message with a fixed boundary."""
    # openfile() is the test-package helper that locates fixture files.
    with openfile('PyBanner048.gif', 'rb') as fp:
        data = fp.read()
    container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
    image = MIMEImage(data, name='dingusfish.gif')
    image.add_header('content-disposition', 'attachment',
                     filename='dingusfish.gif')
    intro = MIMEText('''\
Hi there,

This is the dingus fish.
''')
    container.attach(intro)
    container.attach(image)
    container['From'] = 'Barry <barry@digicool.com>'
    container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
    container['Subject'] = 'Here is your dingus fish'
    # Fixed instant; the Date header uses the local timezone at that time.
    now = 987809702.54848599
    timetuple = time.localtime(now)
    if timetuple[-1] == 0:
        tzsecs = time.timezone
    else:
        tzsecs = time.altzone
    if tzsecs > 0:
        sign = '-'
    else:
        sign = '+'
    # seconds -> +/-HHMM; / 36 yields a float, which %d truncates.
    tzoffset = ' %s%04d' % (sign, tzsecs / 36)
    container['Date'] = time.strftime(
        '%a, %d %b %Y %H:%M:%S',
        time.localtime(now)) + tzoffset
    self._msg = container
    self._im = image
    self._txt = intro
def test_hierarchy(self):
    """The container exposes exactly the two attached subparts, in order."""
    # convenience
    eq = self.assertEqual
    raises = self.assertRaises
    # tests
    m = self._msg
    self.assertTrue(m.is_multipart())
    eq(m.get_content_type(), 'multipart/mixed')
    eq(len(m.get_payload()), 2)
    raises(IndexError, m.get_payload, 2)
    m0 = m.get_payload(0)
    m1 = m.get_payload(1)
    self.assertIs(m0, self._txt)
    self.assertIs(m1, self._im)
    eq(m.get_payload(), [m0, m1])
    self.assertFalse(m0.is_multipart())
    self.assertFalse(m1.is_multipart())
def test_empty_multipart_idempotent(self):
    """Parsing then regenerating an empty multipart reproduces the text."""
    text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY

--BOUNDARY--
"""
    msg = Parser().parsestr(text)
    self.ndiffAssertEqual(text, msg.as_string())
def test_no_parts_in_a_multipart_with_none_epilogue(self):
    """An empty multipart with epilogue None flattens to bare boundaries."""
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.set_boundary('BOUNDARY')
    self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY

--BOUNDARY--
''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
    """Empty-string preamble/epilogue produce the same bare-boundary output."""
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.preamble = ''
    outer.epilogue = ''
    outer.set_boundary('BOUNDARY')
    self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY

--BOUNDARY--
''')
def test_one_part_in_a_multipart(self):
    """A single attached text part is wrapped between the boundaries."""
    eq = self.ndiffAssertEqual
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.set_boundary('BOUNDARY')
    msg = MIMEText('hello world')
    outer.attach(msg)
    eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
    """An empty-string preamble does not change the flattened output."""
    eq = self.ndiffAssertEqual
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.preamble = ''
    msg = MIMEText('hello world')
    outer.attach(msg)
    outer.set_boundary('BOUNDARY')
    eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
    """A None preamble (the default) is simply omitted."""
    eq = self.ndiffAssertEqual
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.preamble = None
    msg = MIMEText('hello world')
    outer.attach(msg)
    outer.set_boundary('BOUNDARY')
    eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain

--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit

hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
    """A ``None`` epilogue adds nothing after the final boundary."""
    eq = self.ndiffAssertEqual
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.epilogue = None
    msg = MIMEText('hello world')
    outer.attach(msg)
    outer.set_boundary('BOUNDARY')
    eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
    """An empty-string epilogue likewise adds no trailing text."""
    eq = self.ndiffAssertEqual
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.epilogue = ''
    msg = MIMEText('hello world')
    outer.attach(msg)
    outer.set_boundary('BOUNDARY')
    eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
    """A single-newline epilogue is preserved after the end boundary."""
    eq = self.ndiffAssertEqual
    outer = MIMEBase('multipart', 'mixed')
    outer['Subject'] = 'A subject'
    outer['To'] = 'aperson@dom.ain'
    outer['From'] = 'bperson@dom.ain'
    outer.epilogue = '\n'
    msg = MIMEText('hello world')
    outer.attach(msg)
    outer.set_boundary('BOUNDARY')
    eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_message_external_body(self):
    """Parse msg_36.txt: a multipart whose second part is a
    multipart/alternative made of two message/external-body subparts,
    each wrapping a single text/plain part."""
    msg = self._msgobj('msg_36.txt')
    self.assertEqual(len(msg.get_payload()), 2)
    alternative = msg.get_payload(1)
    self.assertEqual(alternative.get_content_type(),
                     'multipart/alternative')
    parts = alternative.get_payload()
    self.assertEqual(len(parts), 2)
    for part in parts:
        self.assertEqual(part.get_content_type(), 'message/external-body')
        inner = part.get_payload()
        self.assertEqual(len(inner), 1)
        self.assertEqual(inner[0].get_content_type(), 'text/plain')
def test_double_boundary(self):
    """Two consecutive dash-boundaries: the duplicate is ignored.

    msg_37.txt contains two dash-boundary's in a row; our
    interpretation of RFC 2046 calls for ignoring the second and
    subsequent boundaries, leaving three parts.
    """
    payload = self._msgobj('msg_37.txt').get_payload()
    self.assertEqual(len(payload), 3)
def test_nested_inner_contains_outer_boundary(self):
    """Inner parts containing outer boundaries are treated as
    unterminated inner parts."""
    eq = self.ndiffAssertEqual
    # msg_38.txt has an inner part that contains outer boundaries.  My
    # interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) say
    # these are illegal and should be interpreted as unterminated inner
    # parts.
    msg = self._msgobj('msg_38.txt')
    sfp = StringIO()
    iterators._structure(msg, sfp)
    eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")
def test_nested_with_same_boundary(self):
    """Inner parts reusing the outer boundary parse into a flat-ish tree."""
    eq = self.ndiffAssertEqual
    # msg 39.txt is similarly evil in that it's got inner parts that use
    # the same boundary as outer parts.  Again, I believe the way this is
    # parsed is closest to the spirit of RFC 2046
    msg = self._msgobj('msg_39.txt')
    sfp = StringIO()
    iterators._structure(msg, sfp)
    eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")
def test_boundary_in_non_multipart(self):
    """A boundary parameter on a non-multipart type is inert: the body
    is passed through verbatim, boundary lines included."""
    msg = self._msgobj('msg_40.txt')
    self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"
----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit
<html></html>
----961284236552522269--
''')
def test_boundary_with_leading_space(self):
    """A boundary value with a leading space still delimits parts."""
    eq = self.assertEqual
    msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"
-- XXXX
Content-Type: text/plain
-- XXXX
Content-Type: text/plain
-- XXXX--
''')
    self.assertTrue(msg.is_multipart())
    eq(msg.get_boundary(), ' XXXX')
    eq(len(msg.get_payload()), 2)
def test_boundary_without_trailing_newline(self):
    """A closing boundary with no trailing newline still ends the part."""
    m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0
--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64
YXNkZg==
--===============0012394164==--""")
    self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
    """Parsing of badly formatted messages.

    Each malformation must be survivable; most are recorded on the
    message's ``defects`` attribute rather than raising.
    """

    def test_parse_missing_minor_type(self):
        """A Content-Type with no subtype falls back to text/plain."""
        eq = self.assertEqual
        msg = self._msgobj('msg_14.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')

    # test_defect_handling
    def test_same_boundary_inner_outer(self):
        """Inner part reusing the outer boundary gets a defect recorded."""
        msg = self._msgobj('msg_15.txt')
        # XXX We can probably eventually do better
        inner = msg.get_payload(0)
        self.assertTrue(hasattr(inner, 'defects'))
        self.assertEqual(len(inner.defects), 1)
        self.assertIsInstance(inner.defects[0],
                              errors.StartBoundaryNotFoundDefect)

    # test_defect_handling
    def test_multipart_no_boundary(self):
        """multipart with no boundary parameter: payload stays a string
        and two defects are recorded."""
        msg = self._msgobj('msg_25.txt')
        self.assertIsInstance(msg.get_payload(), str)
        self.assertEqual(len(msg.defects), 2)
        self.assertIsInstance(msg.defects[0],
                              errors.NoBoundaryInMultipartDefect)
        self.assertIsInstance(msg.defects[1],
                              errors.MultipartInvariantViolationDefect)

    # Template message for the CTE tests below; the format slot receives
    # an optional extra header line (e.g. a Content-Transfer-Encoding).
    multipart_msg = textwrap.dedent("""\
Date: Wed, 14 Nov 2007 12:56:23 GMT
From: foo@bar.invalid
To: foo@bar.invalid
Subject: Content-Transfer-Encoding: base64 and multipart
MIME-Version: 1.0
Content-Type: multipart/mixed;
boundary="===============3344438784458119861=="{}
--===============3344438784458119861==
Content-Type: text/plain
Test message
--===============3344438784458119861==
Content-Type: application/octet-stream
Content-Transfer-Encoding: base64
YWJj
--===============3344438784458119861==--
""")

    # test_defect_handling
    def test_multipart_invalid_cte(self):
        """base64 CTE on a multipart container is invalid and flagged."""
        msg = self._str_msg(
            self.multipart_msg.format("\nContent-Transfer-Encoding: base64"))
        self.assertEqual(len(msg.defects), 1)
        self.assertIsInstance(msg.defects[0],
            errors.InvalidMultipartContentTransferEncodingDefect)

    # test_defect_handling
    def test_multipart_no_cte_no_defect(self):
        """Omitting the CTE header entirely is fine for a multipart."""
        msg = self._str_msg(self.multipart_msg.format(''))
        self.assertEqual(len(msg.defects), 0)

    # test_defect_handling
    def test_multipart_valid_cte_no_defect(self):
        """7bit/8bit/binary (case-insensitive) are valid multipart CTEs."""
        for cte in ('7bit', '8bit', 'BINary'):
            msg = self._str_msg(
                self.multipart_msg.format(
                    "\nContent-Transfer-Encoding: {}".format(cte)))
            self.assertEqual(len(msg.defects), 0)

    # test_headerregistry.TestContentTypeHeader invalid_1 and invalid_2.
    def test_invalid_content_type(self):
        """Invalid Content-Type values degrade to text/plain, but the raw
        header value is still regenerated verbatim."""
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        msg = Message()
        # RFC 2045, $5.2 says invalid yields text/plain
        msg['Content-Type'] = 'text'
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_content_type(), 'text/plain')
        # Clear the old value and try something /really/ invalid
        del msg['content-type']
        msg['Content-Type'] = 'foo'
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_content_type(), 'text/plain')
        # Still, make sure that the message is idempotently generated
        s = StringIO()
        g = Generator(s)
        g.flatten(msg)
        neq(s.getvalue(), 'Content-Type: foo\n\n')

    def test_no_start_boundary(self):
        """If the start boundary never appears, the body stays unparsed."""
        eq = self.ndiffAssertEqual
        msg = self._msgobj('msg_31.txt')
        eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain
message 1
--BOUNDARY
Content-Type: text/plain
message 2
--BOUNDARY--
""")

    def test_no_separating_blank_line(self):
        """Missing header/body separator: message is treated as all headers."""
        eq = self.ndiffAssertEqual
        msg = self._msgobj('msg_35.txt')
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting
counter to RFC 2822, there's no separating newline here
""")

    # test_defect_handling
    def test_lying_multipart(self):
        """Claims to be multipart but lacks a boundary: two defects."""
        msg = self._msgobj('msg_41.txt')
        self.assertTrue(hasattr(msg, 'defects'))
        self.assertEqual(len(msg.defects), 2)
        self.assertIsInstance(msg.defects[0],
                              errors.NoBoundaryInMultipartDefect)
        self.assertIsInstance(msg.defects[1],
                              errors.MultipartInvariantViolationDefect)

    # test_defect_handling
    def test_missing_start_boundary(self):
        """The nested part missing its start boundary gets the defect."""
        outer = self._msgobj('msg_42.txt')
        # The message structure is:
        #
        # multipart/mixed
        #     text/plain
        #     message/rfc822
        #         multipart/mixed [*]
        #
        # [*] This message is missing its start boundary
        bad = outer.get_payload(1).get_payload(0)
        self.assertEqual(len(bad.defects), 1)
        self.assertIsInstance(bad.defects[0],
                              errors.StartBoundaryNotFoundDefect)

    # test_defect_handling
    def test_first_line_is_continuation_header(self):
        """A leading continuation line is recorded as a defect, not a header."""
        eq = self.assertEqual
        m = ' Line 1\nSubject: test\n\nbody'
        msg = email.message_from_string(m)
        eq(msg.keys(), ['Subject'])
        eq(msg.get_payload(), 'body')
        eq(len(msg.defects), 1)
        self.assertDefectsEqual(msg.defects,
            [errors.FirstHeaderLineIsContinuationDefect])
        eq(msg.defects[0].line, ' Line 1\n')

    # test_defect_handling
    def test_missing_header_body_separator(self):
        """A non-header line in the header section stops header parsing."""
        # Our heuristic if we see a line that doesn't look like a header (no
        # leading whitespace but no ':') is to assume that the blank line that
        # separates the header from the body is missing, and to stop parsing
        # headers and start parsing the body.
        msg = self._str_msg('Subject: test\nnot a header\nTo: abc\n\nb\n')
        self.assertEqual(msg.keys(), ['Subject'])
        self.assertEqual(msg.get_payload(), 'not a header\nTo: abc\n\nb\n')
        self.assertDefectsEqual(msg.defects,
                                [errors.MissingHeaderBodySeparatorDefect])
# Test RFC 2047 header encoding and decoding
class TestRFC2047(TestEmailBase):
    """decode_header()/make_header() behavior for RFC 2047 encoded words."""

    def test_rfc2047_multiline(self):
        """Encoded words folded across lines decode and re-encode cleanly."""
        eq = self.assertEqual
        s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
        dh = decode_header(s)
        eq(dh, [
            (b'Re: ', None),
            (b'r\x8aksm\x9arg\x8cs', 'mac-iceland'),
            (b' baz foo bar ', None),
            (b'r\x8aksm\x9arg\x8cs', 'mac-iceland')])
        header = make_header(dh)
        eq(str(header),
           'Re: r\xe4ksm\xf6rg\xe5s baz foo bar r\xe4ksm\xf6rg\xe5s')
        self.ndiffAssertEqual(header.encode(maxlinelen=76), """\
Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar =?mac-iceland?q?r=8Aksm?=
=?mac-iceland?q?=9Arg=8Cs?=""")

    def test_whitespace_keeper_unicode(self):
        """Whitespace after an encoded word is preserved on decode."""
        eq = self.assertEqual
        s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
        dh = decode_header(s)
        eq(dh, [(b'Andr\xe9', 'iso-8859-1'),
                (b' Pirard <pirard@dom.ain>', None)])
        header = str(make_header(dh))
        eq(header, 'Andr\xe9 Pirard <pirard@dom.ain>')

    def test_whitespace_keeper_unicode_2(self):
        """Interleaved plain and encoded chunks keep their whitespace."""
        eq = self.assertEqual
        s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
        dh = decode_header(s)
        eq(dh, [(b'The ', None), (b'quick brown fox', 'iso-8859-1'),
                (b' jumped over the ', None), (b'lazy dog', 'iso-8859-1')])
        hu = str(make_header(dh))
        eq(hu, 'The quick brown fox jumped over the lazy dog')

    def test_rfc2047_missing_whitespace(self):
        """Encoded words abutting plain text are still split correctly."""
        s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
        dh = decode_header(s)
        self.assertEqual(dh, [(b'Sm', None), (b'\xf6', 'iso-8859-1'),
                              (b'rg', None), (b'\xe5', 'iso-8859-1'),
                              (b'sbord', None)])

    def test_rfc2047_with_whitespace(self):
        """Whitespace-separated encoded words retain the whitespace runs."""
        s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
        dh = decode_header(s)
        self.assertEqual(dh, [(b'Sm ', None), (b'\xf6', 'iso-8859-1'),
                              (b' rg ', None), (b'\xe5', 'iso-8859-1'),
                              (b' sbord', None)])

    def test_rfc2047_B_bad_padding(self):
        """Missing/short base64 padding is tolerated by decode_header."""
        s = '=?iso-8859-1?B?%s?='
        data = [                                # only test complete bytes
            ('dm==', b'v'), ('dm=', b'v'), ('dm', b'v'),
            ('dmk=', b'vi'), ('dmk', b'vi')
          ]
        for q, a in data:
            dh = decode_header(s % q)
            self.assertEqual(dh, [(a, 'iso-8859-1')])

    def test_rfc2047_Q_invalid_digits(self):
        """An invalid Q-encoded escape is passed through literally."""
        # issue 10004.
        s = '=?iso-8659-1?Q?andr=e9=zz?='
        self.assertEqual(decode_header(s),
                        [(b'andr\xe9=zz', 'iso-8659-1')])

    def test_rfc2047_rfc2047_1(self):
        # 1st testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_2(self):
        # 2nd testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= b)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a', 'iso-8859-1'), (b' b)', None)])

    def test_rfc2047_rfc2047_3(self):
        # 3rd testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_4(self):
        # 4th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?=  =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_5a(self):
        # 5th testcase at end of rfc2047 newline is \r\n
        s = '(=?ISO-8859-1?Q?a?=\r\n =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_5b(self):
        # 5th testcase at end of rfc2047 newline is \n
        s = '(=?ISO-8859-1?Q?a?=\n =?ISO-8859-1?Q?b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_6(self):
        # 6th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a_b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a b', 'iso-8859-1'), (b')', None)])

    def test_rfc2047_rfc2047_7(self):
        # 7th testcase at end of rfc2047
        s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-2?Q?_b?=)'
        self.assertEqual(decode_header(s),
            [(b'(', None), (b'a', 'iso-8859-1'), (b' b', 'iso-8859-2'),
             (b')', None)])
        self.assertEqual(make_header(decode_header(s)).encode(), s.lower())
        self.assertEqual(str(make_header(decode_header(s))), '(a b)')

    def test_multiline_header(self):
        """A folded encoded-word header round-trips through make_header."""
        s = '=?windows-1252?q?=22M=FCller_T=22?=\r\n <T.Mueller@xxx.com>'
        self.assertEqual(decode_header(s),
            [(b'"M\xfcller T"', 'windows-1252'),
             (b'<T.Mueller@xxx.com>', None)])
        self.assertEqual(make_header(decode_header(s)).encode(),
                         ''.join(s.splitlines()))
        self.assertEqual(str(make_header(decode_header(s))),
                         '"Müller T" <T.Mueller@xxx.com>')
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
    """Behavior of the MIMEMessage class (message/rfc822 containers)."""

    def setUp(self):
        # Cache the raw text of a simple message/rfc822 fixture.
        with openfile('msg_11.txt') as fp:
            self._text = fp.read()

    def test_type_error(self):
        """MIMEMessage requires a Message instance, not a string."""
        self.assertRaises(TypeError, MIMEMessage, 'a plain string')

    def test_valid_argument(self):
        """Wrapping a Message yields a one-element message/rfc822 payload."""
        eq = self.assertEqual
        subject = 'A sub-message'
        m = Message()
        m['Subject'] = subject
        r = MIMEMessage(m)
        eq(r.get_content_type(), 'message/rfc822')
        payload = r.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        subpart = payload[0]
        # The wrapped object is the very same instance, not a copy.
        self.assertIs(subpart, m)
        eq(subpart['subject'], subject)

    def test_bad_multipart(self):
        """Attaching a second message to a MIMEMessage must fail."""
        msg1 = Message()
        msg1['Subject'] = 'subpart 1'
        msg2 = Message()
        msg2['Subject'] = 'subpart 2'
        r = MIMEMessage(msg1)
        self.assertRaises(errors.MultipartConversionError, r.attach, msg2)

    def test_generate(self):
        """Flattening an encapsulated message emits both header blocks."""
        # First craft the message to be encapsulated
        m = Message()
        m['Subject'] = 'An enclosed message'
        m.set_payload('Here is the body of the message.\n')
        r = MIMEMessage(m)
        r['Subject'] = 'The enclosing message'
        s = StringIO()
        g = Generator(s)
        g.flatten(r)
        self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message
Subject: An enclosed message
Here is the body of the message.
""")

    def test_parse_message_rfc822(self):
        """Parsing a message/rfc822 produces a single nested Message."""
        eq = self.assertEqual
        msg = self._msgobj('msg_11.txt')
        eq(msg.get_content_type(), 'message/rfc822')
        payload = msg.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        submsg = payload[0]
        self.assertIsInstance(submsg, Message)
        eq(submsg['subject'], 'An enclosed message')
        eq(submsg.get_payload(), 'Here is the body of the message.\n')

    def test_dsn(self):
        """Full structure of a multipart/report Delivery Status Notification."""
        eq = self.assertEqual
        # msg 16 is a Delivery Status Notification, see RFC 1894
        msg = self._msgobj('msg_16.txt')
        eq(msg.get_content_type(), 'multipart/report')
        self.assertTrue(msg.is_multipart())
        eq(len(msg.get_payload()), 3)
        # Subpart 1 is a text/plain, human readable section
        subpart = msg.get_payload(0)
        eq(subpart.get_content_type(), 'text/plain')
        eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
  Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
  Date: Sun, 23 Sep 2001 20:10:55 -0700
  From: "Ian T. Henry" <henryi@oxy.edu>
  To: SoCal Raves <scr@socal-raves.org>
  Subject: [scr] yeah for Ians!!
Your message cannot be delivered to the following recipients:
  Recipient address: jangel1@cougar.noc.ucla.edu
  Reason: recipient reached disk quota
""")
        # Subpart 2 contains the machine parsable DSN information.  It
        # consists of two blocks of headers, represented by two nested Message
        # objects.
        subpart = msg.get_payload(1)
        eq(subpart.get_content_type(), 'message/delivery-status')
        eq(len(subpart.get_payload()), 2)
        # message/delivery-status should treat each block as a bunch of
        # headers, i.e. a bunch of Message objects.
        dsn1 = subpart.get_payload(0)
        self.assertIsInstance(dsn1, Message)
        eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
        eq(dsn1.get_param('dns', header='reporting-mta'), '')
        # Try a missing one <wink>
        eq(dsn1.get_param('nsd', header='reporting-mta'), None)
        dsn2 = subpart.get_payload(1)
        self.assertIsInstance(dsn2, Message)
        eq(dsn2['action'], 'failed')
        eq(dsn2.get_params(header='original-recipient'),
           [('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
        eq(dsn2.get_param('rfc822', header='final-recipient'), '')
        # Subpart 3 is the original message
        subpart = msg.get_payload(2)
        eq(subpart.get_content_type(), 'message/rfc822')
        payload = subpart.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        subsubpart = payload[0]
        self.assertIsInstance(subsubpart, Message)
        eq(subsubpart.get_content_type(), 'text/plain')
        eq(subsubpart['message-id'],
           '<002001c144a6$8752e060$56104586@oxy.edu>')

    def test_epilogue(self):
        """Generated output with preamble/epilogue matches the fixture."""
        eq = self.ndiffAssertEqual
        with openfile('msg_21.txt') as fp:
            text = fp.read()
        msg = Message()
        msg['From'] = 'aperson@dom.ain'
        msg['To'] = 'bperson@dom.ain'
        msg['Subject'] = 'Test'
        msg.preamble = 'MIME message'
        msg.epilogue = 'End of MIME message\n'
        msg1 = MIMEText('One')
        msg2 = MIMEText('Two')
        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
        msg.attach(msg1)
        msg.attach(msg2)
        sfp = StringIO()
        g = Generator(sfp)
        g.flatten(msg)
        eq(sfp.getvalue(), text)

    def test_no_nl_preamble(self):
        """A preamble without a trailing newline still renders correctly."""
        eq = self.ndiffAssertEqual
        msg = Message()
        msg['From'] = 'aperson@dom.ain'
        msg['To'] = 'bperson@dom.ain'
        msg['Subject'] = 'Test'
        msg.preamble = 'MIME message'
        msg.epilogue = ''
        msg1 = MIMEText('One')
        msg2 = MIMEText('Two')
        msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
        msg.attach(msg1)
        msg.attach(msg2)
        eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Two
--BOUNDARY--
""")

    def test_default_type(self):
        """Inside multipart/digest, untyped parts default to message/rfc822."""
        eq = self.assertEqual
        with openfile('msg_30.txt') as fp:
            msg = email.message_from_file(fp)
        container1 = msg.get_payload(0)
        eq(container1.get_default_type(), 'message/rfc822')
        eq(container1.get_content_type(), 'message/rfc822')
        container2 = msg.get_payload(1)
        eq(container2.get_default_type(), 'message/rfc822')
        eq(container2.get_content_type(), 'message/rfc822')
        container1a = container1.get_payload(0)
        eq(container1a.get_default_type(), 'text/plain')
        eq(container1a.get_content_type(), 'text/plain')
        container2a = container2.get_payload(0)
        eq(container2a.get_default_type(), 'text/plain')
        eq(container2a.get_content_type(), 'text/plain')

    def test_default_type_with_explicit_container_type(self):
        """Same defaults hold when the container type is given explicitly."""
        eq = self.assertEqual
        with openfile('msg_28.txt') as fp:
            msg = email.message_from_file(fp)
        container1 = msg.get_payload(0)
        eq(container1.get_default_type(), 'message/rfc822')
        eq(container1.get_content_type(), 'message/rfc822')
        container2 = msg.get_payload(1)
        eq(container2.get_default_type(), 'message/rfc822')
        eq(container2.get_content_type(), 'message/rfc822')
        container1a = container1.get_payload(0)
        eq(container1a.get_default_type(), 'text/plain')
        eq(container1a.get_content_type(), 'text/plain')
        container2a = container2.get_payload(0)
        eq(container2a.get_default_type(), 'text/plain')
        eq(container2a.get_content_type(), 'text/plain')

    def test_default_type_non_parsed(self):
        """Default types apply to constructed (non-parsed) trees too, even
        after the explicit Content-Type headers are deleted."""
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        # Set up container
        container = MIMEMultipart('digest', 'BOUNDARY')
        container.epilogue = ''
        # Set up subparts
        subpart1a = MIMEText('message 1\n')
        subpart2a = MIMEText('message 2\n')
        subpart1 = MIMEMessage(subpart1a)
        subpart2 = MIMEMessage(subpart2a)
        container.attach(subpart1)
        container.attach(subpart2)
        eq(subpart1.get_content_type(), 'message/rfc822')
        eq(subpart1.get_default_type(), 'message/rfc822')
        eq(subpart2.get_content_type(), 'message/rfc822')
        eq(subpart2.get_default_type(), 'message/rfc822')
        neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
        del subpart1['content-type']
        del subpart1['mime-version']
        del subpart2['content-type']
        del subpart2['mime-version']
        eq(subpart1.get_content_type(), 'message/rfc822')
        eq(subpart1.get_default_type(), 'message/rfc822')
        eq(subpart2.get_content_type(), 'message/rfc822')
        eq(subpart2.get_default_type(), 'message/rfc822')
        neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')

    def test_mime_attachments_in_constructor(self):
        """MIMEMultipart(_subparts=...) attaches the given parts in order."""
        eq = self.assertEqual
        text1 = MIMEText('')
        text2 = MIMEText('')
        msg = MIMEMultipart(_subparts=(text1, text2))
        eq(len(msg.get_payload()), 2)
        eq(msg.get_payload(0), text1)
        eq(msg.get_payload(1), text2)

    def test_default_multipart_constructor(self):
        """A bare MIMEMultipart() is already a multipart message."""
        msg = MIMEMultipart()
        self.assertTrue(msg.is_multipart())
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
    """parser -> model -> generator round trips must reproduce the input."""

    # Line separator used by the fixtures; overridden by subclasses that
    # test other separators.
    linesep = '\n'

    def _msgobj(self, filename):
        """Return (parsed Message, raw text) for the named fixture."""
        with openfile(filename) as fp:
            data = fp.read()
        msg = email.message_from_string(data)
        return msg, data

    def _idempotent(self, msg, text, unixfrom=False):
        """Assert that regenerating *msg* reproduces *text* exactly."""
        eq = self.ndiffAssertEqual
        s = StringIO()
        # maxheaderlen=0 disables header refolding, which would otherwise
        # break byte-for-byte idempotency.
        g = Generator(s, maxheaderlen=0)
        g.flatten(msg, unixfrom=unixfrom)
        eq(text, s.getvalue())

    def test_parse_text_message(self):
        """Simple text/plain message parses and round-trips."""
        eq = self.assertEqual
        msg, text = self._msgobj('msg_01.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_content_maintype(), 'text')
        eq(msg.get_content_subtype(), 'plain')
        eq(msg.get_params()[1], ('charset', 'us-ascii'))
        eq(msg.get_param('charset'), 'us-ascii')
        eq(msg.preamble, None)
        eq(msg.epilogue, None)
        self._idempotent(msg, text)

    def test_parse_untyped_message(self):
        """Message without a Content-Type still round-trips."""
        eq = self.assertEqual
        msg, text = self._msgobj('msg_03.txt')
        eq(msg.get_content_type(), 'text/plain')
        eq(msg.get_params(), None)
        eq(msg.get_param('charset'), None)
        self._idempotent(msg, text)

    def test_simple_multipart(self):
        msg, text = self._msgobj('msg_04.txt')
        self._idempotent(msg, text)

    def test_MIME_digest(self):
        msg, text = self._msgobj('msg_02.txt')
        self._idempotent(msg, text)

    def test_long_header(self):
        msg, text = self._msgobj('msg_27.txt')
        self._idempotent(msg, text)

    def test_MIME_digest_with_part_headers(self):
        msg, text = self._msgobj('msg_28.txt')
        self._idempotent(msg, text)

    def test_mixed_with_image(self):
        msg, text = self._msgobj('msg_06.txt')
        self._idempotent(msg, text)

    def test_multipart_report(self):
        msg, text = self._msgobj('msg_05.txt')
        self._idempotent(msg, text)

    def test_dsn(self):
        msg, text = self._msgobj('msg_16.txt')
        self._idempotent(msg, text)

    def test_preamble_epilogue(self):
        msg, text = self._msgobj('msg_21.txt')
        self._idempotent(msg, text)

    def test_multipart_one_part(self):
        msg, text = self._msgobj('msg_23.txt')
        self._idempotent(msg, text)

    def test_multipart_no_parts(self):
        msg, text = self._msgobj('msg_24.txt')
        self._idempotent(msg, text)

    def test_no_start_boundary(self):
        msg, text = self._msgobj('msg_31.txt')
        self._idempotent(msg, text)

    def test_rfc2231_charset(self):
        msg, text = self._msgobj('msg_32.txt')
        self._idempotent(msg, text)

    def test_more_rfc2231_parameters(self):
        msg, text = self._msgobj('msg_33.txt')
        self._idempotent(msg, text)

    def test_text_plain_in_a_multipart_digest(self):
        msg, text = self._msgobj('msg_34.txt')
        self._idempotent(msg, text)

    def test_nested_multipart_mixeds(self):
        msg, text = self._msgobj('msg_12a.txt')
        self._idempotent(msg, text)

    def test_message_external_body_idempotent(self):
        msg, text = self._msgobj('msg_36.txt')
        self._idempotent(msg, text)

    def test_message_delivery_status(self):
        # unixfrom=True because the fixture carries a Unix-From line.
        msg, text = self._msgobj('msg_43.txt')
        self._idempotent(msg, text, unixfrom=True)

    def test_message_signed_idempotent(self):
        msg, text = self._msgobj('msg_45.txt')
        self._idempotent(msg, text)

    def test_content_type(self):
        """Spot-check the parsed structure of a multipart/report fixture."""
        eq = self.assertEqual
        # Get a message object and reset the seek pointer for other tests
        msg, text = self._msgobj('msg_05.txt')
        eq(msg.get_content_type(), 'multipart/report')
        # Test the Content-Type: parameters
        params = {}
        for pk, pv in msg.get_params():
            params[pk] = pv
        eq(params['report-type'], 'delivery-status')
        eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
        eq(msg.preamble, 'This is a MIME-encapsulated message.' + self.linesep)
        eq(msg.epilogue, self.linesep)
        eq(len(msg.get_payload()), 3)
        # Make sure the subparts are what we expect
        msg1 = msg.get_payload(0)
        eq(msg1.get_content_type(), 'text/plain')
        eq(msg1.get_payload(), 'Yadda yadda yadda' + self.linesep)
        msg2 = msg.get_payload(1)
        eq(msg2.get_content_type(), 'text/plain')
        eq(msg2.get_payload(), 'Yadda yadda yadda' + self.linesep)
        msg3 = msg.get_payload(2)
        eq(msg3.get_content_type(), 'message/rfc822')
        self.assertIsInstance(msg3, Message)
        payload = msg3.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        msg4 = payload[0]
        self.assertIsInstance(msg4, Message)
        eq(msg4.get_payload(), 'Yadda yadda yadda' + self.linesep)

    def test_parser(self):
        """A message/rfc822 wrapper around a text/plain body parses fully."""
        eq = self.assertEqual
        msg, text = self._msgobj('msg_06.txt')
        # Check some of the outer headers
        eq(msg.get_content_type(), 'message/rfc822')
        # Make sure the payload is a list of exactly one sub-Message, and that
        # that submessage has a type of text/plain
        payload = msg.get_payload()
        self.assertIsInstance(payload, list)
        eq(len(payload), 1)
        msg1 = payload[0]
        self.assertIsInstance(msg1, Message)
        eq(msg1.get_content_type(), 'text/plain')
        self.assertIsInstance(msg1.get_payload(), str)
        eq(msg1.get_payload(), self.linesep)
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
    """message_from_string() round-trips a simple message exactly."""
    with openfile('msg_01.txt') as fp:
        text = fp.read()
    msg = email.message_from_string(text)
    s = StringIO()
    # Don't wrap/continue long headers since we're trying to test
    # idempotency.
    g = Generator(s, maxheaderlen=0)
    g.flatten(msg)
    self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
    """message_from_file() round-trips a simple message exactly."""
    with openfile('msg_01.txt') as fp:
        text = fp.read()
        # Rewind so the parser reads the same content just read above.
        fp.seek(0)
        msg = email.message_from_file(fp)
        s = StringIO()
        # Don't wrap/continue long headers since we're trying to test
        # idempotency.
        g = Generator(s, maxheaderlen=0)
        g.flatten(msg)
        self.assertEqual(text, s.getvalue())
def test_message_from_string_with_class(self):
    """A custom Message subclass is used for the root and all subparts."""
    with openfile('msg_01.txt') as fp:
        text = fp.read()

    # Create a subclass
    class MyMessage(Message):
        pass

    msg = email.message_from_string(text, MyMessage)
    self.assertIsInstance(msg, MyMessage)
    # Try something more complicated
    with openfile('msg_02.txt') as fp:
        text = fp.read()
    msg = email.message_from_string(text, MyMessage)
    for subpart in msg.walk():
        self.assertIsInstance(subpart, MyMessage)
def test_message_from_file_with_class(self):
    """Same as above, but parsing from a file object."""
    # Create a subclass
    class MyMessage(Message):
        pass

    with openfile('msg_01.txt') as fp:
        msg = email.message_from_file(fp, MyMessage)
    self.assertIsInstance(msg, MyMessage)
    # Try something more complicated
    with openfile('msg_02.txt') as fp:
        msg = email.message_from_file(fp, MyMessage)
    for subpart in msg.walk():
        self.assertIsInstance(subpart, MyMessage)
def test_custom_message_does_not_require_arguments(self):
    """A Message subclass with a no-argument __init__ is acceptable."""
    class MyMessage(Message):
        def __init__(self):
            super().__init__()
    msg = self._str_msg("Subject: test\n\ntest", MyMessage)
    self.assertIsInstance(msg, MyMessage)
def test__all__(self):
    """The email package's __all__ matches the expected public names."""
    module = __import__('email')
    self.assertEqual(sorted(module.__all__), [
        'base64mime', 'charset', 'encoders', 'errors', 'feedparser',
        'generator', 'header', 'iterators', 'message',
        'message_from_binary_file', 'message_from_bytes',
        'message_from_file', 'message_from_string', 'mime', 'parser',
        'quoprimime', 'utils',
        ])
def test_formatdate(self):
    """formatdate()/parsedate() round-trip the current GMT time."""
    now = time.time()
    self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
                     time.gmtime(now)[:6])
def test_formatdate_localtime(self):
    """formatdate(localtime=True) round-trips through local time."""
    now = time.time()
    self.assertEqual(
        utils.parsedate(utils.formatdate(now, localtime=True))[:6],
        time.localtime(now)[:6])
def test_formatdate_usegmt(self):
    """usegmt=True emits 'GMT' instead of the numeric '-0000' zone."""
    now = time.time()
    self.assertEqual(
        utils.formatdate(now, localtime=False),
        time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
    self.assertEqual(
        utils.formatdate(now, localtime=False, usegmt=True),
        time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
# parsedate and parsedate_tz will become deprecated interfaces someday
def test_parsedate_returns_None_for_invalid_strings(self):
    """Unparseable inputs yield None rather than raising."""
    self.assertIsNone(utils.parsedate(''))
    self.assertIsNone(utils.parsedate_tz(''))
    self.assertIsNone(utils.parsedate('0'))
    self.assertIsNone(utils.parsedate_tz('0'))
    self.assertIsNone(utils.parsedate('A Complete Waste of Time'))
    self.assertIsNone(utils.parsedate_tz('A Complete Waste of Time'))
    # Not a part of the spec, but this has historically worked:
    self.assertIsNone(utils.parsedate(None))
    self.assertIsNone(utils.parsedate_tz(None))
def test_parsedate_compact(self):
    """Folding whitespace after the day-of-week comma is optional."""
    # The FWS after the comma is optional
    self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
                     utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
    """The leading day-of-week token may be omitted entirely."""
    eq = self.assertEqual
    eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
       (2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
    """Single-digit day with no day-of-week is accepted."""
    eq = self.assertEqual
    eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
       (2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_no_space_before_positive_offset(self):
    """A '+' offset glued to the time (no space) still parses."""
    self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26+0800'),
       (2002, 4, 3, 14, 58, 26, 0, 1, -1, 28800))
def test_parsedate_no_space_before_negative_offset(self):
    """A '-' offset glued to the time must parse the same way."""
    # Issue 1155362: we already handled '+' for this case.
    self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26-0800'),
       (2002, 4, 3, 14, 58, 26, 0, 1, -1, -28800))
def test_parsedate_accepts_time_with_dots(self):
    """Dotted time fields (HH.MM.SS or HH.MM) are tolerated."""
    eq = self.assertEqual
    eq(utils.parsedate_tz('5 Feb 2003 13.47.26 -0800'),
       (2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
    eq(utils.parsedate_tz('5 Feb 2003 13.47 -0800'),
       (2003, 2, 5, 13, 47, 0, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
    """Tuples from parsedate/parsedate_tz feed directly into time APIs."""
    eq = self.assertEqual
    timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
    t = int(time.mktime(timetup))
    eq(time.localtime(t)[:6], timetup[:6])
    eq(int(time.strftime('%Y', timetup)), 2003)
    # parsedate_tz appends the offset; slice it off for the time module.
    timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
    t = int(time.mktime(timetup[:9]))
    eq(time.localtime(t)[:6], timetup[:6])
    eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_mktime_tz(self):
self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 0)), 0)
self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 1234)), -1234)
def test_parsedate_y2k(self):
    """Test for parsing a date with a two-digit year.

    Parsing a date with a two-digit year should return the correct
    four-digit year.  RFC822 allows two-digit years, but RFC2822 (which
    obsoletes RFC822) requires four-digit years.
    """
    # Years >= 70 map to 19xx, years < 70 map to 20xx.
    for short_year, long_year in (('03', '2003'), ('71', '1971')):
        self.assertEqual(
            utils.parsedate_tz('25 Feb %s 13:47:26 -0800' % short_year),
            utils.parsedate_tz('25 Feb %s 13:47:26 -0800' % long_year))
def test_parseaddr_empty(self):
    # '<>' is the null address; formataddr() round-trips it to ''.
    self.assertEqual(utils.parseaddr('<>'), ('', ''))
    self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')

def test_noquote_dump(self):
    # A display name with no specials needs no quoting.
    self.assertEqual(
        utils.formataddr(('A Silly Person', 'person@dom.ain')),
        'A Silly Person <person@dom.ain>')

def test_escape_dump(self):
    # Parentheses in the display name force quoting on output;
    # backslash-escaped parens in a quoted string are unescaped on parse.
    self.assertEqual(
        utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
        r'"A (Very) Silly Person" <person@dom.ain>')
    self.assertEqual(
        utils.parseaddr(r'"A \(Very\) Silly Person" <person@dom.ain>'),
        ('A (Very) Silly Person', 'person@dom.ain'))
    # Round trip: parseaddr(formataddr(...)) is the identity.
    a = r'A \(Special\) Person'
    b = 'person@dom.ain'
    self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))

def test_escape_backslashes(self):
    # Literal backslashes in the display name are doubled when dumped
    # and restored when parsed back.
    self.assertEqual(
        utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')),
        r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
    a = r'Arthur \Backslash\ Foobar'
    b = 'person@dom.ain'
    self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_quotes_unicode_names(self):
    # issue 1690608.  email.utils.formataddr() should be rfc2047 aware.
    name = "H\u00e4ns W\u00fcrst"
    addr = 'person@dom.ain'
    utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
    latin1_quopri = "=?iso-8859-1?q?H=E4ns_W=FCrst?= <person@dom.ain>"
    # Default charset is utf-8 with base64 header encoding; a named
    # charset selects that charset's own header encoding.
    self.assertEqual(utils.formataddr((name, addr)), utf8_base64)
    self.assertEqual(utils.formataddr((name, addr), 'iso-8859-1'),
                     latin1_quopri)

def test_accepts_any_charset_like_object(self):
    # issue 1690608.  email.utils.formataddr() should be rfc2047 aware.
    name = "H\u00e4ns W\u00fcrst"
    addr = 'person@dom.ain'
    utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
    foobar = "FOOBAR"

    # Anything exposing a header_encode() method is accepted as a
    # charset argument (duck typing).
    class CharsetMock:
        def header_encode(self, string):
            return foobar

    mock = CharsetMock()
    mock_expected = "%s <%s>" % (foobar, addr)
    self.assertEqual(utils.formataddr((name, addr), mock), mock_expected)
    self.assertEqual(utils.formataddr((name, addr), Charset('utf-8')),
                     utf8_base64)

def test_invalid_charset_like_object_raises_error(self):
    # issue 1690608.  email.utils.formataddr() should be rfc2047 aware.
    name = "H\u00e4ns W\u00fcrst"
    addr = 'person@dom.ain'
    # A object without a header_encode method:
    bad_charset = object()
    self.assertRaises(AttributeError, utils.formataddr, (name, addr),
                      bad_charset)

def test_unicode_address_raises_error(self):
    # issue 1690608.  email.utils.formataddr() should be rfc2047 aware.
    # Only the display name may be non-ASCII; the address itself may not.
    addr = 'pers\u00f6n@dom.in'
    self.assertRaises(UnicodeError, utils.formataddr, (None, addr))
    self.assertRaises(UnicodeError, utils.formataddr, ("Name", addr))
def test_name_with_dot(self):
x = 'John X. Doe <jxd@example.com>'
y = '"John X. Doe" <jxd@example.com>'
a, b = ('John X. Doe', 'jxd@example.com')
self.assertEqual(utils.parseaddr(x), (a, b))
self.assertEqual(utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(utils.formataddr((a, b)), y)
def test_parseaddr_preserves_quoted_pairs_in_addresses(self):
    # issue 10005.  Note that in the third test the second pair of
    # backslashes is not actually a quoted pair because it is not inside a
    # comment or quoted string: the address being parsed has a quoted
    # string containing a quoted backslash, followed by 'example' and two
    # backslashes, followed by another quoted string containing a space and
    # the word 'example'.  parseaddr copies those two backslashes
    # literally.  Per rfc5322 this is not technically correct since a \ may
    # not appear in an address outside of a quoted string.  It is probably
    # a sensible Postel interpretation, though.
    eq = self.assertEqual
    eq(utils.parseaddr('""example" example"@example.com'),
       ('', '""example" example"@example.com'))
    eq(utils.parseaddr('"\\"example\\" example"@example.com'),
       ('', '"\\"example\\" example"@example.com'))
    eq(utils.parseaddr('"\\\\"example\\\\" example"@example.com'),
       ('', '"\\\\"example\\\\" example"@example.com'))

def test_parseaddr_preserves_spaces_in_local_part(self):
    # issue 9286.  A normal RFC5322 local part should not contain any
    # folding white space, but legacy local parts can (they are a sequence
    # of atoms, not dotatoms).  On the other hand we strip whitespace from
    # before the @ and around dots, on the assumption that the whitespace
    # around the punctuation is a mistake in what would otherwise be
    # an RFC5322 local part.  Leading whitespace is, usual, stripped as well.
    self.assertEqual(('', "merwok wok@xample.com"),
                     utils.parseaddr("merwok wok@xample.com"))
    self.assertEqual(('', "merwok wok@xample.com"),
                     utils.parseaddr("merwok wok@xample.com"))
    self.assertEqual(('', "merwok wok@xample.com"),
                     utils.parseaddr(" merwok wok @xample.com"))
    self.assertEqual(('', 'merwok"wok" wok@xample.com'),
                     utils.parseaddr('merwok"wok" wok@xample.com'))
    self.assertEqual(('', 'merwok.wok.wok@xample.com'),
                     utils.parseaddr('merwok. wok . wok@xample.com'))

def test_formataddr_does_not_quote_parens_in_quoted_string(self):
    # Parens already inside a quoted display name stay as-is on output.
    addr = ("'foo@example.com' (foo@example.com)",
            'foo@example.com')
    addrstr = ('"\'foo@example.com\' '
               '(foo@example.com)" <foo@example.com>')
    self.assertEqual(utils.parseaddr(addrstr), addr)
    self.assertEqual(utils.formataddr(addr), addrstr)

def test_multiline_from_comment(self):
    # A display name folded over a continuation line is unfolded with a
    # single joining space.
    x = """\
Foo
\tBar <foo@example.com>"""
    self.assertEqual(utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))

def test_quote_dump(self):
    # A ';' in the display name forces quoting on output.
    self.assertEqual(
        utils.formataddr(('A Silly; Person', 'person@dom.ain')),
        r'"A Silly; Person" <person@dom.ain>')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.assertNotEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
    # Both 'addr (Name)' and 'Name <addr>' forms are recognized.
    eq = self.assertEqual
    eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
                           'Bud Person <bperson@dom.ain>']),
       [('Al Person', 'aperson@dom.ain'),
        ('Bud Person', 'bperson@dom.ain')])

def test_getaddresses_nasty(self):
    # Malformed input degrades to empty/partial pairs rather than raising.
    eq = self.assertEqual
    eq(utils.getaddresses(['foo: ;']), [('', '')])
    eq(utils.getaddresses(
       ['[]*-- =~$']),
       [('', ''), ('', ''), ('', '*--')])
    eq(utils.getaddresses(
       ['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
       [('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])

def test_getaddresses_embedded_comment(self):
    """Test proper handling of a nested comment"""
    eq = self.assertEqual
    addrs = utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
    eq(addrs[0][1], 'foo@bar.com')

def test_utils_quote_unquote(self):
    # Backslashes and quotes in a parameter value survive the
    # add_header()/get_filename() round trip unchanged.
    eq = self.assertEqual
    msg = Message()
    msg.add_header('content-disposition', 'attachment',
                   filename='foo\\wacky"name')
    eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
    # An unrecognized charset name falls back to base64 body encoding.
    charset = Charset('not a charset')
    self.assertEqual(charset.get_body_encoding(), 'base64')

def test_get_body_encoding_with_uppercase_charset(self):
    # Charset names from headers are normalized to lowercase; utf-8
    # bodies are base64-encoded, us-ascii bodies pass through as 7bit.
    eq = self.assertEqual
    msg = Message()
    msg['Content-Type'] = 'text/plain; charset=UTF-8'
    eq(msg['content-type'], 'text/plain; charset=UTF-8')
    charsets = msg.get_charsets()
    eq(len(charsets), 1)
    eq(charsets[0], 'utf-8')
    charset = Charset(charsets[0])
    eq(charset.get_body_encoding(), 'base64')
    msg.set_payload(b'hello world', charset=charset)
    eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
    eq(msg.get_payload(decode=True), b'hello world')
    eq(msg['content-transfer-encoding'], 'base64')
    # Try another one
    msg = Message()
    msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
    charsets = msg.get_charsets()
    eq(len(charsets), 1)
    eq(charsets[0], 'us-ascii')
    charset = Charset(charsets[0])
    # For us-ascii the body encoder is the 7or8bit pass-through function.
    eq(charset.get_body_encoding(), encoders.encode_7or8bit)
    msg.set_payload('hello world', charset=charset)
    eq(msg.get_payload(), 'hello world')
    eq(msg['content-transfer-encoding'], '7bit')

def test_charsets_case_insensitive(self):
    # Case of the charset name does not affect the chosen body encoding.
    lc = Charset('us-ascii')
    uc = Charset('US-ASCII')
    self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
def test_partial_falls_inside_message_delivery_status(self):
    eq = self.ndiffAssertEqual
    # The Parser interface provides chunks of data to FeedParser in 8192
    # byte gulps.  SF bug #1076485 found one of those chunks inside
    # message/delivery-status header block, which triggered an
    # unreadline() of NeedMoreData.
    msg = self._msgobj('msg_43.txt')
    sfp = StringIO()
    iterators._structure(msg, sfp)
    # NOTE(review): iterators._structure() normally indents nested parts;
    # the indentation inside this expected text appears to have been lost
    # in this copy — confirm against upstream before relying on it.
    eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")
def test_make_msgid_domain(self):
    # The domain= argument lands verbatim after the '@' in the msgid.
    msgid = email.utils.make_msgid(domain='testdomain-string')
    self.assertEqual(msgid[-19:], '@testdomain-string>')
def test_Generator_linend(self):
    # Issue 14645.
    with openfile('msg_26.txt', newline='\n') as f:
        msgtxt = f.read()
    msgtxt_nl = msgtxt.replace('\r\n', '\n')
    msg = email.message_from_string(msgtxt)
    s = StringIO()
    g = email.generator.Generator(s)
    g.flatten(msg)
    # The default Generator emits \n line endings regardless of input.
    self.assertEqual(s.getvalue(), msgtxt_nl)

def test_BytesGenerator_linend(self):
    # Issue 14645.
    with openfile('msg_26.txt', newline='\n') as f:
        msgtxt = f.read()
    msgtxt_nl = msgtxt.replace('\r\n', '\n')
    msg = email.message_from_string(msgtxt_nl)
    s = BytesIO()
    g = email.generator.BytesGenerator(s)
    # linesep='\r\n' restores CRLF line endings on output.
    g.flatten(msg, linesep='\r\n')
    self.assertEqual(s.getvalue().decode('ascii'), msgtxt)

def test_BytesGenerator_linend_with_non_ascii(self):
    # Issue 14645.  Same as above, but with a non-ASCII byte in the body.
    with openfile('msg_26.txt', 'rb') as f:
        msgtxt = f.read()
    msgtxt = msgtxt.replace(b'with attachment', b'fo\xf6')
    msgtxt_nl = msgtxt.replace(b'\r\n', b'\n')
    msg = email.message_from_bytes(msgtxt_nl)
    s = BytesIO()
    g = email.generator.BytesGenerator(s)
    g.flatten(msg, linesep='\r\n')
    self.assertEqual(s.getvalue(), msgtxt)
# Test the iterator/generators
class TestIterators(TestEmailBase):

    def test_body_line_iterator(self):
        # body_line_iterator() yields every body line of every subpart.
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        # First a simple non-multipart message
        msg = self._msgobj('msg_01.txt')
        it = iterators.body_line_iterator(msg)
        lines = list(it)
        eq(len(lines), 6)
        neq(EMPTYSTRING.join(lines), msg.get_payload())
        # Now a more complicated multipart
        msg = self._msgobj('msg_02.txt')
        it = iterators.body_line_iterator(msg)
        lines = list(it)
        eq(len(lines), 43)
        with openfile('msg_19.txt') as fp:
            neq(EMPTYSTRING.join(lines), fp.read())

    def test_typed_subpart_iterator(self):
        # With only a maintype, every 'text/*' subpart is yielded.
        eq = self.assertEqual
        msg = self._msgobj('msg_04.txt')
        it = iterators.typed_subpart_iterator(msg, 'text')
        lines = []
        subparts = 0
        for subpart in it:
            subparts += 1
            lines.append(subpart.get_payload())
        eq(subparts, 2)
        eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")

    def test_typed_subpart_iterator_default_type(self):
        # With maintype and subtype, only exact 'text/plain' matches.
        eq = self.assertEqual
        msg = self._msgobj('msg_03.txt')
        it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
        lines = []
        subparts = 0
        for subpart in it:
            subparts += 1
            lines.append(subpart.get_payload())
        eq(subparts, 1)
        eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")

    def test_pushCR_LF(self):
        '''FeedParser BufferedSubFile.push() assumed it received complete
        line endings.  A CR ending one push() followed by a LF starting
        the next push() added an empty line.
        '''
        # Pairs of (chunk to push, number of complete lines it yields).
        imt = [
            ("a\r \n", 2),
            ("b", 0),
            ("c\n", 1),
            ("", 0),
            ("d\r\n", 1),
            ("e\r", 0),
            ("\nf", 1),
            ("\r\n", 1),
        ]
        from email.feedparser import BufferedSubFile, NeedMoreData
        bsf = BufferedSubFile()
        om = []
        nt = 0
        for il, n in imt:
            bsf.push(il)
            nt += n
            n1 = 0
            # Drain all complete lines currently available.
            for ol in iter(bsf.readline, NeedMoreData):
                om.append(ol)
                n1 += 1
            self.assertEqual(n, n1)
        self.assertEqual(len(om), nt)
        # The concatenation of all lines read equals the input pushed.
        self.assertEqual(''.join([il for il, n in imt]), ''.join(om))

    def test_push_random(self):
        # Fuzz BufferedSubFile.push() with arbitrary chunking; the lines
        # read back must equal splitlines(True) of the whole input.
        from email.feedparser import BufferedSubFile, NeedMoreData
        n = 10000
        chunksize = 5
        chars = 'abcd \t\r\n'
        s = ''.join(choice(chars) for i in range(n)) + '\n'
        target = s.splitlines(True)
        bsf = BufferedSubFile()
        lines = []
        for i in range(0, len(s), chunksize):
            chunk = s[i:i+chunksize]
            bsf.push(chunk)
            lines.extend(iter(bsf.readline, NeedMoreData))
        self.assertEqual(lines, target)
class TestFeedParsers(TestEmailBase):

    def parse(self, chunks):
        """Feed each element of *chunks* to a fresh FeedParser and
        return the resulting message object."""
        from email.feedparser import FeedParser
        parser = FeedParser()
        for piece in chunks:
            parser.feed(piece)
        return parser.close()

    def test_empty_header_name_handled(self):
        # Issue 19996: a header line with an empty name must not derail
        # parsing of the surrounding headers.
        msg = self.parse("First: val\n: bad\nSecond: val")
        self.assertEqual(msg['First'], 'val')
        self.assertEqual(msg['Second'], 'val')

    def test_newlines(self):
        # \n, \r and \r\n all terminate a header line — even when the
        # terminator is split across feed() chunks — while the Unicode
        # line separators \x85 and \u2028 do not.
        eq = self.assertEqual
        eq(self.parse(['a:\nb:\rc:\r\nd:\n']).keys(), ['a', 'b', 'c', 'd'])
        eq(self.parse(['a:\nb:\rc:\r\nd:']).keys(), ['a', 'b', 'c', 'd'])
        eq(self.parse(['a:\rb', 'c:\n']).keys(), ['a', 'bc'])
        eq(self.parse(['a:\r', 'b:\n']).keys(), ['a', 'b'])
        eq(self.parse(['a:\r', '\nb:\n']).keys(), ['a', 'b'])
        eq(self.parse(['a:\x85b:\u2028c:\n']).items(),
           [('a', '\x85'), ('b', '\u2028'), ('c', '')])
        eq(self.parse(['a:\r', 'b:\x85', 'c:\n']).items(),
           [('a', ''), ('b', '\x85'), ('c', '')])

    def test_long_lines(self):
        # Expected peak memory use on 32-bit platform: 6*N*M bytes.
        eq = self.assertEqual
        M, N = 1000, 20000
        msg = self.parse(['a:b\n\n'] + ['x'*M] * N)
        eq(msg.items(), [('a', 'b')])
        eq(msg.get_payload(), 'x'*M*N)
        msg = self.parse(['a:b\r\r'] + ['x'*M] * N)
        eq(msg.items(), [('a', 'b')])
        eq(msg.get_payload(), 'x'*M*N)
        msg = self.parse(['a:b\r\r'] + ['x'*M+'\x85'] * N)
        eq(msg.items(), [('a', 'b')])
        eq(msg.get_payload(), ('x'*M+'\x85')*N)
        msg = self.parse(['a:\r', 'b: '] + ['x'*M] * N)
        eq(msg.items(), [('a', ''), ('b', 'x'*M*N)])
class TestParsers(TestEmailBase):

    def test_header_parser(self):
        eq = self.assertEqual
        # Parse only the headers of a complex multipart MIME document
        with openfile('msg_02.txt') as fp:
            msg = HeaderParser().parse(fp)
        eq(msg['from'], 'ppp-request@zzz.org')
        eq(msg['to'], 'ppp@zzz.org')
        eq(msg.get_content_type(), 'multipart/mixed')
        # HeaderParser leaves the body as one flat string payload.
        self.assertFalse(msg.is_multipart())
        self.assertIsInstance(msg.get_payload(), str)

    def test_bytes_header_parser(self):
        eq = self.assertEqual
        # Parse only the headers of a complex multipart MIME document
        with openfile('msg_02.txt', 'rb') as fp:
            msg = email.parser.BytesHeaderParser().parse(fp)
        eq(msg['from'], 'ppp-request@zzz.org')
        eq(msg['to'], 'ppp@zzz.org')
        eq(msg.get_content_type(), 'multipart/mixed')
        self.assertFalse(msg.is_multipart())
        self.assertIsInstance(msg.get_payload(), str)
        self.assertIsInstance(msg.get_payload(decode=True), bytes)

    def test_bytes_parser_does_not_close_file(self):
        with openfile('msg_02.txt', 'rb') as fp:
            email.parser.BytesParser().parse(fp)
            self.assertFalse(fp.closed)

    def test_bytes_parser_on_exception_does_not_close_file(self):
        # Even when the strict policy raises a defect, the caller's file
        # object must stay open.
        with openfile('msg_15.txt', 'rb') as fp:
            bytesParser = email.parser.BytesParser
            self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
                              bytesParser(policy=email.policy.strict).parse,
                              fp)
            self.assertFalse(fp.closed)

    def test_parser_does_not_close_file(self):
        with openfile('msg_02.txt', 'r') as fp:
            email.parser.Parser().parse(fp)
            self.assertFalse(fp.closed)

    def test_parser_on_exception_does_not_close_file(self):
        with openfile('msg_15.txt', 'r') as fp:
            parser = email.parser.Parser
            self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
                              parser(policy=email.policy.strict).parse, fp)
            self.assertFalse(fp.closed)

    def test_whitespace_continuation(self):
        eq = self.assertEqual
        # This message contains a line after the Subject: header that has only
        # whitespace, but it is not empty!
        # NOTE(review): a blank separator line before the body text appears
        # to have been lost in this copy — confirm against upstream.
        msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Here's the message body
""")
        eq(msg['subject'], 'the next line has a space on it\n ')
        eq(msg['message-id'], 'spam')
        eq(msg.get_payload(), "Here's the message body\n")

    def test_whitespace_continuation_last_header(self):
        eq = self.assertEqual
        # Like the previous test, but the subject line is the last
        # header.
        # NOTE(review): a blank separator line before the body text appears
        # to have been lost in this copy — confirm against upstream.
        msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20
Here's the message body
""")
        eq(msg['subject'], 'the next line has a space on it\n ')
        eq(msg['message-id'], 'spam')
        eq(msg.get_payload(), "Here's the message body\n")

    def test_crlf_separation(self):
        # newline='\n' disables universal newlines so the CRLFs in the
        # fixture survive into the parsed payloads.
        eq = self.assertEqual
        with openfile('msg_26.txt', newline='\n') as fp:
            msg = Parser().parse(fp)
        eq(len(msg.get_payload()), 2)
        part1 = msg.get_payload(0)
        eq(part1.get_content_type(), 'text/plain')
        eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
        part2 = msg.get_payload(1)
        eq(part2.get_content_type(), 'application/riscos')

    def test_crlf_flatten(self):
        # Using newline='\n' preserves the crlfs in this input file.
        with openfile('msg_26.txt', newline='\n') as fp:
            text = fp.read()
        msg = email.message_from_string(text)
        s = StringIO()
        g = Generator(s)
        # flatten(..., linesep='\r\n') must reproduce the input exactly.
        g.flatten(msg, linesep='\r\n')
        self.assertEqual(s.getvalue(), text)

    # Show full diffs for the large comparisons in the tests below.
    maxDiff = None

    def test_multipart_digest_with_extra_mime_headers(self):
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        with openfile('msg_28.txt') as fp:
            msg = email.message_from_file(fp)
        # Structure is:
        # multipart/digest
        #   message/rfc822
        #     text/plain
        #   message/rfc822
        #     text/plain
        eq(msg.is_multipart(), 1)
        eq(len(msg.get_payload()), 2)
        part1 = msg.get_payload(0)
        eq(part1.get_content_type(), 'message/rfc822')
        eq(part1.is_multipart(), 1)
        eq(len(part1.get_payload()), 1)
        part1a = part1.get_payload(0)
        eq(part1a.is_multipart(), 0)
        eq(part1a.get_content_type(), 'text/plain')
        neq(part1a.get_payload(), 'message 1\n')
        # next message/rfc822
        part2 = msg.get_payload(1)
        eq(part2.get_content_type(), 'message/rfc822')
        eq(part2.is_multipart(), 1)
        eq(len(part2.get_payload()), 1)
        part2a = part2.get_payload(0)
        eq(part2a.is_multipart(), 0)
        eq(part2a.get_content_type(), 'text/plain')
        neq(part2a.get_payload(), 'message 2\n')

    def test_three_lines(self):
        # A bug report by Andrew McNamara
        lines = ['From: Andrew Person <aperson@dom.ain',
                 'Subject: Test',
                 'Date: Tue, 20 Aug 2002 16:43:45 +1000']
        msg = email.message_from_string(NL.join(lines))
        self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')

    def test_strip_line_feed_and_carriage_return_in_headers(self):
        eq = self.assertEqual
        # For [ 1002475 ] email message parser doesn't handle \r\n correctly
        value1 = 'text'
        value2 = 'more text'
        m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
            value1, value2)
        msg = email.message_from_string(m)
        eq(msg.get('Header'), value1)
        eq(msg.get('Next-Header'), value2)

    def test_rfc2822_header_syntax(self):
        # Any printable ASCII except ':' and space is a legal header name.
        eq = self.assertEqual
        m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
        msg = email.message_from_string(m)
        eq(len(msg), 3)
        eq(sorted(field for field in msg), ['!"#QUX;~', '>From', 'From'])
        eq(msg.get_payload(), 'body')

    def test_rfc2822_space_not_allowed_in_header(self):
        # A space before the colon invalidates the whole header block.
        eq = self.assertEqual
        m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
        msg = email.message_from_string(m)
        eq(len(msg.keys()), 0)

    def test_rfc2822_one_character_header(self):
        eq = self.assertEqual
        m = 'A: first header\nB: second header\nCC: third header\n\nbody'
        msg = email.message_from_string(m)
        headers = msg.keys()
        headers.sort()
        eq(headers, ['A', 'B', 'CC'])
        eq(msg.get_payload(), 'body')

    def test_CRLFLF_at_end_of_part(self):
        # issue 5610: feedparser should not eat two chars from body part ending
        # with "\r\n\n".
        m = (
            "From: foo@bar.com\n"
            "To: baz\n"
            "Mime-Version: 1.0\n"
            "Content-Type: multipart/mixed; boundary=BOUNDARY\n"
            "\n"
            "--BOUNDARY\n"
            "Content-Type: text/plain\n"
            "\n"
            "body ending with CRLF newline\r\n"
            "\n"
            "--BOUNDARY--\n"
          )
        msg = email.message_from_string(m)
        self.assertTrue(msg.get_payload(0).get_payload().endswith('\r\n'))
class Test8BitBytesHandling(TestEmailBase):
    # In Python3 all input is string, but that doesn't work if the actual input
    # uses an 8bit transfer encoding.  To hack around that, in email 5.1 we
    # decode byte streams using the surrogateescape error handler, and
    # reconvert to binary at appropriate places if we detect surrogates.  This
    # doesn't allow us to transform headers with 8bit bytes (they get munged),
    # but it does allow us to parse and preserve them, and to decode body
    # parts that use an 8bit CTE.

    # Template for a minimal single-part message; individual tests fill
    # in the charset, content-transfer-encoding, and body line.
    # NOTE(review): a blank separator line between the headers and
    # {bodyline} appears to have been lost in this copy — confirm
    # against upstream.
    bodytest_msg = textwrap.dedent("""\
From: foo@bar.com
To: baz
Mime-Version: 1.0
Content-Type: text/plain; charset={charset}
Content-Transfer-Encoding: {cte}
{bodyline}
""")
def test_known_8bit_CTE(self):
    # A valid charset with an 8bit CTE: the string payload is the
    # decoded text, decode=True returns the original bytes.
    m = self.bodytest_msg.format(charset='utf-8',
                                 cte='8bit',
                                 bodyline='pöstal').encode('utf-8')
    msg = email.message_from_bytes(m)
    self.assertEqual(msg.get_payload(), "pöstal\n")
    self.assertEqual(msg.get_payload(decode=True),
                     "pöstal\n".encode('utf-8'))

def test_unknown_8bit_CTE(self):
    # An unknown charset: the string payload shows U+FFFD replacement
    # characters, but decode=True still yields the raw bytes.
    m = self.bodytest_msg.format(charset='notavalidcharset',
                                 cte='8bit',
                                 bodyline='pöstal').encode('utf-8')
    msg = email.message_from_bytes(m)
    self.assertEqual(msg.get_payload(), "p\uFFFD\uFFFDstal\n")
    self.assertEqual(msg.get_payload(decode=True),
                     "pöstal\n".encode('utf-8'))

def test_8bit_in_quopri_body(self):
    # This is non-RFC compliant data...without 'decode' the library code
    # decodes the body using the charset from the headers, and because the
    # source byte really is utf-8 this works.  This is likely to fail
    # against real dirty data (ie: produce mojibake), but the data is
    # invalid anyway so it is as good a guess as any.  But this means that
    # this test just confirms the current behavior; that behavior is not
    # necessarily the best possible behavior.  With 'decode' it is
    # returning the raw bytes, so that test should be of correct behavior,
    # or at least produce the same result that email4 did.
    m = self.bodytest_msg.format(charset='utf-8',
                                 cte='quoted-printable',
                                 bodyline='p=C3=B6stál').encode('utf-8')
    msg = email.message_from_bytes(m)
    self.assertEqual(msg.get_payload(), 'p=C3=B6stál\n')
    self.assertEqual(msg.get_payload(decode=True),
                     'pöstál\n'.encode('utf-8'))

def test_invalid_8bit_in_non_8bit_cte_uses_replace(self):
    # This is similar to the previous test, but proves that if the 8bit
    # byte is undecodeable in the specified charset, it gets replaced
    # by the unicode 'unknown' character.  Again, this may or may not
    # be the ideal behavior.  Note that if decode=False none of the
    # decoders will get involved, so this is the only test we need
    # for this behavior.
    m = self.bodytest_msg.format(charset='ascii',
                                 cte='quoted-printable',
                                 bodyline='p=C3=B6stál').encode('utf-8')
    msg = email.message_from_bytes(m)
    self.assertEqual(msg.get_payload(), 'p=C3=B6st\uFFFD\uFFFDl\n')
    self.assertEqual(msg.get_payload(decode=True),
                     'pöstál\n'.encode('utf-8'))

# test_defect_handling:test_invalid_chars_in_base64_payload
def test_8bit_in_base64_body(self):
    # If we get 8bit bytes in a base64 body, we can just ignore them
    # as being outside the base64 alphabet and decode anyway.  But
    # we register a defect.
    m = self.bodytest_msg.format(charset='utf-8',
                                 cte='base64',
                                 bodyline='cMO2c3RhbAá=').encode('utf-8')
    msg = email.message_from_bytes(m)
    self.assertEqual(msg.get_payload(decode=True),
                     'pöstal'.encode('utf-8'))
    self.assertIsInstance(msg.defects[0],
                          errors.InvalidBase64CharactersDefect)

def test_8bit_in_uuencode_body(self):
    # Sticking an 8bit byte in a uuencode block makes it undecodable by
    # normal means, so the block is returned undecoded, but as bytes.
    m = self.bodytest_msg.format(charset='utf-8',
                                 cte='uuencode',
                                 bodyline='<,.V<W1A; á ').encode('utf-8')
    msg = email.message_from_bytes(m)
    self.assertEqual(msg.get_payload(decode=True),
                     '<,.V<W1A; á \n'.encode('utf-8'))
# Pairs of (raw header source, expected (name, value) after the message
# is re-serialized): 8bit bytes in headers come back as 'unknown-8bit'
# RFC 2047 encoded words.
headertest_headers = (
    ('From: foo@bar.com', ('From', 'foo@bar.com')),
    ('To: báz', ('To', '=?unknown-8bit?q?b=C3=A1z?=')),
    ('Subject: Maintenant je vous présente mon collègue, le pouf célèbre\n'
     '\tJean de Baddie',
     ('Subject', '=?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
      'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=\n'
      ' =?unknown-8bit?q?_Jean_de_Baddie?=')),
    ('From: göst', ('From', '=?unknown-8bit?b?Z8O2c3Q=?=')),
    )
# NOTE(review): a blank header/body separator before the body text may
# have been lost in this copy — confirm against upstream.
headertest_msg = ('\n'.join([src for (src, _) in headertest_headers]) +
                  '\nYes, they are flying.\n').encode('utf-8')

def test_get_8bit_header(self):
    # 8bit header bytes surface as U+FFFD replacement chars via str().
    msg = email.message_from_bytes(self.headertest_msg)
    self.assertEqual(str(msg.get('to')), 'b\uFFFD\uFFFDz')
    self.assertEqual(str(msg['to']), 'b\uFFFD\uFFFDz')

def test_print_8bit_headers(self):
    # Serializing the message re-encodes 8bit headers as the expected
    # unknown-8bit encoded words from headertest_headers.
    msg = email.message_from_bytes(self.headertest_msg)
    self.assertEqual(str(msg),
                     textwrap.dedent("""\
From: {}
To: {}
Subject: {}
From: {}
Yes, they are flying.
""").format(*[expected[1] for (_, expected) in
              self.headertest_headers]))

def test_values_with_8bit_headers(self):
    msg = email.message_from_bytes(self.headertest_msg)
    self.assertListEqual([str(x) for x in msg.values()],
                         ['foo@bar.com',
                          'b\uFFFD\uFFFDz',
                          'Maintenant je vous pr\uFFFD\uFFFDsente mon '
                          'coll\uFFFD\uFFFDgue, le pouf '
                          'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
                          '\tJean de Baddie',
                          "g\uFFFD\uFFFDst"])

def test_items_with_8bit_headers(self):
    msg = email.message_from_bytes(self.headertest_msg)
    self.assertListEqual([(str(x), str(y)) for (x, y) in msg.items()],
                         [('From', 'foo@bar.com'),
                          ('To', 'b\uFFFD\uFFFDz'),
                          ('Subject', 'Maintenant je vous '
                                      'pr\uFFFD\uFFFDsente '
                                      'mon coll\uFFFD\uFFFDgue, le pouf '
                                      'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
                                      '\tJean de Baddie'),
                          ('From', 'g\uFFFD\uFFFDst')])

def test_get_all_with_8bit_headers(self):
    msg = email.message_from_bytes(self.headertest_msg)
    self.assertListEqual([str(x) for x in msg.get_all('from')],
                         ['foo@bar.com',
                          'g\uFFFD\uFFFDst'])
def test_get_content_type_with_8bit(self):
    # 8bit bytes inside the Content-Type value become U+FFFD.
    msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/pl\xA7in; charset=utf-8
""").encode('latin-1'))
    self.assertEqual(msg.get_content_type(), "text/pl\uFFFDin")
    self.assertEqual(msg.get_content_maintype(), "text")
    self.assertEqual(msg.get_content_subtype(), "pl\uFFFDin")

# test_headerregistry.TestContentTypeHeader.non_ascii_in_params
def test_get_params_with_8bit(self):
    # 8bit bytes in parameter names and values are replaced with U+FFFD.
    msg = email.message_from_bytes(
        'X-Header: foo=\xa7ne; b\xa7r=two; baz=three\n'.encode('latin-1'))
    self.assertEqual(msg.get_params(header='x-header'),
                     [('foo', '\uFFFDne'), ('b\uFFFDr', 'two'), ('baz', 'three')])
    self.assertEqual(msg.get_param('Foo', header='x-header'), '\uFFFdne')
    # XXX: someday you might be able to get 'b\xa7r', for now you can't.
    self.assertEqual(msg.get_param('b\xa7r', header='x-header'), None)

# test_headerregistry.TestContentTypeHeader.non_ascii_in_rfc2231_value
# NOTE(review): the continuation line of the Content-Type header in the
# literals below would normally carry leading whitespace for folding; it
# appears to have been lost in this copy — confirm against upstream.
def test_get_rfc2231_params_with_8bit(self):
    msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20not%20f\xa7n"""
        ).encode('latin-1'))
    self.assertEqual(msg.get_param('title'),
                     ('us-ascii', 'en', 'This is not f\uFFFDn'))

def test_set_rfc2231_params_with_8bit(self):
    msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20not%20f\xa7n"""
        ).encode('latin-1'))
    msg.set_param('title', 'test')
    self.assertEqual(msg.get_param('title'), 'test')

def test_del_rfc2231_params_with_8bit(self):
    msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20not%20f\xa7n"""
        ).encode('latin-1'))
    msg.del_param('title')
    self.assertEqual(msg.get_param('title'), None)
    self.assertEqual(msg.get_content_maintype(), 'text')

def test_get_payload_with_8bit_cte_header(self):
    # An undecodable CTE value means the payload cannot be decoded; the
    # raw bytes come back from decode=True.
    msg = email.message_from_bytes(textwrap.dedent("""\
Content-Transfer-Encoding: b\xa7se64
Content-Type: text/plain; charset=latin-1
payload
""").encode('latin-1'))
    self.assertEqual(msg.get_payload(), 'payload\n')
    self.assertEqual(msg.get_payload(decode=True), b'payload\n')
# A message with 8bit (utf-8) bytes in both headers and body.
# NOTE(review): a blank header/body separator line before the Cyrillic
# body appears to have been lost in this copy — confirm upstream.
non_latin_bin_msg = textwrap.dedent("""\
From: foo@bar.com
To: báz
Subject: Maintenant je vous présente mon collègue, le pouf célèbre
\tJean de Baddie
Mime-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
Да, они летят.
""").encode('utf-8')

def test_bytes_generator(self):
    # BytesGenerator reproduces the 8bit input byte-for-byte.
    msg = email.message_from_bytes(self.non_latin_bin_msg)
    out = BytesIO()
    email.generator.BytesGenerator(out).flatten(msg)
    self.assertEqual(out.getvalue(), self.non_latin_bin_msg)

def test_bytes_generator_handles_None_body(self):
    #Issue 11019
    msg = email.message.Message()
    out = BytesIO()
    email.generator.BytesGenerator(out).flatten(msg)
    self.assertEqual(out.getvalue(), b"\n")

# Expected 7bit-clean rendering of non_latin_bin_msg: headers as
# unknown-8bit encoded words, body re-encoded as base64.
# NOTE(review): folding whitespace on the wrapped Subject continuation
# lines appears to have been lost in this copy — confirm upstream.
non_latin_bin_msg_as7bit_wrapped = textwrap.dedent("""\
From: foo@bar.com
To: =?unknown-8bit?q?b=C3=A1z?=
Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_coll=C3=A8gue?=
=?unknown-8bit?q?=2C_le_pouf_c=C3=A9l=C3=A8bre?=
=?unknown-8bit?q?_Jean_de_Baddie?=
Mime-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: base64
0JTQsCwg0L7QvdC4INC70LXRgtGP0YIuCg==
""")

def test_generator_handles_8bit(self):
    msg = email.message_from_bytes(self.non_latin_bin_msg)
    out = StringIO()
    email.generator.Generator(out).flatten(msg)
    self.assertEqual(out.getvalue(), self.non_latin_bin_msg_as7bit_wrapped)

def test_str_generator_should_not_mutate_msg_when_handling_8bit(self):
    # Flattening through the str Generator must not change what the
    # BytesGenerator subsequently produces from the same msg object.
    msg = email.message_from_bytes(self.non_latin_bin_msg)
    out = BytesIO()
    BytesGenerator(out).flatten(msg)
    orig_value = out.getvalue()
    Generator(StringIO()).flatten(msg) # Should not mutate msg!
    out = BytesIO()
    BytesGenerator(out).flatten(msg)
    self.assertEqual(out.getvalue(), orig_value)

def test_bytes_generator_with_unix_from(self):
    # The unixfrom contains a current date, so we can't check it
    # literally.  Just make sure the first word is 'From' and the
    # rest of the message matches the input.
    msg = email.message_from_bytes(self.non_latin_bin_msg)
    out = BytesIO()
    email.generator.BytesGenerator(out).flatten(msg, unixfrom=True)
    lines = out.getvalue().split(b'\n')
    self.assertEqual(lines[0].split()[0], b'From')
    self.assertEqual(b'\n'.join(lines[1:]), self.non_latin_bin_msg)

# Variant of the 7bit rendering with the Subject refolded onto a
# single line (replaces the two wrapped continuation entries).
non_latin_bin_msg_as7bit = non_latin_bin_msg_as7bit_wrapped.split('\n')
non_latin_bin_msg_as7bit[2:4] = [
    'Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
    'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=']
non_latin_bin_msg_as7bit = '\n'.join(non_latin_bin_msg_as7bit)

def test_message_from_binary_file(self):
    # Round trip through a real binary file on disk.
    fn = 'test.msg'
    self.addCleanup(unlink, fn)
    with open(fn, 'wb') as testfile:
        testfile.write(self.non_latin_bin_msg)
    with open(fn, 'rb') as testfile:
        m = email.parser.BytesParser().parse(testfile)
    self.assertEqual(str(m), self.non_latin_bin_msg_as7bit)
latin_bin_msg = textwrap.dedent("""\
From: foo@bar.com
To: Dinsdale
Subject: Nudge nudge, wink, wink
Mime-Version: 1.0
Content-Type: text/plain; charset="latin-1"
Content-Transfer-Encoding: 8bit
oh là là, know what I mean, know what I mean?
""").encode('latin-1')
latin_bin_msg_as7bit = textwrap.dedent("""\
From: foo@bar.com
To: Dinsdale
Subject: Nudge nudge, wink, wink
Mime-Version: 1.0
Content-Type: text/plain; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
oh l=E0 l=E0, know what I mean, know what I mean?
""")
def test_string_generator_reencodes_to_quopri_when_appropriate(self):
m = email.message_from_bytes(self.latin_bin_msg)
self.assertEqual(str(m), self.latin_bin_msg_as7bit)
def test_decoded_generator_emits_unicode_body(self):
    """DecodedGenerator should emit the body decoded to unicode text."""
    msg = email.message_from_bytes(self.latin_bin_msg)
    sink = StringIO()
    email.generator.DecodedGenerator(sink).flatten(msg)
    # DecodedGenerator output carries one extra trailing blank line
    # relative to the input; that quirk is independent of the
    # 8bit->7bit conversion being tested here.
    self.assertEqual(sink.getvalue(),
                     self.latin_bin_msg.decode('latin-1') + '\n')
def test_bytes_feedparser(self):
    # Feed the binary message through BytesFeedParser in small chunks to
    # exercise incremental parsing.
    parser = email.feedparser.BytesFeedParser()
    data = self.latin_bin_msg
    for start in range(0, len(data), 10):
        parser.feed(data[start:start + 10])
    parsed = parser.close()
    self.assertEqual(str(parsed), self.latin_bin_msg_as7bit)
def test_crlf_flatten(self):
    # Flattening with linesep='\r\n' must reproduce a CRLF-terminated
    # source byte for byte.
    with openfile('msg_26.txt', 'rb') as fp:
        original = fp.read()
    msg = email.message_from_bytes(original)
    sink = BytesIO()
    email.generator.BytesGenerator(sink).flatten(msg, linesep='\r\n')
    self.assertEqual(sink.getvalue(), original)
def test_8bit_multipart(self):
    # Issue 11605: an 8-bit multipart message must flatten back to the
    # exact source bytes.
    # NOTE(review): the blank lines of the original MIME fixture (between
    # the header block and each body part, and the leading whitespace of
    # the folded 'boundary=' continuation header) appear to have been
    # lost in reformatting -- verify against the original test data.
    source = textwrap.dedent("""\
Date: Fri, 18 Mar 2011 17:15:43 +0100
To: foo@example.com
From: foodwatch-Newsletter <bar@example.com>
Subject: Aktuelles zu Japan, Klonfleisch und Smiley-System
Message-ID: <76a486bee62b0d200f33dc2ca08220ad@localhost.localdomain>
MIME-Version: 1.0
Content-Type: multipart/alternative;
boundary="b1_76a486bee62b0d200f33dc2ca08220ad"
--b1_76a486bee62b0d200f33dc2ca08220ad
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
Guten Tag, ,
mit großer Betroffenheit verfolgen auch wir im foodwatch-Team die
Nachrichten aus Japan.
--b1_76a486bee62b0d200f33dc2ca08220ad
Content-Type: text/html; charset="utf-8"
Content-Transfer-Encoding: 8bit
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html lang="de">
<head>
<title>foodwatch - Newsletter</title>
</head>
<body>
<p>mit großer Betroffenheit verfolgen auch wir im foodwatch-Team
die Nachrichten aus Japan.</p>
</body>
</html>
--b1_76a486bee62b0d200f33dc2ca08220ad--
""").encode('utf-8')
    # Round-trip: parse the 8-bit source, flatten it, compare bytes.
    msg = email.message_from_bytes(source)
    s = BytesIO()
    g = email.generator.BytesGenerator(s)
    g.flatten(msg)
    self.assertEqual(s.getvalue(), source)
def test_bytes_generator_b_encoding_linesep(self):
# Issue 14062: b encoding was tacking on an extra \n.
m = Message()
# This has enough non-ascii that it should always end up b encoded.
m['Subject'] = Header('žluťoučký kůň')
s = BytesIO()
g = email.generator.BytesGenerator(s)
g.flatten(m, linesep='\r\n')
self.assertEqual(
s.getvalue(),
b'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')
def test_generator_b_encoding_linesep(self):
# Since this broke in ByteGenerator, test Generator for completeness.
m = Message()
# This has enough non-ascii that it should always end up b encoded.
m['Subject'] = Header('žluťoučký kůň')
s = StringIO()
g = email.generator.Generator(s)
g.flatten(m, linesep='\r\n')
self.assertEqual(
s.getvalue(),
'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')
# Show full (untruncated) diffs for the large message comparisons above.
maxDiff = None
class BaseTestBytesGeneratorIdempotent:
    """Mixin: parse message bytes, regenerate with a fixed linesep, and
    check the output is byte-identical to the (normalized) input.

    Subclasses supply ``linesep``, ``blinesep`` and
    ``normalize_linesep_regex``.
    """

    maxDiff = None

    def _msgobj(self, filename):
        # Normalize the on-disk line endings to this class's linesep
        # before parsing, so the idempotency comparison is exact.
        with openfile(filename, 'rb') as fp:
            raw = fp.read()
        raw = self.normalize_linesep_regex.sub(self.blinesep, raw)
        return email.message_from_bytes(raw), raw

    def _idempotent(self, msg, data, unixfrom=False):
        sink = BytesIO()
        generator = email.generator.BytesGenerator(sink, maxheaderlen=0)
        generator.flatten(msg, unixfrom=unixfrom, linesep=self.linesep)
        self.assertEqual(data, sink.getvalue())
class TestBytesGeneratorIdempotentNL(BaseTestBytesGeneratorIdempotent,
                                     TestIdempotent):
    """Byte-level idempotency tests using Unix (LF) line endings."""
    linesep = '\n'
    blinesep = b'\n'
    # Collapse any CRLF in the source data down to a bare LF.
    normalize_linesep_regex = re.compile(br'\r\n')
class TestBytesGeneratorIdempotentCRLF(BaseTestBytesGeneratorIdempotent,
                                       TestIdempotent):
    """Byte-level idempotency tests using CRLF line endings."""
    linesep = '\r\n'
    blinesep = b'\r\n'
    # Expand any bare LF (one not already preceded by a CR) to CRLF.
    normalize_linesep_regex = re.compile(br'(?<!\r)\n')
class TestBase64(unittest.TestCase):
    """Tests for the email.base64mime helper functions."""

    def test_len(self):
        self.assertEqual(base64mime.header_length('hello'),
                         len(base64mime.body_encode(b'hello', eol='')))
        for size in range(15):
            # Base64 emits four output characters per (partial) group of
            # three input bytes.
            expected = (size + 2) // 3 * 4
            self.assertEqual(base64mime.header_length('x' * size), expected)

    def test_decode(self):
        self.assertEqual(base64mime.decode(''), b'')
        self.assertEqual(base64mime.decode('aGVsbG8='), b'hello')

    def test_encode(self):
        self.assertEqual(base64mime.body_encode(b''), b'')
        self.assertEqual(base64mime.body_encode(b'hello'), 'aGVsbG8=\n')
        # Input containing a newline byte.
        self.assertEqual(base64mime.body_encode(b'hello\n'), 'aGVsbG8K\n')
        # maxlinelen controls where the output is wrapped.
        self.assertEqual(
            base64mime.body_encode(b'xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
        # eol replaces the default line terminator.
        self.assertEqual(
            base64mime.body_encode(b'xxxx ' * 20, maxlinelen=40, eol='\r\n'),
            """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")

    def test_header_encode(self):
        he = base64mime.header_encode
        self.assertEqual(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
        self.assertEqual(he('hello\r\nworld'),
                         '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
        self.assertEqual(he('hello\nworld'),
                         '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
        # An explicit charset shows up in the encoded word.
        self.assertEqual(he('hello', charset='iso-8859-2'),
                         '=?iso-8859-2?b?aGVsbG8=?=')
        self.assertEqual(he('hello\nworld'),
                         '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
class TestQuopri(unittest.TestCase):
    """Tests for the email.quoprimime quoted-printable helpers."""

    def setUp(self):
        # Set of characters (as byte integers) that don't need to be encoded
        # in headers.
        self.hlit = list(chain(
            range(ord('a'), ord('z') + 1),
            range(ord('A'), ord('Z') + 1),
            range(ord('0'), ord('9') + 1),
            (c for c in b'!*+-/')))
        # Set of characters (as byte integers) that do need to be encoded in
        # headers.
        self.hnon = [c for c in range(256) if c not in self.hlit]
        assert len(self.hlit) + len(self.hnon) == 256
        # Set of characters (as byte integers) that don't need to be encoded
        # in bodies.
        self.blit = list(range(ord(' '), ord('~') + 1))
        self.blit.append(ord('\t'))
        self.blit.remove(ord('='))
        # Set of characters (as byte integers) that do need to be encoded in
        # bodies.
        self.bnon = [c for c in range(256) if c not in self.blit]
        assert len(self.blit) + len(self.bnon) == 256

    def test_quopri_header_check(self):
        for c in self.hlit:
            self.assertFalse(quoprimime.header_check(c),
                             'Should not be header quopri encoded: %s' % chr(c))
        for c in self.hnon:
            self.assertTrue(quoprimime.header_check(c),
                            'Should be header quopri encoded: %s' % chr(c))

    def test_quopri_body_check(self):
        for c in self.blit:
            self.assertFalse(quoprimime.body_check(c),
                             'Should not be body quopri encoded: %s' % chr(c))
        for c in self.bnon:
            self.assertTrue(quoprimime.body_check(c),
                            'Should be body quopri encoded: %s' % chr(c))

    def test_header_quopri_len(self):
        eq = self.assertEqual
        eq(quoprimime.header_length(b'hello'), 5)
        # RFC 2047 chrome is not included in header_length().
        eq(len(quoprimime.header_encode(b'hello', charset='xxx')),
           quoprimime.header_length(b'hello') +
           # =?xxx?q?...?= means 10 extra characters
           10)
        eq(quoprimime.header_length(b'h@e@l@l@o@'), 20)
        # RFC 2047 chrome is not included in header_length().
        eq(len(quoprimime.header_encode(b'h@e@l@l@o@', charset='xxx')),
           quoprimime.header_length(b'h@e@l@l@o@') +
           # =?xxx?q?...?= means 10 extra characters
           10)
        for c in self.hlit:
            eq(quoprimime.header_length(bytes([c])), 1,
               'expected length 1 for %r' % chr(c))
        for c in self.hnon:
            # Space is special; it's encoded to _
            if c == ord(' '):
                continue
            eq(quoprimime.header_length(bytes([c])), 3,
               'expected length 3 for %r' % chr(c))
        eq(quoprimime.header_length(b' '), 1)

    def test_body_quopri_len(self):
        eq = self.assertEqual
        for c in self.blit:
            eq(quoprimime.body_length(bytes([c])), 1)
        for c in self.bnon:
            eq(quoprimime.body_length(bytes([c])), 3)

    def test_quote_unquote_idempotent(self):
        for x in range(256):
            c = chr(x)
            self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)

    def _test_header_encode(self, header, expected_encoded_header, charset=None):
        """Helper: q-encode *header* (optionally with *charset*) and compare."""
        if charset is None:
            encoded_header = quoprimime.header_encode(header)
        else:
            encoded_header = quoprimime.header_encode(header, charset)
        self.assertEqual(encoded_header, expected_encoded_header)

    def test_header_encode_null(self):
        self._test_header_encode(b'', '')

    def test_header_encode_one_word(self):
        self._test_header_encode(b'hello', '=?iso-8859-1?q?hello?=')

    def test_header_encode_two_lines(self):
        self._test_header_encode(b'hello\nworld',
                                 '=?iso-8859-1?q?hello=0Aworld?=')

    def test_header_encode_non_ascii(self):
        self._test_header_encode(b'hello\xc7there',
                                 '=?iso-8859-1?q?hello=C7there?=')

    def test_header_encode_alt_charset(self):
        self._test_header_encode(b'hello', '=?iso-8859-2?q?hello?=',
                                 charset='iso-8859-2')

    def _test_header_decode(self, encoded_header, expected_decoded_header):
        """Helper: decode a q-encoded header payload and compare."""
        decoded_header = quoprimime.header_decode(encoded_header)
        self.assertEqual(decoded_header, expected_decoded_header)

    def test_header_decode_null(self):
        self._test_header_decode('', '')

    def test_header_decode_one_word(self):
        self._test_header_decode('hello', 'hello')

    def test_header_decode_two_lines(self):
        self._test_header_decode('hello=0Aworld', 'hello\nworld')

    def test_header_decode_non_ascii(self):
        self._test_header_decode('hello=C7there', 'hello\xc7there')

    def test_header_decode_re_bug_18380(self):
        # Issue 18380: Call re.sub with a positional argument for flags in the wrong position
        self.assertEqual(quoprimime.header_decode('=30' * 257), '0' * 257)

    def _test_decode(self, encoded, expected_decoded, eol=None):
        """Helper: decode a quoted-printable body (optional *eol*) and compare."""
        if eol is None:
            decoded = quoprimime.decode(encoded)
        else:
            decoded = quoprimime.decode(encoded, eol=eol)
        self.assertEqual(decoded, expected_decoded)

    def test_decode_null_word(self):
        self._test_decode('', '')

    def test_decode_null_line_null_word(self):
        self._test_decode('\r\n', '\n')

    def test_decode_one_word(self):
        self._test_decode('hello', 'hello')

    def test_decode_one_word_eol(self):
        self._test_decode('hello', 'hello', eol='X')

    def test_decode_one_line(self):
        self._test_decode('hello\r\n', 'hello\n')

    def test_decode_one_line_lf(self):
        self._test_decode('hello\n', 'hello\n')

    def test_decode_one_line_cr(self):
        self._test_decode('hello\r', 'hello\n')

    def test_decode_one_line_nl(self):
        self._test_decode('hello\n', 'helloX', eol='X')

    def test_decode_one_line_crnl(self):
        self._test_decode('hello\r\n', 'helloX', eol='X')

    def test_decode_one_line_one_word(self):
        self._test_decode('hello\r\nworld', 'hello\nworld')

    def test_decode_one_line_one_word_eol(self):
        self._test_decode('hello\r\nworld', 'helloXworld', eol='X')

    def test_decode_two_lines(self):
        self._test_decode('hello\r\nworld\r\n', 'hello\nworld\n')

    def test_decode_two_lines_eol(self):
        self._test_decode('hello\r\nworld\r\n', 'helloXworldX', eol='X')

    def test_decode_one_long_line(self):
        self._test_decode('Spam' * 250, 'Spam' * 250)

    def test_decode_one_space(self):
        self._test_decode(' ', '')

    def test_decode_multiple_spaces(self):
        self._test_decode(' ' * 5, '')

    def test_decode_one_line_trailing_spaces(self):
        self._test_decode('hello \r\n', 'hello\n')

    def test_decode_two_lines_trailing_spaces(self):
        self._test_decode('hello \r\nworld \r\n', 'hello\nworld\n')

    def test_decode_quoted_word(self):
        self._test_decode('=22quoted=20words=22', '"quoted words"')

    def test_decode_uppercase_quoting(self):
        self._test_decode('ab=CD=EF', 'ab\xcd\xef')

    def test_decode_lowercase_quoting(self):
        self._test_decode('ab=cd=ef', 'ab\xcd\xef')

    def test_decode_soft_line_break(self):
        self._test_decode('soft line=\r\nbreak', 'soft linebreak')

    def test_decode_false_quoting(self):
        self._test_decode('A=1,B=A ==> A+B==2', 'A=1,B=A ==> A+B==2')

    def _test_encode(self, body, expected_encoded_body, maxlinelen=None, eol=None):
        """Helper: quoted-printable-encode *body* and compare, also checking
        that no emitted line exceeds *maxlinelen* when that is verifiable."""
        kwargs = {}
        if maxlinelen is None:
            # Use body_encode's default.
            maxlinelen = 76
        else:
            kwargs['maxlinelen'] = maxlinelen
        if eol is None:
            # Use body_encode's default.
            eol = '\n'
        else:
            kwargs['eol'] = eol
        encoded_body = quoprimime.body_encode(body, **kwargs)
        self.assertEqual(encoded_body, expected_encoded_body)
        if eol == '\n' or eol == '\r\n':
            # We know how to split the result back into lines, so maxlinelen
            # can be checked.
            for line in encoded_body.splitlines():
                self.assertLessEqual(len(line), maxlinelen)

    def test_encode_null(self):
        self._test_encode('', '')

    def test_encode_null_lines(self):
        self._test_encode('\n\n', '\n\n')

    def test_encode_one_line(self):
        self._test_encode('hello\n', 'hello\n')

    def test_encode_one_line_crlf(self):
        self._test_encode('hello\r\n', 'hello\n')

    def test_encode_one_line_eol(self):
        self._test_encode('hello\n', 'hello\r\n', eol='\r\n')

    def test_encode_one_line_eol_after_non_ascii(self):
        # issue 20206; see changeset 0cf700464177 for why the encode/decode.
        self._test_encode('hello\u03c5\n'.encode('utf-8').decode('latin1'),
                          'hello=CF=85\r\n', eol='\r\n')

    def test_encode_one_space(self):
        self._test_encode(' ', '=20')

    def test_encode_one_line_one_space(self):
        self._test_encode(' \n', '=20\n')

    # XXX: body_encode() expect strings, but uses ord(char) from these strings
    # to index into a 256-entry list. For code points above 255, this will fail.
    # Should there be a check for 8-bit only ord() values in body, or at least
    # a comment about the expected input?

    def test_encode_two_lines_one_space(self):
        self._test_encode(' \n \n', '=20\n=20\n')

    def test_encode_one_word_trailing_spaces(self):
        self._test_encode('hello   ', 'hello  =20')

    def test_encode_one_line_trailing_spaces(self):
        self._test_encode('hello   \n', 'hello  =20\n')

    def test_encode_one_word_trailing_tab(self):
        self._test_encode('hello  \t', 'hello  =09')

    def test_encode_one_line_trailing_tab(self):
        self._test_encode('hello  \t\n', 'hello  =09\n')

    def test_encode_trailing_space_before_maxlinelen(self):
        self._test_encode('abcd \n1234', 'abcd =\n\n1234', maxlinelen=6)

    def test_encode_trailing_space_at_maxlinelen(self):
        self._test_encode('abcd \n1234', 'abcd=\n=20\n1234', maxlinelen=5)

    def test_encode_trailing_space_beyond_maxlinelen(self):
        self._test_encode('abcd \n1234', 'abc=\nd=20\n1234', maxlinelen=4)

    def test_encode_whitespace_lines(self):
        self._test_encode(' \n' * 5, '=20\n' * 5)

    def test_encode_quoted_equals(self):
        self._test_encode('a = b', 'a =3D b')

    def test_encode_one_long_string(self):
        self._test_encode('x' * 100, 'x' * 75 + '=\n' + 'x' * 25)

    def test_encode_one_long_line(self):
        self._test_encode('x' * 100 + '\n', 'x' * 75 + '=\n' + 'x' * 25 + '\n')

    def test_encode_one_very_long_line(self):
        self._test_encode('x' * 200 + '\n',
                          2 * ('x' * 75 + '=\n') + 'x' * 50 + '\n')

    def test_encode_shortest_maxlinelen(self):
        self._test_encode('=' * 5, '=3D=\n' * 4 + '=3D', maxlinelen=4)

    def test_encode_maxlinelen_too_small(self):
        self.assertRaises(ValueError, self._test_encode, '', '', maxlinelen=3)

    def test_encode(self):
        eq = self.assertEqual
        eq(quoprimime.body_encode(''), '')
        eq(quoprimime.body_encode('hello'), 'hello')
        # Test the binary flag
        eq(quoprimime.body_encode('hello\r\nworld'), 'hello\nworld')
        # Test the maxlinelen arg
        eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
        # Test the eol argument
        eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'),
           """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
        # NOTE(review): the original fixture likely contained a blank line
        # between 'one line' and 'two line' that was lost in reformatting;
        # the comparison below is still internally consistent.
        eq(quoprimime.body_encode("""\
one line
two line"""), """\
one line
two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):
    """Tests for email.charset.Charset header and body encoding."""

    def tearDown(self):
        # Drop the charset registered by test_body_encode so other tests
        # see a clean registry.
        from email import charset as charset_module
        charset_module.CHARSETS.pop('fake', None)

    def test_codec_encodeable(self):
        # us-ascii means no Unicode conversion at all.
        ascii_charset = Charset('us-ascii')
        self.assertEqual(ascii_charset.header_encode('Hello World!'),
                         'Hello World!')
        # 8-bit data cannot be represented in us-ascii...
        sample = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
        self.assertRaises(UnicodeError, ascii_charset.header_encode, sample)
        # ...but utf-8 handles it fine.
        utf8_charset = Charset('utf-8')
        self.assertEqual(utf8_charset.header_encode(sample),
                         '=?utf-8?b?wqTCosKkwqTCpMKmwqTCqMKkwqo=?=')

    def test_body_encode(self):
        # A charset with quoted-printable body encoding.
        qp_charset = Charset('iso-8859-1')
        self.assertEqual(qp_charset.body_encode('hello w\xf6rld'),
                         'hello w=F6rld')
        # A charset with base64 body encoding.
        b64_charset = Charset('utf-8')
        self.assertEqual(b64_charset.body_encode(b'hello world'),
                         'aGVsbG8gd29ybGQ=\n')
        # A charset with no body encoding passes text through.
        plain_charset = Charset('us-ascii')
        self.assertEqual(plain_charset.body_encode('hello world'),
                         'hello world')
        # The convert path (input codec != output codec, euc-jp ->
        # iso-2022-jp) used to be asserted here; with apologies to Tokio
        # Kikuchi, those checks remain disabled pending reliable Japanese
        # codec coverage, but constructing the charset is kept.
        Charset('euc-jp')
        # SF bug #625509: fake a charset whose header encoding is QP but
        # whose body encoding is None, since no built-in charset has that
        # combination.
        from email import charset as charset_module
        charset_module.add_charset('fake', charset_module.QP, None, 'utf-8')
        fake_charset = Charset('fake')
        self.assertEqual(fake_charset.body_encode('hello world'),
                         'hello world')

    def test_unicode_charset_name(self):
        charset = Charset('us-ascii')
        self.assertEqual(str(charset), 'us-ascii')
        # Non-ascii characters in the charset name itself are rejected.
        self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
    """Tests for multilingual MIME header encoding/decoding (RFC 2047).

    NOTE(review): several multi-line expected values below look like they
    originally had a leading space on each continuation line (Header.encode
    folds with '\n ' -- see how test_long splits on '\n ') and a leading
    space lost in reformatting would silently corrupt the fixtures.  Verify
    the expected strings against the original test data.
    """

    def test_simple(self):
        eq = self.ndiffAssertEqual
        h = Header('Hello World!')
        eq(h.encode(), 'Hello World!')
        h.append(' Goodbye World!')
        eq(h.encode(), 'Hello World!  Goodbye World!')

    def test_simple_surprise(self):
        eq = self.ndiffAssertEqual
        h = Header('Hello World!')
        eq(h.encode(), 'Hello World!')
        h.append('Goodbye World!')
        eq(h.encode(), 'Hello World! Goodbye World!')

    def test_header_needs_no_decoding(self):
        h = 'no decoding needed'
        self.assertEqual(decode_header(h), [(h, None)])

    def test_long(self):
        # Folded output lines must all fit inside maxlinelen.
        h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
                   maxlinelen=76)
        for l in h.encode(splitchars=' ').split('\n '):
            self.assertLessEqual(len(l), 76)

    def test_multilingual(self):
        eq = self.ndiffAssertEqual
        g = Charset("iso-8859-1")
        cz = Charset("iso-8859-2")
        utf8 = Charset("utf-8")
        g_head = (b'Die Mieter treten hier ein werden mit einem '
                  b'Foerderband komfortabel den Korridor entlang, '
                  b'an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, '
                  b'gegen die rotierenden Klingen bef\xf6rdert. ')
        cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
                   b'd\xf9vtipu.. ')
        utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
                     '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
                     '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
                     '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
                     '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
                     'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
                     'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
                     '\u3044\u307e\u3059\u3002')
        h = Header(g_head, g)
        h.append(cz_head, cz)
        h.append(utf8_head, utf8)
        enc = h.encode(maxlinelen=76)
        # NOTE(review): continuation lines below may have lost a leading
        # space (see class docstring).
        eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_kom?=
=?iso-8859-1?q?fortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wand?=
=?iso-8859-1?q?gem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6r?=
=?iso-8859-1?q?dert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
=?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
=?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
=?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
=?utf-8?b?IE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5k?=
=?utf-8?b?IGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIA=?=
=?utf-8?b?44Gj44Gm44GE44G+44GZ44CC?=""")
        decoded = decode_header(enc)
        eq(len(decoded), 3)
        eq(decoded[0], (g_head, 'iso-8859-1'))
        eq(decoded[1], (cz_head, 'iso-8859-2'))
        eq(decoded[2], (utf8_head.encode('utf-8'), 'utf-8'))
        ustr = str(h)
        eq(ustr,
           (b'Die Mieter treten hier ein werden mit einem Foerderband '
            b'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
            b'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
            b'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
            b'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
            b'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
            b'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
            b'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
            b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
            b'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
            b'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
            b'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
            b'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
            b'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
            b'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
            b'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82'
            ).decode('utf-8'))
        # Test make_header()
        newh = make_header(decode_header(enc))
        eq(newh, h)

    def test_empty_header_encode(self):
        h = Header()
        self.assertEqual(h.encode(), '')

    def test_header_ctor_default_args(self):
        eq = self.ndiffAssertEqual
        h = Header()
        eq(h, '')
        h.append('foo', Charset('iso-8859-1'))
        eq(h, 'foo')

    def test_explicit_maxlinelen(self):
        eq = self.ndiffAssertEqual
        hstr = ('A very long line that must get split to something other '
                'than at the 76th character boundary to test the non-default '
                'behavior')
        # NOTE(review): the second line of each expected value below may
        # have lost a leading space (see class docstring).
        h = Header(hstr)
        eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
character boundary to test the non-default behavior''')
        eq(str(h), hstr)
        h = Header(hstr, header_name='Subject')
        eq(h.encode(), '''\
A very long line that must get split to something other than at the
76th character boundary to test the non-default behavior''')
        eq(str(h), hstr)
        h = Header(hstr, maxlinelen=1024, header_name='Subject')
        eq(h.encode(), hstr)
        eq(str(h), hstr)

    def test_quopri_splittable(self):
        eq = self.ndiffAssertEqual
        h = Header(charset='iso-8859-1', maxlinelen=20)
        x = 'xxxx ' * 20
        h.append(x)
        s = h.encode()
        # NOTE(review): continuation lines may have lost a leading space
        # (see class docstring).
        eq(s, """\
=?iso-8859-1?q?xxx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_?=""")
        eq(x, str(make_header(decode_header(s))))
        h = Header(charset='iso-8859-1', maxlinelen=40)
        h.append('xxxx ' * 20)
        s = h.encode()
        eq(s, """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xxx?=
=?iso-8859-1?q?x_xxxx_xxxx_xxxx_xxxx_?=
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
=?iso-8859-1?q?_xxxx_xxxx_?=""")
        eq(x, str(make_header(decode_header(s))))

    def test_base64_splittable(self):
        eq = self.ndiffAssertEqual
        h = Header(charset='koi8-r', maxlinelen=20)
        x = 'xxxx ' * 20
        h.append(x)
        s = h.encode()
        # NOTE(review): continuation lines may have lost a leading space
        # (see class docstring).
        eq(s, """\
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IA==?=""")
        eq(x, str(make_header(decode_header(s))))
        h = Header(charset='koi8-r', maxlinelen=40)
        h.append(x)
        s = h.encode()
        eq(s, """\
=?koi8-r?b?eHh4eCB4eHh4IHh4eHggeHh4?=
=?koi8-r?b?eCB4eHh4IHh4eHggeHh4eCB4?=
=?koi8-r?b?eHh4IHh4eHggeHh4eCB4eHh4?=
=?koi8-r?b?IHh4eHggeHh4eCB4eHh4IHh4?=
=?koi8-r?b?eHggeHh4eCB4eHh4IHh4eHgg?=
=?koi8-r?b?eHh4eCB4eHh4IA==?=""")
        eq(x, str(make_header(decode_header(s))))

    def test_us_ascii_header(self):
        eq = self.assertEqual
        s = 'hello'
        x = decode_header(s)
        eq(x, [('hello', None)])
        h = make_header(x)
        eq(s, h.encode())

    def test_string_charset(self):
        eq = self.assertEqual
        h = Header()
        h.append('hello', 'iso-8859-1')
        eq(h, 'hello')

##    def test_unicode_error(self):
##        raises = self.assertRaises
##        raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
##        raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
##        h = Header()
##        raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
##        raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
##        raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')

    def test_utf8_shortest(self):
        # utf-8 picks the shorter of q/b encoding per chunk.
        eq = self.assertEqual
        h = Header('p\xf6stal', 'utf-8')
        eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
        h = Header('\u83ca\u5730\u6642\u592b', 'utf-8')
        eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')

    def test_bad_8bit_header(self):
        raises = self.assertRaises
        eq = self.assertEqual
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        raises(UnicodeError, Header, x)
        h = Header()
        raises(UnicodeError, h.append, x)
        e = x.decode('utf-8', 'replace')
        eq(str(Header(x, errors='replace')), e)
        h.append(x, errors='replace')
        eq(str(h), e)

    def test_escaped_8bit_header(self):
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        e = x.decode('ascii', 'surrogateescape')
        h = Header(e, charset=email.charset.UNKNOWN8BIT)
        self.assertEqual(str(h),
                         'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
        self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])

    def test_header_handles_binary_unknown8bit(self):
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        h = Header(x, charset=email.charset.UNKNOWN8BIT)
        self.assertEqual(str(h),
                         'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
        self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])

    def test_make_header_handles_binary_unknown8bit(self):
        x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
        h = Header(x, charset=email.charset.UNKNOWN8BIT)
        h2 = email.header.make_header(email.header.decode_header(h))
        self.assertEqual(str(h2),
                         'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
        self.assertEqual(email.header.decode_header(h2), [(x, 'unknown-8bit')])

    def test_modify_returned_list_does_not_change_header(self):
        h = Header('test')
        chunks = email.header.decode_header(h)
        chunks.append(('ascii', 'test2'))
        self.assertEqual(str(h), 'test')

    def test_encoded_adjacent_nonencoded(self):
        eq = self.assertEqual
        h = Header()
        h.append('hello', 'iso-8859-1')
        h.append('world')
        s = h.encode()
        eq(s, '=?iso-8859-1?q?hello?= world')
        h = make_header(decode_header(s))
        eq(h.encode(), s)

    def test_whitespace_keeper(self):
        eq = self.assertEqual
        s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
        parts = decode_header(s)
        eq(parts, [(b'Subject: ', None), (b'\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), (b' zz.', None)])
        hdr = make_header(parts)
        eq(hdr.encode(),
           'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')

    def test_broken_base64_header(self):
        raises = self.assertRaises
        s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
        raises(errors.HeaderParseError, decode_header, s)

    def test_shift_jis_charset(self):
        h = Header('文', charset='shift_jis')
        self.assertEqual(h.encode(), '=?iso-2022-jp?b?GyRCSjgbKEI=?=')

    def test_flatten_header_with_no_value(self):
        # Issue 11401 (regression from email 4.x) Note that the space after
        # the header doesn't reflect the input, but this is also the way
        # email 4.x behaved.  At some point it would be nice to fix that.
        msg = email.message_from_string("EmptyHeader:")
        self.assertEqual(str(msg), "EmptyHeader: \n\n")

    def test_encode_preserves_leading_ws_on_value(self):
        msg = Message()
        msg['SomeHeader'] = ' value with leading ws'
        self.assertEqual(str(msg), "SomeHeader:  value with leading ws\n\n")
# Test RFC 2231 header parameters (en/de)coding
class TestRFC2231(TestEmailBase):
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
def test_get_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_29.txt')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
eq(msg.get_param('title', unquote=False),
('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
def test_set_param(self):
eq = self.ndiffAssertEqual
msg = Message()
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii')
eq(msg.get_param('title'),
('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
msg = self._msgobj('msg_01.txt')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21
Hi,
Do you like this message?
-Me
""")
def test_set_param_requote(self):
msg = Message()
msg.set_param('title', 'foo')
self.assertEqual(msg['content-type'], 'text/plain; title="foo"')
msg.set_param('title', 'bar', requote=False)
self.assertEqual(msg['content-type'], 'text/plain; title=bar')
# tspecial is still quoted.
msg.set_param('title', "(bar)bell", requote=False)
self.assertEqual(msg['content-type'], 'text/plain; title="(bar)bell"')
def test_del_param(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_01.txt')
msg.set_param('foo', 'bar', charset='us-ascii', language='en')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
msg.del_param('foo', header='Content-Type')
eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21
Hi,
Do you like this message?
-Me
""")
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_charset
# I changed the charset name, though, because the one in the file isn't
# a legal charset name.  Should add a test for an illegal charset.
def test_rfc2231_get_content_charset(self):
    """An RFC 2231-encoded charset is decoded by get_content_charset()."""
    msg = self._msgobj('msg_32.txt')
    self.assertEqual(msg.get_content_charset(), 'us-ascii')
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_double_quotes
def test_rfc2231_parse_rfc_quoting(self):
    """Unquoted RFC 2231 segments decode and round-trip through as_string()."""
    raw = textwrap.dedent('''\
Content-Disposition: inline;
\tfilename*0*=''This%20is%20even%20more%20;
\tfilename*1*=%2A%2A%2Afun%2A%2A%2A%20;
\tfilename*2="is it not.pdf"
''')
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(),
                     'This is even more ***fun*** is it not.pdf')
    self.assertEqual(raw, msg.as_string())
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
def test_rfc2231_parse_extra_quoting(self):
    """Double-quoted RFC 2231 segments decode and round-trip as well."""
    raw = textwrap.dedent('''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
''')
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(),
                     'This is even more ***fun*** is it not.pdf')
    self.assertEqual(raw, msg.as_string())
# test_headerregistry.TestContentTypeHeader.rfc2231_no_language_or_charset
# but new test uses *0* because otherwise lang/charset is not valid.
# test_headerregistry.TestContentTypeHeader.rfc2231_segmented_normal_values
def test_rfc2231_no_language_or_charset(self):
    """Segmented parameter without charset/language decodes to a plain string."""
    raw = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
    msg = email.message_from_string(raw)
    value = msg.get_param('NAME')
    self.assertNotIsInstance(value, tuple)
    self.assertEqual(
        value,
        'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_charset
def test_rfc2231_no_language_or_charset_in_filename(self):
    """Encoded segments with empty charset/language still percent-decode."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(),
                     'This is even more ***fun*** is it not.pdf')
# Duplicate of previous test?
def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
    """Same as the previous test: encoded segments without charset/language."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(),
                     'This is even more ***fun*** is it not.pdf')
# test_headerregistry.TestContentTypeHeader.rfc2231_partly_encoded,
# but the test below is wrong (the first part should be decoded).
def test_rfc2231_partly_encoded(self):
    """Only segments marked with a trailing '*' are percent-decoded."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(
        msg.get_filename(),
        'This%20is%20even%20more%20***fun*** is it not.pdf')
def test_rfc2231_partly_nonencoded(self):
    """With no '*'-marked segments, nothing is percent-decoded."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(
        msg.get_filename(),
        'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')
def test_rfc2231_no_language_or_charset_in_boundary(self):
    """RFC 2231 segments also decode for the multipart boundary parameter."""
    raw = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_boundary(),
                     'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_charset(self):
    """Segmented charset parameter decodes (and is lower-cased)."""
    # This is a nonsensical charset value, but tests the code anyway
    raw = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_content_charset(),
                     'this is even more ***fun*** is it not.pdf')
# test_headerregistry.TestContentTypeHeader.rfc2231_unknown_charset_treated_as_ascii
def test_rfc2231_bad_encoding_in_filename(self):
    """An unknown charset in the extended value is treated as ascii."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(),
                     'This is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_charset(self):
    """A charset whose value is not decodable yields None."""
    raw = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
    msg = email.message_from_string(raw)
    # None is expected: non-ascii characters in the charset are not allowed.
    self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_charset(self):
    """Non-ascii bytes inside the charset value also yield None."""
    raw = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
    msg = email.message_from_string(raw)
    # None is expected: non-ascii characters in the charset are not allowed.
    self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_filename(self):
    """An undecodable byte in the filename becomes U+FFFD."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(),
                     'This is even more ***fun*** is it not.pdf\ufffd')
def test_rfc2231_unknown_encoding(self):
    """An unknown charset name is ignored and the value decoded as-is."""
    raw = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(), 'myfile.txt')
def test_rfc2231_single_tick_in_filename_extended(self):
    """A lone apostrophe in an extended value is not a charset delimiter."""
    raw = """\
Content-Type: application/x-foo;
\tname*0*="Frank's"; name*1*=" Document"
"""
    msg = email.message_from_string(raw)
    charset, language, value = msg.get_param('name')
    self.assertEqual(charset, None)
    self.assertEqual(language, None)
    self.assertEqual(value, "Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
def test_rfc2231_single_tick_in_filename(self):
    """Non-extended segments with an apostrophe stay a plain string."""
    raw = """\
Content-Type: application/x-foo; name*0="Frank's"; name*1=" Document"
"""
    msg = email.message_from_string(raw)
    value = msg.get_param('name')
    self.assertNotIsInstance(value, tuple)
    self.assertEqual(value, "Frank's Document")
def test_rfc2231_missing_tick(self):
    """An extended value with only one apostrophe keeps it literally."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%20broken";
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(), "'This is broken")
def test_rfc2231_missing_tick_with_encoded_non_ascii(self):
    """Missing tick plus an undecodable byte yields a literal quote and U+FFFD."""
    raw = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%E2broken";
'''
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_filename(), "'This is\ufffdbroken")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_value_with_charset_and_lang
def test_rfc2231_tick_attack_extended(self):
    """Charset and language are parsed off an extended value with an
    embedded apostrophe."""
    raw = """\
Content-Type: application/x-foo;
\tname*0*="us-ascii'en-us'Frank's"; name*1*=" Document"
"""
    msg = email.message_from_string(raw)
    charset, language, value = msg.get_param('name')
    self.assertEqual(charset, 'us-ascii')
    self.assertEqual(language, 'en-us')
    self.assertEqual(value, "Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_non_encoded_value
def test_rfc2231_tick_attack(self):
    """Non-extended segments never get charset'lang' stripped."""
    raw = """\
Content-Type: application/x-foo;
\tname*0="us-ascii'en-us'Frank's"; name*1=" Document"
"""
    msg = email.message_from_string(raw)
    value = msg.get_param('name')
    self.assertNotIsInstance(value, tuple)
    self.assertEqual(value, "us-ascii'en-us'Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quotes_inside_quotes
def test_rfc2231_no_extended_values(self):
    """A plain (non-RFC-2231) quoted parameter is returned verbatim."""
    raw = """\
Content-Type: application/x-foo; name="Frank's Document"
"""
    msg = email.message_from_string(raw)
    self.assertEqual(msg.get_param('name'), "Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_then_unencoded_segments
def test_rfc2231_encoded_then_unencoded_segments(self):
    """Mixed encoded/unencoded segments join; charset/lang come from *0*."""
    raw = """\
Content-Type: application/x-foo;
\tname*0*="us-ascii'en-us'My";
\tname*1=" Document";
\tname*2*=" For You"
"""
    msg = email.message_from_string(raw)
    charset, language, value = msg.get_param('name')
    self.assertEqual(charset, 'us-ascii')
    self.assertEqual(language, 'en-us')
    self.assertEqual(value, 'My Document For You')
# test_headerregistry.TestContentTypeHeader.rfc2231_unencoded_then_encoded_segments
# test_headerregistry.TestContentTypeHeader.rfc2231_quoted_unencoded_then_encoded_segments
def test_rfc2231_unencoded_then_encoded_segments(self):
    """charset'lang' is honored even when the first segment is unencoded."""
    raw = """\
Content-Type: application/x-foo;
\tname*0="us-ascii'en-us'My";
\tname*1*=" Document";
\tname*2*=" For You"
"""
    msg = email.message_from_string(raw)
    charset, language, value = msg.get_param('name')
    self.assertEqual(charset, 'us-ascii')
    self.assertEqual(language, 'en-us')
    self.assertEqual(value, 'My Document For You')
# Tests to ensure that signed parts of an email are completely preserved, as
# required by RFC1847 section 2.1. Note that these are incomplete, because the
# email package does not currently always preserve the body. See issue 1670765.
class TestSigned(TestEmailBase):
    """Verify that serializing a signed message keeps the signed part intact."""

    def _msg_and_obj(self, filename):
        # Return the raw fixture text together with its parsed Message.
        with openfile(filename) as fp:
            raw = fp.read()
        return raw, email.message_from_string(raw)

    def _signed_parts_eq(self, original, result):
        # Extract the first mime part of each message and compare them.
        import re
        part_re = re.compile(r'^--([^\n]+)\n(.*?)\n--\1$', re.S | re.M)
        self.assertEqual(part_re.search(result).group(2),
                         part_re.search(original).group(2))

    def test_long_headers_as_string(self):
        original, msg = self._msg_and_obj('msg_45.txt')
        self._signed_parts_eq(original, msg.as_string())

    def test_long_headers_as_string_maxheaderlen(self):
        original, msg = self._msg_and_obj('msg_45.txt')
        self._signed_parts_eq(original, msg.as_string(maxheaderlen=60))

    def test_long_headers_flatten(self):
        original, msg = self._msg_and_obj('msg_45.txt')
        fp = StringIO()
        Generator(fp).flatten(msg)
        self._signed_parts_eq(original, fp.getvalue())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| {
"content_hash": "4e8948624a9e7ef4a03e4c33275c550b",
"timestamp": "",
"source": "github",
"line_count": 15891,
"max_line_length": 460,
"avg_line_length": 38.47379019570826,
"alnum_prop": 0.5994942646801453,
"repo_name": "ArcherSys/ArcherSys",
"id": "47141e70b059a1471acee28c1548b4b4b6db0874",
"size": "611582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_email/test_email.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
This package implements various grain boundary analyses
"""
| {
"content_hash": "2696a5a4215fea39b87030580bc7f6b1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 55,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.765625,
"repo_name": "davidwaroquiers/pymatgen",
"id": "d8c9c1b04368a6c22d90c0e68a8152647e2711f7",
"size": "158",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/gb/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9213466"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
} |
import os
import nibabel as nib
def save2nifti(data, file_name):
    """Save a 3D/4D dataset as a NIfTI file.

    The header is derived from the MNI152 2mm standard template, so the
    data are expected to be in RAS space.  FSL is required (a local
    FreeROI checkout is used as a fallback for code testing).
    """
    # FSLDIR points at the FSL installation; fall back to the repo path
    # used for local code testing when it is unset or empty.
    fsl_dir = os.getenv('FSLDIR') or r'/Users/sealhuang/repo/FreeROI/froi'
    template = os.path.join(fsl_dir, 'data', 'standard',
                            'MNI152_T1_2mm_brain.nii.gz')
    header = nib.load(template).header
    # Display range hint for viewers that honor cal_min/cal_max.
    header['cal_max'] = data.max()
    header['cal_min'] = 0
    nib.save(nib.Nifti1Image(data, None, header), file_name)
| {
"content_hash": "a52537d202cf05fbb531bcc6333d69ff",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.6216216216216216,
"repo_name": "sealhuang/brainDecodingToolbox",
"id": "470a6a22c5628e3927d2050c76de3cd99b6b296d",
"size": "781",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "braincode/io/nifti.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "774"
},
{
"name": "Python",
"bytes": "181593"
}
],
"symlink_target": ""
} |
import argparse
import os
import re
import shutil
import subprocess
import sys
import urlparse
# Defaults for the test configuration pushed to the device.
DEFAULT_USER_REQUEST = True
DEFAULT_USE_TEST_SCHEDULER = True
# 0 means the batch would be the whole list of urls.
DEFAULT_BATCH_SIZE = 0
DEFAULT_VERBOSE = False
# Fully qualified instrumentation test case to run.
DEFAULT_TEST_CMD = 'OfflinePageSavePageLaterEvaluationTest.testFailureRate'
# Name of the config file written locally and pushed to the device.
CONFIG_FILENAME = 'test_config'
# Template for the device-side config; filled in from the parsed options.
CONFIG_TEMPLATE = """\
IsUserRequested = {is_user_requested}
UseTestScheduler = {use_test_scheduler}
ScheduleBatchSize = {schedule_batch_size}
"""
def main(args):
# Setting up the argument parser.
parser = argparse.ArgumentParser()
parser.add_argument(
'--output-directory',
dest='output_dir',
help='Directory for output. Default is ~/offline_eval_output/')
parser.add_argument(
'--user-requested',
dest='user_request',
action='store_true',
help='Testing as user-requested urls. Default option.')
parser.add_argument(
'--not-user-requested',
dest='user_request',
action='store_false',
help='Testing as not user-requested urls.')
parser.add_argument(
'--use-test-scheduler',
dest='use_test_scheduler',
action='store_true',
help='Use test scheduler to avoid real scheduling. Default option.')
parser.add_argument(
'--not-use-test-scheduler',
dest='use_test_scheduler',
action='store_false',
help='Use GCMNetworkManager for scheduling.')
parser.add_argument(
'--batch-size',
type=int,
dest='schedule_batch_size',
help='Number of pages to be queued after previous batch completes.')
parser.add_argument(
'-v',
'--verbose',
dest='verbose',
action='store_true',
help='Make test runner verbose.')
parser.add_argument(
'-d',
'--device',
type=str,
dest='device_id',
help='Specify which device to be used. See \'adb devices\'.')
parser.add_argument('build_output_dir', help='Path to build directory.')
parser.add_argument(
'test_urls_file', help='Path to input file with urls to be tested.')
parser.set_defaults(
output_dir=os.path.expanduser('~/offline_eval_output'),
user_request=DEFAULT_USER_REQUEST,
use_test_scheduler=DEFAULT_USE_TEST_SCHEDULER,
schedule_batch_size=DEFAULT_BATCH_SIZE,
verbose=DEFAULT_VERBOSE)
# Get the arguments and several paths.
options, extra_args = parser.parse_known_args(args)
if extra_args:
print 'Unknown args: ' + ', '.join(
extra_args) + '. Please check and run again.'
return
build_dir_path = os.path.abspath(
os.path.join(os.getcwd(), options.build_output_dir))
test_runner_path = os.path.join(build_dir_path,
'bin/run_chrome_public_test_apk')
config_output_path = os.path.join(options.output_dir, CONFIG_FILENAME)
def get_adb_command(args):
adb_path = os.path.join(
build_dir_path,
'../../third_party/android_sdk/public/platform-tools/adb')
if options.device_id != None:
return [adb_path, '-s', options.device_id] + args
return [adb_path] + args
# In case adb server is not started
subprocess.call(get_adb_command(['start-server']))
external_dir = subprocess.check_output(
get_adb_command(['shell', 'echo', '$EXTERNAL_STORAGE'])).strip()
# Create the output directory for results, and have a copy of test config
# there.
if not os.path.exists(options.output_dir):
print 'Creating output directory for results... ' + options.output_dir
os.makedirs(options.output_dir)
with open(config_output_path, 'w') as config:
config.write(
CONFIG_TEMPLATE.format(
is_user_requested=options.user_request,
use_test_scheduler=options.use_test_scheduler,
schedule_batch_size=options.schedule_batch_size))
print 'Uploading config file and input file onto the device.'
subprocess.call(
get_adb_command(
['push', config_output_path, external_dir + '/paquete/test_config']))
subprocess.call(
get_adb_command([
'push', options.test_urls_file, external_dir +
'/paquete/offline_eval_urls.txt'
]))
print 'Start running test with following configurations:'
print CONFIG_TEMPLATE.format(
is_user_requested=options.user_request,
use_test_scheduler=options.use_test_scheduler,
schedule_batch_size=options.schedule_batch_size)
# Run test with timeout-scale as 20.0 and strict mode off.
# This scale is only applied to timeouts which are defined as scalable ones
# in the test framework (like the timeout used to decide if Chrome doesn't
# start properly), on svelte devices we would hit the 'no tab selected'
# assertion since the starting time is longer than expected by the framework.
# So we're setting the scale to 20. It will not affect the annotation-based
# timeouts.
# Also turning off the strict mode so that we won't run into StrictMode
# violations when writing to files.
test_runner_cmd = [
test_runner_path,
'--timeout-scale',
'20.0',
'--strict-mode',
'off',
]
if options.verbose:
test_runner_cmd += ['-v']
if options.device_id != None:
test_runner_cmd += ['-d', options.device_id]
test_runner_cmd += ['-f', DEFAULT_TEST_CMD]
subprocess.call(test_runner_cmd)
print 'Fetching results from device...'
archive_dir = os.path.join(options.output_dir, 'archives/')
if os.path.exists(archive_dir):
shutil.rmtree(archive_dir)
subprocess.call(
get_adb_command(['pull', external_dir + '/paquete/archives', archive_dir
]))
subprocess.call(
get_adb_command([
'pull', external_dir + '/paquete/offline_eval_results.txt',
options.output_dir
]))
subprocess.call(
get_adb_command([
'pull', external_dir + '/paquete/offline_eval_logs.txt',
options.output_dir
]))
print 'Test finished!'
print 'Renaming archive files with host names.'
pattern = 'Content-Location: (.*)'
for filename in os.listdir(archive_dir):
path = os.path.join(archive_dir, filename)
with open(path) as f:
content = f.read()
result = re.search(pattern, content)
if (result == None):
continue
url = result.group(1)
url_parse = urlparse.urlparse(url)
hostname = url_parse[1].replace('.', '_')
url_path = re.sub('[^0-9a-zA-Z]+', '_', url_parse[2][1:])
if (len(hostname) == 0):
hostname = 'error_parsing_hostname'
continue
newname = hostname + '-' + url_path
newpath = os.path.join(archive_dir, newname + '.mhtml')
os.rename(path, newpath)
print 'Renaming finished.'
# Script entry point: forward CLI args (minus the program name) to main().
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| {
"content_hash": "f580b48d3864c472de7ba7446d115c98",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 79,
"avg_line_length": 34.19796954314721,
"alnum_prop": 0.6534065607837316,
"repo_name": "chromium/chromium",
"id": "55fe656b76ea3c2217b393a0af86083e7a533e94",
"size": "7548",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "chrome/browser/offline_pages/android/evaluation/run_offline_page_evaluation_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""BBOB noiseless testbed.
The optimisation test functions are represented as classes
:py:class:`F1` to :py:class:`F24` and :py:class:`F101` to
:py:class:`F130`.
Each of these classes has an _evalfull method which expects as argument
an array of row vectors and returns a 'noisy' and a 'noiseless' float
values.
This module implements the class :py:class:`BBOBFunction` and
sub-classes:
* :py:class:`BBOBNfreeFunction` which have all the methods common to the
classes :py:class:`F1` to :py:class:`F24`
* :py:class:`BBOBGaussFunction`, :py:class:`BBOBCauchyFunction`,
:py:class:`BBOBUniformFunction` which have methods in classes from
:py:class:`F101` to :py:class:`F130`
Module attributes:
* :py:data:`dictbbob` is a dictionary such that dictbbob[2] contains
the test function class F2 and f2 = dictbbob[2]() returns
the instance 0 of the test function that can be
called as f2([1,2,3]).
* :py:data:`nfreeIDs` == range(1,25) indices for the noiseless functions that can be
found in dictbbob
* :py:data:`noisyIDs` == range(101, 131) indices for the noisy functions that can be
found in dictbbob. We have nfreeIDs + noisyIDs == sorted(dictbbob.keys())
* :py:data:`nfreeinfos` function infos
Examples:
>>> from cocopp.eaf import bbobbenchmarks as bn
>>> for s in bn.nfreeinfos:
... print s
1: Noise-free Sphere function
2: Separable ellipsoid with monotone transformation
<BLANKLINE>
Parameter: condition number (default 1e6)
<BLANKLINE>
<BLANKLINE>
3: Rastrigin with monotone transformation separable "condition" 10
4: skew Rastrigin-Bueche, condition 10, skew-"condition" 100
5: Linear slope
6: Attractive sector function
7: Step-ellipsoid, condition 100, noise-free
8: Rosenbrock noise-free
9: Rosenbrock, rotated
10: Ellipsoid with monotone transformation, condition 1e6
11: Discus (tablet) with monotone transformation, condition 1e6
12: Bent cigar with asymmetric space distortion, condition 1e6
13: Sharp ridge
14: Sum of different powers, between x^2 and x^6, noise-free
15: Rastrigin with asymmetric non-linear distortion, "condition" 10
16: Weierstrass, condition 100
17: Schaffers F7 with asymmetric non-linear transformation, condition 10
18: Schaffers F7 with asymmetric non-linear transformation, condition 1000
19: F8F2 sum of Griewank-Rosenbrock 2-D blocks, noise-free
20: Schwefel with tridiagonal variable transformation
21: Gallagher with 101 Gaussian peaks, condition up to 1000, one global rotation, noise-free
22: Gallagher with 21 Gaussian peaks, condition up to 1000, one global rotation
23: Katsuura function
24: Lunacek bi-Rastrigin, condition 100
<BLANKLINE>
in PPSN 2008, Rastrigin part rotated and scaled
<BLANKLINE>
<BLANKLINE>
>>> f3 = bn.F3(13) # instantiate function 3 on instance 13
>>> f3.evaluate([0, 1, 2]) # also: f3([0, 1, 2]) # doctest: +ELLIPSIS
59.8733529...
>>> f3.evaluate([[0, 1, 2], [3, 4, 5]])
array([ 59.87335291, 441.17409304])
>>> print bn.instantiate(5)[1] # returns evaluation function and target
51.53
>>> print bn.nfreeIDs # list noise-free functions
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
>>> for i in bn.nfreeIDs: # evaluate all noiseless functions once
... print bn.instantiate(i)[0]([0., 0., 0., 0.]),
-77.27454592 6180022.82173 92.9877507529 92.9877507529 140.510117618 70877.9554128 -72.5505202195 33355.7924722 -339.94 4374717.49343 15631566.3487 4715481.0865 550.599783901 -17.2991756229 27.3633128519 -227.827833529 -24.3305918781 131.420159348 40.7103737427 6160.81782924 376.746889545 107.830426761 220.482266557 106.094767386
"""
# TODO: define interface for this module.
# TODO: funId is expected to be a number since it is used as rseed.
import warnings
from pdb import set_trace
import numpy as np
from math import floor as floor
from numpy import dot, linspace, diag, tile, zeros, sign, resize
from numpy.random import standard_normal as _randn # TODO: may bring confusion
from numpy.random import random as _rand # TODO: may bring confusion
import sys
sys.path.insert(0, '../')
from csep.loglikelihood import calcLogLikelihood
from models.mathUtil import calcNumberBins
import models.model
"""
% VAL = BENCHMARKS(X, FUNCID)
% VAL = BENCHMARKS(X, STRFUNC)
% Input:
% X -- solution column vector or matrix of column vectors
% FUNCID -- number of function to be executed with X as input,
% by default 8.
% STRFUNC -- function as string to be executed with X as input
% Output: function value(s) of solution(s)
% Examples:
% F = BENCHMARKS([1 2 3]', 17);
% F = BENCHMARKS([1 2 3]', 'f1');
%
% NBS = BENCHMARKS()
% NBS = BENCHMARKS('FunctionIndices')
% Output:
% NBS -- array of valid benchmark function numbers,
% presumably 1:24
%
% FHS = BENCHMARKS('handles')
% Output:
% FHS -- cell array of function handles
% Examples:
% FHS = BENCHMARKS('handles');
% f = FHS{1}(x); % evaluates x on the sphere function f1
% f = feval(FHS{1}, x); % ditto
%
% see also: functions FGENERIC, BENCHMARKINFOS, BENCHMARKSNOISY
% Authors (copyright 2009): Nikolaus Hansen, Raymond Ros, Steffen Finck
% Version = 'Revision: $Revision: 1115 $'
% Last Modified: $Date: 2009-02-09 19:22:42 +0100 (Mon, 09 Feb 2009) $
% INTERFACE OF BENCHMARK FUNCTIONS
% FHS = BENCHMARKS('handles');
% FUNC = FHS{1};
%
% [FVALUE, FTRUE] = FUNC(X)
% [FVALUE, FTRUE] = FUNC(X, [], IINSTANCE)
% Input: X -- matrix of column vectors
% IINSTANCE -- instance number of the function, sets function
% instance (XOPT, FOPT, rotation matrices,...)
% up until a new number is set, or the function is
% cleared. Default is zero.
% Output: row vectors with function value for each input column
% FVALUE -- function value
% FTRUE -- noise-less, deterministic function value
% [FOPT STRFUNCTION] = FUNC('any_even_empty_string', ...)
% Output:
% FOPT -- function value at optimum
% STRFUNCTION -- not yet implemented: function description string, ID before first whitespace
% [FOPT STRFUNCTION] = FUNC('any_even_empty_string', DIM, NTRIAL)
% Sets rotation matrices and xopt depending on NTRIAL (by changing the random seed).
% Output:
% FOPT -- function value at optimum
% STRFUNCTION -- not yet implemented: function description string, ID before first whitespace
% [FOPT, XOPT] = FUNC('xopt', DIM)
% Output:
% FOPT -- function value at optimum XOPT
% XOPT -- optimal solution vector in DIM-D
% [FOPT, MATRIX] = FUNC('linearTF', DIM) % might vanish in future
% Output:
% FOPT -- function value at optimum XOPT
% MATRIX -- used transformation matrix
"""
### FUNCTION DEFINITION ###
def compute_xopt(rseed, dim):
    """Generate the random optimum vector of a function instance.

    Components are rounded to four digits and are never exactly zero.
    """
    xopt = 8 * np.floor(1e4 * unif(dim, rseed)) / 1e4 - 4
    # replace exact zeros so the optimum never sits on a coordinate axis value of 0
    xopt[xopt == 0] = -1e-5
    return xopt
def compute_rotation(seed, dim):
    """Return a random orthogonal basis (Gram-Schmidt on Gaussian rows).

    The rotation is used in several ways and in combination with
    non-linear transformations; search-space rotation invariant
    algorithms are not expected to be invariant under it.
    """
    basis = np.reshape(gauss(dim * dim, seed), (dim, dim))
    for i in range(dim):
        # orthogonalize row i against all previous rows, then normalize
        for j in range(i):
            basis[i] -= dot(basis[i], basis[j]) * basis[j]
        basis[i] /= np.sum(basis[i] ** 2) ** 0.5
    return basis
def monotoneTFosc(f):
    """Apply the monotone oscillation transformation.

    Maps [-inf, inf] to [-inf, inf] with different constants for the
    positive and the negative part; zero is left unchanged.  Accepts a
    scalar or an array-like and returns the same kind.
    """
    if np.isscalar(f):
        if f > 0.:
            y = np.log(f) / 0.1
            return np.exp(y + 0.49 * (np.sin(y) + np.sin(0.79 * y))) ** 0.1
        if f < 0.:
            y = np.log(-f) / 0.1
            return -np.exp(y + 0.49 * (np.sin(0.55 * y) + np.sin(0.31 * y))) ** 0.1
        return f
    f = np.asarray(f)
    g = f.copy()
    pos = f > 0
    y = np.log(f[pos]) / 0.1
    g[pos] = np.exp(y + 0.49 * (np.sin(y) + np.sin(0.79 * y))) ** 0.1
    neg = f < 0
    y = np.log(-f[neg]) / 0.1
    g[neg] = -np.exp(y + 0.49 * (np.sin(0.55 * y) + np.sin(0.31 * y))) ** 0.1
    return g
def defaultboundaryhandling(x, fac):
    """Return a quadratic penalty for components outside the [-5, 5] box."""
    # signed overshoot beyond the box; zero for components inside it
    overshoot = np.maximum(0., np.abs(x) - 5) * sign(x)
    return fac * np.sum(overshoot ** 2, -1)
def gauss(N, seed):
    """Return N reproducible standard-normal samples for a given seed.

    Uses the Box-Muller transform on the deterministic `unif` stream.
    """
    u = unif(2 * N, seed)
    g = np.sqrt(-2 * np.log(u[:N])) * np.cos(2 * np.pi * u[N:2 * N])
    # exact zeros would break downstream transformations
    g[g == 0] = 1e-99
    return g
def unif(N, inseed):
    """Generate N deterministic uniform numbers in (0, 1) from a seed.

    Linear-congruential generator with a Bays-Durham shuffle table,
    reproducing the BBOB reference implementation.

    Fixes over the previous revision:
      * `warning.warn` was a NameError (the imported module is `warnings`).
      * `xrange` replaced by `range` for Python 2/3 compatibility.
    """
    # initialization: seed must be a positive number >= 1
    inseed = np.abs(inseed)
    if inseed < 1.:
        inseed = 1.
    rgrand = 32 * [0.]
    aktseed = inseed
    # warm up the generator and fill the 32-entry shuffle table
    for i in range(39, -1, -1):
        tmp = floor(aktseed / 127773.)
        aktseed = 16807. * (aktseed - tmp * 127773.) - 2836. * tmp
        if aktseed < 0:
            aktseed = aktseed + 2147483647.
        if i < 32:
            rgrand[i] = aktseed
    aktrand = rgrand[0]
    # sample numbers, shuffling through the table
    r = int(N) * [0.]
    for i in range(int(N)):
        tmp = floor(aktseed / 127773.)
        aktseed = 16807. * (aktseed - tmp * 127773.) - 2836. * tmp
        if aktseed < 0:
            aktseed = aktseed + 2147483647.
        tmp = int(floor(aktrand / 67108865.))
        aktrand = rgrand[tmp]
        rgrand[tmp] = aktseed
        r[i] = aktrand / 2.147483647e9
    r = np.asarray(r)
    if (r == 0).any():
        # BUG FIX: was `warning.warn`, which raised NameError when triggered.
        warnings.warn('zero sampled(?), set to 1e-99')
        r[r == 0] = 1e-99
    return r
# for testing and comparing to other implementations,
# myrand and myrandn are used only for sampling the noise
# Rename to myrand and myrandn to rand and randn and
# comment lines 24 and 25.
_randomnseed = 30.  # warning: module-global state, used only for noise sampling

def _myrandn(size):
    """Reproducible normal sampling — testing/comparison stand-in for randn."""
    global _randomnseed
    _randomnseed += 1.
    if _randomnseed > 1e9:
        _randomnseed = 1.
    return np.reshape(gauss(np.prod(size), _randomnseed), size)
_randomseed = 30.  # warning: module-global state, used only for noise sampling

def _myrand(size):
    """Reproducible uniform sampling — testing/comparison stand-in for rand."""
    global _randomseed
    _randomseed += 1
    if _randomseed > 1e9:
        _randomseed = 1
    return np.reshape(unif(np.prod(size), _randomseed), size)
def fGauss(ftrue, beta):
    """Return values perturbed by multiplicative Gaussian noise.

    All noisy values get a constant +1.01*tol offset; entries whose true
    value is below tol are reset to the true value.
    """
    # ftrue is expected to be a np.array
    shape = np.shape(ftrue)
    noisy = ftrue * np.exp(beta * _randn(shape))  # multiplicative gauss noise
    tol = 1e-8
    noisy = noisy + 1.01 * tol
    small = ftrue < tol
    try:
        noisy[small] = ftrue[small]
    except IndexError:  # noisy is a scalar
        if small:
            noisy = ftrue
    return noisy
def fUniform(ftrue, alpha, beta):
    """Return values perturbed by uniform-model noise.

    All noisy values get a constant +1.01*tol offset; entries whose true
    value is below tol are reset to the true value.
    """
    # ftrue is expected to be a np.array
    shape = np.shape(ftrue)
    noisy = (_rand(shape) ** beta * ftrue *
             np.maximum(1., (1e9 / (ftrue + 1e-99)) ** (alpha * _rand(shape))))
    tol = 1e-8
    noisy = noisy + 1.01 * tol
    small = ftrue < tol
    try:
        noisy[small] = ftrue[small]
    except IndexError:  # noisy is a scalar
        if small:
            noisy = ftrue
    return noisy
def fCauchy(ftrue, alpha, p):
    """Return values perturbed by Cauchy-model noise.

    With probability p the additive term involves a Cauchy variate with
    median 1e3*alpha, otherwise it is zero.
    P(Cauchy > 1, 10, 100, 1000) = 0.25, 0.032, 0.0032, 0.00032.
    All noisy values get a constant +1.01*tol offset; entries whose true
    value is below tol are reset to the true value.
    """
    # ftrue is expected to be a np.array
    shape = np.shape(ftrue)
    noisy = ftrue + alpha * np.maximum(0., 1e3 + (_rand(shape) < p) *
                                       _randn(shape) / (np.abs(_randn(shape)) + 1e-199))
    tol = 1e-8
    noisy = noisy + 1.01 * tol
    small = ftrue < tol
    try:
        noisy[small] = ftrue[small]
    except IndexError:  # noisy is a scalar
        if small:
            noisy = ftrue
    return noisy
### CLASS DEFINITION ###
class AbstractTestFunction():
    """Abstract base class for test functions handed to a Logger.

    Concrete subclasses must provide (a) the ``fopt`` attribute and
    (b) the ``_evalfull`` method.  ``_evalfull`` returns two values: the
    possibly noisy value and the noise-free value; the latter is only
    meant for recording purposes.
    """

    def __call__(self, x):
        """Make instances callable: ``f(x)`` is equivalent to ``f.evaluate(x)``."""
        return self.evaluate(x)

    def evaluate(self, x):
        """Return the (possibly noisy) objective function value of ``x``."""
        return self._evalfull(x)[0]

    # TODO: is it better to leave evaluate out and check for hasattr('evaluate') in ExpLogger?
    def _evalfull(self, x):
        """Return (noisy, noise-free) values; must be overridden by subclasses."""
        raise NotImplementedError

    def getfopt(self):
        """Return the best function value of this instance of the function."""
        # Reading fopt before instantiation is a usage error.
        if not hasattr(self, 'iinstance'):
            raise Exception('This function class has not been instantiated yet.')
        return self._fopt

    def setfopt(self, fopt):
        try:
            self._fopt = float(fopt)
        except ValueError:
            raise Exception('Optimal function value must be cast-able to a float.')

    fopt = property(getfopt, setfopt)
class BBOBFunction(AbstractTestFunction):
    """Abstract base class of BBOB test functions.

    Implements the shared initialisation (random seed, instance id,
    target value fopt, parameter attachment) used by the concrete BBOB
    test functions; subclasses must define funId (or rrseed).
    """
    def __init__(self, iinstance=0, zerox=False, zerof=False, param=None, **kwargs):
        """Common initialisation.

        Keyword arguments:
        iinstance -- instance of the function (int)
        zerox -- sets xopt to [0, ..., 0]
        zerof -- sets fopt to 0
        param -- parameter of the function (if applicable); when zerof is
            False it must be a sequence (region, year, qntyears) used to
            derive fopt from observed data -- NOTE(review): with the
            default param=None that branch raises a TypeError; confirm
            callers always pass param or zerof=True
        kwargs -- additional attributes
        """
        # Either self.rrseed or self.funId have to be defined for BBOBFunctions
        # TODO: enforce
        try:
            rrseed = self.rrseed
        except AttributeError:
            rrseed = self.funId
        try:
            self.rseed = rrseed + 1e4 * iinstance
        except TypeError:
            # rrseed AND iinstance have to be float
            warnings.warn('self.rseed could not be set, reset to 1 instead.')
            self.rseed = 1
        self.zerox = zerox
        if zerof:
            self.fopt = 0.
        else:
            # fopt is derived from recorded observation data rather than
            # the original BBOB pseudo-random target value (which was:
            # min(1000, max(-1000, round(1e4*gauss/gauss)/100))).
            region = param[0]
            year = param[1]
            qntyears = param[2]
            observation = models.model.loadModelDB(region+'jmaData', year+qntyears+1)
            self.fopt = -calcLogLikelihood(observation, observation)
        self.iinstance = iinstance
        self.dim = None
        self.lastshape = None
        self.param = param
        # attach any further keyword arguments as instance attributes
        # (bugfix: dict.iteritems() is Python-2-only; items() works on both)
        for i, v in kwargs.items():
            setattr(self, i, v)
        self._xopt = None
    def shape_(self, x):
        """Return (shape of x, search-space dimension), x holding row vectors."""
        # this part is common to all evaluate functions
        curshape = np.shape(x)
        dim = np.shape(x)[-1]
        return curshape, dim
    def getiinstance(self):
        """Designates the instance of the function class.

        An instance in this case means a given target function value, a
        given optimal argument x, and given transformations for the
        function. It needs to have a string representation. Preferably
        it should be a number or a string.
        """
        return self._iinstance
    def setiinstance(self, iinstance):
        self._iinstance = iinstance
    iinstance = property(getiinstance, setiinstance)
    def shortstr(self):
        """Gives a short string self representation (shorter than str(self))."""
        res = 'F%s' % str(self.funId)
        if hasattr(self, 'param'):
            res += '_p%s' % str(self.param)
        return res
    def __eq__(self, obj):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3; kept as-is for backward compatibility.
        return (self.funId == obj.funId
                and (not hasattr(self, 'param') or self.param == obj.param))
    # TODO: make this test on other attributes than param?
    def _setxopt(self, xopt):
        """Set the argument of the optimum of the function."""
        self._xopt = xopt
    def _getxopt(self):
        """Return the argument of the optimum of the function."""
        if self._xopt is None:
            warnings.warn('You need to evaluate object to set dimension first.')
        return self._xopt
    xopt = property(_getxopt, _setxopt)
class BBOBNfreeFunction(BBOBFunction):
    """Class of the noise-free functions of BBOB."""
    def noise(self, ftrue):
        """Returns the noise-free function values.

        For noise-free functions the "noisy" value equals the true value;
        a copy is returned so callers may modify it in place.
        """
        return ftrue.copy()
class BBOBGaussFunction(BBOBFunction):
    """Class of the Gauss noise functions of BBOB.
    Attribute gaussbeta needs to be defined by inheriting classes.
    """
    # gaussbeta = None
    def noise(self, ftrue):
        """Returns the noisy function values."""
        # multiplicative Gauss noise of strength gaussbeta (see fGauss)
        return fGauss(ftrue, self.gaussbeta)
    def boundaryhandling(self, x):
        # quadratic penalty (factor 100) for components outside the domain
        return defaultboundaryhandling(x, 100.)
class BBOBUniformFunction(BBOBFunction, object):
    """Class of the uniform noise functions of BBOB.
    Attributes unifalphafac and unifbeta need to be defined by inheriting
    classes.
    """
    # unifalphafac = None
    # unifbeta = None
    def noise(self, ftrue):
        """Returns the noisy function values."""
        # uniform noise; the alpha factor is dimension-dependent (see fUniform)
        return fUniform(ftrue, self.unifalphafac * (0.49 + 1. / self.dim), self.unifbeta)
    def boundaryhandling(self, x):
        # quadratic penalty (factor 100) for components outside the domain
        return defaultboundaryhandling(x, 100.)
class BBOBCauchyFunction(BBOBFunction):
    """Class of the Cauchy noise functions of BBOB.
    Attributes cauchyalpha and cauchyp need to be defined by inheriting
    classes.
    """
    # cauchyalpha = None
    # cauchyp = None
    def noise(self, ftrue):
        """Returns the noisy function values."""
        # seldom (probability cauchyp) heavy-tailed Cauchy noise (see fCauchy)
        return fCauchy(ftrue, self.cauchyalpha, self.cauchyp)
    def boundaryhandling(self, x):
        # quadratic penalty (factor 100) for components outside the domain
        return defaultboundaryhandling(x, 100.)
class _FSphere(BBOBFunction):
    """Abstract Sphere function.
    Method boundaryhandling needs to be defined.
    """
    rrseed = 1  # base random seed shared by all sphere variants
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        # COMPUTATION core
        ftrue = np.sum(x**2, -1)
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F1(_FSphere, BBOBNfreeFunction):
    """Noise-free Sphere function"""
    funId = 1
    def boundaryhandling(self, x):
        # no boundary penalty for the noise-free sphere
        return 0.
# --- Sphere variants with noise (funIds 101-103: moderate, 107-109: severe) ---
class F101(_FSphere, BBOBGaussFunction):
    """Sphere with moderate Gauss noise"""
    funId = 101
    gaussbeta = 0.01  # noise strength
class F102(_FSphere, BBOBUniformFunction):
    """Sphere with moderate uniform noise"""
    funId = 102
    unifalphafac = 0.01
    unifbeta = 0.01
class F103(_FSphere, BBOBCauchyFunction):
    """Sphere with moderate Cauchy noise"""
    funId = 103
    cauchyalpha = 0.01
    cauchyp = 0.05  # probability of a noise event
class F107(_FSphere, BBOBGaussFunction):
    """Sphere with Gauss noise"""
    funId = 107
    gaussbeta = 1.
class F108(_FSphere, BBOBUniformFunction):
    """Sphere with uniform noise"""
    funId = 108
    unifalphafac = 1.
    unifbeta = 1.
class F109(_FSphere, BBOBCauchyFunction):
    """Sphere with Cauchy noise"""
    funId = 109
    cauchyalpha = 1.
    cauchyp = 0.2
class F2(BBOBNfreeFunction):
    """Log-likelihood objective replacing the original BBOB F2.

    Scores an individual's binning against a set of observation models
    and returns the negated worst (smallest) log-likelihood over all of
    them. NOTE(review): the original separable-ellipsoid implementation
    is kept, commented out, below this class, and this _evalfull
    signature deviates from the AbstractTestFunction contract
    (_evalfull(x)), so evaluate()/__call__ do not work here -- confirm
    the intended call sites.
    """
    funId = 2
    def _evalfull(self, individual, modelOmega, mean):
        # track the minimum log-likelihood over all observation models
        logValue = float('Inf')
        genomeModel=models.model.newModel(modelOmega[0].definitions)
        genomeModel.bins=list(individual)
        modelLambda=models.model.newModel(modelOmega[0].definitions)
        modelLambda.bins=calcNumberBins(genomeModel.bins, mean)
        for i in range(len(modelOmega)):
            tempValue=calcLogLikelihood(modelLambda, modelOmega[i])
            # calcLogLikelihood.cache_clear()
            if tempValue < logValue:
                logValue = tempValue
        # negate so the framework can minimise; both tuple slots identical
        return -logValue, -logValue
# class F2(BBOBNfreeFunction):
# """Separable ellipsoid with monotone transformation
# Parameter: condition number (default 1e6)
# """
# funId = 2
# paramValues = (1e0, 1e6)
# condition = 1e6
# def initwithsize(self, curshape, dim):
# # DIM-dependent initialization
# if self.dim != dim:
# if self.zerox:
# self.xopt = zeros(dim)
# else:
# self.xopt = compute_xopt(self.rseed, dim)
# if hasattr(self, 'param') and self.param: # not self.param is None
# tmp = self.param
# else:
# tmp = self.condition
# self.scales = tmp ** linspace(0, 1, dim)
# # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
# if self.lastshape != curshape:
# self.dim = dim
# self.lastshape = curshape
# self.arrxopt = resize(self.xopt, curshape)
# def _evalfull(self, x):
# fadd = self.fopt
# curshape, dim = self.shape_(x)
# # it is assumed x are row vectors
# if self.lastshape != curshape:
# self.initwithsize(curshape, dim)
# # TRANSFORMATION IN SEARCH SPACE
# x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
# # COMPUTATION core
# ftrue = dot(monotoneTFosc(x)**2, self.scales)
# fval = self.noise(ftrue) # without noise
# # FINALIZE
# ftrue += fadd
# fval += fadd
# return fval, ftrue
class F3(BBOBNfreeFunction):
    """Rastrigin with monotone transformation separable "condition" 10"""
    funId = 3
    condition = 10.
    beta = 0.2
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialisation
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
            self.arrscales = resize(self.scales, curshape)
            self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt
        x = monotoneTFosc(x)
        idx = (x > 0)
        x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))  # asymmetric distortion
        x = self.arrscales * x
        # COMPUTATION core
        ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
        fval = self.noise(ftrue) # without noise
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F4(BBOBNfreeFunction):
    """skew Rastrigin-Bueche, condition 10, skew-"condition" 100"""
    funId = 4
    condition = 10.
    alpha = 100.
    maxindex = np.inf # 1:2:min(DIM,maxindex) are the skew variables
    rrseed = 3
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
                # every second coordinate of the optimum is made positive
                self.xopt[:min(dim, self.maxindex):2] = abs(self.xopt[:min(dim, self.maxindex):2])
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
            self.arrscales = resize(self.scales, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        xoutside = np.maximum(0., np.abs(x) - 5) * sign(x)
        fpen = 1e2 * np.sum(xoutside**2, -1) # penalty
        fadd = fadd + fpen # self.fadd becomes an array
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # shift optimum to zero
        x = monotoneTFosc(x)
        try:
            tmpx = x[:, :min(self.dim, self.maxindex):2] # tmpx is a reference to a part of x
        except IndexError:
            tmpx = x[:min(self.dim, self.maxindex):2] # tmpx is a reference to a part of x
        tmpx[tmpx > 0] = self.alpha ** .5 * tmpx[tmpx > 0] # this modifies x
        x = self.arrscales * x # scale while assuming that Xopt == 0
        # COMPUTATION core
        ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F5(BBOBNfreeFunction):
    """Linear slope"""
    funId = 5
    alpha = 100.
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim) # TODO: what happens here?
            else:
                self.xopt = 5 * sign(compute_xopt(self.rseed, dim))
            self.scales = -sign(self.xopt) * (self.alpha ** .5) ** linspace(0, 1, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # offset so the optimum (at the domain corner) attains fopt
        fadd = fadd + 5 * np.sum(np.abs(self.scales))
        # BOUNDARY HANDLING
        # move "too" good coordinates back into domain
        x = np.array(x) # convert x and make a copy of x.
        #The following may modify x directly.
        idx_out_of_bounds = (x * self.arrxopt) > 25 # 25 == 5 * 5
        x[idx_out_of_bounds] = sign(x[idx_out_of_bounds]) * 5
        # TRANSFORMATION IN SEARCH SPACE
        # COMPUTATION core
        ftrue = dot(x, self.scales)
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F6(BBOBNfreeFunction):
    """Attractive sector function"""
    funId = 6
    condition = 10.
    alpha = 100.
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.linearTF) # TODO: check
        # COMPUTATION core
        # components pointing towards the optimum's sector are amplified
        idx = (x * self.arrxopt) > 0
        x[idx] = self.alpha * x[idx]
        ftrue = monotoneTFosc(np.sum(x**2, -1)) ** .9
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class _FStepEllipsoid(BBOBFunction):
    """Abstract Step-ellipsoid, condition 100

    The search space is rotated/scaled and discretised into plateaus
    before a rotated ellipsoid with condition number `condition` is
    evaluated. Method boundaryhandling needs to be defined by subclasses.
    """
    rrseed = 7
    condition = 100.
    alpha = 10.  # resolution of the fine discretisation grid near zero
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = self.condition ** linspace(0, 1, dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim),
                                diag(((self.condition/10.)**.5) ** linspace(0, 1, dim)))
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.linearTF)
        try:
            x1 = x[:, 0]
        except IndexError:
            x1 = x[0]
        idx = np.abs(x) > .5
        # round large components to integers, small ones to a 1/alpha grid.
        # BUGFIX: use the boolean complement ~idx -- np.negative on a boolean
        # array is arithmetic negation and raises a TypeError on modern
        # numpy instead of inverting the mask.
        x[idx] = np.round(x[idx])
        x[~idx] = np.round(self.alpha * x[~idx]) / self.alpha
        x = dot(x, self.rotation)
        # COMPUTATION core
        # the x1-term keeps a minimal slope on the plateaus
        ftrue = .1 * np.maximum(1e-4 * np.abs(x1), dot(x ** 2, self.scales))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
# --- Step-ellipsoid variants: noise-free F7 and noisy funIds 113-115 ---
class F7(_FStepEllipsoid, BBOBNfreeFunction):
    """Step-ellipsoid, condition 100, noise-free"""
    funId = 7
    def boundaryhandling(self, x):
        # quadratic penalty (factor 1) for components outside the domain
        return defaultboundaryhandling(x, 1.)
class F113(_FStepEllipsoid, BBOBGaussFunction):
    """Step-ellipsoid with gauss noise, condition 100"""
    funId = 113
    gaussbeta = 1.
class F114(_FStepEllipsoid, BBOBUniformFunction):
    """Step-ellipsoid with uniform noise, condition 100"""
    funId = 114
    unifalphafac = 1.
    unifbeta = 1.
class F115(_FStepEllipsoid, BBOBCauchyFunction):
    """Step-ellipsoid with Cauchy noise, condition 100"""
    funId = 115
    cauchyalpha = 1.
    cauchyp = 0.2
class _FRosenbrock(BBOBFunction):
    """Abstract Rosenbrock, non-rotated
    Method boundaryhandling needs to be defined.
    """
    rrseed = 8
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = .75 * compute_xopt(self.rseed, dim) # different from all others
            self.scales = max(1, dim ** .5 / 8.)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= self.arrxopt!
        x = self.scales * x
        x = x + 1 # shift zero to factual optimum 1
        # COMPUTATION core
        try:
            ftrue = (1e2 * np.sum((x[:, :-1] ** 2 - x[:, 1:]) ** 2, -1) +
                     np.sum((x[:, :-1] - 1.) ** 2, -1))
        except IndexError:  # x is a single row vector
            ftrue = (1e2 * np.sum((x[:-1] ** 2 - x[1:]) ** 2) +
                     np.sum((x[:-1] - 1.) ** 2))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
# --- Rosenbrock (non-rotated) variants: noise-free F8, noisy funIds 104-112 ---
class F8(_FRosenbrock, BBOBNfreeFunction):
    """Rosenbrock noise-free"""
    funId = 8
    def boundaryhandling(self, x):
        # no boundary penalty in the noise-free case
        return 0.
class F104(_FRosenbrock, BBOBGaussFunction):
    """Rosenbrock non-rotated with moderate Gauss noise"""
    funId = 104
    gaussbeta = 0.01
class F105(_FRosenbrock, BBOBUniformFunction):
    """Rosenbrock non-rotated with moderate uniform noise"""
    funId = 105
    unifalphafac = 0.01
    unifbeta = 0.01
class F106(_FRosenbrock, BBOBCauchyFunction):
    """Rosenbrock non-rotated with moderate Cauchy noise"""
    funId = 106
    cauchyalpha = 0.01
    cauchyp = 0.05
class F110(_FRosenbrock, BBOBGaussFunction):
    """Rosenbrock non-rotated with Gauss noise"""
    funId = 110
    gaussbeta = 1.
class F111(_FRosenbrock, BBOBUniformFunction):
    """Rosenbrock non-rotated with uniform noise"""
    funId = 111
    unifalphafac = 1.
    unifbeta = 1.
class F112(_FRosenbrock, BBOBCauchyFunction):
    """Rosenbrock non-rotated with Cauchy noise"""
    funId = 112
    cauchyalpha = 1.
    cauchyp = 0.2
class F9(BBOBNfreeFunction):
    """Rosenbrock, rotated"""
    funId = 9
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            scale = max(1, dim ** .5 / 8.) # nota: different from scales in F8
            self.linearTF = scale * compute_rotation(self.rseed, dim)
            # the optimum is defined implicitly through the linear transform
            self.xopt = np.hstack(dot(.5 * np.ones((1, dim)), self.linearTF.T)) / scale ** 2
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        # TRANSFORMATION IN SEARCH SPACE
        x = dot(x, self.linearTF) + 0.5 # different from F8
        # COMPUTATION core
        try:
            ftrue = (1e2 * np.sum((x[:, :-1] ** 2 - x[:, 1:]) ** 2, -1) +
                     np.sum((x[:, :-1] - 1.) ** 2, -1))
        except IndexError:  # x is a single row vector
            ftrue = (1e2 * np.sum((x[:-1] ** 2 - x[1:]) ** 2) +
                     np.sum((x[:-1] - 1.) ** 2))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class _FEllipsoid(BBOBFunction):
    """Abstract Ellipsoid with monotone transformation.
    Method boundaryhandling needs to be defined.
    """
    rrseed = 10
    condition = 1e6  # default condition number; subclasses may override
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = self.condition ** linspace(0, 1, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation)
        x = monotoneTFosc(x)
        # COMPUTATION core
        ftrue = dot(x ** 2, self.scales)
        try:
            ftrue = np.hstack(ftrue)
        except TypeError: # argument 2 to map() must support iteration
            pass
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
# --- Ellipsoid variants: noise-free F10 and noisy funIds 116-118 ---
class F10(_FEllipsoid, BBOBNfreeFunction):
    """Ellipsoid with monotone transformation, condition 1e6"""
    funId = 10
    condition = 1e6
    def boundaryhandling(self, x):
        # no boundary penalty in the noise-free case
        return 0.
class F116(_FEllipsoid, BBOBGaussFunction):
    """Ellipsoid with Gauss noise, monotone x-transformation, condition 1e4"""
    funId = 116
    condition = 1e4
    gaussbeta = 1.
class F117(_FEllipsoid, BBOBUniformFunction):
    """Ellipsoid with uniform noise, monotone x-transformation, condition 1e4"""
    funId = 117
    condition = 1e4
    unifalphafac = 1.
    unifbeta = 1.
class F118(_FEllipsoid, BBOBCauchyFunction):
    """Ellipsoid with Cauchy noise, monotone x-transformation, condition 1e4"""
    funId = 118
    condition = 1e4
    cauchyalpha = 1.
    cauchyp = 0.2
class F11(BBOBNfreeFunction):
    """Discus (tablet) with monotone transformation, condition 1e6"""
    funId = 11
    condition = 1e6
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation)
        x = monotoneTFosc(x)
        # COMPUTATION core
        # the first coordinate is weighted by the condition number
        try:
            ftrue = np.sum(x**2, -1) + (self.condition - 1.) * x[:, 0] ** 2
        except IndexError:  # x is a single row vector
            ftrue = np.sum(x**2) + (self.condition - 1.) * x[0] ** 2
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F12(BBOBNfreeFunction):
    """Bent cigar with asymmetric space distortion, condition 1e6"""
    funId = 12
    condition = 1e6
    beta = .5
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed + 1e6, dim) # different from others
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
            self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation) # no scaling here, because it would go to the arrExpo
        idx = x > 0
        x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))  # asymmetric distortion
        x = dot(x, self.rotation)
        # COMPUTATION core
        try:
            ftrue = self.condition * np.sum(x**2, -1) + (1 - self.condition) * x[:, 0] ** 2
        except IndexError:  # x is a single row vector
            ftrue = self.condition * np.sum(x**2) + (1 - self.condition) * x[0] ** 2
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F13(BBOBNfreeFunction):
    """Sharp ridge"""
    funId = 13
    condition = 10.
    alpha = 100. # slope
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.linearTF)
        # COMPUTATION core
        # quadratic in the first coordinate, sharp (sqrt) ridge in the rest
        try:
            ftrue = x[:, 0] ** 2 + self.alpha * np.sqrt(np.sum(x[:, 1:] ** 2, -1))
        except IndexError:  # x is a single row vector
            ftrue = x[0] ** 2 + self.alpha * np.sqrt(np.sum(x[1:] ** 2, -1))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class _FDiffPow(BBOBFunction):
    """Abstract Sum of different powers, between x^2 and x^6.
    Method boundaryhandling needs to be defined.
    """
    alpha = 4.  # exponent range: per-coordinate exponents run from 2 to 2+alpha
    rrseed = 14
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
            self.arrexpo = resize(2. + self.alpha * linspace(0, 1, dim), curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation)
        # COMPUTATION core
        ftrue = np.sqrt(np.sum(np.abs(x) ** self.arrexpo, -1))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
# --- Sum-of-different-powers variants: noise-free F14, noisy funIds 119-121 ---
class F14(_FDiffPow, BBOBNfreeFunction):
    """Sum of different powers, between x^2 and x^6, noise-free"""
    funId = 14
    def boundaryhandling(self, x):
        # no boundary penalty in the noise-free case
        return 0.
class F119(_FDiffPow, BBOBGaussFunction):
    """Sum of different powers with Gauss noise, between x^2 and x^6"""
    funId = 119
    gaussbeta = 1.
class F120(_FDiffPow, BBOBUniformFunction):
    """Sum of different powers with uniform noise, between x^2 and x^6"""
    funId = 120
    unifalphafac = 1.
    unifbeta = 1.
class F121(_FDiffPow, BBOBCauchyFunction):
    """Sum of different powers with seldom Cauchy noise, between x^2 and x^6"""
    funId = 121
    cauchyalpha = 1.
    cauchyp = 0.2
class F15(BBOBNfreeFunction):
    """Rastrigin with asymmetric non-linear distortion, "condition" 10"""
    funId = 15
    condition = 10.
    beta = 0.2
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
            self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation) # no scaling here, because it would go to the arrexpo
        x = monotoneTFosc(x)
        idx = x > 0.
        x[idx] = x[idx] ** (1. + self.arrexpo[idx] * np.sqrt(x[idx])) # smooth in zero
        x = dot(x, self.linearTF)
        # COMPUTATION core
        ftrue = 10. * (dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F16(BBOBNfreeFunction):
    """Weierstrass, condition 100"""
    funId = 16
    condition = 100.
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (1. / self.condition ** .5) ** linspace(0, 1, dim) # CAVE?
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
            # coefficients of the 12-term Weierstrass series
            K = np.arange(0, 12)
            self.aK = np.reshape(0.5 ** K, (1, 12))
            self.bK = np.reshape(3. ** K, (1, 12))
            self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5)) # optimal value
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
        fpen = (10. / dim) * np.sum(xoutside ** 2, -1)
        fadd = fadd + fpen
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation)
        x = monotoneTFosc(x)
        x = dot(x, self.linearTF)
        # COMPUTATION core
        if len(curshape) < 2: # popsize is one
            ftrue = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(x, (1, len(x))) + 0.5)))))
        else:
            ftrue = np.zeros(curshape[0]) # curshape[0] is popsize
            for k, i in enumerate(x):
                # TODO: simplify next line
                ftrue[k] = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(i, (1, len(i))) + 0.5)))))
        ftrue = 10. * (ftrue / dim - self.f0) ** 3
        try:
            ftrue = np.hstack(ftrue)
        except TypeError:
            pass
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class _FSchaffersF7(BBOBFunction):
    """Abstract Schaffers F7 with asymmetric non-linear transformation, condition 10
    Class attribute condition and method boundaryhandling need to be defined.
    """
    rrseed = 17
    condition = None  # must be overridden by subclasses
    beta = 0.5
    def initwithsize(self, curshape, dim):
        """Lazily (re-)initialise data that depends on dim / population shape."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1 , dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
            self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
    def _evalfull(self, x):
        """Return (noisy value, noise-free value) for row vector(s) x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.rotation)
        idx = x > 0
        x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))  # asymmetric distortion
        x = dot(x, self.linearTF)
        # COMPUTATION core
        # s holds the squared norms of consecutive coordinate pairs
        try:
            s = x[:, :-1] ** 2 + x[:, 1:] ** 2
        except IndexError:  # x is a single row vector
            s = x[:-1] ** 2 + x[1:] ** 2
        ftrue = np.mean(s ** .25 * (np.sin(50 * s ** .1) ** 2 + 1), -1) ** 2
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
# --- Schaffers F7 variants: noise-free F17/F18 and noisy funIds 122-124 ---
class F17(_FSchaffersF7, BBOBNfreeFunction):
    """Schaffers F7 with asymmetric non-linear transformation, condition 10"""
    funId = 17
    condition = 10.
    def boundaryhandling(self, x):
        # quadratic penalty (factor 10) for components outside the domain
        return defaultboundaryhandling(x, 10.)
class F18(_FSchaffersF7, BBOBNfreeFunction):
    """Schaffers F7 with asymmetric non-linear transformation, condition 1000"""
    funId = 18
    condition = 1000.
    def boundaryhandling(self, x):
        # quadratic penalty (factor 10) for components outside the domain
        return defaultboundaryhandling(x, 10.)
class F122(_FSchaffersF7, BBOBGaussFunction):
    """Schaffers F7 with Gauss noise, with asymmetric non-linear transformation, condition 10"""
    funId = 122
    condition = 10.
    gaussbeta = 1.
class F123(_FSchaffersF7, BBOBUniformFunction):
    """Schaffers F7 with uniform noise, asymmetric non-linear transformation, condition 10"""
    funId = 123
    condition = 10.
    unifalphafac = 1.
    unifbeta = 1.
class F124(_FSchaffersF7, BBOBCauchyFunction): # TODO: check boundary handling
    """Schaffers F7 with seldom Cauchy noise, asymmetric non-linear transformation, condition 10"""
    funId = 124
    condition = 10.
    cauchyalpha = 1.
    cauchyp = 0.2
class _F8F2(BBOBFunction):
    """Abstract F8F2 sum of Griewank-Rosenbrock 2-D blocks
    Class attribute facftrue and method boundaryhandling need to be defined.
    """
    facftrue = None  # overall scaling of the true function value; set by subclasses
    rrseed = 19
    def initwithsize(self, curshape, dim):
        """Lazily (re)compute cached transformation data; called from _evalfull."""
        # DIM-dependent initialization
        if self.dim != dim:
            scale = max(1, dim ** .5 / 8.)
            self.linearTF = scale * compute_rotation(self.rseed, dim)
            #if self.zerox:
            #    self.xopt = zeros(dim) # does not work here
            #else:
            # TODO: clean this line
            # optimum is the preimage of the all-0.5 point under the scaled rotation
            self.xopt = np.hstack(dot(self.linearTF, 0.5 * np.ones((dim, 1)) / scale ** 2))
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, true value) of the function at x (row vectors)."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING (penalty supplied by the concrete subclass)
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = dot(x, self.linearTF) + 0.5 # cannot be replaced with x -= arrxopt!
        # COMPUTATION core: Rosenbrock on consecutive coordinate pairs...
        try:
            f2 = 100. * (x[:, :-1] ** 2 - x[:, 1:]) ** 2 + (1. - x[:, :-1]) ** 2
        except IndexError:
            f2 = 100. * (x[:-1] ** 2 - x[1:]) ** 2 + (1. - x[:-1]) ** 2
        # ...fed through the Griewank form and averaged over the dim-1 pairs
        ftrue = self.facftrue + self.facftrue * np.sum(f2 / 4000. - np.cos(f2), -1) / (dim - 1.)
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F19(_F8F2, BBOBNfreeFunction):
    """F8F2 sum of Griewank-Rosenbrock 2-D blocks, noise-free"""
    funId = 19
    facftrue = 10.
    def boundaryhandling(self, x):
        # the noise-free variant applies no boundary penalty
        return 0.
class F125(_F8F2, BBOBGaussFunction):
    """F8F2 sum of Griewank-Rosenbrock 2-D blocks with Gauss noise"""
    # boundaryhandling is inherited -- presumably from the noisy base class; confirm
    funId = 125
    facftrue = 1.
    gaussbeta = 1.
class F126(_F8F2, BBOBUniformFunction):
    """F8F2 sum of Griewank-Rosenbrock 2-D blocks with uniform noise"""
    funId = 126
    facftrue = 1.
    unifalphafac = 1.
    unifbeta = 1.
class F127(_F8F2, BBOBCauchyFunction):
    """F8F2 sum of Griewank-Rosenbrock 2-D blocks with seldom Cauchy noise"""
    funId = 127
    facftrue = 1.
    cauchyalpha = 1.
    cauchyp = 0.2
class F20(BBOBNfreeFunction):
    """Schwefel with tridiagonal variable transformation"""
    funId = 20
    condition = 10.
    def initwithsize(self, curshape, dim):
        """Lazily (re)compute cached transformation data; called from _evalfull."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                # optimum coordinates at +-4.2096874633/2 with random signs
                self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633
            self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(2 * np.abs(self.xopt), curshape)
            self.arrscales = resize(self.scales, curshape)
            self.arrsigns = resize(sign(self.xopt), curshape)
    def _evalfull(self, x):
        """Return (noisy value, true value) of the function at x (row vectors)."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # TRANSFORMATION IN SEARCH SPACE (tridiagonal coupling of coordinates)
        x = 2 * self.arrsigns * x # makes the below boundary handling effective for coordinates
        try:
            x[:, 1:] = x[:, 1:] + .25 * (x[:, :-1] - self.arrxopt[:, :-1])
        except IndexError:
            x[1:] = x[1:] + .25 * (x[:-1] - self.arrxopt[:-1])
        x = 100. * (self.arrscales * (x - self.arrxopt) + self.arrxopt)
        # BOUNDARY HANDLING
        xoutside = np.maximum(0., np.abs(x) - 500.) * sign(x) # in [-500, 500]
        fpen = 0.01 * np.sum(xoutside ** 2, -1)
        fadd = fadd + fpen
        # COMPUTATION core (418.98... is the Schwefel normalization constant)
        ftrue = 0.01 * ((418.9828872724339) - np.mean(x * np.sin(np.sqrt(np.abs(x))), -1))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class _FGallagher(BBOBFunction):
    """Abstract Gallagher with nhighpeaks Gaussian peaks, condition up to 1000, one global rotation
    Attribute fac2, nhighpeaks, highpeakcond and method boundary
    handling need to be defined.
    """
    rrseed = 21
    maxcondition = 1000.  # upper bound of the per-peak conditioning
    fitvalues = (1.1, 9.1)  # range of the non-optimal peak heights
    fac2 = None # added: factor for xopt not too close to boundaries, used by F22
    nhighpeaks = None  # number of Gaussian peaks; set by subclasses
    highpeakcond = None  # conditioning of the global-optimum peak; set by subclasses
    def initwithsize(self, curshape, dim):
        """Lazily (re)compute peak locations, scales and heights; called from _evalfull."""
        # DIM-dependent initialization
        if self.dim != dim:
            self.rotation = compute_rotation(self.rseed, dim)
            arrcondition = self.maxcondition ** linspace(0, 1, self.nhighpeaks - 1)
            idx = np.argsort(unif(self.nhighpeaks - 1, self.rseed)) # random permutation
            # first entry (index 0) is the global-optimum peak's conditioning
            arrcondition = np.insert(arrcondition[idx], 0, self.highpeakcond)
            self.arrscales = []
            for i, e in enumerate(arrcondition):
                s = e ** linspace(-.5, .5, dim)
                idx = np.argsort(unif(dim, self.rseed + 1e3 * i)) # permutation instead of rotation
                self.arrscales.append(s[idx]) # this is inverse Cov
            self.arrscales = np.vstack(self.arrscales)
            # compute peak values, 10 is global optimum
            self.peakvalues = np.insert(linspace(self.fitvalues[0], self.fitvalues[1], self.nhighpeaks - 1), 0, 10.)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.xlocal = dot(self.fac2 * np.reshape(10. * unif(dim * self.nhighpeaks, self.rseed) - 5., (self.nhighpeaks, dim)),
                              self.rotation)
            if self.zerox:
                self.xlocal[0, :] = zeros(dim)
            else:
                # global optimum not too close to boundary
                self.xlocal[0, :] = 0.8 * self.xlocal[0, :]
            self.xopt = dot(self.xlocal[0, :], self.rotation.T)
    def _evalfull(self, x):
        """Return (noisy value, true value) of the function at x (row vectors)."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING (penalty supplied by the concrete subclass)
        fadd = fadd + self.boundaryhandling(x)
        # TRANSFORMATION IN SEARCH SPACE
        x = dot(x, self.rotation)
        # COMPUTATION core: evaluate all peaks; three code paths pick the
        # cheaper loop direction depending on popsize vs number of peaks
        fac = -0.5 / dim
        # f = NaN(nhighpeaks, popsi)
        # TODO: optimize
        if len(curshape) < 2: # popsize is 1 in this case
            f = np.zeros(self.nhighpeaks)
            xx = tile(x, (self.nhighpeaks, 1)) - self.xlocal
            f[:] = self.peakvalues * np.exp(fac * np.sum(self.arrscales * xx ** 2, 1))
        elif curshape[0] < .5 * self.nhighpeaks:
            f = np.zeros((curshape[0], self.nhighpeaks))
            for k, e in enumerate(x):
                xx = tile(e, (self.nhighpeaks, 1)) - self.xlocal
                f[k, :] = self.peakvalues * np.exp(fac * np.sum(self.arrscales * xx ** 2, 1))
        else:
            f = np.zeros((curshape[0], self.nhighpeaks))
            for i in range(self.nhighpeaks):
                xx = (x - tile(self.xlocal[i, :], (curshape[0], 1)))
                f[:, i] = self.peakvalues[i] * np.exp(fac * (dot(xx ** 2, self.arrscales[i, :])))
        # best (highest) peak determines the value; 10 - max(f) vanishes at the optimum
        ftrue = monotoneTFosc(10 - np.max(f, -1)) ** 2
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F21(_FGallagher, BBOBNfreeFunction):
    """Gallagher with 101 Gaussian peaks, condition up to 1000, one global rotation, noise-free"""
    funId = 21
    nhighpeaks = 101
    fac2 = 1.
    highpeakcond = 1000. ** .5
    def boundaryhandling(self, x):
        # delegate to the module-wide default boundary penalty, weight 1
        return defaultboundaryhandling(x, 1.)
class F22(_FGallagher, BBOBNfreeFunction):
    """Gallagher with 21 Gaussian peaks, condition up to 1000, one global rotation"""
    funId = 22
    rrseed = 22
    nhighpeaks = 21
    fac2 = 0.98  # keeps xopt away from the boundary (see _FGallagher.fac2)
    highpeakcond = 1000.
    def boundaryhandling(self, x):
        return defaultboundaryhandling(x, 1.)
class F128(_FGallagher, BBOBGaussFunction): # TODO: check boundary handling
    """Gallagher with 101 Gaussian peaks with Gauss noise, condition up to 1000, one global rotation"""
    # boundaryhandling is inherited -- presumably from the noisy base class; confirm
    funId = 128
    nhighpeaks = 101
    fac2 = 1.
    highpeakcond = 1000. ** .5
    gaussbeta = 1.
class F129(_FGallagher, BBOBUniformFunction):
    """Gallagher with 101 Gaussian peaks with uniform noise, condition up to 1000, one global rotation"""
    funId = 129
    nhighpeaks = 101
    fac2 = 1.
    highpeakcond = 1000. ** .5
    unifalphafac = 1.
    unifbeta = 1.
class F130(_FGallagher, BBOBCauchyFunction):
    """Gallagher with 101 Gaussian peaks with seldom Cauchy noise, condition up to 1000, one global rotation"""
    funId = 130
    nhighpeaks = 101
    fac2 = 1.
    highpeakcond = 1000. ** .5
    cauchyalpha = 1.
    cauchyp = 0.2
class F23(BBOBNfreeFunction):
    """Katsuura function"""
    funId = 23
    condition = 100.
    # powers of two 2^1..2^32 used by the Katsuura digit sum
    arr2k = np.reshape(2. ** (np.arange(1, 33)), (1, 32)) # bug-fix for 32-bit (NH): 2 -> 2. (relevance is minor)
    def initwithsize(self, curshape, dim):
        """Lazily (re)compute cached transformation data; called from _evalfull."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                self.xopt = compute_xopt(self.rseed, dim)
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, true value) of the function at x (row vectors)."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
        fpen = np.sum(xoutside ** 2, -1)
        fadd = fadd + fpen
        # TRANSFORMATION IN SEARCH SPACE
        x = x - self.arrxopt # cannot be replaced with x -= arrxopt!
        x = dot(x, self.linearTF)
        # COMPUTATION core: Katsuura product over 32 binary "digits" per coordinate
        if len(curshape) < 2: # popsize is 1 in this case
            arr = dot(np.reshape(x, (dim, 1)), self.arr2k) # dim times d array
            ftrue = (-10. / dim ** 2. +
                     10. / dim ** 2. *
                     np.prod(1 + np.arange(1, dim + 1) * np.dot(np.abs(arr - np.round(arr)), self.arr2k.T ** -1.).T) ** (10. / dim ** 1.2))
        else:
            ftrue = zeros(curshape[0])
            for k, e in enumerate(x):
                arr = dot(np.reshape(e, (dim, 1)), self.arr2k) # dim times d array
                ftrue[k] = (-10. / dim ** 2. +
                            10. / dim ** 2. *
                            np.prod(1 + np.arange(1, dim + 1) * np.dot(np.abs(arr - np.round(arr)), self.arr2k.T ** -1.).T) ** (10. / dim ** 1.2))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
class F24(BBOBNfreeFunction):
    """Lunacek bi-Rastrigin, condition 100
    in PPSN 2008, Rastrigin part rotated and scaled
    """
    funId = 24
    condition = 100.
    _mu1 = 2.5  # location parameter of the first (global) sphere component
    def initwithsize(self, curshape, dim):
        """Lazily (re)compute cached transformation data; called from _evalfull."""
        # DIM-dependent initialization
        if self.dim != dim:
            if self.zerox:
                self.xopt = zeros(dim)
            else:
                # optimum at +-mu1/2 per coordinate with random signs
                self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))
            self.rotation = compute_rotation(self.rseed + 1e6, dim)
            self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
            self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
            # decouple scaling from function definition
            self.linearTF = dot(self.linearTF, self.rotation)
        # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            #self.arrxopt = resize(self.xopt, curshape)
            self.arrscales = resize(2. * sign(self.xopt), curshape) # makes up for xopt
    def _evalfull(self, x):
        """Return (noisy value, true value) of the function at x (row vectors)."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # it is assumed x are row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING
        xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
        fpen = 1e4 * np.sum(xoutside ** 2, -1)
        fadd = fadd + fpen
        # TRANSFORMATION IN SEARCH SPACE
        x = self.arrscales * x
        # COMPUTATION core: minimum of the two sphere basins plus the Rastrigin term
        s = 1 - .5 / ((dim + 20) ** .5 - 4.1) # tested up to DIM = 160 p in [0.25,0.33]
        d = 1 # shift [1,3], smaller is more difficult
        mu2 = -((self._mu1 ** 2 - d) / s) ** .5
        ftrue = np.minimum(np.sum((x - self._mu1) ** 2, -1),
                           d * dim + s * np.sum((x - mu2) ** 2, -1))
        ftrue = ftrue + 10 * (dim - np.sum(np.cos(2 * np.pi * dot(x - self._mu1, self.linearTF)), -1))
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
#dictbbob = {'sphere': F1, 'ellipsoid': F2, 'Rastrigin': F3}
# Module-level registries mapping BBOB function ids to classes, built once at
# import time; `dictbbob` is the lookup used by instantiate()/get_param() below.
nfreefunclasses = (F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14,
                   F15, F16, F17, F18, F19, F20, F21, F22, F23, F24) # hard coded
noisyfunclasses = (F101, F102, F103, F104, F105, F106, F107, F108, F109, F110,
                   F111, F112, F113, F114, F115, F116, F117, F118, F119, F120,
                   F121, F122, F123, F124, F125, F126, F127, F128, F129, F130)
dictbbobnfree = dict((i.funId, i) for i in nfreefunclasses)
nfreeIDs = sorted(dictbbobnfree.keys()) # was: "nfreenames"
nfreeinfos = [str(i) + ': ' + dictbbobnfree[i].__doc__ for i in nfreeIDs]
dictbbobnoisy = dict((i.funId, i) for i in noisyfunclasses)
noisyIDs = sorted(dictbbobnoisy.keys()) # was noisynames
funclasses = list(nfreefunclasses) + list(noisyfunclasses)
dictbbob = dict((i.funId, i) for i in funclasses)
#TODO: pb xopt f9, 21, 22
class _FTemplate(BBOBNfreeFunction):
    """Template based on F1: a shifted sphere (sum of squares), noise-free."""
    funId = 421337
    def initwithsize(self, curshape, dim):
        """Refresh cached state when the dimension or the input shape changes."""
        # DIM-dependent initialization: place the optimum
        if self.dim != dim:
            self.xopt = zeros(dim) if self.zerox else compute_xopt(self.rseed, dim)
        # DIM- and popsize-dependent caches (DIM*POPSI matrices)
        if self.lastshape != curshape:
            self.dim = dim
            self.lastshape = curshape
            self.linearTf = None
            self.rotation = None
            self.arrxopt = resize(self.xopt, curshape)
    def _evalfull(self, x):
        """Return (noisy value, true value) for row-vector input x."""
        fadd = self.fopt
        curshape, dim = self.shape_(x)
        # x is assumed to hold row vectors
        if self.lastshape != curshape:
            self.initwithsize(curshape, dim)
        # BOUNDARY HANDLING: none for the template
        # shift to the optimum; a fresh array is required, hence no in-place op
        shifted = x - self.arrxopt
        # COMPUTATION core: sphere, summed along the coordinate axis
        ftrue = np.sum(shifted ** 2, 1)
        fval = self.noise(ftrue)
        # FINALIZE
        ftrue += fadd
        fval += fadd
        return fval, ftrue
def instantiate(ifun, iinstance=0, param=None, **kwargs):
    """Return (test function instance, optimal f-value) for benchmark id `ifun`.

    By default instance 0 is created. Extra keyword arguments are forwarded
    to the function class constructor (BBOBFunction.__init__).
    """
    fun = dictbbob[ifun](iinstance=iinstance, param=param, **kwargs)
    return fun, fun.fopt
def get_param(ifun):
    """Return the parameter values of function `ifun`, or (None,) if it has none."""
    # getattr with a default mirrors the former try/except AttributeError
    return getattr(dictbbob[ifun], 'paramValues', (None, ))
if __name__ == "__main__":
    # Self-test entry point: execute the doctests embedded in this module.
    import doctest
    doctest.testmod() # run all doctests in this module
| {
"content_hash": "573706b1abc141f6cae07a2ac3d9ff1f",
"timestamp": "",
"source": "github",
"line_count": 2172,
"max_line_length": 331,
"avg_line_length": 33.06077348066298,
"alnum_prop": 0.5845866755793226,
"repo_name": "PyQuake/earthquakemodels",
"id": "57478fe8935c10f0efe41c078cc23b44115d1d63",
"size": "71855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/gaModel/bbobbenchmarks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from optparse import make_option
from django.conf import settings
from datetime import datetime
from requests import HTTPError
from go_http.contacts import ContactsApiClient
from subscription.models import Subscription
# Message-set id constants. None are referenced below; they appear to document
# the valid values of --filter_messageset -- confirm against the Subscription app.
SUBSCRIPTION_STANDARD = 1  # less than week 32 when reg
SUBSCRIPTION_LATER = 2  # 32-35 when reg
SUBSCRIPTION_ACCELERATED = 3  # over 35 weeks when reg
SUBSCRIPTION_BABY1 = 4
SUBSCRIPTION_BABY2 = 5
SUBSCRIPTION_MISCARRIAGE = 6
SUBSCRIPTION_STILLBIRTH = 7
SUBSCRIPTION_BABYLOSS = 8
SUBSCRIPTION_SUBSCRIPTION = 9  # personal aka public line reg
SUBSCRIPTION_CHW = 10  # chw line reg
class Command(BaseCommand):
    """Management command that adds matching subscribers' contacts to a Vumi group.

    Subscribers are selected by message set id, processing status and next
    sequence number; each matching contact is fetched from the Vumi Go
    contacts API and the target group key is appended to its group list.
    With --dry_run only the number of affected records is reported.
    """
    help = "Puts a contact in a group in vumi"
    client_class = ContactsApiClient
    option_list = BaseCommand.option_list + (
        make_option('--filter_messageset',
                    dest='message_set_id', default=None, type='int',
                    help='What message set do you want to look at'),
        make_option('--filter_status',
                    dest='process_status', default=None, type='int',
                    help='What status should the processing be at'),
        make_option('--filter_seq', dest='next_sequence_number',
                    default=None, type='int',
                    # fixed: help text was copy-pasted from --filter_status
                    help='What next sequence number should be matched'),
        make_option('--set_group', dest='set_group', default=None, type='str',
                    help='What group should they be added to'),
        make_option('--dry_run', action='store_true', default=False),
    )

    def get_now(self):
        """Return the current datetime (separate method for testability)."""
        return datetime.now()

    def handle(self, *args, **options):
        """Select matching subscriptions and add each contact to the group."""
        subscribers = Subscription.objects.filter(
            message_set_id=options["message_set_id"],
            process_status=options["process_status"],
            next_sequence_number=options["next_sequence_number"])
        self.stdout.write("Affected records: %s\n" % (subscribers.count()))
        if not options["dry_run"]:
            contacts = self.client_class(settings.VUMI_GO_API_TOKEN)
            counter = 0.0
            started = self.get_now()
            for subscriber in subscribers:
                self.stdout.write("Updating: %s\n" % (subscriber.contact_key,))
                try:
                    contact = contacts.get_contact(subscriber.contact_key)
                    # Strip server-managed fields before posting the update back.
                    contact.pop("key", None)
                    contact.pop("$VERSION", None)
                    contact.pop("created_at", None)
                    contact["groups"].append(options["set_group"])
                    updatedcontact = contacts.update_contact(
                        subscriber.contact_key, contact)
                    self.stdout.write(
                        "Groups now: %s\n" % (str(updatedcontact["groups"])))
                    # Tracker updates
                    counter += 1.0
                    delta = self.get_now() - started
                    # Reuse `delta` for the division so the zero-check and the
                    # divisor agree (previously the elapsed time was recomputed
                    # after the check and could still be zero when divided).
                    if delta.seconds > 0:
                        per_second = counter / float(delta.seconds)
                    else:
                        per_second = 'unknown'
                    self.stdout.write(
                        "Updated %s subscribers at %s per second\n" % (
                            counter, per_second))
                except ValueError as err:
                    self.stdout.write(
                        "Contact %s threw %s\n" % (
                            subscriber.contact_key, err))
                except KeyError as err:
                    self.stdout.write(
                        "Contact %s threw KeyError on %s\n" % (
                            subscriber.contact_key, err))
                except HTTPError as err:
                    self.stdout.write(
                        "Contact %s threw %s\n" % (subscriber.contact_key,
                                                   err.response.status_code))
        self.stdout.write("Contacts updated\n")
| {
"content_hash": "e23b1ea8edde7be8a15bb977a4ecb610",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 41.04,
"alnum_prop": 0.5365497076023392,
"repo_name": "praekelt/ndoh-control",
"id": "aa6bdfd8d5c100631f7b99b7497a443da5c547c1",
"size": "4104",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "subscription/management/commands/add_to_group.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19562"
},
{
"name": "HTML",
"bytes": "32320"
},
{
"name": "JavaScript",
"bytes": "65518"
},
{
"name": "Nginx",
"bytes": "777"
},
{
"name": "Python",
"bytes": "553807"
},
{
"name": "Shell",
"bytes": "465"
}
],
"symlink_target": ""
} |
"""
This is an example python program. The triple quotation marks indicate
a comment block. It is good practice, especially for code that you submit
in this class, to have lots of documentation comments in your code.
"""
# Comments on a single line can also be designated by the leading pound sign
# (hash mark).
"""
Start the code by importing the libraries that you are going to need.
For this class, numpy, scipy, pyfits (or the io.fits package in
astropy), and matplotlib.pyplot are probably the most common libraries
that we will use. You can also write your own libraries. You can
import a library (import numpy), import a function or sub-library from
a library (from numpy import sqrt), and import a library while giving
it a nickname (import numpy as n).
"""
import numpy
from matplotlib import pyplot as plt
#-----------------------------------------------------------------------
"""
You may want to define functions that will do important things. Here is
a function definition (of a somewhat silly function). Note that in python
the indentation tells you what is associated with what. Thus, you should
make sure that everything that belongs together (as in a function definition,
a for loop, an if statement, etc., has the same indentation. The end of
the indentation indicates the end of the block
"""
def plot_my_data(x, y, verbose=True):
    """
    This function will plot y vs x. It has two required parameters, x and y.
    It also has an optional parameter, called 'verbose'. The default value
    of the verbose parameter is True. Thus, if you want verbose to be true,
    then you don't have to pass it to the function. All optional parameters
    must have a default value. This can be a logical (True or False), an
    integer, a float, a string, etc.
    """
    # Note the indentation for the if block.
    # The print calls use function syntax, which is valid in both Python 2
    # and Python 3 (the old "print expr" statement is a SyntaxError in 3.x).
    if verbose:
        print("")
        print("Now plotting the data.")
    # In the plot command below, the 'bo' tells the program to plot blue ('b')
    # circles ('o')
    print("")
    print("Close the plot window to continue...")
    plt.plot(x, y, 'bo')
#-----------------------------------------------------------------------
"""
Here is the main program.
One very useful task is that which reads in a text file containing data
(as floating point numbers) in columns. The data are put into a 2-dimensional
numpy array. Note that in this example, polyfit_data.txt contains 50 lines
of data and 2 columns. Therefore, in python-speak, it is a 50x2 array.
"""
# Renamed from `file`, which shadows the Python 2 builtin of the same name.
infile = 'polyfit_data.txt'
data = numpy.loadtxt(infile)
# print() function syntax works in both Python 2 and Python 3; the old
# "print expr" statement form is a SyntaxError under Python 3.
print("")
print("Dimensions of input data are %d x %d" % (data.shape[0], data.shape[1]))
"""
Once the data are loaded in, you can separate the columns. Note that in
python, (1) arrays are zero-indexed, i.e., the first element of an array
has an index of 0, and not an index of 1, and (2) for 2-d arrays, the first
index refers to y and the second refers to x.
"""
x = data[:, 0]  # Extracts the first column
y = data[:, 1]  # Extracts the second column
"""
In the above definitions, x and y are also numpy arrays, having sizes 50x1.
numpy arrays have all sorts of nice functions associated with them, such
as mean and standard deviation, but not median (which is a separate numpy
function).
"""
ymean = y.mean()
yrms = y.std()
ymed = numpy.median(y)
print("")
print("The y vector has mean=%f, rms=%f, and median=%f." % (ymean, yrms, ymed))
"""
Note that for data that are more than one dimension, you can take the overall
mean or the mean of the columns or of the rows. You can do those things
by using the optional axis parameter.
"""
meanall = data.mean()
meancols = data.mean(axis=0)
meanrows = data.mean(axis=1)
print("The overall data set has mean = %5.2f" % meanall)
print("The means of the columns are:", meancols)
"""
You can create new arrays from existing arrays by testing for some
condition.
"""
# The following line creates a mask that is true where y is greater than 1.5
# Note that this mask is just an array of True or False that has the same
# size as y. Therefore, you can use it to select the corresponding members
# of the x array as well.
mask = y > 1.5
print("")
print("mask = ", mask)
newx1 = x[mask]
newy1 = y[mask]
print("")
print("newy1 = ", newy1)
# You could have taken a shortcut for the above step. For example:
newy2 = y[y > 1.5]
print("newy2 = ", newy2)
"""
Finally, call the function to plot the data
"""
plt.figure(1)
plot_my_data(x, y)
plt.figure(2)
plot_my_data(newx1, newy1)
plt.show()
| {
"content_hash": "f6b6c504c0a94290c92373bdd7123d8b",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 31.492957746478872,
"alnum_prop": 0.6947674418604651,
"repo_name": "cdfassnacht/CodeCDF",
"id": "6cd0a02a845505df97251410570cd8c96c3d61e2",
"size": "4472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1977044"
},
{
"name": "DIGITAL Command Language",
"bytes": "1074"
},
{
"name": "Dockerfile",
"bytes": "836"
},
{
"name": "IDL",
"bytes": "3470777"
},
{
"name": "Perl",
"bytes": "6217"
},
{
"name": "Prolog",
"bytes": "280792"
},
{
"name": "Python",
"bytes": "341000"
},
{
"name": "Roff",
"bytes": "15608"
},
{
"name": "Shell",
"bytes": "82802"
},
{
"name": "TeX",
"bytes": "159505"
}
],
"symlink_target": ""
} |
def match(command, settings):
    """Trigger when a *.py script was invoked directly and the shell refused
    to execute it (not executable, or not on PATH)."""
    tokens = command.script.split()
    if not tokens:
        return False
    if not tokens[0].endswith('.py'):
        return False
    return ('Permission denied' in command.stderr
            or 'command not found' in command.stderr)
def get_new_command(command, settings):
    """Re-run the failing script explicitly through the python interpreter."""
    return 'python {0}'.format(command.script)
| {
"content_hash": "da96e83a4c2bd5a4204a2a3e10755e3e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 32.5,
"alnum_prop": 0.6123076923076923,
"repo_name": "dionyziz/thefuck",
"id": "507a934b14030e88bb1553168482128299c16ca7",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thefuck/rules/python_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23047"
}
],
"symlink_target": ""
} |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Arguments marked as 'Required' below must be included for upload to PyPI.
# Fields marked as 'Optional' may be commented out.

setup(
    # This is the name of your project. The first time you publish this
    # package, this name will be registered for you. It will determine how
    # users can install this project, e.g.:
    #
    # $ pip install sampleproject
    #
    # And where it will live on PyPI: https://pypi.org/project/sampleproject/
    #
    # There are some restrictions on what makes a valid project name
    # specification here:
    # https://packaging.python.org/specifications/core-metadata/#name
    name='spark-nlp',  # Required

    # Versions should comply with PEP 440:
    # https://www.python.org/dev/peps/pep-0440/
    #
    # For a discussion on single-sourcing the version across setup.py and the
    # project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='4.2.3',  # Required

    # This is a one-line description or tagline of what your project does. This
    # corresponds to the 'Summary' metadata field:
    # https://packaging.python.org/specifications/core-metadata/#summary
    description='John Snow Labs Spark NLP is a natural language processing library built on top of Apache Spark ML. It provides simple, performant & accurate NLP annotations for machine learning pipelines, that scale easily in a distributed environment.',  # Required

    # This is an optional longer description of your project that represents
    # the body of text which users will see when they visit PyPI.
    #
    # Often, this is the same as your README, so you can just read it in from
    # that file directly (as we have already done above)
    #
    # This field corresponds to the 'Description' metadata field:
    # https://packaging.python.org/specifications/core-metadata/#description-optional
    long_description=long_description,  # Optional

    # Denotes that our long_description is in Markdown; valid values are
    # text/plain, text/x-rst, and text/markdown
    #
    # Optional if long_description is written in reStructuredText (rst) but
    # required for plain-text or Markdown; if unspecified, 'applications should
    # attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
    # fall back to text/plain if it is not valid rst' (see link below)
    #
    # This field corresponds to the 'Description-Content-Type' metadata field:
    # https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
    long_description_content_type='text/markdown',  # Optional (see note above)

    # This should be a valid link to your project's main homepage.
    #
    # This field corresponds to the 'Home-Page' metadata field:
    # https://packaging.python.org/specifications/core-metadata/#home-page-optional
    url='https://github.com/JohnSnowLabs/spark-nlp',  # Optional

    # This should be your name or the name of the organization which owns the
    # project.
    author='John Snow Labs',  # Optional

    # This should be a valid email address corresponding to the author listed
    # above.
    # author_email='pypa-dev@googlegroups.com',  # Optional

    # Classifiers help users find your project by categorizing it.
    #
    # For a list of valid classifiers, see https://pypi.org/classifiers/
    classifiers=[  # Optional
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',

        # Pick your license as you wish
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Internationalization',
        'Topic :: Software Development :: Localization',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Scientific/Engineering',
        'Typing :: Typed',
    ],

    # This field adds keywords for your project which will appear on the
    # project page. What does your project relate to?
    #
    # Note that this is a string of words separated by whitespace, not a list.
    keywords='NLP spark vision speech deep learning transformer tensorflow BERT GPT-2 Wav2Vec2 ViT',  # Optional

    # You can just specify package directories manually here if your project is
    # simple. Or you can use find_packages().
    #
    # Alternatively, if you just want to distribute a single Python file, use
    # the `py_modules` argument instead as follows, which will expect a file
    # called `my_module.py` to exist:
    #
    #   py_modules=['my_module'],
    packages=find_packages(exclude=['test*', 'tmp*']),
    # NOTE(review): the value False contradicts the original comment
    # "Needed to install jar file" -- False excludes package data from the
    # install. Verify whether this should be True.
    include_package_data=False
)
| {
"content_hash": "6b9c2b48174a22d6842b091a483d495a",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 267,
"avg_line_length": 43.02054794520548,
"alnum_prop": 0.6865148861646234,
"repo_name": "JohnSnowLabs/spark-nlp",
"id": "14449a04d3e41d1e0127143f989a1418df3841dd",
"size": "6281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14452"
},
{
"name": "Java",
"bytes": "223289"
},
{
"name": "Makefile",
"bytes": "819"
},
{
"name": "Python",
"bytes": "1694517"
},
{
"name": "Scala",
"bytes": "4116435"
},
{
"name": "Shell",
"bytes": "5286"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core.urlresolvers import resolve
from lists.views import home_page,view_list
from django.http import HttpRequest
from django.template.loader import render_to_string
from lists.models import Item,List
# Create your tests here.
class ListAndItemModelsTest(TestCase):
    """Model- and view-level tests for lists and their items.

    NOTE(review): several test names contain typos ('retriving', 'empyt',
    'excepted'); renaming would change the test ids reported by the runner,
    so they are only flagged here.
    """
    def test_saving_and_retriving_items(self):
        # Two items attached to one list round-trip through the ORM intact.
        list_=List()
        list_.save()
        first_item=Item.objects.create(text='The first (ever) list item',list=list_)
        second_item=Item.objects.create(text='The second list item',list=list_)
        saved_list=List.objects.first()
        self.assertEqual(saved_list,list_)
        saved_items=Item.objects.all()
        self.assertEqual(saved_items.count(),2)
        first_saved_item=saved_items[0]
        second_saved_item=saved_items[1]
        self.assertEqual(first_saved_item.text,'The first (ever) list item')
        self.assertEqual(first_saved_item.list,list_)
        self.assertEqual(second_saved_item.list,list_)
        self.assertEqual(second_saved_item.text,'The second list item')
    def test_home_page_can_save_a_Post_request(self):
        # A POST to the home page view persists exactly one Item.
        request=HttpRequest()
        request.method='POST'
        request.POST['item_text']='A new list item'
        response=home_page(request)
        self.assertEqual(Item.objects.count(),1)
        new_item=Item.objects.first()
        self.assertEqual(new_item.text,'A new list item')
        #self.assertIn('A new list item',response.content.decode())
        # NOTE(review): excepted_html is computed but never compared against
        # the response -- the intended assertion appears to be missing.
        excepted_html=render_to_string(
            'home.html',
            {'new_item_text': 'A new list item'})
    '''def test_home_page_can_redirect(self):
        request=HttpRequest()
        request.method='POST'
        request.POST['item_text']='A new list item'
        response=home_page(request)
        self.assertEqual(response.status_code,302)
        self.assertEqual(response['location'],'/')
    '''
    def test_cannot_save_empyt_list_items(self):
        # full_clean() is expected to raise for an empty text field; save()
        # alone may succeed at the database level, hence both are in the block.
        list_=List.objects.create()
        item=Item(list=list_,text='')
        with self.assertRaises(ValidationError):
            item.save()
            item.full_clean()
| {
"content_hash": "b05dcd30ce0c3fb58685ddaa18092887",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 84,
"avg_line_length": 34.46875,
"alnum_prop": 0.655938349954669,
"repo_name": "HaoPatrick/tdd-pyhton",
"id": "d493024c98c7850e0e8ad207e0af01a910363ee1",
"size": "2206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lists/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "HTML",
"bytes": "1950"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "27485"
}
],
"symlink_target": ""
} |
from .sub_resource import SubResource
class VirtualNetworkGatewayIPConfiguration(SubResource):
    """IP configuration for virtual network gateway.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param private_ip_allocation_method: The private IP allocation method.
     Possible values are: 'Static' and 'Dynamic'. Possible values include:
     'Static', 'Dynamic'
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2017_11_01.models.IPAllocationMethod
    :param subnet: The reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param public_ip_address: The reference of the public IP resource.
    :type public_ip_address:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Attributes the caller may not set; the server populates them.
    _validation = {
        'provisioning_state': {'readonly': True},
    }

    # msrest serialization map: python attribute -> (wire key, wire type).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, id=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, name=None, etag=None):
        # 'id' is handled by the SubResource base class.
        super(VirtualNetworkGatewayIPConfiguration, self).__init__(id=id)
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        # Read-only: always starts as None and is filled in by the service.
        self.provisioning_state = None
        self.name = name
        self.etag = etag
| {
"content_hash": "3416830e2bbbf902cc14838fced58fe1",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 126,
"avg_line_length": 43.72222222222222,
"alnum_prop": 0.6670902160101652,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "55aad7008a92625b91e173db8302b47dd8c2ec3b",
"size": "2835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/virtual_network_gateway_ip_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename Publication.lead to Publication.description.

    Pure column rename; no data transformation is performed.
    """

    dependencies = [
        ('publication_backbone', '0002_auto_20151207_1759'),
    ]

    operations = [
        migrations.RenameField(
            model_name='publication',
            old_name='lead',
            new_name='description',
        ),
    ]
| {
"content_hash": "256a66368c181e1057fed5a4e8bc5cc9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 60,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.5963541666666666,
"repo_name": "Excentrics/publication-backbone",
"id": "7c14fa457a5dc1371ac05bd0d8704c9222c832f6",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publication_backbone/migrations/0003_auto_20151207_1805.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "447762"
},
{
"name": "HTML",
"bytes": "217091"
},
{
"name": "JavaScript",
"bytes": "904819"
},
{
"name": "Python",
"bytes": "470545"
}
],
"symlink_target": ""
} |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Machine-generated protobuf module (protoc output) -- descriptor offsets
# (serialized_start/serialized_end) are positions inside serialized_pb and
# must not be edited by hand.
_sym_db = _symbol_database.Default()




# File-level descriptor for geo_positions.proto (package Proto.JSON, proto3).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='geo_positions.proto',
  package='Proto.JSON',
  syntax='proto3',
  serialized_pb=b'\n\x13geo_positions.proto\x12\nProto.JSON\"\xfa\x01\n\rgeo_positions\x12<\n\tpositions\x18\x01 \x03(\x0b\x32).Proto.JSON.geo_positions.position_object\x1a\xaa\x01\n\x0fposition_object\x12\x11\n\ttimestamp\x18\x01 \x01(\t\x12\x0b\n\x03lat\x18\x02 \x01(\x01\x12\x0b\n\x03lon\x18\x03 \x01(\x01\x12\r\n\x05speed\x18\x04 \x01(\x01\x12\x0e\n\x06\x63ourse\x18\x05 \x01(\x01\x12\x11\n\televation\x18\x06 \x01(\x01\x12\x10\n\x08\x61\x63\x63uracy\x18\x07 \x01(\x01\x12\x14\n\x0cis_from_mock\x18\x08 \x01(\x08\x12\x10\n\x08provider\x18\t \x01(\tB0\n\x1enet.ktc.miles.model.proto.JSONB\x0cGeoPositionsH\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)




# Descriptor for the nested message geo_positions.position_object
# (timestamp/provider are strings, numeric fields are doubles,
# is_from_mock is a bool).
_GEO_POSITIONS_POSITION_OBJECT = _descriptor.Descriptor(
  name='position_object',
  full_name='Proto.JSON.geo_positions.position_object',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='Proto.JSON.geo_positions.position_object.timestamp', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lat', full_name='Proto.JSON.geo_positions.position_object.lat', index=1,
      number=2, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lon', full_name='Proto.JSON.geo_positions.position_object.lon', index=2,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='speed', full_name='Proto.JSON.geo_positions.position_object.speed', index=3,
      number=4, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='course', full_name='Proto.JSON.geo_positions.position_object.course', index=4,
      number=5, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='elevation', full_name='Proto.JSON.geo_positions.position_object.elevation', index=5,
      number=6, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='accuracy', full_name='Proto.JSON.geo_positions.position_object.accuracy', index=6,
      number=7, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='is_from_mock', full_name='Proto.JSON.geo_positions.position_object.is_from_mock', index=7,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='provider', full_name='Proto.JSON.geo_positions.position_object.provider', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=116,
  serialized_end=286,
)

# Descriptor for the top-level message geo_positions: a single repeated
# 'positions' field of nested position_object messages.
_GEO_POSITIONS = _descriptor.Descriptor(
  name='geo_positions',
  full_name='Proto.JSON.geo_positions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='positions', full_name='Proto.JSON.geo_positions.positions', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_GEO_POSITIONS_POSITION_OBJECT, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=36,
  serialized_end=286,
)

# Wire the nested message to its parent and to the repeated field.
_GEO_POSITIONS_POSITION_OBJECT.containing_type = _GEO_POSITIONS
_GEO_POSITIONS.fields_by_name['positions'].message_type = _GEO_POSITIONS_POSITION_OBJECT
DESCRIPTOR.message_types_by_name['geo_positions'] = _GEO_POSITIONS

# Concrete message classes built from the descriptors above;
# position_object is exposed as an attribute of geo_positions.
geo_positions = _reflection.GeneratedProtocolMessageType('geo_positions', (_message.Message,), dict(

  position_object = _reflection.GeneratedProtocolMessageType('position_object', (_message.Message,), dict(
    DESCRIPTOR = _GEO_POSITIONS_POSITION_OBJECT,
    __module__ = 'geo_positions_pb2'
    # @@protoc_insertion_point(class_scope:Proto.JSON.geo_positions.position_object)
    ))
  ,
  DESCRIPTOR = _GEO_POSITIONS,
  __module__ = 'geo_positions_pb2'
  # @@protoc_insertion_point(class_scope:Proto.JSON.geo_positions)
  ))
_sym_db.RegisterMessage(geo_positions)
_sym_db.RegisterMessage(geo_positions.position_object)


# File options: java_package / java_outer_classname for the JVM side.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\036net.ktc.miles.model.proto.JSONB\014GeoPositionsH\001')
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "8ce933ad5277edaec05d95319a9c0f88",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 627,
"avg_line_length": 40.54320987654321,
"alnum_prop": 0.7082825822168087,
"repo_name": "jameshp/deviceadminserver",
"id": "242172e804ec036427bba11dd960cabd4ad5a016",
"size": "6658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "message_creator/geo_positions_pb2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1837"
},
{
"name": "Dart",
"bytes": "3786"
},
{
"name": "Python",
"bytes": "1443391"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, division
import re
from contextlib import contextmanager
from pony.orm.core import Database
from pony.utils import import_module
def test_exception_msg(test_case, exc_msg, test_msg=None):
if test_msg is None: return
error_template = "incorrect exception message. expected '%s', got '%s'"
error_msg = error_template % (test_msg, exc_msg)
assert test_msg not in ('...', '....', '.....', '......')
if '...' not in test_msg:
test_case.assertEqual(test_msg, exc_msg, error_msg)
else:
pattern = ''.join(
'[%s]' % char for char in test_msg.replace('\\', '\\\\')
.replace('[', '\\[')
).replace('[.][.][.]', '.*')
regex = re.compile(pattern)
if not regex.match(exc_msg):
test_case.fail(error_template % (test_msg, exc_msg))
def raises_exception(exc_class, test_msg=None):
    """Decorator: the wrapped test method must raise *exc_class*.

    If the exception carries arguments, its string form is checked
    against *test_msg*; an argument-less exception requires *test_msg*
    to be ``None``.  The test fails if nothing is raised.
    """
    def decorator(func):
        def wrapper(test_case, *args, **kwargs):
            try:
                func(test_case, *args, **kwargs)
                # Reached only if the expected exception never fired.
                test_case.fail("Expected exception %s wasn't raised"
                               % exc_class.__name__)
            except exc_class as e:
                if e.args:
                    test_exception_msg(test_case, str(e), test_msg)
                else:
                    test_case.assertEqual(test_msg, None)
        # Preserve the test name so runners report it correctly.
        wrapper.__name__ = func.__name__
        return wrapper
    return decorator
@contextmanager
def raises_if(test_case, cond, exc_class, test_msg=None):
    """Context manager: the body must raise *exc_class* iff *cond* is true.

    When an exception is caught, its message is verified against
    *test_msg* (when given); when nothing is raised, *cond* must be
    falsy or the test fails.
    """
    not_raised_msg = "Expected exception %s wasn't raised" % exc_class.__name__
    try:
        yield
    except exc_class as exc:
        test_case.assertTrue(cond)
        test_exception_msg(test_case, str(exc), test_msg)
    else:
        test_case.assertFalse(cond, not_raised_msg)
def flatten(x):
    """Return a flat list of the leaves of *x*.

    Any iterable element except a string is flattened recursively;
    strings (and non-iterables) are kept whole as leaves.
    """
    flat = []
    for item in x:
        nested = hasattr(item, "__iter__") and not isinstance(item, str)
        if nested:
            flat += flatten(item)
        else:
            flat.append(item)
    return flat
class TestConnection(object):
    """Stub DB-API connection: commits/rollbacks are no-ops and every
    cursor request returns the shared module-level test_cursor."""

    def __init__(self, database):
        self.database = database
        # The postgres provider expects autocommit mode on its connections.
        if database and database.provider_name == 'postgres':
            self.autocommit = True

    def commit(self):
        pass

    def rollback(self):
        pass

    def cursor(self):
        return test_cursor
class TestCursor(object):
    """Stub DB-API cursor: every statement succeeds and yields no rows."""

    def __init__(self):
        self.description = []
        self.rowcount = 0

    def execute(self, sql, args=None):
        pass

    def fetchone(self):
        return None

    def fetchmany(self, size):
        return []

    def fetchall(self):
        return []


# Shared singleton cursor handed out by every TestConnection.
test_cursor = TestCursor()
class TestPool(object):
    """Stub connection pool: hands out fresh stub connections and makes
    release/drop/disconnect no-ops."""

    def __init__(self, database):
        self.database = database

    def connect(self):
        # The second element mimics the "connection is new" flag.
        return TestConnection(self.database), True

    def release(self, con):
        pass

    def drop(self, con):
        pass

    def disconnect(self):
        pass
class TestDatabase(Database):
    """Database subclass for tests: binds a mocked provider/pool so no
    real server is contacted, and records the last generated SQL."""
    # When set, overrides the provider name used to import the driver module.
    real_provider_name = None
    # When set, overrides the fake server version string chosen in bind().
    raw_server_version = None
    # Last SQL statement passed to _exec_sql (for assertions in tests).
    sql = None
    def bind(self, provider, *args, **kwargs):
        """Bind to a provider by name, substituting a mock pool and a
        TestProvider subclass with a canned server version."""
        provider_name = provider
        assert isinstance(provider_name, str)
        if self.real_provider_name is not None:
            provider_name = self.real_provider_name
        self.provider_name = provider_name
        provider_module = import_module('pony.orm.dbproviders.' + provider_name)
        provider_cls = provider_module.provider_cls
        raw_server_version = self.raw_server_version
        # Default fake server versions per backend.
        if raw_server_version is None:
            if provider_name == 'sqlite': raw_server_version = '3.7.17'
            elif provider_name in ('postgres', 'pygresql'): raw_server_version = '9.2'
            elif provider_name == 'oracle': raw_server_version = '11.2.0.2.0'
            elif provider_name == 'mysql': raw_server_version = '5.6.11'
            else: assert False, provider_name  # pragma: no cover
        # Normalize 'X.Y' to a three-component tuple (X, Y, 0).
        t = [ int(component) for component in raw_server_version.split('.') ]
        if len(t) == 2: t.append(0)
        server_version = tuple(t)
        # Postgres encodes its version as a single int, e.g. 90200 for 9.2.0.
        if provider_name in ('postgres', 'pygresql'):
            server_version = int('%d%02d%02d' % server_version)
        class TestProvider(provider_cls):
            json1_available = False  # for SQLite
            def inspect_connection(provider, connection):
                # Skip the real connection introspection -- no server exists.
                pass
        TestProvider.server_version = server_version
        # Disable the connection check and inject the stub pool.
        kwargs['pony_check_connection'] = False
        kwargs['pony_pool_mockup'] = TestPool(self)
        Database.bind(self, TestProvider, *args, **kwargs)
    def _execute(database, sql, globals, locals, frame_depth):
        # Tests must never reach raw _execute.
        assert False  # pragma: no cover
    def _exec_sql(database, sql, arguments=None, returning_id=False):
        """Record the SQL/arguments instead of executing, and return the
        shared stub cursor."""
        assert type(arguments) is not list and not returning_id
        database.sql = sql
        database.arguments = arguments
        return test_cursor
    def generate_mapping(database, filename=None, check_tables=True, create_tables=False):
        # Force-skip table checks/creation: there is no real database.
        return Database.generate_mapping(database, filename, create_tables=False, check_tables=False)
| {
"content_hash": "910a41ed385fba4971ebe2ec55767aed",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 101,
"avg_line_length": 36.19285714285714,
"alnum_prop": 0.6003552397868561,
"repo_name": "ponyorm/pony",
"id": "f4cdb32731d698201b2d3e01b3708aed4cd1b697",
"size": "5067",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pony/orm/tests/testutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1254"
},
{
"name": "HTML",
"bytes": "3440"
},
{
"name": "JavaScript",
"bytes": "26609"
},
{
"name": "Python",
"bytes": "1286176"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from olympia.ratings.api_urls import ratings_v3, ratings_v4
# API v3 routes. Note: v3 exposes ratings under the legacy 'reviews/'
# prefix via the ratings_v3 router.
v3_api_urls = [
    url(r'^abuse/', include('olympia.abuse.urls')),
    url(r'^accounts/', include('olympia.accounts.urls')),
    url(r'^addons/', include('olympia.addons.api_urls')),
    url(r'^', include('olympia.discovery.api_urls')),
    url(r'^reviews/', include(ratings_v3.urls)),
    url(r'^reviewers/', include('olympia.reviewers.api_urls')),
    url(r'^', include('olympia.signing.urls')),
    url(r'^activity/', include('olympia.activity.urls')),
    url(r'^github/', include('olympia.github.urls')),
]

# API v4 routes. Identical to v3 except ratings moved to 'ratings/'
# and use the ratings_v4 router.
v4_api_urls = [
    url(r'^abuse/', include('olympia.abuse.urls')),
    url(r'^accounts/', include('olympia.accounts.urls')),
    url(r'^addons/', include('olympia.addons.api_urls')),
    url(r'^', include('olympia.discovery.api_urls')),
    url(r'^ratings/', include(ratings_v4.urls)),
    url(r'^reviewers/', include('olympia.reviewers.api_urls')),
    url(r'^', include('olympia.signing.urls')),
    url(r'^activity/', include('olympia.activity.urls')),
    url(r'^github/', include('olympia.github.urls')),
]

# Mount each API version under its prefix; 'v4dev' reuses the v4 url
# list under a separate namespace.
urlpatterns = [
    url(r'^v3/', include(v3_api_urls, namespace='v3')),
    url(r'^v4/', include(v4_api_urls, namespace='v4')),
    url(r'^v4dev/', include(v4_api_urls, namespace='v4dev')),
]
| {
"content_hash": "8fd89a50944409620b828feb37b1df25",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 63,
"avg_line_length": 38.970588235294116,
"alnum_prop": 0.6407547169811321,
"repo_name": "atiqueahmedziad/addons-server",
"id": "58be5e1991bc82378ba307a23697b2e2efe391d8",
"size": "1325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/api/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "810065"
},
{
"name": "Dockerfile",
"bytes": "2868"
},
{
"name": "HTML",
"bytes": "599024"
},
{
"name": "JavaScript",
"bytes": "1070220"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5272277"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11171"
},
{
"name": "Smarty",
"bytes": "1497"
}
],
"symlink_target": ""
} |
import copy
import datetime
import hashlib
import uuid
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_utils import timeutils
import six
from six.moves import range
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import utils as test_utils
from keystone.token import provider
CONF = cfg.CONF
NULL_OBJECT = object()
class TokenTests(object):
def _create_token_id(self):
# Use a token signed by the cms module
token_id = ""
for i in range(1, 20):
token_id += uuid.uuid4().hex
return cms.cms_sign_token(token_id,
CONF.signing.certfile,
CONF.signing.keyfile)
def _assert_revoked_token_list_matches_token_persistence(
self, revoked_token_id_list):
# Assert that the list passed in matches the list returned by the
# token persistence service
persistence_list = [
x['id']
for x in self.token_provider_api.list_revoked_tokens()
]
self.assertEqual(persistence_list, revoked_token_id_list)
def test_token_crud(self):
token_id = self._create_token_id()
data = {'id': token_id, 'a': 'b',
'trust_id': None,
'user': {'id': 'testuserid'},
'token_data': {'access': {'token': {
'audit_ids': [uuid.uuid4().hex]}}}}
data_ref = self.token_provider_api._persistence.create_token(token_id,
data)
expires = data_ref.pop('expires')
data_ref.pop('user_id')
self.assertIsInstance(expires, datetime.datetime)
data_ref.pop('id')
data.pop('id')
self.assertDictEqual(data, data_ref)
new_data_ref = self.token_provider_api._persistence.get_token(token_id)
expires = new_data_ref.pop('expires')
self.assertIsInstance(expires, datetime.datetime)
new_data_ref.pop('user_id')
new_data_ref.pop('id')
self.assertEqual(data, new_data_ref)
self.token_provider_api._persistence.delete_token(token_id)
self.assertRaises(
exception.TokenNotFound,
self.token_provider_api._persistence.get_token, token_id)
self.assertRaises(
exception.TokenNotFound,
self.token_provider_api._persistence.delete_token, token_id)
def create_token_sample_data(self, token_id=None, tenant_id=None,
trust_id=None, user_id=None, expires=None):
if token_id is None:
token_id = self._create_token_id()
if user_id is None:
user_id = 'testuserid'
# FIXME(morganfainberg): These tokens look nothing like "Real" tokens.
# This should be fixed when token issuance is cleaned up.
data = {'id': token_id, 'a': 'b',
'user': {'id': user_id},
'access': {'token': {'audit_ids': [uuid.uuid4().hex]}}}
if tenant_id is not None:
data['tenant'] = {'id': tenant_id, 'name': tenant_id}
if tenant_id is NULL_OBJECT:
data['tenant'] = None
if expires is not None:
data['expires'] = expires
if trust_id is not None:
data['trust_id'] = trust_id
data['access'].setdefault('trust', {})
# Testuserid2 is used here since a trustee will be different in
# the cases of impersonation and therefore should not match the
# token's user_id.
data['access']['trust']['trustee_user_id'] = 'testuserid2'
data['token_version'] = provider.V2
# Issue token stores a copy of all token data at token['token_data'].
# This emulates that assumption as part of the test.
data['token_data'] = copy.deepcopy(data)
new_token = self.token_provider_api._persistence.create_token(token_id,
data)
return new_token['id'], data
def test_delete_tokens(self):
tokens = self.token_provider_api._persistence._list_tokens(
'testuserid')
self.assertEqual(0, len(tokens))
token_id1, data = self.create_token_sample_data(
tenant_id='testtenantid')
token_id2, data = self.create_token_sample_data(
tenant_id='testtenantid')
token_id3, data = self.create_token_sample_data(
tenant_id='testtenantid',
user_id='testuserid1')
tokens = self.token_provider_api._persistence._list_tokens(
'testuserid')
self.assertEqual(2, len(tokens))
self.assertIn(token_id2, tokens)
self.assertIn(token_id1, tokens)
self.token_provider_api._persistence.delete_tokens(
user_id='testuserid',
tenant_id='testtenantid')
tokens = self.token_provider_api._persistence._list_tokens(
'testuserid')
self.assertEqual(0, len(tokens))
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._persistence.get_token,
token_id1)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._persistence.get_token,
token_id2)
self.token_provider_api._persistence.get_token(token_id3)
def test_delete_tokens_trust(self):
tokens = self.token_provider_api._persistence._list_tokens(
user_id='testuserid')
self.assertEqual(0, len(tokens))
token_id1, data = self.create_token_sample_data(
tenant_id='testtenantid',
trust_id='testtrustid')
token_id2, data = self.create_token_sample_data(
tenant_id='testtenantid',
user_id='testuserid1',
trust_id='testtrustid1')
tokens = self.token_provider_api._persistence._list_tokens(
'testuserid')
self.assertEqual(1, len(tokens))
self.assertIn(token_id1, tokens)
self.token_provider_api._persistence.delete_tokens(
user_id='testuserid',
tenant_id='testtenantid',
trust_id='testtrustid')
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._persistence.get_token,
token_id1)
self.token_provider_api._persistence.get_token(token_id2)
def _test_token_list(self, token_list_fn):
tokens = token_list_fn('testuserid')
self.assertEqual(0, len(tokens))
token_id1, data = self.create_token_sample_data()
tokens = token_list_fn('testuserid')
self.assertEqual(1, len(tokens))
self.assertIn(token_id1, tokens)
token_id2, data = self.create_token_sample_data()
tokens = token_list_fn('testuserid')
self.assertEqual(2, len(tokens))
self.assertIn(token_id2, tokens)
self.assertIn(token_id1, tokens)
self.token_provider_api._persistence.delete_token(token_id1)
tokens = token_list_fn('testuserid')
self.assertIn(token_id2, tokens)
self.assertNotIn(token_id1, tokens)
self.token_provider_api._persistence.delete_token(token_id2)
tokens = token_list_fn('testuserid')
self.assertNotIn(token_id2, tokens)
self.assertNotIn(token_id1, tokens)
# tenant-specific tokens
tenant1 = uuid.uuid4().hex
tenant2 = uuid.uuid4().hex
token_id3, data = self.create_token_sample_data(tenant_id=tenant1)
token_id4, data = self.create_token_sample_data(tenant_id=tenant2)
# test for existing but empty tenant (LP:1078497)
token_id5, data = self.create_token_sample_data(tenant_id=NULL_OBJECT)
tokens = token_list_fn('testuserid')
self.assertEqual(3, len(tokens))
self.assertNotIn(token_id1, tokens)
self.assertNotIn(token_id2, tokens)
self.assertIn(token_id3, tokens)
self.assertIn(token_id4, tokens)
self.assertIn(token_id5, tokens)
tokens = token_list_fn('testuserid', tenant2)
self.assertEqual(1, len(tokens))
self.assertNotIn(token_id1, tokens)
self.assertNotIn(token_id2, tokens)
self.assertNotIn(token_id3, tokens)
self.assertIn(token_id4, tokens)
def test_token_list(self):
self._test_token_list(
self.token_provider_api._persistence._list_tokens)
def test_token_list_trust(self):
trust_id = uuid.uuid4().hex
token_id5, data = self.create_token_sample_data(trust_id=trust_id)
tokens = self.token_provider_api._persistence._list_tokens(
'testuserid', trust_id=trust_id)
self.assertEqual(1, len(tokens))
self.assertIn(token_id5, tokens)
def test_get_token_returns_not_found(self):
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._persistence.get_token,
uuid.uuid4().hex)
def test_delete_token_returns_not_found(self):
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._persistence.delete_token,
uuid.uuid4().hex)
def test_expired_token(self):
token_id = uuid.uuid4().hex
expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1)
data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
'expires': expire_time,
'trust_id': None,
'user': {'id': 'testuserid'}}
data_ref = self.token_provider_api._persistence.create_token(token_id,
data)
data_ref.pop('user_id')
self.assertDictEqual(data, data_ref)
self.assertRaises(exception.TokenNotFound,
self.token_provider_api._persistence.get_token,
token_id)
def test_null_expires_token(self):
token_id = uuid.uuid4().hex
data = {'id': token_id, 'id_hash': token_id, 'a': 'b', 'expires': None,
'user': {'id': 'testuserid'}}
data_ref = self.token_provider_api._persistence.create_token(token_id,
data)
self.assertIsNotNone(data_ref['expires'])
new_data_ref = self.token_provider_api._persistence.get_token(token_id)
# MySQL doesn't store microseconds, so discard them before testing
data_ref['expires'] = data_ref['expires'].replace(microsecond=0)
new_data_ref['expires'] = new_data_ref['expires'].replace(
microsecond=0)
self.assertEqual(data_ref, new_data_ref)
def check_list_revoked_tokens(self, token_infos):
revocation_list = self.token_provider_api.list_revoked_tokens()
revoked_ids = [x['id'] for x in revocation_list]
revoked_audit_ids = [x['audit_id'] for x in revocation_list]
self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
for token_id, audit_id in token_infos:
self.assertIn(token_id, revoked_ids)
self.assertIn(audit_id, revoked_audit_ids)
def delete_token(self):
token_id = uuid.uuid4().hex
audit_id = uuid.uuid4().hex
data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
'user': {'id': 'testuserid'},
'token_data': {'token': {'audit_ids': [audit_id]}}}
data_ref = self.token_provider_api._persistence.create_token(token_id,
data)
self.token_provider_api._persistence.delete_token(token_id)
self.assertRaises(
exception.TokenNotFound,
self.token_provider_api._persistence.get_token,
data_ref['id'])
self.assertRaises(
exception.TokenNotFound,
self.token_provider_api._persistence.delete_token,
data_ref['id'])
return (token_id, audit_id)
def test_list_revoked_tokens_returns_empty_list(self):
revoked_ids = [x['id']
for x in self.token_provider_api.list_revoked_tokens()]
self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
self.assertEqual([], revoked_ids)
def test_list_revoked_tokens_for_single_token(self):
self.check_list_revoked_tokens([self.delete_token()])
def test_list_revoked_tokens_for_multiple_tokens(self):
self.check_list_revoked_tokens([self.delete_token()
for x in range(2)])
def test_flush_expired_token(self):
token_id = uuid.uuid4().hex
expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1)
data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
'expires': expire_time,
'trust_id': None,
'user': {'id': 'testuserid'}}
data_ref = self.token_provider_api._persistence.create_token(token_id,
data)
data_ref.pop('user_id')
self.assertDictEqual(data, data_ref)
token_id = uuid.uuid4().hex
expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1)
data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
'expires': expire_time,
'trust_id': None,
'user': {'id': 'testuserid'}}
data_ref = self.token_provider_api._persistence.create_token(token_id,
data)
data_ref.pop('user_id')
self.assertDictEqual(data, data_ref)
self.token_provider_api._persistence.flush_expired_tokens()
tokens = self.token_provider_api._persistence._list_tokens(
'testuserid')
self.assertEqual(1, len(tokens))
self.assertIn(token_id, tokens)
@unit.skip_if_cache_disabled('token')
def test_revocation_list_cache(self):
expire_time = timeutils.utcnow() + datetime.timedelta(minutes=10)
token_id = uuid.uuid4().hex
token_data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
'expires': expire_time,
'trust_id': None,
'user': {'id': 'testuserid'},
'token_data': {'token': {
'audit_ids': [uuid.uuid4().hex]}}}
token2_id = uuid.uuid4().hex
token2_data = {'id_hash': token2_id, 'id': token2_id, 'a': 'b',
'expires': expire_time,
'trust_id': None,
'user': {'id': 'testuserid'},
'token_data': {'token': {
'audit_ids': [uuid.uuid4().hex]}}}
# Create 2 Tokens.
self.token_provider_api._persistence.create_token(token_id,
token_data)
self.token_provider_api._persistence.create_token(token2_id,
token2_data)
# Verify the revocation list is empty.
self.assertEqual(
[], self.token_provider_api._persistence.list_revoked_tokens())
self.assertEqual([], self.token_provider_api.list_revoked_tokens())
# Delete a token directly, bypassing the manager.
self.token_provider_api._persistence.driver.delete_token(token_id)
# Verify the revocation list is still empty.
self.assertEqual(
[], self.token_provider_api._persistence.list_revoked_tokens())
self.assertEqual([], self.token_provider_api.list_revoked_tokens())
# Invalidate the revocation list.
self.token_provider_api._persistence.invalidate_revocation_list()
# Verify the deleted token is in the revocation list.
revoked_ids = [x['id']
for x in self.token_provider_api.list_revoked_tokens()]
self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
self.assertIn(token_id, revoked_ids)
# Delete the second token, through the manager
self.token_provider_api._persistence.delete_token(token2_id)
revoked_ids = [x['id']
for x in self.token_provider_api.list_revoked_tokens()]
self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
# Verify both tokens are in the revocation list.
self.assertIn(token_id, revoked_ids)
self.assertIn(token2_id, revoked_ids)
def _test_predictable_revoked_pki_token_id(self, hash_fn):
token_id = self._create_token_id()
token_id_hash = hash_fn(token_id.encode('utf-8')).hexdigest()
token = {'user': {'id': uuid.uuid4().hex},
'token_data': {'token': {'audit_ids': [uuid.uuid4().hex]}}}
self.token_provider_api._persistence.create_token(token_id, token)
self.token_provider_api._persistence.delete_token(token_id)
revoked_ids = [x['id']
for x in self.token_provider_api.list_revoked_tokens()]
self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
self.assertIn(token_id_hash, revoked_ids)
self.assertNotIn(token_id, revoked_ids)
for t in self.token_provider_api._persistence.list_revoked_tokens():
self.assertIn('expires', t)
    def test_predictable_revoked_pki_token_id_default(self):
        """Revoked PKI token ids use md5 by default."""
        self._test_predictable_revoked_pki_token_id(hashlib.md5)
    def test_predictable_revoked_pki_token_id_sha256(self):
        """Revoked PKI token ids honor the configured sha256 algorithm."""
        self.config_fixture.config(group='token', hash_algorithm='sha256')
        self._test_predictable_revoked_pki_token_id(hashlib.sha256)
    def test_predictable_revoked_uuid_token_id(self):
        """A deleted UUID token appears verbatim in the revocation list.

        Unlike PKI tokens, UUID token ids are not hashed before being
        recorded as revoked.
        """
        token_id = uuid.uuid4().hex
        token = {'user': {'id': uuid.uuid4().hex},
                 'token_data': {'token': {'audit_ids': [uuid.uuid4().hex]}}}
        self.token_provider_api._persistence.create_token(token_id, token)
        self.token_provider_api._persistence.delete_token(token_id)
        revoked_tokens = self.token_provider_api.list_revoked_tokens()
        revoked_ids = [x['id'] for x in revoked_tokens]
        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
        self.assertIn(token_id, revoked_ids)
        # Every revocation entry carries an expiry timestamp.
        for t in revoked_tokens:
            self.assertIn('expires', t)
    def test_create_unicode_token_id(self):
        """A token created with a unicode id can be fetched back."""
        token_id = six.text_type(self._create_token_id())
        self.create_token_sample_data(token_id=token_id)
        self.token_provider_api._persistence.get_token(token_id)
    def test_create_unicode_user_id(self):
        """A token created for a unicode user id can be fetched back."""
        user_id = six.text_type(uuid.uuid4().hex)
        token_id, data = self.create_token_sample_data(user_id=user_id)
        self.token_provider_api._persistence.get_token(token_id)
    def test_token_expire_timezone(self):
        """Token expiry is honored regardless of the process timezone.

        For a range of TZ offsets: a token expiring one minute in the
        future must be retrievable; one that expired a minute ago must
        raise TokenNotFound.
        """
        @test_utils.timezone
        def _create_token(expire_time):
            token_id = uuid.uuid4().hex
            user_id = six.text_type(uuid.uuid4().hex)
            return self.create_token_sample_data(token_id=token_id,
                                                 user_id=user_id,
                                                 expires=expire_time)

        for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
            test_utils.TZ = 'UTC' + d
            expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1)
            token_id, data_in = _create_token(expire_time)
            data_get = self.token_provider_api._persistence.get_token(token_id)
            self.assertEqual(data_in['id'], data_get['id'],
                             'TZ=%s' % test_utils.TZ)
            expire_time_expired = (
                timeutils.utcnow() + datetime.timedelta(minutes=-1))
            token_id, data_in = _create_token(expire_time_expired)
            self.assertRaises(exception.TokenNotFound,
                              self.token_provider_api._persistence.get_token,
                              data_in['id'])
class TokenCacheInvalidation(object):
    """Checks that deleting tokens invalidates cached validations.

    ``_create_test_data`` issues one scoped and one unscoped v2 token and
    primes the validation cache; the test methods then delete tokens in
    various ways and assert the cache does not serve stale results.
    """

    def _create_test_data(self):
        self.user = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id)
        self.tenant = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id)
        # Create an equivalent of a scoped token
        token_dict = {'user': self.user, 'tenant': self.tenant,
                      'metadata': {}, 'id': 'placeholder'}
        token_id, data = self.token_provider_api.issue_v2_token(token_dict)
        self.scoped_token_id = token_id
        # ..and an un-scoped one
        token_dict = {'user': self.user, 'tenant': None,
                      'metadata': {}, 'id': 'placeholder'}
        token_id, data = self.token_provider_api.issue_v2_token(token_dict)
        self.unscoped_token_id = token_id
        # Validate them, in the various ways possible - this will load the
        # responses into the token cache.
        self._check_scoped_tokens_are_valid()
        self._check_unscoped_tokens_are_valid()

    def _check_unscoped_tokens_are_invalid(self):
        # Both v3 and v2 validation must refuse the unscoped token.
        self.assertRaises(
            exception.TokenNotFound,
            self.token_provider_api.validate_token,
            self.unscoped_token_id)
        self.assertRaises(
            exception.TokenNotFound,
            self.token_provider_api.validate_v2_token,
            self.unscoped_token_id)

    def _check_scoped_tokens_are_invalid(self):
        # Validation must fail both with and without the tenant argument.
        self.assertRaises(
            exception.TokenNotFound,
            self.token_provider_api.validate_token,
            self.scoped_token_id)
        self.assertRaises(
            exception.TokenNotFound,
            self.token_provider_api.validate_token,
            self.scoped_token_id,
            self.tenant['id'])
        self.assertRaises(
            exception.TokenNotFound,
            self.token_provider_api.validate_v2_token,
            self.scoped_token_id)
        self.assertRaises(
            exception.TokenNotFound,
            self.token_provider_api.validate_v2_token,
            self.scoped_token_id,
            self.tenant['id'])

    def _check_scoped_tokens_are_valid(self):
        self.token_provider_api.validate_token(self.scoped_token_id)
        self.token_provider_api.validate_token(
            self.scoped_token_id, belongs_to=self.tenant['id'])
        self.token_provider_api.validate_v2_token(self.scoped_token_id)
        self.token_provider_api.validate_v2_token(
            self.scoped_token_id, belongs_to=self.tenant['id'])

    def _check_unscoped_tokens_are_valid(self):
        self.token_provider_api.validate_token(self.unscoped_token_id)
        self.token_provider_api.validate_v2_token(self.unscoped_token_id)

    def test_delete_unscoped_token(self):
        # Deleting the unscoped token must not affect the scoped one.
        self.token_provider_api._persistence.delete_token(
            self.unscoped_token_id)
        self._check_unscoped_tokens_are_invalid()
        self._check_scoped_tokens_are_valid()

    def test_delete_scoped_token_by_id(self):
        # Deleting the scoped token must not affect the unscoped one.
        self.token_provider_api._persistence.delete_token(self.scoped_token_id)
        self._check_scoped_tokens_are_invalid()
        self._check_unscoped_tokens_are_valid()

    def test_delete_scoped_token_by_user(self):
        self.token_provider_api._persistence.delete_tokens(self.user['id'])
        # Since we are deleting all tokens for this user, they should all
        # now be invalid.
        self._check_scoped_tokens_are_invalid()
        self._check_unscoped_tokens_are_invalid()

    def test_delete_scoped_token_by_user_and_tenant(self):
        # Only tokens scoped to this tenant are deleted; the unscoped
        # token stays valid.
        self.token_provider_api._persistence.delete_tokens(
            self.user['id'],
            tenant_id=self.tenant['id'])
        self._check_scoped_tokens_are_invalid()
        self._check_unscoped_tokens_are_valid()
| {
"content_hash": "dc87ea6c6e69869be8d8ecba02fc9004",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 79,
"avg_line_length": 44.59554730983302,
"alnum_prop": 0.5842659233681408,
"repo_name": "klmitch/keystone",
"id": "feb7e017ac931db84bc5fd3ea4b0e600933fdc03",
"size": "24583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/token/test_backends.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4405298"
}
],
"symlink_target": ""
} |
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from flaskMDL.settings import ProdConfig
from flaskMDL.assets import assets
from flaskMDL.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
)
from flaskMDL import public, user
def create_app(config_object=ProdConfig):
    """Application factory, as explained here:
    http://flask.pocoo.org/docs/patterns/appfactories/

    :param config_object: The configuration object to use.
    :returns: a fully wired :class:`flask.Flask` application.
    """
    application = Flask(__name__)
    application.config.from_object(config_object)
    # Wire up extensions, blueprints and error handlers in order.
    for register in (register_extensions, register_blueprints,
                     register_errorhandlers):
        register(application)
    return application
def register_extensions(app):
    """Bind every Flask extension instance to *app*."""
    for extension in (assets, bcrypt, cache, db, login_manager,
                      debug_toolbar):
        extension.init_app(app)
    # Migrate additionally needs the SQLAlchemy instance.
    migrate.init_app(app, db)
    return None
def register_blueprints(app):
    """Attach the public and user blueprints to *app*."""
    for blueprint in (public.views.blueprint, user.views.blueprint):
        app.register_blueprint(blueprint)
    return None
def register_errorhandlers(app):
    """Install a template-rendering handler for common HTTP errors."""
    def render_error(error):
        # Non-HTTP exceptions carry no `code` attribute; treat as 500.
        code = getattr(error, 'code', 500)
        return render_template("{0}.html".format(code)), code

    for error_code in (401, 404, 500):
        app.errorhandler(error_code)(render_error)
    return None
| {
"content_hash": "71bb0c7c615fff97ddb31e00f1a11aed",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 73,
"avg_line_length": 26.672727272727272,
"alnum_prop": 0.6939331970006817,
"repo_name": "jimmyho/Flask-Material-Lite",
"id": "ea52456463c95d089b93e335f7dd6e47b5af1df4",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskMDL/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10089"
},
{
"name": "HTML",
"bytes": "21401"
},
{
"name": "JavaScript",
"bytes": "85"
},
{
"name": "Python",
"bytes": "24242"
}
],
"symlink_target": ""
} |
from saw.mods import Mod
class Filter:
    """Registry of filter plugins discovered via :class:`saw.mods.Mod`.

    Filter modules are loaded lazily on first use and cached at class
    level, so repeated lookups do not re-scan the package.
    """

    _filters = dict()
    _loaded = False

    @classmethod
    def init(cls):
        """Load the filter modules once; later calls are no-ops."""
        if not cls._loaded:
            cls._filters = Mod.load_modules(__file__, 'saw.filters')
            cls._loaded = True

    @classmethod
    def exists(cls, name):
        """Return True when a filter called *name* is registered."""
        cls.init()
        return name in cls._filters

    @classmethod
    def get(cls, filter_name, node):
        """Return a callable applying *filter_name* to *node*.

        Positional/keyword arguments given to the returned callable are
        forwarded to the filter's ``filter`` method after *node*.
        """
        if not cls.exists(filter_name):
            raise Exception("Filter not found!")
        filter_class = cls._filters[filter_name]
        if not hasattr(filter_class, 'filter'):
            raise Exception("Filter '%s' has not main method!" % filter_name)

        def callback(*args, **kw):
            return filter_class().filter(node, *args, **kw)
        return callback
return callback | {
"content_hash": "9648b63c7ed1bf17125a87a1d6db12b3",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 28.258064516129032,
"alnum_prop": 0.5856164383561644,
"repo_name": "diNard/Saw",
"id": "21f393f98c1d85a67867dc16ba8544304446dff0",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saw/filters/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54587"
}
],
"symlink_target": ""
} |
from flask import redirect, g
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from superset import appbuilder
from superset.models.sql_lab import Query, SavedQuery
from .base import SupersetModelView, BaseSupersetView, DeleteMixin
class QueryView(SupersetModelView):
    """Read-only listing of SQL Lab query history."""

    datamodel = SQLAInterface(Query)
    list_columns = ['user', 'database', 'status', 'start_time', 'end_time']
    label_columns = {
        'user': _('User'),
        'database': _('Database'),
        'status': _('Status'),
        'start_time': _('Start Time'),
        'end_time': _('End Time'),
    }
# Expose the query history under the "Manage" menu.
appbuilder.add_view(
    QueryView,
    "Queries",
    label=__("Queries"),
    category="Manage",
    category_label=__("Manage"),
    icon="fa-search")
class SavedQueryView(SupersetModelView, DeleteMixin):
    """CRUD view over user-saved SQL Lab queries."""

    datamodel = SQLAInterface(SavedQuery)

    list_title = _('List Saved Query')
    show_title = _('Show Saved Query')
    add_title = _('Add Saved Query')
    edit_title = _('Edit Saved Query')

    list_columns = [
        'label', 'user', 'database', 'schema', 'description',
        'modified', 'pop_tab_link']
    show_columns = [
        'id', 'label', 'user', 'database',
        'description', 'sql', 'pop_tab_link']
    search_columns = ('label', 'user', 'database', 'schema', 'changed_on')
    add_columns = ['label', 'database', 'description', 'sql']
    edit_columns = add_columns
    base_order = ('changed_on', 'desc')
    label_columns = {
        'label': _('Label'),
        'user': _('User'),
        'database': _('Database'),
        'description': _('Description'),
        'modified': _('Modified'),
        'end_time': _('End Time'),
        'pop_tab_link': _('Pop Tab Link'),
        'changed_on': _('Changed on'),
    }

    def pre_add(self, obj):
        # Stamp the saved query with the logged-in user.
        obj.user = g.user

    def pre_update(self, obj):
        # Edits keep ownership consistent with the current user.
        self.pre_add(obj)
class SavedQueryViewApi(SavedQueryView):
    """Trimmed-down column set used by the JSON API endpoints."""

    show_columns = ['label', 'db_id', 'schema', 'description', 'sql']
    add_columns = show_columns
    edit_columns = add_columns
# The API/list variants are reachable by URL only; saved queries get a
# dedicated link under the SQL Lab menu instead of their own menu entry.
appbuilder.add_view_no_menu(SavedQueryViewApi)
appbuilder.add_view_no_menu(SavedQueryView)
appbuilder.add_link(
    __('Saved Queries'),
    href='/sqllab/my_queries/',
    icon="fa-save",
    category='SQL Lab')
class SqlLab(BaseSupersetView):
    """Endpoints backing the SQL Lab menu links."""

    @expose("/my_queries/")
    def my_queries(self):
        """Redirect to the saved-query list filtered to the current user."""
        return redirect(
            '/savedqueryview/list/?_flt_0_user={}'.format(g.user.id))
appbuilder.add_view_no_menu(SqlLab)
| {
"content_hash": "f02bd8b32a2330a31ba242d79b022af5",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 28.69148936170213,
"alnum_prop": 0.6129032258064516,
"repo_name": "FrederichCheng/incubator-superset",
"id": "eea71d467abfba2b3943d2343825a635f6e3b291",
"size": "2697",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "superset/views/sql_lab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "66366"
},
{
"name": "HTML",
"bytes": "101805"
},
{
"name": "JavaScript",
"bytes": "647403"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "744524"
},
{
"name": "Shell",
"bytes": "1046"
}
],
"symlink_target": ""
} |
import sys
SUITE = 'cpu2006'
def abort():
    """Print the usage banner to stderr and terminate with status 1."""
    usage = 'Usage: configure.py {}-<benchmark name>-<input size>\n'
    sys.stderr.write(usage.format(SUITE))
    sys.exit(1)
def decompose(program):
    """Split a ``<suite>-<benchmark>-<input>`` name into its last two parts.

    Exits via :func:`abort` when *program* is not a three-part name.
    """
    parts = program.split('-')
    if len(parts) != 3:
        abort()
    _, benchmark, size = parts
    return benchmark, size
def process(programs):
    """Dispatch to single- or multi-benchmark argument generation."""
    if not programs:
        abort()
    if len(programs) == 1:
        return process_single(programs[0])
    return process_multiple(programs)
def process_single(program):
    """Build simulator arguments for one single-core benchmark run."""
    benchmark, size = decompose(program)
    program_name = '{}-{}'.format(SUITE, benchmark)
    return ['-n', 1, '-p', program_name, '-i', size]
def process_multiple(programs):
    """Build simulator arguments for several benchmarks, one core each."""
    specs = []
    for program in programs:
        benchmark, size = decompose(program)
        # Each benchmark spec is pinned to a single core.
        specs.append('{}-{}-{}-{}'.format(SUITE, benchmark, size, 1))
    joined = ','.join(specs)
    # One core per benchmark, so the total equals the benchmark count.
    return ['-n', len(specs), '--benchmarks={}'.format(joined)]
# Translate the CLI benchmark names into simulator arguments and emit
# them space-separated on stdout (consumed by a calling shell script).
arguments = process(sys.argv[1:])
sys.stdout.write(' '.join([str(argument) for argument in arguments]))
| {
"content_hash": "9baf543782aa63a3b5048ac9e5a30930",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 92,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.616289592760181,
"repo_name": "learning-on-chip/studio",
"id": "b2d5f62334de0685eb314f04eb89980c79991564",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "program/cpu2006/configure.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2964"
},
{
"name": "Python",
"bytes": "92805"
}
],
"symlink_target": ""
} |
from ..utils.data_utils import get_file
import numpy as np
def load_data(path='boston_housing.npz', seed=113, test_split=0.2):
    """Loads the Boston Housing dataset.

    # Arguments
        path: path where to cache the dataset locally
            (relative to ~/.keras/datasets).
        seed: Random seed for shuffling the data
            before computing the test split.
        test_split: fraction of the data to reserve as test set.

    # Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    assert 0 <= test_split < 1
    path = get_file(path, origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz')
    f = np.load(path)
    x = f['x']
    y = f['y']
    f.close()

    # Shuffle features and targets with identically seeded generators so
    # the rows stay aligned, without clobbering the global numpy RNG
    # state (the original called np.random.seed twice).
    np.random.RandomState(seed).shuffle(x)
    np.random.RandomState(seed).shuffle(y)

    # Compute the split boundary once instead of re-deriving it four
    # times; int() truncation matches the original behavior exactly.
    split_at = int(len(x) * (1 - test_split))
    x_train = np.array(x[:split_at])
    y_train = np.array(y[:split_at])
    x_test = np.array(x[split_at:])
    y_test = np.array(y[split_at:])
    return (x_train, y_train), (x_test, y_test)
| {
"content_hash": "1ffc1e6cd6b27b0f45e63d8d24e02af3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 94,
"avg_line_length": 32.794117647058826,
"alnum_prop": 0.5973094170403588,
"repo_name": "parag2489/keras_superpixel_pooling",
"id": "e4ee571c8481b2e96914cba879ef53d3eb0964eb",
"size": "1115",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "datasets/boston_housing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1107020"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the shared Ithorian jacket (style 04) wearable template.

    :param kernel: engine kernel, required by the template-loader
        calling convention (unused in this generated template).
    :returns: a configured Tangible template object.
    """
    result = Tangible()
    result.template = "object/tangible/wearables/ithorian/shared_ith_jacket_s04.iff"
    result.attribute_template_id = 11
    result.stfName("wearables_name","ith_jacket_s04")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
"content_hash": "1ab09d786e3a6dc87ec575bc2d9e278e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7071651090342679,
"repo_name": "obi-two/Rebelion",
"id": "c7d3df44cb531118640c06504e8f1edc804c6d58",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/wearables/ithorian/shared_ith_jacket_s04.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
'''
Render a graph full of gtk widgets and draw some lines!
'''
import gtk
import gtk.gdk as gdk
import vwidget.main as vw_main
import visgraph.renderers as vg_render
import visgraph.drawing.bezier as vw_bezier
import visgraph.drawing.catmullrom as vw_catmullrom
zero_zero = (0,0)
class GtkVisGraphOverview(gtk.DrawingArea):
    """Miniature overview of a laid-out graph.

    Draws one outline rectangle per node, scaled from layout coordinates
    down to this widget's allocation.  Clicking the overview re-centers
    the associated scrolled window (when one was supplied) on the
    clicked spot.
    """

    def __init__(self, graph, layout, scrollwin=None, ):
        gtk.DrawingArea.__init__(self)
        self._vg_graph = graph
        self._vg_layout = layout
        self._vg_scrollwin = scrollwin
        self.connect('expose-event', self.expose_event_cb)
        self.connect('button_press_event', self.button_press_event)
        self.set_events( self.get_events() | gtk.gdk.BUTTON_PRESS_MASK)
        # Black background, green foreground for the node outlines.
        self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color())
        self.modify_fg(gtk.STATE_NORMAL, gtk.gdk.Color(green=65535))
        if scrollwin != None:
            # Tear down together with the scrolled window we mirror.
            scrollwin.connect('destroy', self.scroll_destroy_cb)

    def scroll_destroy_cb(self, widget):
        self.destroy()

    def expose_event_cb(self, layout, event):
        """Redraw: outline every sized node, scaled to our allocation."""
        style = self.get_style()
        gc = style.fg_gc[gtk.STATE_NORMAL]
        rect = self.get_allocation()
        owidth = rect.width
        oheight = rect.height
        lwidth, lheight = self._vg_layout.getLayoutSize()
        for nid, ninfo in self._vg_graph.getNodes():
            nwidth, nheight = ninfo.get('size', zero_zero)
            # Nodes that were never sized (width 0) are skipped.
            if nwidth == 0:
                continue
            xpos, ypos = ninfo.get('position', zero_zero)
            # Scale layout coordinates into overview-widget coordinates.
            drawx = ((xpos * owidth) / lwidth)
            drawy = (ypos * oheight) / lheight
            sizex = owidth * nwidth / lwidth
            sizey = oheight * nheight / lheight
            colorstr = ninfo.get('color')
            if colorstr == None:
                colorstr = '#0f0'
            color = gtk.gdk.color_parse(colorstr)
            #self.modify_fg(gtk.STATE_NORMAL, color)
            self.window.draw_rectangle(gc, False, drawx, drawy, sizex, sizey)
            #c = self.window.cairo_create()
            #c.set_source_rgb(color.red / float(65535), color.green / float(65535), color.blue / float(65535))
            #c.rectangle(drawx, drawy, sizex, sizey)# event.area.x, event.area.y, event.area.width, event.area.height)
            #c.set_line_width(0.5)
            #c.stroke()

    def button_press_event(self, widget, event):
        """Center the paired scrolled window on the clicked fraction."""
        if self._vg_scrollwin:
            rect = self.get_allocation()
            # Click position expressed as a fraction of the overview.
            xper = event.x / float(rect.width)
            yper = event.y / float(rect.height)
            hadj = self._vg_scrollwin.get_hadjustment()
            vadj = self._vg_scrollwin.get_vadjustment()
            hvis = hadj.page_size
            vvis = vadj.page_size
            # Clamp so the viewport stays inside the scrollable range.
            hadj.value = min(max((hadj.upper * xper) - (hvis / 2), 0), hadj.upper - hvis)
            vadj.value = min(max((vadj.upper * yper) - (vvis / 2), 0), vadj.upper - vvis)
            self._vg_scrollwin.set_hadjustment(hadj)
            self._vg_scrollwin.set_vadjustment(vadj)
#FIXME use cairo to draw!
class GtkVisGraphRenderer(gtk.Layout, vg_render.GraphRenderer):
    """Graph renderer that places one gtk widget per node on a gtk.Layout
    and draws the edges as straight line segments on expose.
    """

    def __init__(self, graph):
        gtk.Layout.__init__(self)
        vg_render.GraphRenderer.__init__(self, graph)
        self._vg_lines = []
        self.connect('expose-event', self.expose_event_cb)
        self.connect('button_press_event', self.button_press_event)
        self.set_events( self.get_events() | gtk.gdk.BUTTON_PRESS_MASK)
        # Black background, green foreground (matches the overview).
        self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color())
        self.modify_fg(gtk.STATE_NORMAL, gtk.gdk.Color(green=65535))

    def beginRender(self, width, height):
        """Reset buffered edge lines and size the layout for a new pass."""
        vg_render.GraphRenderer.beginRender(self, width, height)
        self._vg_lines = []
        self.set_size(width, height)

    def renderNode(self, nid, ninfo, xpos, ypos):
        widget = ninfo.get('widget')
        # FIXME honor color, etc...?
        if widget != None:
            self.move(widget, xpos, ypos)

    def renderEdge(self, eid, einfo, points):
        # FIXME deal with colors etc...
        # Lines are only buffered here; they get stroked in
        # expose_event_cb.
        self._vg_lines.append(points)

    def setNodeSizes(self, graph):
        """Create default label widgets where missing and record sizes.

        Runs gtk main-loop iterations so size_request() returns real
        dimensions.
        """
        for nid, ninfo in graph.getNodes():
            # Skip "ghost" nodes...
            if ninfo.get('ghost'):
                continue
            widget = ninfo.get('widget')
            if widget == None:
                widget = gtk.Label(str(nid))
                widget.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color())
                widget.modify_fg(gtk.STATE_NORMAL, gtk.gdk.Color(green=65535))
                ninfo['widget'] = widget
            # Put them all at 0,0 for now...
            self.put(widget, 0, 0)
        # Get them all to render...
        self.show_all()
        vw_main.doiterations()
        # Now that we have rendered them...
        for nid, ninfo in graph.getNodes():
            widget = ninfo.get('widget')
            if widget == None:
                continue
            size = widget.size_request()
            ninfo['size'] = size

    def button_press_event(self, widget, event):
        # Debug aid: log click coordinates (Python 2 print statement).
        print 'CLICK %d %d' % (event.x, event.y)

    def expose_event_cb(self, layout, event):
        """Stroke every buffered edge polyline."""
        style = self.get_style()
        gc = style.fg_gc[gtk.STATE_NORMAL]
        for points in self._vg_lines:
            self.bin_window.draw_lines(gc, points)
| {
"content_hash": "268487cfbcb8dad6ac9c958eb1e85205",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 118,
"avg_line_length": 31.352941176470587,
"alnum_prop": 0.5772983114446529,
"repo_name": "joshuahoman/vivisect",
"id": "c83fcece71c504674ede6244d8f6dd185e1d1d2f",
"size": "5331",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "visgraph/renderers/gtkrend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11663390"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
} |
"""
You are given a positive integer num consisting only of digits 6 and 9.
Return the maximum number you can get by changing at most one digit (6 becomes 9, and 9 becomes 6).
Example 1:
Input: num = 9669
Output: 9969
Explanation:
Changing the first digit results in 6669.
Changing the second digit results in 9969.
Changing the third digit results in 9699.
Changing the fourth digit results in 9666.
The maximum number is 9969.
Example 2:
Input: num = 9996
Output: 9999
Explanation: Changing the last digit 6 to 9 results in the maximum number.
Example 3:
Input: num = 9999
Output: 9999
Explanation: It is better not to apply any change.
Constraints:
1 <= num <= 10^4
num consists of only 6 and 9 digits.
"""
class Solution:
    def maximum69Number(self, num: int) -> int:
        """Return the largest number obtainable by changing at most one
        digit of *num* (6 -> 9 or 9 -> 6).

        Changing a 9 to a 6 never helps, so the optimal move is to flip
        the most significant 6 (if any) to 9.  ``str.replace`` with
        count=1 does exactly that in a single pass, replacing the
        manual list/loop bookkeeping.
        """
        return int(str(num).replace('6', '9', 1))
"content_hash": "fa5134e8ca591c8bfec9cfd8bf6ca31f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 99,
"avg_line_length": 20.782608695652176,
"alnum_prop": 0.6673640167364017,
"repo_name": "franklingu/leetcode-solutions",
"id": "f04440abc5264a87b45ae4c1dd2f5fd0d8ee7798",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/maximum-69-number/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
} |
"""
Django settings for DjangoChannel project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^!=hb@vr6eg1$s(08w76&spf-ef5gdg7#v74g=t1&(2_aus#fg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'App',
'channels',
]
# Django Channels layer: Redis-backed, with the connection URL taken
# from the environment (REDIS_URL) and falling back to localhost.
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "asgi_redis.RedisChannelLayer",
        "CONFIG": {
            "hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
        },
        # Module path of the channel routing configuration.
        "ROUTING": "Config.routing.channel_routing",
    },
}
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/App/static/'
| {
"content_hash": "20f9e0c35793ed068d375e607dfeee8e",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 99,
"avg_line_length": 26.263157894736842,
"alnum_prop": 0.6739192671056399,
"repo_name": "ivermac/DjangoChannel",
"id": "6bf1d8600887a3a25c1d6a66f7822450e25c5b9d",
"size": "3493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Config/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11628"
},
{
"name": "HTML",
"bytes": "3125"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Python",
"bytes": "11910"
}
],
"symlink_target": ""
} |
from bokeh.io import show, output_file
from bokeh.plotting import figure
# Render the plot to a standalone HTML file.
output_file("bar_basic.html")
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
# Categorical x-range: the axis is driven by the fruit names themselves.
p = figure(x_range=fruits, plot_height=350, title="Fruit Counts",
           toolbar_location=None, tools="")
p.vbar(x=fruits, top=[5, 3, 4, 2, 4, 6], width=0.9)
# Hide vertical gridlines and anchor the bars to the x-axis baseline.
p.xgrid.grid_line_color = None
p.y_range.start = 0
show(p)
| {
"content_hash": "71dba89a05e06d3db4addd16846ce4eb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 25.5,
"alnum_prop": 0.6691176470588235,
"repo_name": "philippjfr/bokeh",
"id": "858f11e28241ee32a8e923366ba67660c0fd5832",
"size": "408",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/plotting/file/bar_basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "104935"
},
{
"name": "CoffeeScript",
"bytes": "1236045"
},
{
"name": "HTML",
"bytes": "48230"
},
{
"name": "JavaScript",
"bytes": "57759"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2642580"
},
{
"name": "Shell",
"bytes": "8519"
},
{
"name": "TypeScript",
"bytes": "228756"
}
],
"symlink_target": ""
} |
""" """
# FIXME: this is seriously under-documented
from datetime import datetime
from actstream.managers import ActionManager, stream
class MyActionManager(ActionManager):
    """Action manager exposing a custom 'mystream' activity stream."""

    @stream
    def mystream(self, obj, verb='posted', time=None):
        """Return *obj*'s actor actions with *verb* up to *time*.

        :param obj: model instance whose ``actor_actions`` are queried.
        :param verb: action verb to filter on.
        :param time: upper bound timestamp; defaults to "now".
        """
        if time is None:
            # FIXME from Joel: this probably should be TZ-aware
            time = datetime.now()
        return obj.actor_actions.filter(verb=verb, timestamp__lte=time)
| {
"content_hash": "70be8301d949036aae74adab6bd68861",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 30.266666666666666,
"alnum_prop": 0.6607929515418502,
"repo_name": "ProjectFacet/facet",
"id": "435d2542d3241132b53b880bb60e0e3ec6df8e0a",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/editorial/managers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4350483"
},
{
"name": "HTML",
"bytes": "1677386"
},
{
"name": "JavaScript",
"bytes": "1120019"
},
{
"name": "Python",
"bytes": "804022"
},
{
"name": "Ruby",
"bytes": "225"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
} |
from inc_noesis import *
def registerNoesisTypes():
    """Register the .dat archive-extract handler with Noesis.

    :returns: 1 (Noesis convention for successful registration).
    """
    handle = noesis.register("The Legend of Korra", ".dat")
    noesis.setHandlerExtractArc(handle, datExtract)
    return 1
def datExtract(fileName, fileLen, justChecking):
    """Extract all entries from a Legend of Korra DAT archive.

    Archive layout (as read here): a 3-byte "DAT" magic, one skipped
    byte, then little-endian u32 fields: file count and the offsets of
    the offset/type/name/size tables.  The name table begins with the
    fixed per-entry name field size.

    :param fileName: path of the archive on disk.
    :param fileLen: archive size in bytes (unused here).
    :param justChecking: when truthy, only confirm the file is handled.
    :returns: 1 (Noesis success convention; also returned after printing
        an error when the magic does not match -- presumably acceptable
        to Noesis, TODO confirm).
    """
    with open(fileName, "rb") as fs:
        if justChecking:
            return 1
        magicword = noeStrFromBytes(fs.read(3))
        if magicword == "DAT":
            fs.read(1)  # byte after the magic is skipped (padding?)
            fcount = noeUnpack("<I", fs.read(4))[0]
            offtable = noeUnpack("<I", fs.read(4))[0]
            typetable = noeUnpack("<I", fs.read(4))[0]
            fnametable = noeUnpack("<I", fs.read(4))[0]
            sizetable = noeUnpack("<I", fs.read(4))[0]
            fs.seek(fnametable, 0)
            # First u32 of the name table: per-entry name field size.
            fnsz = noeUnpack("<I", fs.read(4))[0]
            fnametable = fs.tell()
            for i in range(fcount):
                # The *table variables double as running cursors into
                # their respective tables across iterations.
                fs.seek(offtable, 0)
                offset = noeUnpack("<I", fs.read(4))[0]
                offtable = fs.tell()
                fs.seek(fnametable, 0)
                # Names are zero-terminated within a fixed-size field.
                fileName = noeStrFromBytes(noeParseToZero(fs.read(fnsz)))
                fnametable = fs.tell()
                fs.seek(sizetable, 0)
                size = noeUnpack("<I", fs.read(4))[0]
                sizetable = fs.tell()
                fs.seek(offset, 0)
                print("Writing", fileName)
                rapi.exportArchiveFile(fileName, fs.read(size))
        else:
            print("Invalid archive!!!")
    return 1
| {
"content_hash": "72fdc0b763e6bc324e246036e09b6df2",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.49736842105263157,
"repo_name": "TheDeverEric/noesis-importers",
"id": "96efa20f7d9f312ecfdf29564e90af69b3238994",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Eric Van Hoven/fmt_legendofkorra_dat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27112"
}
],
"symlink_target": ""
} |
"""Sync script for GitNamed."""
import os
import sys
import subprocess
import settings
named_conf_master = os.path.join(settings.named_path, 'named.conf.master')
named_conf_slave = os.path.join(settings.named_path, 'named.conf.slave')
transfer_key_name = 'master2slave'
transfer_key = u'''
key %s {
algorithm hmac-md5;
secret "%s";
};
''' % (transfer_key_name, settings.transfer_key_body)
nameconf_master = u'''zone "%s" {
type master;
file "zones/%s";
%s;
allow-transfer {
%s;
};
%s};
'''
nameconf_slave = u'''zone "%s" {
type slave;
file "zones/%s";
masters {
%s;
};
};
'''
dyndns_update = u''' allow-update {key %s; };\n'''
all_slave = ' '.join('%s;' % slave_ip
for (slave_ip, system) in settings.slave_ips.items())
notify_str = 'also-notify {%s}' % all_slave
def get_user(system):
    """Map an OS family name to the account that runs BIND on it.

    Exits the whole script when *system* is not recognized.
    """
    bind_users = {'centos': 'named', 'debian': 'bind'}
    try:
        return bind_users[system]
    except KeyError:
        sys.stderr.write("can't determinate OS\n")
        sys.exit(-1)
def is_file(name):
    """Return True when *name* is a regular file inside the zones dir."""
    full_path = os.path.join(settings.zones_path, name)
    return os.path.isfile(full_path)
def get_master(z):
    """Render the master named.conf stanza for zone *z*.

    Dynamic-DNS zones (listed in settings.dzones) additionally allow
    updates and transfers with their per-zone key.
    """
    # Use a distinct local name: the original reused ``transfer_key``,
    # shadowing the module-level key template inside this function.
    allow_keys = 'key %s' % transfer_key_name
    if z in settings.dzones:
        key = settings.dzones[z]
        dstring = dyndns_update % key
        allow_keys += '; key %s' % key
    else:
        dstring = ''
    return nameconf_master % (z, z, notify_str, allow_keys, dstring)
def reload_slave(slave_ip, system):
    """Push the slave config to *slave_ip* and reload its named daemon.

    Exits the whole script on the first scp/ssh/rndc failure.

    :param slave_ip: address of the slave name server.
    :param system: OS family ('centos' or 'debian'), used to pick the
        remote user account.
    """
    user = get_user(system)
    sys.stdout.write("reloading %s\n" % slave_ip)
    # copy named.conf.master to slave
    slave_arg = '%s@%s:%s' % (user, slave_ip, settings.named_path)
    code = subprocess.call(['/usr/bin/scp', named_conf_slave,
                           slave_arg])
    if code:
        sys.stderr.write('copy %s to slave %s failed\n' %
                         (named_conf_slave, slave_ip))
        sys.exit(-1)
    # reload slave dns
    rndc_conf = '%s/rndc.key' % settings.named_path
    # NOTE(review): shell=True with interpolated settings values -- fine
    # for trusted config, but an argument list would be safer.
    code = subprocess.call('/usr/bin/ssh %s@%s '
                           '/usr/sbin/rndc -k %s -s localhost reload' %
                           (user, slave_ip, rndc_conf), shell=True)
    if code:
        sys.stderr.write('reload slave name server %s failed\n' % slave_ip)
        sys.exit(-1)
def main():
    """Sync zone files from git, regenerate named.conf fragments, reload DNS.

    Steps: git pull -> scan zone files -> write named.conf.master and
    named.conf.slave -> rndc reload the master -> push/reload every slave.
    """
    os.chdir(settings.named_path)
    # pull code from git repo (failure is reported but not fatal)
    code = subprocess.call('/usr/bin/git pull hub master', shell=True)
    if code:
        sys.stderr.write('git pull code failed\n')
    # get all zones, exclude journal file (*.jnl written by BIND)
    zones = [f for f in os.listdir(settings.zones_path)
             if is_file(f) and not f.endswith('.jnl')]
    # create named.conf.master: base include, transfer key, one stanza per zone
    with open(named_conf_master, 'w') as f:
        f.write('include "%s/named.conf";\n\n' % settings.named_path)
        f.write(transfer_key)
        f.write('\n'.join([get_master(z) for z in zones]))
    # create named.conf.slave: same include/key plus the server/keys mapping
    # so slaves authenticate transfers from the master
    with open(named_conf_slave, 'w') as f:
        f.write('include "%s/named.conf";\n\n' % settings.named_path)
        f.write(transfer_key)
        f.write('server %s { keys %s; };\n\n' %
                (settings.master_ip, transfer_key_name))
        f.write('\n'.join([nameconf_slave%(z, z,
                settings.master_ip) for z in zones]))
    # reload master dns; a failing master aborts before touching slaves
    rndc_conf = '%s/rndc.key' % settings.named_path
    code = subprocess.call('/usr/sbin/rndc -k %s -s localhost reload' %
                           rndc_conf, shell=True)
    if code:
        sys.stderr.write('reload master name server failed\n')
        sys.exit(-1)
    for (slave_ip,system) in settings.slave_ips.items():
        reload_slave(slave_ip, system)
if __name__ == '__main__':
    main()
| {
"content_hash": "f57f3a0400590de8ea320a456ede9fac",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 75,
"avg_line_length": 27.746268656716417,
"alnum_prop": 0.5745024206562668,
"repo_name": "Wyvernsquare/dns",
"id": "f4a640aef67866beefbc36856cf890e3590adc5d",
"size": "4414",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "script/syndns.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1755"
},
{
"name": "Python",
"bytes": "9120"
},
{
"name": "Shell",
"bytes": "1998"
}
],
"symlink_target": ""
} |
"""Group action implementations"""
import six
import sys
from keystoneauth1 import exceptions as ks_exc
from openstackclient.common import command
from openstackclient.common import utils
from openstackclient.i18n import _
from openstackclient.identity import common
class AddUserToGroup(command.Command):
    """Add user to group"""

    def get_parser(self, prog_name):
        parser = super(AddUserToGroup, self).get_parser(prog_name)
        parser.add_argument('group', metavar='<group>',
                            help=_('Group to contain <user> (name or ID)'))
        parser.add_argument('user', metavar='<user>',
                            help=_('User to add to <group> (name or ID)'))
        common.add_group_domain_option_to_parser(parser)
        common.add_user_domain_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        # Resolve both names/IDs before attempting the membership change.
        user = common.find_user(identity_client, parsed_args.user,
                                parsed_args.user_domain)
        group = common.find_group(identity_client, parsed_args.group,
                                  parsed_args.group_domain)
        names = {'user': parsed_args.user, 'group': parsed_args.group}
        try:
            identity_client.users.add_to_group(user.id, group.id)
        except Exception:
            # Report the failure on stderr but do not raise.
            sys.stderr.write(
                _("%(user)s not added to group %(group)s\n") % names)
        else:
            sys.stdout.write(
                _("%(user)s added to group %(group)s\n") % names)
class CheckUserInGroup(command.Command):
    """Check user membership in group"""

    def get_parser(self, prog_name):
        parser = super(CheckUserInGroup, self).get_parser(prog_name)
        parser.add_argument('group', metavar='<group>',
                            help=_('Group to check (name or ID)'))
        parser.add_argument('user', metavar='<user>',
                            help=_('User to check (name or ID)'))
        common.add_group_domain_option_to_parser(parser)
        common.add_user_domain_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        user = common.find_user(identity_client, parsed_args.user,
                                parsed_args.user_domain)
        group = common.find_group(identity_client, parsed_args.group,
                                  parsed_args.group_domain)
        names = {'user': parsed_args.user, 'group': parsed_args.group}
        try:
            identity_client.users.check_in_group(user.id, group.id)
        except Exception:
            # check_in_group raises when the user is not a member.
            sys.stderr.write(
                _("%(user)s not in group %(group)s\n") % names)
        else:
            sys.stdout.write(
                _("%(user)s in group %(group)s\n") % names)
class CreateGroup(command.ShowOne):
    """Create new group"""
    def get_parser(self, prog_name):
        parser = super(CreateGroup, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<group-name>',
            help=_('New group name'),
        )
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help=_('Domain to contain new group (name or ID)'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New group description'),
        )
        parser.add_argument(
            '--or-show',
            action='store_true',
            help=_('Return existing group'),
        )
        return parser
    def take_action(self, parsed_args):
        """Create the group; with --or-show, fall back to the existing one
        on a name conflict (making the command idempotent)."""
        identity_client = self.app.client_manager.identity
        domain = None
        if parsed_args.domain:
            domain = common.find_domain(identity_client,
                                        parsed_args.domain).id
        try:
            group = identity_client.groups.create(
                name=parsed_args.name,
                domain=domain,
                description=parsed_args.description)
        except ks_exc.Conflict as e:
            # Conflict means a group of this name already exists.
            if parsed_args.or_show:
                group = utils.find_resource(identity_client.groups,
                                            parsed_args.name,
                                            domain_id=domain)
                self.log.info(_('Returning existing group %s'), group.name)
            else:
                raise e
        # Drop the 'links' entry so only displayable fields remain.
        group._info.pop('links')
        return zip(*sorted(six.iteritems(group._info)))
class DeleteGroup(command.Command):
    """Delete group(s)"""

    def get_parser(self, prog_name):
        parser = super(DeleteGroup, self).get_parser(prog_name)
        parser.add_argument('groups', metavar='<group>', nargs="+",
                            help=_('Group(s) to delete (name or ID)'))
        parser.add_argument('--domain', metavar='<domain>',
                            help=_('Domain containing group(s) (name or ID)'))
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        # Resolve and delete each requested group in turn.
        for name_or_id in parsed_args.groups:
            group = common.find_group(identity_client, name_or_id,
                                      parsed_args.domain)
            identity_client.groups.delete(group.id)
class ListGroup(command.Lister):
    """List groups"""
    def get_parser(self, prog_name):
        parser = super(ListGroup, self).get_parser(prog_name)
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help=_('Filter group list by <domain> (name or ID)'),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Filter group list by <user> (name or ID)'),
        )
        common.add_user_domain_option_to_parser(parser)
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        return parser
    def take_action(self, parsed_args):
        """Return (column names, row iterator) as expected by Lister."""
        identity_client = self.app.client_manager.identity
        # Optional domain filter: resolve name/ID to a domain ID.
        domain = None
        if parsed_args.domain:
            domain = common.find_domain(identity_client,
                                        parsed_args.domain).id
        # Optional user filter: resolve name/ID to a user ID.
        if parsed_args.user:
            user = common.find_user(
                identity_client,
                parsed_args.user,
                parsed_args.user_domain,
            ).id
        else:
            user = None
        # List groups
        if parsed_args.long:
            columns = ('ID', 'Name', 'Domain ID', 'Description')
        else:
            columns = ('ID', 'Name')
        data = identity_client.groups.list(
            domain=domain,
            user=user,
        )
        # Rows are produced lazily via a generator expression.
        return (
            columns,
            (utils.get_item_properties(
                s, columns,
                formatters={},
            ) for s in data)
        )
class RemoveUserFromGroup(command.Command):
    """Remove user from group"""

    def get_parser(self, prog_name):
        parser = super(RemoveUserFromGroup, self).get_parser(prog_name)
        parser.add_argument('group', metavar='<group>',
                            help=_('Group containing <user> (name or ID)'))
        parser.add_argument('user', metavar='<user>',
                            help=_('User to remove from <group> (name or ID)'))
        common.add_group_domain_option_to_parser(parser)
        common.add_user_domain_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        user = common.find_user(identity_client, parsed_args.user,
                                parsed_args.user_domain)
        group = common.find_group(identity_client, parsed_args.group,
                                  parsed_args.group_domain)
        names = {'user': parsed_args.user, 'group': parsed_args.group}
        try:
            identity_client.users.remove_from_group(user.id, group.id)
        except Exception:
            # Report the failure on stderr but do not raise.
            sys.stderr.write(
                _("%(user)s not removed from group %(group)s\n") % names)
        else:
            sys.stdout.write(
                _("%(user)s removed from group %(group)s\n") % names)
class SetGroup(command.Command):
    """Set group properties"""

    def get_parser(self, prog_name):
        parser = super(SetGroup, self).get_parser(prog_name)
        parser.add_argument('group', metavar='<group>',
                            help=_('Group to modify (name or ID)'))
        parser.add_argument('--domain', metavar='<domain>',
                            help=_('Domain containing <group> (name or ID)'))
        parser.add_argument('--name', metavar='<name>',
                            help=_('New group name'))
        parser.add_argument('--description', metavar='<description>',
                            help=_('New group description'))
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        group = common.find_group(identity_client, parsed_args.group,
                                  parsed_args.domain)
        # Collect only the attributes the user actually asked to change.
        updates = {}
        if parsed_args.name:
            updates['name'] = parsed_args.name
        if parsed_args.description:
            updates['description'] = parsed_args.description
        if not updates:
            # Nothing to change: warn and bail out without calling the API.
            sys.stderr.write("Group not updated, no arguments present\n")
            return
        identity_client.groups.update(group.id, **updates)
class ShowGroup(command.ShowOne):
    """Display group details"""

    def get_parser(self, prog_name):
        parser = super(ShowGroup, self).get_parser(prog_name)
        parser.add_argument('group', metavar='<group>',
                            help=_('Group to display (name or ID)'))
        parser.add_argument('--domain', metavar='<domain>',
                            help=_('Domain containing <group> (name or ID)'))
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        group = common.find_group(identity_client,
                                  parsed_args.group,
                                  domain_name_or_id=parsed_args.domain)
        # Strip the non-displayable 'links' entry before building the rows.
        info = group._info
        info.pop('links')
        return zip(*sorted(six.iteritems(info)))
| {
"content_hash": "98fe4d620ee6efc065f34b71cb141bea",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 75,
"avg_line_length": 32.114525139664806,
"alnum_prop": 0.5080455771070714,
"repo_name": "redhat-openstack/python-openstackclient",
"id": "fdb94da64142f2dd0937f2fac0fbe85aa81acc36",
"size": "12110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-patches",
"path": "openstackclient/identity/v3/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2229284"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
} |
import torch
from torch.autograd import Function
from _ext import ext_lib
class ReLUF(Function):
    """Autograd Function delegating ReLU forward/backward to the C extension.

    Uses the legacy torch.autograd.Function interface (state is stored on
    the instance via save_for_backward).  Only CPU tensors are supported;
    CUDA input raises an Exception.
    """
    def forward(self, input):
        # Stash the input; backward retrieves it for the extension call.
        self.save_for_backward(input)
        # Fresh tensor of the same type for the extension to fill in.
        output = input.new()
        if not input.is_cuda:
            ext_lib.relu_forward(input, output)
        else:
            raise Exception("No CUDA Implementation")
        return output
    def backward(self, grad_output):
        input, = self.saved_tensors
        grad_input = grad_output.new()
        if not grad_output.is_cuda:
            # The extension computes grad_input from grad_output and the
            # saved input — presumably zeroing gradients where input <= 0;
            # confirm against the C implementation.
            ext_lib.relu_backward(grad_output, input, grad_input)
        else:
            raise Exception("No CUDA Implementation")
        return grad_input
| {
"content_hash": "781895a179d1be382f7ed00789f1d296",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 27,
"alnum_prop": 0.6133333333333333,
"repo_name": "DingKe/pytorch_workplace",
"id": "fbe8ef48262add5ce40fa6310c6852c61d7ec38c",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cffi/functions/relu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "680"
},
{
"name": "C++",
"bytes": "159"
},
{
"name": "Python",
"bytes": "119448"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the transaction data; header=None because the CSV has no header row.
dataset = pd.read_csv("Market_Basket_Optimisation.csv",header = None)
# Convert each of the 7501 rows into a list of 20 item strings
# (missing cells become the string 'nan').
transactions = []
for i in range(0,7501):
    transactions.append([str(dataset.values[i,j]) for j in range(0,20)])
from apyori import apriori
# min_support = 3*7/7500
# i.e. items bought at least 3 times a day over a 7-day dataset of 7500 rows.
rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)
# Materialize the generator so the rules can be inspected.
results = list(rules)
| {
"content_hash": "ebb644ec34fd4c51d4e3b9a62085b5ac",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 102,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.7159090909090909,
"repo_name": "jigargandhi/UdemyMachineLearning",
"id": "14359b10f193b31ad7fd9bee77fb22ae37a34ada",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Machine Learning A-Z Template Folder/Part 5 - Association Rule Learning/Section 28 - Apriori/j_apriori.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157569"
},
{
"name": "R",
"bytes": "74375"
}
],
"symlink_target": ""
} |
import hashlib
import os
import socket
import sys

import six
def tostr(s, encoding='utf-8'):
    """Coerce *s* to the native ``str`` type.

    On Python 2 the value is encoded to a byte string; on Python 3,
    ``bytes`` input is decoded with *encoding* and ``str`` input is
    returned unchanged.  (Uses sys.version_info instead of six.PY2 —
    behavior is identical but drops the third-party dependency here.)
    """
    if sys.version_info[0] == 2:
        return s.encode(encoding)
    if isinstance(s, bytes):
        return s.decode(encoding)
    return s
def read_file_content(filename, default=''):
    """Return the raw bytes of *filename*, or *default* when it is absent.

    NOTE(review): an existing file yields ``bytes`` while the built-in
    default is a ``str`` — callers apparently tolerate the mixed types.
    """
    if os.path.isfile(filename):
        with open(filename, 'rb') as handle:
            return handle.read()
    return default
def sha_file(path):
    """Return the SHA-1 hex digest of the file at *path*.

    Reads in 64 KiB chunks so arbitrarily large files stay cheap on memory.
    """
    digest = hashlib.sha1()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def write_file_content(filename, content):
    """Write text *content* to *filename* encoded as UTF-8.

    Bug fix: the file must be opened in binary mode.  The original opened
    it with mode 'w' (text) and then wrote ``content.encode('utf-8')``
    (bytes), which raises TypeError on Python 3.
    """
    with open(filename, 'wb') as f:
        f.write(content.encode('utf-8'))
def virt2real(path):
    """Map a virtual absolute path onto the current working directory."""
    relative = path.lstrip('/')
    return os.path.join(os.getcwd(), relative)
def real2virt(path):
    """Inverse of virt2real: path relative to cwd, with forward slashes."""
    rel = os.path.relpath(path, os.getcwd())
    return rel.replace('\\', '/')
def current_ip():
    """Best-effort local IP discovery.

    UDP "connect" to a public resolver (no packets are actually sent for
    UDP) and read the source address the OS chose; fall back to loopback
    when that fails.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        try:
            sock.connect(("8.8.8.8", 80))
            return sock.getsockname()[0]
        except OSError:
            return "127.0.0.1"
| {
"content_hash": "c2350f1fa2e2c476f4395776b626ff69",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 64,
"avg_line_length": 21.30188679245283,
"alnum_prop": 0.579273693534101,
"repo_name": "openatx/weditor",
"id": "b1a8f1b0f43fc48d0b1f950f3c03cf01c0c65bbe",
"size": "1148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weditor/web/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83444"
},
{
"name": "HTML",
"bytes": "17844"
},
{
"name": "JavaScript",
"bytes": "1257418"
},
{
"name": "Python",
"bytes": "42017"
}
],
"symlink_target": ""
} |
from test import root_path
# Actual Imports
from utilities import TT
from iterators import BatchGenerator, Dataset, ImageIterator
from dataset import icpr2012
# Repository root, used to build absolute paths to the test fixtures.
root = root_path()
# 1. Test BatchGenerator
# flt, mapper = icpr2012()
TT.verbose = True
# batches = BatchGenerator(Dataset(root+'/datasets/ICPR 2012/testing/set1', mapper=mapper, filename_filter=flt), 1000, 500)
# for batch in batches:
# continue
# Drain the generator over a single image/label pair; the point of the
# test is simply that iteration completes without raising.
batches = BatchGenerator(ImageIterator(root+'/tests/patch_at/utilities.tiff', root+'/tests/patch_at/utilities.csv'),
                         batch_size=1000)
for batch in batches:
    continue
| {
"content_hash": "9b6bed804c6413e0e0e827a98a34b947",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 123,
"avg_line_length": 32,
"alnum_prop": 0.7302631578947368,
"repo_name": "znck/mitosis-detection",
"id": "64553a9d548f63b0d353bb6e427f5587d249f1fc",
"size": "621",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/iterators-test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33951"
}
],
"symlink_target": ""
} |
# Production overrides layered on top of the shared Django settings.
from common_settings import *
from bundle_config import config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os
# Media/static roots live in the host-provided data directory.
MEDIA_ROOT = os.path.join(config["core"]["data_directory"], "media")
STATIC_ROOT = os.path.join(config["core"]["data_directory"], "static")
LOGIN_REDIRECT_URL = "/badge/" | {
"content_hash": "6068d5c5182e52cced779255c17c62a5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.723404255319149,
"repo_name": "silverstripesoftware/make_my_badge",
"id": "f0d131a6f935d336aeb410e1fb12de30172c7255",
"size": "307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_my_badge/production_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22996"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for ``histogram2dcontour.colorbar.showticklabels``."""

    def __init__(self, plotly_name="showticklabels",
                 parent_name="histogram2dcontour.colorbar", **kwargs):
        # Fill in defaults only when the caller did not supply them; the
        # values then reach the base class through **kwargs.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("role", "style")
        super(ShowticklabelsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
"content_hash": "c268ebb454d8d1ed4648f1e74668b256",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 31,
"alnum_prop": 0.5977229601518027,
"repo_name": "plotly/python-api",
"id": "a6bc8dda55128f5467b79ad7a0e7be361afba4d5",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/_showticklabels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
class Environment(object):
    """Resolve runtime configuration for the application.

    Settings come from ``environment.yaml`` (an explicit *config_file*,
    or the default file two directories above this module) or, when no
    file is found, from environment variables.
    """
    def __init__(self, config_file=""):
        # Defaults: everything empty until a config source fills it in.
        self.kintone_domain = ""
        self.kintone_id = ""
        self.kintone_password = ""
        self.database_uri = ""
        self.mail_domain = ""
        self.mail_api_key = ""
        self.translator_client_id = ""
        self.translator_client_secret = ""
        import os
        config_file = config_file  # (no-op assignment retained from original)
        if not config_file:
            # Fall back to the repository-root environment.yaml when present.
            default_path = os.path.join(os.path.dirname(__file__), "../../environment.yaml")
            if os.path.isfile(default_path):
                config_file = default_path
        try:
            if config_file:
                with open(config_file) as cf:
                    import yaml
                    # SECURITY NOTE(review): yaml.load without an explicit
                    # Loader uses the full loader; only feed it trusted
                    # files (yaml.safe_load would be safer).
                    e = yaml.load(cf)
                    # _get: lookup returning "" for a missing key/empty dict;
                    # yget: one- or two-level lookup into the parsed YAML.
                    _get = lambda k, d: "" if not d or k not in d else d[k]
                    yget = lambda k, sk="": _get(k, e) if not sk else _get(sk, _get(k, e))
                    self.kintone_domain = yget("domain")
                    self.kintone_id = yget("login", "id")
                    self.kintone_password = yget("login", "password")
                    self.database_uri = yget("database_uri")
                    self.mail_domain = yget("mail", "domain")
                    self.mail_api_key = yget("mail", "api_key")
                    self.translator_client_id = yget("translator", "client_id")
                    self.translator_client_secret = yget("translator", "client_secret")
                    if not self.database_uri:
                        # Local development default.
                        self.database_uri = "mongodb://localhost:27017/kanaria"
            else:
                # No file: read environment variables, trying the Mongo URI
                # names used by different hosting providers in order.
                self.kintone_domain = os.environ.get("KINTONE_DOMAIN", "")
                self.kintone_id = os.environ.get("KINTONE_ID", "")
                self.kintone_password = os.environ.get("KINTONE_PASSWORD", "")
                self.database_uri = os.environ.get("MONGO_URI", "")
                if not self.database_uri:
                    self.database_uri = os.environ.get("MONGOLAB_URI", "")
                if not self.database_uri:
                    self.database_uri = os.environ.get("MONGOHQ_URI", "")
                self.mail_domain = os.environ.get("MAIL_DOMAIN")
                self.mail_api_key = os.environ.get("MAIL_API_KEY")
                self.translator_client_id = os.environ.get("TRANSLATOR_CLIENT_ID")
                self.translator_client_secret = os.environ.get("TRANSLATOR_CLIENT_SECRET")
        except Exception as ex:
            # NOTE(review): re-raising a fresh Exception discards the original
            # traceback; "raise ... from ex" would preserve it.
            raise Exception("environment is not set. please confirm environment.yaml on your root or environment variables")
    @classmethod
    def get_db(cls, env=None):
        """Return a MongoDBService bound to the configured database URI."""
        from kanaria.core.service.db import MongoDBService
        env = env if env else Environment()
        return MongoDBService(env.database_uri)
    @classmethod
    def get_kintone_service(cls, env=None):
        """Return an authenticated pykintone service for the configured account."""
        from pykintone.account import Account, kintoneService
        env = env if env else Environment()
        account = Account(env.kintone_domain, env.kintone_id, env.kintone_password)
        service = kintoneService(account)
        return service
    @classmethod
    def get_translator(cls, env=None):
        """Return a pyoxford translator built from the configured credentials."""
        import pyoxford
        env = env if env else Environment()
        translator = pyoxford.translator(env.translator_client_id, env.translator_client_secret)
        return translator
    def make_mail_address(self, user_name):
        """Build "<user_name>@<mail_domain>"."""
        return "{0}@{1}".format(user_name, self.mail_domain)
    def __str__(self):
        # Debug representation; note it includes the plain-text password.
        result = self.kintone_domain + " {0}/{1}".format(self.kintone_id, self.kintone_password)
        result += "\n" + self.database_uri
        return result
| {
"content_hash": "199d4d8606f02939e826ced9511c6171",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 124,
"avg_line_length": 44.036144578313255,
"alnum_prop": 0.559781121751026,
"repo_name": "icoxfog417/kanaria",
"id": "ed9aaf3137d27350cd9e322b993622350ab9f603",
"size": "3655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kanaria/core/environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47953"
}
],
"symlink_target": ""
} |
import datetime
import sys
import threading
import uuid
import fixtures
import kombu
import testscenarios
from oslo import messaging
from oslo.messaging._drivers import amqpdriver
from oslo.messaging._drivers import common as driver_common
from oslo.messaging._drivers import impl_rabbit as rabbit_driver
from oslo.messaging.openstack.common import jsonutils
from tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class TestRabbitDriverLoad(test_utils.BaseTestCase):
    """Verify the 'rabbit' transport name resolves to RabbitDriver."""

    def setUp(self):
        super(TestRabbitDriverLoad, self).setUp()
        self.messaging_conf.transport_driver = 'rabbit'
        self.messaging_conf.in_memory = True

    def test_driver_load(self):
        driver = messaging.get_transport(self.conf)._driver
        self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
class TestRabbitTransportURL(test_utils.BaseTestCase):
    """Check how transport URLs are parsed into kombu server parameters.

    Each scenario pairs a transport URL with the server_params dict the
    driver is expected to derive from it.
    """
    scenarios = [
        ('none', dict(url=None, expected=None)),
        ('empty',
         dict(url='rabbit:///',
              expected=dict(virtual_host=''))),
        ('localhost',
         dict(url='rabbit://localhost/',
              expected=dict(hostname='localhost',
                            username='',
                            password='',
                            virtual_host=''))),
        ('virtual_host',
         dict(url='rabbit:///vhost',
              expected=dict(virtual_host='vhost'))),
        ('no_creds',
         dict(url='rabbit://host/virtual_host',
              expected=dict(hostname='host',
                            username='',
                            password='',
                            virtual_host='virtual_host'))),
        ('no_port',
         dict(url='rabbit://user:password@host/virtual_host',
              expected=dict(hostname='host',
                            username='user',
                            password='password',
                            virtual_host='virtual_host'))),
        ('full_url',
         dict(url='rabbit://user:password@host:10/virtual_host',
              expected=dict(hostname='host',
                            port=10,
                            username='user',
                            password='password',
                            virtual_host='virtual_host'))),
    ]
    def setUp(self):
        super(TestRabbitTransportURL, self).setUp()
        self.messaging_conf.transport_driver = 'rabbit'
        self.messaging_conf.in_memory = True
        # Capture the server_params each new Connection is created with,
        # and stub out topic_send so send() never talks to a broker.
        self._server_params = []
        cnx_init = rabbit_driver.Connection.__init__
        def record_params(cnx, conf, server_params=None):
            self._server_params.append(server_params)
            return cnx_init(cnx, conf, server_params)
        def dummy_send(cnx, topic, msg, timeout=None):
            pass
        self.stubs.Set(rabbit_driver.Connection, '__init__', record_params)
        self.stubs.Set(rabbit_driver.Connection, 'topic_send', dummy_send)
        self._driver = messaging.get_transport(self.conf, self.url)._driver
        self._target = messaging.Target(topic='testtopic')
    def test_transport_url_listen(self):
        self._driver.listen(self._target)
        self.assertEqual(self._server_params[0], self.expected)
    def test_transport_url_listen_for_notification(self):
        self._driver.listen_for_notifications(
            [(messaging.Target(topic='topic'), 'info')])
        self.assertEqual(self._server_params[0], self.expected)
    def test_transport_url_send(self):
        self._driver.send(self._target, {}, {})
        self.assertEqual(self._server_params[0], self.expected)
class TestSendReceive(test_utils.BaseTestCase):
    """End-to-end send/receive matrix over the in-memory rabbit transport.

    Scenarios multiply sender counts, request contexts, reply payloads,
    failure modes and timeouts; each combination runs test_send_receive.
    """
    _n_senders = [
        ('single_sender', dict(n_senders=1)),
        ('multiple_senders', dict(n_senders=10)),
    ]
    _context = [
        ('empty_context', dict(ctxt={})),
        ('with_context', dict(ctxt={'user': 'mark'})),
    ]
    _reply = [
        ('rx_id', dict(rx_id=True, reply=None)),
        ('none', dict(rx_id=False, reply=None)),
        ('empty_list', dict(rx_id=False, reply=[])),
        ('empty_dict', dict(rx_id=False, reply={})),
        ('false', dict(rx_id=False, reply=False)),
        ('zero', dict(rx_id=False, reply=0)),
    ]
    _failure = [
        ('success', dict(failure=False)),
        ('failure', dict(failure=True, expected=False)),
        ('expected_failure', dict(failure=True, expected=True)),
    ]
    _timeout = [
        ('no_timeout', dict(timeout=None)),
        ('timeout', dict(timeout=0.01)),  # FIXME(markmc): timeout=0 is broken?
    ]
    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders,
                                                         cls._context,
                                                         cls._reply,
                                                         cls._failure,
                                                         cls._timeout)
    def setUp(self):
        super(TestSendReceive, self).setUp()
        self.messaging_conf.transport_driver = 'rabbit'
        self.messaging_conf.in_memory = True
    def test_send_receive(self):
        transport = messaging.get_transport(self.conf)
        self.addCleanup(transport.cleanup)
        driver = transport._driver
        target = messaging.Target(topic='testtopic')
        listener = driver.listen(target)
        senders = []
        replies = []
        msgs = []
        errors = []
        # Capture driver error logs so the log_failure behaviour can be
        # asserted at the end of the test.
        def stub_error(msg, *a, **kw):
            if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
                a = a[0]
            errors.append(str(msg) % a)
        self.stubs.Set(driver_common.LOG, 'error', stub_error)
        # Each sender thread performs one blocking call and records either
        # the reply or the raised exception.
        def send_and_wait_for_reply(i):
            try:
                replies.append(driver.send(target,
                                           self.ctxt,
                                           {'tx_id': i},
                                           wait_for_reply=True,
                                           timeout=self.timeout))
                self.assertFalse(self.failure)
                self.assertIsNone(self.timeout)
            except (ZeroDivisionError, messaging.MessagingTimeout) as e:
                replies.append(e)
                self.assertTrue(self.failure or self.timeout is not None)
        while len(senders) < self.n_senders:
            senders.append(threading.Thread(target=send_and_wait_for_reply,
                                            args=(len(senders), )))
        for i in range(len(senders)):
            senders[i].start()
            received = listener.poll()
            self.assertIsNotNone(received)
            self.assertEqual(received.ctxt, self.ctxt)
            self.assertEqual(received.message, {'tx_id': i})
            msgs.append(received)
        # reply in reverse, except reply to the first guy second from last
        order = list(range(len(senders)-1, -1, -1))
        if len(order) > 1:
            order[-1], order[-2] = order[-2], order[-1]
        for i in order:
            if self.timeout is None:
                if self.failure:
                    # Raise/catch purely to obtain a real exc_info triple
                    # to pass through the reply path.
                    try:
                        raise ZeroDivisionError
                    except Exception:
                        failure = sys.exc_info()
                    msgs[i].reply(failure=failure,
                                  log_failure=not self.expected)
                elif self.rx_id:
                    msgs[i].reply({'rx_id': i})
                else:
                    msgs[i].reply(self.reply)
            senders[i].join()
        self.assertEqual(len(replies), len(senders))
        for i, reply in enumerate(replies):
            if self.timeout is not None:
                self.assertIsInstance(reply, messaging.MessagingTimeout)
            elif self.failure:
                self.assertIsInstance(reply, ZeroDivisionError)
            elif self.rx_id:
                self.assertEqual(reply, {'rx_id': order[i]})
            else:
                self.assertEqual(reply, self.reply)
        # Unexpected failures must be logged; everything else must not be.
        if not self.timeout and self.failure and not self.expected:
            self.assertTrue(len(errors) > 0, errors)
        else:
            self.assertEqual(len(errors), 0, errors)
TestSendReceive.generate_scenarios()
class TestRacyWaitForReply(test_utils.BaseTestCase):
    """Regression test: a reply queued while another waiter is stalled must
    still reach the correct (delayed) sender."""
    def setUp(self):
        super(TestRacyWaitForReply, self).setUp()
        self.messaging_conf.transport_driver = 'rabbit'
        self.messaging_conf.in_memory = True
    def test_send_receive(self):
        transport = messaging.get_transport(self.conf)
        self.addCleanup(transport.cleanup)
        driver = transport._driver
        target = messaging.Target(topic='testtopic')
        listener = driver.listen(target)
        senders = []
        replies = []
        msgs = []
        wait_conditions = []
        orig_reply_waiter = amqpdriver.ReplyWaiter.wait
        # Wrap ReplyWaiter.wait so the first waiter blocks on a condition
        # until the test explicitly releases it.
        def reply_waiter(self, msg_id, timeout):
            if wait_conditions:
                with wait_conditions[0]:
                    wait_conditions.pop().wait()
            return orig_reply_waiter(self, msg_id, timeout)
        self.stubs.Set(amqpdriver.ReplyWaiter, 'wait', reply_waiter)
        def send_and_wait_for_reply(i):
            replies.append(driver.send(target,
                                       {},
                                       {'tx_id': i},
                                       wait_for_reply=True,
                                       timeout=None))
        while len(senders) < 2:
            t = threading.Thread(target=send_and_wait_for_reply,
                                 args=(len(senders), ))
            t.daemon = True
            senders.append(t)
        # Start the first guy, receive his message, but delay his polling
        notify_condition = threading.Condition()
        wait_conditions.append(notify_condition)
        senders[0].start()
        msgs.append(listener.poll())
        self.assertEqual(msgs[-1].message, {'tx_id': 0})
        # Start the second guy, receive his message
        senders[1].start()
        msgs.append(listener.poll())
        self.assertEqual(msgs[-1].message, {'tx_id': 1})
        # Reply to both in order, making the second thread queue
        # the reply meant for the first thread
        msgs[0].reply({'rx_id': 0})
        msgs[1].reply({'rx_id': 1})
        # Wait for the second thread to finish
        senders[1].join()
        # Let the first thread continue
        with notify_condition:
            notify_condition.notify()
        # Wait for the first thread to finish
        senders[0].join()
        # Verify replies were received out of order
        self.assertEqual(len(replies), len(senders))
        self.assertEqual(replies[0], {'rx_id': 1})
        self.assertEqual(replies[1], {'rx_id': 0})
def _declare_queue(target):
    """Declare the queue that a driver send() to *target* should land in.

    Mirrors the exchange/queue layout used by the rabbit driver so the
    wire-format tests can consume exactly what the driver published.
    Returns (connection, channel, queue); the caller releases the
    connection.
    """
    connection = kombu.connection.BrokerConnection(transport='memory')

    # Kludge to speed up tests.
    connection.transport.polling_interval = 0.0

    connection.connect()
    channel = connection.channel()

    # work around 'memory' transport bug in 1.1.3
    channel._new_queue('ae.undeliver')

    if target.fanout:
        exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
                                         type='fanout',
                                         durable=False,
                                         auto_delete=True)
        queue = kombu.entity.Queue(name=target.topic + '_fanout_12345',
                                   channel=channel,
                                   exchange=exchange,
                                   routing_key=target.topic)
    elif target.server:
        # Bug fix: this branch was a plain 'if', so a fanout target with
        # server=None fell through to the final 'else' and clobbered the
        # fanout queue declared above.  Now if/elif/else, matching the
        # structure of _create_producer().
        exchange = kombu.entity.Exchange(name='openstack',
                                         type='topic',
                                         durable=False,
                                         auto_delete=False)
        topic = '%s.%s' % (target.topic, target.server)
        queue = kombu.entity.Queue(name=topic,
                                   channel=channel,
                                   exchange=exchange,
                                   routing_key=topic)
    else:
        exchange = kombu.entity.Exchange(name='openstack',
                                         type='topic',
                                         durable=False,
                                         auto_delete=False)
        queue = kombu.entity.Queue(name=target.topic,
                                   channel=channel,
                                   exchange=exchange,
                                   routing_key=target.topic)

    queue.declare()

    return connection, channel, queue
class TestRequestWireFormat(test_utils.BaseTestCase):
    """Pin the on-the-wire envelope ('oslo.version'/'oslo.message') that
    driver.send() produces for each target/message/context combination."""
    _target = [
        ('topic_target',
         dict(topic='testtopic', server=None, fanout=False)),
        ('server_target',
         dict(topic='testtopic', server='testserver', fanout=False)),
        # NOTE(markmc): https://github.com/celery/kombu/issues/195
        ('fanout_target',
         dict(topic='testtopic', server=None, fanout=True,
              skip_msg='Requires kombu>2.5.12 to fix kombu issue #195')),
    ]
    _msg = [
        ('empty_msg',
         dict(msg={}, expected={})),
        ('primitive_msg',
         dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
        ('complex_msg',
         dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}},
              expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
    ]
    _context = [
        ('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
        ('user_project_ctxt',
         dict(ctxt={'user': 'mark', 'project': 'snarkybunch'},
              expected_ctxt={'_context_user': 'mark',
                             '_context_project': 'snarkybunch'})),
    ]
    @classmethod
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
                                                         cls._context,
                                                         cls._target)
    def setUp(self):
        super(TestRequestWireFormat, self).setUp()
        self.messaging_conf.transport_driver = 'rabbit'
        self.messaging_conf.in_memory = True
        # Patch uuid.uuid4 to record every generated UUID so the message's
        # _unique_id can be checked against the first one issued.
        self.uuids = []
        self.orig_uuid4 = uuid.uuid4
        self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4))
    def mock_uuid4(self):
        self.uuids.append(self.orig_uuid4())
        return self.uuids[-1]
    def test_request_wire_format(self):
        if hasattr(self, 'skip_msg'):
            self.skipTest(self.skip_msg)
        transport = messaging.get_transport(self.conf)
        self.addCleanup(transport.cleanup)
        driver = transport._driver
        target = messaging.Target(topic=self.topic,
                                  server=self.server,
                                  fanout=self.fanout)
        # Declare the queue the driver should publish into, then send.
        connection, channel, queue = _declare_queue(target)
        self.addCleanup(connection.release)
        driver.send(target, self.ctxt, self.msg)
        msgs = []
        def callback(msg):
            msg = channel.message_to_python(msg)
            msg.ack()
            msgs.append(msg.payload)
        queue.consume(callback=callback,
                      consumer_tag='1',
                      nowait=False)
        connection.drain_events()
        self.assertEqual(1, len(msgs))
        self.assertIn('oslo.message', msgs[0])
        received = msgs[0]
        received['oslo.message'] = jsonutils.loads(received['oslo.message'])
        # FIXME(markmc): add _msg_id and _reply_q check
        expected_msg = {
            '_unique_id': self.uuids[0].hex,
        }
        expected_msg.update(self.expected)
        expected_msg.update(self.expected_ctxt)
        expected = {
            'oslo.version': '2.0',
            'oslo.message': expected_msg,
        }
        self.assertEqual(expected, received)
TestRequestWireFormat.generate_scenarios()
def _create_producer(target):
    """Build a raw kombu producer publishing to the exchange/key for *target*.

    Returns a (connection, producer) pair; the caller owns the connection.
    """
    connection = kombu.connection.BrokerConnection(transport='memory')
    # Kludge to speed up tests.
    connection.transport.polling_interval = 0.0
    connection.connect()
    channel = connection.channel()
    # work around 'memory' transport bug in 1.1.3
    channel._new_queue('ae.undeliver')
    # Pick the exchange and routing key the rabbit driver would use.
    if target.fanout:
        exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
                                         type='fanout',
                                         durable=False,
                                         auto_delete=True)
        routing_key = target.topic
    else:
        exchange = kombu.entity.Exchange(name='openstack',
                                         type='topic',
                                         durable=False,
                                         auto_delete=False)
        if target.server:
            routing_key = '%s.%s' % (target.topic, target.server)
        else:
            routing_key = target.topic
    producer = kombu.messaging.Producer(exchange=exchange,
                                        channel=channel,
                                        routing_key=routing_key)
    return connection, producer
class TestReplyWireFormat(test_utils.BaseTestCase):
    """Publish raw wire-format messages and check how the driver decodes them.

    Mirror image of TestRequestWireFormat: here the bytes are injected with a
    plain kombu producer and the oslo.messaging listener does the decoding.
    """
    # Scenario axis: target kinds (plain topic, topic.server, fanout).
    _target = [
        ('topic_target',
         dict(topic='testtopic', server=None, fanout=False)),
        ('server_target',
         dict(topic='testtopic', server='testserver', fanout=False)),
        # NOTE(markmc): https://github.com/celery/kombu/issues/195
        ('fanout_target',
         dict(topic='testtopic', server=None, fanout=True,
              skip_msg='Requires kombu>2.5.12 to fix kombu issue #195')),
    ]
    # Scenario axis: wire payloads and the decoded message expected from them.
    _msg = [
        ('empty_msg',
         dict(msg={}, expected={})),
        ('primitive_msg',
         dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
        ('complex_msg',
         dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}},
              expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
    ]
    # Scenario axis: '_context_'-prefixed wire keys and the plain context
    # dict the listener is expected to reconstruct from them.
    _context = [
        ('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
        ('user_project_ctxt',
         dict(ctxt={'_context_user': 'mark',
                    '_context_project': 'snarkybunch'},
              expected_ctxt={'user': 'mark', 'project': 'snarkybunch'})),
    ]
    @classmethod
    def generate_scenarios(cls):
        # Cross product of all three axes above.
        cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
                                                         cls._context,
                                                         cls._target)
    def setUp(self):
        # kombu 'memory' transport: no real broker required for the test.
        super(TestReplyWireFormat, self).setUp()
        self.messaging_conf.transport_driver = 'rabbit'
        self.messaging_conf.in_memory = True
    def test_reply_wire_format(self):
        """Inject an envelope via kombu and assert the listener decodes it."""
        if hasattr(self, 'skip_msg'):
            self.skipTest(self.skip_msg)
        transport = messaging.get_transport(self.conf)
        self.addCleanup(transport.cleanup)
        driver = transport._driver
        target = messaging.Target(topic=self.topic,
                                  server=self.server,
                                  fanout=self.fanout)
        listener = driver.listen(target)
        connection, producer = _create_producer(target)
        self.addCleanup(connection.release)
        # Build the 2.0 envelope by hand, exactly as it appears on the wire.
        msg = {
            'oslo.version': '2.0',
            'oslo.message': {}
        }
        msg['oslo.message'].update(self.msg)
        msg['oslo.message'].update(self.ctxt)
        msg['oslo.message'].update({
            '_msg_id': uuid.uuid4().hex,
            '_unique_id': uuid.uuid4().hex,
            '_reply_q': 'reply_' + uuid.uuid4().hex,
        })
        # The inner message travels as a JSON string.
        msg['oslo.message'] = jsonutils.dumps(msg['oslo.message'])
        producer.publish(msg)
        received = listener.poll()
        self.assertIsNotNone(received)
        self.assertEqual(self.expected_ctxt, received.ctxt)
        self.assertEqual(self.expected, received.message)
# Materialize the scenario matrix now that the class body is fully defined.
TestReplyWireFormat.generate_scenarios()
class RpcKombuHATestCase(test_utils.BaseTestCase):
    """HA failover behaviour of the kombu/rabbit driver."""
    def test_reconnect_order(self):
        """Reconnects must cycle through rabbit_hosts in declaration order."""
        brokers = ['host1', 'host2', 'host3', 'host4', 'host5']
        brokers_count = len(brokers)
        self.conf.rabbit_hosts = brokers
        # One retry per broker so every reconnect attempt raises.
        self.conf.rabbit_max_retries = 1
        # Mutable cell so the stub closure can count attempts.
        info = {'attempt': 0}
        def _connect(myself, params):
            # do as little work that is enough to pass connection attempt
            myself.connection = kombu.connection.BrokerConnection(**params)
            myself.connection_errors = myself.connection.connection_errors
            # Each attempt must target the next broker, round-robin.
            expected_broker = brokers[info['attempt'] % brokers_count]
            self.assertEqual(params['hostname'], expected_broker)
            info['attempt'] += 1
            # just make sure connection instantiation does not fail with an
            # exception
        self.stubs.Set(rabbit_driver.Connection, '_connect', _connect)
        # starting from the first broker in the list
        connection = rabbit_driver.Connection(self.conf)
        # now that we have connection object, revert to the real 'connect'
        # implementation
        self.stubs.UnsetAll()
        for i in range(len(brokers)):
            self.assertRaises(driver_common.RPCException, connection.reconnect)
        connection.close()
| {
"content_hash": "30a580140e6b31360007977f6a4cd166",
"timestamp": "",
"source": "github",
"line_count": 632,
"max_line_length": 79,
"avg_line_length": 34.610759493670884,
"alnum_prop": 0.5282070037487429,
"repo_name": "JioCloud/oslo.messaging",
"id": "39afb3ca489327a11fa37e0441ddb4e0b4c2e2de",
"size": "22480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rabbit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "488745"
}
],
"symlink_target": ""
} |
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import time
import logging
import paramiko
import socket
from winrm import protocol
# Module-wide logging: timestamped messages at INFO level and above.
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
class SSHClient(object):
    """
    This class creates a paramiko.SSHClient() object that represents
    a session with an SSH server. You can use the SSHClient object to send
    commands to the remote host and manipulate files on the remote host.
    :param server: A server hostname or ip.
    :param host_key_file: The path to the user's .ssh key files.
    :param user: The username for the SSH connection. Default = 'root'.
    :param timeout: The optional timeout variable for the TCP connection.
    :param ssh_pwd: An optional password to use for authentication or for
                    unlocking the private key.
    :param ssh_key_file: SSH key pem data
    """
    def __init__(self, server, host_key_file='~/.ssh/known_hosts', user='root', timeout=None,
                 ssh_pwd=None, ssh_key_file=None):
        self.server = server
        self.host_key_file = host_key_file
        self.user = user
        self._timeout = timeout
        self._pkey = paramiko.RSAKey.from_private_key_file(ssh_key_file, password=ssh_pwd)
        self._ssh_client = paramiko.SSHClient()
        self._ssh_client.load_system_host_keys()
        self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
        self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Connect eagerly so a constructed client is immediately usable.
        self.connect()

    def connect(self, num_retries=5):
        """
        Connect to an SSH server and authenticate with it.
        :type num_retries: int
        :param num_retries: The maximum number of connection attempts.
        """
        retry = 0
        while retry < num_retries:
            try:
                self._ssh_client.connect(self.server, username=self.user, pkey=self._pkey,
                                         timeout=self._timeout)
                return
            except socket.error as se:
                (value, message) = se.args
                # ECONNREFUSED-style errnos across platforms; retry these.
                if value in (51, 61, 111):
                    log.error('SSH Connection refused, will retry in 5 seconds')
                    time.sleep(5)
                    retry += 1
                else:
                    raise
            except paramiko.BadHostKeyException:
                log.error("{} has an entry in ~/.ssh/known_hosts and it doesn't match".format(
                    self.server))
                retry += 1
            except EOFError:
                log.error('Unexpected Error from SSH Connection, retry in 5 seconds')
                time.sleep(5)
                retry += 1
        # NOTE(review): retries exhausted; only logged, not raised, to keep
        # the original best-effort behaviour for callers.
        log.error('Could not establish SSH connection')

    def open_sftp(self):
        """
        Open an SFTP session on the SSH server.
        :rtype: :class:`paramiko.sftp_client.SFTPClient`
        :return: An SFTP client object.
        """
        return self._ssh_client.open_sftp()

    def get_file(self, src, dst):
        """
        Open an SFTP session on the remote host, and copy a file from
        the remote host to the specified path on the local host.
        :type src: string
        :param src: The path to the target file on the remote host.
        :type dst: string
        :param dst: The path on your local host where you want to store the file.
        """
        sftp_client = self.open_sftp()
        sftp_client.get(src, dst)

    def put_file(self, src, dst):
        """
        Open an SFTP session on the remote host, and copy a file from
        the local host to the specified path on the remote host.
        :type src: string
        :param src: The path to the target file on your local host.
        :type dst: string
        :param dst: The path on the remote host where you want to store the file.
        """
        sftp_client = self.open_sftp()
        sftp_client.put(src, dst)

    def run(self, command, timeout=None):
        """
        Run a command on the remote host.
        :type command: string
        :param command: The command that you want to send to the remote host.
        :param timeout: pass timeout along the line.
        :rtype: tuple
        :return: This function returns a tuple that contains an integer status
                 (0 on success, 1 on SSH failure), the stdout from the command,
                 and the stderr from the command.
        """
        try:
            stdin, stdout, stderr = self._ssh_client.exec_command(command, timeout=timeout)
        except paramiko.SSHException:
            # BUG FIX: the previous code fell through with t == [] and then
            # crashed with an IndexError on t[1].read(), masking the real
            # failure. Report it through the status code instead.
            return 1, '', ''
        std_out = stdout.read()
        std_err = stderr.read()
        stdin.close()
        stdout.close()
        stderr.close()
        return 0, std_out, std_err

    def run_pty(self, command):
        """
        Request a pseudo-terminal from a server, and execute a command on that server.
        :type command: string
        :param command: The command that you want to run on the remote host.
        :rtype: :class:`paramiko.channel.Channel`
        :return: An open channel object.
        """
        channel = self._ssh_client.get_transport().open_session()
        channel.get_pty()
        channel.exec_command(command)
        return channel

    def close(self):
        """
        Close an SSH session and any open channels that are tied to it.
        """
        transport = self._ssh_client.get_transport()
        transport.close()
class WinRMClient(object):
    """
    This class creates a WinRM object that represents a session with a Windows server.
    :param host: A server hostname or ip.
    :param user: The username for the winrm connection.
    :param password: Password to use for authentication.
    :param port: WinRM port used to connect. Default is 5986.
    :param proto: Protocol used for communication. Default is https.
    """
    def __init__(self, host=None, user=None, password=None, port=5986, proto='https'):
        self.host = host
        self.user = user
        self.password = password
        self.port = port
        self.proto = proto

    def run(self, cmd=None, ps=False, transport='ssl', server_cert_validation='ignore'):
        """
        Run WinRM command.
        :param cmd: Windows command to run
        :param ps: <bool> to run powershell command instead
        :param transport: Cryptographic protocol. Default is ssl.
        :param server_cert_validation: Server side validation type. Default is ignore.
        :return: std_out of the command
        :raises ValueError: if no command is given.
        """
        if not cmd:
            # BUG FIX: previously this only logged and then continued,
            # crashing later with a confusing TypeError when the command
            # line was assembled. Fail fast with a clear error instead.
            raise ValueError('Please provide command to run remotely.')
        if ps:
            cmd = 'powershell -NoProfile -NonInteractive ' + cmd
        secure_host = '{}://{}:{}/wsman'.format(self.proto, self.host, self.port)
        # Two-hour operation timeout (ISO 8601 duration).
        protocol.Protocol.DEFAULT_TIMEOUT = "PT7200S"
        try:
            p = protocol.Protocol(endpoint=secure_host, transport=transport,
                                  username=self.user, password=self.password,
                                  server_cert_validation=server_cert_validation)
            shell_id = p.open_shell()
            command_id = p.run_command(shell_id, cmd)
            std_out, std_err, exit_code = p.get_command_output(shell_id, command_id)
            log.info('Output: {}'.format(std_out))
            log.debug('Output: {}\nError: {}\nExit Code: {}'.format(std_out, std_err, exit_code))
            # Non-zero exit is logged but not raised, preserving the original
            # best-effort contract for callers.
            if exit_code != 0:
                log.error('{}.\nFailed to run command: {}'.format(std_err, cmd))
            p.cleanup_command(shell_id, command_id)
            p.close_shell(shell_id)
        except Exception as e:
            log.error(e)
            raise
        return std_out
| {
"content_hash": "83be63ce248bd532445d62220b4881b4",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 97,
"avg_line_length": 39.649532710280376,
"alnum_prop": 0.6051856216853271,
"repo_name": "mihaico/lis-test",
"id": "767bdc1eeaa8da17847cb42c76a0b6161331b263",
"size": "8485",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "WS2012R2/lisa/tools/middleware_bench/utils/cmdshell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "30794"
},
{
"name": "PowerShell",
"bytes": "3503812"
},
{
"name": "Python",
"bytes": "494772"
},
{
"name": "Shell",
"bytes": "2059774"
}
],
"symlink_target": ""
} |
from rx.observable import Observable
from rx.internal.basic import noop
from rx.subjects import AsyncSubject
from rx.disposables import CompositeDisposable
from rx.concurrency import immediate_scheduler, current_thread_scheduler
from rx.internal import extensionmethod
class ChainObservable(Observable):
    """Observable emitting a head value followed by a chained tail.

    The tail is an AsyncSubject that holds exactly one follow-up observable;
    merge_observable() splices it into the subscriber's stream, so chains of
    these objects replay a sequence link by link.
    """
    def _subscribe(self, observer):
        g = CompositeDisposable()
        def action(scheduler, state):
            # Emit the head first, then whatever the tail produces.
            observer.on_next(self.head)
            g.add(self.tail.merge_observable().subscribe(observer))
        # Scheduled on the current-thread scheduler to preserve ordering
        # across chained links.
        g.add(current_thread_scheduler.schedule(action))
        return g
    def __init__(self, head):
        super(ChainObservable, self).__init__(self._subscribe)
        self.head = head
        self.tail = AsyncSubject()
    def on_completed(self):
        # Terminate the chain with an empty observable as the final link.
        self.on_next(Observable.empty())
    def on_error(self, e):
        # Propagate the failure as the chain's final link.
        self.on_next(Observable.throw_exception(e))
    def on_next(self, v):
        # AsyncSubject only emits its last value on completion, so push the
        # single tail value and complete immediately.
        self.tail.on_next(v)
        self.tail.on_completed()
@extensionmethod(Observable)
def many_select(self, selector, scheduler=None):
    """Comonadic bind operator. Internally projects a new observable for each
    value, and it pushes each observable into the user-defined selector function
    that projects/queries each observable into some result.
    Keyword arguments:
    selector -- {Function} A transform function to apply to each element.
    scheduler -- {Object} [Optional] Scheduler used to execute the
        operation. If not specified, defaults to the ImmediateScheduler.
    Returns {Observable} An observable sequence which results from the
    comonadic bind operation.
    """
    scheduler = scheduler or immediate_scheduler
    source = self
    def factory():
        # Single-element list used as a mutable cell so the nested closures
        # can rebind the current chain link.
        chain = [None]
        def mapper(x):
            # Each source element becomes a new chain link; the previous
            # link (if any) is fed the element so its tail points here.
            curr = ChainObservable(x)
            chain[0] and chain[0].on_next(x)
            chain[0] = curr
            return curr
        def on_error(e):
            # Forward termination into the last link so the chain ends.
            if chain[0]:
                chain[0].on_error(e)
        def on_completed():
            if chain[0]:
                chain[0].on_completed()
        return source.map(
            mapper
        ).tap(
            noop, on_error, on_completed
        ).observe_on(
            scheduler
        ).map(
            selector
        )
    # Deferred so each subscription gets its own fresh chain state.
    return Observable.defer(factory)
| {
"content_hash": "6061993a119ccaa80a339bc6dd5d9b67",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 81,
"avg_line_length": 28.012048192771083,
"alnum_prop": 0.632258064516129,
"repo_name": "dbrattli/RxPY",
"id": "c775492d435c55c79bb7ce7673dac16c495122b0",
"size": "2325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rx/linq/observable/manyselect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
} |
"""
Created on Fri Nov 13 11:51:49 2015
@author: pabloem
"""
import os
from PIL import Image
def filePaths(rootdir, key):
    """Return the paths of all files under rootdir whose names start with key.

    rootdir: directory where the recursive search begins.
    key: file-name prefix used to filter the search.
    """
    filepaths = []
    for dirpath, dirnames, filenames in os.walk(rootdir):
        for name in filenames:
            # BUG FIX: the original compared name[0:5] == key, which silently
            # required key to be exactly five characters long. startswith()
            # matches prefixes of any length, as the docstring promises.
            if name.startswith(key):
                filepaths.append(os.path.join(dirpath, name))
    return filepaths
# Halve every 'photo*' image under ../data, keeping an untouched backup.
imagePaths = filePaths('../data', 'photo')
for imagePath in imagePaths:
    img = Image.open(imagePath)
    print(img.format, img.size, img.mode)
    # Back up the original before overwriting the file with the resized copy.
    originalSize = imagePath + 'Original'
    img.save(originalSize, 'JPEG')
    # BUG FIX: use floor division. Plain '/' yields floats on Python 3 and
    # PIL's Image.resize requires a 2-tuple of ints.
    size = (img.size[0] // 2, img.size[1] // 2)
    imgNew = img.resize(size)
    imgNew.save(imagePath, 'JPEG')
| {
"content_hash": "415ce93651fc40cfb9975da313886bfd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 25.944444444444443,
"alnum_prop": 0.6295503211991434,
"repo_name": "pabloesm/site_recipes",
"id": "60920341a63ce62be58137eec0b7a7ff1dd38462",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/resizeImages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47808"
},
{
"name": "HTML",
"bytes": "8777"
},
{
"name": "JavaScript",
"bytes": "22057398"
},
{
"name": "Python",
"bytes": "958"
}
],
"symlink_target": ""
} |
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import platform
import subprocess as sp
def hide_console():
    """Startup-info for subprocess.Popen which hides the console on
    Windows.

    Returns None on every other platform, since STARTUPINFO only exists
    in the Windows build of the subprocess module.
    """
    if platform.system() != 'Windows':
        return None
    startup_info = sp.STARTUPINFO()
    startup_info.dwFlags |= sp.STARTF_USESHOWWINDOW
    startup_info.wShowWindow = sp.SW_HIDE
    return startup_info
| {
"content_hash": "a72ebf8505c634d57bab62d4c8ea89be",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 30.966666666666665,
"alnum_prop": 0.7029063509149623,
"repo_name": "lukas-ke/faint-graphics-editor",
"id": "bbf085767ef163a45e97005e24543b15adc1e64b",
"size": "978",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/faint/extra/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49581"
},
{
"name": "C++",
"bytes": "3170874"
},
{
"name": "Emacs Lisp",
"bytes": "13474"
},
{
"name": "HTML",
"bytes": "26096"
},
{
"name": "NSIS",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "537915"
}
],
"symlink_target": ""
} |
import masterPlugin
import MatrixUtils
## Wrapper exposing MatrixUtils.printWorkingDirectory() as a menu command
class printWorkingDirectory(masterPlugin.masterPlugin):
    def __init__(this):
        super().__init__()
        this.command = "printWorkingDirectory"
        this.aliases = ["pwd"]
        info = {'requiredArguments': None,
                'optionalArguments': None,
                'argumentInfo': None}
        info['help'] = 'Prints the current working directory'
        this.commandInfo = info

    def execute(this, arguments, WORKINGMATRIX):
        # Pure delegation; arguments and WORKINGMATRIX are intentionally unused.
        MatrixUtils.printWorkingDirectory()
| {
"content_hash": "ee79fc1e09a14ee70da0408029ef55dd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 29.57894736842105,
"alnum_prop": 0.6583629893238434,
"repo_name": "charlesdaniels/hercm",
"id": "360843732dd23fb0c184db3c1c9b792c5d28c059",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python33/menuPlugins/printWorkingDirectory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8577"
},
{
"name": "Makefile",
"bytes": "139"
},
{
"name": "Python",
"bytes": "113163"
}
],
"symlink_target": ""
} |
from gwt.ui.ScrollPanel import (
DOM,
Event,
Factory,
ScrollPanel,
SimplePanel,
)
| {
"content_hash": "2133d2caa85a8eed4e6fa10ea4b1d9a9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 32,
"avg_line_length": 14.571428571428571,
"alnum_prop": 0.6176470588235294,
"repo_name": "anandology/pyjamas",
"id": "3df7f9cbf26aab6882c0d540bc8ce41f1abef407",
"size": "102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/pyjamas/ui/ScrollPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "325172"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "6383764"
},
{
"name": "Shell",
"bytes": "19448"
}
],
"symlink_target": ""
} |
"""
WSGI config for myrestapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (pre-set environment values win),
# then build the module-level WSGI callable servers look for.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myrestapi.settings")
application = get_wsgi_application()
| {
"content_hash": "7746ebce70627c2735d4589e8d26317c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.6875,
"alnum_prop": 0.7721518987341772,
"repo_name": "JoseMariaMicoli/superlist-api-Django",
"id": "3cc74bb5a31ba668cedfacfa55b8687696004217",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myrestapi/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48755"
},
{
"name": "HTML",
"bytes": "17978"
},
{
"name": "JavaScript",
"bytes": "60476"
},
{
"name": "Python",
"bytes": "9703"
}
],
"symlink_target": ""
} |
"""
Created on Mon Aug 8 10:56:46 2016
Credit:
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# NetworkX:http://networkx.github.io/
Modified by @author: ahmadw1 on Aug 8 2016
PageRank analysis of graph structure.
pagerank function updated to return the number of iterations
in addition to the pagerank vector.
"""
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import not_implemented_for
__author__ = """\n""".join(["Aric Hagberg <aric.hagberg@gmail.com>",
"Brandon Liu <brandon.k.liu@gmail.com"])
__all__ = ['pagerank', 'pagerank_numpy', 'pagerank_scipy', 'google_matrix']
@not_implemented_for('multigraph')
def pagerank(G, alpha=0.85, personalization=None,
             max_iter=100, tol=1.0e-6, nstart=None, weight='weight',
             dangling=None):
    """Return the PageRank of the nodes in the graph.

    PageRank computes a ranking of the nodes in the graph G based on
    the structure of the incoming links. It was originally designed as
    an algorithm to rank web pages.

    Parameters
    ----------
    G : graph
      A NetworkX graph. Undirected graphs will be converted to a directed
      graph with two directed edges for each undirected edge.
    alpha : float, optional
      Damping parameter for PageRank, default=0.85.
    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each node.
      By default, a uniform distribution is used.
    max_iter : integer, optional
      Maximum number of iterations in power method eigenvalue solver.
    tol : float, optional
      Error tolerance used to check convergence in power method solver.
    nstart : dictionary, optional
      Starting value of PageRank iteration for each node.
    weight : key, optional
      Edge data key to use as weight. If None weights are set to 1.
    dangling: dict, optional
      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
      any outedges. The dict key is the node the outedge points to and the dict
      value is the weight of that outedge. By default, dangling nodes are given
      outedges according to the personalization vector (uniform if not
      specified). This must be selected to result in an irreducible transition
      matrix (see notes under google_matrix). It may be common to have the
      dangling dict to be the same as the personalization dict.

    Returns
    -------
    pagerank : dictionary
       Dictionary of nodes with PageRank as value
    nIter : integer
       Number of iterations taken for convergence of pagerank vector

    Examples
    --------
    >>> G = nx.DiGraph(nx.path_graph(4))
    >>> pr = nx.pagerank(G, alpha=0.9)

    Notes
    -----
    The eigenvector calculation is done by the power iteration method
    and has no guarantee of convergence. The iteration will stop
    after max_iter iterations or an error tolerance of
    number_of_nodes(G)*tol has been reached.

    The PageRank algorithm was designed for directed graphs but this
    algorithm does not check if the input graph is directed and will
    execute on undirected graphs by converting each edge in the
    directed graph to two edges.

    See Also
    --------
    pagerank_numpy, pagerank_scipy, google_matrix

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
       The PageRank citation ranking: Bringing order to the Web. 1999
       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
    """
    if len(G) == 0:
        return {}
    if not G.is_directed():
        D = G.to_directed()
    else:
        D = G
    # Create a copy in (right) stochastic form
    W = nx.stochastic_graph(D, weight=weight)
    N = W.number_of_nodes()
    # Choose fixed starting vector if not given
    if nstart is None:
        x = dict.fromkeys(W, 1.0 / N)
    else:
        # Normalized nstart vector
        s = float(sum(nstart.values()))
        x = dict((k, v / s) for k, v in nstart.items())
    if personalization is None:
        # Assign uniform personalization vector if not given
        p = dict.fromkeys(W, 1.0 / N)
    else:
        missing = set(G) - set(personalization)
        if missing:
            raise NetworkXError('Personalization dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        s = float(sum(personalization.values()))
        p = dict((k, v / s) for k, v in personalization.items())
    if dangling is None:
        # Use personalization vector if dangling vector not specified
        dangling_weights = p
    else:
        missing = set(G) - set(dangling)
        if missing:
            raise NetworkXError('Dangling node dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        s = float(sum(dangling.values()))
        dangling_weights = dict((k, v/s) for k, v in dangling.items())
    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
    # power iteration: make up to max_iter iterations
    # (loop variable renamed from '_' -- its value is part of the return)
    for iteration in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast.keys(), 0)
        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
        for n in x:
            # this matrix multiply looks odd because it is
            # doing a left multiply x^T=xlast^T*W
            for nbr in W[n]:
                x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
            x[n] += danglesum * dangling_weights[n] + (1.0 - alpha) * p[n]
        # check convergence, l1 norm
        err = sum([abs(x[n] - xlast[n]) for n in x])
        if err < N*tol:
            # 1-based count of sweeps actually performed
            return x, iteration + 1
    raise NetworkXError('pagerank: power iteration failed to converge '
                        'in %d iterations.' % max_iter)
def google_matrix(G, alpha=0.85, personalization=None,
                  nodelist=None, weight='weight', dangling=None):
    """Return the Google matrix of the graph.

    Parameters
    ----------
    G : graph
      A NetworkX graph. Undirected graphs will be converted to a directed
      graph with two directed edges for each undirected edge.
    alpha : float
      The damping factor.
    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each node.
      By default, a uniform distribution is used.
    nodelist : list, optional
      The rows and columns are ordered according to the nodes in nodelist.
      If nodelist is None, then the ordering is produced by G.nodes().
    weight : key, optional
      Edge data key to use as weight. If None weights are set to 1.
    dangling: dict, optional
      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
      any outedges. The dict key is the node the outedge points to and the dict
      value is the weight of that outedge. By default, dangling nodes are given
      outedges according to the personalization vector (uniform if not
      specified) This must be selected to result in an irreducible transition
      matrix (see notes below). It may be common to have the dangling dict to
      be the same as the personalization dict.

    Returns
    -------
    A : NumPy matrix
       Google matrix of the graph

    Notes
    -----
    The matrix returned represents the transition matrix that describes the
    Markov chain used in PageRank. For PageRank to converge to a unique
    solution (i.e., a unique stationary distribution in a Markov chain), the
    transition matrix must be irreducible. In other words, it must be that
    there exists a path between every pair of nodes in the graph, or else there
    is the potential of "rank sinks."

    This implementation works with Multi(Di)Graphs. For multigraphs the
    weight between two nodes is set to be the sum of all edge weights
    between those nodes.

    See Also
    --------
    pagerank, pagerank_numpy, pagerank_scipy
    """
    import numpy as np
    if nodelist is None:
        nodelist = G.nodes()
    M = nx.to_numpy_matrix(G, nodelist=nodelist, weight=weight)
    N = len(G)
    if N == 0:
        # Empty graph: the (empty) adjacency matrix is returned as-is.
        return M
    # Personalization vector
    if personalization is None:
        p = np.repeat(1.0 / N, N)
    else:
        missing = set(nodelist) - set(personalization)
        if missing:
            raise NetworkXError('Personalization vector dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        p = np.array([personalization[n] for n in nodelist], dtype=float)
        p /= p.sum()
    # Dangling nodes
    if dangling is None:
        dangling_weights = p
    else:
        missing = set(nodelist) - set(dangling)
        if missing:
            raise NetworkXError('Dangling node dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        # Convert the dangling dictionary into an array in nodelist order
        dangling_weights = np.array([dangling[n] for n in nodelist],
                                    dtype=float)
        dangling_weights /= dangling_weights.sum()
    dangling_nodes = np.where(M.sum(axis=1) == 0)[0]
    # Assign dangling_weights to any dangling nodes (nodes with no out links)
    for node in dangling_nodes:
        M[node] = dangling_weights
    M /= M.sum(axis=1)  # Normalize rows to sum to 1
    return alpha * M + (1 - alpha) * p
def pagerank_numpy(G, alpha=0.85, personalization=None, weight='weight',
                   dangling=None):
    """Return the PageRank of the nodes in the graph.

    PageRank computes a ranking of the nodes in the graph G based on
    the structure of the incoming links. It was originally designed as
    an algorithm to rank web pages.

    Parameters
    ----------
    G : graph
      A NetworkX graph. Undirected graphs will be converted to a directed
      graph with two directed edges for each undirected edge.
    alpha : float, optional
      Damping parameter for PageRank, default=0.85.
    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each
      node. By default, a uniform distribution is used.
    weight : key, optional
      Edge data key to use as weight. If None weights are set to 1.
    dangling: dict, optional
      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
      any outedges. The dict key is the node the outedge points to and the dict
      value is the weight of that outedge. By default, dangling nodes are given
      outedges according to the personalization vector (uniform if not
      specified) This must be selected to result in an irreducible transition
      matrix (see notes under google_matrix). It may be common to have the
      dangling dict to be the same as the personalization dict.

    Returns
    -------
    pagerank : dictionary
       Dictionary of nodes with PageRank as value.

    Examples
    --------
    >>> G = nx.DiGraph(nx.path_graph(4))
    >>> pr = nx.pagerank_numpy(G, alpha=0.9)

    Notes
    -----
    The eigenvector calculation uses NumPy's interface to the LAPACK
    eigenvalue solvers. This will be the fastest and most accurate
    for small graphs.

    This implementation works with Multi(Di)Graphs. For multigraphs the
    weight between two nodes is set to be the sum of all edge weights
    between those nodes.

    See Also
    --------
    pagerank, pagerank_scipy, google_matrix

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
       The PageRank citation ranking: Bringing order to the Web. 1999
       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
    """
    import numpy as np
    if len(G) == 0:
        return {}
    M = google_matrix(G, alpha, personalization=personalization,
                      weight=weight, dangling=dangling)
    # use numpy LAPACK solver
    eigenvalues, eigenvectors = np.linalg.eig(M.T)
    ind = eigenvalues.argsort()
    # eigenvector of largest eigenvalue at ind[-1], normalized
    largest = np.array(eigenvectors[:, ind[-1]]).flatten().real
    norm = float(largest.sum())
    return dict(zip(G, map(float, largest / norm)))
def pagerank_scipy(G, alpha=0.85, personalization=None,
                   max_iter=100, tol=1.0e-6, weight='weight',
                   dangling=None):
    """Return the PageRank of the nodes in the graph.

    PageRank computes a ranking of the nodes in the graph G based on
    the structure of the incoming links. It was originally designed as
    an algorithm to rank web pages.

    Parameters
    ----------
    G : graph
      A NetworkX graph. Undirected graphs will be converted to a directed
      graph with two directed edges for each undirected edge.
    alpha : float, optional
      Damping parameter for PageRank, default=0.85.
    personalization: dict, optional
      The "personalization vector" consisting of a dictionary with a
      key for every graph node and nonzero personalization value for each
      node. By default, a uniform distribution is used.
    max_iter : integer, optional
      Maximum number of iterations in power method eigenvalue solver.
    tol : float, optional
      Error tolerance used to check convergence in power method solver.
    weight : key, optional
      Edge data key to use as weight. If None weights are set to 1.
    dangling: dict, optional
      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
      any outedges. The dict key is the node the outedge points to and the dict
      value is the weight of that outedge. By default, dangling nodes are given
      outedges according to the personalization vector (uniform if not
      specified) This must be selected to result in an irreducible transition
      matrix (see notes under google_matrix). It may be common to have the
      dangling dict to be the same as the personalization dict.

    Returns
    -------
    pagerank : dictionary
       Dictionary of nodes with PageRank as value

    Examples
    --------
    >>> G = nx.DiGraph(nx.path_graph(4))
    >>> pr = nx.pagerank_scipy(G, alpha=0.9)

    Notes
    -----
    The eigenvector calculation uses power iteration with a SciPy
    sparse matrix representation.

    This implementation works with Multi(Di)Graphs. For multigraphs the
    weight between two nodes is set to be the sum of all edge weights
    between those nodes.

    See Also
    --------
    pagerank, pagerank_numpy, google_matrix

    References
    ----------
    .. [1] A. Langville and C. Meyer,
       "A survey of eigenvector methods of web information retrieval."
       http://citeseer.ist.psu.edu/713792.html
    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
       The PageRank citation ranking: Bringing order to the Web. 1999
       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
    """
    import scipy.sparse
    N = len(G)
    if N == 0:
        return {}
    nodelist = G.nodes()
    # Weighted adjacency matrix in nodelist order; for multigraphs parallel
    # edge weights are summed by to_scipy_sparse_matrix.
    M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                  dtype=float)
    # S holds the reciprocal of each node's out-weight; rows with zero
    # out-weight (dangling nodes) are left at 0.
    S = scipy.array(M.sum(axis=1)).flatten()
    S[S != 0] = 1.0 / S[S != 0]
    # Left-multiplying by diag(S) normalizes every nonzero row to sum to 1,
    # turning M into the random-walk transition matrix.
    Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
    M = Q * M
    # initial vector: uniform distribution over the nodes
    x = scipy.repeat(1.0 / N, N)
    # Personalization vector (teleport distribution)
    if personalization is None:
        p = scipy.repeat(1.0 / N, N)
    else:
        missing = set(nodelist) - set(personalization)
        if missing:
            raise NetworkXError('Personalization vector dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        p = scipy.array([personalization[n] for n in nodelist],
                        dtype=float)
        p = p / p.sum()
    # Dangling nodes: where their probability mass gets redistributed
    if dangling is None:
        # By default the dangling mass follows the personalization vector.
        dangling_weights = p
    else:
        missing = set(nodelist) - set(dangling)
        if missing:
            raise NetworkXError('Dangling node dictionary '
                                'must have a value for every node. '
                                'Missing nodes %s' % missing)
        # Convert the dangling dictionary into an array in nodelist order
        dangling_weights = scipy.array([dangling[n] for n in nodelist],
                                       dtype=float)
        dangling_weights /= dangling_weights.sum()
    # Indices of rows whose out-weight was zero (S == 0).
    is_dangling = scipy.where(S == 0)[0]
    # power iteration: make up to max_iter iterations
    for _ in range(max_iter):
        xlast = x
        # One step: follow links with probability alpha (dangling mass is
        # spread via dangling_weights), teleport with probability 1 - alpha.
        x = alpha * (x * M + sum(x[is_dangling]) * dangling_weights) + \
            (1 - alpha) * p
        # check convergence, l1 norm
        err = scipy.absolute(x - xlast).sum()
        if err < N * tol:
            return dict(zip(nodelist, map(float, x)))
    raise NetworkXError('pagerank_scipy: power iteration failed to converge '
                        'in %d iterations.' % max_iter)
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip the whole test module when NumPy or SciPy is missing.

    Parameters
    ----------
    module : module
        The test module being set up (required by nose's fixture protocol;
        unused here).

    Raises
    ------
    nose.SkipTest
        If either NumPy or SciPy cannot be imported.
    """
    from nose import SkipTest
    try:
        import numpy
    except ImportError:
        # Catch only ImportError: the previous bare ``except:`` would also
        # have swallowed unrelated errors such as KeyboardInterrupt.
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except ImportError:
        raise SkipTest("SciPy not available")
| {
"content_hash": "e6007085c2a70a6253851773f65b1931",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 90,
"avg_line_length": 36.152475247524755,
"alnum_prop": 0.6345511310730131,
"repo_name": "walid-ahmad/TieDecay",
"id": "d64a16b5d6214b5a26b965892fb7f44ddff8e899",
"size": "18281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prcust.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30396"
}
],
"symlink_target": ""
} |
import foauth.providers
class Dropbox(foauth.providers.OAuth2):
    """OAuth2 provider definition for Dropbox."""

    # General info about the provider
    provider_url = 'https://www.dropbox.com/'
    docs_url = 'https://www.dropbox.com/developers/reference/api'
    favicon_url = 'https://cf.dropboxstatic.com/static/images/favicon-vflk5FiAC.ico'
    category = 'Files'

    # URLs to interact with the API
    authorize_url = 'https://www.dropbox.com/oauth2/authorize'
    access_token_url = 'https://api.dropboxapi.com/oauth2/token'
    # Domains whose requests are routed through this provider's credentials.
    api_domains = ['api.dropboxapi.com', 'content.dropboxapi.com', 'notify.dropboxapi.com']

    # Dropbox OAuth2 has no scope granularity here: a single full-access grant.
    available_permissions = [
        (None, 'read and write to your entire Dropbox'),
    ]

    def get_user_id(self, key):
        """Return the Dropbox account id for the user authenticated by ``key``.

        Calls the v2 ``users/get_current_account`` endpoint via POST and
        extracts the ``account_id`` field from the JSON response.
        """
        r = self.api(key, self.api_domains[0], u'/2/users/get_current_account', method='POST')
        # NOTE(review): ``unicode`` makes this Python 2-only code.
        return unicode(r.json()[u'account_id'])
| {
"content_hash": "d83aab76990242b8ff78cc486965b9dd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 38.68181818181818,
"alnum_prop": 0.6768507638072856,
"repo_name": "foauth/foauth.org",
"id": "ec606b0f3dfbed9faf9e823bcba97cda8d41fe35",
"size": "851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/dropbox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8695"
},
{
"name": "HTML",
"bytes": "31228"
},
{
"name": "Python",
"bytes": "124340"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 20 13:32:28 2016
@author: pablo
"""
import os
# Climb two directories above this file's own directory (src/SSM -> src ->
# repository root) so the project's ``src`` package is importable below.
dir_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import sys
sys.path.append(dir_path)  # path to src
from src import np,odeint,interp1d,Hyperplume,plt
class SSM(Hyperplume):
    """Self Similar model of a plasma plume expansion. Class SSM inherits methods __init__, solver
    and query from parent class Hyperplume, and particularizes them."""

    def __init__(self,plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},M_0=40,d_0=0.2,z_span=np.linspace(0,100,500),r_span=np.linspace(0,40,500),n_init=0.0472*np.linspace(1,0,500)**2):
        """Constructor __init__ loads and initialises the main class attributes.
        Calls parent class Hyperplume constructor method __init__ to store main plasma properties as attributes in the class.

        Args:
            plasma (dict): simple_plasma object dictionary containing basic plasma parameters.
            z_span (numpy.ndarray): axial region where the problem will be integrated.
            r_span (numpy.ndarray): initial far-field plasma radial profile.
            n_init (numpy.ndarray): initial dimensional density front.
            M_0 (float): Plasma Mach number at (z,r) = (0,0)
            d_0 (float): Tangent of initial plume divergence angle

        Implementation:
            >>> e_charge,ion_mass,Plasma_temp,gamma_value=1.6e-19,2.1801714e-25,2.1801714e-19,1 #Main Plasma Parameters
            >>> Plasma = Hyperplume().simple_plasma(e_charge,ion_mass,Plasma_temp,gamma_value) #Loading Plasma dict
            >>> z_span = np.linspace(0,110,5000) # Axial plume grid for integration
            >>> r_0 = np.linspace(0,10,5000) #Initial plume radial profile
            >>> n0 = np.exp(-6.15/2 * r_0**2) #Initial far-field plume density
            >>> M0,d0=20,0.2 #SSM parameters: Mach number and initial far-field plume divergence
            >>> Plume = SSM(Plasma,M0,d0,z_span,r_0,n0) #Creating SS plume
        """
        # NOTE(review): the dict/array default arguments above are mutable and
        # shared across calls; kept as-is to preserve the public interface.
        super(SSM,self).__init__(plasma,z_span,r_span,n_init)
        self.M_0,self.d_0 = M_0,d_0  # Mach number and initial divergence. Variables inherent to SSM Plume

    def solver(self):
        """Solver method solves for model constraints C and h, as well as the initial dimensionless axial velocity vector upsilon
        and initial dimensionless density profile nu, using SSM model equations. It then saves these plume variables as class attributes,
        in the form of interpolation libraries over the entire plume grid.
        Solver method is a particularization of the abstract Hyperplume.solver() method.

        Implementation:
            >>> Plume.solver() # be sure to create a valid SSM plasma plume before applying the plume solver method

        To access the interpolation libraries and SSM constraints particularly:
            >>> print(Plume.C,Plume.h) #SSM model constraints
            >>> Plume.nu_interp #Initial dimensionless density interpolation library
            >>> Plume.upsilon_interp #Initial dimensionless axial velocity interpolation library
        """
        nu = self.n0/self.n0[0]  # Dimensionless initial density front
        # NOTE(review): this np.empty allocation is dead — it is immediately
        # overwritten by the eta_deriver result on the next line.
        nu_prime = np.empty(nu.size)  # Derivative of initial dimensionless density front. Needed for upsilon calculations
        nu_prime = self.eta_deriver(self.eta,nu)  # Call to superclass Hyperplume() method self.eta_deriver(x,y)
        nu_prime[0] = 0  # Edge array conditions for the derivative of the density front
        nu_prime[-1] = (nu[-1] - nu[-2]) / (self.eta[-1] - self.eta[-2])
        self.C = -2*(nu[1] - nu[0]) / (self.eta[1] - self.eta[0])**2  # Scaling separation constant of SSM model
        upsilon = np.sqrt(-nu**(self.Gamma-2) * nu_prime / (self.eta * self.C))  # Dimensionless initial axial velocity front
        # eta = 0 would divide by zero above; the axis value is fixed to 1.
        upsilon[0] = 1

        def dh_fun(h,Z):
            """dh_fun function calculates the derivative of the self-similar dilation function h(z),
            and saves the results as a class attribute in column-array format.

            Args:
                h (numpy.ndarray): SSM model scaling function
                Z (numpy.ndarray): axial span for integration. Coincident with initial axial span loaded in SSM
                                   class constructor, for accuracy and correctness.

            Returns:
                dh (numpy.ndarray): derivative of SSM scaling function
            """
            # The bare string below is a leftover no-op statement in the
            # original source; kept byte-identical.
            "Checking thermal expanion model (isothermal or polytropic coesfficient)"
            # Gamma == 1 is the isothermal case; otherwise polytropic.
            if self.Gamma == 1:
                dh = np.sqrt(self.d_0**2 + (self.C/self.M_0**2) * 2 * np.log(h))
            else:
                dh = np.sqrt(self.d_0**2 + (self.C/self.M_0**2) * -(h**(2-2*self.Gamma) -1) * 1/(self.Gamma-1))
            return dh

        h_init = 1
        h = odeint(dh_fun,h_init, self.z_span)  # solves numerically the ODE in dh_fun
        dh = dh_fun(h, self.z_span)  # Call dh_fun again to evaluate dh on the solution
        """Creation of 1D interpolation libraries for the main attributes, to be used later in query method at
        the targeted (Z,r) in the plume."""
        # odeint returns a column array; reshape to a flat vector so interp1d
        # accepts it.
        self.h = np.reshape(h,self.z_span.size)
        self.h_interp = interp1d(self.z_span,self.h,kind='linear')  # interpolator of self-similarity h(z) function
        self.dh = np.reshape(dh,self.z_span.size)  # same reshape as for h
        self.dh_interp = interp1d(self.z_span,self.dh,kind = 'linear')  # interpolator of dh(z)
        self.nu_interp = interp1d(self.eta,nu,kind = 'linear')  # interpolator of dimensionless initial density
        self.nu_prime_interp = interp1d(self.eta,nu_prime,kind = 'linear')  # interpolator of its derivative
        self.upsilon_interp = interp1d(self.eta,upsilon,kind='linear')  # interpolator of dimensionless initial axial velocity

    def query(self,z,r):
        """Method query returns the density, velocity profile, temperature, the electric potential and SSM error at
        particular (z,r) points by interpolation over the Plume grid.
        SSM method query is a particularization of the abstract Hyperplume method Hyperplume.query().

        Args:
            z (int,numpy.ndarray): axial target points where plasma variables are retrieved. Single points, arrays of locations and meshgrids are valid.
            r (int,numpy.ndarray): radial target points where plasma variables are retrieved. Single points, arrays of locations and meshgrids are valid.

        Returns:
            lnn (int,numpy.ndarray): logarithmic plasma density at specified (z,r) points in plume grid
            u_z (int,numpy.ndarray): plasma axial velocity at specified (z,r) points in plume grid
            u_r (int,numpy.ndarray): plasma radial velocity at specified (z,r) points in plume grid
            T (int,numpy.ndarray): plasma temperature at specified (z,r) points in plume grid
            phi (int,numpy.ndarray): plasma ambipolar electric potential at specified (z,r) points in plume grid
            error (int,numpy.ndarray): SSM error created by imposing model constraints at specified (z,r) points in plume grid
            eta (int,numpy.ndarray): ion current stream lines at specified (z,r) points in plume grid

        Usage:
            >>> z,r = np.linspace(0,100,50),np.linspace(0,50,40) #target (z,r) for plume study
            >>> lnn,u_z,u_r,T,phi,error,eta=Plume.query(z,r)
        """
        eta = r/self.h_interp(z)  # self-similar coordinate at the targeted grid points
        n = self.n0[0] * self.nu_interp(eta) * 1/self.h_interp(z)**2  # Dimensional density at targeted (z,r) points
        lnn = np.log(n)
        # Calling various Hyperplume methods to calculate remaining plasma parameters based on plume density
        T = self.temp(n,self.n0[0],self.T_0,self.Gamma)  # Dimensional temperature at targeted (z,r) points
        phi = self.phi(n,self.n0[0],self.T_0,self.Gamma,self.q_ion)  # Dimensional potential at targeted (z,r) points
        u_z = self.M_0*np.sqrt(self.Gamma*self.T_0/self.m_ion) * self.upsilon_interp(eta)  # Dimensional axial velocity at targeted (z,r) points
        u_r = self.d_0 * u_z * self.dh_interp(z) * eta  # Dimensional radial velocity at targeted (z,r) points
        error = self.C * self.dh_interp(z) / (self.M_0**2 * (self.h_interp(z)**(2*self.Gamma-1))) * (4 * eta * self.nu_interp(eta) / self.nu_prime_interp(eta) + 2 * eta**2)  # SSM error at targeted (z,r) points
        return lnn,u_z,u_r,T,phi,error,eta
# Helper functions
def type_parks(plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},M_0=40,d_0=0.2,z_span=np.linspace(0,100,500),r_0=np.linspace(0,40,500),C=-2*np.log(0.05)):
    """Build and solve an SSM plume preloaded with Parks' theoretical profiles.

    The function fixes the initial far-field density to Parks' model,
    ``n0 = exp(-C * r_0**2 / 2)``, solves the plume with the general SSM
    equations, and then overrides the initial axial velocity with Parks'
    uniform profile.

    Args:
        plasma (dict): Hyperplume's simple_plasma object, or a similar plasma dictionary containing basic parameters.
        M_0 (float): Plasma Mach number at (z,r) = (0,0).
        d_0 (float): Tangent of initial plume divergence angle.
        z_span (numpy.ndarray): axial region where the problem will be integrated.
        r_0 (numpy.ndarray): initial far-field plasma radial profile.
        C (float): separation constant used for scaling the self-similar plume
            problem; it determines the Parks initial density profile.

    Returns:
        Plume (SSM): solved SSM plume with Parks' density/velocity models, or
        ``None`` if the plasma is not isothermal (Gamma != 1), matching the
        original behavior.

    Usage:
        >>> Plume_parks = type_parks(Plasma, M0, d0, z_span, r_0, C)
        >>> lnn,u_z,u_r,T,phi,error,eta = Plume_parks.query(z, r)
    """
    # BUGFIX: the original used ``is not 1`` — an identity comparison that
    # only works for CPython's small-int cache and raises a SyntaxWarning on
    # Python 3.8+. Use a value comparison instead.
    if plasma['Electrons']['Gamma'] != 1:
        print('Gamma must be 1 for Parks model')
        return None  # explicit: no plume is built for non-isothermal plasmas
    n0 = np.exp(-C * r_0**2 / 2)  # Parks initial far-region density profile
    Plume = SSM(plasma, M_0, d_0, z_span, r_0, n0)
    Plume.solver()  # solve with the general SSM equations
    # Parks model prescribes a uniform initial axial velocity profile.
    Plume.upsilon = np.ones(Plume.n0.shape)
    _, _, _, _, _, _, eta = Plume.query(z_span, r_0)  # eta from the general model
    # Re-interpolate the velocity over the computed stream-line coordinate.
    Plume.upsilon_interp = interp1d(eta, Plume.upsilon, kind='linear')
    return Plume
def type_korsun(plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},M_0=40,d_0=0.2,z_span=np.linspace(0,100,500),r_0=np.linspace(0,40,500),C=2*(0.05**(-2/1.3)-1)):
    """Build and solve an SSM plume preloaded with Korsun's theoretical profiles.

    The initial far-field density and axial velocity follow Korsun's model:
        n0      = 1 / (1 + C/2 * r_0**2)
        upsilon = (1 + C/2 * r_0**2)**(-Gamma/2)

    Args:
        plasma (dict): Hyperplume's simple_plasma object, or a similar plasma dictionary containing basic parameters.
        M_0 (float): Plasma Mach number at (z,r) = (0,0).
        d_0 (float): Tangent of initial plume divergence angle.
        z_span (numpy.ndarray): axial region where the problem will be integrated.
        r_0 (numpy.ndarray): initial far-field plasma radial profile.
        C (float): separation constant used for scaling the self-similar plume
            problem; it fixes Korsun's initial density and velocity profiles.

    Returns:
        SSM: solved plume object preloaded with Korsun's profiles.
    """
    korsun_density = 1 / (1 + C / 2 * r_0**2)  # Korsun initial far-region density
    plume = SSM(plasma, M_0, d_0, z_span, r_0, korsun_density)
    plume.solver()  # solve with the general SSM equations
    # Korsun initial far-region axial velocity profile.
    plume.upsilon = (1 + C / 2 * r_0**2)**(-plume.Gamma / 2)
    # Stream-line coordinate from the general model; only eta is needed here.
    _, _, _, _, _, _, eta = plume.query(z_span, r_0)
    # Re-interpolate the fixed Korsun velocity over that coordinate.
    plume.upsilon_interp = interp1d(eta, plume.upsilon, kind='linear')
    return plume
def type_ashkenazy(plasma={'Electrons': {'Gamma': 1,'T_0_electron': 2.1801714e-19,'q_electron': -1.6e-19},'Ions': {'mass_ion': 2.1801714e-25, 'q_ion': 1.6e-19}},M_0=40,d_0=0.2,z_span=np.linspace(0,100,500),r_0=np.linspace(0,40,500),C=0.2**2*(1-2*np.log(0.01)/np.log(1+0.2**2))):
    """Build and solve an SSM plume preloaded with Ashkenazy's theoretical profiles.

    With ``k = d_0**2``, the initial far-field profiles are:
        n0      = (1 + k*r_0**2)**(-C/(2*k))
        upsilon = (1 + k*r_0**2)**(-1/2)

    Args:
        plasma (dict): Hyperplume's simple_plasma object, or a similar plasma dictionary containing basic parameters.
        M_0 (float): Plasma Mach number at (z,r) = (0,0).
        d_0 (float): Tangent of initial plume divergence angle.
        z_span (numpy.ndarray): axial region where the problem will be integrated.
        r_0 (numpy.ndarray): initial far-field plasma radial profile.
        C (float): separation constant used for scaling the self-similar plume
            problem; it fixes Ashkenazy's initial density profile.

    Returns:
        Plume (SSM): solved SSM plume with Ashkenazy's profiles, or ``None``
        if the plasma is not isothermal (Gamma != 1), matching the original
        behavior.

    Usage:
        >>> Plume_ash = type_ashkenazy(Plasma, M0, d0, z_span, r_0, C)
        >>> lnn,u_z,u_r,T,phi,error,eta = Plume_ash.query(z, r)
    """
    # BUGFIX: the original used ``is not 1`` — an identity comparison that
    # only works for CPython's small-int cache and raises a SyntaxWarning on
    # Python 3.8+. Use a value comparison instead.
    if plasma['Electrons']['Gamma'] != 1:
        print('Gamma must be 1 for Ashkenazy model')
        return None  # explicit: no plume is built for non-isothermal plasmas
    k = d_0**2
    n0 = (1 + k*r_0**2)**(-C/(2*k))  # Ashkenazy initial far-region density
    Plume = SSM(plasma, M_0, d_0, z_span, r_0, n0)
    Plume.solver()  # solve with the general SSM equations
    # Ashkenazy initial far-region axial velocity profile.
    Plume.upsilon = (1 + k*r_0**2)**(-1/2)
    _, _, _, _, _, _, eta = Plume.query(z_span, r_0)  # eta from the general model
    # Re-interpolate the fixed velocity over the computed stream-line coordinate.
    Plume.upsilon_interp = interp1d(eta, Plume.upsilon, kind='linear')
    return Plume
| {
"content_hash": "c83099f1e628bd9b61642f76f993a6b3",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 280,
"avg_line_length": 53.096969696969694,
"alnum_prop": 0.6223604611345737,
"repo_name": "Pabsm94/HyperPlume",
"id": "7854584c6d5f0474a568180df09a67ab4a2d2c14",
"size": "17524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SSM/SSM_plume.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138271"
}
],
"symlink_target": ""
} |
# Developer build script: compiles the pytpm Cython extension against a
# locally-built TPM library.
import os
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Build import cythonize

# Directory layout: C sources for TPM live under src/tpm.
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.join(CUR_DIR, 'src')
TPM_DIR = os.path.join(SRC_DIR, 'tpm')  # defined for reference; not used below

include_dirs = [SRC_DIR]
src_files = ["pytpm/_tpm.pyx"]

# TPM library and path to the library.
# NOTE(review): assumes libtpm was built and installed into ~/lib/tpm —
# confirm before running this developer setup.
library_dirs = [os.path.expanduser("~/lib/tpm")]
libraries = ['tpm']

# Single Cython extension module, compiled from the .pyx and linked to libtpm.
ext_modules = [
    Extension(
        "pytpm._tpm", src_files,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        libraries=libraries
    )
]

setup(
    name='pytpm',
    packages=['pytpm'],
    package_dir={'pytpm': 'pytpm'},
    package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']},
    ext_modules=cythonize(ext_modules)
)
| {
"content_hash": "dc368be46f39a7b04744b16dbc46f6cb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 56,
"avg_line_length": 24.806451612903224,
"alnum_prop": 0.6423927178153446,
"repo_name": "phn/pytpm",
"id": "576c54cc4416f099963d9e3a9f9b22f5af470ae5",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup-dev.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2421423"
},
{
"name": "C++",
"bytes": "11497"
},
{
"name": "Python",
"bytes": "371301"
}
],
"symlink_target": ""
} |
# Re-export the prediction service clients at package level so callers can
# import them without referencing the implementation modules directly.
from .client import PredictionServiceClient
from .async_client import PredictionServiceAsyncClient

# Public API of this subpackage.
__all__ = (
    "PredictionServiceClient",
    "PredictionServiceAsyncClient",
)
| {
"content_hash": "565e6bd0a7b02ac7f2f1d71024daa8ad",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.7845303867403315,
"repo_name": "googleapis/python-automl",
"id": "d5ecd0dd035dda16c60928c0f9e4c6016bdd9dc1",
"size": "781",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "google/cloud/automl_v1beta1/services/prediction_service/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2347989"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_get_api_tag_description.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create an ApiManagementClient and print one API tag description.

    Credentials are resolved by DefaultAzureCredential from the standard
    AZURE_* environment variables.
    """
    api_client = ApiManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    tag_description = api_client.api_tag_description.get(
        resource_group_name="rg1",
        service_name="apimService1",
        api_id="59d6bb8f1f7fab13dc67ec9b",
        tag_description_id="59306a29e4bbd510dc24e5f9",
    )
    print(tag_description)


# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementGetApiTagDescription.json
if __name__ == "__main__":
    main()
| {
"content_hash": "ba5b7585a62ace1588a0bfc9cce66475",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 156,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.7341666666666666,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e62bc2c99f3c568da5ac7721c12343c2c809994c",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_get_api_tag_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import contextlib
import datetime
import sys
import types
import warnings
from freezegun import freeze_time
class ModuleWithWarning:
    """
    A module that triggers warnings on attribute access.

    This does not happen with regular modules, there has to be a bit of lazy
    module magic going on in order for this to happen.

    Examples of modules that uses this pattern in real projects can be found at:

    py.code - the compiler package import causes a warning to be emitted:
    https://github.com/pytest-dev/py/blob/67987e26aadddbbe7d1ec76c16ea9be346ae9811/py/__init__.py
    https://github.com/pytest-dev/py/blob/67987e26aadddbbe7d1ec76c16ea9be346ae9811/py/_code/_assertionold.py#L3

    celery.task - the sets module is listed in __all__ in celery.task and freeze_time accesses it:
    https://github.com/celery/celery/blob/46c92025cdec07a4a30ad44901cf66cb27346638/celery/task/__init__.py
    https://github.com/celery/celery/blob/46c92025cdec07a4a30ad44901cf66cb27346638/celery/task/sets.py
    """
    # Mimic attributes a real module object carries so an instance of this
    # class can be placed directly into sys.modules.
    # NOTE(review): assigning __dict__ at class level is unusual; presumably
    # it is part of making the instance look module-like — confirm.
    __name__ = 'module_with_warning'
    __dict__ = {}
    # Flipped to True once the warning-emitting attribute is accessed.
    warning_triggered = False
    # Class-level counter so every emitted warning message is unique.
    counter = 0

    @property
    def attribute_that_emits_a_warning(self):
        # Use unique warning messages to avoid messages being only reported once
        self.__class__.counter += 1
        warnings.warn(f'this is test warning #{self.__class__.counter}')
        self.warning_triggered = True
@contextlib.contextmanager
def assert_module_with_emitted_warning():
    """Temporarily register a warning-emitting fake module in ``sys.modules``.

    On exit the module is removed and we assert that its warning-raising
    attribute was actually touched inside the with-block.
    """
    lazy_module = ModuleWithWarning()
    sys.modules['module_with_warning'] = lazy_module
    try:
        yield
    finally:
        sys.modules.pop('module_with_warning')
        assert lazy_module.warning_triggered
@contextlib.contextmanager
def assert_no_warnings():
    """Fail with AssertionError if the enclosed block emits any warning."""
    with warnings.catch_warnings(record=True) as recorded:
        # Record every warning, even ones normally shown only once.
        warnings.filterwarnings('always')
        yield
        assert not recorded
def test_ignore_warnings_in_start():
    """Module introspection performed by start() must not emit warnings."""
    frozen_point = datetime.datetime(2016, 10, 27, 9, 56)
    with assert_module_with_emitted_warning():
        freezer = freeze_time(frozen_point)
        try:
            with assert_no_warnings():
                freezer.start()
        finally:
            freezer.stop()
def test_ignore_warnings_in_stop():
    """Modules loaded after start() must not trigger warnings during stop()."""
    freezer = freeze_time(datetime.datetime(2016, 10, 27, 9, 56))
    freezer.start()
    with assert_module_with_emitted_warning(), assert_no_warnings():
        freezer.stop()
| {
"content_hash": "9d5ae42ff6e771ce0432e9cb00954746",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 111,
"avg_line_length": 33.226190476190474,
"alnum_prop": 0.6936581870297385,
"repo_name": "spulec/freezegun",
"id": "5ee32ae48dd08e970e0615852b1176f7cf0afc2b",
"size": "2791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_warnings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "271"
},
{
"name": "Python",
"bytes": "87774"
}
],
"symlink_target": ""
} |
import json

import numpy as np
import pandas as pd

# Each analysis context pairs an input CSV of comments with the output
# directory where the resulting graph JSON is written.
contextos = [
    {"arquivo_comentarios": "../../dados/dadospessoais-comentarios-pdfs-filtrado.csv",
     "prefixo_saida": "../site/data/"}
]
# Template kept for generating the six per-context configurations:
# for i in range(1, 7):
#     obj = {"arquivo_comentarios": "../../data/contextos/dadospessoais-comentarios-pdfs-filtrado-contexto{}.csv".format(i),
#            "prefixo_saida": "../../data_web/contexto{}/".format(i)}
#     contextos.append(obj)

# Draft-bill ("anteprojeto") items: one row per commentable element.
anteprojeto = pd.read_csv("../../dados/dadospessoais-anteprojeto.csv",
                          dtype={"commentable_id": pd.core.common.CategoricalDtype,
                                 "commentable_parent": pd.core.common.CategoricalDtype,
                                 "commentable_article": pd.core.common.CategoricalDtype,
                                 "commentable_chapter": pd.core.common.CategoricalDtype,
                                 "commentable_axis": pd.core.common.CategoricalDtype,
                                 "commentable_type": pd.core.common.CategoricalDtype,
                                 "commentable_name": pd.core.common.CategoricalDtype,
                                 "commentable_text": np.character})

for contexto in contextos:
    comentarios = pd.read_csv(contexto["arquivo_comentarios"],
                              parse_dates=['comment_date'],
                              dtype={"source": pd.core.common.CategoricalDtype,
                                     "comment_id": pd.core.common.CategoricalDtype,
                                     "author_id": pd.core.common.CategoricalDtype,
                                     "author_name": np.character,
                                     "comment_parent": pd.core.common.CategoricalDtype,
                                     "commentable_id": pd.core.common.CategoricalDtype,
                                     "comment_text": np.character})
    # Drop the redundant name column; the canonical one comes from
    # ``anteprojeto`` after the merge below.
    comentarios.drop(["commentable_name"], axis=1, inplace=True)
    df = pd.merge(comentarios, anteprojeto, on="commentable_id")

    # Bipartite graph: participant ("user") nodes and commentable-item nodes.
    grafo = {"nodes": [], "links": []}

    # Participant nodes, weighted by the number of comments each author made.
    top_usuarios = df.groupby(["author_id", "author_name"]).size()
    # BUGFIX: Series.sort_values returns a new Series; the previous code
    # discarded the result, so the users were never actually sorted.
    top_usuarios = top_usuarios.sort_values(ascending=False)
    usuarios = [{"group": 0,
                 "type": "user",
                 "id": "TCP{}".format(author_id),
                 "name": author_name,
                 "value": int(count)}
                for (author_id, author_name), count in top_usuarios.iteritems()]
    grafo["nodes"].extend(usuarios)

    # Commentable-item nodes, grouped by their thematic axis.
    top_itens = df.groupby(["commentable_id", "commentable_name", "commentable_axis"]).size()
    itens = [{"id": "TCI{}".format(commentable_id),
              "name": commentable_name,
              "type": "item",
              "group": int(commentable_axis),
              "value": int(count)}
             for (commentable_id, commentable_name, commentable_axis), count in top_itens.iteritems()]

    # Number of comments per (author, item) pair.
    frequencia = df.groupby(["author_id", "commentable_id"]).size()

    # Keep only items that received at least one comment.
    itens_selecionados = {item for (usuario, item), count in frequencia.iteritems()}
    itens = [item for item in itens if item["id"][3:] in itens_selecionados]
    grafo["nodes"].extend(itens)

    # Map node ids to their index in grafo["nodes"] (the links use indices).
    nodos_ids = {nodo["id"]: i for i, nodo in enumerate(grafo["nodes"])}

    # Graph edges: one link per (author, item) pair, weighted by comment count.
    for (usuario, item), count in frequencia.iteritems():
        grafo["links"].append({"source": nodos_ids["TCP{}".format(usuario)],
                               "target": nodos_ids["TCI{}".format(item)],
                               "value": int(count)})

    # Write the graph next to the other site data for this context.
    with open("{}participantes_itens_comentaveis_graph.json".format(contexto["prefixo_saida"]), "w") as outfile:
        json.dump(grafo, outfile, indent=4)
| {
"content_hash": "c8d9fb7a572ccb449f4783d7c6502028",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 124,
"avg_line_length": 39.35576923076923,
"alnum_prop": 0.5289518690447105,
"repo_name": "W3CBrasil/AI-Social",
"id": "6d7f61084280353c8c327f8c0e90ef9db22e14ab",
"size": "4096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/code/grafo_participacao.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "51364"
},
{
"name": "C++",
"bytes": "3317"
},
{
"name": "CSS",
"bytes": "66345"
},
{
"name": "HTML",
"bytes": "40984"
},
{
"name": "JavaScript",
"bytes": "81067"
},
{
"name": "Makefile",
"bytes": "981"
},
{
"name": "PHP",
"bytes": "24989"
},
{
"name": "Python",
"bytes": "90259"
},
{
"name": "Ruby",
"bytes": "864"
},
{
"name": "Shell",
"bytes": "2623"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import setup_util
def start(args):
    """Patch the DB host into the Cowboy app, build it, and launch it.

    Returns 0 on success, 1 if any shell step fails. The config patch
    runs outside the try block, mirroring the original behavior (a
    failure there propagates to the caller).
    """
    setup_util.replace_text(
        "cowboy/src/hello_world_app.erl",
        "\"benchmarkdbpass\", \".*\", 3306",
        "\"benchmarkdbpass\", \"" + args.database_host + "\", 3306")
    build_and_run = [
        "./rebar get-deps",
        "./rebar compile",
        "erl -pa ebin deps/*/ebin +sbwt very_long +swt very_low -s hello_world -noshell -detached",
    ]
    try:
        for command in build_and_run:
            subprocess.check_call(command, shell=True, cwd="cowboy")
    except subprocess.CalledProcessError:
        return 1
    return 0
def stop():
    """Terminate the running Erlang VM; 0 on success, 1 on failure."""
    try:
        subprocess.check_call("killall beam.smp", shell=True, cwd="/usr/bin")
    except subprocess.CalledProcessError:
        return 1
    return 0
| {
"content_hash": "c1af17434947f6808454619c5b1a47ba",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 157,
"avg_line_length": 37.15,
"alnum_prop": 0.6877523553162853,
"repo_name": "domix/FrameworkBenchmarks",
"id": "60537f4b29c6cff5e3ae050a075b16bdb296eb39",
"size": "743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cowboy/setup_erlang.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import json
import string
import random
import nova_v2_base
def cinder_request(self,
                   url_detail,
                   request_type='get',
                   request_name=None,
                   data=None,
                   locust_name=None):
    """Issue one HTTP request against the Cinder ('volumev2') endpoint.

    Args:
        url_detail: path appended to the endpoint URL (may be falsy).
        request_type: name of the HTTP method on ``self.client``.
        request_name: informational label (not forwarded to the client).
        data: optional payload, JSON-encoded before sending.
        locust_name: stats-grouping name passed to the client call.
    Returns:
        The client's response object (also logged via ``self.output``).
    """
    target = self.get_endpoint('volumev2')
    if url_detail:
        target = os.path.join(target, url_detail)
    request_headers = {'X-Auth-Project-Id': self.keystone_tenant,
                       'X-Auth-Token': self.auth_token,
                       'Content-Type': 'application/json',
                       'Accept': 'application/json'}
    method = getattr(self.client, request_type)
    call_kwargs = {'headers': request_headers, 'name': locust_name}
    if data:
        call_kwargs['data'] = json.dumps(data)
    response = method(target, **call_kwargs)
    self.output(target)
    self.output("Response status code: %s" % response.status_code)
    self.output("Response content: %s" % response.content)
    return response
def get_volume_id(self):
    """Pick a random volume id from the currently available volumes."""
    listing = cinder_request(self, 'volumes', 'get')
    volumes = json.loads(listing.content)['volumes']
    return random.choice([entry['id'] for entry in volumes])
def get_snapshot_id(self):
    """Pick a random snapshot id from the currently available snapshots."""
    listing = cinder_request(self, 'snapshots', 'get')
    snapshots = json.loads(listing.content)['snapshots']
    return random.choice([entry['id'] for entry in snapshots])
def get_image_id(self):
    """ Return a random image from currently
        available images
    """
    # NOTE(review): this calls `nova_api`, but the module only imports
    # `nova_v2_base` — confirm `nova_api` resolves at runtime or this
    # raises NameError.
    response = nova_api.nova_request(self, 'images', 'get')
    image_list = json.loads(response.content)['images']
    image_id = random.choice([i['id'] for i in image_list])
    return image_id
def get_server_id(self):
    """Return a random server id from the currently available servers."""
    # NOTE(review): relies on `nova_api`, which is not imported in this
    # module (only `nova_v2_base` is) — verify.
    response = nova_api.nova_request(self, 'servers', 'get')
    server_list = json.loads(response.content)['servers']
    server_id = random.choice([i['id'] for i in server_list])
    return server_id
def list_volumes(self):
    """GET the volume listing."""
    return cinder_request(self, 'volumes', request_type='get',
                          request_name='cinder_list_volumes')
def list_volumes_detail(self):
    """GET the detailed volume listing."""
    return cinder_request(self, 'volumes/detail', request_type='get',
                          request_name='cinder_list_volumes_detail')
def list_volume_detail(self, volume_id=None):
    """GET one volume's details; a random volume is used when no id is given."""
    if not volume_id:
        volume_id = get_volume_id(self)
    return cinder_request(self,
                          'volumes/%s' % volume_id,
                          request_type='get',
                          request_name='cinder_list_volume_detail',
                          locust_name='volumes/[id]')
def list_volume_types(self):
    """GET the volume-type listing."""
    return cinder_request(self, 'types', request_type='get',
                          request_name='cinder_list_volume_types')
def list_snapshots(self):
    """GET the snapshot listing."""
    return cinder_request(self, 'snapshots', request_type='get',
                          request_name='cinder_list_snapshots')
def list_snapshots_detail(self):
    """GET the detailed snapshot listing."""
    return cinder_request(self, 'snapshots/detail', request_type='get',
                          request_name='cinder_list_snapshots_detail')
def list_snapshot_detail(self, snapshot_id=None):
    """GET one snapshot's details; a random snapshot is used when no id is given."""
    if not snapshot_id:
        snapshot_id = get_snapshot_id(self)
    return cinder_request(self,
                          'snapshots/%s' % snapshot_id,
                          request_type='get',
                          request_name='cinder_list_snapshot_detail',
                          locust_name='snapshots/[id]')
def list_images(self):
    """GET the image listing."""
    return cinder_request(self, 'images', request_type='get',
                          request_name='cinder_list_images')
def list_images_detail(self):
    """GET the detailed image listing."""
    return cinder_request(self, 'images/detail', request_type='get',
                          request_name='cinder_list_images_detail')
def list_image_detail(self, image_id=None):
    """GET one image's details; a random image is used when no id is given."""
    if not image_id:
        image_id = get_image_id(self)
    return cinder_request(self,
                          'images/%s' % image_id,
                          request_type='get',
                          request_name='cinder_list_image_detail',
                          locust_name='images/[id]')
def list_image_metadata(self, image_id=None):
    """GET an image's metadata; a random image is used when no id is given."""
    if not image_id:
        image_id = get_image_id(self)
    return cinder_request(self,
                          'images/%s/metadata' % image_id,
                          request_type='get',
                          request_name='cinder_list_image_metadata',
                          locust_name='images/[id]/metadata')
def update_image_metadata(self, image_id = None, metadata=None):
    """POST metadata onto an image (merge semantics).

    Random image / test metadata are chosen when arguments are omitted.
    NOTE(review): ``get_test_metadata`` is not defined in this module's
    visible scope — confirm it is provided elsewhere.
    """
    if not image_id:
        image_id = get_image_id(self)
    if not metadata:
        metadata = get_test_metadata(self)
    payload = {"metadata": metadata}
    return cinder_request(self,
                          'images/%s/metadata' % image_id,
                          'post',
                          'cinder_update_image_metadata',
                          data=payload,
                          locust_name='images/[id]/metadata')
def overwrite_image_metadata(self, image_id = None, metadata=None):
    """PUT metadata onto an image (replace semantics).

    Random image / test metadata are chosen when arguments are omitted.
    NOTE(review): ``get_test_metadata`` is not defined in this module's
    visible scope — confirm it is provided elsewhere.
    """
    if not image_id:
        image_id = get_image_id(self)
    if not metadata:
        metadata = get_test_metadata(self)
    payload = {"metadata": metadata}
    return cinder_request(self,
                          'images/%s/metadata' % image_id,
                          'put',
                          'cinder_overwrite_image_metadata',
                          data=payload,
                          locust_name='images/[id]/metadata')
def create_volume(self,
                  volume_id=None,
                  snapshot_id=None,
                  image_id=None,
                  description=None,
                  size=1,
                  name=None,
                  bootable=False,
                  metadata=None
                  ):
    """POST a new volume.

    Args:
        volume_id: source volume to clone from, if any.
        snapshot_id: snapshot to create the volume from, if any.
        image_id: image to create the volume from, if any.
        description: optional volume description.
        size: volume size in GB (default 1).
        name: volume name; a unique "volume-<uuid4>" is generated when
            omitted.
        bootable: whether the volume should be marked bootable.
        metadata: optional metadata dict attached to the volume.
    Returns:
        The HTTP response from the volume-create call.
    """
    # `uuid` is used below but never imported at module level, which
    # made the default-name path raise NameError; import it locally.
    import uuid
    if metadata is None:
        # Default changed from `metadata={}` to avoid the shared
        # mutable-default pitfall; behavior for callers is unchanged.
        metadata = {}
    if not name:
        name = "volume-%s" % uuid.uuid4()
    data = {
        "volume": {
            "source_volid": volume_id,
            "snapshot_id": snapshot_id,
            "description": description,
            "size": size,
            "name": name,
            "imageRef": image_id,
            "bootable": bootable,
            "metadata": metadata
        }
    }
    return cinder_request(self,
                          'volumes',
                          'post',
                          'cinder_create_volume',
                          data)
def delete_volume(self, volume_id):
    """DELETE the given volume."""
    cinder_request(self,
                   'volumes/%s' % volume_id,
                   request_type='delete',
                   request_name='cinder_delete_volume',
                   locust_name='volumes/[id]')
def create_snapshot(self,
                    volume_id=None,
                    name=None,
                    force=False,
                    description=None):
    """POST a new snapshot of a volume.

    Args:
        volume_id: volume to snapshot; a random existing volume is
            picked when omitted.
        name: snapshot name; a unique "snapshot-<uuid4>" is generated
            when omitted.
        force: set True to snapshot a volume that is attached.
        description: optional snapshot description.
    Returns:
        The HTTP response from the snapshot-create call.
    """
    # `uuid` is used below but never imported at module level, which
    # made the default-name path raise NameError; import it locally.
    import uuid
    if not name:
        name = "snapshot-%s" % uuid.uuid4()
    if not volume_id:
        volume_id = get_volume_id(self)
    data = {
        "snapshot": {
            "name": name,
            "description": description,
            "volume_id": volume_id,
            "force": force
        }
    }
    return cinder_request(self,
                          'snapshots',
                          'post',
                          'cinder_create_snapshot',
                          data)
def delete_snapshot(self, snapshot_id):
    """DELETE the given snapshot."""
    cinder_request(self,
                   'snapshots/%s' % snapshot_id,
                   'delete',
                   'cinder_delete_snapshot',
                   # Fixed: was 'volumes/[id]', an apparent copy/paste
                   # from delete_volume; snapshot requests should report
                   # under their own stats bucket, matching
                   # list_snapshot_detail above.
                   locust_name='snapshots/[id]')
| {
"content_hash": "9eafde66fa75e1e3c05b185d1cedd857",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 76,
"avg_line_length": 31.03409090909091,
"alnum_prop": 0.4738191138777005,
"repo_name": "pcrews/rannsaka",
"id": "c2fcef9fe3061731da8d4fe3d21502506829d617",
"size": "8193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rannsaka/locust_files/task_funcs/cinder_v1_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "190458"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import unittest
from unittest import mock
from unittest.mock import PropertyMock
from airflow.providers.google.cloud.hooks.spanner import SpannerHook
from airflow.providers.google.common.consts import CLIENT_INFO
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
# Shared fixture identifiers used by the Spanner hook tests below.
SPANNER_INSTANCE = 'instance'
SPANNER_CONFIGURATION = 'configuration'
SPANNER_DATABASE = 'database-name'
class TestGcpSpannerHookDefaultProjectId(unittest.TestCase):
    """Tests for SpannerHook built with a default project id.

    Each test mocks either the Spanner ``Client`` or the hook's
    ``_get_client`` factory and asserts the exact arguments the hook
    forwards to the client/instance/database mocks. 'example-project'
    is the default project id supplied by the mocked base hook.
    """
    def setUp(self):
        # Replace the base hook's __init__ so no real GCP connection is
        # needed when constructing the hook under test.
        with mock.patch(
            'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__',
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.spanner_hook_default_project_id = SpannerHook(gcp_conn_id='test')
    # _get_client builds a Client with creds + CLIENT_INFO and caches it.
    @mock.patch("airflow.providers.google.cloud.hooks.spanner.SpannerHook.get_credentials")
    @mock.patch("airflow.providers.google.cloud.hooks.spanner.Client")
    def test_spanner_client_creation(self, mock_client, mock_get_creds):
        result = self.spanner_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=CLIENT_INFO,
        )
        assert mock_client.return_value == result
        assert self.spanner_hook_default_project_id._client == result
    # get_instance returns the instance when it exists.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_existing_instance(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.spanner_hook_default_project_id.get_instance(
            instance_id=SPANNER_INSTANCE, project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        assert res is not None
    # An explicit project_id overrides the connection default.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_existing_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.spanner_hook_default_project_id.get_instance(
            instance_id=SPANNER_INSTANCE, project_id='new-project'
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(instance_id='instance')
        assert res is not None
    # create_instance forwards config/display-name/node-count to instance().
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_create_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        create_method = instance_method.return_value.create
        create_method.return_value = False
        res = self.spanner_hook_default_project_id.create_instance(
            instance_id=SPANNER_INSTANCE,
            configuration_name=SPANNER_CONFIGURATION,
            node_count=1,
            display_name=SPANNER_DATABASE,
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(
            instance_id='instance',
            configuration_name='configuration',
            display_name='database-name',
            node_count=1,
        )
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        create_method = instance_method.return_value.create
        create_method.return_value = False
        res = self.spanner_hook_default_project_id.create_instance(
            project_id='new-project',
            instance_id=SPANNER_INSTANCE,
            configuration_name=SPANNER_CONFIGURATION,
            node_count=1,
            display_name=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(
            instance_id='instance',
            configuration_name='configuration',
            display_name='database-name',
            node_count=1,
        )
        assert res is None
    # update_instance rebuilds the instance handle and calls update().
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        update_method = instance_method.return_value.update
        update_method.return_value = False
        res = self.spanner_hook_default_project_id.update_instance(
            instance_id=SPANNER_INSTANCE,
            configuration_name=SPANNER_CONFIGURATION,
            node_count=2,
            display_name=SPANNER_DATABASE,
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(
            instance_id='instance',
            configuration_name='configuration',
            display_name='database-name',
            node_count=2,
        )
        update_method.assert_called_once_with()
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        update_method = instance_method.return_value.update
        update_method.return_value = False
        res = self.spanner_hook_default_project_id.update_instance(
            project_id='new-project',
            instance_id=SPANNER_INSTANCE,
            configuration_name=SPANNER_CONFIGURATION,
            node_count=2,
            display_name=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(
            instance_id='instance',
            configuration_name='configuration',
            display_name='database-name',
            node_count=2,
        )
        update_method.assert_called_once_with()
        assert res is None
    # delete_instance passes the id positionally and calls delete().
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_instance(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        delete_method.return_value = False
        res = self.spanner_hook_default_project_id.delete_instance(
            instance_id=SPANNER_INSTANCE,
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with('instance')
        delete_method.assert_called_once_with()
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        delete_method.return_value = False
        res = self.spanner_hook_default_project_id.delete_instance(
            project_id='new-project', instance_id=SPANNER_INSTANCE
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with('instance')
        delete_method.assert_called_once_with()
        assert res is None
    # get_database looks up the database handle and checks existence.
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_database(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        # NOTE(review): bound to the *instance* mock's `exists`, not
        # `database_method.return_value.exists` — confirm this is the
        # intended mock (cf. test_delete_database below).
        database_exists_method = instance_method.return_value.exists
        database_exists_method.return_value = True
        res = self.spanner_hook_default_project_id.get_database(
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        assert res is not None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        # NOTE(review): same instance-vs-database `exists` binding as
        # test_get_database above — confirm intended.
        database_exists_method = instance_method.return_value.exists
        database_exists_method.return_value = True
        res = self.spanner_hook_default_project_id.get_database(
            project_id='new-project', instance_id=SPANNER_INSTANCE, database_id=SPANNER_DATABASE
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        assert res is not None
    # create_database passes ddl_statements through to database().
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_create_database(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_create_method = database_method.return_value.create
        res = self.spanner_hook_default_project_id.create_database(
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            ddl_statements=[],
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name', ddl_statements=[])
        database_create_method.assert_called_once_with()
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_create_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_create_method = database_method.return_value.create
        res = self.spanner_hook_default_project_id.create_database(
            project_id='new-project',
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            ddl_statements=[],
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name', ddl_statements=[])
        database_create_method.assert_called_once_with()
        assert res is None
    # update_database issues update_ddl with a None operation_id by default.
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_database(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_update_ddl_method = database_method.return_value.update_ddl
        res = self.spanner_hook_default_project_id.update_database(
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            ddl_statements=[],
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_update_ddl_method = database_method.return_value.update_ddl
        res = self.spanner_hook_default_project_id.update_database(
            project_id='new-project',
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            ddl_statements=[],
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
        assert res is None
    # delete_database checks database existence, then drops it; returns truthy.
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_database(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_drop_method = database_method.return_value.drop
        database_exists_method = database_method.return_value.exists
        database_exists_method.return_value = True
        res = self.spanner_hook_default_project_id.delete_database(
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        database_drop_method.assert_called_once_with()
        assert res
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_drop_method = database_method.return_value.drop
        database_exists_method = database_method.return_value.exists
        database_exists_method.return_value = True
        res = self.spanner_hook_default_project_id.delete_database(
            project_id='new-project', instance_id=SPANNER_INSTANCE, database_id=SPANNER_DATABASE
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        database_drop_method.assert_called_once_with()
        assert res
    # execute_dml wraps the queries in a run_in_transaction callback.
    @mock.patch(
        'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
        new_callable=PropertyMock,
        return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_execute_dml(self, get_client, mock_project_id):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        run_in_transaction_method = database_method.return_value.run_in_transaction
        res = self.spanner_hook_default_project_id.execute_dml(
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            queries='',
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        run_in_transaction_method.assert_called_once_with(mock.ANY)
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_execute_dml_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        run_in_transaction_method = database_method.return_value.run_in_transaction
        res = self.spanner_hook_default_project_id.execute_dml(
            project_id='new-project', instance_id=SPANNER_INSTANCE, database_id=SPANNER_DATABASE, queries=''
        )
        get_client.assert_called_once_with(project_id='new-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        run_in_transaction_method.assert_called_once_with(mock.ANY)
        assert res is None
class TestGcpSpannerHookNoDefaultProjectID(unittest.TestCase):
    def setUp(self):
        # Build the hook with the base hook's __init__ replaced by the
        # no-default-project-id mock, so no real GCP connection is used.
        with mock.patch(
            'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__',
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.spanner_hook_no_default_project_id = SpannerHook(gcp_conn_id='test')
    # _get_client builds a Client with creds + CLIENT_INFO and caches it.
    @mock.patch(
        "airflow.providers.google.cloud.hooks.spanner.SpannerHook.get_credentials",
        return_value="CREDENTIALS",
    )
    @mock.patch("airflow.providers.google.cloud.hooks.spanner.Client")
    def test_spanner_client_creation(self, mock_client, mock_get_creds):
        result = self.spanner_hook_no_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
        mock_client.assert_called_once_with(
            project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            credentials=mock_get_creds.return_value,
            client_info=CLIENT_INFO,
        )
        assert mock_client.return_value == result
        assert self.spanner_hook_no_default_project_id._client == result
    # With no connection default, the caller-supplied project id is used.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_existing_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        res = self.spanner_hook_no_default_project_id.get_instance(
            instance_id=SPANNER_INSTANCE, project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        assert res is not None
    # get_instance returns None when the instance does not exist.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_non_existing_instance(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = False
        res = self.spanner_hook_no_default_project_id.get_instance(
            instance_id=SPANNER_INSTANCE, project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        assert res is None
    # create_instance forwards config/display-name/node-count to instance().
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_create_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        create_method = instance_method.return_value.create
        create_method.return_value = False
        res = self.spanner_hook_no_default_project_id.create_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            configuration_name=SPANNER_CONFIGURATION,
            node_count=1,
            display_name=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(
            instance_id='instance',
            configuration_name='configuration',
            display_name='database-name',
            node_count=1,
        )
        assert res is None
    # update_instance rebuilds the instance handle and calls update().
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        update_method = instance_method.return_value.update
        update_method.return_value = False
        res = self.spanner_hook_no_default_project_id.update_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            configuration_name=SPANNER_CONFIGURATION,
            node_count=2,
            display_name=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(
            instance_id='instance',
            configuration_name='configuration',
            display_name='database-name',
            node_count=2,
        )
        update_method.assert_called_once_with()
        assert res is None
    # delete_instance passes the id positionally and calls delete().
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_instance_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        delete_method = instance_method.return_value.delete
        delete_method.return_value = False
        res = self.spanner_hook_no_default_project_id.delete_instance(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, instance_id=SPANNER_INSTANCE
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with('instance')
        delete_method.assert_called_once_with()
        assert res is None
    # get_database looks up the database handle and checks existence.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_get_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        # NOTE(review): bound to the *instance* mock's `exists`, not
        # `database_method.return_value.exists` — confirm intended.
        database_exists_method = instance_method.return_value.exists
        database_exists_method.return_value = True
        res = self.spanner_hook_no_default_project_id.get_database(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        assert res is not None
    # create_database passes ddl_statements through to database().
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_create_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_create_method = database_method.return_value.create
        res = self.spanner_hook_no_default_project_id.create_database(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            ddl_statements=[],
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name', ddl_statements=[])
        database_create_method.assert_called_once_with()
        assert res is None
    # update_database defaults operation_id to None in update_ddl.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_database_overridden_project_id(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_update_ddl_method = database_method.return_value.update_ddl
        res = self.spanner_hook_no_default_project_id.update_database(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            ddl_statements=[],
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id=None)
        assert res is None
    # An explicit operation_id is forwarded to update_ddl unchanged.
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_update_database_overridden_project_id_and_operation(self, get_client):
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_update_ddl_method = database_method.return_value.update_ddl
        res = self.spanner_hook_no_default_project_id.update_database(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            operation_id="operation",
            ddl_statements=[],
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_update_ddl_method.assert_called_once_with(ddl_statements=[], operation_id="operation")
        assert res is None
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_database_overridden_project_id(self, get_client):
        """delete_database drops an existing database and reports success."""
        # Both the instance and the database report that they exist, so the
        # hook should proceed all the way to drop().
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_drop_method = database_method.return_value.drop
        database_exists_method = database_method.return_value.exists
        database_exists_method.return_value = True
        res = self.spanner_hook_no_default_project_id.delete_database(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        database_drop_method.assert_called_once_with()
        # A truthy return value signals the database was deleted.
        assert res
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_delete_database_missing_database(self, get_client):
        """delete_database must not call drop() when the database is absent."""
        # Instance exists but the database does not: the hook should check
        # existence and then skip the drop call entirely.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        database_drop_method = database_method.return_value.drop
        database_exists_method = database_method.return_value.exists
        database_exists_method.return_value = False
        self.spanner_hook_no_default_project_id.delete_database(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        database_exists_method.assert_called_once_with()
        database_drop_method.assert_not_called()
    @mock.patch('airflow.providers.google.cloud.hooks.spanner.SpannerHook._get_client')
    def test_execute_dml_overridden_project_id(self, get_client):
        """execute_dml with an explicit project_id runs inside a transaction."""
        # Existing instance so the hook reaches run_in_transaction.
        instance_method = get_client.return_value.instance
        instance_exists_method = instance_method.return_value.exists
        instance_exists_method.return_value = True
        database_method = instance_method.return_value.database
        run_in_transaction_method = database_method.return_value.run_in_transaction
        res = self.spanner_hook_no_default_project_id.execute_dml(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
            instance_id=SPANNER_INSTANCE,
            database_id=SPANNER_DATABASE,
            queries='',
        )
        get_client.assert_called_once_with(project_id='example-project')
        instance_method.assert_called_once_with(instance_id='instance')
        database_method.assert_called_once_with(database_id='database-name')
        # The transactional callable is hook-internal, so only assert that
        # run_in_transaction received exactly one callable argument.
        run_in_transaction_method.assert_called_once_with(mock.ANY)
        assert res is None
| {
"content_hash": "0ecd5be2361f36237a73b0cc247bdf69",
"timestamp": "",
"source": "github",
"line_count": 655,
"max_line_length": 108,
"avg_line_length": 50.98473282442748,
"alnum_prop": 0.6854618954933374,
"repo_name": "cfei18/incubator-airflow",
"id": "2f9f3d6a888e7810b3d0f6b71786a2bd3e281c2e",
"size": "34182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/hooks/test_spanner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive keyword substitutions used for version lookup."""
    # The placeholder strings below are rewritten by ``git archive`` (via the
    # export-subst attribute).  setup.py/versioneer.py grep for the variable
    # names, so each assignment must stay on a line of its own; _version.py
    # itself just calls get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These settings are baked in when 'setup.py versioneer' generates
    # _version.py for the project.
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "None",
        "versionfile_source": "stancache/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised by the individual version-discovery strategies (expanded
    keywords, ``git describe``, parent-directory name) so that
    get_versions() can fall through to the next strategy.
    """
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Record f as HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Try each candidate executable name in *commands* in order until one
    launches, then run it with *args* and return ``(stdout, returncode)``.
    Returns ``(None, None)`` if no candidate could be started at all, and
    ``(None, returncode)`` if the command ran but exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found -- fall through to the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # Loop exhausted: none of the candidate names could be launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # communicate() yields bytes on Python 3; decode for uniform handling.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Walk up to two directory
    levels looking for a directory name starting with *parentdir_prefix*;
    the remainder of that name is taken as the version.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # step up one level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    The code embedded in _version.py can just fetch the value of these
    keywords.  When used from setup.py we don't want to import _version.py,
    so the file is parsed textually instead.  This function is not used
    from _version.py itself.
    """
    keywords = {}
    # (assignment prefix to look for, key to store the quoted value under)
    targets = (("git_refnames =", "refnames"),
               ("git_full =", "full"),
               ("git_date =", "date"))
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, key in targets:
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing/unreadable file: return whatever was gathered (possibly {}).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by get_keywords()/git_get_keywords():
    the git-archive expansions of %d (refnames), %H (full hash) and
    %ci (commit date).  Raises NotThisMethod if the keywords are absent
    or were never expanded (i.e. we are not in a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Placeholder was never substituted: not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, date, error.  *run_command* is injectable for testing.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # Strip the suffix so the TAG-NUM-gHEX parse below isn't confused.
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return the separator for the next local-version-identifier segment.

    PEP 440 local version identifiers begin with "+"; subsequent components
    are joined with ".".  Returns "." if the closest tag already contains a
    "+", else "+".
    """
    # "closest-tag" may be present but None (no tags in history); the old
    # pieces.get("closest-tag", "") returned that None and made the ``in``
    # test raise TypeError.  Coalesce None/missing to "" instead.
    return "." if "+" in (pieces.get("closest-tag") or "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] .  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered
    if not (pieces["distance"] or pieces["dirty"]):
        return tag
    rendered = tag + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # An earlier stage failed; report the error verbatim instead of a
        # fabricated version string.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": rendered,
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on a
    checked-out tree, and the parent-directory naming convention.  Each
    strategy signals inapplicability by raising NotThisMethod.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # Frozen interpreters (py2exe etc.) may not define __file__.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Every strategy failed; report an unknown version with the reason.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| {
"content_hash": "20e1ce25d410f58ded186da174841bc1",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 79,
"avg_line_length": 35.245098039215684,
"alnum_prop": 0.5697357440890125,
"repo_name": "jburos/stancache",
"id": "568bba33b117a2da2f9d2d1c8c729eabd641ec6e",
"size": "18450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stancache/_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102232"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
} |
"""
Visualize the Urban Aerosol Distribution
========================================
_thumb: .4, .4
"""
import seaborn as sns
import opcsim
sns.set(style='ticks', font_scale=1.25)
# Load the example urban distribution
d = opcsim.load_distribution("Urban")
# Plot the number-weighted pdf with modes
ax = opcsim.plots.pdfplot(d, with_modes=True)
# Set the title and axes labels
ax.set_title("Urban Distribution", fontsize=18)
# Add a legend
ax.legend(loc='best')
# Set the ylim
ax.set_ylim(0, None)
# Remove the top and right spines
sns.despine()
| {
"content_hash": "b88abd56bee212433232433d6d43bb87",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 47,
"avg_line_length": 21.23076923076923,
"alnum_prop": 0.6757246376811594,
"repo_name": "dhhagan/opcsim",
"id": "68f5b77559fd6ded9745dfe4072db948ebd6417f",
"size": "552",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/examples/urban_distribution_pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "148"
},
{
"name": "Jupyter Notebook",
"bytes": "698"
},
{
"name": "Python",
"bytes": "195835"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
} |
from django import forms
from . import models
class QACreateForm(forms.ModelForm):
    """ModelForm for creating a QualityAssuranceCheck record."""
    class Meta:
        model = models.QualityAssuranceCheck
        fields = [
            'content_changed',
            'technical_quality_changed',
            'comment'
        ]
class QAEditForm(forms.ModelForm):
    """ModelForm for editing a QualityAssuranceCheck.

    Same fields as QACreateForm plus 'source_action', which is only
    exposed once the check exists.
    """
    class Meta:
        model = models.QualityAssuranceCheck
        fields = [
            'content_changed',
            'technical_quality_changed',
            'source_action',
            'comment'
        ]
| {
"content_hash": "cc9c22a8663c0467fe69630b3141b7c5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 44,
"avg_line_length": 22.565217391304348,
"alnum_prop": 0.5606936416184971,
"repo_name": "WebArchivCZ/Seeder",
"id": "4670ef04ab13aeed765d28b5f397c606697be933",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Seeder/qa/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40916"
},
{
"name": "HTML",
"bytes": "191411"
},
{
"name": "JavaScript",
"bytes": "35092"
},
{
"name": "PHP",
"bytes": "996"
},
{
"name": "Python",
"bytes": "298522"
},
{
"name": "Shell",
"bytes": "691"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy.integrate import dblquad
from scipy.integrate import quad
from scipy.stats import uniform
from cgpm.cgpm import CGpm
from cgpm.network.importance import ImportanceNetwork
from cgpm.uncorrelated.directed import DirectedXyGpm
from cgpm.uncorrelated.uniformx import UniformX
from cgpm.utils import general as gu
class SinY(CGpm):
    """Conditional GPM: y = cos(x) +/- Uniform(0, noise) noise.

    The noise is added when cos(x) < 0 and subtracted otherwise, so y
    always lies between cos(x) and the side of it nearer to zero... note:
    for cos(x) < 0 the band is [cos(x), cos(x)+noise] -- TODO confirm the
    intended geometry against DirectedXyGpm's usage.
    """
    def __init__(self, outputs=None, inputs=None, noise=None, rng=None):
        # Defaults: output variable 0 conditioned on input variable 1,
        # with noise half-width 0.1.
        if rng is None:
            rng = gu.gen_rng(1)
        if outputs is None:
            outputs = [0]
        if inputs is None:
            inputs = [1]
        if noise is None:
            noise = .1
        self.rng = rng
        self.outputs = outputs
        self.inputs = inputs
        self.noise = noise
        self.uniform = uniform(scale=self.noise)
    @gu.simulate_many
    def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
        """Draw y given x = inputs[self.inputs[0]]."""
        assert targets == self.outputs
        # NOTE(review): dict.keys() == list works on Python 2 (keys() is a
        # list); this module appears to target Python 2.
        assert inputs.keys() == self.inputs
        assert not constraints
        x = inputs[self.inputs[0]]
        noise = self.rng.uniform(high=self.noise)
        # Add noise toward zero: upward when cos(x) is negative, downward
        # otherwise.
        if np.cos(x) < 0:
            y = np.cos(x) + noise
        else:
            y = np.cos(x) - noise
        return {self.outputs[0]: y}
    def logpdf(self, rowid, targets, constraints=None, inputs=None):
        """Log density of y given x; Uniform(0, noise) on the offset."""
        assert targets.keys() == self.outputs
        assert inputs.keys() == self.inputs
        assert not constraints
        x = inputs[self.inputs[0]]
        y = targets[self.outputs[0]]
        # The offset from cos(x) must have the sign used in simulate().
        if np.cos(x) < 0:
            return self.uniform.logpdf(y-np.cos(x))
        else:
            return self.uniform.logpdf(np.cos(x)-y)
class Sin(DirectedXyGpm):
    """Y = cos(X) + Noise.

    X ~ Uniform(-1.5*pi, 1.5*pi); Y is generated by SinY.  Queries are
    served through an ImportanceNetwork over the two components.
    """
    def __init__(self, outputs=None, inputs=None, noise=None, rng=None):
        DirectedXyGpm.__init__(
            self, outputs=outputs, inputs=inputs, noise=noise, rng=rng)
        self.x = UniformX(
            outputs=[self.outputs[0]], low=-1.5*np.pi, high=1.5*np.pi)
        self.y = SinY(
            outputs=[self.outputs[1]],
            inputs=[self.outputs[0]],
            noise=noise)
        self.network = ImportanceNetwork([self.x, self.y], rng=self.rng)
    # All further methods are here for historical reasons and are not invoked.
    # Should override simuate and logpdf from the importance network in
    # DirectedXyGpm to activate them.
    def logpdf_xy(self, x, y):
        """Joint log density of (x, y); uniform over the valid noise band."""
        if not self.D[0] <= x <= self.D[1]:
            return -float('inf')
        if np.cos(x) < 0 and not np.cos(x) <= y <= np.cos(x) + self.noise:
            return -float('inf')
        if np.cos(x) > 0 and not np.cos(x) - self.noise <= y <= np.cos(x):
            return -float('inf')
        return -np.log(self.D[1]-self.D[0]) - np.log(self.noise)
    def logpdf_x(self, x):
        """Marginal log density of x: Uniform over D."""
        if not self.D[0] <= x <= self.D[1]:
            return -float('inf')
        return -np.log(self.D[1]-self.D[0])
    def logpdf_y(self, y):
        """Marginal log density of y, via the measure of valid x values."""
        # Exploits symmetry: measure the valid-x region for |y| and double it.
        if 0 <= y:
            length, overflow = self._valid_x(y)
            length += 2*overflow
        else:
            length, overflow = self._valid_x(-y)
            length = 2*length + overflow
        length *= 2
        return np.log(length) - np.log(self.noise) - np.log(self.D[1]-self.D[0])
    def logpdf_x_given_y(self, x, y):
        raise NotImplementedError
    def logpdf_y_given_x(self, y, x):
        raise NotImplementedError
    def mutual_information(self):
        """Numerically estimate MI(X;Y) by 2-D quadrature over the support."""
        def mi_integrand(x, y):
            return np.exp(self.logpdf_xy(x,y)) * \
                (self.logpdf_xy(x,y) - self.logpdf_x(x) - self.logpdf_y(y))
        return dblquad(
            lambda y, x: mi_integrand(x,y), self.D[0], self.D[1],
            self._lower_y, self._upper_y)
    def _valid_x(self, y):
        """Compute valid regions of x for y in [0, 1], with overflow."""
        assert 0<=y<=1
        x_max = np.arccos(y)
        if y+self.noise < 1:
            x_min = np.arccos(y+self.noise)
        else:
            x_min = 0
        # compute overflow
        overflow = 0
        if y < self.noise:
            overflow = np.arccos(y-self.noise) - np.pi / 2
        return x_max - x_min, overflow
    def _lower_y(self, x):
        # Lower envelope of the noise band at x (used as dblquad bound).
        if np.cos(x) < 0:
            return np.cos(x)
        else:
            return np.cos(x) - self.noise
    def _upper_y(self, x):
        # Upper envelope of the noise band at x (used as dblquad bound).
        if np.cos(x) < 0:
            return np.cos(x) + self.noise
        else:
            return np.cos(x)
    def _sanity_test(self):
        # NOTE: Python 2 print statements -- this module targets Python 2.
        # Marginal of x integrates to one.
        print quad(lambda x: np.exp(self.logpdf_x(x)), self.D[0], self.D[1])
        # Marginal of y integrates to one.
        print quad(lambda y: np.exp(self.logpdf_y(y)), -1 ,1)
        # Joint of x,y integrates to one; quadrature will fail for small noise.
        print dblquad(
            lambda y,x: np.exp(self.logpdf_xy(x,y)), self.D[0], self.D[1],
            lambda x: -1, lambda x: 1)
| {
"content_hash": "1405451304c8fc3a67876978efa38d91",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 80,
"avg_line_length": 33.394557823129254,
"alnum_prop": 0.5569362395599918,
"repo_name": "probcomp/cgpm",
"id": "5033419afcc54411f408893ba2906478ac920519",
"size": "5539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/uncorrelated/sin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "797705"
},
{
"name": "Shell",
"bytes": "2759"
}
],
"symlink_target": ""
} |
def sieve(limit=0):
    """Return the list of primes up to and including *limit*.

    Implements the classic Sieve of Eratosthenes.  The previous stub took
    no arguments and did nothing; ``limit`` defaults to 0 so a
    zero-argument call still works (and yields no primes).
    """
    if limit < 2:
        return []
    # is_prime[n] tracks whether n is still a prime candidate.
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for n in range(2, int(limit ** 0.5) + 1):
        if is_prime[n]:
            # Start at n*n: smaller multiples were crossed off already.
            for multiple in range(n * n, limit + 1, n):
                is_prime[multiple] = False
    return [n for n, prime in enumerate(is_prime) if prime]
| {
"content_hash": "243fe72566cd141a777c29c3f1654fe0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 12,
"avg_line_length": 11,
"alnum_prop": 0.5454545454545454,
"repo_name": "rootulp/xpython",
"id": "dff3aafa8d77c50be9d1030ef03ef581b089c6aa",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/sieve/sieve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209553"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
} |
from multithreadCrawler import *
from snapshotAnalyzer import *
# Toggle between live crawling and loading a pickled snapshot list.
# NOTE: Python 2 script (print statements, raw_input).
crawlModeActivated = False
if __name__ == '__main__':
    # url = raw_input('URL: ')
    snapshotList = []
    if crawlModeActivated:
        # Crawl snapshots of the hard-coded URL via the multithreaded crawler.
        url = 'http://www.aone-video.com/avi.htm'
        urlList = getURLs(url)
        snapshotList = getSnapshots(urlList,num_thread = 30)
        print "Data Crawling is done! Analyzing now..."
    else:
        # Reuse previously crawled snapshots from the local pickle file.
        snapshotList = pickle.load(open("data.p","rb"))
        snapshotList[0].openHTML()
    keyword = str(raw_input('Enter search keyword: '))
    listLength = len(snapshotList)
    # Walk the snapshots chronologically, tracking the keyword; when it
    # disappears, ask the user to supply a replacement keyword.
    for i in range(listLength):
        snapshot = snapshotList[i]
        print '%s/%s. Analyzing %s'%(i,listLength,snapshot.getDate())
        if snapshot.contain(keyword):
            print 'OK'
        elif snapshot.contain("Got an HTTP 302 response at crawl time"):
            # Wayback crawl-time redirect page: nothing to analyze.
            print 'Error 302! Continue'
        else:
            #Find new keyword
            print 'Unable to find: '+keyword
            print 'Suspect changes in content!'
            # Show the diff against the previous snapshot to help the user
            # pick the replacement keyword.
            if i>0:
                snapshotList[i-1].compareHTML(snapshotList[i])
            else:
                snapshotList[0].openHTML()
            newKeyword = str(raw_input('Enter new search keyword: '))
            #enforce user to key in correct keyword
            # '-1' is the sentinel for "skip this snapshot".
            while (not snapshot.contain(newKeyword)) and (newKeyword!='-1'):
                print 'Unable to find: '+newKeyword
                newKeyword = str(raw_input('Enter new search keyword: '))
            if newKeyword == '-1':
                print 'Page Error! Skip current snapshot (on day %s) and continue comparing'%snapshot.getDate()
            else:
                print 'Search keyword is changed to: %s'%newKeyword
                keyword = newKeyword
| {
"content_hash": "60c32e0fed663dfab6e6f93742b3abe4",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 111,
"avg_line_length": 35.31372549019608,
"alnum_prop": 0.575235980011105,
"repo_name": "trthanhquang/wayback-data-collector",
"id": "a8f3cc578bcdb7342153f752644d0a7dcec411de",
"size": "1801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "louis-html-analyzer/compareSnapshot_Sequential_Example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "177821"
}
],
"symlink_target": ""
} |
"""JSON utility functions."""
from __future__ import annotations
from collections import deque
import json
import logging
import os
import tempfile
from typing import Any, Callable
from homeassistant.core import Event, State
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
class SerializationError(HomeAssistantError):
    """Error serializing the data to JSON.

    Raised by save_json() when json.dumps() fails on the payload.
    """
class WriteError(HomeAssistantError):
    """Error writing the data.

    Raised by save_json() when the temp-file write, chmod or replace fails.
    """
def load_json(filename: str, default: list | dict | None = None) -> list | dict:
    """Load JSON data from a file and return as dict or list.

    Defaults to returning empty dict if file is not found.  Parse and
    other OS errors are re-raised as HomeAssistantError.
    """
    try:
        with open(filename, encoding="utf-8") as handle:
            return json.loads(handle.read())  # type: ignore
    except FileNotFoundError:
        # Not fatal -- fall through and hand back the default below.
        _LOGGER.debug("JSON file not found: %s", filename)
    except ValueError as error:
        _LOGGER.exception("Could not parse JSON content: %s", filename)
        raise HomeAssistantError(error) from error
    except OSError as error:
        _LOGGER.exception("JSON file reading failed: %s", filename)
        raise HomeAssistantError(error) from error
    return default if default is not None else {}
def save_json(
    filename: str,
    data: list | dict,
    private: bool = False,
    *,
    encoder: type[json.JSONEncoder] | None = None,
) -> None:
    """Save JSON data to a file atomically.

    Writes to a temporary file in the destination directory and then
    os.replace()s it into place.  With private=True the file keeps the
    0o600 mode of the temp file; otherwise it is chmodded to 0o644.
    Raises SerializationError or WriteError on failure.
    """
    try:
        json_data = json.dumps(data, indent=4, cls=encoder)
    except TypeError as error:
        # Locate which parts of the payload could not be serialized so the
        # error message pinpoints the offending path(s).
        msg = f"Failed to serialize to JSON: (unknown). Bad data at {format_unserializable_data(find_paths_unserializable_data(data))}"
        _LOGGER.error(msg)
        raise SerializationError(msg) from error
    tmp_filename = ""
    tmp_path = os.path.split(filename)[0]
    try:
        # Modern versions of Python tempfile create this file with mode 0o600
        with tempfile.NamedTemporaryFile(
            mode="w", encoding="utf-8", dir=tmp_path, delete=False
        ) as fdesc:
            fdesc.write(json_data)
            tmp_filename = fdesc.name
        if not private:
            os.chmod(tmp_filename, 0o644)
        # Atomic on POSIX: readers see either the old or the new file.
        os.replace(tmp_filename, filename)
    except OSError as error:
        _LOGGER.exception("Saving JSON file failed: %s", filename)
        raise WriteError(error) from error
    finally:
        if os.path.exists(tmp_filename):
            # Replace did not happen; remove the orphaned temp file.
            try:
                os.remove(tmp_filename)
            except OSError as err:
                # If we are cleaning up then something else went wrong, so
                # we should suppress likely follow-on errors in the cleanup
                _LOGGER.error("JSON replacement cleanup failed: %s", err)
def format_unserializable_data(data: dict[str, Any]) -> str:
    """Format output of find_paths in a friendly way.

    Format is comma separated: <path>=<value>(<type>)
    """
    # Close the type parenthesis: the previous f-string omitted the ")" so
    # the output did not match the documented format.
    return ", ".join(f"{path}={value}({type(value)})" for path, value in data.items())
def find_paths_unserializable_data(
    bad_data: Any, *, dump: Callable[[Any], str] = json.dumps
) -> dict[str, Any]:
    """Find the paths to unserializable data.

    This method is slow! Only use for error handling.
    """
    pending = deque([(bad_data, "$")])
    bad_paths: dict[str, Any] = {}

    while pending:
        current, path = pending.popleft()

        try:
            dump(current)
        except (ValueError, TypeError):
            pass
        else:
            # This subtree serializes cleanly; nothing to report.
            continue

        # We convert objects with as_dict to their dict values so we can find
        # bad data inside it
        if hasattr(current, "as_dict"):
            label = current.__class__.__name__
            if isinstance(current, State):
                label = f"{label}: {current.entity_id}"
            elif isinstance(current, Event):
                label = f"{label}: {current.event_type}"
            path = f"{path}({label})"
            current = current.as_dict()

        if isinstance(current, dict):
            for key, value in current.items():
                try:
                    # Is the key itself serializable?
                    dump({key: None})
                except TypeError:
                    bad_paths[f"{path}<key: {key}>"] = key
                else:
                    # Key is fine; descend into the value.
                    pending.append((value, f"{path}.{key}"))
        elif isinstance(current, list):
            for idx, item in enumerate(current):
                pending.append((item, f"{path}[{idx}]"))
        else:
            bad_paths[path] = current

    return bad_paths
| {
"content_hash": "0328c5d1ed0c307a277a796c7ada0b2d",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 136,
"avg_line_length": 32.23943661971831,
"alnum_prop": 0.5941459152468327,
"repo_name": "adrienbrault/home-assistant",
"id": "fac008d9f0fd2c8182889eca20a9529bc01e469f",
"size": "4578",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/util/json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
EnsureSConsVersion(1,2)
import os
import inspect
import platform
def get_cuda_paths():
  """Determines CUDA {bin,lib,include} paths

  returns (bin_path,lib_path,inc_path)

  Defaults depend on the host OS; each path may be overridden with the
  CUDA_BIN_PATH / CUDA_LIB_PATH / CUDA_INC_PATH environment variables.
  """

  # determine defaults
  if os.name == 'nt':
    bin_path = 'C:/CUDA/bin'
    lib_path = 'C:/CUDA/lib'
    inc_path = 'C:/CUDA/include'
  elif os.name == 'posix':
    bin_path = '/usr/local/cuda/bin'
    lib_path = '/usr/local/cuda/lib'
    inc_path = '/usr/local/cuda/include'
  else:
    # Use the call form of raise: the old "raise Type, value" statement is a
    # syntax error on Python 3, while this form works on both 2 and 3.
    raise ValueError('Error: unknown OS. Where is nvcc installed?')

  # 64-bit installs keep libraries in lib64, except on OS X
  if platform.platform()[:6] != 'Darwin' and \
      platform.machine()[-2:] == '64':
    lib_path += '64'

  # override with environement variables
  if 'CUDA_BIN_PATH' in os.environ:
    bin_path = os.path.abspath(os.environ['CUDA_BIN_PATH'])
  if 'CUDA_LIB_PATH' in os.environ:
    lib_path = os.path.abspath(os.environ['CUDA_LIB_PATH'])
  if 'CUDA_INC_PATH' in os.environ:
    inc_path = os.path.abspath(os.environ['CUDA_INC_PATH'])

  return (bin_path,lib_path,inc_path)
def getTools():
  """Return the SCons tool list appropriate for the host OS."""
  if os.name == 'nt':
    return ['default', 'msvc']
  if os.name == 'posix':
    return ['default', 'gcc']
  return ['default']
# Capture the stock SCons Environment factory before it is shadowed by the
# customized Environment() defined later in this file.
OldEnvironment = Environment;
# this dictionary maps the name of a compiler program to a dictionary mapping the name of
# a compiler switch of interest to the specific switch implementing the feature
# (note: the 'cl' debug entry is a list, appended as a single nested element)
gCompilerOptions = {
    'gcc' : {'warn_all' : '-Wall', 'warn_errors' : '-Werror', 'optimization' : '-O2', 'debug' : '-g', 'exception_handling' : '', 'omp' : '-fopenmp'},
    'g++' : {'warn_all' : '-Wall', 'warn_errors' : '-Werror', 'optimization' : '-O2', 'debug' : '-g', 'exception_handling' : '', 'omp' : '-fopenmp'},
    'cl' : {'warn_all' : '/Wall', 'warn_errors' : '/WX', 'optimization' : '/Ox', 'debug' : ['/Zi', '-D_DEBUG', '/MTd'], 'exception_handling' : '/EHsc', 'omp' : '/openmp'}
}
# this dictionary maps the name of a linker program to a dictionary mapping the name of
# a linker switch of interest to the specific switch implementing the feature
gLinkerOptions = {
    'gcc' : {'debug' : ''},
    'g++' : {'debug' : ''},
    'link' : {'debug' : '/debug' }
}
def getCFLAGS(mode, warn, warnings_as_errors, CC):
  """Assemble the C compiler flag list for the given build mode and compiler."""
  flags = []

  # optimization / debug switches depend on the build mode
  if mode == 'release':
    flags.append(gCompilerOptions[CC]['optimization'])
  elif mode == 'debug':
    flags.append(gCompilerOptions[CC]['debug'])
    flags.append('-DTHRUST_DEBUG')

  # warning configuration
  if warn:
    flags.append(gCompilerOptions[CC]['warn_all'])
  if warnings_as_errors:
    flags.append(gCompilerOptions[CC]['warn_errors'])

  # avoid problems specific to windows
  if CC == 'cl':
    # avoid min/max problems due to windows.h
    flags.append('/DNOMINMAX')
    # suppress warnings due to "decorated name length exceeded"
    flags.append('/wd4503')

  return flags
def getCXXFLAGS(mode, warn, warnings_as_errors, CXX):
  """Assemble the C++ compiler flag list for the given build mode and compiler."""
  flags = []

  # optimization / debug switches depend on the build mode
  if mode == 'release':
    flags.append(gCompilerOptions[CXX]['optimization'])
  elif mode == 'debug':
    flags.append(gCompilerOptions[CXX]['debug'])

  # exception handling is always enabled
  flags.append(gCompilerOptions[CXX]['exception_handling'])

  # warning configuration
  if warn:
    flags.append(gCompilerOptions[CXX]['warn_all'])
  if warnings_as_errors:
    flags.append(gCompilerOptions[CXX]['warn_errors'])

  return flags
def getNVCCFLAGS(mode, arch):
  """Assemble nvcc flags for the requested compute capability and build mode."""
  flags = ['-arch=' + arch]

  if platform.platform()[:6] == 'Darwin':
    # nvcc needs an explicit word size on OS X
    flags.append('-m64' if platform.machine()[-2:] == '64' else '-m32')

  if mode == 'debug':
    # turn on debug mode
    # XXX make this work when we've debugged nvcc -G
    #flags.append('-G')
    pass

  return flags
def getLINKFLAGS(mode, LINK):
  """Assemble linker flags for the given build mode and linker program."""
  if mode == 'debug':
    # turn on debug mode
    return [gLinkerOptions[LINK]['debug']]
  return []
def Environment(*args, **keywords):
  """Create a SCons Environment preconfigured for CUDA builds.

  Wraps the stock SCons Environment (captured as OldEnvironment) and adds:
  command-line build variables (mode, arch, warning switches), the nvcc
  tool, mode-appropriate C/C++/nvcc/linker flags, CUDA library and include
  paths, and pass-through of library-path environment variables.
  """
  # allow the user discretion to choose the MSVC version
  vars = Variables()
  if os.name == 'nt':
    vars.Add(EnumVariable('MSVC_VERSION', 'MS Visual C++ version', None, allowed_values=('8.0', '9.0', '10.0')))

  # add a variable to handle RELEASE/DEBUG mode
  vars.Add(EnumVariable('mode', 'Release versus debug mode', 'release',
                        allowed_values = ('release', 'debug')))

  # add a variable to handle compute capability
  vars.Add(EnumVariable('arch', 'Compute capability code generation', None,
                        allowed_values = ('sm_10', 'sm_11', 'sm_12', 'sm_13', 'sm_20', 'sm_21', 'sm_30','sm_32','sm_35','sm_37','sm_50','sm_52','sm_53')))

  # add a variable to handle warnings
  # (warnings default to on for gcc-style compilers, off for MSVC)
  if os.name == 'posix':
    vars.Add(BoolVariable('Wall', 'Enable all compilation warnings', 1))
  else:
    vars.Add(BoolVariable('Wall', 'Enable all compilation warnings', 0))

  # add a variable to treat warnings as errors
  vars.Add(BoolVariable('Werror', 'Treat warnings as errors', 0))

  # create an Environment
  env = OldEnvironment(*args, tools = getTools(), variables = vars, **keywords)

  # get the absolute path to the directory containing
  # this source file
  thisFile = inspect.getabsfile(Environment)
  thisDir = os.path.dirname(thisFile)

  # enable nvcc
  env.Tool('nvcc', toolpath = [os.path.join(thisDir)])

  # get C compiler switches
  env.Append(CFLAGS = getCFLAGS(env['mode'], env['Wall'], env['Werror'], env.subst('$CC')))

  # get CXX compiler switches
  env.Append(CXXFLAGS = getCXXFLAGS(env['mode'], env['Wall'], env['Werror'], env.subst('$CXX')))

  # get NVCC compiler switches
  # ('arch' has no default, so it is only present when given on the command line)
  if 'arch' in env:
    env.Append(NVCCFLAGS = getNVCCFLAGS(env['mode'], env['arch']))

  # get linker switches
  env.Append(LINKFLAGS = getLINKFLAGS(env['mode'], env.subst('$LINK')))

  # get CUDA paths
  (cuda_exe_path,cuda_lib_path,cuda_inc_path) = get_cuda_paths()
  env.Append(LIBPATH = [cuda_lib_path])
  env.Append(CPPPATH = [cuda_inc_path])

  # link against the standard library
  # we don't have to do this on Windows
  if os.name == 'posix':
    env.Append(LIBS = ['stdc++'])

  # link against backend-specific runtimes
  # XXX we shouldn't have to link against cudart unless we're using the
  # cuda runtime, but cudafe inserts some dependencies when compiling .cu files
  # XXX ideally this gets handled in nvcc.py if possible
  env.Append(LIBS = ['cudart'])

  # import the LD_LIBRARY_PATH so we can run commands which depend
  # on shared libraries
  # XXX we should probably just copy the entire environment
  if os.name == 'posix':
    for k in os.environ:
      if k in ('DYLD_LIBRARY_PATH', 'LD_LIBRARY_PATH') or k.startswith('NV') or k.startswith('CUDA'):
        env['ENV'][k] = os.environ[k]

  # generate help text
  Help(vars.GenerateHelpText(env))

  return env
| {
"content_hash": "146eb32af9682a7f31be02f7f3d5c7a5",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 175,
"avg_line_length": 31.066964285714285,
"alnum_prop": 0.64290846385975,
"repo_name": "brendanlong/cuda-scons",
"id": "0dd19a1e5342d86145bf9474bbea29ce70df2872",
"size": "6959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build-env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cuda",
"bytes": "203"
},
{
"name": "Python",
"bytes": "11632"
}
],
"symlink_target": ""
} |
from django.forms import MultiWidget, HiddenInput
from versatileimagefield.widgets import (ClearableFileInputWithImagePreview,
SizedImageCenterpointWidgetMixIn)
class ImagePreviewFileInput(ClearableFileInputWithImagePreview):
    """File input whose image preview includes a clickable PPOI stage.

    Overrides the preview template so the rendered image is wrapped in a
    'point-stage' element carrying the ids needed to position the
    primary-point-of-interest (PPOI) marker over the preview.
    """

    # %-style placeholders (point_stage_id, image_preview_id, ppoi_id,
    # image_preview) are filled in by the parent widget's rendering code.
    template_with_initial_and_imagepreview = """
    <div class="sizedimage-mod preview">
        <div class="image-wrap outer">
            <div class="point-stage" id="%(point_stage_id)s"
                 data-image_preview_id="%(image_preview_id)s">
                <div class="ppoi-point" id="%(ppoi_id)s"></div>
            </div>
            <div class="image-wrap inner">
                %(image_preview)s
            </div>
        </div>
    </div>"""
class ImagePreviewWidget(SizedImageCenterpointWidgetMixIn, MultiWidget):
    """Composite widget: an image file chooser plus a hidden PPOI input."""

    def __init__(self, attrs=None):
        # The file chooser renders the preview; the hidden input stores the
        # serialized primary point of interest set by the preview JS.
        file_input = ImagePreviewFileInput(attrs={'class': 'file-chooser'})
        ppoi_input = HiddenInput(attrs={'class': 'ppoi-input'})
        super(ImagePreviewWidget, self).__init__((file_input, ppoi_input), attrs)
"content_hash": "530d043d1770732e0fb5cd0d6c178f2c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 40.15384615384615,
"alnum_prop": 0.6120689655172413,
"repo_name": "taedori81/saleor",
"id": "fc462ace16d1ec3b73cad164e08610cb0fa9a694",
"size": "1044",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "saleor/dashboard/product/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "26259"
},
{
"name": "HTML",
"bytes": "182756"
},
{
"name": "JavaScript",
"bytes": "18075"
},
{
"name": "Python",
"bytes": "274882"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
} |
import os
import tempfile
from wsgiref.util import FileWrapper
from celery.task import task
from celery.utils.log import get_task_logger
from django.conf import settings
import zipfile
from corehq.apps.app_manager.models import get_app
from corehq.apps.hqmedia.cache import BulkMultimediaStatusCache
from corehq.apps.hqmedia.models import CommCareMultimedia
from soil import DownloadBase
from django.utils.translation import ugettext as _
from soil.util import expose_file_download, expose_cached_download
# Celery task logger for this module.
# NOTE(review): the name shadows the stdlib ``logging`` module within this
# file; renaming could affect code outside this view, so it is only flagged.
logging = get_task_logger(__name__)

# Extensions stored uncompressed in the app zip (these formats are already
# compressed, so deflating them again wastes CPU for no size gain).
MULTIMEDIA_EXTENSIONS = ('.mp3', '.wav', '.jpg', '.png', '.gif', '.3gp', '.mp4', '.zip', )
@task
def process_bulk_upload_zip(processing_id, domain, app_id, username=None, share_media=False,
                            license_name=None, author=None, attribution_notes=None):
    """
    Responsible for processing the uploaded zip from Bulk Upload.

    Matches every file in the zip against the application's referenced
    multimedia paths, saves matching media, and records progress, skips and
    mismatches in the shared BulkMultimediaStatusCache entry identified by
    processing_id.
    """
    status = BulkMultimediaStatusCache.get(processing_id)
    if not status:
        # no download data available, abort
        return
    app = get_app(domain, app_id)

    # Mark the status record so pollers can see the task was picked up.
    status.in_celery = True
    status.save()

    uploaded_zip = status.get_upload_zip()
    if not uploaded_zip:
        return

    zipped_files = uploaded_zip.namelist()
    status.total_files = len(zipped_files)
    checked_paths = []

    try:
        for index, path in enumerate(zipped_files):
            status.update_progress(len(checked_paths))
            checked_paths.append(path)
            file_name = os.path.basename(path)
            try:
                data = uploaded_zip.read(path)
            except Exception as e:
                # Broad catch is deliberate: any unreadable entry is reported
                # per-file rather than aborting the whole upload.
                status.add_unmatched_path(path, _("Error reading file: %s" % e))
                continue

            # Classify by file content; entries of unknown type are skipped.
            media_class = CommCareMultimedia.get_class_by_data(data, filename=path)
            if not media_class:
                status.add_skipped_path(path, CommCareMultimedia.get_mime_type(data))
                continue

            # Case-insensitive match of the zip entry against the app's
            # referenced multimedia paths.
            app_paths = list(app.get_all_paths_of_type(media_class.__name__))
            app_paths_lower = [p.lower() for p in app_paths]
            form_path = media_class.get_form_path(path, lowercase=True)
            if not form_path in app_paths_lower:
                status.add_unmatched_path(path,
                                          _("Did not match any %s paths in application." % media_class.get_nice_name()))
                continue
            index_of_path = app_paths_lower.index(form_path)
            form_path = app_paths[index_of_path]  # this is the correct capitalization as specified in the form

            # Deduplicate by content: reuse an existing multimedia doc if
            # this exact data has been uploaded before.
            multimedia = media_class.get_by_data(data)
            if not multimedia:
                status.add_unmatched_path(path,
                                          _("Matching path found, but could not save the data to couch."))
                continue

            is_new = not form_path in app.multimedia_map.keys()
            is_updated = multimedia.attach_data(data,
                                                original_filename=file_name,
                                                username=username)
            if not is_updated and not getattr(multimedia, '_id'):
                status.add_unmatched_path(form_path,
                                          _("Matching path found, but didn't save new multimedia correctly."))
                continue

            if is_updated or is_new:
                multimedia.add_domain(domain, owner=True)
                if share_media:
                    multimedia.update_or_add_license(domain, type=license_name, author=author,
                                                     attribution_notes=attribution_notes)
                app.create_mapping(multimedia, form_path)

            media_info = multimedia.get_media_info(form_path, is_updated=is_updated, original_path=path)
            status.add_matched_path(media_class, media_info)

        # Final progress tick after the loop completes.
        status.update_progress(len(checked_paths))
    except Exception as e:
        # Record the failure on the status object so the UI can surface it.
        status.mark_with_error(_("Error while processing zip: %s" % e))
    uploaded_zip.close()

    status.complete = True
    status.save()
@task
def build_application_zip(include_multimedia_files, include_index_files,
                          app, download_id, compress_zip=False, filename="commcare.zip"):
    """Build (or reuse) a zip of the app's files and expose it for download.

    When the shared transfer drive is enabled, the zip is cached there keyed
    on app id/version and the include options, and only rebuilt if missing;
    otherwise a temporary file is used. Progress is reported through
    DownloadBase. Returns a dict with any errors collected while gathering
    the app's files.
    """
    from corehq.apps.hqmedia.views import iter_app_files

    DownloadBase.set_progress(build_application_zip, 0, 100)
    errors = []
    compression = zipfile.ZIP_DEFLATED if compress_zip else zipfile.ZIP_STORED

    use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
    if use_transfer:
        # Cache path is unique per app, options and version.
        fpath = os.path.join(settings.SHARED_DRIVE_CONF.transfer_dir, "{}{}{}{}".format(
            app._id,
            'mm' if include_multimedia_files else '',
            'ccz' if include_index_files else '',
            app.version,
        ))
    else:
        _, fpath = tempfile.mkstemp()

    if not (os.path.isfile(fpath) and use_transfer):  # Don't rebuild the file if it is already there
        files, errors = iter_app_files(app, include_multimedia_files, include_index_files)
        with open(fpath, 'wb') as tmp:
            with zipfile.ZipFile(tmp, "w") as z:
                for path, data in files:
                    # don't compress multimedia files (already compressed formats)
                    extension = os.path.splitext(path)[1]
                    file_compression = zipfile.ZIP_STORED if extension in MULTIMEDIA_EXTENSIONS else compression
                    z.writestr(path, data, file_compression)

    common_kwargs = dict(
        mimetype='application/zip' if compress_zip else 'application/x-zip-compressed',
        content_disposition='attachment; filename="{fname}"'.format(fname=filename),
        download_id=download_id,
    )
    if use_transfer:
        expose_file_download(
            fpath,
            use_transfer=use_transfer,
            **common_kwargs
        )
    else:
        # Open the zip in binary mode: the previous text-mode open('fpath')
        # corrupts the archive on platforms that distinguish text/binary and
        # fails outright on Python 3.
        expose_cached_download(
            FileWrapper(open(fpath, 'rb')),
            expiry=(1 * 60 * 60),
            **common_kwargs
        )

    DownloadBase.set_progress(build_application_zip, 100, 100)
    return {
        "errors": errors,
    }
| {
"content_hash": "33c79b6b77809009691a34ccb085b8dd",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 120,
"avg_line_length": 38.75471698113208,
"alnum_prop": 0.5949367088607594,
"repo_name": "puttarajubr/commcare-hq",
"id": "92cca4f06c84e7442427ded6a311bb2679c5907b",
"size": "6162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/hqmedia/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
"""
This script is used initialize the sample project with user-specific values
"""
# Imports ######################################################################
from __future__ import print_function
import os
import re
import sys
import time
import shutil
# Metadata #####################################################################
__author__ = "Timothy McFadden"
__creationDate__ = "11/25/2014"
__license__ = "MIT"
__version__ = "0.01"
# Globals ######################################################################
# Dry-run flag: when True, patched text and renamed paths are printed
# instead of being written to disk.
DEBUG = False
def get_user_data():
    """Prompt for all project settings and return them as a tuple.

    Returns (project_name, fullname, github_username, github_project,
    travis_username); raises if any answer is empty.
    """
    project_name = user_input("Enter the name of your new project: ")
    fullname = user_input("Enter your full name (e.g. Firstname Lastname): ")
    github_username = user_input("Enter your github username: ")
    github_project = user_input("Enter the name of your github project", default=project_name.replace(" ", "_"))
    travis_username = user_input("Enter your Travic-CI username", default=github_username)

    answers = (project_name, fullname, github_username, github_project,
               travis_username)
    if not all(answers):
        raise Exception("Cannot leave a field empty!")
    return answers
def user_input(prompt, input_type=str, default=None, show_default=True, regex=None, choices=None):
    '''Continues to prompt a user for valid input until valid input has been received.'''
    if input_type not in [str, int, float, long]:
        raise TypeError("Only [str, int, float, long] types are supported")
    elif regex and not hasattr(regex, 'match'):
        raise TypeError("regex should be a compiled re object; it was a '{0:s}'".format(type(regex)))

    while True:
        # Show the default alongside the prompt when requested.
        if default and show_default:
            shown_prompt = '{0:s} [{1:s}]: '.format(prompt, str(default))
        else:
            shown_prompt = str(prompt)

        answer = raw_input(shown_prompt)
        if default and len(answer) == 0:
            answer = default

        if regex and (regex.match(answer) is None):
            print("[{0:s}] doesn't match [{1:s}]".format(answer, regex.pattern))
            continue

        # Convert to the requested numeric type; str passes through as-is.
        if input_type is not str:
            answer = input_type(answer)

        if choices and (answer not in choices):
            print("[{0:s}] is not a valid choice".format(str(answer)))
            continue

        return answer
def replace_in_file(filepath, replace_list):
    """Apply each (pattern, replacement) pair to the file's text and write
    the result back in place (or just print it when DEBUG is set).
    """
    with open(filepath, 'rb') as fh:
        text = fh.read()

    for pattern, replacement in replace_list:
        text = re.sub(pattern, replacement, text)

    if DEBUG:
        print(text)
    else:
        with open(filepath, 'wb') as fh:
            fh.write(text)
def get_name_patch(path):
    """Return the patch function whose path fragment matches ``path``.

    Matching is OS-agnostic because the fragments in ``patches`` are built
    with os.path.join. Returns None for directories or unmatched files.
    """
    if not os.path.isfile(path):
        return None
    for fragment, patch_func in patches["files"].items():
        if fragment in path:
            return patch_func
    return None
def process(user_data, root_directory):
    """Walk the project tree bottom-up, applying file and directory patches."""
    for parent, subdirs, filenames in os.walk(root_directory, topdown=False):
        for filename in filenames:
            full_path = os.path.join(parent, filename)
            patch_func = get_name_patch(full_path)
            if patch_func:
                patch_func(user_data, full_path)

        for subdir in subdirs:
            if subdir in patches["dirs"]:
                patches["dirs"][subdir](user_data, os.path.join(parent, subdir))
def license(user_data, filepath):
    """Stamp the current year and author name into the license file."""
    replace_in_file(filepath, [
        ("CURRENTYEAR", user_data["current_year"]),
        ("FULLNAME", user_data["fullname"]),
    ])
def proj_dir(user_data, dirpath):
    """Rename the sample project directory to the real project dir name."""
    target = re.sub("SAMPLEPROJDIRNAME", user_data["project_dir_name"], dirpath)
    if DEBUG:
        print(target)
    else:
        shutil.move(dirpath, target)
def readme(user_data, filepath):
    """Patch the Travis username and project dir name into a README."""
    replace_in_file(filepath, [
        ("TRAVISUSERNAME", user_data["travis_username"]),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
    ])
def auto_generate(user_data, filepath):
    """Patch the Travis username and project dir name into auto-generate.py."""
    replace_in_file(filepath, [
        ("TRAVISUSERNAME", user_data["travis_username"]),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
    ])
def conf(user_data, filepath):
    """Patch project name, dir name, author and year into Sphinx conf.py."""
    replace_in_file(filepath, [
        ('"SAMPLEPROJ"', '"%s"' % user_data["project_name"]),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
        ("FULLNAME", user_data["fullname"]),
        ("CURRENTYEAR", user_data["current_year"]),
    ])
def index_rst(user_data, filepath):
    """Retitle the Sphinx index page and resize its RST underline to match."""
    heading = "Documentation for %s" % user_data["project_name"]
    underline = "=" * len(heading)
    replace_in_file(filepath, [
        ("SAMPLEPROJ", user_data["project_name"]),
        ("============================", underline),
    ])
def auto_rst(user_data, filepath):
    """Patch project, dir, and GitHub identifiers into an RST page."""
    replace_in_file(filepath, [
        ("SAMPLEPROJ", user_data["project_name"]),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
        ("GHUSERNAME", user_data["github_username"]),
        ("GHPROJNAME", user_data["github_project_name"]),
    ])
def create_release(user_data, filepath):
    """Patch the project name and dir name into create-release.py."""
    replace_in_file(filepath, [
        ("SAMPLEPROJ", user_data["project_name"]),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
    ])
def logger(user_data, filepath):
    """Patch author, year, today's date and dir name into logger.py."""
    replace_in_file(filepath, [
        ("FULLNAME", user_data["fullname"]),
        ("CURRENTYEAR", user_data["current_year"]),
        ("99/99/9999", time.strftime("%m/%d/%Y", time.localtime())),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
    ])
def init_py(user_data, filepath):
    """Patch author, year, date, dir name and project name into a module."""
    replace_in_file(filepath, [
        ("FULLNAME", user_data["fullname"]),
        ("CURRENTYEAR", user_data["current_year"]),
        ("99/99/9999", time.strftime("%m/%d/%Y", time.localtime())),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
        ("SAMPEPROJNAME", user_data["project_name"]),
    ])
def manifest(user_data, filepath):
    """Patch the project dir name into MANIFEST.in."""
    replace_in_file(filepath, [
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
    ])
def setup_py(user_data, filepath):
    """Patch project, dir, GitHub and author identifiers into setup.py."""
    replace_in_file(filepath, [
        ("SAMPEPROJNAME", user_data["project_name"]),
        ("SAMPLEPROJDIRNAME", user_data["project_dir_name"]),
        ("GHUSERNAME", user_data["github_username"]),
        ("GHPROJNAME", user_data["github_project_name"]),
        ("FULLNAME", user_data["fullname"]),
    ])
def test_sample(user_data, filepath):
    """Patch the sample test module, then rename it after the project."""
    init_py(user_data, filepath)
    renamed = re.sub("test_SAMPLEPROJDIRNAME", "test_%s" % user_data["project_dir_name"], filepath)
    if DEBUG:
        print(renamed)
    else:
        os.rename(filepath, renamed)
# Registry of patch functions.
#
# "files": keys are OS-specific path fragments (built with os.path.join); a
#          file whose full path contains a fragment is handled by the mapped
#          function (see get_name_patch).
# "dirs":  keys are directory basenames to rename.
#
# NOTE(review): file matching depends on dict iteration order when multiple
# fragments match one path (e.g. "test_SAMPLEPROJDIRNAME.py" is a substring
# of the tests/ entry) -- confirm the intended precedence.
patches = {
    "files": {
        "LICENSE.txt": license,
        "README.md": readme,
        os.path.join("src", "MANIFEST.in"): manifest,
        os.path.join("src", "README.rst"): readme,
        os.path.join("src", "setup.py"): setup_py,
        os.path.join("docs", "auto-generate.py"): auto_generate,
        os.path.join("docs", "conf.py"): conf,
        os.path.join("docs", "index.rst"): index_rst,
        os.path.join("rst", "auto.rst"): auto_rst,
        os.path.join("rst", "intro.rst"): auto_rst,
        os.path.join("scripts", "create-release.py"): create_release,
        os.path.join("SAMPLEPROJDIRNAME", "logger.py"): logger,
        os.path.join("SAMPLEPROJDIRNAME", "__init__.py"): init_py,
        os.path.join("SAMPLEPROJDIRNAME", "__main__.py"): init_py,
        os.path.join("SAMPLEPROJDIRNAME", "tests", "test_SAMPLEPROJDIRNAME.py"): init_py,
        os.path.join("test_SAMPLEPROJDIRNAME.py"): test_sample,
    },
    "dirs": {
        "SAMPLEPROJDIRNAME": proj_dir,
    },
}
################################################################################
if __name__ == '__main__':
    # Interactive entry point: confirm, gather settings, patch the tree, and
    # finally rename this script so it is not accidentally run again.
    print("**** WARNING: THIS PROCESS IS NOT REVERSIBLE ****")
    confirm = user_input("Enter Y to continue: ")
    if confirm.upper() != "Y":
        sys.exit()

    # The project root is the parent of the directory containing this script.
    root_directory = os.path.join(os.path.dirname(__file__), "..")

    project_name, fullname, github_username, github_project_name, travis_username = get_user_data()
    current_year = time.strftime("%Y", time.localtime())

    user_data = {
        "project_name": project_name,
        "project_dir_name": project_name.replace(" ", "_"),
        "fullname": fullname,
        "github_username": github_username,
        "github_project_name": github_project_name,
        "travis_username": travis_username,
        "current_year": current_year
    }

    process(user_data, root_directory)

    # NOTE(review): `thisfile` is computed but unused, and os.rename() is
    # given a bare basename, so the renamed script lands in the current
    # working directory rather than next to the original -- confirm intended.
    thisfile = os.path.abspath(__file__)
    newname = "_already_used_" + os.path.basename(__file__)
    os.rename(__file__, newname)
| {
"content_hash": "a1e7e7e92500f674af43e4e67bdf854f",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 112,
"avg_line_length": 32.71024734982332,
"alnum_prop": 0.584206546397321,
"repo_name": "mtik00/glowing-octo-dubstep",
"id": "7d7ad1ec7a5c28c4154d826f895ab0262de9ebe1",
"size": "9279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/initialize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19622"
}
],
"symlink_target": ""
} |
from app import app, models
from flask import render_template, request, abort, jsonify
from .forms import DataAddForm
from flask import make_response
import urllib.parse
# Default sort direction per sortable column: True means the column is
# sorted descending when used as a sort key by the JSON endpoints below.
DESC_TABLE = {
    'id': False,
    'word_id': False,
    'word_string': False,
    'fresh_rate': True,
    'rank_good': True,
    'rank_bad': True,
    'viewed': True,
    'vote': True,
    'report': True
}
@app.route('/')
@app.route('/index')
@app.route('/home')
@app.route('/words')
@app.route('/words/<int:asdf>')
@app.route('/admin')
@app.route('/admin/<int:asdf>')
@app.route('/candidates')
@app.route('/candidates/<int:asdf>')
@app.route('/search')
@app.route('/about')
def index(**kwargs):
    """Serve index.html for every UI route.

    All UI routes return the same page contents; path parameters are
    accepted (and ignored) so deep links still resolve.
    """
    # Use a context manager so the file handle is closed promptly instead of
    # leaking until garbage collection.
    with open('app/templates/index.html') as fh:
        return make_response(fh.read())
@app.route('/add_data', methods=['GET', 'POST'])
def add_data():
    """Show the CSV-upload form and save the submitted file on valid POST."""
    upload_form = DataAddForm()
    if upload_form.validate_on_submit():
        models.open_save_file(upload_form.filename.data)
    return render_template('add_file.html', title='add csv', form=upload_form)
@app.route('/read_data', methods=['GET', 'POST'])
def read_data():
    """Show the form and run a word search over the parsed input on POST."""
    search_form = DataAddForm()
    if search_form.validate_on_submit():
        models.word_search(models.parse_string(search_form.filename.data), 0, 14, 'fresh_rate')
    return render_template('add_file.html', title='read csv', form=search_form)
@app.route('/redis', methods=['GET', 'POST'])
def redis_add():
    """Maintenance endpoint: -1 advances time, -2 refreshes fresh rates."""
    form = DataAddForm()
    if form.validate_on_submit():
        code = int(form.filename.data)
        if code == -1:
            models.elapse_time()
        elif code == -2:
            models.update_fresh_rate()
    return render_template('add_file.html', title='add redis', form=form)
@app.route('/api/result', methods=['GET'])
def result_json():
    """Search results as JSON; query params: word (required), page, sort."""
    word = urllib.parse.unquote(request.args.get('word'))
    raw_page = request.args.get('page')
    page = 1 if raw_page is None else int(raw_page)
    sort_col = request.args.get('sort')
    if sort_col is None:
        sort_col = 'word_string'
    return models.get_search_json(word, page, 15,
                                  sort_col, DESC_TABLE[sort_col])
@app.route('/api/admin', methods=['GET'])
def admin_json():
    """Admin listing as JSON; query params: page, recent."""
    raw_page = request.args.get('page')
    page = 1 if raw_page is None else int(raw_page)
    raw_recent = request.args.get('recent')
    recent = 0 if raw_recent is None else raw_recent
    return models.get_admin_json(page, 15, int(recent))
@app.route('/api/search', methods=['POST'])
def search_json():
    """Search via JSON body: word (required), page, sort, maxshow."""
    payload = request.json
    if not payload or 'word' not in payload:
        abort(400)
    pattern = models.parse_to_regex(payload['word'])
    page = payload['page'] if 'page' in payload else 1
    sort_col = payload['sort'] if 'sort' in payload else 'word_string'
    per_page = payload['maxshow'] if 'maxshow' in payload else 15
    return models.get_search_json(pattern, page, per_page,
                                  sort_col, DESC_TABLE[sort_col])
@app.route('/api/word', methods=['GET'])
def word_json():
    """Word details as JSON; serves candidate data when id is a candidate."""
    word_id = int(request.args.get('id'))
    candidate = models.get_cand_word_json(word_id)
    if candidate is not None:
        return candidate
    # Regular word: count the view before returning its details.
    models.word_view(word_id)
    return models.get_word_json(word_id, 15)
@app.route('/api/candidate', methods=['GET'])
def candidate_json():
    """Candidate listing as JSON; query params: page, sort."""
    raw_page = request.args.get('page')
    page = 1 if raw_page is None else int(raw_page)
    sort_col = request.args.get('sort')
    if sort_col is None:
        sort_col = 'word_string'
    return models.get_candidate_json(page, 15,
                                     sort_col, DESC_TABLE[sort_col])
@app.route('/api/candidate_word', methods=['GET'])
def candidate_word_json():
    """Candidate-word details as JSON for the given ``id`` query param."""
    return models.get_cand_word_json(int(request.args.get('id')))
@app.route('/api/update', methods=['POST'])
def update_json():
    """Mutation endpoint: dispatch 'call_func' to the matching model call.

    The JSON body carries 'call_func' plus an optional argument list 'obj';
    unknown call_func values are silently ignored, as before.
    """
    if not request.json or 'call_func' not in request.json:
        abort(400)

    call_func = request.json['call_func']
    obj = None if 'obj' not in request.json else request.json['obj']

    # Map each action name to a callable taking the raw argument list.
    actions = {
        'word_candidate_insert': lambda o: models.word_candidate_insert(o[0]),
        'word_candidate_upvote': lambda o: models.word_candidate_upvote(int(o[0])),
        'word_candidate_downvote': lambda o: models.word_candidate_downvote(int(o[0])),
        'word_search_insert': lambda o: models.word_search_insert(o[0]),
        'report': lambda o: models.report(int(o[0]), int(o[1]), o[2]),
        'candidate_report': lambda o: models.candidate_report(int(o[0]), int(o[1]), o[2]),
        'word_report': lambda o: models.word_report(int(o[0]), int(o[1]), o[2]),
        'word_delete': lambda o: models.word_delete(int(o[0])),
        'word_upvote': lambda o: models.word_upvote(int(o[0])),
        'word_downvote': lambda o: models.word_downvote(int(o[0])),
        'tag_insert': lambda o: models.tag_list_insert(int(o[0]), o),
        'tag_upvote': lambda o: models.tag_upvote(int(o[0]), int(o[1])),
        'tag_downvote': lambda o: models.tag_downvote(int(o[0]), int(o[1])),
        'update_fresh_rate': lambda o: models.update_fresh_rate(),
        'elapse_time': lambda o: models.elapse_time(),
    }

    handler = actions.get(call_func)
    if handler is not None:
        handler(obj)
    return jsonify({'ok': 'ok'})
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of the default HTML error page."""
    return make_response(jsonify(error='Not found'), 404)
| {
"content_hash": "27717e0e33feb498328dab4f8f6963d0",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 88,
"avg_line_length": 33.73988439306358,
"alnum_prop": 0.603392153503512,
"repo_name": "wonook/LimeDictionary",
"id": "42e61ca7318523eaa5981181ccea030b1d7097e5",
"size": "5837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27212"
},
{
"name": "HTML",
"bytes": "12787"
},
{
"name": "JavaScript",
"bytes": "91333"
},
{
"name": "Python",
"bytes": "29069"
}
],
"symlink_target": ""
} |
import copy
import random
import time
from oslo.config import cfg
import six
from cinder.openstack.common._i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
# Config options for this module, registered on the global CONF below and
# exported through list_opts() for the oslo.config generator.
periodic_opts = [
    cfg.BoolOpt('run_external_periodic_tasks',
                default=True,
                help='Some periodic tasks can be run in a separate process. '
                     'Should we run them here?'),
]

CONF = cfg.CONF
CONF.register_opts(periodic_opts)

# Module-level logger.
LOG = logging.getLogger(__name__)

# Interval (seconds) applied to tasks decorated with spacing=0.
DEFAULT_INTERVAL = 60.0
def list_opts():
    """Entry point for oslo.config-generator."""
    # Deep-copy so callers cannot mutate the registered option objects.
    opts = copy.deepcopy(periodic_opts)
    return [(None, opts)]
class InvalidPeriodicTaskArg(Exception):
    """Raised when periodic_task() receives an unsupported keyword."""

    # Translated message template; formatted with the offending arg name.
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
    """Decorator to indicate that a method is a periodic task.

    This decorator can be used in two ways:

    1. Without arguments '@periodic_task', this will be run on the default
       interval of 60 seconds.

    2. With arguments:
       @periodic_task(spacing=N [, run_immediately=[True|False]])
       this will be run on approximately every N seconds. If this number is
       negative the periodic task will be disabled. If the run_immediately
       argument is provided and has a value of 'True', the first run of the
       task will be shortly after task scheduler starts.  If
       run_immediately is omitted or set to 'False', the first time the
       task runs will be approximately N seconds after the task scheduler
       starts.
    """
    def mark(func):
        # Reject the obsolete pre-spacing API explicitly.
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Control if run at all
        func._periodic_task = True
        func._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if func._periodic_external_ok and not CONF.run_external_periodic_tasks:
            func._periodic_enabled = False
        else:
            func._periodic_enabled = kwargs.pop('enabled', True)

        # Control frequency
        func._periodic_spacing = kwargs.pop('spacing', 0)
        func._periodic_immediate = kwargs.pop('run_immediately', False)
        func._periodic_last_run = None if func._periodic_immediate else time.time()
        return func

    # With kwargs the decorator was called with parentheses, so return the
    # inner decorator for the interpreter to apply; without kwargs the
    # decorated function itself arrives as the sole positional argument.
    return mark if kwargs else mark(args[0])
class _PeriodicTasksMeta(type):
    """Metaclass that collects ``@periodic_task``-decorated methods.

    Every class built with this metaclass gets its own ``_periodic_tasks``
    list of ``(name, method)`` pairs and its own ``_periodic_spacing``
    mapping, seeded from copies of the parent's so that registering tasks
    on a subclass never mutates the parent class.
    """

    def __init__(cls, names, bases, dict_):
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # Copy the inherited registries, or start fresh on the root class
        # (AttributeError means no parent defined them yet).
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        for attr in cls.__dict__.values():
            if not getattr(attr, '_periodic_task', False):
                continue

            task_name = attr.__name__
            if attr._periodic_spacing < 0:
                LOG.info(_LI('Skipping periodic task %(task)s because '
                             'its interval is negative'),
                         {'task': task_name})
                continue
            if not attr._periodic_enabled:
                LOG.info(_LI('Skipping periodic task %(task)s because '
                             'it is disabled'),
                         {'task': task_name})
                continue

            # A spacing of zero means "use the default interval", not
            # "run as often as possible".
            if attr._periodic_spacing == 0:
                attr._periodic_spacing = DEFAULT_INTERVAL

            cls._periodic_tasks.append((task_name, attr))
            cls._periodic_spacing[task_name] = attr._periodic_spacing
def _nearest_boundary(last_run, spacing):
    """Return the most recent spacing boundary in the past, plus jitter.

    Boundaries are the times ``last_run + k * spacing``; the result is the
    latest such time that is not in the future.  E.g. if the last run was
    at 10 and the spacing is 7, the new last run could be 17, 24, 31, ...

    0% to 5% of the spacing is then added so that tasks sharing a spacing
    do not synchronize.  The jitter is truncated to whole seconds, so
    spacings smaller than 20 seconds get no jitter at all.
    """
    now = time.time()
    if last_run is None:
        return now

    # Snap back to the latest multiple of ``spacing`` since ``last_run``.
    elapsed = now - last_run
    boundary = now - (elapsed % spacing)

    # Up to 5% of the spacing, truncated to whole seconds.
    return boundary + int(spacing * (random.random() / 20))
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
    """Base class for objects that expose ``@periodic_task`` methods."""

    def __init__(self):
        super(PeriodicTasks, self).__init__()
        # Per-instance record of each task's last run time, seeded from
        # the decorator (None means "due immediately").
        self._periodic_last_run = {}
        for name, task in self._periodic_tasks:
            self._periodic_last_run[name] = task._periodic_last_run

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, name])

            spacing = self._periodic_spacing[name]
            last = self._periodic_last_run[name]

            # Never report an idle time longer than the shortest spacing.
            idle_for = min(idle_for, spacing)

            if last is not None:
                remaining = last + spacing - time.time()
                if remaining > 0:
                    # Not due yet; maybe wake up sooner for this task.
                    idle_for = min(idle_for, remaining)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            self._periodic_last_run[name] = _nearest_boundary(last, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            # Yield to other greenthreads/threads between tasks.
            time.sleep(0)

        return idle_for
| {
"content_hash": "7de546e96d72af2d643f2ddc496728ad",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 35.984924623115575,
"alnum_prop": 0.5839966485127775,
"repo_name": "blueboxgroup/cinder",
"id": "f05bca10bdc6ca4a5f43840ec9746471aaff9566",
"size": "7736",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/openstack/common/periodic_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10024269"
},
{
"name": "Shell",
"bytes": "9905"
}
],
"symlink_target": ""
} |
"""Members service."""
from .config import MemberServiceConfig
from .service import MemberService
__all__ = (
"MemberService",
"MemberServiceConfig",
)
| {
"content_hash": "dc52d6cc368d0166a211d192114185d1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 16.3,
"alnum_prop": 0.6993865030674846,
"repo_name": "inveniosoftware/invenio-communities",
"id": "c0852bf2738a16fa1511218b0b8e2f4e0b4e1247",
"size": "419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_communities/members/services/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21753"
},
{
"name": "JavaScript",
"bytes": "226672"
},
{
"name": "Python",
"bytes": "361750"
},
{
"name": "Shell",
"bytes": "1881"
}
],
"symlink_target": ""
} |
"""Return the short version string."""
from mpfmonitor._version import __short_version__
print("{}.x".format(__short_version__))
| {
"content_hash": "77f7d5b4ced5f13a1dd647a7631d0398",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 49,
"avg_line_length": 43,
"alnum_prop": 0.6976744186046512,
"repo_name": "missionpinball/mpf-monitor",
"id": "2d3833d491afed96c8820ea00e918fba793c81c0",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "get_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106758"
}
],
"symlink_target": ""
} |
"""Module containing the core callback.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.tasks import grading_survey_group as grading_group_tasks
from soc.tasks import surveys as survey_tasks
from soc.tasks.updates import start_update
from soc.views.models import club
from soc.views.models import club_admin
from soc.views.models import club_member
from soc.views.models import cron
from soc.views.models import document
from soc.views.models import host
from soc.views.models import job
from soc.views.models import mentor
from soc.views.models import notification
from soc.views.models import organization
from soc.views.models import org_admin
from soc.views.models import priority_group
from soc.views.models import program
from soc.views.models import request
from soc.views.models import site
from soc.views.models import sponsor
from soc.views.models import student
from soc.views.models import student_project
from soc.views.models import survey
from soc.views.models import timeline
from soc.views.models import user
from soc.views.models import user_self
class Callback(object):
  """Callback object that handles interaction between the core.
  """

  API_VERSION = 1

  def __init__(self, core):
    """Initializes a new Callback object for the specified core.
    """
    self.core = core

    # Club functionality is currently turned off.
    self.enable_clubs = False

  def registerWithSitemap(self):
    """Called by the server when sitemap entries should be registered.
    """

    self.core.requireUniqueService('registerWithSitemap')

    if self.enable_clubs:
      for view in [club.view, club_admin.view, club_member.view]:
        self.core.registerSitemapEntry(view.getDjangoURLPatterns())

    # Registration order is preserved from the original flat listing.
    for view in [cron.view, document.view, host.view, job.view, mentor.view,
                 notification.view, organization.view, org_admin.view,
                 priority_group.view, program.view, request.view, site.view,
                 sponsor.view, student.view, student_project.view,
                 survey.view, timeline.view, user_self.view, user.view]:
      self.core.registerSitemapEntry(view.getDjangoURLPatterns())

    # register task URL's
    for task_module in [grading_group_tasks, start_update, survey_tasks]:
      self.core.registerSitemapEntry(task_module.getDjangoURLPatterns())

  def registerWithSidebar(self):
    """Called by the server when sidebar entries should be registered.
    """

    self.core.requireUniqueService('registerWithSidebar')

    if self.enable_clubs:
      for menu_fn in [club.view.getSidebarMenus, club.view.getExtraMenus,
                      club_admin.view.getSidebarMenus,
                      club_member.view.getSidebarMenus]:
        self.core.registerSidebarEntry(menu_fn)

    for menu_fn in [host.view.getSidebarMenus, job.view.getSidebarMenus,
                    priority_group.view.getSidebarMenus,
                    request.view.getSidebarMenus, site.view.getSidebarMenus,
                    sponsor.view.getExtraMenus,
                    sponsor.view.getSidebarMenus,
                    user_self.view.getSidebarMenus,
                    user.view.getSidebarMenus]:
      self.core.registerSidebarEntry(menu_fn)

  def registerRights(self):
    """Called by the server when the document rights should be registered.
    """

    rights_specs = [
        ('site', {
            'admin': [],
            'restricted': ['host'],
            'member': ['user'],
            'list': ['host'],
        }),
        ('club', {
            'admin': ['host', 'club_admin'],
            'restricted': ['host', 'club_admin'],
            'member': ['host', 'club_admin', 'club_member'],
            'list': ['host', 'club_admin', 'club_member'],
        }),
        ('sponsor', {
            'admin': ['host'],
            'restricted': ['host'],
            'member': ['host'],
            'list': ['host'],
        }),
        ('program', {
            'admin': ['host'],
            'restricted': ['host', 'org_admin'],
            'member': ['host', 'org_admin', 'org_mentor', 'org_student'],
            'list': ['host', 'org_admin', 'org_mentor'],
        }),
        ('org', {
            'admin': ['host', 'org_admin'],
            'restricted': ['host', 'org_admin', 'org_mentor'],
            'member': ['host', 'org_admin', 'org_mentor', 'org_student'],
            'list': ['host', 'org_admin', 'org_mentor'],
        }),
        ('user', {
            'admin': ['user_self'],
            'restricted': ['user_self'],  # ,'friends'
            'member': ['user'],
            'list': ['user_self'],
        }),
    ]

    for prefix, membership in rights_specs:
      self.core.registerRight(prefix, membership)
| {
"content_hash": "14f7f6a2555895a83ad45536e520935e",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 38.43478260869565,
"alnum_prop": 0.7207498383968972,
"repo_name": "MatthewWilkes/mw4068-packaging",
"id": "5e148a24f468655a57020bd1ec7097ad79e2b32b",
"size": "6773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/melange/src/soc/modules/soc_core/callback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68827"
},
{
"name": "HTML",
"bytes": "586705"
},
{
"name": "JavaScript",
"bytes": "441502"
},
{
"name": "Python",
"bytes": "2136551"
},
{
"name": "Shell",
"bytes": "5667"
}
],
"symlink_target": ""
} |
import os
import sys
def convert_line_endings(file_path):
    """Convert DOS (CRLF) line endings in file_path to Unix (LF), in place.

    The file is read once in binary mode (the original opened it twice and
    leaked the first handle) and rewritten only when at least one CRLF is
    present, so already-clean files are left untouched.
    """
    with open(file_path, 'rb') as infile:
        data = infile.read()
    # b'' literals keep this source compatible with both Python 2 and 3.
    if b'\r\n' in data:
        print('%s contains DOS line endings. Converting' % file_path)
        with open(file_path, 'wb') as outfile:
            outfile.write(data.replace(b'\r\n', b'\n'))
def process_path(path, extension_list):
    """Convert line endings for one file, or recursively under a directory.

    Only files whose extension is in extension_list are touched; hidden
    files and hidden directories are skipped entirely.
    """
    if os.path.isfile(path):
        convert_line_endings(path)
        return

    for root, dirs, files in os.walk(path):
        # Prune hidden directories in place so os.walk never descends
        # into them.
        dirs[:] = [d for d in dirs if not d[0] == '.']
        for name in files:
            if name[0] == '.':
                continue
            if os.path.splitext(name)[1] in extension_list:
                convert_line_endings(os.path.join(root, name))
if __name__ == "__main__":
if len(sys.argv) > 1:
extensions = sys.argv[2:]
if not extensions:
extensions = ['.cs']
process_path(sys.argv[1], extensions)
else:
process_path('.', ['.cs'])
process_path('tests', ['.h', '.c', '.cpp', '.m', '.mm', '.py', '.sh', '.bat'])
| {
"content_hash": "76c9cb2f03caef87c14e892f6bd62716",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 86,
"avg_line_length": 34.91891891891892,
"alnum_prop": 0.5185758513931888,
"repo_name": "markfinal/BuildAMation",
"id": "771665ea0da09272f5fa49e4a99c2201460c4cc3",
"size": "1311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codingtools/convert_line_endings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "25495"
},
{
"name": "Batchfile",
"bytes": "6399"
},
{
"name": "C",
"bytes": "249476"
},
{
"name": "C#",
"bytes": "3318412"
},
{
"name": "C++",
"bytes": "35331"
},
{
"name": "Objective-C",
"bytes": "10923"
},
{
"name": "PowerShell",
"bytes": "737"
},
{
"name": "Python",
"bytes": "97729"
},
{
"name": "Shell",
"bytes": "5166"
}
],
"symlink_target": ""
} |
import unittest
import sys
# Configure a minimal Django settings module so the template loader can be
# imported outside a Django project; harmless to skip when Django is absent.
# The exception binding was unused and used Python-2-only syntax
# (``except ImportError, e``); the bare form works on both 2 and 3.
try:
    from django.conf import settings
    settings.configure(DEBUG=True, TEMPLATE_DEBUG=True)
except ImportError:
    pass
from hamlpy.template.loaders import get_haml_loader, TemplateDoesNotExist
class DummyLoader(object):
    """A fake template loader that serves only the in-memory ``templates``.

    Anything not present in the dict raises TemplateDoesNotExist, mirroring
    a real Django loader's contract.
    """

    templates = {
        "in_dict.txt": "in_dict content",
        "loader_test.hamlpy": "loader_test content",
    }

    def __init__(self, *args, **kwargs):
        # The haml loader wraps a loader *class*; expose ourselves as one.
        self.Loader = self.__class__

    def load_template_source(self, template_name, *args, **kwargs):
        if template_name not in self.templates:
            raise TemplateDoesNotExist(template_name)
        return (self.templates[template_name], "test:%s" % template_name)
class LoaderTest(unittest.TestCase):
    """
    Tests for the django template loader.

    A dummy template loader is used that loads only from a dictionary of
    templates.
    """

    def setUp(self):
        dummy_loader = DummyLoader()
        hamlpy_loader_class = get_haml_loader(dummy_loader)
        self.hamlpy_loader = hamlpy_loader_class()

    def _test_assert_exception(self, template_name):
        """Assert that loading template_name raises TemplateDoesNotExist."""
        try:
            self.hamlpy_loader.load_template_source(template_name)
        except TemplateDoesNotExist:
            pass
        else:
            # was assertTrue(False, ...) with a 'tempalte' typo; self.fail
            # is the idiomatic unconditional failure.
            self.fail('\'%s\' should not be loaded by the hamlpy template '
                      'loader.' % template_name)

    def test_file_not_in_dict(self):
        # not_in_dict.txt doesn't exist, so we're expecting an exception
        self._test_assert_exception('not_in_dict.hamlpy')

    def test_file_in_dict(self):
        # in_dict.txt is in the dict, but with an extension not supported by
        # the loader, so we expect an exception
        self._test_assert_exception('in_dict.txt')

    def test_file_should_load(self):
        # loader_test.hamlpy is in the dict, so it should load fine
        try:
            self.hamlpy_loader.load_template_source('loader_test.hamlpy')
        except TemplateDoesNotExist:
            self.fail('\'loader_test.hamlpy\' should be loaded by the hamlpy '
                      'template loader, but it was not.')

    def test_file_different_extension(self):
        # loader_test.hamlpy is in the dict, but we're going to try to load
        # loader_test.txt; we expect an exception since the extension is not
        # supported by the loader
        self._test_assert_exception('loader_test.txt')
| {
"content_hash": "e03fcefbe46f19bc0aff35297ba3aa23",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 124,
"avg_line_length": 34.851351351351354,
"alnum_prop": 0.6393951143854207,
"repo_name": "GetHappie/HamlPy",
"id": "27643aaa68120a5480deaa406c2c1da58d31215a",
"size": "2579",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hamlpy/test/loader_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8021"
},
{
"name": "Python",
"bytes": "78306"
},
{
"name": "Ruby",
"bytes": "88"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
import SomethingNotExist
| {
"content_hash": "801c28e2b5fc631bbdbca38e26c0a85b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.92,
"repo_name": "huoxudong125/dlr",
"id": "64a248922117b9de4df45b6dfd79cdd3fbeea9d7",
"size": "44",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Src/Hosts/Silverlight/Tests/tests/regressions/fixtures/x_import_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "4887"
},
{
"name": "Batchfile",
"bytes": "11220"
},
{
"name": "C",
"bytes": "1231"
},
{
"name": "C#",
"bytes": "7367932"
},
{
"name": "C++",
"bytes": "106677"
},
{
"name": "CSS",
"bytes": "8115"
},
{
"name": "HTML",
"bytes": "195477"
},
{
"name": "JavaScript",
"bytes": "99301"
},
{
"name": "Makefile",
"bytes": "449"
},
{
"name": "PowerShell",
"bytes": "2619"
},
{
"name": "Python",
"bytes": "282073"
},
{
"name": "Ruby",
"bytes": "84462"
},
{
"name": "Visual Basic",
"bytes": "9936"
}
],
"symlink_target": ""
} |
# For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html
from __future__ import print_function
import sys
from nltk.corpus.reader import util
from nltk import compat
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
class ChasenCorpusReader(CorpusReader):
    """Corpus reader for ChaSen-tagged files (e.g. the JEITA corpus)."""

    def __init__(self, root, fileids, encoding='utf8', sent_splitter=None):
        self._sent_splitter = sent_splitter
        CorpusReader.__init__(self, root, fileids, encoding)

    def raw(self, fileids=None):
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, compat.string_types):
            fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])

    def _views(self, fileids, tagged, group_by_sent, group_by_para):
        # One ChasenCorpusView per file, concatenated into a single view.
        return concat([ChasenCorpusView(fileid, enc, tagged, group_by_sent,
                                        group_by_para, self._sent_splitter)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def words(self, fileids=None):
        return self._views(fileids, False, False, False)

    def tagged_words(self, fileids=None):
        return self._views(fileids, True, False, False)

    def sents(self, fileids=None):
        return self._views(fileids, False, True, False)

    def tagged_sents(self, fileids=None):
        return self._views(fileids, True, True, False)

    def paras(self, fileids=None):
        return self._views(fileids, False, True, True)

    def tagged_paras(self, fileids=None):
        return self._views(fileids, True, True, True)
class ChasenCorpusView(StreamBackedCorpusView):
    """
    A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``,
    but this'll use fixed sets of word and sentence tokenizer.
    """

    def __init__(self, corpus_file, encoding,
                 tagged, group_by_sent, group_by_para, sent_splitter=None):
        # tagged: keep (word, tag) pairs; otherwise yield bare words.
        self._tagged = tagged
        # group_by_sent / group_by_para: control the nesting of the output
        # (words, sentences of words, or paragraphs of sentences).
        self._group_by_sent = group_by_sent
        self._group_by_para = group_by_para
        # Optional predicate on a (word, tag) pair that forces a sentence
        # break in addition to the explicit 'EOS' markers.
        self._sent_splitter = sent_splitter
        StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)

    def read_block(self, stream):
        """Reads one paragraph at a time."""
        block = []
        # Paragraphs are delimited by a line consisting of 'EOS'.
        for para_str in read_regexp_block(stream, r".", r"^EOS\n"):
            para = []
            sent = []
            for line in para_str.splitlines():
                # ChaSen output: surface form, then tab-separated analysis
                # fields; a bare 'EOS' line ends the sentence/paragraph.
                _eos = line.strip() == 'EOS'
                _cells = line.split('\t')
                w = (_cells[0], '\t'.join(_cells[1:]))
                if not _eos: sent.append(w)

                # Flush the current sentence on 'EOS' or when the optional
                # splitter says this word ends a sentence.
                if _eos or (self._sent_splitter and self._sent_splitter(w)):
                    if not self._tagged:
                        sent = [w for (w,t) in sent]
                    if self._group_by_sent:
                        para.append(sent)
                    else:
                        para.extend(sent)
                    sent = []

            # Flush a trailing sentence with no terminating marker.
            if len(sent)>0:
                if not self._tagged:
                    sent = [w for (w,t) in sent]
                if self._group_by_sent:
                    para.append(sent)
                else:
                    para.extend(sent)

            if self._group_by_para:
                block.append(para)
            else:
                block.extend(para)

        return block
def demo():
    """Print a small sample of the JEITA corpus, plain and tagged.

    Requires the 'jeita' corpus to be installed locally.
    """
    # (removed an unused ``import nltk`` -- only LazyCorpusLoader is needed)
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')

    print('/'.join( jeita.words()[22100:22140] ))

    print('\nEOS\n'.join('\n'.join("%s/%s" % (w[0],w[1].split('\t')[2]) for w in sent)
                         for sent in jeita.tagged_sents()[2170:2173]))
def test():
    """Smoke test: tags loaded from the JEITA corpus are plain strings."""
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')
    first_tagged_word = jeita.tagged_words()[0]
    assert isinstance(first_tagged_word[1], compat.string_types)
if __name__ == '__main__':
    # Run the printable demo first, then the assertion-based smoke test.
    demo()
    test()
| {
"content_hash": "68ae794225194d79c4d0182ec4033689",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 86,
"avg_line_length": 35.208955223880594,
"alnum_prop": 0.536244171259008,
"repo_name": "enriquesanchezb/practica_utad_2016",
"id": "a34bee756387fb817b64ea14d6fd73a7943092f7",
"size": "4879",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/nltk/corpus/reader/chasen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5785"
},
{
"name": "HTML",
"bytes": "284450"
},
{
"name": "JavaScript",
"bytes": "20876"
},
{
"name": "Python",
"bytes": "6659896"
},
{
"name": "Shell",
"bytes": "3296"
}
],
"symlink_target": ""
} |
"""
Implentation of Brocade Quantum Plugin.
"""
from oslo.config import cfg
from quantum.agent import securitygroups_rpc as sg_rpc
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.rpc.agentnotifiers import l3_rpc_agent_api
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.common import utils
from quantum.db import agents_db
from quantum.db import agentschedulers_db
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import l3_rpc_base
from quantum.db import securitygroups_rpc_base as sg_db_rpc
from quantum.extensions import portbindings
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import context
from quantum.openstack.common import importutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.openstack.common.rpc import proxy
from quantum.plugins.brocade.db import models as brocade_db
from quantum.plugins.brocade import vlanbm as vbm
from quantum import policy
from quantum import scheduler
# Module-level logger for this plugin.
LOG = logging.getLogger(__name__)

PLUGIN_VERSION = 0.88
# Ports handled via the agent RPC report a network-owned device owner.
AGENT_OWNER_PREFIX = "network:"
# Dotted path of the NETCONF driver used to talk to the VDX switch.
NOS_DRIVER = 'quantum.plugins.brocade.nos.nosdriver.NOSdriver'

# Credentials/type of the Brocade switch, read from the [SWITCH] section.
SWITCH_OPTS = [cfg.StrOpt('address', default=''),
               cfg.StrOpt('username', default=''),
               cfg.StrOpt('password', default='', secret=True),
               cfg.StrOpt('ostype', default='NOS')
               ]

# Host NIC that connects compute nodes to the fabric
# ([PHYSICAL_INTERFACE] section).
PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0')
                           ]

cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH")
cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE")
cfg.CONF.register_opts(scheduler.AGENTS_SCHEDULER_OPTS)
class BridgeRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                         l3_rpc_base.L3RpcCallbackMixin,
                         sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Agent callback.

    Server-side handlers for the RPCs issued by the L2 agent (device
    details, device-down notifications), plus the DHCP/L3/security-group
    callback mixins.
    """

    RPC_API_VERSION = '1.1'
    # Device names start with "tap"
    # history
    #   1.1 Support Security Group RPC
    TAP_PREFIX_LEN = 3

    def create_rpc_dispatcher(self):
        '''Get the rpc dispatcher for this manager.

        If a manager would like to set an rpc API version, or support more than
        one class as the target of rpc messages, override this method.
        '''
        # Dispatch both to this callback object and to the agent-status
        # RPC handler.
        return q_rpc.PluginRpcDispatcher([self,
                                          agents_db.AgentExtRpcCallback()])

    @classmethod
    def get_port_from_device(cls, device):
        """Get port from the brocade specific db.

        ``device`` is the agent-side tap device name; the "tap" prefix is
        stripped to recover the port id.
        """
        # TODO(shh) context is not being passed as
        # an argument to this function;
        #
        # need to be fixed in:
        # file: quantum/db/securtygroups_rpc_base.py
        # function: securitygroup_rules_for_devices()
        # which needs to pass context to us

        # Doing what other plugins are doing
        session = db.get_session()
        port = brocade_db.get_port_from_device(
            session, device[cls.TAP_PREFIX_LEN:])

        # TODO(shiv): need to extend the db model to include device owners
        # make it appears that the device owner is of type network
        if port:
            port['device'] = device
            port['device_owner'] = AGENT_OWNER_PREFIX
            port['binding:vif_type'] = 'bridge'
        return port

    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details.

        Returns vlan/network/binding info for the tap device, or a bare
        {'device': ...} entry when the port is unknown.
        """
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
                  locals())
        port = brocade_db.get_port(rpc_context, device[self.TAP_PREFIX_LEN:])
        if port:
            entry = {'device': device,
                     'vlan_id': port.vlan_id,
                     'network_id': port.network_id,
                     'port_id': port.port_id,
                     'physical_network': port.physical_interface,
                     'admin_state_up': port.admin_state_up
                     }

        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry

    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent.

        Marks the corresponding port DOWN; 'exists' in the reply tells the
        agent whether the port was known at all.
        """
        device = kwargs.get('device')
        port = self.get_port_from_device(device)
        if port:
            entry = {'device': device,
                     'exists': True}
            # Set port status to DOWN
            port_id = port['port_id']
            brocade_db.update_port_state(rpc_context, port_id, False)
        else:
            entry = {'device': device,
                     'exists': False}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
class AgentNotifierApi(proxy.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    '''Agent side of the linux bridge rpc API.

    API version history:
        1.0 - Initial version.

    '''

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic = topic
        # Fan-out topics that the agents subscribe to, one per event type.
        self.topic_network_delete = topics.get_topic_name(
            topic, topics.NETWORK, topics.DELETE)
        self.topic_port_update = topics.get_topic_name(
            topic, topics.PORT, topics.UPDATE)

    def network_delete(self, context, network_id):
        msg = self.make_msg('network_delete', network_id=network_id)
        self.fanout_cast(context, msg, topic=self.topic_network_delete)

    def port_update(self, context, port, physical_network, vlan_id):
        msg = self.make_msg('port_update',
                            port=port,
                            physical_network=physical_network,
                            vlan_id=vlan_id)
        self.fanout_cast(context, msg, topic=self.topic_port_update)
class BrocadePluginV2(db_base_plugin_v2.QuantumDbPluginV2,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.AgentSchedulerDbMixin):
"""BrocadePluginV2 is a Quantum plugin.
Provides L2 Virtual Network functionality using VDX. Upper
layer driver class that interfaces to NETCONF layer below.
"""
    def __init__(self):
        """Initialize Brocade Plugin, specify switch address
        and db configuration.
        """
        # Quantum API extensions implemented by this plugin.
        self.supported_extension_aliases = ["binding", "security-group",
                                            "agent", "agent_scheduler"]

        # Policy actions checked for port-binding access.
        self.binding_view = "extension:port_binding:view"
        self.binding_set = "extension:port_binding:set"

        # Host NIC connecting compute nodes to the fabric (from config).
        self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE.
                                   physical_interface)

        db.configure_db()
        # Admin context/session used by the vlan allocation bitmap.
        self.ctxt = context.get_admin_context()
        self.ctxt.session = db.get_session()
        self._vlan_bitmap = vbm.VlanBitmap(self.ctxt)
        self._setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver)
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)
        self.brocade_init()
def brocade_init(self):
"""Brocade specific initialization."""
self._switch = {'address': cfg.CONF.SWITCH.address,
'username': cfg.CONF.SWITCH.username,
'password': cfg.CONF.SWITCH.password
}
self._driver = importutils.import_object(NOS_DRIVER)
    def _setup_rpc(self):
        """Wire up RPC consumers and agent notifiers for this plugin."""
        # RPC support
        self.topic = topics.PLUGIN
        self.rpc_context = context.RequestContext('quantum', 'quantum',
                                                  is_admin=False)
        self.conn = rpc.create_connection(new=True)
        self.callbacks = BridgeRpcCallbacks()
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
        # Notifiers used to push events out to the L2/DHCP/L3 agents.
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotify
    def create_network(self, context, network):
        """This call to create network translates to creation of
        port-profile on the physical switch.
        """
        with context.session.begin(subtransactions=True):
            net = super(BrocadePluginV2, self).create_network(context, network)
            net_uuid = net['id']
            # Grab the next free vlan; it is handed back to the pool below
            # if the switch rejects the configuration.
            vlan_id = self._vlan_bitmap.get_next_vlan(None)
            switch = self._switch
            try:
                self._driver.create_network(switch['address'],
                                            switch['username'],
                                            switch['password'],
                                            vlan_id)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                LOG.debug(_("Returning the allocated vlan (%d) to the pool"),
                          vlan_id)
                self._vlan_bitmap.release_vlan(int(vlan_id))
                raise Exception("Brocade plugin raised exception, check logs")

            # Record the network -> vlan mapping in the brocade shadow db.
            brocade_db.create_network(context, net_uuid, vlan_id)
            LOG.info(_("Allocated vlan (%d) from the pool"), vlan_id)

        return net
    def delete_network(self, context, net_id):
        """This call to delete the network translates to removing
        the port-profile on the physical switch.
        """
        with context.session.begin(subtransactions=True):
            result = super(BrocadePluginV2, self).delete_network(context,
                                                                 net_id)
            # we must delete all ports in db first (foreign key constraint)
            # there is no need to delete port in the driver (its a no-op)
            # (actually: note there is no such call to the driver)
            bports = brocade_db.get_ports(context, net_id)
            for bport in bports:
                brocade_db.delete_port(context, bport['port_id'])

            # find the vlan for this network
            net = brocade_db.get_network(context, net_id)
            vlan_id = net['vlan']

            # Tell hw to do remove PP
            switch = self._switch
            try:
                # NOTE(review): create_network passes vlan_id to the driver
                # but this passes net_id -- confirm the driver expects the
                # network UUID here.
                self._driver.delete_network(switch['address'],
                                            switch['username'],
                                            switch['password'],
                                            net_id)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                raise Exception("Brocade plugin raised exception, check logs")

            # now ok to delete the network
            brocade_db.delete_network(context, net_id)

        # relinquish vlan in bitmap; only after the transaction completes.
        self._vlan_bitmap.release_vlan(int(vlan_id))
        return result
    def create_port(self, context, port):
        """Create logical port on the switch.

        Associates the new port's MAC with the network's vlan on the VDX
        and mirrors the port into the brocade shadow db.
        """
        tenant_id = port['port']['tenant_id']
        network_id = port['port']['network_id']
        admin_state_up = port['port']['admin_state_up']

        physical_interface = self.physical_interface

        with context.session.begin(subtransactions=True):
            # Look up the vlan previously allocated for this network.
            bnet = brocade_db.get_network(context, network_id)
            vlan_id = bnet['vlan']

            quantum_port = super(BrocadePluginV2, self).create_port(context,
                                                                    port)
            interface_mac = quantum_port['mac_address']
            port_id = quantum_port['id']

            switch = self._switch

            # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
            mac = self.mac_reformat_62to34(interface_mac)
            try:
                self._driver.associate_mac_to_network(switch['address'],
                                                      switch['username'],
                                                      switch['password'],
                                                      vlan_id,
                                                      mac)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                raise Exception("Brocade plugin raised exception, check logs")

            # save to brocade persistent db
            brocade_db.create_port(context, port_id, network_id,
                                   physical_interface,
                                   vlan_id, tenant_id, admin_state_up)

        # apply any extensions
        return self._extend_port_dict_binding(context, quantum_port)
    def delete_port(self, context, port_id):
        """Delete the port from both the quantum and the brocade tables."""
        with context.session.begin(subtransactions=True):
            super(BrocadePluginV2, self).delete_port(context, port_id)
            brocade_db.delete_port(context, port_id)
    def update_port(self, context, port_id, port):
        """Update a port, refreshing security-group bindings when supplied.

        When the request carries security groups, the old bindings are
        dropped and re-created from the new list before the core update.
        Agents are notified when admin state, fixed IPs or security-group
        membership changed.
        """
        original_port = self.get_port(context, port_id)
        session = context.session
        port_updated = False
        with session.begin(subtransactions=True):
            # delete the port binding and read it with the new rules
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                self._delete_port_security_group_bindings(context, port_id)
                self._process_port_create_security_group(
                    context,
                    port_id,
                    port['port'][ext_sg.SECURITYGROUPS])
                port_updated = True
            port = super(BrocadePluginV2, self).update_port(
                context, port_id, port)
            self._extend_port_dict_security_group(context, port)
            if original_port['admin_state_up'] != port['admin_state_up']:
                port_updated = True
            # Membership changes must be broadcast so other agents can
            # refresh their security-group rule sets.
            if (original_port['fixed_ips'] != port['fixed_ips'] or
                not utils.compare_elements(
                    original_port.get(ext_sg.SECURITYGROUPS),
                    port.get(ext_sg.SECURITYGROUPS))):
                self.notifier.security_groups_member_updated(
                    context, port.get(ext_sg.SECURITYGROUPS))
        if port_updated:
            self._notify_port_updated(context, port)
        return self._extend_port_dict_binding(context, port)
    def get_port(self, context, port_id, fields=None):
        # Fetch the core port and decorate it with security-group and
        # binding attributes before trimming it to the requested fields.
        with context.session.begin(subtransactions=True):
            port = super(BrocadePluginV2, self).get_port(
                context, port_id, fields)
            self._extend_port_dict_security_group(context, port)
            self._extend_port_dict_binding(context, port)
        return self._fields(port, fields)
    def get_ports(self, context, filters=None, fields=None):
        # Batch variant of get_port(): decorate every core port with
        # security-group and binding attributes, then trim each one to the
        # requested fields.
        res_ports = []
        with context.session.begin(subtransactions=True):
            ports = super(BrocadePluginV2, self).get_ports(context,
                                                           filters,
                                                           fields)
            for port in ports:
                self._extend_port_dict_security_group(context, port)
                self._extend_port_dict_binding(context, port)
                res_ports.append(self._fields(port, fields))
        return res_ports
    def _notify_port_updated(self, context, port):
        # Push the updated port to agents over RPC, adding the
        # plugin-private physical interface and VLAN from the Brocade DB.
        port_id = port['id']
        bport = brocade_db.get_port(context, port_id)
        self.notifier.port_update(context, port,
                                  bport.physical_interface,
                                  bport.vlan_id)
def _extend_port_dict_binding(self, context, port):
if self._check_view_auth(context, port, self.binding_view):
port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE
port['binding:vif_type'] = portbindings.VIF_TYPE_BRIDGE
port[portbindings.CAPABILITIES] = {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}
return port
    def _check_view_auth(self, context, resource, action):
        # Delegate the visibility decision to the quantum policy engine;
        # True means the context may perform ``action`` on ``resource``.
        return policy.check(context, action, resource)
    def get_plugin_version(self):
        """Get version number of the plugin.

        :returns: the module-level ``PLUGIN_VERSION`` string
        """
        return PLUGIN_VERSION
@staticmethod
def mac_reformat_62to34(interface_mac):
"""Transform MAC address format.
Transforms from 6 groups of 2 hexadecimal numbers delimited by ":"
to 3 groups of 4 hexadecimals numbers delimited by ".".
:param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx
:type interface_mac: string
:returns: MAC address in the format xxxx.xxxx.xxxx
:rtype: string
"""
mac = interface_mac.replace(":", "")
mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12]
return mac
| {
"content_hash": "0505560c18a2b82c51ec4fbc4c33d7fb",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 79,
"avg_line_length": 40.284424379232505,
"alnum_prop": 0.5611341477081699,
"repo_name": "liqin75/vse-vpnaas-plugin",
"id": "895a967ef60fc41e95244abddd0e531d2f2746bb",
"size": "18731",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quantum/plugins/brocade/QuantumPlugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4225761"
},
{
"name": "Scala",
"bytes": "4561"
},
{
"name": "Shell",
"bytes": "9412"
},
{
"name": "XML",
"bytes": "50907"
}
],
"symlink_target": ""
} |
try:
from panda3d.core import NodePath, TextNode
import panda3d
except ImportError:
panda3d = None
#coloredtextbox class: this will be converted to a canvas drone using "build_canvasdrone"
class coloredtextbox(object):
    """Canvas-drone payload that renders a text string on a colored card.

    Instances are wrapped by ``build_canvasdrone``: ``__init__`` runs on the
    first draw, ``update`` on each redraw and ``remove`` on teardown.
    """
    #obligatory argument list for __init__: canvasdrone, object, identifier, parameters
    def __init__(self, canvasdrone, ctb, identifier, parameters):
        if panda3d is None: raise ImportError("Cannot locate Panda3D")
        if identifier is None: identifier = ""
        self.node = None
        # Parent NodePath supplied by the canvas drone; the text node is
        # re-created under it on every _show().
        self.pnode = canvasdrone._get_parent_nodepath(identifier, ctb.box)
        self._show(ctb, identifier)
    #obligatory method "update". Argument list: object, identifier, parameters
    def update(self, ctb, identifier, parameters):
        self._show(ctb, identifier)
    #obligatory method "remove"
    def remove(self):
        # Removing pnode also detaches the child text node from the graph.
        if self.pnode is not None:
            self.pnode.removeNode()
            self.pnode = None
    def _show(self, ctb, identifier):
        """(Re)build the Panda3D TextNode from the ColoredTextBox *ctb*."""
        if self.node is not None: self.node.removeNode()
        tnode = TextNode(identifier)
        tnode.setText(ctb.text)
        # Colors arrive as 0-255 integer channels; Panda3D wants 0.0-1.0.
        r, g, b, a = ctb.textcolor.r / 255.0, ctb.textcolor.g / 255.0, ctb.textcolor.b / 255.0, ctb.textcolor.a / 255.0
        tnode.setTextColor(r, g, b, a)
        r, g, b, a = ctb.boxcolor.r / 255.0, ctb.boxcolor.g / 255.0, ctb.boxcolor.b / 255.0, ctb.boxcolor.a / 255.0
        tnode.setCardColor(r, g, b, a)
        tnode.setCardAsMargin(0, 0, 0, 0)
        tnode.setCardDecal(True)
        node = NodePath(tnode)
        self._scale(tnode, node)
        # NOTE(review): after remove() self.pnode is None; assumes update()
        # is never called after remove() — TODO confirm against drone API.
        node.reparentTo(self.pnode)
        self.node = node
    def _scale(self, tnode, node):
        """Scale/position the text node to fill a unit box on the canvas.

        Fix: dropped the unused ``dimx``/``dimy`` locals of the original
        (pure arithmetic, never read — no behavior change).
        """
        top, bottom = tnode.getTop(), tnode.getBottom()
        l, r = tnode.getLeft(), tnode.getRight()
        w, h = r - l, top - bottom
        # Guard against degenerate (empty) text: scale stays 0.
        scalex = 0
        if w > 0: scalex = 1.0 / w
        scaley = 0
        if h > 0: scaley = 1.0 / h
        # NOTE(review): the Z scale is negated — presumably to match the
        # canvas' Y-down convention; confirm against the canvas drone.
        node.setScale(scalex, 1, -scaley)
        midx = (l * scalex + r * scalex) / 2.0
        midy = (top * scaley + bottom * scaley) / 2.0
        node.setPos(-midx + 0.5, 0, midy - 0.5)
import bee
from dragonfly.canvas import canvasdrone
from dragonfly.pandahive import build_canvasdrone
# Wrap the plain class as a canvas drone: "show" is the draw callback and
# "ColoredTextBox" the draw-object type it accepts.
coloredtextbox_panda = build_canvasdrone(
    wrappedclass=coloredtextbox,
    classname="coloredtextbox_panda",
    drawshow="show",
    drawshowtype="ColoredTextBox",
    baseclass=canvasdrone
)
| {
"content_hash": "31e1560c2381fa6abf4cdeed1725b98d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 119,
"avg_line_length": 34.91549295774648,
"alnum_prop": 0.6236385639370714,
"repo_name": "agoose77/hivesystem",
"id": "d45daeccabefff96b5a586758281db0c1695fd1d",
"size": "2558",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tutorial/canvas/canvas1d/workers/coloredtextbox_panda.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
from tdbus import SimpleDBusConnection, DBUS_BUS_SYSTEM, DBusHandler, signal_handler, DBusError
import logging
# Example script (Python 2): browse Avahi service types over the system bus.
# Root logger at DEBUG so tdbus traffic is visible while experimenting.
logging.basicConfig(level=logging.DEBUG)
# Well-known bus name, object path and interface of the Avahi daemon.
CONN_AVAHI = 'org.freedesktop.Avahi'
PATH_SERVER = '/'
IFACE_SERVER = 'org.freedesktop.Avahi.Server'
conn = SimpleDBusConnection(DBUS_BUS_SYSTEM)
# Probe the daemon first so we fail fast when Avahi is not running.
try:
    result = conn.call_method(PATH_SERVER, 'GetVersionString',
                              interface=IFACE_SERVER, destination=CONN_AVAHI)
except DBusError:
    print 'Avahi NOT available.'
    raise
print 'Avahi is available at %s' % CONN_AVAHI
print 'Avahi version: %s' % result.get_args()[0]
print
print 'Browsing service types on domain: local'
print 'Press CTRL-c to exit'
print
# args = (interface=-1 [any], protocol=0, domain, flags=0); returns the
# object path of the new ServiceTypeBrowser.
result = conn.call_method('/', 'ServiceTypeBrowserNew', interface=IFACE_SERVER,
                          destination=CONN_AVAHI, format='iisu', args=(-1, 0, 'local', 0))
browser = result.get_args()[0]
print browser
class AvahiHandler(DBusHandler):
    """Print every ItemNew signal emitted by the service-type browser."""
    @signal_handler()
    def ItemNew(self, message):
        # Signal args: (interface, protocol, type, domain, flags) —
        # presumably; indices 2 and 3 are the service type and domain.
        args = message.get_args()
        print 'service %s exists on domain %s' % (args[2], args[3])
# Register the handler and enter the dispatch loop (blocks until CTRL-c).
conn.add_handler(AvahiHandler())
conn.dispatch()
| {
"content_hash": "a9331ee03ef34342afc68786ff16f002",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 95,
"avg_line_length": 28.974358974358974,
"alnum_prop": 0.7053097345132744,
"repo_name": "DanLipsitt/python-tdbus",
"id": "915c3bf0813eb72e55ee42b63244a7ca3176b821",
"size": "1521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/avahi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59281"
},
{
"name": "Python",
"bytes": "41917"
}
],
"symlink_target": ""
} |
import grpc
from ..registry import RegistryService_pb2 as registry_dot_RegistryService__pb2
# NOTE(review): client stub generated by the gRPC protoc Python plugin from
# registry/RegistryService.proto — do not edit by hand; regenerate instead.
class RegistryServiceStub(object):
  # missing associated documentation comment in .proto file
  pass
  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Each attribute below is a callable for one unary-unary RPC. Request
    # messages come from RegistryService_pb2; the response type is the
    # nested ``.Response`` message of the same request type.
    self.FindRegisteredModel = channel.unary_unary(
        '/ai.verta.registry.RegistryService/FindRegisteredModel',
        request_serializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.Response.FromString,
        )
    self.GetRegisteredModel = channel.unary_unary(
        '/ai.verta.registry.RegistryService/GetRegisteredModel',
        request_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.Response.FromString,
        )
    self.GetRegisteredModelCount = channel.unary_unary(
        '/ai.verta.registry.RegistryService/GetRegisteredModelCount',
        request_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.Response.FromString,
        )
    self.CreateRegisteredModel = channel.unary_unary(
        '/ai.verta.registry.RegistryService/CreateRegisteredModel',
        request_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.FromString,
        )
    self.UpdateRegisteredModel = channel.unary_unary(
        '/ai.verta.registry.RegistryService/UpdateRegisteredModel',
        request_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.FromString,
        )
    self.DeleteRegisteredModel = channel.unary_unary(
        '/ai.verta.registry.RegistryService/DeleteRegisteredModel',
        request_serializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.Response.FromString,
        )
    self.FindModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/FindModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.Response.FromString,
        )
    self.GetModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/GetModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.Response.FromString,
        )
    self.CreateModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/CreateModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.SetModelVersion.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.FromString,
        )
    self.UpdateModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/UpdateModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.SetModelVersion.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.FromString,
        )
    self.SetLockModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/SetLockModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.Response.FromString,
        )
    self.DeleteModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/DeleteModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.Response.FromString,
        )
    self.getUrlForArtifact = channel.unary_unary(
        '/ai.verta.registry.RegistryService/getUrlForArtifact',
        request_serializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.Response.FromString,
        )
    self.commitArtifactPart = channel.unary_unary(
        '/ai.verta.registry.RegistryService/commitArtifactPart',
        request_serializer=registry_dot_RegistryService__pb2.CommitArtifactPart.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.CommitArtifactPart.Response.FromString,
        )
    self.getCommittedArtifactParts = channel.unary_unary(
        '/ai.verta.registry.RegistryService/getCommittedArtifactParts',
        request_serializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.Response.FromString,
        )
    self.commitMultipartArtifact = channel.unary_unary(
        '/ai.verta.registry.RegistryService/commitMultipartArtifact',
        request_serializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.Response.FromString,
        )
    self.logDatasetsInModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/logDatasetsInModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.Response.FromString,
        )
    self.logCodeBlobInModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/logCodeBlobInModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.Response.FromString,
        )
    self.logAttributesInModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/logAttributesInModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.Response.FromString,
        )
    self.logDockerMetadataInModelVersion = channel.unary_unary(
        '/ai.verta.registry.RegistryService/logDockerMetadataInModelVersion',
        request_serializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.SerializeToString,
        response_deserializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.Response.FromString,
        )
# NOTE(review): server-side skeleton generated by the gRPC protoc plugin —
# subclass it and override the methods to implement the service; do not edit.
class RegistryServiceServicer(object):
  # missing associated documentation comment in .proto file
  pass
  def FindRegisteredModel(self, request, context):
    """CRUD for RegisteredModel
    """
    # Default generated behavior for every method: report UNIMPLEMENTED.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetRegisteredModel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetRegisteredModelCount(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def CreateRegisteredModel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def UpdateRegisteredModel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def DeleteRegisteredModel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def FindModelVersion(self, request, context):
    """CRUD for Model Version
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def GetModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def CreateModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def UpdateModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def SetLockModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def DeleteModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def getUrlForArtifact(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def commitArtifactPart(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def getCommittedArtifactParts(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def commitMultipartArtifact(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def logDatasetsInModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def logCodeBlobInModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def logAttributesInModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  def logDockerMetadataInModelVersion(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_RegistryServiceServicer_to_server(servicer, server):
  """Register *servicer*'s RPC handlers on a grpc *server* (generated code)."""
  # One unary-unary handler per RPC; deserializers/serializers mirror the
  # request type and its nested ``.Response`` message in RegistryService_pb2.
  rpc_method_handlers = {
      'FindRegisteredModel': grpc.unary_unary_rpc_method_handler(
          servicer.FindRegisteredModel,
          request_deserializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.Response.SerializeToString,
      ),
      'GetRegisteredModel': grpc.unary_unary_rpc_method_handler(
          servicer.GetRegisteredModel,
          request_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.Response.SerializeToString,
      ),
      'GetRegisteredModelCount': grpc.unary_unary_rpc_method_handler(
          servicer.GetRegisteredModelCount,
          request_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.Response.SerializeToString,
      ),
      'CreateRegisteredModel': grpc.unary_unary_rpc_method_handler(
          servicer.CreateRegisteredModel,
          request_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.FromString,
          response_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.SerializeToString,
      ),
      'UpdateRegisteredModel': grpc.unary_unary_rpc_method_handler(
          servicer.UpdateRegisteredModel,
          request_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.FromString,
          response_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.SerializeToString,
      ),
      'DeleteRegisteredModel': grpc.unary_unary_rpc_method_handler(
          servicer.DeleteRegisteredModel,
          request_deserializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.Response.SerializeToString,
      ),
      'FindModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.FindModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.Response.SerializeToString,
      ),
      'GetModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.GetModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.Response.SerializeToString,
      ),
      'CreateModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.CreateModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.FromString,
          response_serializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.SerializeToString,
      ),
      'UpdateModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.UpdateModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.FromString,
          response_serializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.SerializeToString,
      ),
      'SetLockModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.SetLockModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.Response.SerializeToString,
      ),
      'DeleteModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.DeleteModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.FromString,
          response_serializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.Response.SerializeToString,
      ),
      'getUrlForArtifact': grpc.unary_unary_rpc_method_handler(
          servicer.getUrlForArtifact,
          request_deserializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.FromString,
          response_serializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.Response.SerializeToString,
      ),
      'commitArtifactPart': grpc.unary_unary_rpc_method_handler(
          servicer.commitArtifactPart,
          request_deserializer=registry_dot_RegistryService__pb2.CommitArtifactPart.FromString,
          response_serializer=registry_dot_RegistryService__pb2.CommitArtifactPart.Response.SerializeToString,
      ),
      'getCommittedArtifactParts': grpc.unary_unary_rpc_method_handler(
          servicer.getCommittedArtifactParts,
          request_deserializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.FromString,
          response_serializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.Response.SerializeToString,
      ),
      'commitMultipartArtifact': grpc.unary_unary_rpc_method_handler(
          servicer.commitMultipartArtifact,
          request_deserializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.FromString,
          response_serializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.Response.SerializeToString,
      ),
      'logDatasetsInModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.logDatasetsInModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.FromString,
          response_serializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.Response.SerializeToString,
      ),
      'logCodeBlobInModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.logCodeBlobInModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.FromString,
          response_serializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.Response.SerializeToString,
      ),
      'logAttributesInModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.logAttributesInModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.FromString,
          response_serializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.Response.SerializeToString,
      ),
      'logDockerMetadataInModelVersion': grpc.unary_unary_rpc_method_handler(
          servicer.logDockerMetadataInModelVersion,
          request_deserializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.FromString,
          response_serializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.Response.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'ai.verta.registry.RegistryService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| {
"content_hash": "755b7a12802f439f47734be95d10caed",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 123,
"avg_line_length": 54.15760869565217,
"alnum_prop": 0.7653788258906171,
"repo_name": "mitdbg/modeldb",
"id": "66e6e4665b9efcbfd0f780f76e2c52f15483cc88",
"size": "20000",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "client/verta/verta/_protos/public/registry/RegistryService_pb2_grpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import warnings
from django.template import loader
from django.utils import six
from . import filters, filterset
from .. import compat
class DjangoFilterBackend(object):
    """DRF filter backend that runs querysets through django-filter."""

    default_filter_set = filterset.FilterSet

    @property
    def template(self):
        # Crispy-forms gets its own template when it is installed.
        return (
            'django_filters/rest_framework/crispy_form.html'
            if compat.is_crispy()
            else 'django_filters/rest_framework/form.html'
        )

    def get_filter_class(self, view, queryset=None):
        """
        Return the django-filters `FilterSet` used to filter the queryset.
        """
        explicit_class = getattr(view, 'filter_class', None)
        field_names = getattr(view, 'filter_fields', None)

        if explicit_class:
            # An explicitly declared FilterSet must target the queryset's model.
            declared_model = explicit_class.Meta.model
            assert issubclass(queryset.model, declared_model), \
                'FilterSet model %s does not match queryset model %s' % \
                (declared_model, queryset.model)
            return explicit_class

        if field_names:
            # Synthesize a FilterSet from the view's filter_fields list.
            meta_base = getattr(self.default_filter_set, 'Meta', object)

            class AutoFilterSet(self.default_filter_set):
                class Meta(meta_base):
                    model = queryset.model
                    fields = field_names

            return AutoFilterSet

        return None

    def filter_queryset(self, request, queryset, view):
        # No filterset configured on the view -> pass the queryset through.
        filterset_cls = self.get_filter_class(view, queryset)
        if filterset_cls is None:
            return queryset
        return filterset_cls(request.query_params, queryset=queryset, request=request).qs

    def to_html(self, request, queryset, view):
        # Render the filter form for the browsable API, if any filterset applies.
        filterset_cls = self.get_filter_class(view, queryset)
        if not filterset_cls:
            return None
        bound_filterset = filterset_cls(request.query_params, queryset=queryset, request=request)
        template = loader.get_template(self.template)
        return template.render({'filter': bound_filterset}, request)

    def get_coreschema_field(self, field):
        # NumberFilters map to a numeric schema type; everything else is a string.
        schema_cls = (compat.coreschema.Number
                      if isinstance(field, filters.NumberFilter)
                      else compat.coreschema.String)
        return schema_cls(
            description=six.text_type(field.extra.get('help_text', ''))
        )

    def get_schema_fields(self, view):
        # This is not compatible with widgets where the query param differs from
        # the filter's attribute name. Notably, this includes `MultiWidget`, where
        # query params will be of the format `<name>_0`, `<name>_1`, etc...
        assert compat.coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert compat.coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        filter_class = getattr(view, 'filter_class', None)
        if filter_class is None:
            try:
                filter_class = self.get_filter_class(view, view.get_queryset())
            except Exception:
                warnings.warn(
                    "{} is not compatible with schema generation".format(view.__class__)
                )
                filter_class = None
        if not filter_class:
            return []
        return [
            compat.coreapi.Field(
                name=field_name,
                required=field.extra['required'],
                location='query',
                schema=self.get_coreschema_field(field)
            )
            for field_name, field in filter_class.base_filters.items()
        ]
| {
"content_hash": "d0ad94716d1be154e20233e89fdb018d",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 105,
"avg_line_length": 34.81553398058252,
"alnum_prop": 0.6070831009481317,
"repo_name": "rvmoura96/projeto-almoxarifado",
"id": "a837324cf1ea53a6ef2436fd4b82d1319460b2c0",
"size": "3587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myvenv/Lib/site-packages/django_filters/rest_framework/backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1298"
},
{
"name": "C",
"bytes": "426659"
},
{
"name": "C++",
"bytes": "237226"
},
{
"name": "CSS",
"bytes": "47496"
},
{
"name": "DTrace",
"bytes": "863"
},
{
"name": "HTML",
"bytes": "106823"
},
{
"name": "JavaScript",
"bytes": "115482"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "11286094"
},
{
"name": "Shell",
"bytes": "182"
},
{
"name": "Tcl",
"bytes": "1295070"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.