| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
while True:
s = ''.join([random.SystemRandom().choice(string.digits + string.ascii_letters)
for n in range(length)])
if (s not in container):
break
return s
|
def _generate_random_string(self, container, length=20)
|
Generate a random cookie or token string not in container.
The cookie or token should be secure in the sense that it should not
be feasible to guess its value. Because it is not derived from
anything else, there is no vulnerability of the token to computation,
or possible leakage of information from the token.
| 3.001622
| 3.036939
| 0.988371
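The generator above draws from `string.digits + string.ascii_letters` with `random.SystemRandom` (the OS entropy source) and retries on collision with `container`. A minimal standalone sketch of the same pattern; the function and variable names here are illustrative, not from the source:

```python
import random
import string

def generate_random_string(container, length=20):
    """Return a random alphanumeric string not already in container."""
    rng = random.SystemRandom()  # OS entropy, suitable for tokens
    while True:
        s = ''.join(rng.choice(string.digits + string.ascii_letters)
                    for _ in range(length))
        if s not in container:
            return s

existing = {'abc123'}
token = generate_random_string(existing)
assert token not in existing and len(token) == 20
```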
|
if (self.account_allowed(account)):
cookie = self._generate_random_string(self.access_cookies)
self.access_cookies[cookie] = int(time.time())
return cookie
else:
return None
|
def access_cookie(self, account)
|
Make and store access cookie for a given account.
If account is allowed then make a cookie and add it to the dict
of accepted access cookies with current timestamp as the value.
Return the access cookie.
Otherwise return None.
| 4.314742
| 3.639851
| 1.185417
|
if (cookie in self.access_cookies):
age = int(time.time()) - self.access_cookies[cookie]
if (age <= (self.access_cookie_lifetime + 1)):
self.logger.info(log_msg + " " + cookie +
" ACCEPTED COOKIE (%ds old)" % age)
return True
# Expired...
self.logger.info(log_msg + " " + cookie +
" EXPIRED COOKIE (%ds old > %ds)" %
(age, self.access_cookie_lifetime))
# Keep cookie for 2x lifetime in order to generate
# a helpful expired message
if (age > (self.access_cookie_lifetime * 2)):
del self.access_cookies[cookie]
return False
else:
self.logger.info(log_msg + " " + cookie + " REJECTED COOKIE")
return False
|
def access_cookie_valid(self, cookie, log_msg)
|
Check access cookie validity.
Returns true if the access cookie is valid. The set of allowed
access cookies is stored in self.access_cookies.
Uses log_msg as prefix to the info level log message of acceptance or
rejection.
| 3.237198
| 3.107763
| 1.041649
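The validity check above distinguishes three outcomes: accepted (age within the lifetime plus one second of slack), expired (known cookie past its lifetime, retained for up to twice the lifetime so a helpful message can be logged), and rejected (unknown). A condensed sketch of that three-way logic, assuming a dict mapping cookie values to issue timestamps:

```python
import time

def cookie_state(cookies, cookie, lifetime):
    # cookies: dict of cookie value -> issue timestamp (seconds)
    if cookie not in cookies:
        return 'REJECTED'                # unknown, or already purged
    age = int(time.time()) - cookies[cookie]
    if age <= lifetime + 1:
        return 'ACCEPTED'
    if age > lifetime * 2:
        del cookies[cookie]              # purge only after 2x lifetime
    return 'EXPIRED'

cookies = {'tok': int(time.time()) - 700}
print(cookie_state(cookies, 'tok', lifetime=600))  # EXPIRED (still stored)
```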
|
if (cookie):
token = self._generate_random_string(self.access_tokens)
self.access_tokens[token] = (cookie, int(time.time()))
return token
else:
return None
|
def access_token(self, cookie)
|
Make and store access token as proxy for the access cookie.
Create an access token to act as a proxy for the access cookie, add it to
the dict of accepted access tokens with (cookie, current timestamp)
as the value. Return the access token, or None if cookie is not set.
| 4.161844
| 3.44685
| 1.207434
|
if (token in self.access_tokens):
(cookie, issue_time) = self.access_tokens[token]
age = int(time.time()) - issue_time
if (age <= (self.access_token_lifetime + 1)):
self.logger.info(log_msg + " " + token +
" ACCEPTED TOKEN (%ds old)" % age)
return True
# Expired...
self.logger.info(log_msg + " " + token +
" EXPIRED TOKEN (%ds old > %ds)" %
(age, self.access_token_lifetime))
# Keep token for 2x lifetime in order to generate
# a helpful expired message
if (age > (self.access_token_lifetime * 2)):
del self.access_tokens[token]
return False
else:
self.logger.info(log_msg + " " + token + " REJECTED TOKEN")
return False
|
def access_token_valid(self, token, log_msg)
|
Check token validity.
Returns true if the token is valid. The set of allowed access tokens
is stored in self.access_tokens.
Uses log_msg as prefix to info level log message of acceptance or
rejection.
| 3.23452
| 3.154788
| 1.025273
|
authz_header = request.headers.get('Authorization', '[none]')
if (not authz_header.startswith('Bearer ')):
return False
token = authz_header[7:]
return self.access_token_valid(
token, "info_authn: Authorization header")
|
def info_authn(self)
|
Check to see if user is authenticated for info.json.
Must have Authorization header with value that has the form
"Bearer TOKEN", where TOKEN is an appropriate and valid access
token.
| 5.409265
| 4.687061
| 1.154085
|
authn_cookie = request.cookies.get(
self.access_cookie_name, default='[none]')
return self.access_cookie_valid(authn_cookie, "image_authn: auth cookie")
|
def image_authn(self)
|
Check to see if user is authenticated for image requests.
Must have access cookie with an appropriate value.
| 8.180407
| 6.629347
| 1.233969
|
response = make_response(
"<html><script>window.close();</script></html>", 200,
{'Content-Type': "text/html"})
response.set_cookie(self.account_cookie_name, expires=0)
response.set_cookie(self.access_cookie_name, expires=0)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
|
def logout_handler(self, **args)
|
Handler for logout button.
Delete cookies and return HTML that immediately closes the window.
| 2.651713
| 2.58467
| 1.025939
|
message_id = request.args.get('messageId', default=None)
origin = request.args.get('origin', default='unknown_origin')
self.logger.info("access_token_handler: origin = " + origin)
account = request.cookies.get(self.account_cookie_name, default='')
token = self.access_token(account)
# Build JSON response
data_str = json.dumps(self.access_token_response(token, message_id))
ct = "application/json"
# If message_id is set then wrap in HTML with postMessage JavaScript
# for a browser client
if (message_id is not None):
data_str = % (token, data_str, origin)
ct = "text/html"
# Send response along with cookie
response = make_response(data_str, 200, {'Content-Type': ct})
if (token):
self.logger.info(
"access_token_handler: setting access token = " + token)
# Set the cookie for the image content
cookie = self.access_cookie(token)
self.logger.info(
"access_token_handler: setting access cookie = " + cookie)
response.set_cookie(self.access_cookie_name, cookie)
else:
self.logger.info(
"access_token_handler: auth failed, sending error")
response.headers['Access-control-allow-origin'] = '*'
return response
|
def access_token_handler(self, **args)
|
Get access token based on cookie sent with this request.
This handler deals with two cases:
1) Non-browser client (indicated by no messageId set in request)
where the response is a simple JSON response.
2) Browser client (indicated by messageId set in request) where
the request must be made from an iFrame and the response is
sent as JSON wrapped in HTML containing a postMessage() script
that conveys the access token to the viewer.
| 3.679349
| 3.220331
| 1.142537
|
response = make_response(
"<html><script>window.close();</script></html>", 200,
{'Content-Type': "text/html"})
response.set_cookie(self.account_cookie_name,
account_cookie_value)
return response
|
def set_cookie_close_window_response(self, account_cookie_value)
|
Response to set account cookie and close window HTML/JavaScript.
| 2.701104
| 2.619893
| 1.030998
|
cls.tmpdir = ('/tmp' if (tmpdir is None) else tmpdir)
# Shell setup command (e.g. set library path)
cls.shellsetup = ('' if (shellsetup is None) else shellsetup)
if (pnmdir is None):
cls.pnmdir = '/usr/bin'
for dir in ('/usr/local/bin', '/sw/bin'):
if (os.path.isfile(os.path.join(dir, 'pngtopnm'))):
cls.pnmdir = dir
else:
cls.pnmdir = pnmdir
# Recklessly assume everything else under cls.pnmdir
cls.pngtopnm = os.path.join(cls.pnmdir, 'pngtopnm')
cls.jpegtopnm = os.path.join(cls.pnmdir, 'jpegtopnm')
cls.pnmfile = os.path.join(cls.pnmdir, 'pnmfile')
cls.pnmcut = os.path.join(cls.pnmdir, 'pnmcut')
cls.pnmscale = os.path.join(cls.pnmdir, 'pnmscale')
cls.pnmrotate = os.path.join(cls.pnmdir, 'pnmrotate')
cls.pnmflip = os.path.join(cls.pnmdir, 'pnmflip')
cls.pnmtopng = os.path.join(cls.pnmdir, 'pnmtopng')
cls.ppmtopgm = os.path.join(cls.pnmdir, 'ppmtopgm')
cls.pnmtotiff = os.path.join(cls.pnmdir, 'pnmtotiff')
cls.pnmtojpeg = os.path.join(cls.pnmdir, 'pnmtojpeg')
cls.pamditherbw = os.path.join(cls.pnmdir, 'pamditherbw')
# Need djatoka to get jp2 output
cls.djatoka_comp = '/Users/simeon/packages/adore-djatoka-1.1/bin/compress.sh'
|
def find_binaries(cls, tmpdir=None, shellsetup=None, pnmdir=None)
|
Set instance variables for directory and binary locations.
FIXME - should accept params to set things other than defaults.
| 2.738865
| 2.675936
| 1.023517
|
pid = os.getpid()
self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid))
outfile = self.basename + '.pnm'
# Convert source file to pnm
filetype = self.file_type(self.srcfile)
if (filetype == 'png'):
if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' + outfile)):
raise IIIFError(text="Oops... got error from pngtopnm.")
elif (filetype == 'jpg'):
if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile + ' > ' + outfile)):
raise IIIFError(text="Oops... got error from jpegtopnm.")
else:
raise IIIFError(code='501',
text='bad input file format (only know how to read png/jpeg)')
self.tmpfile = outfile
# Get size
(self.width, self.height) = self.image_size(self.tmpfile)
|
def do_first(self)
|
Create PNM file from input image file.
| 4.004137
| 3.726855
| 1.074401
|
infile = self.tmpfile
outfile = self.basename + '.reg'
# simeon@ice ~>cat m.pnm | pnmcut 10 10 100 200 > m1.pnm
if (x is None):
# print "region: full"
self.tmpfile = infile
else:
# print "region: (%d,%d,%d,%d)" % (x,y,w,h)
if (self.shell_call('cat ' + infile + ' | ' + self.pnmcut + ' ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + str(h) + ' > ' + outfile)):
raise IIIFError(text="Oops... got nonzero output from pnmcut.")
self.width = w
self.height = h
self.tmpfile = outfile
|
def do_region(self, x, y, w, h)
|
Apply region selection.
| 5.743151
| 5.721034
| 1.003866
|
# simeon@ice ~>cat m1.pnm | pnmscale -width 50 > m2.pnm
infile = self.tmpfile
outfile = self.basename + '.siz'
if (w is None):
# print "size: no scaling"
self.tmpfile = infile
else:
# print "size: scaling to (%d,%d)" % (w,h)
if (self.shell_call('cat ' + infile + ' | ' + self.pnmscale + ' -width ' + str(w) + ' -height ' + str(h) + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmscale.")
self.width = w
self.height = h
self.tmpfile = outfile
|
def do_size(self, w, h)
|
Apply size scaling.
| 6.917137
| 6.505359
| 1.063298
|
infile = self.tmpfile
outfile = self.basename + '.col'
# Quality (bit-depth):
if (quality == 'grey' or quality == 'gray'):
if (self.shell_call('cat ' + infile + ' | ' + self.ppmtopgm + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from ppmtopgm.")
self.tmpfile = outfile
elif (quality == 'bitonal'):
if (self.shell_call('cat ' + infile + ' | ' + self.ppmtopgm + ' | ' + self.pamditherbw + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from ppmtopgm.")
self.tmpfile = outfile
elif ((quality == 'native' and self.api_version < '2.0') or
(quality == 'default' and self.api_version >= '2.0') or
quality == 'color'):
self.tmpfile = infile
else:
raise IIIFError(code=400, parameter='quality',
text="Unknown quality parameter value requested.")
|
def do_quality(self, quality)
|
Apply value of quality parameter.
| 4.51947
| 4.379608
| 1.031935
|
infile = self.tmpfile
outfile = self.basename + '.out'
outfile_jp2 = self.basename + '.jp2'
# Now convert finished pnm file to output format
# simeon@ice ~>cat m3.pnm | pnmtojpeg > m4.jpg
# simeon@ice ~>cat m3.pnm | pnmtotiff > m4.jpg
# pnmtotiff: computing colormap...
# pnmtotiff: Too many colors - proceeding to write a 24-bit RGB file.
# pnmtotiff: If you want an 8-bit palette file, try doing a 'ppmquant 256'.
# simeon@ice ~>cat m3.pnm | pnmtopng > m4.png
fmt = ('png' if (format is None) else format)
if (fmt == 'png'):
# print "format: png"
if (self.shell_call(self.pnmtopng + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtopng.")
mime_type = "image/png"
elif (fmt == 'jpg'):
# print "format: jpg"
if (self.shell_call(self.pnmtojpeg + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtojpeg.")
mime_type = "image/jpeg"
elif (fmt == 'tiff' or fmt == 'jp2'):
# print "format: tiff/jp2"
if (self.shell_call(self.pnmtotiff + ' ' + infile + ' > ' + outfile)):
raise IIIFError(
text="Oops... got nonzero output from pnmtotiff.")
mime_type = "image/tiff"
if (fmt == 'jp2'):
# use djatoka after tiff
if (self.shell_call(DJATOKA_COMP + ' -i ' + outfile + ' -o ' + outfile_jp2)):
raise IIIFError(
text="Oops... got nonzero output from DJATOKA_COMP.")
mime_type = "image/jp2"
outfile = outfile_jp2
else:
raise IIIFError(code=415, parameter='format',
text="Unsupported output file format (%s), only png,jpg,tiff are supported." % (fmt))
self.outfile = outfile
self.output_format = fmt
self.mime_type = mime_type
|
def do_format(self, format)
|
Apply format selection.
| 3.592196
| 3.574776
| 1.004873
|
try:
magic_text = magic.from_file(file)
if (isinstance(magic_text, bytes)):
# In python2 and travis python3 (?!) decode to get unicode string
magic_text = magic_text.decode('utf-8')
except (TypeError, IOError):
return
if (re.search('PNG image data', magic_text)):
return('png')
elif (re.search('JPEG image data', magic_text)):
return('jpg')
# failed
return
|
def file_type(self, file)
|
Use python-magic to determine file type.
Returns 'png' or 'jpg' on success, nothing on failure.
| 5.138153
| 4.55039
| 1.129168
|
pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r')
pnmfileout = pout.read(200)
pout.close()
m = re.search(r', (\d+) by (\d+) ', pnmfileout)
if (m is None):
raise IIIFError(
text="Bad output from pnmfile when trying to get size.")
w = int(m.group(1))
h = int(m.group(2))
# print "pnmfile output = %s" % (pnmfileout)
# print "image size = %d,%d" % (w,h)
return(w, h)
|
def image_size(self, pnmfile)
|
Get width and height of pnm file.
simeon@homebox src>pnmfile /tmp/214-2.png
/tmp/214-2.png:PPM raw, 100 by 100 maxval 255
| 3.956345
| 4.002149
| 0.988555
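The size extraction relies on the `pnmfile` output format shown in the docstring. A sketch of the same regex applied to that sample line instead of a live `pnmfile` call:

```python
import re

pnmfileout = '/tmp/214-2.png:PPM raw, 100 by 100 maxval 255'
m = re.search(r', (\d+) by (\d+) ', pnmfileout)
assert m is not None
w, h = int(m.group(1)), int(m.group(2))
print(w, h)  # 100 100
```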
|
return(subprocess.call(self.shellsetup + shellcmd, shell=True))
|
def shell_call(self, shellcmd)
|
Shell call with necessary setup first.
| 8.642337
| 5.85419
| 1.476265
|
for file in glob.glob(self.basename + '*'):
os.unlink(file)
|
def cleanup(self)
|
Clean up any temporary files.
| 6.755671
| 4.924075
| 1.371968
|
headers = dict(self.headers)
if (api_version < '1.1'):
headers['Content-Type'] = 'text/xml'
response = self.as_xml()
else:
headers['Content-Type'] = 'text/plain'
response = self.as_txt()
return(response, self.code, headers)
|
def image_server_response(self, api_version=None)
|
Response, code and headers for image server error response.
api_version selects the format (XML for 1.0). The return value is
a tuple of
response - body of HTTP response
status - the HTTP status code
headers - a dict of HTTP headers which will include the Content-Type
As a side effect the routine sets self.content_type
to the correct media type for the response.
| 3.446667
| 3.031394
| 1.136991
|
# Build tree
spacing = ("\n" if (self.pretty_xml) else "")
root = Element('error', {'xmlns': I3F_NS})
root.text = spacing
e_parameter = Element('parameter', {})
e_parameter.text = self.parameter
e_parameter.tail = spacing
root.append(e_parameter)
if (self.text):
e_text = Element('text', {})
e_text.text = self.text
e_text.tail = spacing
root.append(e_text)
# Write out as XML document to return
tree = ElementTree(root)
xml_buf = io.BytesIO()
if (sys.version_info < (2, 7)):
tree.write(xml_buf, encoding='UTF-8')
else:
tree.write(xml_buf, encoding='UTF-8',
xml_declaration=True, method='xml')
return(xml_buf.getvalue().decode('utf-8'))
|
def as_xml(self)
|
XML representation of the error to be used in HTTP response.
This XML format follows the IIIF Image API v1.0 specification,
see <http://iiif.io/api/image/1.0/#error>
| 3.019342
| 2.937738
| 1.027778
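For reference, the tree built above serializes to a small namespaced document. A sketch reproducing the same structure; the namespace URI is an assumed stand-in for the I3F_NS constant, which is not shown in this record:

```python
import io
from xml.etree.ElementTree import Element, ElementTree

I3F_NS = 'http://library.stanford.edu/iiif/image-api/ns/'  # assumed value
root = Element('error', {'xmlns': I3F_NS})
root.text = '\n'
e_parameter = Element('parameter')
e_parameter.text = 'size'
e_parameter.tail = '\n'
root.append(e_parameter)
buf = io.BytesIO()
ElementTree(root).write(buf, encoding='UTF-8', xml_declaration=True)
print(buf.getvalue().decode('utf-8'))
# <?xml version='1.0' encoding='UTF-8'?>
# <error xmlns="http://library.stanford.edu/iiif/image-api/ns/">
# <parameter>size</parameter>
# </error>
```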
|
s = "IIIF Image Server Error\n\n"
s += self.text if (self.text) else 'UNKNOWN_ERROR'
s += "\n\n"
if (self.parameter):
s += "parameter=%s\n" % self.parameter
if (self.code):
s += "code=%d\n\n" % self.code
for header in sorted(self.headers):
s += "header %s=%s\n" % (header, self.headers[header])
return s
|
def as_txt(self)
|
Text rendering of error response.
Designed for use with Image API version 1.1 and above where the
error response is suggested to be text or html but not otherwise
specified. Intended to provide useful information for debugging.
| 3.528671
| 3.288234
| 1.07312
|
params = {
'response_type': 'code',
'client_id': self.google_api_client_id,
'redirect_uri': self.scheme_host_port_prefix(
'http', config.host, config.port, prefix) + '/home',
'scope': self.google_api_scope,
'state': self.request_args_get('next', default=''),
}
url = self.google_oauth2_url + 'auth?' + urlencode(params)
return self.login_handler_redirect(url)
|
def login_handler(self, config=None, prefix=None, **args)
|
OAuth starts here, redirect user to Google.
| 3.790846
| 3.51332
| 1.078993
|
gresponse = self.google_get_token(config, prefix)
gdata = self.google_get_data(config, gresponse)
email = gdata.get('email', 'NO_EMAIL')
name = gdata.get('name', 'NO_NAME')
# Make and store cookie from identity, set and close window
cookie = self.access_cookie(name + ' ' + email)
return self.set_cookie_close_window_response(cookie)
|
def home_handler(self, config=None, prefix=None, **args)
|
Handler for /home redirect path after Google auth.
OAuth ends up back here from Google. Set the account cookie
and close window to trigger next step.
| 6.907313
| 5.340991
| 1.293264
|
params = {
'code': self.request_args_get(
'code',
default=''),
'client_id': self.google_api_client_id,
'client_secret': self.google_api_client_secret,
'redirect_uri': self.scheme_host_port_prefix(
'http', config.host, config.port, prefix) + '/home',
'grant_type': 'authorization_code',
}
payload = urlencode(params).encode('utf-8')
url = self.google_oauth2_url + 'token'
req = Request(url, payload)
json_str = urlopen(req).read()
return json.loads(json_str.decode('utf-8'))
|
def google_get_token(self, config, prefix)
|
Make request to Google API to get token.
| 2.722962
| 2.612714
| 1.042197
|
params = {
'access_token': response['access_token'],
}
payload = urlencode(params)
url = self.google_api_url + 'userinfo?' + payload
req = Request(url)
json_str = urlopen(req).read()
return json.loads(json_str.decode('utf-8'))
|
def google_get_data(self, config, response)
|
Make request to Google API to get profile data for the user.
| 2.836013
| 2.528225
| 1.121741
|
if (self.api_version == '1.0'):
uri_pattern = r'http://library.stanford.edu/iiif/image-api/compliance.html#level%d'
elif (self.api_version == '1.1'):
uri_pattern = r'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d'
elif (self.api_version == '2.0' or
self.api_version == '2.1'):
uri_pattern = r'http://iiif.io/api/image/2/level%d.json'
else:
return
if (self.compliance_level is None):
return
return(uri_pattern % self.compliance_level)
|
def compliance_uri(self)
|
Compliance URI based on api_version.
Value is based on api_version and compliance_level, and will be
None if either is unset/unrecognized. The assumption here is
that the api_version and level are orthogonal, override this
method if that isn't true.
| 2.247639
| 2.104977
| 1.067774
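A standalone sketch of the same version-to-pattern mapping, with the URI patterns copied from the record above:

```python
def compliance_uri(api_version, compliance_level):
    patterns = {
        '1.0': 'http://library.stanford.edu/iiif/image-api/compliance.html#level%d',
        '1.1': 'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d',
        '2.0': 'http://iiif.io/api/image/2/level%d.json',
        '2.1': 'http://iiif.io/api/image/2/level%d.json',
    }
    if api_version not in patterns or compliance_level is None:
        return None                      # unrecognized version or unset level
    return patterns[api_version] % compliance_level

print(compliance_uri('2.0', 1))  # http://iiif.io/api/image/2/level1.json
```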
|
# set if specified
if (srcfile is not None):
self.srcfile = srcfile
if (request is not None):
self.request = request
if (outfile is not None):
self.outfile = outfile
if (self.outfile is not None):
# create path to output dir if necessary
dir = os.path.dirname(self.outfile)
if (not os.path.exists(dir)):
os.makedirs(dir)
#
self.do_first()
(x, y, w, h) = self.region_to_apply()
self.do_region(x, y, w, h)
(w, h) = self.size_to_apply()
self.do_size(w, h)
(mirror, rot) = self.rotation_to_apply(no_mirror=True)
self.do_rotation(mirror, rot)
(quality) = self.quality_to_apply()
self.do_quality(quality)
self.do_format(self.request.format)
self.do_last()
return(self.outfile, self.mime_type)
|
def derive(self, srcfile=None, request=None, outfile=None)
|
Do sequence of manipulations for IIIF to derive output image.
Named arguments:
srcfile -- source image file
request -- IIIFRequest object with parsed parameters
outfile -- output image file. If set then the output will be
written to that file, otherwise a new temporary file
will be created and outfile set to its location.
See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order
Region THEN Size THEN Rotation THEN Quality THEN Format
Typical use:
r = IIIFRequest(region=...)
m = IIIFManipulator()
try:
m.derive(srcfile='a.jpg',request=r)
# .. serve m.outfile
except IIIFError as e:
# ..
finally:
m.cleanup() #removes temp m.outfile
| 2.632265
| 2.484844
| 1.059328
|
if (x is not None):
raise IIIFError(code=501, parameter="region",
text="Null manipulator supports only region=/full/.")
|
def do_region(self, x, y, w, h)
|
Null implementation of region selection.
| 42.950249
| 40.896057
| 1.05023
|
if (mirror):
raise IIIFError(code=501, parameter="rotation",
text="Null manipulator does not support mirroring.")
if (rot != 0.0):
raise IIIFError(code=501, parameter="rotation",
text="Null manipulator supports only rotation=(0|360).")
|
def do_rotation(self, mirror, rot)
|
Null implementation of rotate and/or mirror.
| 6.80499
| 6.388999
| 1.065111
|
if (self.api_version >= '2.0'):
if (quality != "default"):
raise IIIFError(code=501, parameter="default",
text="Null manipulator supports only quality=default.")
else: # versions 1.0 and 1.1
if (quality != "native"):
raise IIIFError(code=501, parameter="native",
text="Null manipulator supports only quality=native.")
|
def do_quality(self, quality)
|
Null implementation of quality.
| 5.688189
| 5.518194
| 1.030806
|
if (format is not None):
raise IIIFError(code=415, parameter="format",
text="Null manipulator does not support specification of output format.")
#
if (self.outfile is None):
self.outfile = self.srcfile
else:
try:
shutil.copyfile(self.srcfile, self.outfile)
except IOError as e:
raise IIIFError(code=500,
text="Failed to copy file (%s)." % (str(e)))
self.mime_type = None
|
def do_format(self, format)
|
Null implementation of format selection.
This is the last step, this null implementation does not accept any
specification of a format because we don't even know what the input
format is.
| 5.12332
| 5.041851
| 1.016159
|
if (self.request.region_full or
(self.request.region_pct and
self.request.region_xywh == (0, 0, 100, 100))):
return(None, None, None, None)
# Cannot do anything else unless we know size (in self.width and
# self.height)
if (self.width <= 0 or self.height <= 0):
raise IIIFError(code=501, parameter='region',
text="Region parameters require knowledge of image size which is not implemented.")
if (self.request.region_square):
if (self.width <= self.height):
y_offset = (self.height - self.width) / 2
return(0, y_offset, self.width, self.width)
else: # self.width>self.height
x_offset = (self.width - self.height) / 2
return(x_offset, 0, self.height, self.height)
# pct or explicit pixel sizes
pct = self.request.region_pct
(x, y, w, h) = self.request.region_xywh
# Convert pct to pixels based on actual size
if (pct):
x = int((x / 100.0) * self.width + 0.5)
y = int((y / 100.0) * self.height + 0.5)
w = int((w / 100.0) * self.width + 0.5)
h = int((h / 100.0) * self.height + 0.5)
# Check if boundary extends beyond image and truncate
if ((x + w) > self.width):
w = self.width - x
if ((y + h) > self.height):
h = self.height - y
# Final checks: zero size is an error, whole image means no extraction
if (w == 0 or h == 0):
raise IIIFZeroSizeError(code=400, parameter='region',
text="Region parameters would result in zero size result image.")
if (x == 0 and y == 0 and w == self.width and h == self.height):
return(None, None, None, None)
return(x, y, w, h)
|
def region_to_apply(self)
|
Return the x,y,w,h parameters to extract given image width and height.
Assume image width and height are available in self.width and
self.height, and that self.request is an IIIFRequest object.
Expected use:
(x,y,w,h) = self.region_to_apply()
if (x is None):
# full image
else:
# extract
Returns (None,None,None,None) if no extraction is required.
| 2.697373
| 2.431689
| 1.109259
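The pct branch above converts each percentage to pixels with round-half-up. A worked example for region `pct:10,10,80,80` on a 600x400 image:

```python
width, height = 600, 400
x, y, w, h = 10, 10, 80, 80  # percentages from region pct:10,10,80,80
x = int((x / 100.0) * width + 0.5)   # 60
y = int((y / 100.0) * height + 0.5)  # 40
w = int((w / 100.0) * width + 0.5)   # 480
h = int((h / 100.0) * height + 0.5)  # 320
print(x, y, w, h)  # 60 40 480 320
```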
|
if (self.request.size_full or self.request.size_pct == 100.0):
# full size
return(None, None)
# Not trivially full size, look at possibilities in turn
w = self.width
h = self.height
if (self.request.size_max):
# use size limits if present, else full
if (self.max_area and self.max_area < (w * h)):
scale = (float(self.max_area) / float(w * h)) ** 0.5
w = int(w * scale + 0.5)
h = int(h * scale + 0.5)
if (self.max_width):
max_height = self.max_height if self.max_height is not None else self.max_width
if (self.max_width < w):
# calculate wrt original width, height rather than
# w, h to avoid compounding rounding issues
scale = float(self.max_width) / float(self.width)
w = int(self.width * scale + 0.5)
h = int(self.height * scale + 0.5)
if (max_height < h):
scale = float(max_height) / float(self.height)
w = int(self.width * scale + 0.5)
h = int(self.height * scale + 0.5)
elif (self.request.size_pct is not None):
w = int(self.width * self.request.size_pct / 100.0 + 0.5)
h = int(self.height * self.request.size_pct / 100.0 + 0.5)
elif (self.request.size_bang):
# Have "!w,h" form
(mw, mh) = self.request.size_wh
# Pick smaller fraction and then work from that...
frac = min((float(mw) / float(self.width)),
(float(mh) / float(self.height)))
w = int(self.width * frac + 0.5)
h = int(self.height * frac + 0.5)
else:
# Must now be "w,h", "w," or ",h". If both are specified then this will be the size,
# otherwise find the other dimension to keep the aspect ratio
(w, h) = self.request.size_wh
if (w is None):
w = int(self.width * h / self.height + 0.5)
elif (h is None):
h = int(self.height * w / self.width + 0.5)
# Now have w,h, sanity check and return
if (w == 0 or h == 0):
raise IIIFZeroSizeError(
code=400, parameter='size',
text="Size parameter would result in zero size result image (%d,%d)." % (w, h))
# Below would be test for scaling up image size, this is allowed by spec
# if ( w>self.width or h>self.height ):
# raise IIIFError(code=400,parameter='size',
# text="Size requests scaling up image to larger than original.")
if (w == self.width and h == self.height):
return(None, None)
return(w, h)
|
def size_to_apply(self)
|
Calculate size of image scaled using size parameters.
Assumes current image width and height are available in self.width and
self.height, and self.request is IIIFRequest object.
Formats are: w, ,h w,h pct:p !w,h full max
Returns (None,None) if no scaling is required.
If max is requested and neither max_area nor max_width is
specified then this is the same as full. Otherwise the limits
are used to determine the size.
| 3.441186
| 3.169224
| 1.085813
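The `!w,h` branch above picks the smaller of the two scale fractions so the result fits inside the requested box while preserving aspect ratio. A worked example fitting a 600x400 image into a 300x300 box:

```python
width, height = 600, 400   # current image size
mw, mh = 300, 300          # "!300,300" request
frac = min(float(mw) / width, float(mh) / height)  # min(0.5, 0.75) = 0.5
w = int(width * frac + 0.5)   # 300
h = int(height * frac + 0.5)  # 200
print(w, h)  # 300 200
```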
|
rotation = self.request.rotation_deg
if (no_mirror and self.request.rotation_mirror):
raise IIIFError(code=501, parameter="rotation",
text="This implementation does not support mirroring.")
if (only90s and (rotation != 0.0 and rotation != 90.0 and
rotation != 180.0 and rotation != 270.0)):
raise IIIFError(code=501, parameter="rotation",
text="This implementation supports only 0,90,180,270 degree rotations.")
return(self.request.rotation_mirror, rotation)
|
def rotation_to_apply(self, only90s=False, no_mirror=False)
|
Check and interpret rotation.
Returns a truth value as to whether to mirror, and a floating point
number 0 <= angle < 360 (degrees).
| 3.276622
| 3.139001
| 1.043842
|
if (self.request.quality is None):
if (self.api_version <= '1.1'):
return('native')
else:
return('default')
return(self.request.quality)
|
def quality_to_apply(self)
|
Value of quality parameter to use in processing request.
Simple substitution of 'native' or 'default' if no quality
parameter is specified.
| 6.571771
| 4.092235
| 1.605912
|
if (not tile_height):
tile_height = tile_width
sf = 1
scale_factors = [sf]
for j in range(30): # limit of 2^30, should be enough!
sf = 2 * sf
if (tile_width * sf > self.width and
tile_height * sf > self.height):
break
scale_factors.append(sf)
return scale_factors
|
def scale_factors(self, tile_width, tile_height=None)
|
Return a set of scale factors for given tile and window size.
Gives a list of scale factors, starting at 1, in powers
of 2. The largest scale factor is chosen so that one tile will cover the
entire image (self.width, self.height).
If tile_height is not specified then tiles are assumed to be
squares of tile_width pixels.
| 3.695422
| 3.544992
| 1.042434
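A standalone sketch of the doubling loop above; for a 4000x3000 image with 512-pixel tiles it stops once a single tile at the next factor would exceed both dimensions:

```python
def scale_factors(width, height, tile_width, tile_height=None):
    if not tile_height:
        tile_height = tile_width
    sf = 1
    factors = [sf]
    for _ in range(30):  # limit of 2^30, as in the record above
        sf = 2 * sf
        if tile_width * sf > width and tile_height * sf > height:
            break
        factors.append(sf)
    return factors

print(scale_factors(4000, 3000, 512))  # [1, 2, 4]
```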
|
if (size is None):
size = self.sz
# Have we got to the smallest element?
if (size <= 3):
if (_not_diagonal(x, y)):
return None
else:
return (0, 0, 0)
divisor = size // 3
if (_not_diagonal(x // divisor, y // divisor)):
return None
return self.pixel(x % divisor, y % divisor, divisor)
|
def pixel(self, x, y, size=None)
|
Return color for a pixel.
| 5.569152
| 5.421461
| 1.027242
|
red = int(n * self.shade_factor)
if (red > 255):
red = 255
return (red, 50, 100)
|
def color(self, n)
|
Color of pixel that reached limit after n iterations.
Returns a color tuple for use with PIL, tending toward
red as we tend toward self.max_iter iterations.
| 4.770202
| 4.819401
| 0.989791
|
z = z * z + self.c
if (abs(z) > 2.0):
return self.color(n)
n += 1
if (n > self.max_iter):
return None
return self.mpixel(z, n)
|
def mpixel(self, z, n=0)
|
Iteration in Mandelbrot coordinate z.
| 4.229063
| 3.103725
| 1.362576
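mpixel() is the classic escape-time iteration z -> z² + c written recursively. An iterative sketch of the same test; `max_iter` and the return convention (iteration count instead of a color tuple) are illustrative stand-ins:

```python
def escape_iterations(c, max_iter=100):
    z = c                      # pixel() seeds the iteration with z == c
    for n in range(1, max_iter + 1):
        z = z * z + c
        if abs(z) > 2.0:
            return n           # escaped: color by iteration count
    return None                # never escaped: point is inside the set

print(escape_iterations(complex(0.0, 0.0)))  # None (in the set)
print(escape_iterations(complex(1.0, 1.0)))  # 1 (escapes immediately)
```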
|
x = (ix - self.xoffset + 0.5) / self.scale
y = (iy - self.yoffset + 0.5) / self.scale
z = complex(x, y)
self.set_c(z)
return self.mpixel(z)
|
def pixel(self, ix, iy)
|
Return color for a pixel.
Does translation from image coordinates (ix,iy) into
the complex plane coordinate z = x+yi, and then calls
self.mpixel(z) to find the color at point z.
| 4.089431
| 2.66531
| 1.534317
|
for sf in scale_factors:
if (sf * tilesize >= width and sf * tilesize >= height):
continue # avoid any full-region tiles
rts = tilesize * sf # tile size in original region
xt = (width - 1) // rts + 1
yt = (height - 1) // rts + 1
for nx in range(xt):
rx = nx * rts
rxe = rx + rts
if (rxe > width):
rxe = width
rw = rxe - rx
# same as sw = int(math.ceil(rw/float(sf)))
sw = (rw + sf - 1) // sf
for ny in range(yt):
ry = ny * rts
rye = ry + rts
if (rye > height):
rye = height
rh = rye - ry
# same as sh = int(math.ceil(rh/float(sf)))
sh = (rh + sf - 1) // sf
yield([rx, ry, rw, rh], [sw, sh])
|
def static_partial_tile_sizes(width, height, tilesize, scale_factors)
|
Generator for partial tile sizes for zoomed in views.
Positional arguments:
width -- width of full size image
height -- height of full size image
tilesize -- width and height of tiles
scale_factors -- iterable of scale factors, typically [1,2,4..]
Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
| 2.644631
| 2.566571
| 1.030414
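A worked example of the per-tile arithmetic above for a single scale factor, showing how the last column is clipped to the image edge:

```python
width, tilesize, sf = 1000, 512, 1
rts = tilesize * sf                  # tile size in the original image
xt = (width - 1) // rts + 1          # number of tile columns -> 2
rx = 1 * rts                         # left edge of the second column -> 512
rxe = min(rx + rts, width)           # clipped right edge -> 1000
rw = rxe - rx                        # region width -> 488
sw = (rw + sf - 1) // sf             # scaled width, ceil(rw / sf) -> 488
print(xt, rx, rw, sw)                # 2 512 488 488
```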
|
# FIXME - Not sure what correct algorithm is for this, from
# observation of OpenSeadragon it seems that one keeps halving
# the pixel size of the full image until both width and
# height are less than the tile size. After that all subsequent
# halvings of the image size are used, all the way down to 1,1.
# It seems that without these reduced size full-region images,
# OpenSeadragon will not display any unzoomed image in small windows.
#
# I do not understand the algorithm that OpenSeadragon uses (or
# know where it is in the code) to decide how small a version of
# the complete image to request. It seems that there is a bug in
# OpenSeadragon here because in some cases it requests images
# of size 1,1 multiple times, which is anyway a useless image.
for level in range(0, 20):
factor = 2.0**level
sw = int(width / factor + 0.5)
sh = int(height / factor + 0.5)
if (sw < tilesize and sh < tilesize):
if (sw < 1 or sh < 1):
break
yield([sw, sh])
|
def static_full_sizes(width, height, tilesize)
|
Generator for scaled-down full image sizes.
Positional arguments:
width -- width of full size image
height -- height of full size image
tilesize -- width and height of tiles
Yields [sw,sh], the size for each full-region tile that is less than
the tilesize. This includes tiles up to the full image size if that
is smaller than the tilesize.
| 8.671844
| 8.314171
| 1.04302
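A worked example of the halving loop above: sizes are only emitted once both dimensions drop below the tile size, and the loop stops before emitting a zero dimension:

```python
width, height, tilesize = 1000, 800, 512
sizes = []
for level in range(20):
    factor = 2.0 ** level
    sw = int(width / factor + 0.5)
    sh = int(height / factor + 0.5)
    if sw < tilesize and sh < tilesize:
        if sw < 1 or sh < 1:
            break
        sizes.append([sw, sh])
print(sizes)  # [[500, 400], [250, 200], ..., [2, 2], [1, 1]]
```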
|
if extra.startswith('/'):
extra = extra[1:]
r = IIIFRequest(identifier='dummy',
api_version=self.api_version)
r.parse_url(extra)
if (r.info):
raise IIIFStaticError("Attempt to specify Image Information in extras.")
return(r)
|
def parse_extra(self, extra)
|
Parse extra request parameters to IIIFRequest object.
| 11.154676
| 8.050307
| 1.385621
|
if (osd_version in self.osd_config):
return(self.osd_config[osd_version])
else:
raise IIIFStaticError("OpenSeadragon version %s not supported, available versions are %s" %
(osd_version, ', '.join(sorted(self.osd_config.keys()))))
|
def get_osd_config(self, osd_version)
|
Select appropriate portion of config.
If the version requested is not supported then raise an exception with
a helpful error message listing the versions supported.
| 3.421946
| 3.461904
| 0.988458
|
self.src = src
self.identifier = identifier
# Get image details and calculate tiles
im = self.manipulator_klass()
im.srcfile = self.src
im.set_max_image_pixels(self.max_image_pixels)
im.do_first()
width = im.width
height = im.height
scale_factors = im.scale_factors(self.tilesize)
# Setup destination and IIIF identifier
self.setup_destination()
# Write out images
for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors):
self.generate_tile(region, size)
sizes = []
for size in static_full_sizes(width, height, self.tilesize):
# See https://github.com/zimeon/iiif/issues/9
sizes.append({'width': size[0], 'height': size[1]})
self.generate_tile('full', size)
for request in self.extras:
request.identifier = self.identifier
if (request.is_scaled_full_image()):
sizes.append({'width': request.size_wh[0],
'height': request.size_wh[1]})
self.generate_file(request)
# Write info.json
qualities = ['default'] if (self.api_version > '1.1') else ['native']
info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier,
width=width, height=height, scale_factors=scale_factors,
tile_width=self.tilesize, tile_height=self.tilesize,
formats=['jpg'], qualities=qualities, sizes=sizes,
api_version=self.api_version)
json_file = os.path.join(self.dst, self.identifier, 'info.json')
if (self.dryrun):
self.logger.warning(
"dryrun mode, would write the following files:")
self.logger.warning("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
else:
with open(json_file, 'w') as f:
f.write(info.as_json())
f.close()
self.logger.info("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
self.logger.debug("Written %s" % (json_file))
|
def generate(self, src=None, identifier=None)
|
Generate static files for one source image.
| 3.665956
| 3.583325
| 1.02306
|
r = IIIFRequest(identifier=self.identifier,
api_version=self.api_version)
if (region == 'full'):
r.region_full = True
else:
r.region_xywh = region # [rx,ry,rw,rh]
r.size_wh = size # [sw,sh]
r.format = 'jpg'
self.generate_file(r, True)
|
def generate_tile(self, region, size)
|
Generate one tile for the given region and size of this image.
| 5.568275
| 5.17928
| 1.075106
|
use_canonical = self.get_osd_config(self.osd_version)['use_canonical']
height = None
if (undistorted and use_canonical):
height = r.size_wh[1]
r.size_wh = [r.size_wh[0], None] # [sw,sh] -> [sw,]
path = r.url()
# Generate...
if (self.dryrun):
self.logger.info("%s / %s" % (self.dst, path))
else:
m = self.manipulator_klass(api_version=self.api_version)
try:
m.derive(srcfile=self.src, request=r,
outfile=os.path.join(self.dst, path))
self.logger.info("%s / %s" % (self.dst, path))
except IIIFZeroSizeError:
self.logger.info("%s / %s - zero size, skipped" %
(self.dst, path))
return # done if zero size
if (r.region_full and use_canonical and height is not None):
# In v2.0 of the spec, the canonical URI form `w,` for scaled
# images of the full region was introduced. This is somewhat at
# odds with the requirement for `w,h` specified in `sizes` to
# be available, and has problems of precision with tall narrow
# images. Hopefully this will be fixed in 3.0 but for now symlink
# the `w,h` form to the `w,` dirs so that clients using the specified
# `w,h` form also work. See
# <https://github.com/IIIF/iiif.io/issues/544>
#
# FIXME - This is ugly because we duplicate code in
# iiif.request.url to construct the partial URL
region_dir = os.path.join(r.quote(r.identifier), "full")
wh_dir = "%d,%d" % (r.size_wh[0], height)
wh_path = os.path.join(region_dir, wh_dir)
wc_dir = "%d," % (r.size_wh[0])
wc_path = os.path.join(region_dir, wc_dir)
if (not self.dryrun):
ln = os.path.join(self.dst, wh_path)
if (os.path.exists(ln)):
os.remove(ln)
os.symlink(wc_dir, ln)
self.logger.info("%s / %s -> %s" % (self.dst, wh_path, wc_path))
|
def generate_file(self, r, undistorted=False)
|
Generate file for IIIFRequest object r from this image.
FIXME - Would be nicer to have the test for an undistorted image request
based on the IIIFRequest object, and then know whether to apply canonicalization
or not.
Logically we might use `w,h` instead of the Image API v2.0 canonical
form `w,` if the api_version is 1.x. However, OSD 1.2.1 and 2.x assume
the new canonical form even in the case where the API version is declared
earlier. Thus, determine whether to use the canonical or `w,h` form based
solely on the setting of osd_version.
| 5.613821
| 4.850849
| 1.157286
|
# Do we have a separate identifier?
if (not self.identifier):
# No separate identifier specified, split off the last path segment
# of the source name, strip the extension to get the identifier
self.identifier = os.path.splitext(os.path.split(self.src)[1])[0]
# Done if dryrun, else setup self.dst first
if (self.dryrun):
return
if (not self.dst):
raise IIIFStaticError("No destination directory specified!")
dst = self.dst
if (os.path.isdir(dst)):
# Exists, OK
pass
elif (os.path.isfile(dst)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % dst)
else:
os.makedirs(dst)
# Second, create identifier based subdir if necessary
outd = os.path.join(dst, self.identifier)
if (os.path.isdir(outd)):
# Nothing for now, perhaps should delete?
self.logger.warning(
"Output directory %s already exists, adding/updating files" % outd)
pass
elif (os.path.isfile(outd)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % outd)
else:
os.makedirs(outd)
self.logger.debug("Output directory %s" % outd)
|
def setup_destination(self)
|
Setup output directory based on self.dst and self.identifier.
Returns the output directory name on success, raises an exception on
failure.
| 3.883533
| 3.596374
| 1.079847
|
osd_config = self.get_osd_config(self.osd_version)
osd_base = osd_config['base']
osd_dir = osd_config['dir'] # relative to base
osd_js = os.path.join(osd_dir, osd_config['js'])
osd_images = os.path.join(osd_dir, osd_config['images'])
if (os.path.isdir(html_dir)):
# Exists, fine
pass
elif (os.path.isfile(html_dir)):
raise IIIFStaticError(
"Can't write to directory %s: a file of that name exists" % html_dir)
else:
os.makedirs(html_dir)
self.logger.info("Writing HTML to %s" % (html_dir))
with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f:
template = f.read()
outfile = self.identifier + '.html'
outpath = os.path.join(html_dir, outfile)
with open(outpath, 'w') as f:
info_json_uri = '/'.join([self.identifier, 'info.json'])
if (self.prefix):
info_json_uri = '/'.join([self.prefix, info_json_uri])
d = dict(identifier=self.identifier,
api_version=self.api_version,
osd_version=self.osd_version,
osd_uri=osd_js,
osd_images_prefix=osd_images,
osd_height=osd_height,
osd_width=osd_width,
info_json_uri=info_json_uri)
f.write(Template(template).safe_substitute(d))
self.logger.info("%s / %s" % (html_dir, outfile))
# Do we want to copy OSD in there too? If so, do it only if
# we haven't already
if (include_osd):
if (self.copied_osd):
self.logger.info("OpenSeadragon already copied")
else:
# Make directory, copy JavaScript and icons (from osd_images)
osd_path = os.path.join(html_dir, osd_dir)
if (not os.path.isdir(osd_path)):
os.makedirs(osd_path)
shutil.copyfile(os.path.join(osd_base, osd_js),
os.path.join(html_dir, osd_js))
self.logger.info("%s / %s" % (html_dir, osd_js))
osd_images_path = os.path.join(html_dir, osd_images)
if (os.path.isdir(osd_images_path)):
self.logger.warning(
"OpenSeadragon images directory (%s) already exists, skipping"
% osd_images_path)
else:
shutil.copytree(os.path.join(osd_base, osd_images),
osd_images_path)
self.logger.info("%s / %s/*" % (html_dir, osd_images))
self.copied_osd = True
|
def write_html(self, html_dir='/tmp', include_osd=False,
osd_width=500, osd_height=500)
|
Write HTML test page using OpenSeadragon for the tiles generated.
Assumes that the generate(..) method has already been called to set up
identifier etc. Parameters:
html_dir - output directory for HTML files, will be created if it
does not already exist
include_osd - true to include OpenSeadragon code
osd_width - width of OpenSeadragon pane in pixels
osd_height - height of OpenSeadragon pane in pixels
| 2.325105
| 2.294621
| 1.013285
|
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
return _get_value_for_keys(key.split('.'), obj, default)
|
def get_value(key, obj, default=missing)
|
Helper for pulling a keyed value off various types of objects
| 2.790436
| 3.15031
| 0.885766
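get_value() dispatches on key type: integer keys index directly, string keys are split on dots and walked one segment at a time. A self-contained sketch of the dotted-key walk; the helper bodies here are reimplemented for illustration, not copied from the source:

```python
def get_value(key, obj, default=None):
    if isinstance(key, int):
        try:
            return obj[key]
        except (IndexError, KeyError, TypeError):
            return default
    for part in key.split('.'):  # walk 'a.b.c' one segment at a time
        try:
            obj = obj[part]
        except (KeyError, TypeError, IndexError):
            return default
    return obj

print(get_value('user.name', {'user': {'name': 'ada'}}))                # ada
print(get_value('user.age', {'user': {'name': 'ada'}}, default='n/a'))  # n/a
```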
|
for key, val in original.items():
if key not in self.fields:
data[key] = val
return data
|
def _handle_load_unknown(self, data, original)
|
Preserve unknown keys during deserialization.
| 4.092432
| 3.183159
| 1.285651
|
for key, val in original.items():
if key not in self.fields:
data[key] = val
return data
|
def _handle_dump_unknown(self, data, original)
|
Preserve unknown keys during serialization.
| 4.005523
| 3.185715
| 1.257339
|
validated_data = {
spec.VERSION: data[spec.VERSION],
spec.KIND: data[spec.KIND],
}
if data.get(spec.LOGGING):
validated_data[spec.LOGGING] = LoggingConfig.from_dict(
data[spec.LOGGING])
if data.get(spec.TAGS):
validated_data[spec.TAGS] = data[spec.TAGS]
if data.get(spec.HP_TUNING):
validated_data[spec.HP_TUNING] = HPTuningConfig.from_dict(
data[spec.HP_TUNING])
return validated_data
|
def validate_headers(spec, data)
|
Validates headers data and creates the config objects
| 2.27107
| 2.169838
| 1.046654
|
data = copy.deepcopy(data)
validated_data = {}
def validate_keys(section, config, section_data):
if not isinstance(section_data, dict) or section == spec.MODEL:
return
extra_args = [key for key in section_data.keys() if key not in config.SCHEMA().fields]
if extra_args:
raise PolyaxonfileError('Extra arguments passed for `{}`: {}'.format(
section, extra_args))
def add_validated_section(section, config):
if data.get(section):
section_data = data[section]
validate_keys(section=section, config=config, section_data=section_data)
validated_data[section] = config.from_dict(section_data)
add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG)
add_validated_section(spec.BUILD, BuildConfig)
add_validated_section(spec.RUN, RunConfig)
add_validated_section(spec.MODEL, ModelConfig)
add_validated_section(spec.TRAIN, TrainConfig)
add_validated_section(spec.EVAL, EvalConfig)
return validated_data
|
def validate(spec, data)
|
Validates the data and creates the config objects
| 2.763274
| 2.659963
| 1.038839
|
environment = data.get('environment')
if environment and environment.replicas:
validate_replicas(data.get('framework'), environment.replicas)
|
def validate_replicas(self, data)
|
Validate distributed experiment
| 8.568229
| 8.157669
| 1.050328
|
parsed_data = Parser.parse(self, self._data, matrix_declaration)
del parsed_data[self.HP_TUNING]
validator.validate(spec=self, data=parsed_data)
return ExperimentSpecification(values=[parsed_data, {'kind': self._EXPERIMENT}])
|
def get_experiment_spec(self, matrix_declaration)
|
Returns an experiment spec for this group spec and the given matrix declaration.
| 12.439118
| 12.292585
| 1.01192
|
if BaseSpecification.BUILD not in self._data:
return None
return BuildConfig.from_dict(self._data[BaseSpecification.BUILD])
|
def get_build_spec(self)
|
Returns a build spec for this group spec.
| 8.225156
| 6.868295
| 1.197554
|
is_grid_search = (
data.get('grid_search') is not None or
(data.get('grid_search') is None and
data.get('random_search') is None and
data.get('hyperband') is None and
data.get('bo') is None)
)
is_bo = data.get('bo') is not None
validate_matrix(data.get('matrix'), is_grid_search=is_grid_search, is_bo=is_bo)
|
def validate_matrix(self, data)
|
Validates matrix data and creates the config objects
| 2.717423
| 2.707375
| 1.003711
|
if not self._anchor:
self._anchor = str(uuid.uuid4())
return self._anchor
|
def anchor(self)
|
Generate an html anchor name
:return:
| 4.286237
| 4.288086
| 0.999569
|
cases = [x.html() for x in self.cases]
return .format(anchor=self.anchor(),
name=tag.text(self.name),
count=len(cases),
cases="".join(cases))
|
def html(self)
|
Render this test class as html
:return:
| 9.019391
| 9.348022
| 0.964845
|
return .format(name=tag.text(self.name), value=tag.text(self.value))
|
def html(self)
|
Render this property as html
:return:
| 11.075394
| 10.398663
| 1.065079
|
failure = ""
skipped = None
stdout = tag.text(self.stdout)
stderr = tag.text(self.stderr)
if self.skipped:
skipped = .format(msg=tag.text(self.skipped_msg),
skip=tag.text(self.skipped))
if self.failed():
failure = .format(msg=tag.text(self.failure_msg),
fail=tag.text(self.failure))
properties = [x.html() for x in self.properties]
return .format(anchor=self.anchor(),
testname=self.name,
testclassname=self.testclass.name,
duration=self.duration,
failure=failure,
skipped=skipped,
properties="".join(properties),
stdout=stdout,
stderr=stderr)
|
def html(self)
|
Render this test case as HTML
:return:
| 3.59189
| 3.452922
| 1.040247
|
tests = list()
for testclass in self.classes:
tests.extend(self.classes[testclass].cases)
return tests
|
def all(self)
|
Return all testcases
:return:
| 7.089425
| 4.920145
| 1.440898
|
return [test for test in self.all() if not test.failed() and not test.skipped()]
|
def passed(self)
|
Return all the passing testcases
:return:
| 7.564874
| 7.376765
| 1.0255
|
fails = ""
skips = ""
if len(self.failed()):
faillist = list()
for failure in self.failed():
faillist.append(
.format(anchor=failure.anchor(),
name=tag.text(
failure.testclass.name + '.' + failure.name)))
fails = .format(faillist="".join(faillist))
if len(self.skipped()):
skiplist = list()
for skipped in self.skipped():
skiplist.append(
.format(anchor=skipped.anchor(),
name=tag.text(
skipped.testclass.name + skipped.name)))
skips = .format(skiplist="".join(skiplist))
classlist = list()
for classname in self.classes:
testclass = self.classes[classname]
cases = list()
for testcase in testclass.cases:
if "pkcs11" in testcase.name:
assert True
cases.append(
.format(anchor=testcase.anchor(),
name=tag.text(testcase.name)))
classlist.append(.format(anchor=testclass.anchor(),
name=testclass.name,
cases="".join(cases)))
return .format(failed=fails,
skips=skips,
classlist="".join(classlist))
|
def toc(self)
|
Return a html table of contents
:return:
| 3.023529
| 3.044838
| 0.993002
|
classes = list()
package = ""
if self.package is not None:
package = "Package: " + self.package + "<br/>"
for classname in self.classes:
classes.append(self.classes[classname].html())
errs = ""
for error in self.errors:
if not len(errs):
errs += "<tr><th colspan='2' align='left'>Errors</th></tr>"
for part in ["type", "message", "text"]:
if part in error:
errs += "<tr><td>{}</td><td><pre>{}</pre></td></tr>".format(
part,
tag.text(error[part]))
stdio = ""
if self.stderr or self.stdout:
stdio += "<tr><th colspan='2' align='left'>Output</th></tr>"
if self.stderr:
stdio += "<tr><td>Stderr</td><td><pre>{}</pre></td></tr>".format(
tag.text(self.stderr))
if self.stdout:
stdio += "<tr><td>Stdout</td><td><pre>{}</pre></td></tr>".format(
tag.text(self.stdout))
props = ""
if len(self.properties):
props += "<table>"
propnames = sorted(self.properties)
for prop in propnames:
props += "<tr><th>{}</th><td>{}</td></tr>".format(prop, self.properties[prop])
props += "</table>"
return .format(name=tag.text(self.name),
anchor=self.anchor(),
duration=self.duration,
errs=errs,
stdio=stdio,
toc=self.toc(),
package=package,
properties=props,
classes="".join(classes),
count=len(self.all()),
fails=len(self.failed()))
|
def html(self)
|
Render this as html.
:return:
| 2.566578
| 2.557221
| 1.003659
|
thisdir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(thisdir, self.css), "r") as cssfile:
return cssfile.read()
|
def get_css(self)
|
Return the content of the css file
:return:
| 2.638483
| 2.86916
| 0.919601
|
suites = None
if isinstance(self.tree, ET.Element):
root = self.tree
else:
root = self.tree.getroot()
if root.tag == "testrun":
root = root[0]
if root.tag == "testsuite":
suites = [root]
if root.tag == "testsuites":
suites = [x for x in root]
assert suites, "could not find test suites in results xml"
for suite in suites:
cursuite = Suite()
self.suites.append(cursuite)
cursuite.name = suite.attrib["name"]
if "package" in suite.attrib:
cursuite.package = suite.attrib["package"]
cursuite.duration = float(suite.attrib.get("time", '0').replace(',',''))
for element in suite:
if element.tag == "error":
# top level error?
errtag = {
"message": element.attrib.get("message", ""),
"type": element.attrib.get("type", ""),
"text": element.text
}
cursuite.errors.append(errtag)
if element.tag == "system-out":
cursuite.stdout = element.text
if element.tag == "system-err":
cursuite.stderr = element.text
if element.tag == "properties":
for prop in element:
if prop.tag == "property":
cursuite.properties[prop.attrib["name"]] = prop.attrib["value"]
if element.tag == "testcase":
testcase = element
if not testcase.attrib.get("classname", None):
testcase.attrib["classname"] = NO_CLASSNAME
if testcase.attrib["classname"] not in cursuite:
testclass = Class()
testclass.name = testcase.attrib["classname"]
cursuite[testclass.name] = testclass
testclass = cursuite[testcase.attrib["classname"]]
newcase = Case()
newcase.name = testcase.attrib["name"]
newcase.testclass = testclass
newcase.duration = float(testcase.attrib.get("time", '0').replace(',',''))
testclass.cases.append(newcase)
# does this test case have any children?
for child in testcase:
if child.tag == "skipped":
newcase.skipped = child.text
if "message" in child.attrib:
newcase.skipped_msg = child.attrib["message"]
elif child.tag == "system-out":
newcase.stdout = child.text
elif child.tag == "system-err":
newcase.stderr = child.text
elif child.tag == "failure":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "error":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "properties":
for property in child:
newproperty = Property()
newproperty.name = property.attrib["name"]
newproperty.value = property.attrib["value"]
newcase.properties.append(newproperty)
|
def process(self)
|
Populate the report from the xml.
:return:
| 1.831434
| 1.811605
| 1.010945
|
if len(self.suites) > 1:
tochtml = "<ul>"
for suite in self.suites:
tochtml += '<li><a href="#{anchor}">{name}</a></li>'.format(
anchor=suite.anchor(),
name=tag.text(suite.name))
tochtml += "</ul>"
return tochtml
else:
return ""
|
def toc(self)
|
If this report has multiple suite results, make a table of contents listing each suite
:return:
| 3.068226
| 2.703845
| 1.134764
|
page = self.get_html_head()
page += "<body><h1>Test Report</h1>"
page += self.toc()
for suite in self.suites:
page += suite.html()
page += "</body></html>"
return page
|
def html(self)
|
Render the test suite as an HTML report with links to errors first.
:return:
| 4.45275
| 3.645131
| 1.221561
|
(opts, args) = PARSER.parse_args(args) if args else PARSER.parse_args()
if not len(args):
PARSER.print_usage()
sys.exit(1)
outfilename = args[0] + ".html"
if len(args) > 1:
outfilename = args[1]
report = parser.Junit(args[0])
html = report.html()
with open(outfilename, "wb") as outfile:
outfile.write(html.encode('utf-8'))
|
def run(args)
|
Run this tool
:param args:
:return:
| 3.023221
| 3.008185
| 1.004998
|
warnings = {}
msg = '{}'.format(end_of_line)
# generate warnings for continuous variables
if self._continuous:
# highlight far outliers
outlier_mask = self.cont_describe.far_outliers > 1
outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index)
if outlier_vars:
warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars
# highlight possible multimodal distributions using hartigan's dip test
# -1 values indicate NaN
modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05)
modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars
# highlight non normal distributions
# -1 values indicate NaN
modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001)
modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index)
if modal_vars:
warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars
# create the warning string
for n,k in enumerate(sorted(warnings)):
msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line)
return msg
|
def _generate_remark_str(self, end_of_line = '\n')
|
Generate a series of remarks that the user should consider
when interpreting the summary statistics.
| 3.328349
| 3.278923
| 1.015074
|
# assume all non-numerical and date columns are categorical
numeric_cols = set(data._get_numeric_data().columns.values)
date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
likely_cat = set(data.columns) - numeric_cols
likely_cat = list(likely_cat - date_cols)
# check proportion of unique values if numerical
for var in data._get_numeric_data().columns:
likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05
if likely_flag:
likely_cat.append(var)
return likely_cat
|
def _detect_categorical_columns(self,data)
|
Detect categorical columns if they are not specified.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
likely_cat : list
List of variables that appear to be categorical.
| 3.784716
| 3.629067
| 1.04289
|
return np.nanstd(x.values,ddof=self._ddof)
|
def _std(self,x)
|
Compute standard deviation with ddof degrees of freedom
| 6.03044
| 5.161947
| 1.168249
|
vals = x.values[~np.isnan(x.values)]
try:
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
low_bound = q1 - (iqr * threshold)
high_bound = q3 + (iqr * threshold)
outliers = np.where((vals > high_bound) | (vals < low_bound))
except:
outliers = []
return outliers
|
def _tukey(self,x,threshold)
|
Count outliers according to Tukey's rule.
Where Q1 is the lower quartile and Q3 is the upper quartile,
an outlier is an observation outside of the range:
[Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]
k = 1.5 indicates an outlier
k = 3.0 indicates an outlier that is "far out"
| 2.042495
| 2.000463
| 1.021011
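A worked example of the bounds computed in _tukey() with k = 1.5 (the `_outliers` setting); k = 3.0 gives the "far out" variant used by `_far_outliers`:

```python
import numpy as np

vals = np.array([1., 2., 3., 4., 100.])
q1, q3 = np.percentile(vals, [25, 75])       # 2.0, 4.0
iqr = q3 - q1                                # 2.0
low = q1 - 1.5 * iqr                         # -1.0
high = q3 + 1.5 * iqr                        # 7.0
outliers = np.where((vals > high) | (vals < low))
print(vals[outliers])                        # [100.]
```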
|
outliers = self._tukey(x, threshold = 1.5)
return np.size(outliers)
|
def _outliers(self,x)
|
Compute number of outliers
| 8.087596
| 7.178161
| 1.126695
|
outliers = self._tukey(x, threshold = 3.0)
return np.size(outliers)
|
def _far_outliers(self,x)
|
Compute number of "far out" outliers
| 9.236197
| 7.839552
| 1.178154
|
# set decimal places
if isinstance(self._decimals,int):
n = self._decimals
elif isinstance(self._decimals,dict):
try:
n = self._decimals[x.name]
except:
n = 1
else:
n = 1
warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n))
if x.name in self._nonnormal:
f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n)
return f.format(np.nanmedian(x.values),
np.nanpercentile(x.values,25), np.nanpercentile(x.values,75))
else:
f = '{{:.{}f}} ({{:.{}f}})'.format(n,n)
return f.format(np.nanmean(x.values),
np.nanstd(x.values,ddof=self._ddof))
|
def _t1_summary(self,x)
|
Compute median [IQR] or mean (Std) for the input series.
Parameters
----------
x : pandas Series
Series of values to be summarised.
| 2.730471
| 2.66144
| 1.025938
|
aggfuncs = [pd.Series.count,np.mean,np.median,self._std,
self._q25,self._q75,min,max,self._t1_summary,self._diptest,
self._outliers,self._far_outliers,self._normaltest]
# coerce continuous data to numeric
cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce')
# check all data in each continuous column is numeric
bad_cols = cont_data.count() != data[self._continuous].count()
bad_cols = cont_data.columns[bad_cols]
if len(bad_cols)>0:
raise InputError(.format(bad_cols.values))
# check for coerced column containing all NaN to warn user
for column in cont_data.columns[cont_data.count() == 0]:
self._non_continuous_warning(column)
if self._groupby:
# add the groupby column back
cont_data = cont_data.merge(data[[self._groupby]],
left_index=True, right_index=True)
# group and aggregate data
df_cont = pd.pivot_table(cont_data,
columns=[self._groupby],
aggfunc=aggfuncs)
else:
# if no groupby, just add single group column
df_cont = cont_data.apply(aggfuncs).T
df_cont.columns.name = 'overall'
df_cont.columns = pd.MultiIndex.from_product([df_cont.columns,
['overall']])
df_cont.index.rename('variable',inplace=True)
# remove prefix underscore from column names (e.g. _std -> std)
agg_rename = df_cont.columns.levels[0]
agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename]
df_cont.columns = df_cont.columns.set_levels(agg_rename, level=0)  # inplace= is removed in pandas 2.x
return df_cont
|
def _create_cont_describe(self,data)
|
Describe the continuous data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cont : pandas DataFrame
Summarise the continuous variables.
| 3.833933
| 3.884817
| 0.986902
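The grouped branch reduces to a pivot_table call like the following sketch (toy data; only a subset of the aggregation functions above):
import numpy as np
import pandas as pd

cont_data = pd.DataFrame({'height': [1.6, 1.7, 1.8, 1.5],
                          'group': ['a', 'a', 'b', 'b']})
df_cont = pd.pivot_table(cont_data, columns=['group'],
                         aggfunc=[np.mean, np.median, min, max])
print(df_cont)  # one row per variable, (aggfunc, group) column MultiIndex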
|
group_dict = {}
for g in self._groupbylvls:
if self._groupby:
d_slice = data.loc[data[self._groupby] == g, self._categorical]
else:
d_slice = data[self._categorical].copy()
# create a dataframe with freq, proportion
df = d_slice.copy()
# convert values to strings so ints are not coerced to booleans, keeping missing values as None
for column in df.columns:
df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values]
df = df.melt().groupby(['variable','value']).size().to_frame(name='freq')
df.index.set_names('level', level=1, inplace=True)
df['percent'] = df['freq'] / df.groupby(level=0)['freq'].transform('sum') * 100  # Series.sum(level=...) is removed in pandas 2.x
# set number of decimal places for percent
if isinstance(self._decimals,int):
n = self._decimals
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
elif isinstance(self._decimals,dict):
df.loc[:,'percent'] = df.apply(self._format_cat, axis=1)
else:
n = 1
f = '{{:.{}f}}'.format(n)
df['percent'] = df['percent'].astype(float).map(f.format)
# add n column, listing total non-null values for each variable
ct = d_slice.count().to_frame(name='n')
ct.index.name = 'variable'
df = df.join(ct)
# add null count
nulls = d_slice.isnull().sum().to_frame(name='isnull')
nulls.index.name = 'variable'
# only save null count to the first category for each variable
# do this by extracting the first category from the df row index
levels = df.reset_index()[['variable','level']].groupby('variable').first()
# add this category to the nulls table
nulls = nulls.join(levels)
nulls.set_index('level', append=True, inplace=True)
# join nulls to categorical
df = df.join(nulls)
# add summary column
df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')'
# add to dictionary
group_dict[g] = df
df_cat = pd.concat(group_dict,axis=1)
# ensure the groups are the 2nd level of the column index
if df_cat.columns.nlevels>1:
df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0)
return df_cat
|
def _create_cat_describe(self,data)
|
Describe the categorical data.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df_cat : pandas DataFrame
Summarise the categorical variables.
| 3.493469
| 3.555158
| 0.982648
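The freq/percent core of the loop can be sketched in isolation (toy column; a transform-based denominator replaces the deprecated sum(level=0)):
import pandas as pd

d_slice = pd.DataFrame({'sex': ['M', 'F', 'F', None]})
df = d_slice.melt().groupby(['variable', 'value']).size().to_frame(name='freq')
df['percent'] = df['freq'] / df.groupby(level=0)['freq'].transform('sum') * 100
print(df)  # F: 2 (66.7%), M: 1 (33.3%) -- nulls are excluded by groupby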
|
# list features of the variable e.g. matched, paired, n_expected
df=pd.DataFrame(index=self._continuous+self._categorical,
columns=['continuous','nonnormal','min_observed','pval','ptest'])
df.index.rename('variable', inplace=True)
df['continuous'] = np.where(df.index.isin(self._continuous),True,False)
df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False)
# list values for each variable, grouped by groupby levels
for v in df.index:
is_continuous = df.loc[v]['continuous']
is_categorical = ~df.loc[v]['continuous']
is_normal = ~df.loc[v]['nonnormal']
# if continuous, group data into list of lists
if is_continuous:
catlevels = None
grouped_data = []
for s in self._groupbylvls:
lvl_data = data.loc[data[self._groupby]==s, v]
# coerce to numeric and drop non-numeric data
lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna()
# append to overall group data
grouped_data.append(lvl_data.values)
min_observed = len(min(grouped_data,key=len))
# if categorical, create contingency table
elif is_categorical:
catlevels = sorted(data[v].astype('category').cat.categories)
grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v])
min_observed = grouped_data.sum(axis=1).min()
# minimum number of observations across all levels
df.loc[v,'min_observed'] = min_observed
# compute pvalues
df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v,
grouped_data,is_continuous,is_categorical,
is_normal,min_observed,catlevels)
return df
|
def _create_significance_table(self,data)
|
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
A table containing the p-values, test name, etc.
| 3.855447
| 3.921577
| 0.983137
|
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
|
def _create_cont_table(self,data)
|
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
| 5.76783
| 5.699296
| 1.012025
|
table = self.cat_describe['t1_summary'].copy()
# add the total count of null values across all levels
isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
isnull.index.rename('variable', inplace=True)
try:
table = table.join(isnull)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(isnull)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
|
def _create_cat_table(self,data)
|
Create table one for categorical data.
Returns
----------
table : pandas DataFrame
A table summarising the categorical variables.
| 5.029494
| 5.286982
| 0.951298
|
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
|
def _create_row_labels(self)
|
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
| 3.636369
| 3.245672
| 1.120374
|
'''
Scale factor for one-dimensional plug-in bandwidth selection.
'''
if deriv_order == 0:
return (3.0*nbr_data_pts/4)**(-1.0/5)
if deriv_order == 2:
return (7.0*nbr_data_pts/4)**(-1.0/9)
raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
|
def bandwidth_factor(nbr_data_pts, deriv_order=0)
|
Scale factor for one-dimensional plug-in bandwidth selection.
| 4.533295
| 3.447042
| 1.315126
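For example, with 1000 data points the two supported cases evaluate to (values rounded):
n = 1000
print((3.0 * n / 4) ** (-1.0 / 5))  # ~0.266  (density itself, deriv_order=0)
print((7.0 * n / 4) ** (-1.0 / 9))  # ~0.436  (second derivative, deriv_order=2)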
|
params_string = ""
# Parameters are passed. Turn the dict into a string like "a=1 b=2 c=3" string.
for key, value in sorted(params.items()):
# Strip off a leading underscore from the attribute's key to allow attributes like '_class'
# to be used as a CSS class specification instead of the reserved Python keyword 'class'.
key = key.lstrip("_")
params_string += u' {0}="{1}"'.format(key, value)
# Create the tag string
tag_string = u"<{0}{1}>".format(tag, params_string)
# Add text and closing tag if required.
if text:
tag_string += u"{0}</{1}>".format(text, tag)
return tag_string
|
def make_html_tag(tag, text=None, **params)
|
Create an HTML tag string.
tag
The HTML tag to use (e.g. 'a', 'span' or 'div')
text
The text to enclose between opening and closing tag. If no text is specified then only
the opening tag is returned.
Example::
make_html_tag('a', text="Hello", href="/another/page")
-> <a href="/another/page">Hello</a>
To use reserved Python keywords like "class" as a parameter prepend it with
an underscore. Instead of "class='green'" use "_class='green'".
Warning: Quotes and apostrophes are not escaped.
| 4.190655
| 4.388627
| 0.95489
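A usage sketch of the underscore escape described above (assumes make_html_tag from this listing is in scope):
print(make_html_tag('a', text='Hello', href='/another/page'))
# <a href="/another/page">Hello</a>
print(make_html_tag('span', text='3', _class='current'))
# <span class="current">3</span>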
|
leftmost_page = max(self.first_page, (self.page - radius))
rightmost_page = min(self.last_page, (self.page + radius))
nav_items = []
# Create a link to the first page (unless we are on the first page
# or there would be no need to insert '..' spacers)
if self.page != self.first_page and self.first_page < leftmost_page:
page = link_map["first_page"].copy()
page["value"] = unicode(page["number"])
nav_items.append(self.link_tag(page))
for item in link_map["range_pages"]:
nav_items.append(self.link_tag(item))
# Create a link to the very last page (unless we are on the last
# page or there would be no need to insert '..' spacers)
if self.page != self.last_page and rightmost_page < self.last_page:
page = link_map["last_page"].copy()
page["value"] = unicode(page["number"])
nav_items.append(self.link_tag(page))
return self.separator.join(nav_items)
|
def _range(self, link_map, radius)
|
Return the range of linked pages used to substitute the placeholder in the pattern.
| 2.74912
| 2.694674
| 1.020205
|
text = item["value"]
target_url = item["href"]
if not item["href"] or item["type"] in ("span", "current_page"):
if item["attrs"]:
text = make_html_tag("span", **item["attrs"]) + text + "</span>"
return text
return make_html_tag("a", text=text, href=target_url, **item["attrs"])
|
def default_link_tag(item)
|
Create an A-HREF tag that points to another page.
| 4.66943
| 4.257999
| 1.096625
|
'''Tags a string `corpus`.'''
# Assume untokenized corpus has \n between sentences and ' ' between words
s_split = SentenceTokenizer().tokenize if tokenize else lambda t: t.split('\n')
w_split = WordTokenizer().tokenize if tokenize else lambda s: s.split()
def split_sents(corpus):
for s in s_split(corpus):
yield w_split(s)
prev, prev2 = self.START
tokens = []
for words in split_sents(corpus):
context = self.START + [self._normalize(w) for w in words] + self.END
for i, word in enumerate(words):
tag = self.tagdict.get(word)
if not tag:
features = self._get_features(i, word, context, prev, prev2)
tag = self.model.predict(features)
tokens.append((word, tag))
prev2 = prev
prev = tag
return tokens
|
def tag(self, corpus, tokenize=True)
|
Tags a string `corpus`.
| 4.022725
| 4.029649
| 0.998282
|
'''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
controls the number of Perceptron training iterations.
:param sentences: A list of (words, tags) tuples.
:param save_loc: If not ``None``, saves a pickled model in this location.
:param nr_iter: Number of training iterations.
'''
self._make_tagdict(sentences)
self.model.classes = self.classes
for iter_ in range(nr_iter):
c = 0
n = 0
for words, tags in sentences:
prev, prev2 = self.START
context = self.START + [self._normalize(w) for w in words] \
+ self.END
for i, word in enumerate(words):
guess = self.tagdict.get(word)
if not guess:
feats = self._get_features(i, word, context, prev, prev2)
guess = self.model.predict(feats)
self.model.update(tags[i], guess, feats)
prev2 = prev
prev = guess
c += guess == tags[i]
n += 1
random.shuffle(sentences)
logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
self.model.average_weights()
# Pickle as a binary file
if save_loc is not None:
pickle.dump((self.model.weights, self.tagdict, self.classes),
open(save_loc, 'wb'), -1)
return None
|
def train(self, sentences, save_loc=None, nr_iter=5)
|
Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
controls the number of Perceptron training iterations.
:param sentences: A list of (words, tags) tuples.
:param save_loc: If not ``None``, saves a pickled model in this location.
:param nr_iter: Number of training iterations.
| 3.451465
| 2.775986
| 1.243329
|
'''Load a pickled model.'''
try:
w_td_c = pickle.load(open(loc, 'rb'))
except IOError:
msg = ("Missing trontagger.pickle file.")
raise MissingCorpusError(msg)
self.model.weights, self.tagdict, self.classes = w_td_c
self.model.classes = self.classes
return None
|
def load(self, loc)
|
Load a pickled model.
| 7.565444
| 7.872456
| 0.961002
|
'''Normalization used in pre-processing.
- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
'''
if '-' in word and word[0] != '-':
return '!HYPHEN'
elif word.isdigit() and len(word) == 4:
return '!YEAR'
elif word[0].isdigit():
return '!DIGITS'
else:
return word.lower()
|
def _normalize(self, word)
|
Normalization used in pre-processing.
- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS
:rtype: str
| 4.768972
| 2.08333
| 2.28911
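A self-contained copy of the rule set, to show what each branch matches:
def normalize(word):
    # standalone version of _normalize above
    if '-' in word and word[0] != '-':
        return '!HYPHEN'
    elif word.isdigit() and len(word) == 4:
        return '!YEAR'
    elif word[0].isdigit():
        return '!DIGITS'
    return word.lower()

for w in ['Stuttgart-based', '1984', '3rd', 'Apple']:
    print(w, '->', normalize(w))
# Stuttgart-based -> !HYPHEN, 1984 -> !YEAR, 3rd -> !DIGITS, Apple -> apple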
|
'''Map tokens into a feature representation, implemented as a
{hashable: float} dict. If the features change, a new model must be
trained.
'''
def add(name, *args):
features[' '.join((name,) + tuple(args))] += 1
i += len(self.START)
features = defaultdict(int)
# It's useful to have a constant feature, which acts sort of like a prior
add('bias')
add('i suffix', word[-3:])
add('i pref1', word[0])
add('i-1 tag', prev)
add('i-2 tag', prev2)
add('i tag+i-2 tag', prev, prev2)
add('i word', context[i])
add('i-1 tag+i word', prev, context[i])
add('i-1 word', context[i-1])
add('i-1 suffix', context[i-1][-3:])
add('i-2 word', context[i-2])
add('i+1 word', context[i+1])
add('i+1 suffix', context[i+1][-3:])
add('i+2 word', context[i+2])
return features
|
def _get_features(self, i, word, context, prev, prev2)
|
Map tokens into a feature representation, implemented as a
{hashable: float} dict. If the features change, a new model must be
trained.
| 3.916029
| 2.949827
| 1.327545
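A trimmed, self-contained sketch showing how the feature dict is keyed (only a few of the features above; the start-padding length is hard-coded to 2):
from collections import defaultdict

def get_features(i, word, context, prev, prev2):
    features = defaultdict(int)
    def add(name, *args):
        features[' '.join((name,) + tuple(args))] += 1
    i += 2  # offset into the context padded with two start tokens
    add('bias')
    add('i suffix', word[-3:])
    add('i-1 tag', prev)
    add('i word', context[i])
    return dict(features)

context = ['-START2-', '-START-', 'the', 'cat', 'sat', '-END-', '-END2-']
print(get_features(1, 'cat', context, prev='DT', prev2='-START2-'))
# {'bias': 1, 'i suffix cat': 1, 'i-1 tag DT': 1, 'i word cat': 1}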
|
'''Make a tag dictionary for single-tag words.'''
counts = defaultdict(lambda: defaultdict(int))
for words, tags in sentences:
for word, tag in zip(words, tags):
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
self.tagdict[word] = tag
|
def _make_tagdict(self, sentences)
|
Make a tag dictionary for single-tag words.
| 3.520892
| 3.215505
| 1.094973
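The thresholding logic in isolation, on a toy corpus where every word is frequent and unambiguous enough to enter the dictionary:
from collections import defaultdict

sentences = [(['the', 'cat'], ['DT', 'NN'])] * 25
counts = defaultdict(lambda: defaultdict(int))
for words, tags in sentences:
    for word, tag in zip(words, tags):
        counts[word][tag] += 1
tagdict = {}
for word, tag_freqs in counts.items():
    tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
    n = sum(tag_freqs.values())
    if n >= 20 and mode / n >= 0.97:  # frequent and nearly unambiguous
        tagdict[word] = tag
print(tagdict)  # {'the': 'DT', 'cat': 'NN'}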
|
'''Return an averaged perceptron model trained on ``examples`` for
``nr_iter`` iterations.
'''
model = AveragedPerceptron()
for i in range(nr_iter):
random.shuffle(examples)
for features, class_ in examples:
scores = model.predict(features)
guess, score = max(scores.items(), key=lambda i: i[1])
if guess != class_:
model.update(class_, guess, features)
model.average_weights()
return model
|
def train(nr_iter, examples)
|
Return an averaged perceptron model trained on ``examples`` for
``nr_iter`` iterations.
| 3.969149
| 2.816963
| 1.409017
|
timestamp = int(time())
header = bytearray(HEADER_SIZE)
# Fill the header bytearray with RTP header fields
# ...
header[0] = header[0] | V << 6
header[0] = header[0] | P << 5
header[0] = header[0] | X << 4
header[0] = header[0] | CC
header[1] = header[1] | M << 7
header[1] = header[1] | PT
header[2] = (seqNum >> 8) & 0xFF
header[3] = seqNum & 0xFF
header[4] = (timestamp >> 24) & 0xFF
header[5] = (timestamp >> 16) & 0xFF
header[6] = (timestamp >> 8) & 0xFF
header[7] = timestamp & 0xFF
header[8] = (SSRC >> 24) & 0xFF
header[9] = (SSRC >> 16) & 0xFF
header[10] = (SSRC >> 8) & 0xFF
header[11] = SSRC & 0xFF
self.header = header
# Get the payload
# ...
self.payload = payload
|
def encode(self, V, P, X, CC, seqNum, M, PT, SSRC, payload)
|
Encode the RTP packet with header fields and payload.
| 1.802928
| 1.771887
| 1.017519
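The same 12-byte layout can be cross-checked against struct.pack (field values are arbitrary examples):
import struct
import time

V, P, X, CC, M, PT = 2, 0, 0, 0, 0, 26   # e.g. PT 26 = JPEG
seqNum, SSRC = 7, 12345
timestamp = int(time.time())
byte0 = (V << 6) | (P << 5) | (X << 4) | CC
byte1 = (M << 7) | PT
header = struct.pack('!BBHII', byte0, byte1, seqNum, timestamp, SSRC)
assert len(header) == 12  # matches the 12-byte header built field-by-field above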
|
self.header = bytearray(byteStream[:HEADER_SIZE])
self.payload = byteStream[HEADER_SIZE:]
|
def decode(self, byteStream)
|
Decode the RTP packet.
| 5.188659
| 4.252159
| 1.220241
|
timestamp = self.header[4] << 24 | self.header[5] << 16 | self.header[6] << 8 | self.header[7]
return int(timestamp)
|
def timestamp(self)
|
Return timestamp.
| 2.848319
| 2.412085
| 1.180854
|