Column            Type           Range / classes
_id               stringlengths  2 to 7
title             stringlengths  1 to 88
partition         stringclasses  3 values
text              stringlengths  75 to 19.8k
language          stringclasses  1 value
meta_information  dict
q7300
IIIFAuth.login_service_description
train
def login_service_description(self):
    """Login service description.

    The login service description _MUST_ include the token service
    description. The authentication pattern is indicated via the
    profile URI which is built using self.auth_pattern.
    """
    label = 'Login to ' + self.name
    if (self.auth_type):
        label = label + ' (' + self.auth_type + ')'
    desc = {"@id": self.login_uri,
            "profile": self.profile_base + self.auth_pattern,
            "label": label}
    if (self.header):
        desc['header'] = self.header
    if (self.description):
        desc['description'] = self.description
    return desc
python
{ "resource": "" }
q7301
IIIFAuth.logout_service_description
train
def logout_service_description(self):
    """Logout service description."""
    label = 'Logout from ' + self.name
    if (self.auth_type):
        label = label + ' (' + self.auth_type + ')'
    return({"@id": self.logout_uri,
            "profile": self.profile_base + 'logout',
            "label": label})
python
{ "resource": "" }
q7302
IIIFAuth.access_token_response
train
def access_token_response(self, token, message_id=None):
    """Access token response structure.

    Success if token is set; otherwise (None or empty string) an
    error response is given. If message_id is set then an extra
    messageId attribute is set in the response to handle
    postMessage() responses.
    """
    if (token):
        data = {"accessToken": token,
                "expiresIn": self.access_token_lifetime}
        if (message_id):
            data['messageId'] = message_id
    else:
        data = {"error": "client_unauthorized",
                "description": "No authorization details received"}
    return data
python
{ "resource": "" }
q7303
IIIFAuth._generate_random_string
train
def _generate_random_string(self, container, length=20):
    """Generate a random cookie or token string not in container.

    The cookie or token should be secure in the sense that it should
    not be feasible to guess its value. Because it is not derived from
    anything else, there is no risk of the token being computed, and
    no information can leak from the token.
    """
    while True:
        s = ''.join([random.SystemRandom().choice(
            string.digits + string.ascii_letters) for n in range(length)])
        if (s not in container):
            break
    return s
python
{ "resource": "" }
q7304
IIIFAuth.access_cookie
train
def access_cookie(self, account):
    """Make and store access cookie for a given account.

    If account is allowed then make a cookie and add it to the dict of
    accepted access cookies with current timestamp as the value. Return
    the access cookie.

    Otherwise return None.
    """
    if (self.account_allowed(account)):
        cookie = self._generate_random_string(self.access_cookies)
        self.access_cookies[cookie] = int(time.time())
        return cookie
    else:
        return None
python
{ "resource": "" }
q7305
IIIFAuth.access_cookie_valid
train
def access_cookie_valid(self, cookie, log_msg):
    """Check access cookie validity.

    Returns true if the access cookie is valid. The set of allowed
    access cookies is stored in self.access_cookies.

    Uses log_msg as prefix to info level log message of acceptance
    or rejection.
    """
    if (cookie in self.access_cookies):
        age = int(time.time()) - self.access_cookies[cookie]
        if (age <= (self.access_cookie_lifetime + 1)):
            self.logger.info(log_msg + " " + cookie +
                             " ACCEPTED COOKIE (%ds old)" % age)
            return True
        # Expired...
        self.logger.info(log_msg + " " + cookie +
                         " EXPIRED COOKIE (%ds old > %ds)" %
                         (age, self.access_cookie_lifetime))
        # Keep cookie for 2x lifetime in order to generate
        # helpful expired message
        if (age > (self.access_cookie_lifetime * 2)):
            del self.access_cookies[cookie]
        return False
    else:
        self.logger.info(log_msg + " " + cookie + " REJECTED COOKIE")
        return False
python
{ "resource": "" }
q7306
IIIFAuth.access_token
train
def access_token(self, cookie):
    """Make and store access token as proxy for the access cookie.

    Create an access token to act as a proxy for access cookie, add
    it to the dict of accepted access tokens with (cookie, current
    timestamp) as the value. Return the access token.

    Return None if cookie is not set.
    """
    if (cookie):
        token = self._generate_random_string(self.access_tokens)
        self.access_tokens[token] = (cookie, int(time.time()))
        return token
    else:
        return None
python
{ "resource": "" }
q7307
IIIFAuth.access_token_valid
train
def access_token_valid(self, token, log_msg):
    """Check token validity.

    Returns true if the token is valid. The set of allowed access
    tokens is stored in self.access_tokens.

    Uses log_msg as prefix to info level log message of acceptance
    or rejection.
    """
    if (token in self.access_tokens):
        (cookie, issue_time) = self.access_tokens[token]
        age = int(time.time()) - issue_time
        if (age <= (self.access_token_lifetime + 1)):
            self.logger.info(log_msg + " " + token +
                             " ACCEPTED TOKEN (%ds old)" % age)
            return True
        # Expired...
        self.logger.info(log_msg + " " + token +
                         " EXPIRED TOKEN (%ds old > %ds)" %
                         (age, self.access_token_lifetime))
        # Keep token for 2x lifetime in order to generate
        # helpful expired message
        if (age > (self.access_token_lifetime * 2)):
            del self.access_tokens[token]
        return False
    else:
        self.logger.info(log_msg + " " + token + " REJECTED TOKEN")
        return False
python
{ "resource": "" }
q7308
IIIFAuthFlask.info_authn
train
def info_authn(self):
    """Check to see if user is authenticated for info.json.

    Must have Authorization header with value that has the form
    "Bearer TOKEN", where TOKEN is an appropriate and valid access
    token.
    """
    authz_header = request.headers.get('Authorization', '[none]')
    if (not authz_header.startswith('Bearer ')):
        return False
    token = authz_header[7:]
    return self.access_token_valid(
        token, "info_authn: Authorization header")
python
{ "resource": "" }
q7309
IIIFAuthFlask.image_authn
train
def image_authn(self):
    """Check to see if user is authenticated for image requests.

    Must have access cookie with an appropriate value.
    """
    authn_cookie = request.cookies.get(
        self.access_cookie_name, default='[none]')
    return self.access_cookie_valid(authn_cookie, "image_authn: auth cookie")
python
{ "resource": "" }
q7310
IIIFAuthFlask.logout_handler
train
def logout_handler(self, **args):
    """Handler for logout button.

    Delete cookies and return HTML that immediately closes the window.
    """
    response = make_response(
        "<html><script>window.close();</script></html>", 200,
        {'Content-Type': "text/html"})
    response.set_cookie(self.account_cookie_name, expires=0)
    response.set_cookie(self.access_cookie_name, expires=0)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
python
{ "resource": "" }
q7311
IIIFManipulatorNetpbm.find_binaries
train
def find_binaries(cls, tmpdir=None, shellsetup=None, pnmdir=None):
    """Set instance variables for directory and binary locations.

    FIXME - should accept params to set things other than defaults.
    """
    cls.tmpdir = ('/tmp' if (tmpdir is None) else tmpdir)
    # Shell setup command (e.g. set library path)
    cls.shellsetup = ('' if (shellsetup is None) else shellsetup)
    if (pnmdir is None):
        cls.pnmdir = '/usr/bin'
        for dir in ('/usr/local/bin', '/sw/bin'):
            if (os.path.isfile(os.path.join(dir, 'pngtopnm'))):
                cls.pnmdir = dir
    else:
        cls.pnmdir = pnmdir
    # Recklessly assume everything else under cls.pnmdir
    cls.pngtopnm = os.path.join(cls.pnmdir, 'pngtopnm')
    cls.jpegtopnm = os.path.join(cls.pnmdir, 'jpegtopnm')
    cls.pnmfile = os.path.join(cls.pnmdir, 'pnmfile')
    cls.pnmcut = os.path.join(cls.pnmdir, 'pnmcut')
    cls.pnmscale = os.path.join(cls.pnmdir, 'pnmscale')
    cls.pnmrotate = os.path.join(cls.pnmdir, 'pnmrotate')
    cls.pnmflip = os.path.join(cls.pnmdir, 'pnmflip')
    cls.pnmtopng = os.path.join(cls.pnmdir, 'pnmtopng')
    cls.ppmtopgm = os.path.join(cls.pnmdir, 'ppmtopgm')
    cls.pnmtotiff = os.path.join(cls.pnmdir, 'pnmtotiff')
    cls.pnmtojpeg = os.path.join(cls.pnmdir, 'pnmtojpeg')
    cls.pamditherbw = os.path.join(cls.pnmdir, 'pamditherbw')
    # Need djatoka to get jp2 output
    cls.djatoka_comp = '/Users/simeon/packages/adore-djatoka-1.1/bin/compress.sh'
python
{ "resource": "" }
q7312
IIIFManipulatorNetpbm.do_first
train
def do_first(self):
    """Create PNM file from input image file."""
    pid = os.getpid()
    self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid))
    outfile = self.basename + '.pnm'
    # Convert source file to pnm
    filetype = self.file_type(self.srcfile)
    if (filetype == 'png'):
        if (self.shell_call(self.pngtopnm + ' ' + self.srcfile +
                            ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from pngtopnm.")
    elif (filetype == 'jpg'):
        if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile +
                            ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from jpegtopnm.")
    else:
        raise IIIFError(code='501',
                        text='bad input file format (only know how to read png/jpeg)')
    self.tmpfile = outfile
    # Get size
    (self.width, self.height) = self.image_size(self.tmpfile)
python
{ "resource": "" }
q7313
IIIFManipulatorNetpbm.file_type
train
def file_type(self, file):
    """Use python-magic to determine file type.

    Returns 'png' or 'jpg' on success, nothing on failure.
    """
    try:
        magic_text = magic.from_file(file)
        if (isinstance(magic_text, bytes)):
            # In python2 and travis python3 (?!) decode to get unicode string
            magic_text = magic_text.decode('utf-8')
    except (TypeError, IOError):
        return
    if (re.search('PNG image data', magic_text)):
        return('png')
    elif (re.search('JPEG image data', magic_text)):
        return('jpg')
    # failed
    return
python
{ "resource": "" }
q7314
IIIFManipulatorNetpbm.image_size
train
def image_size(self, pnmfile):
    """Get width and height of pnm file.

    simeon@homebox src>pnmfile /tmp/214-2.png
    /tmp/214-2.png:PPM raw, 100 by 100 maxval 255
    """
    pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r')
    pnmfileout = pout.read(200)
    pout.close()
    m = re.search(r', (\d+) by (\d+) ', pnmfileout)
    if (m is None):
        raise IIIFError(
            text="Bad output from pnmfile when trying to get size.")
    w = int(m.group(1))
    h = int(m.group(2))
    # print "pnmfile output = %s" % (pnmfileout)
    # print "image size = %d,%d" % (w,h)
    return(w, h)
python
{ "resource": "" }
q7315
IIIFManipulatorNetpbm.shell_call
train
def shell_call(self, shellcmd):
    """Shell call with necessary setup first."""
    return(subprocess.call(self.shellsetup + shellcmd, shell=True))
python
{ "resource": "" }
q7316
IIIFManipulatorNetpbm.cleanup
train
def cleanup(self):
    """Clean up any temporary files."""
    for file in glob.glob(self.basename + '*'):
        os.unlink(file)
python
{ "resource": "" }
q7317
IIIFError.image_server_response
train
def image_server_response(self, api_version=None):
    """Response, code and headers for image server error response.

    api_version selects the format (XML for 1.0, plain text for 1.1
    and later). The return value is a tuple of
      response - body of HTTP response
      status - the HTTP status code
      headers - a dict of HTTP headers which will include the Content-Type

    As a side effect the routine sets self.content_type to the correct
    media type for the response.
    """
    headers = dict(self.headers)
    if (api_version < '1.1'):
        headers['Content-Type'] = 'text/xml'
        response = self.as_xml()
    else:
        headers['Content-Type'] = 'text/plain'
        response = self.as_txt()
    return(response, self.code, headers)
python
{ "resource": "" }
q7318
IIIFError.as_xml
train
def as_xml(self):
    """XML representation of the error to be used in HTTP response.

    This XML format follows the IIIF Image API v1.0 specification,
    see <http://iiif.io/api/image/1.0/#error>
    """
    # Build tree
    spacing = ("\n" if (self.pretty_xml) else "")
    root = Element('error', {'xmlns': I3F_NS})
    root.text = spacing
    e_parameter = Element('parameter', {})
    e_parameter.text = self.parameter
    e_parameter.tail = spacing
    root.append(e_parameter)
    if (self.text):
        e_text = Element('text', {})
        e_text.text = self.text
        e_text.tail = spacing
        root.append(e_text)
    # Write out as XML document to return
    tree = ElementTree(root)
    xml_buf = io.BytesIO()
    if (sys.version_info < (2, 7)):
        tree.write(xml_buf, encoding='UTF-8')
    else:
        tree.write(xml_buf, encoding='UTF-8',
                   xml_declaration=True, method='xml')
    return(xml_buf.getvalue().decode('utf-8'))
python
{ "resource": "" }
q7319
IIIFError.as_txt
train
def as_txt(self):
    """Text rendering of error response.

    Designed for use with Image API version 1.1 and above where the
    error response is suggested to be text or html but not otherwise
    specified. Intended to provide useful information for debugging.
    """
    s = "IIIF Image Server Error\n\n"
    s += self.text if (self.text) else 'UNKNOWN_ERROR'
    s += "\n\n"
    if (self.parameter):
        s += "parameter=%s\n" % self.parameter
    if (self.code):
        s += "code=%d\n\n" % self.code
    for header in sorted(self.headers):
        s += "header %s=%s\n" % (header, self.headers[header])
    return s
python
{ "resource": "" }
q7320
IIIFAuthGoogle.login_handler
train
def login_handler(self, config=None, prefix=None, **args):
    """OAuth starts here, redirect user to Google."""
    params = {
        'response_type': 'code',
        'client_id': self.google_api_client_id,
        'redirect_uri': self.scheme_host_port_prefix(
            'http', config.host, config.port, prefix) + '/home',
        'scope': self.google_api_scope,
        'state': self.request_args_get('next', default=''),
    }
    url = self.google_oauth2_url + 'auth?' + urlencode(params)
    return self.login_handler_redirect(url)
python
{ "resource": "" }
q7321
IIIFAuthGoogle.google_get_token
train
def google_get_token(self, config, prefix):
    """Make request to Google API to get token."""
    params = {
        'code': self.request_args_get('code', default=''),
        'client_id': self.google_api_client_id,
        'client_secret': self.google_api_client_secret,
        'redirect_uri': self.scheme_host_port_prefix(
            'http', config.host, config.port, prefix) + '/home',
        'grant_type': 'authorization_code',
    }
    payload = urlencode(params).encode('utf-8')
    url = self.google_oauth2_url + 'token'
    req = Request(url, payload)
    json_str = urlopen(req).read()
    return json.loads(json_str.decode('utf-8'))
python
{ "resource": "" }
q7322
IIIFAuthGoogle.google_get_data
train
def google_get_data(self, config, response):
    """Make request to Google API to get profile data for the user."""
    params = {
        'access_token': response['access_token'],
    }
    payload = urlencode(params)
    url = self.google_api_url + 'userinfo?' + payload
    req = Request(url)
    json_str = urlopen(req).read()
    return json.loads(json_str.decode('utf-8'))
python
{ "resource": "" }
q7323
IIIFManipulator.compliance_uri
train
def compliance_uri(self):
    """Compliance URI based on api_version.

    Value is based on api_version and compliance_level, and will be
    None if either is unset/unrecognized. The assumption here is that
    the api_version and level are orthogonal; override this method if
    that isn't true.
    """
    if (self.api_version == '1.0'):
        uri_pattern = r'http://library.stanford.edu/iiif/image-api/compliance.html#level%d'
    elif (self.api_version == '1.1'):
        uri_pattern = r'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d'
    elif (self.api_version == '2.0' or self.api_version == '2.1'):
        uri_pattern = r'http://iiif.io/api/image/2/level%d.json'
    else:
        return
    if (self.compliance_level is None):
        return
    return(uri_pattern % self.compliance_level)
python
{ "resource": "" }
q7324
IIIFManipulator.derive
train
def derive(self, srcfile=None, request=None, outfile=None):
    """Do sequence of manipulations for IIIF to derive output image.

    Named arguments:
      srcfile -- source image file
      request -- IIIFRequest object with parsed parameters
      outfile -- output image file. If set then the output will be
                 written to that file, otherwise a new temporary file
                 will be created and outfile set to its location.

    See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order
      Region THEN Size THEN Rotation THEN Quality THEN Format

    Typical use:
      r = IIIFRequest(region=...)
      m = IIIFManipulator()
      try:
          m.derive(srcfile='a.jpg', request=r)
          # .. serve m.outfile
      except IIIFError as e:
          # ..
      finally:
          m.cleanup()  # removes temp m.outfile
    """
    # set if specified
    if (srcfile is not None):
        self.srcfile = srcfile
    if (request is not None):
        self.request = request
    if (outfile is not None):
        self.outfile = outfile
    if (self.outfile is not None):
        # create path to output dir if necessary
        dir = os.path.dirname(self.outfile)
        if (not os.path.exists(dir)):
            os.makedirs(dir)
    #
    self.do_first()
    (x, y, w, h) = self.region_to_apply()
    self.do_region(x, y, w, h)
    (w, h) = self.size_to_apply()
    self.do_size(w, h)
    (mirror, rot) = self.rotation_to_apply(no_mirror=True)
    self.do_rotation(mirror, rot)
    (quality) = self.quality_to_apply()
    self.do_quality(quality)
    self.do_format(self.request.format)
    self.do_last()
    return(self.outfile, self.mime_type)
python
{ "resource": "" }
q7325
IIIFManipulator.do_region
train
def do_region(self, x, y, w, h):
    """Null implementation of region selection."""
    if (x is not None):
        raise IIIFError(code=501, parameter="region",
                        text="Null manipulator supports only region=/full/.")
python
{ "resource": "" }
q7326
IIIFManipulator.do_quality
train
def do_quality(self, quality):
    """Null implementation of quality."""
    if (self.api_version >= '2.0'):
        if (quality != "default"):
            raise IIIFError(code=501, parameter="default",
                            text="Null manipulator supports only quality=default.")
    else:
        # versions 1.0 and 1.1
        if (quality != "native"):
            raise IIIFError(code=501, parameter="native",
                            text="Null manipulator supports only quality=native.")
python
{ "resource": "" }
q7327
IIIFManipulator.do_format
train
def do_format(self, format):
    """Null implementation of format selection.

    This is the last step. This null implementation does not accept
    any specification of a format because we don't even know what the
    input format is.
    """
    if (format is not None):
        raise IIIFError(code=415, parameter="format",
                        text="Null manipulator does not support specification of output format.")
    #
    if (self.outfile is None):
        self.outfile = self.srcfile
    else:
        try:
            shutil.copyfile(self.srcfile, self.outfile)
        except IOError as e:
            raise IIIFError(code=500,
                            text="Failed to copy file (%s)." % (str(e)))
    self.mime_type = None
python
{ "resource": "" }
q7328
IIIFManipulator.region_to_apply
train
def region_to_apply(self):
    """Return the x,y,w,h parameters to extract given image width and height.

    Assume image width and height are available in self.width and
    self.height, and self.request is an IIIFRequest object.

    Expected use:
      (x,y,w,h) = self.region_to_apply()
      if (x is None):
          # full image
      else:
          # extract

    Returns (None,None,None,None) if no extraction is required.
    """
    if (self.request.region_full or
            (self.request.region_pct and
             self.request.region_xywh == (0, 0, 100, 100))):
        return(None, None, None, None)
    # Cannot do anything else unless we know size (in self.width and
    # self.height)
    if (self.width <= 0 or self.height <= 0):
        raise IIIFError(code=501, parameter='region',
                        text="Region parameters require knowledge of image size which is not implemented.")
    if (self.request.region_square):
        if (self.width <= self.height):
            y_offset = (self.height - self.width) / 2
            return(0, y_offset, self.width, self.width)
        else:  # self.width > self.height
            x_offset = (self.width - self.height) / 2
            return(x_offset, 0, self.height, self.height)
    # pct or explicit pixel sizes
    pct = self.request.region_pct
    (x, y, w, h) = self.request.region_xywh
    # Convert pct to pixels based on actual size
    if (pct):
        x = int((x / 100.0) * self.width + 0.5)
        y = int((y / 100.0) * self.height + 0.5)
        w = int((w / 100.0) * self.width + 0.5)
        h = int((h / 100.0) * self.height + 0.5)
    # Check if boundary extends beyond image and truncate
    if ((x + w) > self.width):
        w = self.width - x
    if ((y + h) > self.height):
        h = self.height - y
    # Final check to see if we have the whole image
    if (w == 0 or h == 0):
        raise IIIFZeroSizeError(code=400, parameter='region',
                                text="Region parameters would result in zero size result image.")
    if (x == 0 and y == 0 and w == self.width and h == self.height):
        return(None, None, None, None)
    return(x, y, w, h)
python
{ "resource": "" }
q7329
IIIFManipulator.size_to_apply
train
def size_to_apply(self):
    """Calculate size of image scaled using size parameters.

    Assumes current image width and height are available in self.width
    and self.height, and self.request is an IIIFRequest object.

    Formats are:
      w,    ,h    w,h    pct:p    !w,h    full    max

    Returns (None,None) if no scaling is required.

    If max is requested and neither max_area nor max_width is
    specified then this is the same as full. Otherwise the limits
    are used to determine the size.
    """
    if (self.request.size_full or self.request.size_pct == 100.0):
        # full size
        return(None, None)
    # Not trivially full size, look at possibilities in turn
    w = self.width
    h = self.height
    if (self.request.size_max):
        # use size limits if present, else full
        if (self.max_area and self.max_area < (w * h)):
            scale = (float(self.max_area) / float(w * h)) ** 0.5
            w = int(w * scale + 0.5)
            h = int(h * scale + 0.5)
        if (self.max_width):
            max_height = self.max_height if self.max_height is not None else self.max_width
            if (self.max_width < w):
                # calculate wrt original width, height rather than
                # w, h to avoid compounding rounding issues
                scale = float(self.max_width) / float(self.width)
                w = int(self.width * scale + 0.5)
                h = int(self.height * scale + 0.5)
            if (max_height < h):
                scale = float(max_height) / float(self.height)
                w = int(self.width * scale + 0.5)
                h = int(self.height * scale + 0.5)
    elif (self.request.size_pct is not None):
        w = int(self.width * self.request.size_pct / 100.0 + 0.5)
        h = int(self.height * self.request.size_pct / 100.0 + 0.5)
    elif (self.request.size_bang):
        # Have "!w,h" form
        (mw, mh) = self.request.size_wh
        # Pick smaller fraction and then work from that...
        frac = min((float(mw) / float(self.width)),
                   (float(mh) / float(self.height)))
        w = int(self.width * frac + 0.5)
        h = int(self.height * frac + 0.5)
    else:
        # Must now be "w,h", "w," or ",h". If both are specified then
        # this will be the size, otherwise find the other to keep the
        # aspect ratio
        (w, h) = self.request.size_wh
        if (w is None):
            w = int(self.width * h / self.height + 0.5)
        elif (h is None):
            h = int(self.height * w / self.width + 0.5)
    # Now have w,h; sanity check and return
    if (w == 0 or h == 0):
        raise IIIFZeroSizeError(
            code=400, parameter='size',
            text="Size parameter would result in zero size result image (%d,%d)." % (w, h))
    # Below would be test for scaling up image size, this is allowed by spec
    # if ( w>self.width or h>self.height ):
    #     raise IIIFError(code=400,parameter='size',
    #         text="Size requests scaling up image to larger than original.")
    if (w == self.width and h == self.height):
        return(None, None)
    return(w, h)
python
{ "resource": "" }
q7330
IIIFManipulator.quality_to_apply
train
def quality_to_apply(self):
    """Value of quality parameter to use in processing request.

    Simple substitution of 'native' or 'default' if no quality
    parameter is specified.
    """
    if (self.request.quality is None):
        if (self.api_version <= '1.1'):
            return('native')
        else:
            return('default')
    return(self.request.quality)
python
{ "resource": "" }
q7331
IIIFManipulator.scale_factors
train
def scale_factors(self, tile_width, tile_height=None):
    """Return a set of scale factors for given tile and window size.

    Gives a set of scale factors, starting at 1, and in multiples
    of 2. The largest scale factor is such that one tile will cover
    the entire image (self.width, self.height).

    If tile_height is not specified then tiles are assumed to be
    squares of tile_width pixels.
    """
    if (not tile_height):
        tile_height = tile_width
    sf = 1
    scale_factors = [sf]
    for j in range(30):  # limit of 2^30, should be enough!
        sf = 2 * sf
        if (tile_width * sf > self.width and
                tile_height * sf > self.height):
            break
        scale_factors.append(sf)
    return scale_factors
python
{ "resource": "" }
q7332
PixelGen.color
train
def color(self, n):
    """Color of pixel that reached limit after n iterations.

    Returns a color tuple for use with PIL, tending toward red as we
    tend toward self.max_iter iterations.
    """
    red = int(n * self.shade_factor)
    if (red > 255):
        red = 255
    return (red, 50, 100)
python
{ "resource": "" }
q7333
PixelGen.mpixel
train
def mpixel(self, z, n=0):
    """Iteration in Mandelbrot coordinate z."""
    z = z * z + self.c
    if (abs(z) > 2.0):
        return self.color(n)
    n += 1
    if (n > self.max_iter):
        return None
    return self.mpixel(z, n)
python
{ "resource": "" }
q7334
static_partial_tile_sizes
train
def static_partial_tile_sizes(width, height, tilesize, scale_factors):
    """Generator for partial tile sizes for zoomed in views.

    Positional arguments:
      width -- width of full size image
      height -- height of full size image
      tilesize -- width and height of tiles
      scale_factors -- iterable of scale factors, typically [1,2,4..]

    Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
    """
    for sf in scale_factors:
        if (sf * tilesize >= width and sf * tilesize >= height):
            continue  # avoid any full-region tiles
        rts = tilesize * sf  # tile size in original region
        xt = (width - 1) // rts + 1
        yt = (height - 1) // rts + 1
        for nx in range(xt):
            rx = nx * rts
            rxe = rx + rts
            if (rxe > width):
                rxe = width
            rw = rxe - rx
            # same as sw = int(math.ceil(rw/float(sf)))
            sw = (rw + sf - 1) // sf
            for ny in range(yt):
                ry = ny * rts
                rye = ry + rts
                if (rye > height):
                    rye = height
                rh = rye - ry
                # same as sh = int(math.ceil(rh/float(sf)))
                sh = (rh + sf - 1) // sf
                yield([rx, ry, rw, rh], [sw, sh])
python
{ "resource": "" }
q7335
static_full_sizes
train
def static_full_sizes(width, height, tilesize):
    """Generator for scaled-down full image sizes.

    Positional arguments:
      width -- width of full size image
      height -- height of full size image
      tilesize -- width and height of tiles

    Yields [sw,sh], the size for each full-region tile that is less
    than the tilesize. This includes tiles up to the full image size
    if that is smaller than the tilesize.
    """
    # FIXME - Not sure what correct algorithm is for this, from
    # observation of Openseadragon it seems that one keeps halving
    # the pixel size of the full image until both width and
    # height are less than the tile size. After that all subsequent
    # halvings of the image size are used, all the way down to 1,1.
    # It seems that without these reduced size full-region images,
    # OpenSeadragon will not display any unzoomed image in small windows.
    #
    # I do not understand the algorithm that OpenSeadragon uses (or
    # know where it is in the code) to decide how small a version of
    # the complete image to request. It seems that there is a bug in
    # OpenSeadragon here because in some cases it requests images
    # of size 1,1 multiple times, which is anyway a useless image.
    for level in range(0, 20):
        factor = 2.0 ** level
        sw = int(width / factor + 0.5)
        sh = int(height / factor + 0.5)
        if (sw < tilesize and sh < tilesize):
            if (sw < 1 or sh < 1):
                break
            yield([sw, sh])
python
{ "resource": "" }
q7336
IIIFStatic.parse_extra
train
def parse_extra(self, extra):
    """Parse extra request parameters to IIIFRequest object."""
    if extra.startswith('/'):
        extra = extra[1:]
    r = IIIFRequest(identifier='dummy', api_version=self.api_version)
    r.parse_url(extra)
    if (r.info):
        raise IIIFStaticError("Attempt to specify Image Information in extras.")
    return(r)
python
{ "resource": "" }
q7337
IIIFStatic.get_osd_config
train
def get_osd_config(self, osd_version):
    """Select appropriate portion of config.

    If the version requested is not supported then raise an exception
    with a helpful error message listing the versions supported.
    """
    if (osd_version in self.osd_config):
        return(self.osd_config[osd_version])
    else:
        raise IIIFStaticError(
            "OpenSeadragon version %s not supported, available versions are %s" %
            (osd_version, ', '.join(sorted(self.osd_config.keys()))))
python
{ "resource": "" }
q7338
IIIFStatic.generate
train
def generate(self, src=None, identifier=None):
    """Generate static files for one source image."""
    self.src = src
    self.identifier = identifier
    # Get image details and calculate tiles
    im = self.manipulator_klass()
    im.srcfile = self.src
    im.set_max_image_pixels(self.max_image_pixels)
    im.do_first()
    width = im.width
    height = im.height
    scale_factors = im.scale_factors(self.tilesize)
    # Setup destination and IIIF identifier
    self.setup_destination()
    # Write out images
    for (region, size) in static_partial_tile_sizes(width, height,
                                                    self.tilesize,
                                                    scale_factors):
        self.generate_tile(region, size)
    sizes = []
    for size in static_full_sizes(width, height, self.tilesize):
        # See https://github.com/zimeon/iiif/issues/9
        sizes.append({'width': size[0], 'height': size[1]})
        self.generate_tile('full', size)
    for request in self.extras:
        request.identifier = self.identifier
        if (request.is_scaled_full_image()):
            sizes.append({'width': request.size_wh[0],
                          'height': request.size_wh[1]})
        self.generate_file(request)
    # Write info.json
    qualities = ['default'] if (self.api_version > '1.1') else ['native']
    info = IIIFInfo(level=0, server_and_prefix=self.prefix,
                    identifier=self.identifier,
                    width=width, height=height,
                    scale_factors=scale_factors,
                    tile_width=self.tilesize, tile_height=self.tilesize,
                    formats=['jpg'], qualities=qualities,
                    sizes=sizes, api_version=self.api_version)
    json_file = os.path.join(self.dst, self.identifier, 'info.json')
    if (self.dryrun):
        self.logger.warning(
            "dryrun mode, would write the following files:")
        self.logger.warning("%s / %s/%s" %
                            (self.dst, self.identifier, 'info.json'))
    else:
        with open(json_file, 'w') as f:
            f.write(info.as_json())
        self.logger.info("%s / %s/%s" %
                         (self.dst, self.identifier, 'info.json'))
        self.logger.debug("Written %s" % (json_file))
python
{ "resource": "" }
q7339
IIIFStatic.generate_tile
train
def generate_tile(self, region, size):
    """Generate one tile for the given region and size of this image."""
    r = IIIFRequest(identifier=self.identifier,
                    api_version=self.api_version)
    if (region == 'full'):
        r.region_full = True
    else:
        r.region_xywh = region  # [rx,ry,rw,rh]
    r.size_wh = size  # [sw,sh]
    r.format = 'jpg'
    self.generate_file(r, True)
python
{ "resource": "" }
q7340
IIIFStatic.generate_file
train
def generate_file(self, r, undistorted=False):
    """Generate file for IIIFRequest object r from this image.

    FIXME - Would be nicer to have the test for an undistorted image
    request based on the IIIFRequest object, and then know whether to
    apply canonicalization or not.

    Logically we might use `w,h` instead of the Image API v2.0
    canonical form `w,` if the api_version is 1.x. However, OSD 1.2.1
    and 2.x assume the new canonical form even in the case where the
    API version is declared earlier. Thus, determine whether to use
    the canonical or `w,h` form based solely on the setting of
    osd_version.
    """
    use_canonical = self.get_osd_config(self.osd_version)['use_canonical']
    height = None
    if (undistorted and use_canonical):
        height = r.size_wh[1]
        r.size_wh = [r.size_wh[0], None]  # [sw,sh] -> [sw,]
    path = r.url()
    # Generate...
    if (self.dryrun):
        self.logger.info("%s / %s" % (self.dst, path))
    else:
        m = self.manipulator_klass(api_version=self.api_version)
        try:
            m.derive(srcfile=self.src, request=r,
                     outfile=os.path.join(self.dst, path))
            self.logger.info("%s / %s" % (self.dst, path))
        except IIIFZeroSizeError:
            self.logger.info("%s / %s - zero size, skipped" %
                             (self.dst, path))
            return  # done if zero size
    if (r.region_full and use_canonical and height is not None):
        # In v2.0 of the spec, the canonical URI form `w,` for scaled
        # images of the full region was introduced. This is somewhat at
        # odds with the requirement for `w,h` specified in `sizes` to
        # be available, and has problems of precision with tall narrow
        # images. Hopefully this will be fixed in 3.0 but for now symlink
        # the `w,h` form to the `w,` dirs so that requests using the
        # specified `w,h` also work. See
        # <https://github.com/IIIF/iiif.io/issues/544>
        #
        # FIXME - This is ugly because we duplicate code in
        # iiif.request.url to construct the partial URL
        region_dir = os.path.join(r.quote(r.identifier), "full")
        wh_dir = "%d,%d" % (r.size_wh[0], height)
        wh_path = os.path.join(region_dir, wh_dir)
        wc_dir = "%d," % (r.size_wh[0])
        wc_path = os.path.join(region_dir, wc_dir)
        if (not self.dryrun):
            ln = os.path.join(self.dst, wh_path)
            if (os.path.exists(ln)):
                os.remove(ln)
            os.symlink(wc_dir, ln)
        self.logger.info("%s / %s -> %s" % (self.dst, wh_path, wc_path))
python
{ "resource": "" }
q7341
IIIFStatic.setup_destination
train
def setup_destination(self):
    """Setup output directory based on self.dst and self.identifier.

    Returns the output directory name on success, raises an exception
    on failure.
    """
    # Do we have a separate identifier?
    if (not self.identifier):
        # No separate identifier specified, split off the last path
        # segment of the source name, strip the extension to get the
        # identifier
        self.identifier = os.path.splitext(os.path.split(self.src)[1])[0]
    # Done if dryrun, else setup self.dst first
    if (self.dryrun):
        return
    if (not self.dst):
        raise IIIFStaticError("No destination directory specified!")
    dst = self.dst
    if (os.path.isdir(dst)):
        # Exists, OK
        pass
    elif (os.path.isfile(dst)):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % dst)
    else:
        os.makedirs(dst)
    # Second, create identifier based subdir if necessary
    outd = os.path.join(dst, self.identifier)
    if (os.path.isdir(outd)):
        # Nothing for now, perhaps should delete?
        self.logger.warning(
            "Output directory %s already exists, adding/updating files" % outd)
    elif (os.path.isfile(outd)):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % outd)
    else:
        os.makedirs(outd)
    self.logger.debug("Output directory %s" % outd)
python
{ "resource": "" }
q7342
IIIFStatic.write_html
train
def write_html(self, html_dir='/tmp', include_osd=False,
               osd_width=500, osd_height=500):
    """Write HTML test page using OpenSeadragon for the tiles generated.

    Assumes that the generate(..) method has already been called to
    set up the identifier etc.

    Parameters:
      html_dir -- output directory for HTML files, will be created if
                  it does not already exist
      include_osd -- true to include OpenSeadragon code
      osd_width -- width of OpenSeadragon pane in pixels
      osd_height -- height of OpenSeadragon pane in pixels
    """
    osd_config = self.get_osd_config(self.osd_version)
    osd_base = osd_config['base']
    osd_dir = osd_config['dir']  # relative to base
    osd_js = os.path.join(osd_dir, osd_config['js'])
    osd_images = os.path.join(osd_dir, osd_config['images'])
    if (os.path.isdir(html_dir)):
        # Exists, fine
        pass
    elif (os.path.isfile(html_dir)):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % html_dir)
    else:
        os.makedirs(html_dir)
    self.logger.info("Writing HTML to %s" % (html_dir))
    with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f:
        template = f.read()
    outfile = self.identifier + '.html'
    outpath = os.path.join(html_dir, outfile)
    with open(outpath, 'w') as f:
        info_json_uri = '/'.join([self.identifier, 'info.json'])
        if (self.prefix):
            info_json_uri = '/'.join([self.prefix, info_json_uri])
        d = dict(identifier=self.identifier,
                 api_version=self.api_version,
                 osd_version=self.osd_version,
                 osd_uri=osd_js,
                 osd_images_prefix=osd_images,
                 osd_height=osd_height,
                 osd_width=osd_width,
                 info_json_uri=info_json_uri)
        f.write(Template(template).safe_substitute(d))
    self.logger.info("%s / %s" % (html_dir, outfile))
    # Do we want to copy OSD in there too? If so, do it only if
    # we haven't already
    if (include_osd):
        if (self.copied_osd):
            self.logger.info("OpenSeadragon already copied")
        else:
            # Make directory, copy JavaScript and icons (from osd_images)
            osd_path = os.path.join(html_dir, osd_dir)
            if (not os.path.isdir(osd_path)):
                os.makedirs(osd_path)
            shutil.copyfile(os.path.join(osd_base, osd_js),
                            os.path.join(html_dir, osd_js))
            self.logger.info("%s / %s" % (html_dir, osd_js))
            osd_images_path = os.path.join(html_dir, osd_images)
            if (os.path.isdir(osd_images_path)):
                self.logger.warning(
                    "OpenSeadragon images directory (%s) already exists, skipping" % osd_images_path)
            else:
                shutil.copytree(os.path.join(osd_base, osd_images),
                                osd_images_path)
                self.logger.info("%s / %s/*" % (html_dir, osd_images))
            self.copied_osd = True
python
{ "resource": "" }
q7343
get_value
train
def get_value(key, obj, default=missing):
    """Helper for pulling a keyed value off various types of objects."""
    if isinstance(key, int):
        return _get_value_for_key(key, obj, default)
    return _get_value_for_keys(key.split('.'), obj, default)
python
{ "resource": "" }
q7344
validate_headers
train
def validate_headers(spec, data):
    """Validates headers data and creates the config objects."""
    validated_data = {
        spec.VERSION: data[spec.VERSION],
        spec.KIND: data[spec.KIND],
    }
    if data.get(spec.LOGGING):
        validated_data[spec.LOGGING] = LoggingConfig.from_dict(
            data[spec.LOGGING])
    if data.get(spec.TAGS):
        validated_data[spec.TAGS] = data[spec.TAGS]
    if data.get(spec.HP_TUNING):
        validated_data[spec.HP_TUNING] = HPTuningConfig.from_dict(
            data[spec.HP_TUNING])
    return validated_data
python
{ "resource": "" }
q7345
validate
train
def validate(spec, data):
    """Validates the data and creates the config objects."""
    data = copy.deepcopy(data)
    validated_data = {}

    def validate_keys(section, config, section_data):
        if not isinstance(section_data, dict) or section == spec.MODEL:
            return
        extra_args = [key for key in section_data.keys()
                      if key not in config.SCHEMA().fields]
        if extra_args:
            raise PolyaxonfileError(
                'Extra arguments passed for `{}`: {}'.format(
                    section, extra_args))

    def add_validated_section(section, config):
        if data.get(section):
            section_data = data[section]
            validate_keys(section=section, config=config,
                          section_data=section_data)
            validated_data[section] = config.from_dict(section_data)

    add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG)
    add_validated_section(spec.BUILD, BuildConfig)
    add_validated_section(spec.RUN, RunConfig)
    add_validated_section(spec.MODEL, ModelConfig)
    add_validated_section(spec.TRAIN, TrainConfig)
    add_validated_section(spec.EVAL, EvalConfig)
    return validated_data
python
{ "resource": "" }
q7346
ExperimentSchema.validate_replicas
train
def validate_replicas(self, data):
    """Validate distributed experiment."""
    environment = data.get('environment')
    if environment and environment.replicas:
        validate_replicas(data.get('framework'), environment.replicas)
python
{ "resource": "" }
q7347
GroupSpecification.get_experiment_spec
train
def get_experiment_spec(self, matrix_declaration):
    """Returns an experiment spec for this group spec and the given
    matrix declaration."""
    parsed_data = Parser.parse(self, self._data, matrix_declaration)
    del parsed_data[self.HP_TUNING]
    validator.validate(spec=self, data=parsed_data)
    return ExperimentSpecification(
        values=[parsed_data, {'kind': self._EXPERIMENT}])
python
{ "resource": "" }
q7348
GroupSpecification.get_build_spec
train
def get_build_spec(self):
    """Returns a build spec for this group spec."""
    if BaseSpecification.BUILD not in self._data:
        return None
    return BuildConfig.from_dict(self._data[BaseSpecification.BUILD])
python
{ "resource": "" }
q7349
HPTuningSchema.validate_matrix
train
def validate_matrix(self, data):
    """Validates matrix data and creates the config objects."""
    is_grid_search = (
        data.get('grid_search') is not None or
        (data.get('grid_search') is None and
         data.get('random_search') is None and
         data.get('hyperband') is None and
         data.get('bo') is None)
    )
    is_bo = data.get('bo') is not None
    validate_matrix(data.get('matrix'),
                    is_grid_search=is_grid_search,
                    is_bo=is_bo)
python
{ "resource": "" }
q7350
TableOne._generate_remark_str
train
def _generate_remark_str(self, end_of_line='\n'):
    """
    Generate a series of remarks that the user should consider
    when interpreting the summary statistics.
    """
    warnings = {}
    msg = '{}'.format(end_of_line)

    # generate warnings for continuous variables
    if self._continuous:
        # highlight far outliers
        outlier_mask = self.cont_describe.far_outliers > 1
        outlier_vars = list(self.cont_describe.far_outliers[outlier_mask]
                            .dropna(how='all').index)
        if outlier_vars:
            warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars

        # highlight possible multimodal distributions using Hartigan's dip test
        # -1 values indicate NaN
        modal_mask = ((self.cont_describe.diptest >= 0) &
                      (self.cont_describe.diptest <= 0.05))
        modal_vars = list(self.cont_describe.diptest[modal_mask]
                          .dropna(how='all').index)
        if modal_vars:
            warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars

        # highlight non-normal distributions
        # -1 values indicate NaN
        modal_mask = ((self.cont_describe.normaltest >= 0) &
                      (self.cont_describe.normaltest <= 0.001))
        modal_vars = list(self.cont_describe.normaltest[modal_mask]
                          .dropna(how='all').index)
        if modal_vars:
            warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars

    # create the warning string
    for n, k in enumerate(sorted(warnings)):
        msg += '[{}] {}: {}.{}'.format(n + 1, k, ', '.join(warnings[k]),
                                       end_of_line)
    return msg
python
{ "resource": "" }
q7351
TableOne._detect_categorical_columns
train
def _detect_categorical_columns(self, data):
    """
    Detect categorical columns if they are not specified.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    likely_cat : list
        List of variables that appear to be categorical.
    """
    # assume all non-numerical and date columns are categorical
    numeric_cols = set(data._get_numeric_data().columns.values)
    date_cols = set(data.select_dtypes(include=[np.datetime64]).columns)
    likely_cat = set(data.columns) - numeric_cols
    likely_cat = list(likely_cat - date_cols)
    # check proportion of unique values if numerical
    for var in data._get_numeric_data().columns:
        likely_flag = 1.0 * data[var].nunique() / data[var].count() < 0.05
        if likely_flag:
            likely_cat.append(var)
    return likely_cat
python
{ "resource": "" }
q7352
TableOne._std
train
def _std(self, x):
    """
    Compute standard deviation with ddof degrees of freedom.
    """
    return np.nanstd(x.values, ddof=self._ddof)
python
{ "resource": "" }
q7353
TableOne._tukey
train
def _tukey(self, x, threshold):
    """
    Count outliers according to Tukey's rule.

    Where Q1 is the lower quartile and Q3 is the upper quartile,
    an outlier is an observation outside of the range:
    [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]

    k = 1.5 indicates an outlier
    k = 3.0 indicates an outlier that is "far out"
    """
    vals = x.values[~np.isnan(x.values)]
    try:
        q1, q3 = np.percentile(vals, [25, 75])
        iqr = q3 - q1
        low_bound = q1 - (iqr * threshold)
        high_bound = q3 + (iqr * threshold)
        outliers = np.where((vals > high_bound) | (vals < low_bound))
    except Exception:
        outliers = []
    return outliers
python
{ "resource": "" }
q7354
TableOne._outliers
train
def _outliers(self, x):
    """
    Compute number of outliers.
    """
    outliers = self._tukey(x, threshold=1.5)
    return np.size(outliers)
python
{ "resource": "" }
q7355
TableOne._far_outliers
train
def _far_outliers(self, x):
    """
    Compute number of "far out" outliers.
    """
    outliers = self._tukey(x, threshold=3.0)
    return np.size(outliers)
python
{ "resource": "" }
q7356
TableOne._create_cat_describe
train
def _create_cat_describe(self, data):
    """
    Describe the categorical data.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df_cat : pandas DataFrame
        Summarise the categorical variables.
    """
    group_dict = {}
    for g in self._groupbylvls:
        if self._groupby:
            d_slice = data.loc[data[self._groupby] == g, self._categorical]
        else:
            d_slice = data[self._categorical].copy()
        # create a dataframe with freq, proportion
        df = d_slice.copy()
        # convert type to string to avoid int converted to boolean, avoiding nans
        for column in df.columns:
            df[column] = [str(row) if not pd.isnull(row) else None
                          for row in df[column].values]
        df = df.melt().groupby(['variable', 'value']).size().to_frame(name='freq')
        df.index.set_names('level', level=1, inplace=True)
        df['percent'] = df['freq'].div(df.freq.sum(level=0),
                                       level=0).astype(float) * 100
        # set number of decimal places for percent
        if isinstance(self._decimals, int):
            n = self._decimals
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        elif isinstance(self._decimals, dict):
            df.loc[:, 'percent'] = df.apply(self._format_cat, axis=1)
        else:
            n = 1
            f = '{{:.{}f}}'.format(n)
            df['percent'] = df['percent'].astype(float).map(f.format)
        # add n column, listing total non-null values for each variable
        ct = d_slice.count().to_frame(name='n')
        ct.index.name = 'variable'
        df = df.join(ct)
        # add null count
        nulls = d_slice.isnull().sum().to_frame(name='isnull')
        nulls.index.name = 'variable'
        # only save null count to the first category for each variable
        # do this by extracting the first category from the df row index
        levels = df.reset_index()[['variable', 'level']] \
                   .groupby('variable').first()
        # add this category to the nulls table
        nulls = nulls.join(levels)
        nulls.set_index('level', append=True, inplace=True)
        # join nulls to categorical
        df = df.join(nulls)
        # add summary column
        df['t1_summary'] = (df.freq.map(str) + ' (' +
                            df.percent.map(str) + ')')
        # add to dictionary
        group_dict[g] = df
    df_cat = pd.concat(group_dict, axis=1)
    # ensure the groups are the 2nd level of the column index
    if df_cat.columns.nlevels > 1:
        df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1, level=0)
    return df_cat
python
{ "resource": "" }
q7357
TableOne._create_significance_table
train
def _create_significance_table(self, data):
    """
    Create a table containing p-values for significance tests. Add features
    of the distributions and the p-values to the dataframe.

    Parameters
    ----------
    data : pandas DataFrame
        The input dataset.

    Returns
    ----------
    df : pandas DataFrame
        A table containing the p-values, test name, etc.
    """
    # list features of the variable e.g. matched, paired, n_expected
    df = pd.DataFrame(index=self._continuous + self._categorical,
                      columns=['continuous', 'nonnormal',
                               'min_observed', 'pval', 'ptest'])
    df.index.rename('variable', inplace=True)
    df['continuous'] = np.where(df.index.isin(self._continuous), True, False)
    df['nonnormal'] = np.where(df.index.isin(self._nonnormal), True, False)
    # list values for each variable, grouped by groupby levels
    for v in df.index:
        is_continuous = df.loc[v]['continuous']
        is_categorical = ~df.loc[v]['continuous']
        is_normal = ~df.loc[v]['nonnormal']
        # if continuous, group data into list of lists
        if is_continuous:
            catlevels = None
            grouped_data = []
            for s in self._groupbylvls:
                lvl_data = data.loc[data[self._groupby] == s, v]
                # coerce to numeric and drop non-numeric data
                lvl_data = lvl_data.apply(pd.to_numeric,
                                          errors='coerce').dropna()
                # append to overall group data
                grouped_data.append(lvl_data.values)
            min_observed = len(min(grouped_data, key=len))
        # if categorical, create contingency table
        elif is_categorical:
            catlevels = sorted(data[v].astype('category').cat.categories)
            grouped_data = pd.crosstab(
                data[self._groupby].rename('_groupby_var_'), data[v])
            # minimum number of observations across all levels
            min_observed = grouped_data.sum(axis=1).min()
        df.loc[v, 'min_observed'] = min_observed
        # compute pvalues
        df.loc[v, 'pval'], df.loc[v, 'ptest'] = self._p_test(
            v, grouped_data, is_continuous, is_categorical,
            is_normal, min_observed, catlevels)
    return df
python
{ "resource": "" }
q7358
TableOne._create_cont_table
train
def _create_cont_table(self, data):
    """
    Create tableone for continuous data.

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the continuous variables.
    """
    # remove the t1_summary level
    table = self.cont_describe[['t1_summary']].copy()
    table.columns = table.columns.droplevel(level=0)
    # add a column of null counts as 1-count() from previous function
    nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
    try:
        table = table.join(nulltable)
    except TypeError:
        # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(nulltable)
    # add an empty level column, for joining with cat table
    table['level'] = ''
    table.set_index([table.index, 'level'], inplace=True)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)', 'ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval', 'ptest']])
    return table
python
{ "resource": "" }
q7359
TableOne._create_cat_table
train
def _create_cat_table(self, data):
    """
    Create table one for categorical data.

    Returns
    ----------
    table : pandas DataFrame
        A table summarising the categorical variables.
    """
    table = self.cat_describe['t1_summary'].copy()
    # add the total count of null values across all levels
    isnull = data[self._categorical].isnull().sum().to_frame(name='isnull')
    isnull.index.rename('variable', inplace=True)
    try:
        table = table.join(isnull)
    except TypeError:
        # if columns form a CategoricalIndex, need to convert to string first
        table.columns = table.columns.astype(str)
        table = table.join(isnull)
    # add pval column
    if self._pval and self._pval_adjust:
        table = table.join(self._significance_table[['pval (adjusted)', 'ptest']])
    elif self._pval:
        table = table.join(self._significance_table[['pval', 'ptest']])
    return table
python
{ "resource": "" }
q7360
TableOne._create_row_labels
train
def _create_row_labels(self):
    """
    Take the original labels for rows. Rename if alternative labels are
    provided. Append label suffix if label_suffix is True.

    Returns
    ----------
    labels : dictionary
        Dictionary, keys are original column name, values are final label.
    """
    # start with the original column names
    labels = {}
    for c in self._columns:
        labels[c] = c
    # replace column names with alternative names if provided
    if self._alt_labels:
        for k in self._alt_labels.keys():
            labels[k] = self._alt_labels[k]
    # append the label suffix
    if self._label_suffix:
        for k in labels.keys():
            if k in self._nonnormal:
                labels[k] = "{}, {}".format(labels[k], "median [Q1,Q3]")
            elif k in self._categorical:
                labels[k] = "{}, {}".format(labels[k], "n (%)")
            else:
                labels[k] = "{}, {}".format(labels[k], "mean (SD)")
    return labels
python
{ "resource": "" }
q7361
bandwidth_factor
train
def bandwidth_factor(nbr_data_pts, deriv_order=0):
    '''
    Scale factor for one-dimensional plug-in bandwidth selection.
    '''
    if deriv_order == 0:
        return (3.0 * nbr_data_pts / 4)**(-1.0 / 5)
    if deriv_order == 2:
        return (7.0 * nbr_data_pts / 4)**(-1.0 / 9)
    raise ValueError(
        'Not implemented for derivative of order {}'.format(deriv_order))
python
{ "resource": "" }
q7362
make_html_tag
train
def make_html_tag(tag, text=None, **params):
    """Create an HTML tag string.

    tag
        The HTML tag to use (e.g. 'a', 'span' or 'div')

    text
        The text to enclose between opening and closing tag.
        If no text is specified then only the opening tag is returned.

    Example::
        make_html_tag('a', text="Hello", href="/another/page")
        -> <a href="/another/page">Hello</a>

    To use reserved Python keywords like "class" as a parameter prepend
    it with an underscore. Instead of "class='green'" use "_class='green'".

    Warning: Quotes and apostrophes are not escaped."""
    params_string = ""

    # Parameters are passed. Turn the dict into a string like "a=1 b=2 c=3".
    for key, value in sorted(params.items()):
        # Strip off a leading underscore from the attribute's key to allow
        # attributes like '_class' to be used as a CSS class specification
        # instead of the reserved Python keyword 'class'.
        key = key.lstrip("_")
        params_string += u' {0}="{1}"'.format(key, value)

    # Create the tag string
    tag_string = u"<{0}{1}>".format(tag, params_string)

    # Add text and closing tag if required.
    if text:
        tag_string += u"{0}</{1}>".format(text, tag)

    return tag_string
python
{ "resource": "" }
q7363
Page._range
train
def _range(self, link_map, radius):
    """
    Return range of linked pages to substitute placeholder in pattern
    """
    leftmost_page = max(self.first_page, (self.page - radius))
    rightmost_page = min(self.last_page, (self.page + radius))
    nav_items = []

    # Create a link to the first page (unless we are on the first page
    # or there would be no need to insert '..' spacers)
    if self.page != self.first_page and self.first_page < leftmost_page:
        page = link_map["first_page"].copy()
        page["value"] = unicode(page["number"])
        nav_items.append(self.link_tag(page))

    for item in link_map["range_pages"]:
        nav_items.append(self.link_tag(item))

    # Create a link to the very last page (unless we are on the last
    # page or there would be no need to insert '..' spacers)
    if self.page != self.last_page and rightmost_page < self.last_page:
        page = link_map["last_page"].copy()
        page["value"] = unicode(page["number"])
        nav_items.append(self.link_tag(page))

    return self.separator.join(nav_items)
python
{ "resource": "" }
q7364
Page.default_link_tag
train
def default_link_tag(item):
    """
    Create an A-HREF tag that points to another page.
    """
    text = item["value"]
    target_url = item["href"]

    if not item["href"] or item["type"] in ("span", "current_page"):
        if item["attrs"]:
            text = make_html_tag("span", **item["attrs"]) + text + "</span>"
        return text

    return make_html_tag("a", text=text, href=target_url, **item["attrs"])
python
{ "resource": "" }
q7365
PerceptronTagger.tag
train
def tag(self, corpus, tokenize=True):
    '''Tags a string `corpus`.'''
    # Assume untokenized corpus has \n between sentences and ' ' between words
    s_split = SentenceTokenizer().tokenize if tokenize else lambda t: t.split('\n')
    w_split = WordTokenizer().tokenize if tokenize else lambda s: s.split()

    def split_sents(corpus):
        for s in s_split(corpus):
            yield w_split(s)

    prev, prev2 = self.START
    tokens = []
    for words in split_sents(corpus):
        context = self.START + [self._normalize(w) for w in words] + self.END
        for i, word in enumerate(words):
            tag = self.tagdict.get(word)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag = self.model.predict(features)
            tokens.append((word, tag))
            prev2 = prev
            prev = tag
    return tokens
python
{ "resource": "" }
q7366
PerceptronTagger.train
train
def train(self, sentences, save_loc=None, nr_iter=5):
    '''Train a model from sentences, and save it at ``save_loc``.
    ``nr_iter`` controls the number of Perceptron training iterations.

    :param sentences: A list of (words, tags) tuples.
    :param save_loc: If not ``None``, saves a pickled model in this location.
    :param nr_iter: Number of training iterations.
    '''
    self._make_tagdict(sentences)
    self.model.classes = self.classes
    for iter_ in range(nr_iter):
        c = 0
        n = 0
        for words, tags in sentences:
            prev, prev2 = self.START
            context = self.START + [self._normalize(w) for w in words] \
                      + self.END
            for i, word in enumerate(words):
                guess = self.tagdict.get(word)
                if not guess:
                    feats = self._get_features(i, word, context, prev, prev2)
                    guess = self.model.predict(feats)
                    self.model.update(tags[i], guess, feats)
                prev2 = prev
                prev = guess
                c += guess == tags[i]
                n += 1
        random.shuffle(sentences)
        logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
    self.model.average_weights()
    # Pickle as a binary file
    if save_loc is not None:
        pickle.dump((self.model.weights, self.tagdict, self.classes),
                    open(save_loc, 'wb'), -1)
    return None
python
{ "resource": "" }
q7367
PerceptronTagger.load
train
def load(self, loc):
    '''Load a pickled model.'''
    try:
        w_td_c = pickle.load(open(loc, 'rb'))
    except IOError:
        msg = ("Missing trontagger.pickle file.")
        raise MissingCorpusError(msg)
    self.model.weights, self.tagdict, self.classes = w_td_c
    self.model.classes = self.classes
    return None
python
{ "resource": "" }
q7368
PerceptronTagger._normalize
train
def _normalize(self, word):
    '''Normalization used in pre-processing.

    - All words are lower cased
    - Digits in the range 1800-2100 are represented as !YEAR
    - Other digits are represented as !DIGITS

    :rtype: str
    '''
    if '-' in word and word[0] != '-':
        return '!HYPHEN'
    elif word.isdigit() and len(word) == 4:
        return '!YEAR'
    elif word[0].isdigit():
        return '!DIGITS'
    else:
        return word.lower()
python
{ "resource": "" }
q7369
PerceptronTagger._make_tagdict
train
def _make_tagdict(self, sentences):
    '''Make a tag dictionary for single-tag words.'''
    counts = defaultdict(lambda: defaultdict(int))
    for words, tags in sentences:
        for word, tag in zip(words, tags):
            counts[word][tag] += 1
            self.classes.add(tag)
    freq_thresh = 20
    ambiguity_thresh = 0.97
    for word, tag_freqs in counts.items():
        tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
        n = sum(tag_freqs.values())
        # Don't add rare words to the tag dictionary
        # Only add quite unambiguous words
        if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
            self.tagdict[word] = tag
python
{ "resource": "" }
q7370
train
train
def train(nr_iter, examples):
    '''Return an averaged perceptron model trained on ``examples`` for
    ``nr_iter`` iterations.
    '''
    model = AveragedPerceptron()
    for i in range(nr_iter):
        random.shuffle(examples)
        for features, class_ in examples:
            scores = model.predict(features)
            guess, score = max(scores.items(), key=lambda i: i[1])
            if guess != class_:
                model.update(class_, guess, features)
    model.average_weights()
    return model
python
{ "resource": "" }
q7371
RtpPacket.decode
train
def decode(self, byteStream):
    """Decode the RTP packet."""
    self.header = bytearray(byteStream[:HEADER_SIZE])
    self.payload = byteStream[HEADER_SIZE:]
python
{ "resource": "" }
q7372
RtpPacket.timestamp
train
def timestamp(self):
    """Return timestamp."""
    timestamp = (self.header[4] << 24 | self.header[5] << 16 |
                 self.header[6] << 8 | self.header[7])
    return int(timestamp)
python
{ "resource": "" }
q7373
preview_stream
train
def preview_stream(stream):
    """ Display stream in an OpenCV window until "q" key is pressed """
    # together with waitkeys later, helps to close the video window effectively
    _cv2.startWindowThread()
    for frame in stream.frame_generator():
        if frame is not None:
            _cv2.imshow('Video', frame)
            _cv2.moveWindow('Video', 5, 5)
        else:
            break
        key = _cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    _cv2.waitKey(1)
    _cv2.destroyAllWindows()
    _cv2.waitKey(1)
python
{ "resource": "" }
q7374
printrec
train
def printrec(recst):
    """ Pretty-print RTSP strings """
    try:
        recst = recst.decode('UTF-8')
    except AttributeError:
        pass
    recs = [x for x in recst.split('\r\n') if x]
    for rec in recs:
        print(rec)
    print("\n")
python
{ "resource": "" }
q7375
get_resources
train
def get_resources(connection):
    """ Do an RTSP DESCRIBE request, then parse out available resources
    from the response """
    resp = connection.describe(verbose=False).split('\r\n')
    resources = [x.replace('a=control:', '') for x in resp
                 if (x.find('control:') != -1 and x[-1] != '*')]
    return resources
python
{ "resource": "" }
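`get_resources` filters the SDP `a=control:` lines out of the DESCRIBE response; the same filtering applied to a canned response (the URLs are invented):

# Same filtering as above on a canned DESCRIBE response; URLs are invented.
resp = ("v=0\r\n"
        "a=control:*\r\n"
        "m=video 0 RTP/AVP 96\r\n"
        "a=control:rtsp://example.com/stream/track1\r\n").split('\r\n')
resources = [x.replace('a=control:', '') for x in resp
             if (x.find('control:') != -1 and x[-1] != '*')]
assert resources == ['rtsp://example.com/stream/track1']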
q7376
FFmpegClient.fetch_image
train
def fetch_image(self, rtsp_server_uri=_source, timeout_secs=15):
    """ Fetch a single frame using FFMPEG. Convert to PIL Image. Slow. """
    self._check_ffmpeg()
    cmd = ("ffmpeg -rtsp_transport tcp -i {} -loglevel quiet "
           "-frames 1 -f image2pipe -").format(rtsp_server_uri)
    with _sp.Popen(cmd, shell=True, stdout=_sp.PIPE) as process:
        try:
            stdout, stderr = process.communicate(timeout=timeout_secs)
        except _sp.TimeoutExpired as e:
            process.kill()
            raise TimeoutError(
                "Connection to {} timed out".format(rtsp_server_uri), e)
    return _Image.open(_io.BytesIO(stdout))
python
{ "resource": "" }
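A usage sketch; the client class name and the RTSP URI are assumptions:

# Hypothetical usage of fetch_image; class name and URI are assumptions.
client = FFmpegClient()
image = client.fetch_image('rtsp://example.com/stream', timeout_secs=15)
image.save('frame.jpg')  # it returns a PIL Image, so save() works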
q7377
Client.setupMovie
train
def setupMovie(self):
    """Setup button handler."""
    if self.state == self.INIT:
        self.sendRtspRequest(self.SETUP)
python
{ "resource": "" }
q7378
Client.exitClient
train
def exitClient(self):
    """Teardown button handler."""
    self.sendRtspRequest(self.TEARDOWN)
    # Delete the cached image from the video
    os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)
    # Use true division so the loss rate is not truncated to an integer.
    rate = float(self.counter) / self.frameNbr
    print('-' * 60 + "\nRTP Packet Loss Rate :" + str(rate) + "\n" + '-' * 60)
    sys.exit(0)
python
{ "resource": "" }
q7379
Client.pauseMovie
train
def pauseMovie(self):
    """Pause button handler."""
    if self.state == self.PLAYING:
        self.sendRtspRequest(self.PAUSE)
python
{ "resource": "" }
q7380
Client.updateMovie
train
def updateMovie(self, imageFile):
    """Update the image file as video frame in the GUI."""
    try:
        photo = ImageTk.PhotoImage(Image.open(imageFile))
    except:
        print("photo error")
        print('-' * 60)
        traceback.print_exc(file=sys.stdout)
        print('-' * 60)
        # Bail out: `photo` is undefined if the image failed to load.
        return
    self.label.configure(image=photo, height=288)
    self.label.image = photo
python
{ "resource": "" }
q7381
Client.sendRtspRequest
train
def sendRtspRequest(self, requestCode):
    """Send RTSP request to the server."""
    # Setup request
    if requestCode == self.SETUP and self.state == self.INIT:
        threading.Thread(target=self.recvRtspReply).start()
        # Update RTSP sequence number.
        self.rtspSeq = 1
        # Write the RTSP request to be sent.
        request = "SETUP " + str(self.fileName) + "\n" + str(self.rtspSeq) + \
            "\n" + " RTSP/1.0 RTP/UDP " + str(self.rtpPort)
        self.rtspSocket.send(request)
        # Keep track of the sent request.
        self.requestSent = self.SETUP
    # Play request
    elif requestCode == self.PLAY and self.state == self.READY:
        self.rtspSeq = self.rtspSeq + 1
        request = "PLAY " + "\n" + str(self.rtspSeq)
        self.rtspSocket.send(request)
        print('-' * 60 + "\nPLAY request sent to Server...\n" + '-' * 60)
        self.requestSent = self.PLAY
    # Pause request
    elif requestCode == self.PAUSE and self.state == self.PLAYING:
        self.rtspSeq = self.rtspSeq + 1
        request = "PAUSE " + "\n" + str(self.rtspSeq)
        self.rtspSocket.send(request)
        print('-' * 60 + "\nPAUSE request sent to Server...\n" + '-' * 60)
        self.requestSent = self.PAUSE
    # Teardown request
    elif requestCode == self.TEARDOWN and not self.state == self.INIT:
        self.rtspSeq = self.rtspSeq + 1
        request = "TEARDOWN " + "\n" + str(self.rtspSeq)
        self.rtspSocket.send(request)
        print('-' * 60 + "\nTEARDOWN request sent to Server...\n" + '-' * 60)
        self.requestSent = self.TEARDOWN
    else:
        return
python
{ "resource": "" }
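For comparison with the simplified request strings built above, a minimal RFC 2326-style SETUP request; the URL, sequence number, and client port are placeholders:

# RFC 2326-style SETUP request for comparison; all values are placeholders.
# The strings built above follow the simplified grammar of the original
# classroom assignment, not the RFC.
request = ("SETUP rtsp://example.com/movie.Mjpeg RTSP/1.0\r\n"
           "CSeq: 1\r\n"
           "Transport: RTP/UDP;client_port=25000\r\n\r\n")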
q7382
Client.recvRtspReply
train
def recvRtspReply(self):
    """Receive RTSP reply from the server."""
    while True:
        reply = self.rtspSocket.recv(1024)
        if reply:
            self.parseRtspReply(reply)
        # Close the RTSP socket upon requesting Teardown
        if self.requestSent == self.TEARDOWN:
            self.rtspSocket.shutdown(socket.SHUT_RDWR)
            self.rtspSocket.close()
            break
python
{ "resource": "" }
q7383
Client.parseRtspReply
train
def parseRtspReply(self, data):
    """Parse the RTSP reply from the server."""
    print("Parsing received RTSP data...")
    lines = data.split('\n')
    seqNum = int(lines[1].split(' ')[1])
    # Process only if the server reply's sequence number matches the request's
    if seqNum == self.rtspSeq:
        session = int(lines[2].split(' ')[1])
        # New RTSP session ID
        if self.sessionId == 0:
            self.sessionId = session
        # Process only if the session ID is the same
        if self.sessionId == session:
            if int(lines[0].split(' ')[1]) == 200:
                if self.requestSent == self.SETUP:
                    # Update RTSP state.
                    print("Updating RTSP state...")
                    self.state = self.READY
                    # Open RTP port.
                    print("Setting up RTP port for video stream")
                    self.openRtpPort()
                elif self.requestSent == self.PLAY:
                    self.state = self.PLAYING
                    print('-' * 60 + "\nClient is PLAYING...\n" + '-' * 60)
                elif self.requestSent == self.PAUSE:
                    self.state = self.READY
                    # The play thread exits. A new thread is created on resume.
                    self.playEvent.set()
                elif self.requestSent == self.TEARDOWN:
                    self.state = self.INIT
                    # Flag the teardownAcked to close the socket.
                    self.teardownAcked = 1
python
{ "resource": "" }
q7384
LocalVideoFeed.preview
train
def preview(self):
    """ Blocking function. Opens OpenCV window to display stream. """
    win_name = 'Camera'
    cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(win_name, 20, 20)
    self.open()
    while self.isOpened():
        cv2.imshow(win_name, self._stream.read()[1])
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.waitKey()
python
{ "resource": "" }
q7385
Client.createWidgets
train
def createWidgets(self):
    """Build GUI."""
    # Create Setup button
    self.setup = Button(self.master, width=20, padx=3, pady=3)
    self.setup["text"] = "Setup"
    self.setup["command"] = self.setupMovie
    self.setup.grid(row=1, column=0, padx=2, pady=2)

    # Create Play button
    self.start = Button(self.master, width=20, padx=3, pady=3)
    self.start["text"] = "Play"
    self.start["command"] = self.playMovie
    self.start.grid(row=1, column=1, padx=2, pady=2)

    # Create Pause button
    self.pause = Button(self.master, width=20, padx=3, pady=3)
    self.pause["text"] = "Pause"
    self.pause["command"] = self.pauseMovie
    self.pause.grid(row=1, column=2, padx=2, pady=2)

    # Create Teardown button
    self.teardown = Button(self.master, width=20, padx=3, pady=3)
    self.teardown["text"] = "Teardown"
    self.teardown["command"] = self.exitClient
    self.teardown.grid(row=1, column=3, padx=2, pady=2)

    # Create a label to display the movie
    self.label = Label(self.master, height=19)
    self.label.grid(row=0, column=0, columnspan=4,
                    sticky=W + E + N + S, padx=5, pady=5)
python
{ "resource": "" }
q7386
Client.playMovie
train
def playMovie(self):
    """Play button handler."""
    if self.state == self.READY:
        # Create a new thread to listen for RTP packets
        print("Playing Movie")
        threading.Thread(target=self.listenRtp).start()
        self.playEvent = threading.Event()
        self.playEvent.clear()
        self.sendRtspRequest(self.PLAY)
python
{ "resource": "" }
q7387
Client.writeFrame
train
def writeFrame(self, data):
    """Write the received frame to a temp image file. Return the image file."""
    cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
    try:
        with open(cachename, "wb") as file:
            file.write(data)
    except IOError:
        print("file write error")
    return cachename
python
{ "resource": "" }
q7388
Client.handler
train
def handler(self):
    """Handler on explicitly closing the GUI window."""
    self.pauseMovie()
    if tkMessageBox.askokcancel("Quit?", "Are you sure you want to quit?"):
        self.exitClient()
    else:
        # When the user presses cancel, resume playing.
        print("Playing Movie")
        threading.Thread(target=self.listenRtp).start()
        self.sendRtspRequest(self.PLAY)
python
{ "resource": "" }
q7389
ChatThreadParser.skip
train
def skip(self):
    """
    Eats through the input iterator without recording the content.
    """
    for pos, element in self.element_iter:
        tag, class_attr = _tag_and_class_attr(element)
        if tag == "div" and "thread" in class_attr and pos == "end":
            break
python
{ "resource": "" }
q7390
MessageHtmlParser.should_record_thread
train
def should_record_thread(self, participants):
    """
    Determines if the thread should be parsed based on the participants
    and the filter given. For example, if the filter states
    ['jack', 'billy joel'], then only threads with exactly two
    participants (excluding the owner of the chat history) containing
    someone with the first or last name 'Jack' and someone named
    'Billy Joel' will be included.

    Any of the following would match that criteria:

        - Jack Stevenson, Billy Joel
        - Billy Joel, Jack Stevens
        - Jack Jenson, Billy Joel
        - Jack Jack, Billy Joel

    participants -- the participants of the thread (excluding the
                    history owner)
    """
    if not self.thread_filter:
        return True
    if len(participants) != len(self.thread_filter):
        return False
    participants = [[p.lower()] + p.lower().split(" ") for p in participants]
    matches = defaultdict(set)
    for e, p in enumerate(participants):
        for f in self.thread_filter:
            if f in p:
                matches[f].add(e)
    matched = set()
    for f in matches:
        if len(matches[f]) == 0:
            return False
        matched |= matches[f]
    return len(matched) == len(participants)
python
{ "resource": "" }
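A worked instance of the matching logic, using the names from the docstring:

# Walk-through of the matching above with the docstring's example names.
participants = ['Jack Stevenson', 'Billy Joel']
tokens = [[p.lower()] + p.lower().split(' ') for p in participants]
# tokens[0] == ['jack stevenson', 'jack', 'stevenson']
# tokens[1] == ['billy joel', 'billy', 'joel']
# 'jack' claims participant 0 and 'billy joel' claims participant 1, so
# every filter term matches and every participant is accounted for.
assert 'jack' in tokens[0] and 'billy joel' in tokens[1]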
q7391
MessageHtmlParser.parse_thread
train
def parse_thread(self, participants, element_iter, require_flush):
    """
    Parses a thread with appropriate CLI feedback.

    :param participants: The participants in this thread.
    :param element_iter: The XML iterator to parse the data from.
    :param require_flush: Whether the iterator needs to be flushed if it
                          is determined that the thread should be skipped.
    :return: A `ChatThread` object if not skipped, otherwise `None`.
    """
    # Very rarely threads may lack information on who the participants
    # are. We will consider those threads corrupted and skip them.
    participants_text = _truncate(', '.join(participants), 60)
    if participants:
        skip_thread = not self.should_record_thread(participants)
        participants_text = yellow("[%s]" % participants_text)
    else:
        participants_text = "unknown participants"
        skip_thread = True
    if skip_thread:
        line = "\rSkipping chat thread with %s..." % \
               yellow(participants_text)
    else:
        participants_key = ", ".join(participants)
        if participants_key in self.chat_threads:
            thread_current_len = len(self.chat_threads[participants_key])
            line = "\rContinuing chat thread with %s %s..." \
                   % (yellow(participants_text),
                      magenta("<@%d messages>" % thread_current_len))
        else:
            line = "\rDiscovered chat thread with %s..." \
                   % yellow(participants_text)
    if self.progress_output:
        sys.stderr.write(line.ljust(self.last_line_len))
        sys.stderr.flush()
    self.last_line_len = len(line)
    parser = ChatThreadParser(
        element_iter, self.timezone_hints, self.use_utc,
        self.name_resolver, self.no_sender_warning, self.seq_num)
    if skip_thread:
        if require_flush:
            parser.skip()
    else:
        self.no_sender_warning, thread = parser.parse(participants)
        return thread
python
{ "resource": "" }
q7392
LegacyMessageHtmlParser.parse_impl
train
def parse_impl(self):
    """
    Parses the HTML content as a stream. This is far less memory
    intensive than loading the entire HTML file into memory, like
    BeautifulSoup does.
    """
    # Cast to str to ensure not unicode under Python 2, as the parser
    # doesn't like that.
    parser = XMLParser(encoding=str('UTF-8'))
    element_iter = ET.iterparse(self.handle, events=("start", "end"),
                                parser=parser)
    for pos, element in element_iter:
        tag, class_attr = _tag_and_class_attr(element)
        if tag == "h1" and pos == "end":
            if not self.user:
                self.user = element.text.strip()
        elif tag == "div" and "thread" in class_attr and pos == "start":
            participants = self.parse_participants(element)
            thread = self.parse_thread(participants, element_iter, True)
            self.save_thread(thread)
python
{ "resource": "" }
q7393
messages
train
def messages(path, thread, fmt, nocolor, timezones, utc, noprogress,
             resolve, directory):
    """
    Conversion of Facebook chat history.
    """
    with colorize_output(nocolor):
        try:
            chat_history = _process_history(
                path=path, thread=thread, timezones=timezones,
                utc=utc, noprogress=noprogress, resolve=resolve)
        except ProcessingFailure:
            return
        if directory:
            set_all_color(enabled=False)
        write(fmt, chat_history, directory or sys.stdout)
python
{ "resource": "" }
q7394
stats
train
def stats(path, fmt, nocolor, timezones, utc, noprogress, most_common,
          resolve, length):
    """Analysis of Facebook chat history."""
    with colorize_output(nocolor):
        try:
            chat_history = _process_history(
                path=path, thread='', timezones=timezones,
                utc=utc, noprogress=noprogress, resolve=resolve)
        except ProcessingFailure:
            return
        statistics = ChatHistoryStatistics(
            chat_history,
            most_common=None if most_common < 0 else most_common)
        if fmt == 'text':
            statistics.write_text(sys.stdout, -1 if length < 0 else length)
        elif fmt == 'json':
            statistics.write_json(sys.stdout)
        elif fmt == 'pretty-json':
            statistics.write_json(sys.stdout, pretty=True)
        elif fmt == 'yaml':
            statistics.write_yaml(sys.stdout)
python
{ "resource": "" }
q7395
set_stream_color
train
def set_stream_color(stream, disabled):
    """
    Remember what our original streams were so that we can colorize them
    separately, which colorama doesn't seem to natively support.
    """
    original_stdout = sys.stdout
    original_stderr = sys.stderr
    init(strip=disabled)
    if stream != original_stdout:
        sys.stdout = original_stdout
        sys.stderr = BinaryStreamWrapper(stream, sys.stderr)
    if stream != original_stderr:
        sys.stderr = original_stderr
        sys.stdout = BinaryStreamWrapper(stream, sys.stdout)
python
{ "resource": "" }
q7396
FacebookNameResolver._manual_lookup
train
def _manual_lookup(self, facebook_id, facebook_id_string):
    """
    People who we have not communicated with in a long time will not
    appear in the look-ahead cache that Facebook keeps. We must manually
    resolve them.

    :param facebook_id: Profile ID of the user to look up.
    :return: The resolved name, or ``facebook_id_string`` if no name
             could be found.
    """
    resp = self._session.get(
        'https://www.facebook.com/%s' % facebook_id,
        allow_redirects=True,
        timeout=10
    )
    # No point in trying to get this using BeautifulSoup. The HTML here
    # is the very epitome of what it is to be invalid...
    m = _MANUAL_NAME_MATCHER.search(resp.text)
    if m:
        name = m.group(1)
    else:
        name = facebook_id_string
    self._cached_profiles[facebook_id] = name
    return name
python
{ "resource": "" }
q7397
get_exporter
train
def get_exporter(obj, name):
    """
    Get an exporter for the given object.

    :param obj: object to export
    :type obj: :class:`Component <cqparts.Component>`
    :param name: registered name of exporter
    :type name: :class:`str`
    :return: an exporter instance of the given type
    :rtype: :class:`Exporter`
    :raises TypeError: if exporter cannot be found
    """
    if name not in exporter_index:
        raise TypeError(
            ("exporter type '%s' is not registered: " % name) +
            ("registered types: %r" % sorted(exporter_index.keys()))
        )
    for base_class in exporter_index[name]:
        if isinstance(obj, base_class):
            return exporter_index[name][base_class](obj)
    raise TypeError("exporter type '%s' for a %r is not registered" % (
        name, type(obj)
    ))
python
{ "resource": "" }
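The lookup above assumes a module-level registry mapping a name to a dict of {base_class: exporter_class}; a toy, self-contained illustration of that dispatch (the classes and the registry entry are invented):

# Toy illustration of the name -> {base_class: exporter} dispatch above;
# the classes and the registry entry are invented for this sketch.
class Component(object):
    pass

class Widget(Component):
    pass

class StepExporter(object):
    def __init__(self, obj):
        self.obj = obj

exporter_index = {'step': {Component: StepExporter}}
obj = Widget()
for base_class in exporter_index['step']:
    if isinstance(obj, base_class):  # Widget is an instance of Component
        exporter = exporter_index['step'][base_class](obj)
assert isinstance(exporter, StepExporter)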
q7398
get_importer
train
def get_importer(cls, name):
    """
    Get an importer for the given registered type.

    :param cls: class to import
    :type cls: :class:`type`
    :param name: registered name of importer
    :type name: :class:`str`
    :return: an importer instance of the given type
    :rtype: :class:`Importer`
    :raises TypeError: if importer cannot be found
    """
    if name not in importer_index:
        raise TypeError(
            ("importer type '%s' is not registered: " % name) +
            ("registered types: %r" % sorted(importer_index.keys()))
        )
    for base_class in importer_index[name]:
        if issubclass(cls, base_class):
            return importer_index[name][base_class](cls)
    raise TypeError("importer type '%s' for a %r is not registered" % (
        name, cls
    ))
python
{ "resource": "" }
q7399
BunningsProductSpider.parse
train
def parse(self, response):
    """Parse paginated list of products."""
    # Check if page is out of range
    no_more_products = re.search(
        r'No matching products were found',
        response.css('div.paged-results').extract_first(),
        flags=re.I
    )
    if no_more_products:
        pass  # no more pages to populate, stop scraping
    else:
        # Scrape products list
        for product in response.css('article.product-list__item'):
            product_url = product.css('a::attr("href")').extract_first()
            yield response.follow(product_url, self.parse_detail)

        (base, params) = split_url(response.url)
        params.update({'page': int(params.get('page', '1')) + 1})
        next_page_url = join_url(base, params)
        self.logger.info(next_page_url)
        yield response.follow(next_page_url, self.parse)
python
{ "resource": "" }
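The pagination step depends on `split_url`/`join_url` helpers that are not shown in this snippet; a minimal standard-library stand-in illustrating only the page increment:

# Minimal stand-in for the split_url/join_url helpers (assumed, not shown
# here), illustrating only the page-increment step.
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

def next_page(url):
    parts = urlparse(url)
    params = {k: v[0] for k, v in parse_qs(parts.query).items()}
    params['page'] = int(params.get('page', '1')) + 1
    return urlunparse(parts._replace(query=urlencode(params)))

assert (next_page('https://example.com/search?q=drill')
        == 'https://example.com/search?q=drill&page=2')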