Dataset schema (one record per function):
  _id               string, length 2-7
  title             string, length 1-88
  partition         string, 3 classes
  text              string, length 75-19.8k
  language          string, 1 class
  meta_information  dict
q7200
NTEnum.assign
train
def assign(self, V, py):
    """Store python value in Value"""
    if isinstance(py, (bytes, unicode)):
        for i, C in enumerate(V['value.choices'] or self._choices):
            if py == C:
                V['value.index'] = i
                return
    # attempt to parse as integer
    V['value.index'] = py
python
{ "resource": "" }
q7201
periodic
train
def periodic(period=60.0, file=sys.stderr):
    """Start a daemon thread which will periodically print GC stats

    :param period: Update period in seconds
    :param file: A writable file-like object
    """
    import threading
    import time
    S = _StatsThread(period=period, file=file)
    T = threading.Thread(target=S)
    T.daemon = True
    T.start()
python
{ "resource": "" }
q7202
StatsDelta.collect
train
def collect(self, file=sys.stderr):
    """Collect stats and print results to file

    :param file: A writable file-like object
    """
    cur = gcstats()
    Ncur = len(cur)
    if self.stats is not None and file is not None:
        prev = self.stats
        Nprev = self.ntypes  # may be less than len(prev)
        if Ncur != Nprev:
            print("# Types %d -> %d" % (Nprev, Ncur), file=file)
        Scur, Sprev, first = set(cur), set(prev), True
        for T in Scur - Sprev:  # new types
            if first:
                print('New Types', file=file)
                first = False
            print(' ', T, cur[T], file=file)
        first = True
        for T in Sprev - Scur:  # collected types
            if first:
                print('Cleaned Types', file=file)
                first = False
            print(' ', T, -prev[T], file=file)
        first = True
        for T in Scur & Sprev:
            if cur[T] == prev[T]:
                continue
            if first:
                print('Known Types', file=file)
                first = False
            print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file)
    else:  # first call
        print("All Types", file=file)
        for T, C in cur.items():
            print(' ', T, C, file=file)
    self.stats, self.ntypes = cur, len(cur)
python
{ "resource": "" }
q7203
MembersClient.filter
train
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
    """
    Takes a chamber and Congress, OR state and district,
    returning a list of members
    """
    check_chamber(chamber)
    kwargs.update(chamber=chamber, congress=congress)
    if 'state' in kwargs and 'district' in kwargs:
        path = ("members/{chamber}/{state}/{district}/"
                "current.json").format(**kwargs)
    elif 'state' in kwargs:
        path = ("members/{chamber}/{state}/"
                "current.json").format(**kwargs)
    else:
        path = ("{congress}/{chamber}/"
                "members.json").format(**kwargs)
    return self.fetch(path, parse=lambda r: r['results'])
python
{ "resource": "" }
q7204
MembersClient.bills
train
def bills(self, member_id, type='introduced'):
    "Same as BillsClient.by_member"
    path = "members/{0}/bills/{1}.json".format(member_id, type)
    return self.fetch(path)
python
{ "resource": "" }
q7205
MembersClient.compare
train
def compare(self, first, second, chamber, type='votes',
            congress=CURRENT_CONGRESS):
    """
    See how often two members voted together in a given Congress.
    Takes two member IDs, a chamber and a Congress number.
    """
    check_chamber(chamber)
    path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
    path = path.format(first=first, second=second, type=type,
                       congress=congress, chamber=chamber)
    return self.fetch(path)
python
{ "resource": "" }
q7206
BillsClient.upcoming
train
def upcoming(self, chamber, congress=CURRENT_CONGRESS):
    "Shortcut for upcoming bills"
    path = "bills/upcoming/{chamber}.json".format(chamber=chamber)
    return self.fetch(path)
python
{ "resource": "" }
q7207
VotesClient.by_month
train
def by_month(self, chamber, year=None, month=None):
    """
    Return votes for a single month, defaulting to the current month.
    """
    check_chamber(chamber)
    now = datetime.datetime.now()
    year = year or now.year
    month = month or now.month
    path = "{chamber}/votes/{year}/{month}.json".format(
        chamber=chamber, year=year, month=month)
    return self.fetch(path, parse=lambda r: r['results'])
python
{ "resource": "" }
q7208
VotesClient.by_range
train
def by_range(self, chamber, start, end):
    """
    Return votes cast in a chamber between two dates,
    up to one month apart.
    """
    check_chamber(chamber)
    start, end = parse_date(start), parse_date(end)
    if start > end:
        start, end = end, start
    path = "{chamber}/votes/{start:%Y-%m-%d}/{end:%Y-%m-%d}.json".format(
        chamber=chamber, start=start, end=end)
    return self.fetch(path, parse=lambda r: r['results'])
python
{ "resource": "" }
q7209
VotesClient.by_date
train
def by_date(self, chamber, date):
    "Return votes cast in a chamber on a single day"
    date = parse_date(date)
    return self.by_range(chamber, date, date)
python
{ "resource": "" }
q7210
VotesClient.today
train
def today(self, chamber):
    "Return today's votes in a given chamber"
    now = datetime.date.today()
    return self.by_range(chamber, now, now)
python
{ "resource": "" }
q7211
VotesClient.nominations
train
def nominations(self, congress=CURRENT_CONGRESS):
    "Return votes on nominations from a given Congress"
    path = "{congress}/nominations.json".format(congress=congress)
    return self.fetch(path)
python
{ "resource": "" }
q7212
Client.fetch
train
def fetch(self, path, parse=lambda r: r['results'][0]):
    """
    Make an API request, with authentication.

    This method can be used directly to fetch new endpoints or
    customize parsing.
    ::
        >>> from congress import Congress
        >>> client = Congress()
        >>> senate = client.fetch('115/senate/members.json')
        >>> print(senate['num_results'])
        101
    """
    url = self.BASE_URI + path
    headers = {'X-API-Key': self.apikey}
    log.debug(url)
    resp, content = self.http.request(url, headers=headers)
    content = u(content)
    content = json.loads(content)
    # handle errors
    if not content.get('status') == 'OK':
        if "errors" in content and content['errors'][0]['error'] == "Record not found":
            raise NotFound(path)
        if content.get('status') == '404':
            raise NotFound(path)
        raise CongressError(content, resp, url)
    if callable(parse):
        content = parse(content)
    return content
python
{ "resource": "" }
q7213
parse_date
train
def parse_date(s):
    """
    Parse a date using dateutil.parser.parse if available,
    falling back to datetime.datetime.strptime if not
    """
    if isinstance(s, (datetime.datetime, datetime.date)):
        return s
    try:
        from dateutil.parser import parse
    except ImportError:
        parse = lambda d: datetime.datetime.strptime(d, "%Y-%m-%d")
    return parse(s)
python
{ "resource": "" }
q7214
GroupBy._prep_spark_sql_groupby
train
def _prep_spark_sql_groupby(self):
    """Used Spark SQL group approach"""
    # Strip the index info
    non_index_columns = filter(lambda x: x not in self._prdd._index_names,
                               self._prdd._column_names())
    self._grouped_spark_sql = (self._prdd.to_spark_sql()
                               .select(non_index_columns)
                               .groupBy(self._by))
    self._columns = filter(lambda x: x != self._by, non_index_columns)
python
{ "resource": "" }
q7215
GroupBy._prep_pandas_groupby
train
def _prep_pandas_groupby(self):
    """Prepare the old school pandas group by based approach."""
    myargs = self._myargs
    mykwargs = self._mykwargs

    def extract_keys(groupedFrame):
        for key, group in groupedFrame:
            yield (key, group)

    def group_and_extract(frame):
        return extract_keys(frame.groupby(*myargs, **mykwargs))

    self._baseRDD = self._prdd._rdd()
    self._distributedRDD = self._baseRDD.flatMap(group_and_extract)
    self._mergedRDD = self._sortIfNeeded(
        self._group(self._distributedRDD))
python
{ "resource": "" }
q7216
GroupBy._group
train
def _group(self, rdd):
    """Group together the values with the same key."""
    return rdd.reduceByKey(lambda x, y: x.append(y))
python
{ "resource": "" }
q7217
GroupBy.ngroups
train
def ngroups(self):
    """Number of groups."""
    if self._can_use_new_school():
        return self._grouped_spark_sql.count()
    self._prep_pandas_groupby()
    return self._mergedRDD.count()
python
{ "resource": "" }
q7218
GroupBy.sum
train
def sum(self):
    """Compute the sum for each group."""
    if self._can_use_new_school():
        self._prep_spark_sql_groupby()
        import pyspark.sql.functions as func
        return self._use_aggregation(func.sum)
    self._prep_pandas_groupby()
    myargs = self._myargs
    mykwargs = self._mykwargs

    def create_combiner(x):
        return x.groupby(*myargs, **mykwargs).sum()

    def merge_value(x, y):
        return pd.concat([x, create_combiner(y)])

    def merge_combiner(x, y):
        return x + y

    rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey(
        create_combiner,
        merge_value,
        merge_combiner)).values()
    return DataFrame.fromDataFrameRDD(rddOfSum, self.sql_ctx)
python
{ "resource": "" }
q7219
GroupBy._create_exprs_using_func
train
def _create_exprs_using_func(self, f, columns):
    """Create aggregate expressions using the provided function with the
    result coming back as the original column name."""
    # Use the columns argument (the original mapped over self._columns,
    # silently ignoring the parameter)
    expressions = map(lambda c: f(c).alias(c), columns)
    return expressions
python
{ "resource": "" }
q7220
GroupBy._use_aggregation
train
def _use_aggregation(self, agg, columns=None):
    """Compute the result using the aggregation function provided.
    The aggregation name must also be provided so we can strip off the
    extra name that Spark SQL adds."""
    if not columns:
        columns = self._columns
    # Aggregate the requested columns (the original mapped over
    # self._columns, ignoring the columns argument, and imported
    # pyspark.sql.functions without using it)
    aggs = map(lambda column: agg(column).alias(column), columns)
    aggRdd = self._grouped_spark_sql.agg(*aggs)
    df = DataFrame.from_schema_rdd(aggRdd, self._by)
    return df
python
{ "resource": "" }
q7221
GroupBy._regroup_mergedRDD
train
def _regroup_mergedRDD(self):
    """A common pattern is we want to call groupby again on the dataframes
    so we can use the groupby functions.
    """
    myargs = self._myargs
    mykwargs = self._mykwargs
    self._prep_pandas_groupby()

    def regroup(df):
        return df.groupby(*myargs, **mykwargs)

    return self._mergedRDD.mapValues(regroup)
python
{ "resource": "" }
q7222
GroupBy.nth
train
def nth(self, n, *args, **kwargs):
    """Take the nth element of each group."""
    # TODO: Stop collecting the entire frame for each key.
    self._prep_pandas_groupby()
    myargs = self._myargs
    mykwargs = self._mykwargs
    nthRDD = self._regroup_mergedRDD().mapValues(
        lambda r: r.nth(n, *args, **kwargs)).values()
    return DataFrame.fromDataFrameRDD(nthRDD, self.sql_ctx)
python
{ "resource": "" }
q7223
GroupBy.apply
train
def apply(self, func, *args, **kwargs):
    """Apply the provided function and combine the results together in the
    same way as apply from groupby in pandas.

    This returns a DataFrame.
    """
    self._prep_pandas_groupby()

    def key_by_index(data):
        """Key each row by its index."""
        # TODO: Is there a better way to do this?
        for key, row in data.iterrows():
            yield (key, pd.DataFrame.from_dict(
                dict([(key, row)]), orient='index'))

    myargs = self._myargs
    mykwargs = self._mykwargs
    regroupedRDD = self._distributedRDD.mapValues(
        lambda data: data.groupby(*myargs, **mykwargs))
    appliedRDD = regroupedRDD.map(
        lambda key_data: key_data[1].apply(func, *args, **kwargs))
    reKeyedRDD = appliedRDD.flatMap(key_by_index)
    dataframe = self._sortIfNeeded(reKeyedRDD).values()
    return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
python
{ "resource": "" }
q7224
_create_function
train
def _create_function(name, doc=""):
    """Create a function for aggregator by name"""
    def _(col):
        spark_ctx = SparkContext._active_spark_context
        java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions,
                            name)
                    (col._java_ctx if isinstance(col, Column) else col))
        return Column(java_ctx)
    _.__name__ = name
    _.__doc__ = doc
    return _
python
{ "resource": "" }
q7225
PStatCounter.merge
train
def merge(self, frame):
    """
    Add another DataFrame to the PStatCounter.
    """
    for column, values in frame.iteritems():
        # Temporary hack, fix later
        counter = self._counters.get(column)
        for value in values:
            if counter is not None:
                counter.merge(value)
python
{ "resource": "" }
q7226
PStatCounter.merge_pstats
train
def merge_pstats(self, other):
    """
    Merge all of the stats counters of the other PStatCounter with our
    counters.
    """
    if not isinstance(other, PStatCounter):
        raise Exception("Can only merge PStatCounters!")
    for column, counter in self._counters.items():
        other_counter = other._counters.get(column)
        self._counters[column] = counter.mergeStats(other_counter)
    return self
python
{ "resource": "" }
q7227
_update_index_on_df
train
def _update_index_on_df(df, index_names):
    """Helper function to restore index information after collection. Doesn't
    use self so we can serialize this."""
    if index_names:
        df = df.set_index(index_names)
        # Remove names from unnamed indexes
        index_names = _denormalize_index_names(index_names)
        df.index.names = index_names
    return df
python
{ "resource": "" }
q7228
DataFrame._rdd
train
def _rdd(self):
    """Return an RDD of Panda DataFrame objects. This can be expensive
    especially if we don't do a narrow transformation after and get it back
    to Spark SQL land quickly."""
    columns = self._schema_rdd.columns
    index_names = self._index_names

    def fromRecords(records):
        if not records:
            return []
        else:
            loaded_df = pd.DataFrame.from_records([records],
                                                  columns=columns)
            indexed_df = _update_index_on_df(loaded_df, index_names)
            return [indexed_df]

    return self._schema_rdd.rdd.flatMap(fromRecords)
python
{ "resource": "" }
q7229
DataFrame._column_names
train
def _column_names(self):
    """Return the column names"""
    index_names = set(_normalize_index_names(self._index_names))
    column_names = [col_name for col_name in self._schema_rdd.columns
                    if col_name not in index_names]
    return column_names
python
{ "resource": "" }
q7230
DataFrame._evil_apply_with_dataframes
train
def _evil_apply_with_dataframes(self, func, preserves_cols=False):
    """Convert the underlying SchemaRDD to an RDD of DataFrames, apply
    the provided function, and convert the result back. This is hella
    slow."""
    source_rdd = self._rdd()
    result_rdd = func(source_rdd)
    # By default we don't know what the columns & indexes are so we let
    # from_rdd_of_dataframes look at the first partition to determine them.
    column_idxs = None
    if preserves_cols:
        index_names = self._index_names
        # Remove indexes from the columns
        columns = self._schema_rdd.columns[len(self._index_names):]
        column_idxs = (columns, index_names)
    return self.from_rdd_of_dataframes(
        result_rdd, column_idxs=column_idxs)
python
{ "resource": "" }
q7231
DataFrame._first_as_df
train
def _first_as_df(self):
    """Gets the first row as a Panda's DataFrame. Useful for functions like
    dtypes & ftypes"""
    columns = self._schema_rdd.columns
    df = pd.DataFrame.from_records(
        [self._schema_rdd.first()],
        columns=self._schema_rdd.columns)
    df = _update_index_on_df(df, self._index_names)
    return df
python
{ "resource": "" }
q7232
DataFrame.fromDataFrameRDD
train
def fromDataFrameRDD(cls, rdd, sql_ctx):
    """Construct a DataFrame from an RDD of DataFrames.
    No checking or validation occurs."""
    result = DataFrame(None, sql_ctx)
    return result.from_rdd_of_dataframes(rdd)
python
{ "resource": "" }
q7233
DataFrame.applymap
train
def applymap(self, f, **kwargs):
    """Return a new DataFrame by applying a function to each element of each
    Panda DataFrame."""
    def transform_rdd(rdd):
        return rdd.map(lambda data: data.applymap(f), **kwargs)
    return self._evil_apply_with_dataframes(transform_rdd,
                                            preserves_cols=True)
python
{ "resource": "" }
q7234
DataFrame.groupby
train
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
            group_keys=True, squeeze=False):
    """Returns a groupby on the schema rdd. This returns a GroupBy object.
    Note that grouping by a column name will be faster than most other
    options due to implementation."""
    from sparklingpandas.groupby import GroupBy
    return GroupBy(self, by=by, axis=axis, level=level, as_index=as_index,
                   sort=sort, group_keys=group_keys, squeeze=squeeze)
python
{ "resource": "" }
q7235
DataFrame.collect
train
def collect(self):
    """Collect the elements in a DataFrame and concatenate the
    partitions."""
    local_df = self._schema_rdd.toPandas()
    correct_idx_df = _update_index_on_df(local_df, self._index_names)
    return correct_idx_df
python
{ "resource": "" }
q7236
PRDD.applymap
train
def applymap(self, func, **kwargs):
    """Return a new PRDD by applying a function to each element of each
    pandas DataFrame."""
    return self.from_rdd(
        self._rdd.map(lambda data: data.applymap(func), **kwargs))
python
{ "resource": "" }
q7237
PRDD.collect
train
def collect(self):
    """Collect the elements in a PRDD and concatenate the partitions."""
    # The order in which frames are appended depends on the implementation
    # of reduce, which calls our function as f(valueToBeAdded, accumulator),
    # so we use our own reduce implementation.
    def append_frames(frame_a, frame_b):
        return frame_a.append(frame_b)
    return self._custom_rdd_reduce(append_frames)
python
{ "resource": "" }
q7238
PRDD._custom_rdd_reduce
train
def _custom_rdd_reduce(self, reduce_func):
    """Provides a custom RDD reduce which preserves ordering if the RDD has
    been sorted. This is useful for us because we need this functionality as
    many pandas operations support sorting the results. The standard reduce
    in PySpark does not have this property. Note that when PySpark no longer
    does partition reduces locally this code will also need to be updated."""
    from functools import reduce  # needed on Python 3

    def accumulating_iter(iterator):
        acc = None
        for obj in iterator:
            if acc is None:
                acc = obj
            else:
                acc = reduce_func(acc, obj)
        if acc is not None:
            yield acc

    vals = self._rdd.mapPartitions(accumulating_iter).collect()
    # Fold the per-partition results with reduce_func; the original passed
    # accumulating_iter here, which takes one argument and would fail.
    return reduce(reduce_func, vals)
python
{ "resource": "" }
q7239
PSparkContext.read_csv
train
def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0,
             *args, **kwargs):
    """Read a CSV file in and parse it into Pandas DataFrames.

    If no 'names' param is provided we parse the first row of the first
    partition of the data and use it for the column names.

    Parameters
    ----------
    file_path: string
        Path to input. Any valid file path in Spark works here, eg:
        'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/'
    use_whole_file: boolean
        Whether or not to use the whole file.
    names: list of strings, optional
    skiprows: integer, optional
        indicates how many rows of input to skip. This will only be
        applied to the first partition of the data (so if
        #skiprows > #rows in first partition this will not work).
        Generally this shouldn't be an issue for small values of
        skiprows. No other value of header is supported.
    All additional parameters available in pandas.read_csv() are usable
    here.

    Returns
    -------
    A SparklingPandas DataFrame that contains the data from the
    specified file.
    """
    def csv_file(partition_number, files):
        # pylint: disable=unexpected-keyword-arg
        file_count = 0
        for _, contents in files:
            # Only skip lines on the first file of the first partition
            if partition_number == 0 and file_count == 0 and _skiprows > 0:
                yield pandas.read_csv(sio(contents), *args, header=None,
                                      names=mynames, skiprows=_skiprows,
                                      **kwargs)
            else:
                yield pandas.read_csv(sio(contents), *args, header=None,
                                      names=mynames, **kwargs)
            # Count every file (the original only incremented in the else
            # branch, so skiprows could be applied more than once)
            file_count += 1

    def csv_rows(partition_number, rows):
        # pylint: disable=unexpected-keyword-arg
        in_str = "\n".join(rows)
        if partition_number == 0:
            return iter([
                pandas.read_csv(sio(in_str), *args, header=None,
                                names=mynames, skiprows=_skiprows,
                                **kwargs)])
        else:
            # could use .iterows instead?
            return iter([pandas.read_csv(sio(in_str), *args, header=None,
                                         names=mynames, **kwargs)])

    # If we need to, peek at the first partition and determine the column
    # names
    mynames = None
    _skiprows = skiprows
    if names:
        mynames = names
    else:
        # In the future we could avoid this expensive call.
        first_line = self.spark_ctx.textFile(file_path).first()
        frame = pandas.read_csv(sio(first_line), **kwargs)
        # pylint sees frame as a tuple despite it being a DataFrame
        mynames = list(frame.columns)
        _skiprows += 1

    # Do the actual load
    if use_whole_file:
        return self.from_pandas_rdd(
            self.spark_ctx.wholeTextFiles(file_path)
            .mapPartitionsWithIndex(csv_file))
    else:
        return self.from_pandas_rdd(
            self.spark_ctx.textFile(file_path)
            .mapPartitionsWithIndex(csv_rows))
python
{ "resource": "" }
q7240
PSparkContext.read_json
train
def read_json(self, file_path, *args, **kwargs):
    """Read a JSON file in and parse it into Pandas DataFrames.

    Currently it is not possible to skip the first n rows of a file.
    Headers are provided in the JSON file and not specified separately.

    Parameters
    ----------
    file_path: string
        Path to input. Any valid file path in Spark works here, eg:
        'my/path/in/local/file/system' or 'hdfs:/user/juliet/'
    Other than skiprows, all additional parameters available in
    pandas.read_json() are usable here.

    Returns
    -------
    A SparklingPandas DataFrame that contains the data from the
    specified file.
    """
    def json_file_to_df(files):
        """Transforms a JSON file into a list of data"""
        for _, contents in files:
            yield pandas.read_json(sio(contents), *args, **kwargs)

    return self.from_pandas_rdd(self.spark_ctx.wholeTextFiles(file_path)
                                .mapPartitions(json_file_to_df))
python
{ "resource": "" }
q7241
IIIFManipulatorGen.do_first
train
def do_first(self):
    """Load generator, set size.

    We take the generator module name from self.srcfile so that this
    manipulator will work with different generators in a similar way
    to how the ordinary generators work with different images
    """
    # Load generator module and create instance if we haven't already
    if (not self.srcfile):
        raise IIIFError(text=("No generator specified"))
    if (not self.gen):
        try:
            (name, ext) = os.path.splitext(self.srcfile)
            (pack, mod) = os.path.split(name)
            module_name = 'iiif.generators.' + mod
            try:
                module = sys.modules[module_name]
            except KeyError:
                self.logger.debug(
                    "Loading generator module %s" % (module_name))
                # Would be nice to use importlib but this is available only
                # in python 2.7 and higher
                pack = __import__(module_name)  # returns iiif package
                module = getattr(pack.generators, mod)
            self.gen = module.PixelGen()
        except ImportError:
            raise IIIFError(
                text=("Failed to load generator %s" % (str(self.srcfile))))
    (self.width, self.height) = self.gen.size
python
{ "resource": "" }
q7242
IIIFManipulatorGen.do_region
train
def do_region(self, x, y, w, h):
    """Record region."""
    if (x is None):
        self.rx = 0
        self.ry = 0
        self.rw = self.width
        self.rh = self.height
    else:
        self.rx = x
        self.ry = y
        self.rw = w
        self.rh = h
python
{ "resource": "" }
q7243
IIIFManipulatorGen.do_size
train
def do_size(self, w, h):
    """Record size."""
    if (w is None):
        self.sw = self.rw
        self.sh = self.rh
    else:
        self.sw = w
        self.sh = h
    # Now we have region and size, generate the image
    image = Image.new("RGB", (self.sw, self.sh),
                      self.gen.background_color)
    for y in range(0, self.sh):
        for x in range(0, self.sw):
            ix = int((x * self.rw) // self.sw + self.rx)
            iy = int((y * self.rh) // self.sh + self.ry)
            color = self.gen.pixel(ix, iy)
            if (color is not None):
                image.putpixel((x, y), color)
    self.image = image
python
{ "resource": "" }
q7244
IIIFAuthBasic.login_handler
train
def login_handler(self, config=None, prefix=None, **args):
    """HTTP Basic login handler.

    Respond with 401 and WWW-Authenticate header if there are no
    credentials or bad credentials. If there are credentials then
    simply check for username equal to password for validity.
    """
    headers = {}
    headers['Access-control-allow-origin'] = '*'
    headers['Content-type'] = 'text/html'
    auth = request.authorization
    if (auth and auth.username == auth.password):
        return self.set_cookie_close_window_response(
            "valid-http-basic-login")
    else:
        headers['WWW-Authenticate'] = (
            'Basic realm="HTTP-Basic-Auth at %s (u=p to login)"' %
            (self.name))
        return make_response("", 401, headers)
python
{ "resource": "" }
q7245
IIIFAuthClickthrough.login_service_description
train
def login_service_description(self):
    """Clickthrough login service description.

    The login service description _MUST_ include the token service
    description. Additionally, for a clickthrough login, the
    authentication pattern is indicated via the profile URI which is
    built using self.auth_pattern.
    """
    desc = super(IIIFAuthClickthrough, self).login_service_description()
    desc['confirmLabel'] = self.confirm_label
    return desc
python
{ "resource": "" }
q7246
IIIFRequest.clear
train
def clear(self):
    """Clear all data that might pertain to an individual IIIF URL.

    Does not change/reset the baseurl or API version which might be
    useful in a sequence of calls.
    """
    # API parameters
    self.identifier = None
    self.region = None
    self.size = None
    self.rotation = None
    self.quality = None
    self.format = None
    self.info = None
    # Derived data and flags
    self.region_full = False
    self.region_square = False
    self.region_pct = False
    self.region_xywh = None  # (x,y,w,h)
    self.size_full = False
    self.size_max = False  # new in 2.1
    self.size_pct = None
    self.size_bang = None
    self.size_wh = None  # (w,h)
    self.rotation_mirror = False
    self.rotation_deg = 0.0
python
{ "resource": "" }
q7247
IIIFRequest.api_version
train
def api_version(self, v):
    """Set the api_version and associated configurations."""
    self._api_version = v
    if (self._api_version >= '2.0'):
        self.default_quality = 'default'
        self.allowed_qualities = ['default', 'color', 'bitonal', 'gray']
    else:  # versions 1.0 and 1.1
        self.default_quality = 'native'
        self.allowed_qualities = ['native', 'color', 'bitonal', 'grey']
python
{ "resource": "" }
q7248
IIIFRequest.url
train
def url(self, **params):
    """Build a URL path for image or info request.

    An IIIF Image request with parameterized form is assumed unless the
    info parameter is specified, in which case an Image Information
    request URI is constructed.
    """
    self._setattrs(**params)
    path = self.baseurl + self.quote(self.identifier) + "/"
    if (self.info):
        # info request
        path += "info"
        format = self.format if self.format else "json"
    else:
        # region
        if self.region:
            region = self.region
        elif self.region_xywh:
            region = "%d,%d,%d,%d" % tuple(self.region_xywh)
        else:
            region = "full"
        # size
        if self.size:
            size = self.size
        elif self.size_wh:
            if (self.size_wh[0] is None):
                size = ",%d" % (self.size_wh[1])
            elif (self.size_wh[1] is None):
                size = "%d," % (self.size_wh[0])
            else:
                size = "%d,%d" % (self.size_wh[0], self.size_wh[1])
        elif (self.size_max and self.api_version >= '2.1'):
            size = 'max'
        else:
            size = "full"
        # rotation and quality
        rotation = self.rotation if self.rotation else "0"
        quality = self.quality if self.quality else self.default_quality
        # parameterized form
        path += self.quote(region) + "/" +\
            self.quote(size) + "/" +\
            self.quote(rotation) + "/" +\
            self.quote(quality)
        format = self.format
    if (format):
        path += "." + format
    return(path)
python
{ "resource": "" }
q7249
IIIFRequest.parse_url
train
def parse_url(self, url):
    """Parse an IIIF API URL path and each component.

    Will parse a URL or URL path that accords with either the
    parametrized or info request forms. Will raise an IIIFRequestError
    on failure. A wrapper for the split_url() and parse_parameters()
    methods.

    Note that behavior of split_url() depends on whether self.identifier
    is set.
    """
    self.split_url(url)
    if (not self.info):
        self.parse_parameters()
    return(self)
python
{ "resource": "" }
q7250
IIIFRequest.split_url
train
def split_url(self, url):
    """Parse an IIIF API URL path into components.

    Will parse a URL or URL path that accords with either the
    parametrized or info API forms. Will raise an IIIFRequestError on
    failure. If self.identifier is set then url is assumed not to
    include the identifier.
    """
    # clear data first
    identifier = self.identifier
    self.clear()
    # url must start with baseurl if set (including slash)
    if (self.baseurl is not None):
        (path, num) = re.subn('^' + self.baseurl, '', url, 1)
        if (num != 1):
            raise IIIFRequestError(
                text="Request URL does not start with base URL")
        url = path
    # Break up by path segments, count to decide format
    segs = url.split('/')
    if (identifier is not None):
        segs.insert(0, identifier)
    elif (self.allow_slashes_in_identifier):
        segs = self._allow_slashes_in_identifier_munger(segs)
    # Now have segments with identifier as first
    if (len(segs) > 5):
        raise IIIFRequestPathError(
            text="Request URL (%s) has too many path segments" % url)
    elif (len(segs) == 5):
        self.identifier = urlunquote(segs[0])
        self.region = urlunquote(segs[1])
        self.size = urlunquote(segs[2])
        self.rotation = urlunquote(segs[3])
        self.quality = self.strip_format(urlunquote(segs[4]))
        self.info = False
    elif (len(segs) == 2):
        self.identifier = urlunquote(segs[0])
        info_name = self.strip_format(urlunquote(segs[1]))
        if (info_name != "info"):
            raise IIIFRequestError(
                text="Bad name for Image Information")
        if (self.api_version == '1.0'):
            if (self.format not in ['json', 'xml']):
                raise IIIFRequestError(
                    text="Invalid format for Image Information (json and xml allowed)")
        elif (self.format != 'json'):
            raise IIIFRequestError(
                text="Invalid format for Image Information (only json allowed)")
        self.info = True
    elif (len(segs) == 1):
        self.identifier = urlunquote(segs[0])
        raise IIIFRequestBaseURI()
    else:
        raise IIIFRequestPathError(
            text="Bad number of path segments in request")
    return(self)
python
{ "resource": "" }
q7251
IIIFRequest.strip_format
train
def strip_format(self, str_and_format):
    """Look for optional .fmt at end of URI.

    The format must start with a letter. Note that we want to catch the
    case of a dot and no format (format='') which is different from no
    dot (format=None)

    Sets self.format as side effect, returns possibly modified string
    """
    m = re.match(r"(.+)\.([a-zA-Z]\w*)$", str_and_format)
    if (m):
        # There is a format string at end, chop off and store
        str_and_format = m.group(1)
        self.format = (m.group(2) if (m.group(2) is not None) else '')
    return(str_and_format)
python
{ "resource": "" }
q7252
IIIFRequest.parse_parameters
train
def parse_parameters(self):
    """Parse the parameters of an Image request.

    Will throw an IIIFRequestError on failure, set attributes on
    success. Care is taken not to change any of the attributes which
    store path components. All parsed values are stored in new
    attributes.
    """
    self.parse_region()
    self.parse_size()
    self.parse_rotation()
    self.parse_quality()
    self.parse_format()
python
{ "resource": "" }
q7253
IIIFRequest.parse_region
train
def parse_region(self):
    """Parse the region component of the path.

    /full/ -> self.region_full = True (test this first)
    /square/ -> self.region_square = True (test this second)
    /x,y,w,h/ -> self.region_xywh = (x,y,w,h)
    /pct:x,y,w,h/ -> self.region_xywh and self.region_pct = True

    Will throw errors if the parameters are illegal according to the
    specification but does not know about and thus cannot do any tests
    against any image being manipulated.
    """
    self.region_full = False
    self.region_square = False
    self.region_pct = False
    if (self.region is None or self.region == 'full'):
        self.region_full = True
        return
    if (self.api_version >= '2.1' and self.region == 'square'):
        self.region_square = True
        return
    xywh = self.region
    pct_match = re.match('pct:(.*)$', self.region)
    if (pct_match):
        xywh = pct_match.group(1)
        self.region_pct = True
    # Now, whether this was pct: or not, we expect 4 values...
    str_values = xywh.split(',', 5)
    if (len(str_values) != 4):
        raise IIIFRequestError(
            code=400, parameter="region",
            text="Bad number of values in region specification, "
                 "must be x,y,w,h but got %d value(s) from '%s'" %
                 (len(str_values), xywh))
    values = []
    for str_value in str_values:
        # Must be either integer (not pct) or integer/float (pct)
        if (pct_match):
            try:
                # This is rather more permissive than the iiif spec
                value = float(str_value)
            except ValueError:
                raise IIIFRequestError(
                    parameter="region",
                    text="Bad floating point value for percentage in "
                         "region (%s)." % str_value)
            if (value > 100.0):
                raise IIIFRequestError(
                    parameter="region",
                    text="Percentage value over 100.0 in region "
                         "(%s)." % str_value)
        else:
            try:
                value = int(str_value)
            except ValueError:
                raise IIIFRequestError(
                    parameter="region",
                    text="Bad integer value in region (%s)." % str_value)
            if (value < 0):
                raise IIIFRequestError(
                    parameter="region",
                    text="Negative values not allowed in region (%s)." %
                         str_value)
        values.append(value)
    # Zero size region if w or h are zero (careful that they may be float)
    if (values[2] == 0.0 or values[3] == 0.0):
        raise IIIFZeroSizeError(
            code=400, parameter="region",
            text="Zero size region specified (%s)." % xywh)
    self.region_xywh = values
python
{ "resource": "" }
q7254
IIIFRequest.parse_size
train
def parse_size(self, size=None):
    """Parse the size component of the path.

    /full/ -> self.size_full = True
    /max/ -> self.size_max = True (2.1 and up)
    /w,/ -> self.size_wh = (w,None)
    /,h/ -> self.size_wh = (None,h)
    /w,h/ -> self.size_wh = (w,h)
    /pct:p/ -> self.size_pct = p
    /!w,h/ -> self.size_wh = (w,h), self.size_bang = True

    Expected use:
      (w,h) = iiif.size_to_apply(region_w,region_h)
      if (q is None):
          # full image
      else:
          # scale to w by h
    Returns (None,None) if no scaling is required.
    """
    if (size is not None):
        self.size = size
    self.size_pct = None
    self.size_bang = False
    self.size_full = False
    self.size_wh = (None, None)
    if (self.size is None or self.size == 'full'):
        self.size_full = True
        return
    elif (self.size == 'max' and self.api_version >= '2.1'):
        self.size_max = True
        return
    pct_match = re.match('pct:(.*)$', self.size)
    if (pct_match is not None):
        pct_str = pct_match.group(1)
        try:
            self.size_pct = float(pct_str)
        except ValueError:
            raise IIIFRequestError(
                code=400, parameter="size",
                text="Percentage size value must be a number, got "
                     "'%s'." % (pct_str))
        # Note that the Image API specification places no upper limit on
        # size so none is implemented here.
        if (self.size_pct < 0.0):
            raise IIIFRequestError(
                code=400, parameter="size",
                text="Bad size percentage, must be >= 0.0, got %f." %
                     (self.size_pct))
    else:
        if (self.size[0] == '!'):
            # Have "!w,h" form
            size_no_bang = self.size[1:]
            (mw, mh) = self._parse_w_comma_h(size_no_bang, 'size')
            if (mw is None or mh is None):
                raise IIIFRequestError(
                    code=400, parameter="size",
                    text="Illegal size requested: both w,h must be "
                         "specified in !w,h requests.")
            self.size_wh = (mw, mh)
            self.size_bang = True
        else:
            # Must now be "w,h", "w," or ",h"
            self.size_wh = self._parse_w_comma_h(self.size, 'size')
        # Sanity check w,h
        (w, h) = self.size_wh
        if ((w is not None and w <= 0) or
                (h is not None and h <= 0)):
            raise IIIFZeroSizeError(
                code=400, parameter='size',
                text="Size parameters request zero size result image.")
python
{ "resource": "" }
q7255
IIIFRequest._parse_w_comma_h
train
def _parse_w_comma_h(self, whstr, param):
    """Utility to parse "w,h" "w," or ",h" values.

    Returns (w,h) where w,h are either None or integer. Will throw a
    ValueError if there is a problem with one or both.
    """
    try:
        (wstr, hstr) = whstr.split(',', 2)
        w = self._parse_non_negative_int(wstr, 'w')
        h = self._parse_non_negative_int(hstr, 'h')
    except ValueError as e:
        raise IIIFRequestError(
            code=400, parameter=param,
            text="Illegal %s value (%s)." % (param, str(e)))
    if (w is None and h is None):
        raise IIIFRequestError(
            code=400, parameter=param,
            text="Must specify at least one of w,h for %s." % (param))
    return(w, h)
python
{ "resource": "" }
q7256
IIIFRequest.parse_quality
train
def parse_quality(self):
    """Check quality parameter.

    Sets self.quality_val, substituting the default quality when none
    is given. Checks that the value is one of the allowed qualities,
    else throws an IIIFRequestError.
    """
    if (self.quality is None):
        self.quality_val = self.default_quality
    elif (self.quality not in self.allowed_qualities):
        raise IIIFRequestError(
            code=400, parameter="quality",
            text="The quality parameter must be '%s', got '%s'." %
                 ("', '".join(self.allowed_qualities), self.quality))
    else:
        self.quality_val = self.quality
python
{ "resource": "" }
q7257
IIIFRequest.parse_format
train
def parse_format(self):
    """Check format parameter.

    All format values listed in the specification are lowercase
    alphanumeric values commonly used as file extensions. To leave
    opportunity for extension here just do a limited sanity check on
    characters and length.
    """
    if (self.format is not None and
            not re.match(r'''\w{1,20}$''', self.format)):
        raise IIIFRequestError(parameter='format',
                               text='Bad format parameter')
python
{ "resource": "" }
q7258
IIIFRequest.is_scaled_full_image
train
def is_scaled_full_image(self):
    """True if this request is for a scaled full image.

    To be used to determine whether this request should be used in the
    set of `sizes` specified in the Image Information.
    """
    return(self.region_full and
           self.size_wh[0] is not None and
           self.size_wh[1] is not None and
           not self.size_bang and
           self.rotation_deg == 0.0 and
           self.quality == self.default_quality and
           self.format == 'jpg')
python
{ "resource": "" }
q7259
create_reference_server_flask_app
train
def create_reference_server_flask_app(cfg):
    """Create reference server Flask application with one or more IIIF
    handlers."""
    # Create Flask app
    app = Flask(__name__)
    Flask.secret_key = "SECRET_HERE"
    app.debug = cfg.debug
    # Install request handlers
    client_prefixes = dict()
    for api_version in cfg.api_versions:
        handler_config = Config(cfg)
        handler_config.api_version = api_version
        handler_config.klass_name = 'pil'
        handler_config.auth_type = 'none'
        # Set same prefix on local server as expected on iiif.io
        handler_config.prefix = "api/image/%s/example/reference" % (api_version)
        handler_config.client_prefix = handler_config.prefix
        add_handler(app, handler_config)
    return app
python
{ "resource": "" }
q7260
IIIFInfo.id
train
def id(self):
    """id property based on server_and_prefix and identifier."""
    id = ''
    if (self.server_and_prefix is not None and
            self.server_and_prefix != ''):
        id += self.server_and_prefix + '/'
    if (self.identifier is not None):
        id += self.identifier
    return id
python
{ "resource": "" }
q7261
IIIFInfo.id
train
def id(self, value):
    """Split into server_and_prefix and identifier."""
    i = value.rfind('/')
    if (i > 0):
        self.server_and_prefix = value[:i]
        self.identifier = value[(i + 1):]
    elif (i == 0):
        self.server_and_prefix = ''
        self.identifier = value[(i + 1):]
    else:
        self.server_and_prefix = ''
        self.identifier = value
python
{ "resource": "" }
q7262
IIIFInfo.set_version_info
train
def set_version_info(self, api_version=None):
    """Set up normal values for given api_version.

    Will use current value of self.api_version if a version number is
    not specified in the call. Will raise an IIIFInfoError if the
    version is unknown.
    """
    if (api_version is None):
        api_version = self.api_version
    if (api_version not in CONF):
        raise IIIFInfoError("Unknown API version %s" % (api_version))
    self.params = CONF[api_version]['params']
    self.array_params = CONF[api_version]['array_params']
    self.complex_params = CONF[api_version]['complex_params']
    for a in ('context', 'compliance_prefix', 'compliance_suffix',
              'protocol', 'required_params'):
        if (a in CONF[api_version]):
            self._setattr(a, CONF[api_version][a])
python
{ "resource": "" }
q7263
IIIFInfo.compliance
train
def compliance(self, value):
    """Set the compliance profile URI."""
    if (self.api_version < '2.0'):
        self.profile = value
    else:
        try:
            self.profile[0] = value
        except (AttributeError, TypeError):
            # handle case where profile not initialized as array (item
            # assignment on None or a string raises TypeError, which the
            # original bare AttributeError clause did not catch)
            self.profile = [value]
python
{ "resource": "" }
q7264
IIIFInfo.level
train
def level(self):
    """Extract level number from compliance profile URI.

    Returns integer level number or raises IIIFInfoError
    """
    m = re.match(
        self.compliance_prefix + r'(\d)' + self.compliance_suffix + r'$',
        self.compliance)
    if (m):
        return int(m.group(1))
    raise IIIFInfoError(
        "Bad compliance profile URI, failed to extract level number")
python
{ "resource": "" }
q7265
IIIFInfo.level
train
def level(self, value):
    """Build profile URI from level.

    Level should be an integer 0,1,2
    """
    self.compliance = self.compliance_prefix + \
        ("%d" % value) + self.compliance_suffix
python
{ "resource": "" }
q7266
IIIFInfo.add_service
train
def add_service(self, service):
    """Add a service description.

    Handles transition from self.service=None, self.service=dict for a
    single service, and then self.service=[dict,dict,...] for multiple
    """
    if (self.service is None):
        self.service = service
    elif (isinstance(self.service, dict)):
        self.service = [self.service, service]
    else:
        self.service.append(service)
python
{ "resource": "" }
q7267
IIIFInfo.validate
train
def validate(self):
    """Validate this object as Image API data.

    Raise IIIFInfoError with helpful message if not valid.
    """
    errors = []
    for param in self.required_params:
        if (not hasattr(self, param) or getattr(self, param) is None):
            errors.append("missing %s parameter" % (param))
    if (len(errors) > 0):
        raise IIIFInfoError("Bad data for info.json: " +
                            ", ".join(errors))
    return True
python
{ "resource": "" }
q7268
IIIFInfo.as_json
train
def as_json(self, validate=True):
    """Return JSON serialization.

    Will raise IIIFInfoError if insufficient parameters are present to
    have a valid info.json response (unless validate is False).
    """
    if (validate):
        self.validate()
    json_dict = {}
    if (self.api_version > '1.0'):
        json_dict['@context'] = self.context
    params_to_write = set(self.params)
    params_to_write.discard('identifier')
    if (self.identifier):
        if (self.api_version == '1.0'):
            json_dict['identifier'] = self.identifier  # local id
        else:
            json_dict['@id'] = self.id  # URI
    params_to_write.discard('profile')
    if (self.compliance):
        if (self.api_version < '2.0'):
            json_dict['profile'] = self.compliance
        else:
            # FIXME - need to support extra profile features
            json_dict['profile'] = [self.compliance]
            d = {}
            if (self.formats is not None):
                d['formats'] = self.formats
            if (self.qualities is not None):
                d['qualities'] = self.qualities
            if (self.supports is not None):
                d['supports'] = self.supports
            if (len(d) > 0):
                json_dict['profile'].append(d)
            params_to_write.discard('formats')
            params_to_write.discard('qualities')
            params_to_write.discard('supports')
    for param in params_to_write:
        if (hasattr(self, param) and
                getattr(self, param) is not None):
            json_dict[param] = getattr(self, param)
    return(json.dumps(json_dict, sort_keys=True, indent=2))
python
{ "resource": "" }
q7269
IIIFInfo.read
train
def read(self, fh, api_version=None):
    """Read info.json from file like object.

    Parameters:
    fh -- file like object supporting fh.read()
    api_version -- IIIF Image API version expected

    If api_version is set then the parsing will assume this API version,
    else the version will be determined from the incoming data. NOTE
    that the value of self.api_version is NOT used in this routine.

    If an api_version is specified and there is a @context specified
    then an IIIFInfoError will be raised unless these match. If no
    known @context is present and no api_version set then an
    IIIFInfoError will be raised.
    """
    j = json.load(fh)
    #
    # @context and API version
    self.context = None
    if (api_version == '1.0'):
        # v1.0 did not have a @context so we simply take the version
        # passed in
        self.api_version = api_version
    elif ('@context' in j):
        # determine API version from context
        self.context = j['@context']
        api_version_read = None
        for v in CONF:
            if (v > '1.0' and self.context == CONF[v]['context']):
                api_version_read = v
                break
        if (api_version_read is None):
            raise IIIFInfoError(
                "Unknown @context, cannot determine API version (%s)" %
                (self.context))
        else:
            if (api_version is not None and
                    api_version != api_version_read):
                raise IIIFInfoError(
                    "Expected API version '%s' but got @context for API version '%s'" %
                    (api_version, api_version_read))
            else:
                self.api_version = api_version_read
    else:  # no @context and not 1.0
        if (api_version is None):
            raise IIIFInfoError("No @context (and no default given)")
        self.api_version = api_version
    self.set_version_info()
    #
    # @id or identifier
    if (self.api_version == '1.0'):
        if ('identifier' in j):
            self.id = j['identifier']
        else:
            raise IIIFInfoError("Missing identifier in info.json")
    else:
        if ('@id' in j):
            self.id = j['@id']
        else:
            raise IIIFInfoError("Missing @id in info.json")
    #
    # other params
    for param in self.params:
        if (param == 'identifier'):
            continue  # dealt with above
        if (param in j):
            if (param in self.complex_params):
                # use function ref in complex_params to parse, optional
                # dst to map to a different property name
                self._setattr(param,
                              self.complex_params[param](self, j[param]))
            else:
                self._setattr(param, j[param])
    return True
python
{ "resource": "" }
q7270
IIIFManipulatorPIL.set_max_image_pixels
train
def set_max_image_pixels(self, pixels):
    """Set PIL limit on pixel size of images to load if non-zero.

    WARNING: This is a global setting in PIL, it is not local to this
    manipulator instance! Setting a value here will not only set the
    given limit but also convert the PIL "DecompressionBombWarning"
    into an error. Thus setting a moderate limit sets a hard limit on
    image size loaded, setting a very large limit will have the effect
    of disabling the warning.
    """
    if (pixels):
        Image.MAX_IMAGE_PIXELS = pixels
        Image.warnings.simplefilter(
            'error', Image.DecompressionBombWarning)
python
{ "resource": "" }
q7271
IIIFManipulatorPIL.do_first
train
def do_first(self):
    """Create PIL object from input image file.

    Image location must be in self.srcfile. Will result in self.width
    and self.height being set to the image dimensions. Will raise an
    IIIFError on failure to load the image
    """
    self.logger.debug("do_first: src=%s" % (self.srcfile))
    try:
        self.image = Image.open(self.srcfile)
    except Image.DecompressionBombWarning as e:
        # This exception will be raised only if PIL has been
        # configured to raise an error in the case of images
        # that exceed Image.MAX_IMAGE_PIXELS, with
        # Image.warnings.simplefilter('error', Image.DecompressionBombWarning)
        raise IIIFError(text=("Image size limit exceeded (PIL: %s)" %
                              (str(e))))
    except Exception as e:
        raise IIIFError(text=("Failed to read image (PIL: %s)" %
                              (str(e))))
    (self.width, self.height) = self.image.size
python
{ "resource": "" }
q7272
html_page
train
def html_page(title="Page Title", body=""):
    """Create HTML page as string."""
    html = "<html>\n<head><title>%s</title></head>\n<body>\n" % (title)
    html += "<h1>%s</h1>\n" % (title)
    html += body
    html += "</body>\n</html>\n"
    return html
python
{ "resource": "" }
q7273
top_level_index_page
train
def top_level_index_page(config):
    """HTML top-level index page which provides a link to each handler."""
    title = "IIIF Test Server on %s" % (config.host)
    body = "<ul>\n"
    for prefix in sorted(config.prefixes.keys()):
        body += '<li><a href="/%s">%s</a></li>\n' % (prefix, prefix)
    body += "</ul>\n"
    return html_page(title, body)
python
{ "resource": "" }
q7274
identifiers
train
def identifiers(config):
    """Show list of identifiers for this prefix.

    Handles both the case of local file based identifiers and also
    image generators.

    Arguments:
        config - configuration object in which:
            config.klass_name - 'gen' if a generator function
            config.generator_dir - directory for generator code
            config.image_dir - directory for images

    Returns:
        ids - a list of ids
    """
    ids = []
    if (config.klass_name == 'gen'):
        for generator in os.listdir(config.generator_dir):
            if (generator == '__init__.py'):
                continue
            (gid, ext) = os.path.splitext(generator)
            if (ext == '.py' and
                    os.path.isfile(os.path.join(config.generator_dir,
                                                generator))):
                ids.append(gid)
    else:
        for image_file in os.listdir(config.image_dir):
            (iid, ext) = os.path.splitext(image_file)
            if (ext in ['.jpg', '.png', '.tif'] and
                    os.path.isfile(os.path.join(config.image_dir,
                                                image_file))):
                ids.append(iid)
    return ids
python
{ "resource": "" }
q7275
prefix_index_page
train
def prefix_index_page(config):
    """HTML index page for a specific prefix.

    The prefix seen by the client is obtained from config.client_prefix
    as opposed to the local server prefix in config.prefix. Also uses
    the identifiers(config) function to get identifiers available.

    Arguments:
        config - configuration object in which:
            config.client_prefix - URI path prefix seen by client
            config.host - URI host seen by client
            config.api_version - string for api_version
            config.manipulator - string manipulator type
            config.auth_type - string for auth type
            config.include_osd - whether OSD is included
    """
    title = "IIIF Image API services under %s" % (config.client_prefix)
    # details of this prefix handler
    body = '<p>\n'
    body += 'host = %s<br/>\n' % (config.host)
    body += 'api_version = %s<br/>\n' % (config.api_version)
    body += 'manipulator = %s<br/>\n' % (config.klass_name)
    body += 'auth_type = %s\n</p>\n' % (config.auth_type)
    # table of identifiers and example requests
    ids = identifiers(config)
    api_version = config.api_version
    default = 'native' if api_version < '2.0' else 'default'
    body += '<table border="1">\n<tr><th align="left">Image identifier</th>'
    body += '<th> </th><th>full</th>'
    if (config.klass_name != 'dummy'):
        body += '<th>256,256</th>'
        body += '<th>30deg</th>'
    if (config.include_osd):
        body += '<th> </th>'
    body += "</tr>\n"
    for identifier in sorted(ids):
        base = urljoin('/', config.client_prefix + '/' + identifier)
        body += '<tr><th align="left">%s</th>' % (identifier)
        info = base + "/info.json"
        body += '<td><a href="%s">%s</a></td>' % (info, 'info')
        suffix = "full/full/0/%s" % (default)
        body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
        if (config.klass_name != 'dummy'):
            suffix = "full/256,256/0/%s" % (default)
            body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
            suffix = "full/100,/30/%s" % (default)
            body += '<td><a href="%s">%s</a></td>' % (base + '/' + suffix, suffix)
        if (config.include_osd):
            body += '<td><a href="%s/osd.html">OSD</a></td>' % (base)
        body += "</tr>\n"
    body += "</table>\n"  # fixed closing tag (original had "</table<")
    return html_page(title, body)
python
{ "resource": "" }
q7276
osd_page_handler
train
def osd_page_handler(config=None, identifier=None, prefix=None, **args):
    """Flask handler to produce HTML response for OpenSeadragon view of
    identifier.

    Arguments:
        config - Config object for this IIIF handler
        identifier - identifier of image/generator
        prefix - path prefix
        **args - other arguments ignored
    """
    template_dir = os.path.join(os.path.dirname(__file__), 'templates')
    with open(os.path.join(template_dir, 'testserver_osd.html'), 'r') as f:
        template = f.read()
    d = dict(prefix=prefix,
             identifier=identifier,
             api_version=config.api_version,
             osd_version='2.0.0',
             osd_uri='/openseadragon200/openseadragon.min.js',
             osd_images_prefix='/openseadragon200/images',
             osd_height=500,
             osd_width=500,
             info_json_uri='info.json')
    return make_response(Template(template).safe_substitute(d))
python
{ "resource": "" }
q7277
iiif_info_handler
train
def iiif_info_handler(prefix=None, identifier=None,
                      config=None, klass=None, auth=None, **args):
    """Handler for IIIF Image Information requests."""
    if (not auth or degraded_request(identifier) or auth.info_authz()):
        # go ahead with request as made
        if (auth):
            logging.debug("Authorized for image %s" % identifier)
        i = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return i.image_information_response()
        except IIIFError as e:
            return i.error_response(e)
    elif (auth.info_authn()):
        # authn but not authz -> 401
        abort(401)
    else:
        # redirect to degraded
        response = redirect(host_port_prefix(
            config.host, config.port, prefix) + '/' + identifier +
            '-deg/info.json')
        response.headers['Access-control-allow-origin'] = '*'
        return response
python
{ "resource": "" }
q7278
iiif_image_handler
train
def iiif_image_handler(prefix=None, identifier=None,
                       path=None, config=None, klass=None, auth=None, **args):
    """Handler for IIIF Image Requests.

    Behaviour for the non-authn or non-authz case is to return 403.
    """
    if (not auth or degraded_request(identifier) or auth.image_authz()):
        # serve image
        if (auth):
            logging.debug("Authorized for image %s" % identifier)
        i = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return i.image_request_response(path)
        except IIIFError as e:
            return i.error_response(e)
    else:
        # redirect to degraded (for not authz and for authn but not authz too)
        degraded_uri = host_port_prefix(
            config.host, config.port, prefix) + '/' + identifier + '-deg/' + path
        logging.info("Redirection to degraded: %s" % degraded_uri)
        response = redirect(degraded_uri)
        response.headers['Access-control-allow-origin'] = '*'
        return response
python
{ "resource": "" }
q7279
parse_accept_header
train
def parse_accept_header(accept):
    """Parse an HTTP Accept header.

    Parses *accept*, returning a list of tuples of
    (media_type, media_params, q_value), ordered by q values.

    Adapted from <https://djangosnippets.org/snippets/1042/>
    """
    result = []
    for media_range in accept.split(","):
        parts = media_range.split(";")
        media_type = parts.pop(0).strip()
        media_params = []
        q = 1.0
        for part in parts:
            (key, value) = part.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                media_params.append((key, value))
        result.append((media_type, tuple(media_params), q))
    result.sort(key=lambda x: -x[2])
    return result
python
{ "resource": "" }
q7280
parse_authorization_header
train
def parse_authorization_header(value):
    """Parse the Authorization header.

    Returns nothing on failure, opts hash on success with type='basic'
    or 'digest' and other params.

    <http://nullege.com/codes/search/werkzeug.http.parse_authorization_header>
    <http://stackoverflow.com/questions/1349367/parse-an-http-request-authorization-header-with-python>
    <http://bugs.python.org/file34041/0001-Add-an-authorization-header-to-the-initial-request.patch>
    """
    try:
        (auth_type, auth_info) = value.split(' ', 1)
        auth_type = auth_type.lower()
    except ValueError:
        return
    if (auth_type == 'basic'):
        try:
            decoded = base64.b64decode(auth_info).decode(
                'utf-8')  # b64decode gives bytes in python3
            (username, password) = decoded.split(':', 1)
        except (ValueError, TypeError):  # py3, py2
            return
        return {'type': 'basic',
                'username': username,
                'password': password}
    elif (auth_type == 'digest'):
        try:
            auth_map = parse_keqv_list(parse_http_list(auth_info))
        except ValueError:
            return
        logging.debug(auth_map)
        for key in 'username', 'realm', 'nonce', 'uri', 'response':
            if key not in auth_map:
                return
        if 'qop' in auth_map and ('nc' not in auth_map or
                                  'cnonce' not in auth_map):
            return
        auth_map['type'] = 'digest'
        return auth_map
    else:
        # unknown auth type
        return
python
{ "resource": "" }
q7281
do_conneg
train
def do_conneg(accept, supported):
    """Parse accept header and look for preferred type in supported list.

    Arguments:
        accept - HTTP Accept header
        supported - list of MIME types supported by the server

    Returns:
        supported MIME type with highest q value in request, else None.

    FIXME - Should replace this with negotiator2
    """
    for result in parse_accept_header(accept):
        mime_type = result[0]
        if (mime_type in supported):
            return mime_type
    return None
python
{ "resource": "" }
q7282
setup_auth_paths
train
def setup_auth_paths(app, auth, prefix, params):
    """Add URL rules for auth paths."""
    base = urljoin('/', prefix + '/')  # Must end in slash
    app.add_url_rule(base + 'login', prefix + 'login_handler',
                     auth.login_handler, defaults=params)
    app.add_url_rule(base + 'logout', prefix + 'logout_handler',
                     auth.logout_handler, defaults=params)
    if (auth.client_id_handler):
        app.add_url_rule(base + 'client', prefix + 'client_id_handler',
                         auth.client_id_handler, defaults=params)
    app.add_url_rule(base + 'token', prefix + 'access_token_handler',
                     auth.access_token_handler, defaults=params)
    if (auth.home_handler):
        app.add_url_rule(base + 'home', prefix + 'home_handler',
                         auth.home_handler, defaults=params)
python
{ "resource": "" }
q7283
make_prefix
train
def make_prefix(api_version, manipulator, auth_type):
    """Make prefix string based on configuration parameters."""
    prefix = "%s_%s" % (api_version, manipulator)
    if (auth_type and auth_type != 'none'):
        prefix += '_' + auth_type
    return prefix
python
{ "resource": "" }
q7284
split_comma_argument
train
def split_comma_argument(comma_sep_str):
    """Split a comma separated option into a list."""
    terms = []
    for term in comma_sep_str.split(','):
        if term:
            terms.append(term)
    return terms
python
{ "resource": "" }
q7285
write_pid_file
train
def write_pid_file():
    """Write a file with the PID of this server instance.

    Call when setting up a command line testserver.
    """
    pidfile = os.path.basename(sys.argv[0])[:-3] + '.pid'  # strip .py, add .pid
    with open(pidfile, 'w') as fh:
        fh.write("%d\n" % os.getpid())
python
{ "resource": "" }
q7286
setup_app
train
def setup_app(app, cfg):
    """Setup Flask app and handle reverse proxy setup if configured.

    Arguments:
        app - Flask application
        cfg - configuration data
    """
    # Set up app_host and app_port in case that we are running
    # under reverse proxy setup, otherwise they default to
    # config.host and config.port.
    if (cfg.app_host and cfg.app_port):
        logging.warning("Reverse proxy for service at http://%s:%d/ ..." %
                        (cfg.host, cfg.port))
        app.wsgi_app = ReverseProxied(app.wsgi_app, cfg.host)
    elif (cfg.app_host or cfg.app_port):
        logging.critical("Must specify both app-host and app-port "
                         "for reverse proxy configuration, aborting")
        sys.exit(1)
    else:
        cfg.app_host = cfg.host
        cfg.app_port = cfg.port
    logging.warning("Setup server on http://%s:%d/ ..." %
                    (cfg.app_host, cfg.app_port))
    return(app)
python
{ "resource": "" }
q7287
IIIFHandler.server_and_prefix
train
def server_and_prefix(self):
    """Server and prefix from config."""
    return(host_port_prefix(self.config.host, self.config.port,
                            self.prefix))
python
{ "resource": "" }
q7288
IIIFHandler.json_mime_type
train
def json_mime_type(self):
    """Return the MIME type for a JSON response.

    For version 2.0+ the server must return the json-ld MIME type if
    that format is requested. Implemented for 1.1 also.
    http://iiif.io/api/image/2.1/#information-request
    """
    mime_type = "application/json"
    if (self.api_version >= '1.1' and 'Accept' in request.headers):
        mime_type = do_conneg(request.headers['Accept'],
                              ['application/ld+json']) or mime_type
    return mime_type
python
{ "resource": "" }
q7289
IIIFHandler.file
train
def file(self):
    """Filename property for the source image for the current identifier."""
    file = None
    if (self.config.klass_name == 'gen'):
        for ext in ['.py']:
            file = os.path.join(self.config.generator_dir,
                                self.identifier + ext)
            if (os.path.isfile(file)):
                return file
    else:
        for ext in ['.jpg', '.png', '.tif']:
            file = os.path.join(self.config.image_dir,
                                self.identifier + ext)
            if (os.path.isfile(file)):
                return file
    # failed, show list of available identifiers as error
    available = "\n ".join(identifiers(self.config))
    raise IIIFError(code=404, parameter="identifier",
                    text="Image resource '" + self.identifier +
                         "' not found. Local resources available:" +
                         available + "\n")
python
{ "resource": "" }
q7290
IIIFHandler.add_compliance_header
train
def add_compliance_header(self):
    """Add IIIF Compliance level header to response."""
    if (self.manipulator.compliance_uri is not None):
        self.headers['Link'] = '<' + self.manipulator.compliance_uri + '>;rel="profile"'
python
{ "resource": "" }
q7291
IIIFHandler.make_response
train
def make_response(self, content, code=200, headers=None):
    """Wrapper around Flask.make_response which also adds any local headers."""
    if headers:
        for header in headers:
            self.headers[header] = headers[header]
    return make_response(content, code, self.headers)
python
{ "resource": "" }
q7292
IIIFHandler.image_information_response
train
def image_information_response(self):
    """Parse image information request and create response."""
    dr = degraded_request(self.identifier)
    if (dr):
        self.logger.info("image_information: degraded %s -> %s" % (self.identifier, dr))
        self.degraded = self.identifier
        self.identifier = dr
    else:
        self.logger.info("image_information: %s" % (self.identifier))
    # get size
    self.manipulator.srcfile = self.file
    self.manipulator.do_first()
    # most of info.json comes from config, a few things specific to image
    info = {'tile_height': self.config.tile_height,
            'tile_width': self.config.tile_width,
            'scale_factors': self.config.scale_factors}
    # calculate scale factors if not hard-coded
    if ('auto' in self.config.scale_factors):
        info['scale_factors'] = self.manipulator.scale_factors(
            self.config.tile_width, self.config.tile_height)
    i = IIIFInfo(conf=info, api_version=self.api_version)
    i.server_and_prefix = self.server_and_prefix
    i.identifier = self.iiif.identifier
    i.width = self.manipulator.width
    i.height = self.manipulator.height
    if (self.api_version >= '2.0'):
        # FIXME - should come from manipulator
        i.qualities = ["default", "color", "gray"]
    else:
        # FIXME - should come from manipulator
        i.qualities = ["native", "color", "gray"]
    i.formats = ["jpg", "png"]  # FIXME - should come from manipulator
    if (self.auth):
        self.auth.add_services(i)
    return self.make_response(i.as_json(),
                              headers={"Content-Type": self.json_mime_type})
python
{ "resource": "" }
q7293
IIIFHandler.image_request_response
train
def image_request_response(self, path):
    """Parse image request and create response."""
    # Parse the request in path
    if (len(path) > 1024):
        raise IIIFError(code=414,
                        text="URI Too Long: Max 1024 chars, got %d\n" % len(path))
    try:
        self.iiif.identifier = self.identifier
        self.iiif.parse_url(path)
    except IIIFRequestPathError as e:
        # Reraise as IIIFError with code=404 because we can't tell
        # whether there was an encoded slash in the identifier or
        # whether there was a bad number of path segments.
        raise IIIFError(code=404, text=e.text)
    except IIIFError as e:
        # Pass through
        raise e
    except Exception as e:
        # Something completely unexpected => 500
        raise IIIFError(code=500,
                        text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")")
    dr = degraded_request(self.identifier)
    if (dr):
        self.logger.info("image_request: degraded %s -> %s" % (self.identifier, dr))
        self.degraded = self.identifier
        self.identifier = dr
        self.iiif.quality = 'gray'
    else:
        # Parsed request OK, attempt to fulfill
        self.logger.info("image_request: %s" % (self.identifier))
    file = self.file
    self.manipulator.srcfile = file
    self.manipulator.do_first()
    if (self.api_version < '2.0' and self.iiif.format is None and
            'Accept' in request.headers):
        # In 1.0 and 1.1 conneg was specified as an alternative to format, see:
        # http://iiif.io/api/image/1.0/#format
        # http://iiif.io/api/image/1.1/#parameters-format
        formats = {'image/jpeg': 'jpg', 'image/tiff': 'tif',
                   'image/png': 'png', 'image/gif': 'gif',
                   'image/jp2': 'jp2', 'application/pdf': 'pdf'}
        accept = do_conneg(request.headers['Accept'], list(formats.keys()))
        # Ignore Accept header if not recognized, should this be an error instead?
        if (accept in formats):
            self.iiif.format = formats[accept]
    (outfile, mime_type) = self.manipulator.derive(file, self.iiif)
    # FIXME - find efficient way to serve file with headers
    self.add_compliance_header()
    return send_file(outfile, mimetype=mime_type)
python
{ "resource": "" }
q7294
IIIFHandler.error_response
train
def error_response(self, e):
    """Make response for an IIIFError e.

    Also add compliance header.
    """
    self.add_compliance_header()
    return self.make_response(*e.image_server_response(self.api_version))
python
{ "resource": "" }
q7295
IIIFRequestHandler.error_response
train
def error_response(self, code, content=''):
    """Construct and send error response."""
    self.send_response(code)
    self.send_header('Content-Type', 'text/xml')
    self.add_compliance_header()
    self.end_headers()
    # wfile is a binary stream, so encode the (str) content before writing
    self.wfile.write(content.encode('utf-8'))
python
{ "resource": "" }
q7296
IIIFRequestHandler.do_GET
train
def do_GET(self):
    """Implement the HTTP GET method.

    The bulk of this code is wrapped in a big try block and anywhere
    within the code may raise an IIIFError which then results in an
    IIIF error response (section 5 of spec).
    """
    self.compliance_uri = None
    self.iiif = IIIFRequest(baseurl='/')
    try:
        (of, mime_type) = self.do_GET_body()
        if (not of):
            raise IIIFError("Unexpected failure to open result image")
        self.send_response(200, 'OK')
        if (mime_type is not None):
            self.send_header('Content-Type', mime_type)
        self.add_compliance_header()
        self.end_headers()
        while True:
            buffer = of.read(8192)
            if (not buffer):
                break
            self.wfile.write(buffer)
        # Now cleanup
        self.manipulator.cleanup()
    except IIIFError as e:
        if (self.debug):
            e.text += "\nRequest parameters:\n" + str(self.iiif)
        self.error_response(e.code, str(e))
    except Exception as ue:
        # Anything else becomes a 500 Internal Server Error
        e = IIIFError(code=500,
                      text="Something went wrong... %s ---- %s.\n" % (str(ue), traceback.format_exc()))
        if (self.debug):
            e.text += "\nRequest parameters:\n" + str(self.iiif)
        self.error_response(e.code, str(e))
python
{ "resource": "" }
q7297
IIIFRequestHandler.do_GET_body
train
def do_GET_body(self):
    """Create body of GET."""
    iiif = self.iiif
    if (len(self.path) > 1024):
        raise IIIFError(code=414,
                        text="URI Too Long: Max 1024 chars, got %d\n" % len(self.path))
    try:
        # self.path has leading / then identifier/params...
        self.path = self.path.lstrip('/')
        sys.stderr.write("path = %s\n" % (self.path))
        iiif.parse_url(self.path)
    except Exception as e:
        # Something completely unexpected => 500
        raise IIIFError(code=500,
                        text="Internal Server Error: unexpected exception parsing request (" + str(e) + ")")
    # Now we have a full iiif request
    if (re.match(r'[\w\.\-]+$', iiif.identifier)):
        file = os.path.join(TESTIMAGE_DIR, iiif.identifier)
        if (not os.path.isfile(file)):
            images_available = ""
            for image_file in os.listdir(TESTIMAGE_DIR):
                if (os.path.isfile(os.path.join(TESTIMAGE_DIR, image_file))):
                    images_available += " " + image_file + "\n"
            raise IIIFError(code=404, parameter="identifier",
                            text="Image resource '" + iiif.identifier +
                                 "' not found. Local image files available:\n" + images_available)
    else:
        raise IIIFError(code=404, parameter="identifier",
                        text="Image resource '" + iiif.identifier +
                             "' not found. Only local test images and http: URIs for images are supported.\n")
    # Now know image is OK
    manipulator = IIIFRequestHandler.manipulator_class()
    # Stash manipulator object so we can cleanup after reading file
    self.manipulator = manipulator
    self.compliance_uri = manipulator.compliance_uri
    if (iiif.info):
        # get size
        manipulator.srcfile = file
        manipulator.do_first()
        # most of info.json comes from config, a few things
        # specific to image
        i = IIIFInfo()
        i.identifier = self.iiif.identifier
        i.width = manipulator.width
        i.height = manipulator.height
        # wfile expects bytes, so encode the JSON and wrap in a binary stream
        import io
        return(io.BytesIO(i.as_json().encode('utf-8')), "application/json")
    else:
        (outfile, mime_type) = manipulator.derive(file, iiif)
        # open in binary mode since do_GET streams raw bytes to wfile
        return(open(outfile, 'rb'), mime_type)
python
{ "resource": "" }
q7298
IIIFAuth.set_cookie_prefix
train
def set_cookie_prefix(self, cookie_prefix=None):
    """Set a random cookie prefix unless one is specified.

    In order to run multiple demonstration auth services on the same
    server we need to have different cookie names for each auth domain.
    Unless cookie_prefix is set, generate a random one.
    """
    if (cookie_prefix is None):
        self.cookie_prefix = "%06d_" % int(random.random() * 1000000)
    else:
        self.cookie_prefix = cookie_prefix
python
{ "resource": "" }
q7299
IIIFAuth.add_services
train
def add_services(self, info):
    """Add auth service descriptions to an IIIFInfo object.

    The login service description is the wrapper for all other auth
    service descriptions, so we add nothing unless self.login_uri is
    specified. If it is set then any other auth services are added as
    children. Exactly the same structure is used in the authorized and
    unauthorized cases (although the data could be different).
    """
    if (self.login_uri):
        svc = self.login_service_description()
        svcs = []
        if (self.logout_uri):
            svcs.append(self.logout_service_description())
        if (self.client_id_uri):
            svcs.append(self.client_id_service_description())
        if (self.access_token_uri):
            svcs.append(self.access_token_service_description())
        # Add one as direct child of service property, else array for >1
        if (len(svcs) == 1):
            svc['service'] = svcs[0]
        elif (len(svcs) > 1):
            svc['service'] = svcs
        info.add_service(svc)
python
{ "resource": "" }