code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
sigparams = inspect.signature(f).parameters for p in sigparams: if sigparams[p].kind == inspect.Parameter.VAR_POSITIONAL: return sigparams[p].name return None
def get_func_posargs_name(f)
Returns the name of the function f's positional argument (*args) parameter if it exists, otherwise None
2.475337
2.568383
0.963772
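A minimal, self-contained usage sketch of the record above; the sample function `sample` is hypothetical and only illustrates where the *args name comes from:

import inspect

def get_func_posargs_name(f):
    # Return the name of f's *args parameter, or None if it has none.
    for name, param in inspect.signature(f).parameters.items():
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            return name
    return None

def sample(a, *extras, **options):  # hypothetical function for illustration
    return None

print(get_func_posargs_name(sample))  # prints: extras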
sigparams = inspect.signature(f).parameters for p in sigparams: if sigparams[p].kind == inspect.Parameter.VAR_KEYWORD: return sigparams[p].name return None
def get_func_kwargs_name(f)
Returns the name of the function f's keyword argument parameter if it exists, otherwise None
2.597182
2.588624
1.003306
for k,v in kwargs.items(): Settings._set(k, v)
def set(**kwargs)
Set configuration parameters. Pass keyword arguments for the parameters you would like to set. This function is particularly useful to call at the head of your script file to disable particular features. For example, >>> from paranoid.settings import Settings >>> Settings.set(enabled=False) This is syntactic sugar for the _set function.
8.273553
4.06848
2.033573
if name not in Settings.__global_setting_values.keys(): raise NameError("Invalid setting value") if name in Settings.__validate_settings.keys(): if not Settings.__validate_settings[name](value): raise ValueError("Invalid setting: %s = %s" % (name, value)) # Set the setting either globally (if no function is passed) # or else locally to the function (if a function is passed). if function: if not hasattr(function, Settings.FUNCTION_SETTINGS_NAME): setattr(function, Settings.FUNCTION_SETTINGS_NAME, {}) # Test if this wraps something. TODO this will fail # for nested decorators. This also assumes that, if # there is a wrapped function (super wraps sub), that # if super doesn't have settings, then sup doesn't # either. (This assumption is valid for paranoid # decorators since it properly uses update_wrapper, # but may not be valid for other decorators.) if hasattr(function, "__wrapped__"): setattr(function.__wrapped__, Settings.FUNCTION_SETTINGS_NAME, getattr(function, Settings.FUNCTION_SETTINGS_NAME)) getattr(function, Settings.FUNCTION_SETTINGS_NAME)[name] = value else: Settings.__global_setting_values[name] = value
def _set(name, value, function=None)
Internally set a config parameter. If you call it with no function, it sets the global parameter. If you call it with a function argument, it sets the value for the specified function. Normally, this should only be called with a function argument for internal code. This should not be called by code outside of the paranoid module.
5.112358
4.830975
1.058246
if function is not None: if hasattr(function, Settings.FUNCTION_SETTINGS_NAME): if name in getattr(function, Settings.FUNCTION_SETTINGS_NAME): return getattr(function, Settings.FUNCTION_SETTINGS_NAME)[name] return Settings.__global_setting_values[name]
def get(name, function=None)
Get a setting. `name` should be the name of the setting to look for. If the optional argument `function` is passed, this will look for a value local to the function before retrieving the global value.
3.497808
3.460334
1.01083
def timed(*arguments, **kw): ts = time.time() result = method(*arguments, **kw) te = time.time() sys.stdout.write('Time: %r %2.2f sec\n' % (method.__name__.strip("_"), te - ts)) sys.stdout.write('------------------------------------\n') sys.stdout.flush() return result return timed
def timeit(method)
Decorator: Compute the execution time of a function :param method: the function :return: the method runtime
2.9865
2.976811
1.003255
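A runnable sketch of the timeit decorator above; the decorated workload `busy_loop` is hypothetical:

import sys
import time

def timeit(method):
    # Mirror of the record above: print the wall-clock runtime of the wrapped call.
    def timed(*arguments, **kw):
        ts = time.time()
        result = method(*arguments, **kw)
        te = time.time()
        sys.stdout.write('Time: %r %2.2f sec\n' % (method.__name__.strip("_"), te - ts))
        sys.stdout.flush()
        return result
    return timed

@timeit
def busy_loop():  # hypothetical workload for illustration
    return sum(range(10 ** 6))

busy_loop()  # prints something like: Time: 'busy_loop' 0.05 sec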
self.g = nx.read_edgelist(network_filename, nodetype=int)
def __read_graph(self, network_filename)
Read .ncol network file :param network_filename: complete path for the .ncol file :return: an undirected network
3.765479
4.79793
0.784813
for n in self.g.nodes(): self.g.node[n]['communities'] = [n] all_communities = {} for ego in tqdm.tqdm(nx.nodes(self.g), ncols=35, bar_format='Exec: {l_bar}{bar}'): ego_minus_ego = nx.ego_graph(self.g, ego, 1, False) community_to_nodes = self.__overlapping_label_propagation(ego_minus_ego, ego) # merging phase for c in community_to_nodes.keys(): if len(community_to_nodes[c]) > self.min_community_size: actual_community = community_to_nodes[c] all_communities = self.__merge_communities(all_communities, actual_community) # write output on file if self.file_output: with open(self.file_output, "w") as out_file_com: for idc, c in enumerate(all_communities.keys()): out_file_com.write("%d\t%s\n" % (idc, str(sorted(c)))) return list(all_communities.keys())
def execute(self)
Execute Demon algorithm
3.696868
3.598924
1.027215
sql = return {r[0]: frozenset(r[1]) for r in conn.execute(sql, (schema,))}
def get_defined_enums(conn, schema)
Return a dict mapping PostgreSQL enumeration types to the set of their defined values. :param conn: SQLAlchemy connection instance. :param str schema: Schema name (e.g. "public"). :returns dict: { "my_enum": frozenset(["a", "b", "c"]), }
8.749813
9.163748
0.954829
types = set(column.type for table in metadata.tables.values() for column in table.columns if (isinstance(column.type, sqlalchemy.Enum) and schema == (column.type.schema or default))) return {t.name: frozenset(t.enums) for t in types}
def get_declared_enums(metadata, schema, default)
Return a dict mapping SQLAlchemy enumeration types to the set of their declared values. :param metadata: ... :param str schema: Schema name (e.g. "public"). :returns dict: { "my_enum": frozenset(["a", "b", "c"]), }
3.748558
3.555429
1.054319
to_add = set() for schema in schema_names: default = autogen_context.dialect.default_schema_name if schema is None: schema = default defined = get_defined_enums(autogen_context.connection, schema) declared = get_declared_enums(autogen_context.metadata, schema, default) for name, new_values in declared.items(): old_values = defined.get(name) # Alembic will handle creation of the type in this migration, so # skip undefined names. if name in defined and new_values.difference(old_values): to_add.add((schema, name, old_values, new_values)) for schema, name, old_values, new_values in sorted(to_add): op = SyncEnumValuesOp(schema, name, old_values, new_values) upgrade_ops.ops.append(op)
def compare_enums(autogen_context, upgrade_ops, schema_names)
Walk the declared SQLAlchemy schema for every referenced Enum, walk the PG schema for every defined Enum, then generate SyncEnumValuesOp migrations for each defined enum that has grown new entries when compared to its declared version. Enums that don't exist in the database yet are ignored, since SQLAlchemy/Alembic will create them as part of the usual migration process.
3.46761
3.012823
1.150951
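The core of compare_enums above is a set difference between declared and defined values; a standalone sketch with hypothetical data (no SQLAlchemy or Alembic required):

defined = {"my_enum": frozenset(["a", "b"])}        # values the database already has (hypothetical)
declared = {"my_enum": frozenset(["a", "b", "c"])}  # values the models declare (hypothetical)

for name, new_values in declared.items():
    old_values = defined.get(name)
    if name in defined and new_values.difference(old_values):
        print(name, "needs", sorted(new_values - old_values))  # prints: my_enum needs ['c']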
kwargs = kwargs or {} key, store = self._expand_opts(key, opts) # Resolve the etag. opts['etag'] = call_or_pass(opts.get('etag') or opts.get('etagger'), args, kwargs) if not isinstance(key, str): raise TypeError('non-string key of type %s' % type(key)) data = store.get(key) if data is not None: if not self._has_expired(data, opts): return data[VALUE_INDEX] if func is None: return None # Prioritize passed options over a store's native lock. lock_func = opts.get('lock') or getattr(store, 'lock', None) lock = lock_func and lock_func(key) locked = lock and lock.acquire(opts.get('timeout', DEFAULT_TIMEOUT)) try: value = func(*args, **kwargs) finally: if locked: lock.release() creation = time() expiry = call_or_pass(opts.get('expiry'), args, kwargs) max_age = call_or_pass(opts.get('max_age'), args, kwargs) if max_age is not None: expiry = min(x for x in (expiry, creation + max_age) if x is not None) # Need to be careful as this is the only place where we do not use the # lovely index constants. store[key] = (CURRENT_PROTOCOL_VERSION, creation, expiry, opts.get('etag'), value) return value
def get(self, key, func=None, args=(), kwargs=None, **opts)
Manually retrieve a value from the cache, calculating as needed. Params: key -> string to store/retrieve value from. func -> callable to generate value if it does not exist, or has expired. args -> positional arguments to call the function with. kwargs -> keyword arguments to call the function with. Keyword Params (options): These will be combined with region values (as selected by the "region" keyword argument, and then selected by "parent" values of those regions all the way up the chain to the "default" region). namespace -> string prefix to apply to the key before get/set. lock -> lock constructor. See README. expiry -> float unix expiration time. max_age -> float number of seconds until the value expires. Only provide expiry OR max_age, not both.
3.954201
4.144422
0.954102
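One detail of get() above worth isolating is how an absolute expiry and a relative max_age are reconciled: the earlier of the two wins. A small sketch with hypothetical numbers:

from time import time

creation = time()
expiry = creation + 300   # hypothetical absolute expiry, 5 minutes out
max_age = 60              # hypothetical relative lifetime in seconds
expiry = min(x for x in (expiry, creation + max_age) if x is not None)
print(round(expiry - creation))  # prints: 60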
key, store = self._expand_opts(key, opts) try: del store[key] except KeyError: pass
def delete(self, key, **opts)
Remove a key from the cache.
6.519174
5.890087
1.106804
key, store = self._expand_opts(key, opts) data = store.get(key) if data is not None: data = list(data) data[EXPIRY_INDEX] = expiry store[key] = tuple(data) else: raise KeyError(key)
def expire_at(self, key, expiry, **opts)
Set the explicit unix expiry time of a key.
3.611385
3.699888
0.97608
self.expire_at(key, time() + max_age, **opts)
def expire(self, key, max_age, **opts)
Set the maximum age of a given key, in seconds.
3.887725
4.097147
0.948886
key, store = self._expand_opts(key, opts) if hasattr(store, 'ttl'): return store.ttl(key) data = store.get(key) if data is None: return None expiry = data[EXPIRY_INDEX] if expiry is not None: return max(0, expiry - time()) or None
def ttl(self, key, **opts)
Get the time-to-live of a given key; None if not set.
4.230924
4.160989
1.016807
key, store = self._expand_opts(key, opts) data = store.get(key) # Note that we do not actually delete the thing here as the max_age # just for this call may have triggered a False. if not data or self._has_expired(data, opts): return False return True
def exists(self, key, **opts)
Return if a key exists in the cache.
11.671516
11.10631
1.050891
self.unuse_region() if self._rlist is not None: # Actual client count, which doesn't include the reference kept by the manager, nor ours # as we are about to be deleted try: if len(self._rlist) == 0: # Free all resources associated with the mapped file self._manager._fdict.pop(self._rlist.path_or_fd()) # END remove regions list from manager except (TypeError, KeyError): # sometimes, during shutdown, getrefcount is None. Its possible # to re-import it, however, its probably better to just ignore # this python problem (for now). # The next step is to get rid of the error prone getrefcount alltogether. pass
def _destroy(self)
Destruction code to decrement counters
18.00234
17.433084
1.032654
self._manager = rhs._manager self._rlist = type(rhs._rlist)(rhs._rlist) self._region = rhs._region self._ofs = rhs._ofs self._size = rhs._size for region in self._rlist: region.increment_client_count() if self._region is not None: self._region.increment_client_count()
def _copy_from(self, rhs)
Copy all data from rhs into this instance, handles usage count
4.219647
3.950229
1.068203
need_region = True man = self._manager fsize = self._rlist.file_size() size = min(size or fsize, man.window_size() or fsize) # clamp size to window size if self._region is not None: if self._region.includes_ofs(offset): need_region = False else: self.unuse_region() # END handle existing region # END check existing region # offset too large ? if offset >= fsize: return self # END handle offset if need_region: self._region = man._obtain_region(self._rlist, offset, size, flags, False) self._region.increment_client_count() # END need region handling self._ofs = offset - self._region._b self._size = min(size, self._region.ofs_end() - offset) return self
def use_region(self, offset=0, size=0, flags=0)
Assure we point to a window which allows access to the given offset into the file :param offset: absolute offset in bytes into the file :param size: amount of bytes to map. If 0, all available bytes will be mapped :param flags: additional flags to be given to os.open in case a file handle is initially opened for mapping. Has no effect if a region can actually be reused. :return: this instance - it should be queried for whether it points to a valid memory region. This is not the case if the mapping failed because we reached the end of the file **Note:** The size actually mapped may be smaller than the given size. If that is the case, either the file has reached its end, or the map was created between two existing regions
5.609443
6.232815
0.899985
return buffer(self._region.buffer(), self._ofs, self._size)
def buffer(self)
Return a buffer object which allows access to our memory region from our offset to the window size. Please note that it might be smaller than you requested when calling use_region() **Note:** You can only obtain a buffer if this instance is_valid() ! **Note:** buffers should not be cached past the duration of your access as it will prevent resources from being freed even though they might not be accounted for anymore !
22.124981
14.868345
1.488059
# unroll methods return (self._region._b + self._ofs) <= ofs < (self._region._b + self._ofs + self._size)
def includes_ofs(self, ofs)
:return: True if the given absolute offset is contained in the cursor's current region **Note:** cursor must be valid for this to work
12.812949
11.233675
1.140584
if isinstance(self._rlist.path_or_fd(), int): raise ValueError("Path queried although mapping was applied to a file descriptor") # END handle type return self._rlist.path_or_fd()
def path(self)
:return: path of the underlying mapped file :raise ValueError: if attached path is not a path
16.754038
14.28311
1.172997
if isinstance(self._rlist.path_or_fd(), string_types()): raise ValueError("File descriptor queried although mapping was generated from path") # END handle type return self._rlist.path_or_fd()
def fd(self)
:return: file descriptor used to create the underlying mapping. **Note:** it is not required to be valid anymore :raise ValueError: if the mapping was not created by a file descriptor
20.376614
17.550301
1.161041
num_found = 0 while (size == 0) or (self._memory_size + size > self._max_memory_size): lru_region = None lru_list = None for regions in self._fdict.values(): for region in regions: # check client count - if it's 1, it's just us if (region.client_count() == 1 and (lru_region is None or region._uc < lru_region._uc)): lru_region = region lru_list = regions # END update lru_region # END for each region # END for each regions list if lru_region is None: break # END handle region not found num_found += 1 del(lru_list[lru_list.index(lru_region)]) lru_region.increment_client_count(-1) self._memory_size -= lru_region.size() self._handle_count -= 1 # END while there is more memory to free return num_found
def _collect_lru_region(self, size)
Unmap the region which was least-recently used and has no client :param size: size of the region we want to map next (assuming it's not already mapped partially or fully). If 0, we try to free any available region :return: Amount of freed regions .. Note:: We don't raise exceptions anymore, in order to keep the system working, allowing temporary overallocation. If the system runs out of memory, it will tell. .. TODO:: implement a case where all unused regions are discarded efficiently. Currently it's only brute force
4.032457
3.903459
1.033047
if self._memory_size + size > self._max_memory_size: self._collect_lru_region(size) # END handle collection r = None if a: assert len(a) == 1 r = a[0] else: try: r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags) except Exception: # apparently we are out of system resources or hit a limit # As many more operations are likely to fail in that condition ( # like reading a file from disk, etc) we free up as much as possible # As this invalidates our insert position, we have to recurse here if is_recursive: # we already tried this, and still have no success in obtaining # a mapping. This is an exception, so we propagate it raise # END handle existing recursion self._collect_lru_region(0) return self._obtain_region(a, offset, size, flags, True) # END handle exceptions self._handle_count += 1 self._memory_size += r.size() a.append(r) # END handle array assert r.includes_ofs(offset) return r
def _obtain_region(self, a, offset, size, flags, is_recursive)
Utility to create a new region - for more information on the parameters, see MapCursor.use_region. :param a: A regions (a)rray :return: The newly created region
8.587612
8.610361
0.997358
regions = self._fdict.get(path_or_fd) if regions is None: regions = self.MapRegionListCls(path_or_fd) self._fdict[path_or_fd] = regions # END obtain region for path return self.WindowCursorCls(self, regions)
def make_cursor(self, path_or_fd)
:return: a cursor pointing to the given path or file descriptor. It can be used to map new regions of the file into memory **Note:** if a file descriptor is given, it is assumed to be open and valid, but may be closed afterwards. To refer to the same file, you may reuse your existing file descriptor, but keep in mind that new windows can only be mapped as long as it stays valid. This is why using actual file paths is preferred unless you plan to keep the file descriptor open. **Note:** file descriptors are problematic as they are not necessarily unique, as two different files opened and closed in succession might have the same file descriptor id. **Note:** Using file descriptors directly is faster once new windows are mapped as it prevents the file from being opened again just for the purpose of mapping it.
6.528327
7.146266
0.91353
return reduce(lambda x, y: x + y, (1 for rlist in self._fdict.values() if len(rlist) > 0), 0)
def num_open_files(self)
Amount of opened files in the system
5.422602
5.887216
0.921081
if sys.platform != 'win32': return # END early bailout num_closed = 0 for path, rlist in self._fdict.items(): if path.startswith(base_path): for region in rlist: region.release() num_closed += 1 # END path matches # END for each path return num_closed
def force_map_handle_removal_win(self, base_path)
ONLY AVAILABLE ON WINDOWS On windows removing files is not allowed if anybody still has it opened. If this process is ourselves, and if the whole process uses this memory manager (as far as the parent framework is concerned) we can enforce closing all memory maps whose path matches the given base path to allow the respective operation after all. The respective system must NOT access the closed memory regions anymore ! This really may only be used if you know that the items which keep the cursors alive will not be using it anymore. They need to be recreated ! :return: Amount of closed handles **Note:** does nothing on non-windows platforms
5.993001
4.613258
1.299082
res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY if round_up and (res != num): res += ALLOCATIONGRANULARITY # END handle size return res
def align_to_mmap(num, round_up)
Align the given integer number to the closest page offset, which usually is 4096 bytes. :param round_up: if True, the next higher multiple of page size is used, otherwise the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0) :return: num rounded to closest page
6.149282
9.342823
0.658182
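A standalone sketch of align_to_mmap above; the 4096-byte granularity is an assumption for illustration (in practice the value would typically come from mmap.ALLOCATIONGRANULARITY):

ALLOCATIONGRANULARITY = 4096  # assumed allocation granularity for illustration

def align_to_mmap(num, round_up):
    res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY
    if round_up and (res != num):
        res += ALLOCATIONGRANULARITY
    return res

print(align_to_mmap(1, False))    # prints: 0
print(align_to_mmap(1, True))     # prints: 4096
print(align_to_mmap(8192, True))  # prints: 8192 (already aligned, unchanged)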
nofs = align_to_mmap(self.ofs, 0) self.size += self.ofs - nofs # keep size constant self.ofs = nofs self.size = align_to_mmap(self.size, 1)
def align(self)
Assures the previous window area is contained in the new one
7.399368
8.029688
0.921501
rofs = self.ofs - window.ofs_end() nsize = rofs + self.size rofs -= nsize - min(nsize, max_size) self.ofs = self.ofs - rofs self.size += rofs
def extend_left_to(self, window, max_size)
Adjust the offset to start where the given window on our left ends if possible, but don't make yourself larger than max_size. The resize will assure that the new window still contains the old window area
6.290514
5.922993
1.06205
self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
def extend_right_to(self, window, max_size)
Adjust the size to make our window end where the right window begins, but don't get larger than max_size
9.820017
8.300249
1.183099
return self._b <= ofs < self._b + self._size
def includes_ofs(self, ofs)
:return: True if the given offset can be read in our mapped region
19.196175
14.78514
1.298342
self._uc += ofs assert self._uc > -1, "Increments must match decrements, usage counter negative: %i" % self._uc if self.client_count() == 0: self.release() return True else: return False
def increment_client_count(self, ofs = 1)
Adjust the usage count by the given positive or negative offset. If usage count equals 0, we will auto-release our resources :return: True if we released resources, False otherwise. In the latter case, we can still be used
9.62353
7.646539
1.258547
if self._file_size is None: if isinstance(self._path_or_fd, string_types()): self._file_size = os.stat(self._path_or_fd).st_size else: self._file_size = os.fstat(self._path_or_fd).st_size # END handle path type # END update file size return self._file_size
def file_size(self)
:return: size of the file we manage
2.680098
2.615662
1.024634
if cursor: self._c = cursor # END update our cursor # reuse existing cursors if possible if self._c is not None and self._c.is_associated(): res = self._c.use_region(offset, size, flags).is_valid() if res: # if given size is too large or default, we computer a proper size # If its smaller, we assume the combination between offset and size # as chosen by the user is correct and use it ! # If not, the user is in trouble. if size > self._c.file_size(): size = self._c.file_size() - offset # END handle size self._size = size # END set size return res # END use our cursor return False
def begin_access(self, cursor=None, offset=0, size=sys.maxsize, flags=0)
Call this before the first use of this instance. The method was already called by the constructor in case sufficient information was provided. For more information on the parameters, see the __init__ method :param path: if cursor is None the existing one will be used. :return: True if the buffer can be used
7.879743
8.176282
0.963732
parser = argparse.ArgumentParser(description="generate HTML from crawler JSON") parser.add_argument( "--data-dir", default="data", help=u"Directory containing JSON data from crawler [%(default)s]" ) parser.add_argument( "--output-dir", default="html", help=u"Directory to output the resulting HTML files [%(default)s]" ) return parser
def make_parser()
Returns an argparse instance for this script.
3.647576
3.386606
1.077059
parser = make_parser() args = parser.parse_args() data_dir = Path(args.data_dir).expand() if not data_dir.isdir(): # pylint: disable=no-value-for-parameter msg = u"Data directory {dir} does not exist".format(dir=args.data_dir) raise ValueError(msg) data_filenames = [name for name in data_dir.files() # pylint: disable=no-value-for-parameter if name.endswith(".json")] if not data_filenames: msg = u"Data directory {dir} contains no JSON files".format(dir=args.data_dir) raise ValueError(msg) output_dir = Path(args.output_dir).expand() output_dir.makedirs_p() # pylint: disable=no-value-for-parameter return render_html(data_dir, output_dir)
def main()
Validates script arguments and calls the render_html() function with them.
2.373687
2.259711
1.050438
bits = code.split(".") for bit in reversed(bits): if WCAG_REFS_RE.match(bit): return bit.split(",") return []
def wcag_refs(code)
Given a `code` from pa11y, return a list of the WCAG references. These references are always of the form: one or more capital letters, followed by one or more digits. One `code` may contain multiple references, separated by commas.
5.844201
5.244293
1.114393
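A sketch of wcag_refs above; WCAG_REFS_RE is not shown in the record, so the pattern below is an assumption built from the docstring (capital letters followed by digits, comma-separated), and the sample pa11y code string is only representative:

import re

WCAG_REFS_RE = re.compile(r"^[A-Z]+[0-9]+(,[A-Z]+[0-9]+)*$")  # assumed pattern for illustration

def wcag_refs(code):
    # Walk the dot-separated code from the end and return the first WCAG-style chunk.
    for bit in reversed(code.split(".")):
        if WCAG_REFS_RE.match(bit):
            return bit.split(",")
    return []

print(wcag_refs("WCAG2AA.Principle1.Guideline1_1.1_1_1.H30,H37"))  # prints: ['H30', 'H37']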
template = env.get_template(template_filename) rendered_html = template.render(**context) # pylint: disable=no-member html_path.write_text(rendered_html, encoding='utf-8')
def render_template(env, html_path, template_filename, context)
Render a template file into the given output location.
2.086052
2.172326
0.960285
env = Environment(loader=PackageLoader('pa11ycrawler', 'templates')) env.globals["wcag_refs"] = wcag_refs pages = [] counter = collections.Counter() grouped_violations = collections.defaultdict(dict) # render detail templates for data_file in data_dir.files('*.json'): data = json.load(data_file.open()) num_error, num_warning, num_notice = pa11y_counts(data['pa11y']) data["num_error"] = num_error data["num_warning"] = num_warning data["num_notice"] = num_notice fname = data_file.namebase + ".html" html_path = output_dir / fname render_template(env, html_path, 'detail.html', data) data["filename"] = fname pages.append(data) for violation in data['pa11y']: violation_id = hashlib.md5( (violation['selector'] + violation['code']).encode('utf-8') ).hexdigest() if violation_id not in grouped_violations[violation['type']]: violation['pages'] = [] grouped_violations[violation['type']][violation_id] = violation counter[violation['type']] += 1 grouped_violations[violation['type']][violation_id]['pages'].append({ 'url': data['url'], 'page_title': data['page_title'] }) def extract_nums(page): "Used to sort pages by violation counts" return ( page["num_error"], page["num_warning"], page["num_notice"], ) index_path = output_dir / INDEX_TEMPLATE render_template(env, index_path, INDEX_TEMPLATE, { "pages": sorted(pages, key=extract_nums, reverse=True), "num_error": counter["error"], "num_warning": counter["warning"], "num_notice": counter["notice"] }) for violation_type in grouped_violations: unique_path = output_dir / u'{}s.html'.format(violation_type) render_template(env, unique_path, UNIQUE_TEMPLATE, { "grouped_violations": sorted( grouped_violations[violation_type].values(), key=lambda item: len(item['pages']), reverse=True ), "current_type": violation_type, "violation_counts": counter })
def render_html(data_dir, output_dir)
The main workhorse of this script. Finds all the JSON data files from pa11ycrawler, and transforms them into HTML files via Jinja2 templating.
2.568692
2.507195
1.024528
ignore_rules = getattr(spider, "pa11y_ignore_rules", {}) or {} return itertools.chain.from_iterable( rule_list for url_glob, rule_list in ignore_rules.items() if fnmatch.fnmatch(url, url_glob) )
def ignore_rules_for_url(spider, url)
Returns a list of ignore rules from the given spider, that are relevant to the given URL.
3.621513
3.799614
0.953127
return all( fnmatch.fnmatch(pa11y_result.get(attr), ignore_rule.get(attr)) for attr in ignore_rule.keys() )
def ignore_rule_matches_result(ignore_rule, pa11y_result)
Returns a boolean result of whether the given ignore rule matches the given pa11y result. The rule only matches the result if *all* attributes of the rule match.
3.526807
3.23602
1.08986
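A self-contained sketch of the rule matching above, with a hypothetical rule and result:

import fnmatch

def ignore_rule_matches_result(ignore_rule, pa11y_result):
    # A rule matches only if every one of its attributes glob-matches the result.
    return all(
        fnmatch.fnmatch(pa11y_result.get(attr), ignore_rule.get(attr))
        for attr in ignore_rule.keys()
    )

rule = {"code": "WCAG2AA.*.H30*", "type": "warning"}  # hypothetical ignore rule
result = {"code": "WCAG2AA.Principle1.H30", "type": "warning", "message": "decorative image"}
print(ignore_rule_matches_result(rule, result))  # prints: True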
if not stdout: return [] results = json.loads(stdout.decode('utf8')) ignore_rules = ignore_rules_for_url(spider, url) for rule in ignore_rules: results = [ result for result in results if not ignore_rule_matches_result(rule, result) ] return results
def load_pa11y_results(stdout, spider, url)
Load output from pa11y, filtering out the ignored messages. The `stdout` parameter is a bytestring, not a unicode string.
3.04974
2.842454
1.072925
config = { "page": { "headers": item["request_headers"], }, } config_file = tempfile.NamedTemporaryFile( mode="w", prefix="pa11y-config-", suffix=".json", delete=False ) json.dump(config, config_file) config_file.close() return config_file
def write_pa11y_config(item)
The only way that pa11y will see the same page that scrapy sees is to make sure that pa11y requests the page with the same headers. However, the only way to configure request headers with pa11y is to write them into a config file. This function will create a config file, write the config into it, and return a reference to that file.
2.677961
2.549473
1.050398
if not pa11y_results: # no output from pa11y, nothing to check. return title_errs = [err for err in pa11y_results if err["context"].startswith("<title")] for err in title_errs: title_elmt = html.fragment_fromstring(err["context"]) # pa11ycrawler will elide the title, so grab whatever true # content we can from the output elided_title = title_elmt.text.strip() if elided_title.endswith("..."): pa11y_title = elided_title[0:-4].strip() else: pa11y_title = elided_title # check that they match -- the elided version should be a substring # of the full version if pa11y_title not in expected_title: # whoa, something's screwy! msg = ( u'Parser mismatch! ' u'Scrapy saw full title "{scrapy_title}", ' u'Pa11y saw elided title "{elided_title}".' ).format( scrapy_title=expected_title, elided_title=elided_title, ) logger.error(msg)
def check_title_match(expected_title, pa11y_results, logger)
Check if Scrapy reports any issue with the HTML <title> element. If so, compare that <title> element to the title that we got in the A11yItem. If they don't match, something is screwy, and pa11y isn't parsing the page that we expect.
4.602758
4.187144
1.09926
num_err, num_warn, num_notice = pa11y_counts(pa11y_results) stats = spider.crawler.stats stats.inc_value("pa11y/error", count=num_err, spider=spider) stats.inc_value("pa11y/warning", count=num_warn, spider=spider) stats.inc_value("pa11y/notice", count=num_notice, spider=spider)
def track_pa11y_stats(pa11y_results, spider)
Keep track of the number of pa11y errors, warnings, and notices that we've seen so far, using the Scrapy stats collector: http://doc.scrapy.org/en/1.1/topics/stats.html
2.050297
1.780134
1.151766
data = dict(item) data['pa11y'] = pa11y_results # it would be nice to use the URL as the filename, # but that gets complicated (long URLs, special characters, etc) # so we'll make the filename a hash of the URL instead, # and throw in the access time so that we can store the same URL # multiple times in this data directory hasher = hashlib.md5() hasher.update(item["url"].encode('utf8')) hasher.update(item["accessed_at"].isoformat().encode('utf8')) basename = hasher.hexdigest() filename = basename + ".json" filepath = data_dir / filename data_dir.makedirs_p() text = json.dumps(data, cls=DateTimeEncoder) filepath.write_text(text)
def write_pa11y_results(item, pa11y_results, data_dir)
Write the output from pa11y into a data file.
4.193534
4.191947
1.000378
config_file = write_pa11y_config(item) args = [ self.pa11y_path, item["url"], '--config={file}'.format(file=config_file.name), ] for flag, value in self.cli_flags.items(): args.append("--{flag}={value}".format(flag=flag, value=value)) retries_remaining = 3 while retries_remaining: logline = " ".join(args) if retries_remaining != 3: logline += u" # (retry {num})".format(num=3-retries_remaining) spider.logger.info(logline) proc = sp.Popen( args, shell=False, stdout=sp.PIPE, stderr=sp.PIPE, ) stdout, stderr = proc.communicate() if proc.returncode in (0, 2): # `pa11y` ran successfully! # Return code 0 means no a11y errors. # Return code 2 means `pa11y` identified a11y errors. # Either way, we're done, so break out of the `while` loop break else: # `pa11y` did _not_ run successfully! # We sometimes get the error "Truffler timed out": # truffler is what accesses the web page for `pa11y1`. # https://www.npmjs.com/package/truffler # If this is the error, we can resolve it just by trying again, # so decrement the retries_remaining and start over. retries_remaining -= 1 if retries_remaining == 0: raise DropItem( u"Couldn't get pa11y results for {url}. Error:\n{err}".format( url=item['url'], err=stderr, ) ) pa11y_results = load_pa11y_results(stdout, spider, item['url']) check_title_match(item['page_title'], pa11y_results, spider.logger) track_pa11y_stats(pa11y_results, spider) os.remove(config_file.name) write_pa11y_results(item, pa11y_results, Path(spider.data_dir)) return item
def process_item(self, item, spider)
Use the Pa11y command line tool to get an a11y report.
3.814218
3.668152
1.03982
for ext in hamlpy.VALID_EXTENSIONS: if extension.endswith('.' + ext): return True return False
def watched_extension(extension)
Return True if the given extension is one of the watched extensions
6.444573
6.550543
0.983823
argv = sys.argv[1:] if len(sys.argv) > 1 else [] args = arg_parser.parse_args(sys.argv[1:]) compiler_args = {} input_folder = os.path.realpath(args.input_dir) if not args.output_dir: output_folder = input_folder else: output_folder = os.path.realpath(args.output_dir) if args.verbose: Options.VERBOSE = True print "Watching {} at refresh interval {} seconds".format(input_folder, args.refresh) if args.extension: Options.OUTPUT_EXT = args.extension if getattr(args, 'tags', False): hamlpynodes.TagNode.self_closing.update(args.tags) if args.input_extension: hamlpy.VALID_EXTENSIONS += args.input_extension if args.attr_wrapper: compiler_args['attr_wrapper'] = args.attr_wrapper if args.jinja: for k in ('ifchanged', 'ifequal', 'ifnotequal', 'autoescape', 'blocktrans', 'spaceless', 'comment', 'cache', 'localize', 'compress'): del hamlpynodes.TagNode.self_closing[k] hamlpynodes.TagNode.may_contain.pop(k, None) hamlpynodes.TagNode.self_closing.update({ 'macro' : 'endmacro', 'call' : 'endcall', 'raw' : 'endraw' }) hamlpynodes.TagNode.may_contain['for'] = 'else' while True: try: _watch_folder(input_folder, output_folder, compiler_args) time.sleep(args.refresh) except KeyboardInterrupt: # allow graceful exit (no stacktrace output) sys.exit(0)
def watch_folder()
Main entry point. Expects one or two arguments (the watch folder + optional destination folder).
3.890272
3.872676
1.004543
for dirpath, dirnames, filenames in os.walk(folder): for filename in filenames: # Ignore filenames starting with ".#" for Emacs compatibility if watched_extension(filename) and not filename.startswith('.#'): fullpath = os.path.join(dirpath, filename) subfolder = os.path.relpath(dirpath, folder) mtime = os.stat(fullpath).st_mtime # Create subfolders in target directory if they don't exist compiled_folder = os.path.join(destination, subfolder) if not os.path.exists(compiled_folder): os.makedirs(compiled_folder) compiled_path = _compiled_path(compiled_folder, filename) if (not fullpath in compiled or compiled[fullpath] < mtime or not os.path.isfile(compiled_path)): compile_file(fullpath, compiled_path, compiler_args) compiled[fullpath] = mtime
def _watch_folder(folder, destination, compiler_args)
Compares "modified" timestamps against the "compiled" dict, calls compiler if necessary.
2.661659
2.621222
1.015427
if Options.VERBOSE: print '%s %s -> %s' % (strftime("%H:%M:%S"), fullpath, outfile_name) try: if Options.DEBUG: print "Compiling %s -> %s" % (fullpath, outfile_name) haml_lines = codecs.open(fullpath, 'r', encoding = 'utf-8').read().splitlines() compiler = hamlpy.Compiler(compiler_args) output = compiler.process_lines(haml_lines) outfile = codecs.open(outfile_name, 'w', encoding = 'utf-8') outfile.write(output) except Exception, e: # import traceback print "Failed to compile %s -> %s\nReason:\n%s" % (fullpath, outfile_name, e)
def compile_file(fullpath, outfile_name, compiler_args)
Calls HamlPy compiler.
2.584543
2.382545
1.084782
return ( len(url.path.segments) == 6 and url.path.segments[0] == 'courses' and url.path.segments[2] == 'courseware' and url.path.segments[5] == '1' )
def is_sequence_start_page(self, url)
Does this URL represent the first page in a section sequence? E.g. /courses/{coursename}/courseware/{block_id}/{section_id}/1 This will return the same page as the pattern /courses/{coursename}/courseware/{block_id}/{section_id}.
3.239755
2.251151
1.439155
url = self.clean_url(item["url"]) if self.is_sequence_start_page(url): url = url.parent if url in self.urls_seen: raise DropItem(u"Dropping duplicate url {url}".format(url=item["url"])) else: self.urls_seen.add(url) return item
def process_item(self, item, spider)
Stops processing item if we've already seen this URL before.
4.023909
3.286217
1.224481
url = URLObject(item["url"]) if url.path.startswith("/api/"): raise DropItem(u"Dropping DRF url {url}".format(url=url)) else: return item
def process_item(self, item, spider)
Check for DRF urls.
7.526538
4.29071
1.754147
num_error = 0 num_warning = 0 num_notice = 0 for result in results: if result['type'] == 'error': num_error += 1 elif result['type'] == 'warning': num_warning += 1 elif result['type'] == 'notice': num_notice += 1 return num_error, num_warning, num_notice
def pa11y_counts(results)
Given a list of pa11y results, return three integers: number of errors, number of warnings, and number of notices.
1.59811
1.420979
1.124654
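A quick, runnable check of pa11y_counts above with a hypothetical result list:

def pa11y_counts(results):
    # Tally pa11y results by type, as in the record above.
    num_error = num_warning = num_notice = 0
    for result in results:
        if result['type'] == 'error':
            num_error += 1
        elif result['type'] == 'warning':
            num_warning += 1
        elif result['type'] == 'notice':
            num_notice += 1
    return num_error, num_warning, num_notice

sample = [{'type': 'error'}, {'type': 'notice'}, {'type': 'error'}]  # hypothetical pa11y output
print(pa11y_counts(sample))  # prints: (2, 0, 1)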
cookie_headers = [ h.decode('ascii') for h in response.headers.getlist("Set-Cookie") ] if not cookie_headers: return None csrf_headers = [ h for h in cookie_headers if h.startswith("csrftoken=") ] if not csrf_headers: return None match = re.match("csrftoken=([^ ;]+);", csrf_headers[-1]) return match.group(1)
def get_csrf_token(response)
Extract the CSRF token out of the "Set-Cookie" header of a response.
2.434494
2.286668
1.064647
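get_csrf_token above boils down to one regular expression applied to the last csrftoken Set-Cookie header; a sketch on a raw header string (the token value is hypothetical):

import re

header = "csrftoken=abc123DEF; expires=Fri, 01 Jan 2027 00:00:00 GMT; Path=/"
match = re.match("csrftoken=([^ ;]+);", header)
print(match.group(1))  # prints: abc123DEF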
if not file and not url: return None if file: file = Path(file) if not file.isfile(): msg = ( u"pa11y_ignore_rules_file specified, but file does not exist! {file}" ).format(file=file) raise ValueError(msg) return yaml.safe_load(file.text()) # must be URL resp = requests.get(url) if not resp.ok: msg = ( u"pa11y_ignore_rules_url specified, but failed to fetch URL. status={status}" ).format(status=resp.status_code) err = RuntimeError(msg) err.response = resp raise err return yaml.safe_load(resp.text)
def load_pa11y_ignore_rules(file=None, url=None)
Load the pa11y ignore rules from the given file or URL.
2.781378
2.642095
1.052717
self.logger.error(repr(failure)) if failure.check(HttpError): response = failure.value.response self.logger.error(u'HttpError on %s', response.url) self.logger.error(u'HttpError Code: %s', response.status) if response.status in (401, 403): # If the error is from invalid login, tell the user self.logger.error( "Credentials failed. Either add/update the current credentials " "or remove them to enable auto auth" ) elif failure.check(DNSLookupError): request = failure.request self.logger.error(u'DNSLookupError on %s', request.url)
def handle_error(self, failure)
Provides basic error information for bad requests. If the error was an HttpError or DNSLookupError, it prints more specific information.
3.667378
3.418173
1.072906
if self.single_url: port = urlparse(self.single_url).port if port: if port != self.port: self.port = port else: # No need for credentials yield scrapy.Request( self.single_url, callback=self.parse_item, errback=self.handle_error ) return if self.login_email and self.login_password: login_url = ( URLObject("http://") .with_hostname(self.domain) .with_port(self.port) .with_path(LOGIN_HTML_PATH) ) yield scrapy.Request( login_url, callback=self.after_initial_csrf, errback=self.handle_error ) else: self.logger.info( "email/password unset, fetching credentials via auto_auth" ) auth_url = ( URLObject("http://") .with_hostname(self.domain) .with_port(self.port) .with_path(AUTO_AUTH_PATH) .set_query_params( staff='true', course_id=self.course_key, ) ) # make sure to request a parseable JSON response headers = { b"Accept": b"application/json", } yield scrapy.Request( auth_url, headers=headers, callback=self.after_auto_auth, errback=self.handle_error )
def start_requests(self)
Gets the spider started. If both `self.login_email` and `self.login_password` are set, this method generates a request to login with those credentials. Otherwise, this method generates a request to go to the "auto auth" page and get credentials from there. Either way, this method doesn't actually generate requests from `self.start_urls` -- that is handled by the `after_initial_login()` and `after_auto_auth()` methods.
3.141036
2.831735
1.109227
login_url = ( URLObject("http://") .with_hostname(self.domain) .with_port(self.port) .with_path(LOGIN_API_PATH) ) credentials = { "email": self.login_email, "password": self.login_password, } headers = { b"X-CSRFToken": get_csrf_token(response), } yield scrapy.FormRequest( login_url, formdata=credentials, headers=headers, callback=self.after_initial_login, errback=self.handle_error )
def after_initial_csrf(self, response)
This method is called *only* if the crawler is started with an email and password combination. In order to log in, we need a CSRF token from a GET request. This method takes the result of a GET request, extracts the CSRF token, and uses it to make a login request. The response to this login request will be handled by the `after_initial_login` method.
2.989058
2.749454
1.087146
if LOGIN_FAILURE_MSG in response.text: self.logger.error( "Credentials failed. Either add/update the current credentials " "or remove them to enable auto auth" ) return self.logger.info("successfully completed initial login") if self.single_url: yield scrapy.Request( self.single_url, callback=self.parse_item, errback=self.handle_error ) else: for url in self.start_urls: yield scrapy.Request( url, callback=self.analyze_url_list, errback=self.handle_error )
def after_initial_login(self, response)
This method is called *only* if the crawler is started with an email and password combination. It verifies that the login request was successful, and then generates requests from `self.start_urls`.
3.929232
3.804117
1.032889
result = json.loads(response.text) self.login_email = result["email"] self.login_password = result["password"] msg = ( u"Obtained credentials via auto_auth! email={email} password={password}" ).format(**result) self.logger.info(msg) if self.single_url: yield scrapy.Request( self.single_url, callback=self.parse_item, errback=self.handle_error ) else: for url in self.start_urls: yield scrapy.Request( url, callback=self.analyze_url_list, errback=self.handle_error )
def after_auto_auth(self, response)
This method is called *only* if the crawler is started without an email and password combination. It parses the response from the "auto auth" feature, and saves the email and password combination. Then it generates requests from `self.start_urls`.
2.911207
2.643686
1.101192
response = json.loads(response.text) for _, block in response['blocks'].items(): for attribute in block: parsed = urlparse(block[attribute]) # find urls in the JSON response if parsed.scheme and parsed.netloc: yield self.make_requests_from_url(block[attribute])
def analyze_url_list(self, response)
Parse JSON response for the beginning url(s) for the crawler.
4.712142
4.456942
1.057259
# if we got redirected to a login page, then login if URLObject(response.url).path == LOGIN_HTML_PATH: reqs = self.handle_unexpected_redirect_to_login_page(response) for req in reqs: yield req title = response.xpath("//title/text()").extract_first() if title: title = title.strip() # `response.request.headers` is a dictionary where the key is the # header name, and the value is a *list*, containing one item, # which is the header value. We need to get rid of this list, and just # have key-value pairs. (This list probably exists in case the same # header is sent multiple times, but that's not happening in this case, # and the list construct is getting in the way.) # # We also need to convert bytes to ASCII. In practice, headers can # only contain ASCII characters: see # http://stackoverflow.com/questions/5423223/how-to-send-non-english-unicode-string-using-http-header request_headers = {key.decode('ascii'): value[0].decode('ascii') for key, value in response.request.headers.items()} item = A11yItem( url=response.url, request_headers=request_headers, accessed_at=datetime.utcnow(), page_title=title, ) yield item
def parse_item(self, response)
Get basic information about a page, so that it can be passed to the `pa11y` tool for further testing. @url https://www.google.com/ @returns items 1 1 @returns requests 0 0 @scrapes url request_headers accessed_at page_title
5.02541
4.900349
1.025521
next_url = URLObject(response.url).query_dict.get("next") login_url = ( URLObject("http://") .with_hostname(self.domain) .with_port(self.port) .with_path(LOGIN_API_PATH) ) if next_url: login_url = login_url.set_query_param("next", next_url) credentials = { "email": self.login_email, "password": self.login_password, } headers = { b"X-CSRFToken": get_csrf_token(response), } yield scrapy.FormRequest( login_url, formdata=credentials, headers=headers, callback=self.after_login, errback=self.handle_error )
def handle_unexpected_redirect_to_login_page(self, response)
This method is called if the crawler has been unexpectedly logged out. If that happens, and the crawler requests a page that requires a logged-in user, the crawler will be redirected to a login page, with the originally-requested URL as the `next` query parameter. This method simply causes the crawler to log back in using the saved email and password credentials. We rely on the fact that the login page will redirect the user to the URL in the `next` query parameter if the login is successful -- this will allow the crawl to resume where it left off. This method is very much like the `get_initial_login()` method, but the callback is `self.after_login` instead of `self.after_initial_login`.
2.528275
2.603347
0.971163
''' Escapes quotes with a backslash, except those inside a Django tag ''' escaped = [] inside_tag = False for i, _ in enumerate(v): if v[i:i + 2] == '{%': inside_tag = True elif v[i:i + 2] == '%}': inside_tag = False if v[i] == self.attr_wrapper and not inside_tag: escaped.append('\\') escaped.append(v[i]) return ''.join(escaped)
def _escape_attribute_quotes(self, v)
Escapes quotes with a backslash, except those inside a Django tag
3.259781
2.278202
1.430857
'''Add child node, and copy all options to it''' super(RootNode, self).add_child(child) child.attr_wrapper = self.attr_wrapper
def add_child(self, child)
Add child node, and copy all options to it
10.594488
5.124915
2.067251
'''Render opening tag and inline content''' start = ["%s<%s" % (self.spaces, element.tag)] if element.id: start.append(" id=%s" % self.element.attr_wrap(self.replace_inline_variables(element.id))) if element.classes: start.append(" class=%s" % self.element.attr_wrap(self.replace_inline_variables(element.classes))) if element.attributes: start.append(' ' + self.replace_inline_variables(element.attributes)) content = self._render_inline_content(self.element.inline_content) if element.nuke_inner_whitespace and content: content = content.strip() if element.self_close and not content: start.append(" />") elif content: start.append(">%s" % (content)) elif self.children: start.append(">%s" % (self.render_newlines())) else: start.append(">") return ''.join(start)
def _render_before(self, element)
Render opening tag and inline content
3.155926
2.890414
1.091859
'''Render closing tag''' if element.inline_content: return "</%s>%s" % (element.tag, self.render_newlines()) elif element.self_close: return self.render_newlines() elif self.children: return "%s</%s>\n" % (self.spaces, element.tag) else: return "</%s>\n" % (element.tag)
def _render_after(self, element)
Render closing tag
3.972916
3.693213
1.075734
request = Request(AUTH_URL) request.add_header('X-Simperium-API-Key', API_KEY) if sys.version_info < (3, 3): request.add_data(json.dumps({'username': user, 'password': password})) else: request.data = json.dumps({'username': user, 'password': password}).encode() try: res = urllib2.urlopen(request).read() token = json.loads(res.decode('utf-8'))["access_token"] except HTTPError: raise SimplenoteLoginFailed('Login to Simplenote API failed!') except IOError: # no connection exception token = None return token
def authenticate(self, user, password)
Method to get simplenote auth token Arguments: - user (string): simplenote email address - password (string): simplenote password Returns: Simplenote API token as string
3.87167
3.311691
1.169092
if self.token == None: self.token = self.authenticate(self.username, self.password) try: return str(self.token,'utf-8') except TypeError: return self.token
def get_token(self)
Method to retrieve an auth token. The cached global token is looked up and returned if it exists. If it is `None` a new one is requested and returned. Returns: Simplenote API token as string
3.266526
4.035312
0.809485
# request note params_version = "" if version is not None: params_version = '/v/' + str(version) params = '/i/%s%s' % (str(noteid), params_version) request = Request(DATA_URL+params) request.add_header(self.header, self.get_token()) try: response = urllib2.urlopen(request) except HTTPError as e: if e.code == 401: raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') else: return e, -1 except IOError as e: return e, -1 note = json.loads(response.read().decode('utf-8')) note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get("X-Simperium-Version"))) # Sort tags # For early versions of notes, tags not always available if "tags" in note: note["tags"] = sorted(note["tags"]) return note, 0
def get_note(self, noteid, version=None)
Method to get a specific note Arguments: - noteid (string): ID of the note to get - version (int): optional version of the note to get Returns: A tuple `(note, status)` - note (dict): note object - status (int): 0 on success and -1 otherwise
4.209087
4.287602
0.981688
# determine whether to create a new note or update an existing one # Also need to add/remove key field to keep simplenote.py consistency if "key" in note: # Then already have a noteid we need to remove before passing to Simperium API noteid = note.pop("key", None) else: # Adding a new note noteid = uuid.uuid4().hex # TODO: Set a ccid? # ccid = uuid.uuid4().hex if "version" in note: version = note.pop("version", None) url = '%s/i/%s/v/%s?response=1' % (DATA_URL, noteid, version) else: url = '%s/i/%s?response=1' % (DATA_URL, noteid) # TODO: Could do with being consistent here. Everywhere else is Request(DATA_URL+params) note = self.__remove_simplenote_api_fields(note) request = Request(url, data=json.dumps(note).encode('utf-8')) request.add_header(self.header, self.get_token()) request.add_header('Content-Type', 'application/json') response = "" try: response = urllib2.urlopen(request) except HTTPError as e: if e.code == 401: raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') else: return e, -1 except IOError as e: return e, -1 note = json.loads(response.read().decode('utf-8')) note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get("X-Simperium-Version"))) return note, 0
def update_note(self, note)
Method to update a specific note object. If the note object does not have a "key" field, a new note is created Arguments - note (dict): note object to update Returns: A tuple `(note, status)` - note (dict): note object - status (int): 0 on success and -1 otherwise
4.673579
4.58956
1.018306
if type(note) == str: return self.update_note({"content": note}) elif (type(note) == dict) and "content" in note: return self.update_note(note) else: return "No string or valid note.", -1
def add_note(self, note)
Wrapper method to add a note The method can be passed the note as a dict with the `content` property set, which is then directly sent to the web service for creation. Alternatively, only the body as string can also be passed. In this case the parameter is used as `content` for the new note. Arguments: - note (dict or string): the note to add Returns: A tuple `(note, status)` - note (dict): the newly created note - status (int): 0 on success and -1 otherwise
4.552031
4.212035
1.08072
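A sketch of the dispatch add_note performs before delegating to update_note; the note value is hypothetical and the Simplenote client itself is not shown:

note = "remember the milk"
if isinstance(note, str):
    payload = {"content": note}                    # a bare string becomes the note body
elif isinstance(note, dict) and "content" in note:
    payload = note                                 # a dict with "content" is passed through
else:
    payload = None                                 # anything else is rejected with status -1
print(payload)  # prints: {'content': 'remember the milk'}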
# initialize data status = 0 ret = [] response_notes = {} notes = { "index" : [] } # get the note index params = '/index?limit=%s' % (str(NOTE_FETCH_LENGTH)) if since is not None: params += '&since=%s' % (since) # Fetching data is the default if data: params += '&data=true' # perform initial HTTP request request = Request(DATA_URL+params) request.add_header(self.header, self.get_token()) try: response = urllib2.urlopen(request) response_notes = json.loads(response.read().decode('utf-8')) # re-write for v1 consistency note_objects = [] for n in response_notes["index"]: # If data=False then can't do this bit... or not all of it, just have id and version. Add empty data object. if not data: n['d'] = {} note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v']) note_objects.append(note_object) notes["index"].extend(note_objects) except HTTPError as e: if e.code == 401: raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') else: return e, -1 except IOError as e: return e, -1 # get additional notes if bookmark was set in response while "mark" in response_notes: params += '&mark=%s' % response_notes["mark"] # perform the actual HTTP request request = Request(DATA_URL+params) request.add_header(self.header, self.get_token()) try: response = urllib2.urlopen(request) response_notes = json.loads(response.read().decode('utf-8')) # re-write for v1 consistency note_objects = [] for n in response_notes["index"]: if not data: n['d'] = {} note_object = n['d'] note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v']) note_objects.append(note_object) notes["index"].extend(note_objects) except HTTPError as e: if e.code == 401: raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') else: return e, -1 except IOError as e: return e, -1 note_list = notes["index"] self.current = response_notes["current"] # Can only filter for tags at end, once all notes have been retrieved. if (len(tags) > 0): note_list = [n for n in note_list if (len(set(n["tags"]).intersection(tags)) > 0)] return note_list, status
def get_note_list(self, data=True, since=None, tags=[])
Method to get the note list The method can be passed optional arguments to limit the list to notes containing a certain tag, or only updated since a certain Simperium cursor. If omitted a list of all notes is returned. By default data objects are returned. If data is set to false only keys/ids and versions are returned. An empty data object is inserted for compatibility. Arguments: - tags=[] list of tags as string: return notes that have at least one of these tags - since=cursor Simperium cursor as string: return only changes since this cursor - data=True If false only return keys/ids and versions Returns: A tuple `(notes, status)` - notes (list): A list of note objects with all properties set except `content`. - status (int): 0 on success and -1 otherwise
3.154242
3.1097
1.014324
# get note note, status = self.get_note(note_id) if (status == -1): return note, status # set deleted property, but only if not already trashed # TODO: A 412 is ok, that's unmodified. Should handle this in update_note and # then not worry about checking here if not note["deleted"]: note["deleted"] = True note["modificationDate"] = time.time() # update note return self.update_note(note) else: return note, 0
def trash_note(self, note_id)
Method to move a note to the trash Arguments: - note_id (string): key of the note to trash Returns: A tuple `(note, status)` - note (dict): the newly created note or an error message - status (int): 0 on success and -1 otherwise
6.583873
6.46917
1.017731
# notes have to be trashed before deletion note, status = self.trash_note(note_id) if (status == -1): return note, status params = '/i/%s' % (str(note_id)) request = Request(url=DATA_URL+params, method='DELETE') request.add_header(self.header, self.get_token()) try: response = urllib2.urlopen(request) except IOError as e: return e, -1 except HTTPError as e: if e.code == 401: raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') else: return e, -1 return {}, 0
def delete_note(self, note_id)
Method to permanently delete a note Arguments: - note_id (string): key of the note to delete Returns: A tuple `(note, status)` - note (dict): an empty dict or an error message - status (int): 0 on success and -1 otherwise
4.416355
4.156633
1.062484
try: user_info = self.get_user_info(token) except UserInfoRetrievalFailed: msg = 'Failed to retrieve user info. Unable to authenticate.' logger.error(msg) raise exceptions.AuthenticationFailed(msg) user, __ = get_user_model().objects.get_or_create(username=user_info['username'], defaults=user_info) if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return user, token
def authenticate_credentials(self, token)
Validate the bearer token against the OAuth provider. Arguments: token (str): Access token to validate Returns: (tuple): tuple containing: user (User): User associated with the access token access_token (str): Access token Raises: AuthenticationFailed: The user is inactive, or retrieval of user info failed.
2.625988
2.567363
1.022835
url = self.get_user_info_url() try: headers = {'Authorization': 'Bearer {}'.format(token)} response = requests.get(url, headers=headers) except requests.RequestException: logger.exception('Failed to retrieve user info due to a request exception.') raise UserInfoRetrievalFailed if response.status_code == 200: return self.process_user_info_response(response.json()) else: msg = 'Failed to retrieve user info. Server [{server}] responded with status [{status}].'.format( server=url, status=response.status_code ) raise UserInfoRetrievalFailed(msg)
def get_user_info(self, token)
Retrieves the user info from the OAuth provider.

    Arguments:
        token (str): OAuth2 access token.

    Returns:
        dict

    Raises:
        UserInfoRetrievalFailed: Retrieval of user info from the remote server failed.
2.563433
2.346211
1.092584
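The same request pattern, written as a stand-alone sketch so it does not depend on the surrounding class; the endpoint URL and token are placeholders, and only the `requests` calls shown (`requests.get`, `raise_for_status`, `json`) are real library APIs.

import requests

def fetch_user_info(url, token):
    # Bearer-token GET against a user-info endpoint, mirroring the method above.
    response = requests.get(url, headers={'Authorization': 'Bearer {}'.format(token)})
    response.raise_for_status()
    return response.json()

# user_info = fetch_user_info('https://provider.example.com/user_info', 'access-token-placeholder')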
mapping = (
    ('username', 'preferred_username'),
    ('email', 'email'),
    ('last_name', 'family_name'),
    ('first_name', 'given_name'),
)

return {dest: response[source] for dest, source in mapping}
def process_user_info_response(self, response)
Process the user info response data.

    By default, this simply maps the edX user info key-values (example below) to Django-friendly names. If
    your provider returns different fields, you should sub-class this class and override this method.

    .. code-block:: python

        {
            "username": "jdoe",
            "email": "jdoe@example.com",
            "first_name": "Jane",
            "last_name": "Doe"
        }

    Arguments:
        response (dict): User info data

    Returns:
        dict
3.554421
4.374958
0.812447
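A runnable illustration of the mapping above; the sample provider payload is invented, with provider field names on the right of each pair and Django-friendly names on the left.

# Invented sample payload in the provider's format.
provider_response = {
    'preferred_username': 'jdoe',
    'email': 'jdoe@example.com',
    'given_name': 'Jane',
    'family_name': 'Doe',
}

mapping = (
    ('username', 'preferred_username'),
    ('email', 'email'),
    ('last_name', 'family_name'),
    ('first_name', 'given_name'),
)

user_info = {dest: provider_response[source] for dest, source in mapping}
print(user_info)  # {'username': 'jdoe', 'email': 'jdoe@example.com', 'last_name': 'Doe', 'first_name': 'Jane'}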
username = payload.get('preferred_username') or payload.get('username')

if username is None:
    raise exceptions.AuthenticationFailed('JWT must include a preferred_username or username claim!')
else:
    try:
        user, __ = get_user_model().objects.get_or_create(username=username)
        attributes_updated = False
        for claim, attr in self.get_jwt_claim_attribute_map().items():
            payload_value = payload.get(claim)
            if getattr(user, attr) != payload_value and payload_value is not None:
                setattr(user, attr, payload_value)
                attributes_updated = True
        if attributes_updated:
            user.save()
    except:
        msg = 'User retrieval failed.'
        logger.exception(msg)
        raise exceptions.AuthenticationFailed(msg)

    return user
def authenticate_credentials(self, payload)
Get or create an active user with the username contained in the payload.
2.695323
2.630911
1.024483
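A self-contained sketch of the attribute-sync loop in the method above, using a plain stand-in object instead of a Django user; the claim-to-attribute map shown is an assumption about what get_jwt_claim_attribute_map might return.

class FakeUser(object):
    # Stand-in for a Django user, just to demonstrate the sync loop.
    email = ''
    first_name = ''
    last_name = ''

payload = {'preferred_username': 'jdoe', 'email': 'jdoe@example.com', 'given_name': 'Jane'}
claim_attribute_map = {'email': 'email', 'given_name': 'first_name', 'family_name': 'last_name'}  # assumed map

user = FakeUser()
attributes_updated = False
for claim, attr in claim_attribute_map.items():
    value = payload.get(claim)
    if value is not None and getattr(user, attr) != value:
        setattr(user, attr, value)
        attributes_updated = True

print(attributes_updated, user.email, user.first_name)  # True jdoe@example.com Jane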
return any(
    issubclass(auth_class, base_class) for auth_class in iter_classes
)
def _includes_base_class(self, iter_classes, base_class)
Returns whether any class in iter_classes is a subclass of the given base_class.
6.37011
5.522387
1.153507
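A tiny, runnable illustration of the subclass check, using plain classes in place of DRF permission classes.

class BasePermission(object): pass
class IsAuthenticated(BasePermission): pass
class AllowAny(object): pass

def includes_base_class(iter_classes, base_class):
    # Same check as the method above, written as a free function for the example.
    return any(issubclass(auth_class, base_class) for auth_class in iter_classes)

print(includes_base_class([AllowAny, IsAuthenticated], BasePermission))  # True
print(includes_base_class([AllowAny], BasePermission))                   # False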
view_permissions = list(getattr(view_class, 'permission_classes', []))

# Not all permissions are classes, some will be ConditionalPermission
# objects from the rest_condition library. So we have to crawl all those
# and expand them to see if our target classes are inside the
# conditionals somewhere.
permission_classes = []
classes_to_add = []
while view_permissions:
    permission = view_permissions.pop()
    if not hasattr(permission, 'perms_or_conds'):
        permission_classes.append(permission)
    else:
        for child in getattr(permission, 'perms_or_conds', []):
            view_permissions.append(child)

for perm_class in self._required_permission_classes:
    if not self._includes_base_class(permission_classes, perm_class):
        log.warning(
            u"The view %s allows Jwt Authentication but needs to include the %s permission class (adding it for you)",
            view_class.__name__,
            perm_class.__name__,
        )
        classes_to_add.append(perm_class)

if classes_to_add:
    view_class.permission_classes += tuple(classes_to_add)
def _add_missing_jwt_permission_classes(self, view_class)
Adds permission classes that should exist for Jwt-based authentication, if needed.
4.86003
4.790482
1.014518
cookie_missing_message = '{} cookie is missing. JWT auth cookies will not be reconstituted.'.format(
    cookie_name
)
request_jwt_cookie = 'missing-{}'.format(cookie_name)
return cookie_missing_message, request_jwt_cookie
def _get_missing_cookie_message_and_metric(self, cookie_name)
Returns a (log_message, metric_value) tuple for the given missing cookie.
6.305048
5.987794
1.052983
use_jwt_cookie_requested = request.META.get(USE_JWT_COOKIE_HEADER)
header_payload_cookie = request.COOKIES.get(jwt_cookie_header_payload_name())
signature_cookie = request.COOKIES.get(jwt_cookie_signature_name())

if not use_jwt_cookie_requested:
    metric_value = 'not-requested'
elif header_payload_cookie and signature_cookie:
    # Reconstitute JWT auth cookie if split cookies are available and jwt cookie
    # authentication was requested by the client.
    request.COOKIES[jwt_cookie_name()] = '{}{}{}'.format(
        header_payload_cookie,
        JWT_DELIMITER,
        signature_cookie,
    )
    metric_value = 'success'
elif header_payload_cookie or signature_cookie:
    # Log unexpected case of only finding one cookie.
    if not header_payload_cookie:
        log_message, metric_value = self._get_missing_cookie_message_and_metric(
            jwt_cookie_header_payload_name()
        )
    if not signature_cookie:
        log_message, metric_value = self._get_missing_cookie_message_and_metric(
            jwt_cookie_signature_name()
        )
    log.warning(log_message)
else:
    metric_value = 'missing-both'

monitoring.set_custom_metric('request_jwt_cookie', metric_value)
def process_request(self, request)
Reconstitute the full JWT and add a new cookie on the request object.
3.402689
3.199258
1.063587
jwt_issuer = get_first_jwt_issuer()
_verify_jwt_signature(token, jwt_issuer)
decoded_token = _decode_and_verify_token(token, jwt_issuer)
return _set_token_defaults(decoded_token)
def jwt_decode_handler(token)
Decodes a JSON Web Token (JWT).

    Notes:
        * Requires "exp" and "iat" claims to be present in the token's payload.
        * Aids debugging by logging InvalidTokenError log entries when decoding fails.

    Examples:
        Use with `djangorestframework-jwt <https://getblimp.github.io/django-rest-framework-jwt/>`_,
        by changing your Django settings:

        .. code-block:: python

            JWT_AUTH = {
                'JWT_DECODE_HANDLER': 'edx_rest_framework_extensions.auth.jwt.decoder.jwt_decode_handler',
                'JWT_ISSUER': 'https://the.jwt.issuer',
                'JWT_SECRET_KEY': 'the-jwt-secret-key',  # defaults to settings.SECRET_KEY
                'JWT_AUDIENCE': 'the-jwt-audience',
                'JWT_PUBLIC_SIGNING_JWK_SET': 'the-jwk-set-of-public-signing-keys',
            }

    Args:
        token (str): JWT to be decoded.

    Returns:
        dict: Decoded JWT payload.

    Raises:
        MissingRequiredClaimError: Either the exp or iat claims is missing from the JWT payload.
        InvalidTokenError: Decoding fails.
4.256349
6.932002
0.614014
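A hedged call-site sketch; the import path comes from the settings example in the docstring, the token string is a placeholder, and the call is left commented out because it requires a configured JWT_AUTH settings block.

from edx_rest_framework_extensions.auth.jwt.decoder import jwt_decode_handler

# encoded_jwt = '<header>.<payload>.<signature>'  # placeholder token
# payload = jwt_decode_handler(encoded_jwt)
# print(payload['exp'], payload['version'])  # 'version' is filled in by _set_token_defaults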
def _verify_version(jwt_version):
    supported_version = Version(
        settings.JWT_AUTH.get('JWT_SUPPORTED_VERSION', JwtTokenVersion.default_latest_supported)
    )
    if jwt_version.major > supported_version.major:
        logger.info('Token decode failed due to unsupported JWT version number [%s]', str(jwt_version))
        raise jwt.InvalidTokenError('JWT version number [%s] is unsupported' % str(jwt_version))

def _get_and_set_version(token):
    if 'version' not in token:
        token['version'] = str(JwtTokenVersion.starting_version)
    return Version(token['version'])

def _set_is_restricted(token):
    if 'is_restricted' not in token:
        token['is_restricted'] = False

def _set_filters(token):
    if 'filters' not in token:
        token['filters'] = []

token_version = _get_and_set_version(token)
_verify_version(token_version)
_set_is_restricted(token)
_set_filters(token)

return token
def _set_token_defaults(token)
Returns an updated token that includes default values for fields introduced after the token was created, determined by checking the token's version number.
3.220363
3.065133
1.050644
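A trimmed-down, runnable sketch of the default-filling behaviour; the starting version string is an assumed stand-in for JwtTokenVersion.starting_version, and the version-compatibility check is omitted.

def set_token_defaults(token, starting_version='1.0.0'):  # assumed starting version
    # Fill in fields that tokens created before these claims existed will lack.
    token.setdefault('version', starting_version)
    token.setdefault('is_restricted', False)
    token.setdefault('filters', [])
    return token

old_token = {'preferred_username': 'jdoe', 'exp': 1234567890}
print(set_token_defaults(old_token))
# {'preferred_username': 'jdoe', 'exp': 1234567890, 'version': '1.0.0', 'is_restricted': False, 'filters': []}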
key_set = KEYS()

# asymmetric keys
signing_jwk_set = settings.JWT_AUTH.get('JWT_PUBLIC_SIGNING_JWK_SET')
if signing_jwk_set:
    key_set.load_jwks(signing_jwk_set)

# symmetric key
key_set.add({'key': jwt_issuer['SECRET_KEY'], 'kty': 'oct'})

return key_set
def _get_signing_jwk_key_set(jwt_issuer)
Returns a JWK Keyset containing all active keys that are configured for verifying signatures.
3.619854
3.869736
0.935426
paginator = Paginator(search_results['results'], page_size)

# This code is taken from within the GenericAPIView#paginate_queryset method.
# It is common code, but
try:
    page_number = paginator.validate_number(page)
except InvalidPage:
    if page == 'last':
        page_number = paginator.num_pages
    else:
        raise Http404("Page is not 'last', nor can it be converted to an int.")

try:
    paged_results = paginator.page(page_number)
except InvalidPage as exception:
    raise Http404(
        "Invalid page {page_number}: {message}".format(
            page_number=page_number,
            message=str(exception)
        )
    )

search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)

def ordered_objects(primary_key):
    for obj in queryset:
        if obj.pk == primary_key:
            return obj

# map over the search results and get a list of database objects in the same order
object_results = list(map(ordered_objects, search_queryset_pks))
paged_results.object_list = object_results

return paged_results
def paginate_search_results(object_class, search_results, page_size, page)
Takes edx-search results and returns a Page object populated with db objects for that page.

    :param object_class: Model class to use when querying the db for objects.
    :param search_results: edX-search results.
    :param page_size: Number of results per page.
    :param page: Page number.
    :return: Page object with the model objects for the requested page.
2.917234
3.112475
0.937271
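A hedged call-site sketch; the search-results structure mirrors what the function indexes into ('results' entries carrying data.pk), Course is a stand-in model name, and the call itself is commented out because it needs Django models and a database.

# Invented search results in the shape the function expects.
search_results = {
    'results': [
        {'data': {'pk': 7}},
        {'data': {'pk': 3}},
        {'data': {'pk': 11}},
    ],
}

# page = paginate_search_results(Course, search_results, page_size=2, page=1)
# page.object_list would then hold Course objects in the same order as the search hits.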
return Response({
    'next': self.get_next_link(),
    'previous': self.get_previous_link(),
    'count': self.page.paginator.count,
    'num_pages': self.page.paginator.num_pages,
    'current_page': self.page.number,
    'start': (self.page.number - 1) * self.get_page_size(self.request),
    'results': data
})
def get_paginated_response(self, data)
Annotate the response with pagination information.
1.879243
1.760609
1.067383
metadata = {
    'next': self.get_next_link(),
    'previous': self.get_previous_link(),
    'count': self.get_result_count(),
    'num_pages': self.get_num_pages(),
}

if isinstance(data, dict):
    if 'results' not in data:
        raise TypeError(u'Malformed result dict')
    data['pagination'] = metadata
else:
    data = {
        'results': data,
        'pagination': metadata,
    }

return Response(data)
def get_paginated_response(self, data)
Annotate the response with pagination information.
2.644256
2.48042
1.066052
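A plain-dict illustration of the two response shapes this method produces, depending on whether `data` is a list or an existing results dict; the values are invented.

metadata = {'next': None, 'previous': None, 'count': 2, 'num_pages': 1}

# When `data` is a list, it is wrapped:
data = [{'id': 1}, {'id': 2}]
body = {'results': data, 'pagination': metadata}

# When `data` is already a dict with 'results', the metadata is added alongside the existing keys:
data = {'results': [{'id': 1}, {'id': 2}], 'facets': {}}
data['pagination'] = metadata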
jwt_cookie = request.COOKIES.get(jwt_cookie_name(), None)

if not jwt_cookie:
    return None
return jwt_decode_handler(jwt_cookie)
def get_decoded_jwt(request)
Grab jwt from jwt cookie in request if possible. Returns a decoded jwt dict if it can be found. Returns None if the jwt is not found.
3.008027
3.031232
0.992345
# Get the underlying HttpRequest object
request = request._request  # pylint: disable=protected-access
user = getattr(request, 'user', None)

# Unauthenticated, CSRF validation not required
# This is where regular `SessionAuthentication` checks that the user is active.
# We have removed that check in this implementation.
# But we added a check to prevent anonymous users since we require a logged-in account.
if not user or user.is_anonymous:
    return None

self.enforce_csrf(request)

# CSRF passed with authenticated user
return (user, None)
def authenticate(self, request)
Authenticate the user, requiring a logged-in account and CSRF.

    This is exactly the same as the `SessionAuthentication` implementation,
    with the `user.is_active` check removed.

    Args:
        request (HttpRequest)

    Returns:
        Tuple of `(user, token)`

    Raises:
        PermissionDenied: The CSRF token check failed.
7.640589
6.781415
1.126695
course_key = CourseKey.from_string(view.kwargs.get('course_id'))

jwt_filters = decode_jwt_filters(request.auth)
for filter_type, filter_value in jwt_filters:
    if filter_type == 'content_org' and filter_value == course_key.org:
        return True

log.warning(
    u"Permission JwtHasContentOrgFilterForRequestedCourse: no filter found for %s.",
    course_key.org,
)
return False
def has_permission(self, request, view)
Ensure that the course_id kwarg provided to the view belongs to one of the organizations specified in the content provider filters of the JWT used to authenticate.
5.477236
4.441827
1.233104
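An illustrative check against decoded JWT filters of the (filter_type, filter_value) form the loop above unpacks; the values are invented.

jwt_filters = [
    ('content_org', 'SampleOrgX'),   # invented filter values
    ('user', 'me'),
]

requested_org = 'SampleOrgX'
allowed = any(
    filter_type == 'content_org' and filter_value == requested_org
    for filter_type, filter_value in jwt_filters
)
print(allowed)  # True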
user_filter = self._get_user_filter(request)
if not user_filter:
    # no user filters are present in the token to limit access
    return True

username_param = get_username_param(request)
allowed = user_filter == username_param

if not allowed:
    log.warning(
        u"Permission JwtHasUserFilterForRequestedUser: user_filter %s doesn't match username %s.",
        user_filter,
        username_param,
    )

return allowed
def has_permission(self, request, view)
If the JWT has a user filter, verify that the filtered user value matches the user in the URL.
6.327345
5.436614
1.163839
# If JWT_ISSUERS is not defined, attempt to return the deprecated settings.
warnings.warn(
    "'JWT_ISSUERS' list not defined, checking for deprecated settings.",
    DeprecationWarning
)
return [
    {
        'ISSUER': api_settings.JWT_ISSUER,
        'SECRET_KEY': api_settings.JWT_SECRET_KEY,
        'AUDIENCE': api_settings.JWT_AUDIENCE
    }
]
def _get_deprecated_jwt_issuers()
Internal helper to retrieve the deprecated set of JWT_ISSUER data from the JWT_AUTH configuration. Keeping this as a separate helper allows for easier testing/mocking.
3.729771
3.620447
1.030196
self._set_request_auth_type_metric(request)
self._set_request_user_agent_metrics(request)
self._set_request_referer_metric(request)
self._set_request_user_id_metric(request)
return response
def process_response(self, request, response)
Add metrics for various details of the request.
3.891871
3.167362
1.228742