Dataset schema (one record per example below):
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 distinct values)
  text              string (75-19.8k chars)
  language          string (1 distinct value)
  meta_information  dict
q15300
setup
train
def setup(service_manager, conf, reload_method="reload"):
    """Load services configuration from an oslo config object.

    It reads ServiceManager and Service configuration options from an
    oslo_config.ConfigOpts() object. It also registers a ServiceManager
    hook to reload the configuration file on reload, in the master
    process and in all children. Then, when each child starts or
    reloads, the configuration options are logged if the oslo config
    option 'log_options' is True.

    On children, the configuration file is reloaded before running the
    application reload method.

    Options currently supported on ServiceManager and Service:

    * graceful_shutdown_timeout

    :param service_manager: ServiceManager instance
    :type service_manager: cotyledon.ServiceManager
    :param conf: Oslo Config object
    :type conf: oslo_config.ConfigOpts()
    :param reload_method: reload or mutate the config files
    :type reload_method: str "reload/mutate"
    """
    conf.register_opts(service_opts)

    # Set cotyledon options from oslo config options
    _load_service_manager_options(service_manager, conf)

    def _service_manager_reload():
        _configfile_reload(conf, reload_method)
        _load_service_manager_options(service_manager, conf)

    if os.name != "posix":
        # NOTE(sileht): reloading can't be supported, oslo.config is not
        # picklable. But we don't care: SIGHUP is not supported on Windows.
        return

    service_manager.register_hooks(
        on_new_worker=functools.partial(_new_worker_hook, conf, reload_method),
        on_reload=_service_manager_reload)
python
{ "resource": "" }
q15301
ServiceManager.register_hooks
train
def register_hooks(self, on_terminate=None, on_reload=None,
                   on_new_worker=None, on_dead_worker=None):
    """Register hook methods

    This can be called multiple times to add more hooks; hooks are
    executed in the order they were added. If a hook raises an
    exception, the following hooks will not be executed.

    :param on_terminate: method called on SIGTERM
    :type on_terminate: callable()
    :param on_reload: method called on SIGHUP
    :type on_reload: callable()
    :param on_new_worker: method called in the child process when this
                          one is ready
    :type on_new_worker: callable(service_id, worker_id, service_obj)
    :param on_dead_worker: method called when a child died
    :type on_dead_worker: callable(service_id, worker_id, exit_code)

    If Windows support is planned, hook callables must support being
    pickled. See the CPython multiprocessing module documentation for
    more detail.
    """
    if on_terminate is not None:
        _utils.check_callable(on_terminate, 'on_terminate')
        self._hooks['terminate'].append(on_terminate)
    if on_reload is not None:
        _utils.check_callable(on_reload, 'on_reload')
        self._hooks['reload'].append(on_reload)
    if on_new_worker is not None:
        _utils.check_callable(on_new_worker, 'on_new_worker')
        self._hooks['new_worker'].append(on_new_worker)
    if on_dead_worker is not None:
        _utils.check_callable(on_dead_worker, 'on_dead_worker')
        self._hooks['dead_worker'].append(on_dead_worker)
python
{ "resource": "" }
q15302
ServiceManager.add
train
def add(self, service, workers=1, args=None, kwargs=None):
    """Add a new service to the ServiceManager

    :param service: callable that returns an instance of :py:class:`Service`
    :type service: callable
    :param workers: number of processes/workers for this service
    :type workers: int
    :param args: additional positional arguments for this service
    :type args: tuple
    :param kwargs: additional keyword arguments for this service
    :type kwargs: dict

    :return: a service id
    :rtype: uuid.uuid4
    """
    _utils.check_callable(service, 'service')
    _utils.check_workers(workers, 1)
    service_id = uuid.uuid4()
    self._services[service_id] = _service.ServiceConfig(
        service_id, service, workers, args, kwargs)
    return service_id
python
{ "resource": "" }
q15303
ServiceManager.reconfigure
train
def reconfigure(self, service_id, workers):
    """Reconfigure a service registered in the ServiceManager

    :param service_id: the service id
    :type service_id: uuid.uuid4
    :param workers: number of processes/workers for this service
    :type workers: int
    :raises: ValueError
    """
    try:
        sc = self._services[service_id]
    except KeyError:
        raise ValueError("%s service id doesn't exist" % service_id)
    else:
        _utils.check_workers(workers, minimum=(1 - sc.workers))
        sc.workers = workers
        # Reset forktimes to respawn services quickly
        self._forktimes = []
python
{ "resource": "" }
q15304
ServiceManager.run
train
def run(self):
    """Start and supervise services workers

    This method will start and supervise all children processes
    until the master process is asked to shut down by a SIGTERM.

    All spawned processes are part of the same unix process group.
    """
    self._systemd_notify_once()
    self._child_supervisor = _utils.spawn(self._child_supervisor_thread)
    self._wait_forever()
python
{ "resource": "" }
q15305
ServiceManager._reload
train
def _reload(self):
    """Reload all children

    posix only
    """
    self._run_hooks('reload')

    # Reset forktimes to respawn services quickly
    self._forktimes = []
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    os.killpg(0, signal.SIGHUP)
    signal.signal(signal.SIGHUP, self._signal_catcher)
python
{ "resource": "" }
q15306
ServiceManager._get_last_worker_died
train
def _get_last_worker_died(self):
    """Return the last died worker information or None"""
    for service_id in list(self._running_services.keys()):
        # We copy the list so we can delete from the original one
        processes = list(self._running_services[service_id].items())
        for process, worker_id in processes:
            if not process.is_alive():
                self._run_hooks('dead_worker', service_id, worker_id,
                                process.exitcode)
                if process.exitcode < 0:
                    sig = _utils.signal_to_name(process.exitcode)
                    LOG.info('Child %(pid)d killed by signal %(sig)s',
                             dict(pid=process.pid, sig=sig))
                else:
                    LOG.info('Child %(pid)d exited with status %(code)d',
                             dict(pid=process.pid, code=process.exitcode))
                del self._running_services[service_id][process]
                return service_id, worker_id
python
{ "resource": "" }
q15307
ServiceManager._systemd_notify_once
train
def _systemd_notify_once():
    """Send notification once to Systemd that service is ready.

    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        if notify_socket.startswith('@'):
            # abstract namespace socket
            notify_socket = '\0%s' % notify_socket[1:]
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        with contextlib.closing(sock):
            try:
                sock.connect(notify_socket)
                sock.sendall(b'READY=1')
                del os.environ['NOTIFY_SOCKET']
            except EnvironmentError:
                LOG.debug("Systemd notification failed", exc_info=True)
python
{ "resource": "" }
q15308
accepts
train
def accepts(*argtypes, **kwargtypes):
    """A function decorator to specify argument types of the function.

    Types may be specified either in the order that they appear in the
    function or via keyword arguments (just as if you were calling the
    function).

    Example usage:

      | @accepts(Positive0)
      | def square_root(x):
      |     ...
    """
    theseargtypes = [T.TypeFactory(a) for a in argtypes]
    thesekwargtypes = {k: T.TypeFactory(a) for k, a in kwargtypes.items()}
    def _decorator(func):
        # @accepts decorator
        f = func.__wrapped__ if hasattr(func, "__wrapped__") else func
        try:
            argtypes = inspect.getcallargs(f, *theseargtypes, **thesekwargtypes)
            argtypes = {k: v if issubclass(type(v), T.Type) else T.Constant(v)
                        for k, v in argtypes.items()}
        except TypeError:
            raise E.ArgumentTypeError("Invalid argument specification to @accepts in %s" % func.__qualname__)
        # Support keyword arguments.  Find the name of the **kwargs
        # parameter (not necessarily "kwargs") and set it to be a
        # dictionary of unspecified types.
        kwargname = U.get_func_kwargs_name(func)
        if kwargname in argtypes.keys():
            argtypes[kwargname] = T.KeywordArguments()
        # Support positional arguments.  Find the name of the *args
        # parameter (not necessarily "args") and set it to be an
        # unspecified type.
        posargname = U.get_func_posargs_name(func)
        if posargname in argtypes.keys():
            argtypes[posargname] = T.PositionalArguments()
        # TODO merge with actual argument names
        if U.has_fun_prop(func, "argtypes"):
            raise ValueError("Cannot set argument types twice")
        U.set_fun_prop(func, "argtypes", argtypes)
        return _wrap(func)
    return _decorator
python
{ "resource": "" }
q15309
returns
train
def returns(returntype):
    """A function decorator to specify return type of the function.

    Example usage:

      | @accepts(Positive0)
      | @returns(Positive0)
      | def square_root(x):
      |     ...
    """
    returntype = T.TypeFactory(returntype)
    def _decorator(func):
        # @returns decorator
        if U.has_fun_prop(func, "returntype"):
            raise ValueError("Cannot set return type twice")
        U.set_fun_prop(func, "returntype", returntype)
        return _wrap(func)
    return _decorator
python
{ "resource": "" }
q15310
requires
train
def requires(condition):
    """A function decorator to specify entry conditions for the function.

    Entry conditions should be a string, which will be evaluated as
    Python code.  Arguments of the function may be accessed by their
    name.

    The special syntax "-->" and "<-->" may be used to mean "if" and
    "if and only if", respectively.  They may not be contained within
    sub-expressions.

    Note that globals will not be included by default, and must be
    manually included using the "namespace" setting, set via
    settings.Settings.

    Example usage:

      | @requires("x >= y")
      | def subtract(x, y):
      |     ...

      | @accepts(l=List(Number), log_transform=Boolean)
      | @requires("log_transform == True --> min(l) > 0")
      | def process_list(l, log_transform=False):
      |     ...
    """
    def _decorator(func, condition=condition):
        # @requires decorator
        if U.has_fun_prop(func, "requires"):
            if not isinstance(U.get_fun_prop(func, "requires"), list):
                raise E.InternalError("Invalid requires structure")
            base_requires = U.get_fun_prop(func, "requires")
        else:
            base_requires = []
        base_condition = condition
        if "<-->" in condition:
            condition_parts = condition.split("<-->")
            assert len(condition_parts) == 2, "Only one implies per statement in %s condition %s" % (condition, func.__qualname__)
            condition = "((%s) if (%s) else True) and ((%s) if (%s) else True)" % (condition_parts[1], condition_parts[0], condition_parts[0], condition_parts[1])
        elif "-->" in condition:
            condition_parts = condition.split("-->")
            assert len(condition_parts) == 2, "Only one implies per statement in %s condition %s" % (base_condition, func.__qualname__)
            condition = "(%s) if (%s) else True" % (condition_parts[1], condition_parts[0])
        U.set_fun_prop(func, "requires",
                       [(compile(condition, '', 'eval'), condition)] + base_requires)
        return _wrap(func)
    return _decorator
python
{ "resource": "" }
q15311
ensures
train
def ensures(condition):
    """A function decorator to specify exit conditions for the function.

    Exit conditions should be a string, which will be evaluated as
    Python code.  Arguments of the function may be accessed by their
    name.  The return value of the function may be accessed using the
    special variable name "return".

    The special syntax "-->" and "<-->" may be used to mean "if" and
    "if and only if", respectively.  They may not be contained within
    sub-expressions.

    Values may be compared to previous executions of the function by
    including a "`" or "``" after them to check for higher order
    properties of the function.

    Note that globals will not be included by default, and must be
    manually included using the "namespace" setting, set via
    settings.Settings.

    Example usage:

      | @ensures("lower_bound <= return <= upper_bound")
      | def search(lower_bound, upper_bound):
      |     ...

      | @ensures("x <= x` --> return <= return`")
      | def monotonic(x):
      |     ...
    """
    def _decorator(func, condition=condition):
        # @ensures decorator
        if U.has_fun_prop(func, "ensures"):
            if not isinstance(U.get_fun_prop(func, "ensures"), list):
                raise E.InternalError("Invalid ensures structure")
            ensures_statements = U.get_fun_prop(func, "ensures")
        else:
            ensures_statements = []
        e = condition.replace("return", "__RETURN__")
        if "<-->" in e:
            e_parts = e.split("<-->")
            assert len(e_parts) == 2, "Only one implies per statement in %s condition %s" % (condition, func.__qualname__)
            e = "((%s) if (%s) else True) and ((%s) if (%s) else True)" % (e_parts[1], e_parts[0], e_parts[0], e_parts[1])
            assert "-->" not in e, "Only one implies per statement in %s condition %s" % (condition, func.__qualname__)
        if "-->" in e:
            e_parts = e.split("-->")
            assert len(e_parts) == 2, "Only one implies per statement in %s condition %s" % (condition, func.__qualname__)
            e = "(%s) if (%s) else True" % (e_parts[1], e_parts[0])
        _bt = "__BACKTICK__"
        _dbt = "__DOUBLEBACKTICK__"
        if "``" in e:
            e = e.replace("``", _dbt)
            e = e.replace("`", _bt)
            compiled = compile(e, '', 'eval')
            U.set_fun_prop(func, "ensures", [(2, compiled, condition)] + ensures_statements)
        elif "`" in e:
            e = e.replace("`", _bt)
            compiled = compile(e, '', 'eval')
            U.set_fun_prop(func, "ensures", [(1, compiled, condition)] + ensures_statements)
        else:
            compiled = compile(e, '', 'eval')
            U.set_fun_prop(func, "ensures", [(0, compiled, condition)] + ensures_statements)
        return _wrap(func)
    return _decorator
python
{ "resource": "" }
q15312
paranoidclass
train
def paranoidclass(cls):
    """A class decorator to specify that class methods contain paranoid decorators.

    Example usage:

      | @paranoidclass
      | class Point:
      |     def __init__(self, x, y):
      |         ...
      |     @returns(Number)
      |     def distance_from_zero():
      |         ...
    """
    for methname in dir(cls):
        meth = getattr(cls, methname)
        if U.has_fun_prop(meth, "argtypes"):
            argtypes = U.get_fun_prop(meth, "argtypes")
            if "self" in argtypes and isinstance(argtypes["self"], T.Self):
                argtypes["self"] = T.Generic(cls)
                U.set_fun_prop(meth, "argtypes", argtypes)  # TODO Not necessary because of reference
        if U.has_fun_prop(meth, "returntype"):
            if isinstance(U.get_fun_prop(meth, "returntype"), T.Self):
                U.set_fun_prop(meth, "returntype", T.Generic(cls))
    return cls
python
{ "resource": "" }
q15313
paranoidconfig
train
def paranoidconfig(**kwargs):
    """A function decorator to set a local setting.

    Settings may be set either globally (using settings.Settings.set())
    or locally using this decorator.  The setting name should be passed
    as a keyword argument, and the value to assign the setting should
    be passed as the value.

    See settings.Settings for the different settings which can be set.

    Example usage:

      | @returns(Number)
      | @paranoidconfig(enabled=False)
      | def slow_function():
      |     ...
    """
    def _decorator(func):
        for k, v in kwargs.items():
            Settings._set(k, v, function=func)
        return _wrap(func)
    return _decorator
python
{ "resource": "" }
q15314
TypeFactory
train
def TypeFactory(v):
    """Ensure `v` is a valid Type.

    This function is used to convert user-specified types into internal
    types for the verification engine.  It allows Type subclasses, Type
    subclass instances, Python types, and user-defined classes to be
    passed.  Returns an instance of the type of `v`.

    Users should never access this function directly.
    """
    if v is None:
        return Nothing()
    elif issubclass(type(v), Type):
        return v
    elif issubclass(v, Type):
        return v()
    elif issubclass(type(v), type):
        return Generic(v)
    else:
        raise InvalidTypeError("Invalid type %s" % v)
python
{ "resource": "" }
q15315
profile_v3_to_proofs
train
def profile_v3_to_proofs(profile, fqdn, refresh=False, address=None):
    """ Convert profile format v3 to proofs
    """
    proofs = []
    try:
        test = profile.items()
    except:
        return proofs
    if 'account' in profile:
        accounts = profile['account']
    else:
        return proofs
    for account in accounts:
        # skip if proof service is not supported
        if 'service' in account and account['service'].lower() not in SITES:
            continue
        if 'proofType' in account and account['proofType'] == "http":
            try:
                proof = {"service": account['service'],
                         "proof_url": account['proofUrl'],
                         "identifier": account['identifier'],
                         "valid": False}
                if is_valid_proof(account['service'], account['identifier'],
                                  fqdn, account['proofUrl'], address=address):
                    proof["valid"] = True
                proofs.append(proof)
            except Exception as e:
                pass
    return proofs
python
{ "resource": "" }
q15316
has_fun_prop
train
def has_fun_prop(f, k):
    """Test whether function `f` has property `k`.

    We define properties as annotations added to a function throughout
    the process of defining a function for verification, e.g. the
    argument types.  If `f` is an unannotated function, this returns
    False.  If `f` has the property named `k`, it returns True.
    Otherwise, it returns False.

    Users should never access this function directly.
    """
    if not hasattr(f, _FUN_PROPS):
        return False
    if not isinstance(getattr(f, _FUN_PROPS), dict):
        return False
    if k not in getattr(f, _FUN_PROPS).keys():
        return False
    return True
python
{ "resource": "" }
q15317
get_fun_prop
train
def get_fun_prop(f, k):
    """Get the value of property `k` from function `f`.

    We define properties as annotations added to a function throughout
    the process of defining a function for verification, e.g. the
    argument types.  If `f` does not have a property named `k`, this
    throws an error.  If `f` has the property named `k`, it returns the
    value of it.

    Users should never access this function directly.
    """
    if not has_fun_prop(f, k):
        raise InternalError("Function %s has no property %s" % (str(f), k))
    return getattr(f, _FUN_PROPS)[k]
python
{ "resource": "" }
q15318
set_fun_prop
train
def set_fun_prop(f, k, v):
    """Set the value of property `k` to be `v` in function `f`.

    We define properties as annotations added to a function throughout
    the process of defining a function for verification, e.g. the
    argument types.  This sets function `f`'s property named `k` to be
    value `v`.

    Users should never access this function directly.
    """
    if not hasattr(f, _FUN_PROPS):
        setattr(f, _FUN_PROPS, {})
    if not isinstance(getattr(f, _FUN_PROPS), dict):
        raise InternalError("Invalid properties dictionary for %s" % str(f))
    getattr(f, _FUN_PROPS)[k] = v
python
{ "resource": "" }
q15319
Settings.set
train
def set(**kwargs):
    """Set configuration parameters.

    Pass keyword arguments for the parameters you would like to set.
    This function is particularly useful to call at the head of your
    script file to disable particular features.  For example,

        >>> from paranoid.settings import Settings
        >>> Settings.set(enabled=False)

    This is syntactic sugar for the _set function.
    """
    for k, v in kwargs.items():
        Settings._set(k, v)
python
{ "resource": "" }
q15320
Settings._set
train
def _set(name, value, function=None):
    """Internally set a config parameter.

    If you call it with no function, it sets the global parameter.  If
    you call it with a function argument, it sets the value for the
    specified function.  Normally, this should only be called with a
    function argument for internal code.  This should not be called by
    code outside of the paranoid module.
    """
    if name not in Settings.__global_setting_values.keys():
        raise NameError("Invalid setting value")
    if name in Settings.__validate_settings.keys():
        if not Settings.__validate_settings[name](value):
            raise ValueError("Invalid setting: %s = %s" % (name, value))
    # Set the setting either globally (if no function is passed)
    # or else locally to the function (if a function is passed).
    if function:
        if not hasattr(function, Settings.FUNCTION_SETTINGS_NAME):
            setattr(function, Settings.FUNCTION_SETTINGS_NAME, {})
            # Test if this wraps something.  TODO this will fail
            # for nested decorators.  This also assumes that, if
            # there is a wrapped function (super wraps sub), that
            # if super doesn't have settings, then sub doesn't
            # either.  (This assumption is valid for paranoid
            # decorators since it properly uses update_wrapper,
            # but may not be valid for other decorators.)
            if hasattr(function, "__wrapped__"):
                setattr(function.__wrapped__, Settings.FUNCTION_SETTINGS_NAME,
                        getattr(function, Settings.FUNCTION_SETTINGS_NAME))
        getattr(function, Settings.FUNCTION_SETTINGS_NAME)[name] = value
    else:
        Settings.__global_setting_values[name] = value
python
{ "resource": "" }
q15321
Settings.get
train
def get(name, function=None):
    """Get a setting.

    `name` should be the name of the setting to look for.  If the
    optional argument `function` is passed, this will look for a value
    local to the function before retrieving the global value.
    """
    if function is not None:
        if hasattr(function, Settings.FUNCTION_SETTINGS_NAME):
            if name in getattr(function, Settings.FUNCTION_SETTINGS_NAME):
                return getattr(function, Settings.FUNCTION_SETTINGS_NAME)[name]
    return Settings.__global_setting_values[name]
python
{ "resource": "" }
q15322
Demon.__read_graph
train
def __read_graph(self, network_filename):
    """
    Read .ncol network file

    :param network_filename: complete path for the .ncol file
    :return: an undirected network
    """
    self.g = nx.read_edgelist(network_filename, nodetype=int)
python
{ "resource": "" }
q15323
Demon.execute
train
def execute(self):
    """
    Execute Demon algorithm
    """
    for n in self.g.nodes():
        self.g.node[n]['communities'] = [n]

    all_communities = {}
    for ego in tqdm.tqdm(nx.nodes(self.g), ncols=35, bar_format='Exec: {l_bar}{bar}'):
        ego_minus_ego = nx.ego_graph(self.g, ego, 1, False)
        community_to_nodes = self.__overlapping_label_propagation(ego_minus_ego, ego)

        # merging phase
        for c in community_to_nodes.keys():
            if len(community_to_nodes[c]) > self.min_community_size:
                actual_community = community_to_nodes[c]
                all_communities = self.__merge_communities(all_communities, actual_community)

    # write output on file
    if self.file_output:
        with open(self.file_output, "w") as out_file_com:
            for idc, c in enumerate(all_communities.keys()):
                out_file_com.write("%d\t%s\n" % (idc, str(sorted(c))))

    return list(all_communities.keys())
python
{ "resource": "" }
q15324
get_declared_enums
train
def get_declared_enums(metadata, schema, default):
    """
    Return a dict mapping SQLAlchemy enumeration types to the set of their
    declared values.

    :param metadata: ...
    :param str schema: Schema name (e.g. "public").
    :returns dict:
        {
            "my_enum": frozenset(["a", "b", "c"]),
        }
    """
    types = set(column.type
                for table in metadata.tables.values()
                for column in table.columns
                if (isinstance(column.type, sqlalchemy.Enum)
                    and schema == (column.type.schema or default)))
    return {t.name: frozenset(t.enums) for t in types}
python
{ "resource": "" }
q15325
compare_enums
train
def compare_enums(autogen_context, upgrade_ops, schema_names):
    """
    Walk the declared SQLAlchemy schema for every referenced Enum, walk
    the PG schema for every defined Enum, then generate SyncEnumValuesOp
    migrations for each defined enum that has grown new entries when
    compared to its declared version.

    Enums that don't exist in the database yet are ignored, since
    SQLAlchemy/Alembic will create them as part of the usual migration
    process.
    """
    to_add = set()
    for schema in schema_names:
        default = autogen_context.dialect.default_schema_name
        if schema is None:
            schema = default

        defined = get_defined_enums(autogen_context.connection, schema)
        declared = get_declared_enums(autogen_context.metadata, schema, default)
        for name, new_values in declared.items():
            old_values = defined.get(name)
            # Alembic will handle creation of the type in this migration,
            # so skip undefined names.
            if name in defined and new_values.difference(old_values):
                to_add.add((schema, name, old_values, new_values))

    for schema, name, old_values, new_values in sorted(to_add):
        op = SyncEnumValuesOp(schema, name, old_values, new_values)
        upgrade_ops.ops.append(op)
python
{ "resource": "" }
q15326
Memoizer.get
train
def get(self, key, func=None, args=(), kwargs=None, **opts):
    """Manually retrieve a value from the cache, calculating as needed.

    Params:
        key -> string to store/retrieve value from.
        func -> callable to generate value if it does not exist, or has
            expired.
        args -> positional arguments to call the function with.
        kwargs -> keyword arguments to call the function with.

    Keyword Params (options):

        These will be combined with region values (as selected by the
        "region" keyword argument, and then selected by "parent" values
        of those regions all the way up the chain to the "default"
        region).

        namespace -> string prefix to apply to the key before get/set.
        lock -> lock constructor. See README.
        expiry -> float unix expiration time.
        max_age -> float number of seconds until the value expires.
            Only provide expiry OR max_age, not both.
    """
    kwargs = kwargs or {}
    key, store = self._expand_opts(key, opts)

    # Resolve the etag.
    opts['etag'] = call_or_pass(opts.get('etag') or opts.get('etagger'), args, kwargs)

    if not isinstance(key, str):
        raise TypeError('non-string key of type %s' % type(key))

    data = store.get(key)
    if data is not None:
        if not self._has_expired(data, opts):
            return data[VALUE_INDEX]

    if func is None:
        return None

    # Prioritize passed options over a store's native lock.
    lock_func = opts.get('lock') or getattr(store, 'lock', None)
    lock = lock_func and lock_func(key)
    locked = lock and lock.acquire(opts.get('timeout', DEFAULT_TIMEOUT))

    try:
        value = func(*args, **kwargs)
    finally:
        if locked:
            lock.release()

    creation = time()
    expiry = call_or_pass(opts.get('expiry'), args, kwargs)
    max_age = call_or_pass(opts.get('max_age'), args, kwargs)
    if max_age is not None:
        expiry = min(x for x in (expiry, creation + max_age) if x is not None)

    # Need to be careful as this is the only place where we do not use the
    # lovely index constants.
    store[key] = (CURRENT_PROTOCOL_VERSION, creation, expiry, opts.get('etag'), value)

    return value
python
{ "resource": "" }
q15327
Memoizer.expire_at
train
def expire_at(self, key, expiry, **opts):
    """Set the explicit unix expiry time of a key."""
    key, store = self._expand_opts(key, opts)
    data = store.get(key)
    if data is not None:
        data = list(data)
        data[EXPIRY_INDEX] = expiry
        store[key] = tuple(data)
    else:
        raise KeyError(key)
python
{ "resource": "" }
q15328
Memoizer.expire
train
def expire(self, key, max_age, **opts):
    """Set the maximum age of a given key, in seconds."""
    self.expire_at(key, time() + max_age, **opts)
python
{ "resource": "" }
q15329
Memoizer.ttl
train
def ttl(self, key, **opts):
    """Get the time-to-live of a given key; None if not set."""
    key, store = self._expand_opts(key, opts)
    if hasattr(store, 'ttl'):
        return store.ttl(key)
    data = store.get(key)
    if data is None:
        return None
    expiry = data[EXPIRY_INDEX]
    if expiry is not None:
        return max(0, expiry - time()) or None
python
{ "resource": "" }
q15330
Memoizer.exists
train
def exists(self, key, **opts):
    """Return if a key exists in the cache."""
    key, store = self._expand_opts(key, opts)
    data = store.get(key)
    # Note that we do not actually delete the thing here as the max_age
    # just for this call may have triggered a False.
    if not data or self._has_expired(data, opts):
        return False
    return True
python
{ "resource": "" }
q15331
WindowCursor._destroy
train
def _destroy(self):
    """Destruction code to decrement counters"""
    self.unuse_region()

    if self._rlist is not None:
        # Actual client count, which doesn't include the reference kept
        # by the manager, nor ours, as we are about to be deleted
        try:
            if len(self._rlist) == 0:
                # Free all resources associated with the mapped file
                self._manager._fdict.pop(self._rlist.path_or_fd())
            # END remove regions list from manager
        except (TypeError, KeyError):
            # sometimes, during shutdown, getrefcount is None.  It's
            # possible to re-import it; however, it's probably better to
            # just ignore this python problem (for now).
            # The next step is to get rid of the error-prone getrefcount
            # altogether.
            pass
python
{ "resource": "" }
q15332
WindowCursor._copy_from
train
def _copy_from(self, rhs):
    """Copy all data from rhs into this instance, handles usage count"""
    self._manager = rhs._manager
    self._rlist = type(rhs._rlist)(rhs._rlist)
    self._region = rhs._region
    self._ofs = rhs._ofs
    self._size = rhs._size

    for region in self._rlist:
        region.increment_client_count()

    if self._region is not None:
        self._region.increment_client_count()
python
{ "resource": "" }
q15333
WindowCursor.use_region
train
def use_region(self, offset=0, size=0, flags=0):
    """Assure we point to a window which allows access to the given
    offset into the file

    :param offset: absolute offset in bytes into the file
    :param size: amount of bytes to map. If 0, all available bytes will
        be mapped
    :param flags: additional flags to be given to os.open in case a
        file handle is initially opened for mapping. Has no effect if a
        region can actually be reused.
    :return: this instance - it should be queried for whether it points
        to a valid memory region. This is not the case if the mapping
        failed because we reached the end of the file

    **Note:** The size actually mapped may be smaller than the given
    size. If that is the case, either the file has reached its end, or
    the map was created between two existing regions"""
    need_region = True
    man = self._manager
    fsize = self._rlist.file_size()
    size = min(size or fsize, man.window_size() or fsize)   # clamp size to window size

    if self._region is not None:
        if self._region.includes_ofs(offset):
            need_region = False
        else:
            self.unuse_region()
        # END handle existing region
    # END check existing region

    # offset too large ?
    if offset >= fsize:
        return self
    # END handle offset

    if need_region:
        self._region = man._obtain_region(self._rlist, offset, size, flags, False)
        self._region.increment_client_count()
    # END need region handling

    self._ofs = offset - self._region._b
    self._size = min(size, self._region.ofs_end() - offset)

    return self
python
{ "resource": "" }
q15334
StaticWindowMapManager.num_open_files
train
def num_open_files(self):
    """Amount of opened files in the system"""
    return reduce(lambda x, y: x + y,
                  (1 for rlist in self._fdict.values() if len(rlist) > 0), 0)
python
{ "resource": "" }
q15335
align_to_mmap
train
def align_to_mmap(num, round_up):
    """
    Align the given integer number to the closest page offset, which
    usually is 4096 bytes.

    :param round_up: if True, the next higher multiple of page size is
        used, otherwise the lower page_size will be used (i.e. if True,
        1 becomes 4096, otherwise it becomes 0)
    :return: num rounded to closest page"""
    res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY
    if round_up and (res != num):
        res += ALLOCATIONGRANULARITY
    # END handle size
    return res
python
{ "resource": "" }
q15336
MapWindow.align
train
def align(self):
    """Assures the previous window area is contained in the new one"""
    nofs = align_to_mmap(self.ofs, 0)
    self.size += self.ofs - nofs    # keep size constant
    self.ofs = nofs
    self.size = align_to_mmap(self.size, 1)
python
{ "resource": "" }
q15337
MapWindow.extend_left_to
train
def extend_left_to(self, window, max_size):
    """Adjust the offset to start where the given window on our left
    ends if possible, but don't make yourself larger than max_size.
    The resize will assure that the new window still contains the old
    window area"""
    rofs = self.ofs - window.ofs_end()
    nsize = rofs + self.size
    rofs -= nsize - min(nsize, max_size)
    self.ofs = self.ofs - rofs
    self.size += rofs
python
{ "resource": "" }
q15338
MapWindow.extend_right_to
train
def extend_right_to(self, window, max_size):
    """Adjust the size to make our window end where the right window
    begins, but don't get larger than max_size"""
    self.size = min(self.size + (window.ofs - self.ofs_end()), max_size)
python
{ "resource": "" }
q15339
SlidingWindowMapBuffer.begin_access
train
def begin_access(self, cursor=None, offset=0, size=sys.maxsize, flags=0):
    """Call this before the first use of this instance. The method was
    already called by the constructor in case sufficient information was
    provided.

    For more information on the parameters, see the __init__ method
    :param cursor: if None, the existing one will be used.
    :return: True if the buffer can be used"""
    if cursor:
        self._c = cursor
    # END update our cursor

    # reuse existing cursors if possible
    if self._c is not None and self._c.is_associated():
        res = self._c.use_region(offset, size, flags).is_valid()
        if res:
            # if the given size is too large or default, we compute a
            # proper size.  If it's smaller, we assume the combination
            # of offset and size as chosen by the user is correct and
            # use it!  If not, the user is in trouble.
            if size > self._c.file_size():
                size = self._c.file_size() - offset
            # END handle size
            self._size = size
        # END set size
        return res
    # END use our cursor
    return False
python
{ "resource": "" }
q15340
make_parser
train
def make_parser():
    """
    Returns an argparse instance for this script.
    """
    parser = argparse.ArgumentParser(description="generate HTML from crawler JSON")
    parser.add_argument(
        "--data-dir",
        default="data",
        help=u"Directory containing JSON data from crawler [%(default)s]",
    )
    parser.add_argument(
        "--output-dir",
        default="html",
        help=u"Directory to output the resulting HTML files [%(default)s]",
    )
    return parser
python
{ "resource": "" }
q15341
render_template
train
def render_template(env, html_path, template_filename, context):
    """
    Render a template file into the given output location.
    """
    template = env.get_template(template_filename)
    rendered_html = template.render(**context)  # pylint: disable=no-member
    html_path.write_text(rendered_html, encoding='utf-8')
python
{ "resource": "" }
q15342
render_html
train
def render_html(data_dir, output_dir):
    """
    The main workhorse of this script. Finds all the JSON data files
    from pa11ycrawler, and transforms them into HTML files via
    Jinja2 templating.
    """
    env = Environment(loader=PackageLoader('pa11ycrawler', 'templates'))
    env.globals["wcag_refs"] = wcag_refs
    pages = []
    counter = collections.Counter()
    grouped_violations = collections.defaultdict(dict)

    # render detail templates
    for data_file in data_dir.files('*.json'):
        data = json.load(data_file.open())
        num_error, num_warning, num_notice = pa11y_counts(data['pa11y'])
        data["num_error"] = num_error
        data["num_warning"] = num_warning
        data["num_notice"] = num_notice
        fname = data_file.namebase + ".html"
        html_path = output_dir / fname
        render_template(env, html_path, 'detail.html', data)

        data["filename"] = fname
        pages.append(data)

        for violation in data['pa11y']:
            violation_id = hashlib.md5(
                (violation['selector'] + violation['code']).encode('utf-8')
            ).hexdigest()
            if violation_id not in grouped_violations[violation['type']]:
                violation['pages'] = []
                grouped_violations[violation['type']][violation_id] = violation
                counter[violation['type']] += 1
            grouped_violations[violation['type']][violation_id]['pages'].append({
                'url': data['url'],
                'page_title': data['page_title'],
            })

    def extract_nums(page):
        "Used to sort pages by violation counts"
        return (
            page["num_error"],
            page["num_warning"],
            page["num_notice"],
        )

    index_path = output_dir / INDEX_TEMPLATE
    render_template(env, index_path, INDEX_TEMPLATE, {
        "pages": sorted(pages, key=extract_nums, reverse=True),
        "num_error": counter["error"],
        "num_warning": counter["warning"],
        "num_notice": counter["notice"],
    })

    for violation_type in grouped_violations:
        unique_path = output_dir / u'{}s.html'.format(violation_type)
        render_template(env, unique_path, UNIQUE_TEMPLATE, {
            "grouped_violations": sorted(
                grouped_violations[violation_type].values(),
                key=lambda item: len(item['pages']),
                reverse=True,
            ),
            "current_type": violation_type,
            "violation_counts": counter,
        })
python
{ "resource": "" }
q15343
ignore_rules_for_url
train
def ignore_rules_for_url(spider, url):
    """
    Returns a list of ignore rules from the given spider,
    that are relevant to the given URL.
    """
    ignore_rules = getattr(spider, "pa11y_ignore_rules", {}) or {}
    return itertools.chain.from_iterable(
        rule_list
        for url_glob, rule_list in ignore_rules.items()
        if fnmatch.fnmatch(url, url_glob)
    )
python
{ "resource": "" }
q15344
load_pa11y_results
train
def load_pa11y_results(stdout, spider, url):
    """
    Load output from pa11y, filtering out the ignored messages.
    The `stdout` parameter is a bytestring, not a unicode string.
    """
    if not stdout:
        return []
    results = json.loads(stdout.decode('utf8'))
    ignore_rules = ignore_rules_for_url(spider, url)
    for rule in ignore_rules:
        results = [
            result for result in results
            if not ignore_rule_matches_result(rule, result)
        ]
    return results
python
{ "resource": "" }
q15345
write_pa11y_config
train
def write_pa11y_config(item):
    """
    The only way that pa11y will see the same page that scrapy sees
    is to make sure that pa11y requests the page with the same headers.
    However, the only way to configure request headers with pa11y
    is to write them into a config file.

    This function will create a config file, write the config into it,
    and return a reference to that file.
    """
    config = {
        "page": {
            "headers": item["request_headers"],
        },
    }
    config_file = tempfile.NamedTemporaryFile(
        mode="w",
        prefix="pa11y-config-",
        suffix=".json",
        delete=False,
    )
    json.dump(config, config_file)
    config_file.close()
    return config_file
python
{ "resource": "" }
q15346
write_pa11y_results
train
def write_pa11y_results(item, pa11y_results, data_dir):
    """
    Write the output from pa11y into a data file.
    """
    data = dict(item)
    data['pa11y'] = pa11y_results

    # it would be nice to use the URL as the filename,
    # but that gets complicated (long URLs, special characters, etc)
    # so we'll make the filename a hash of the URL instead,
    # and throw in the access time so that we can store the same URL
    # multiple times in this data directory
    hasher = hashlib.md5()
    hasher.update(item["url"].encode('utf8'))
    hasher.update(item["accessed_at"].isoformat().encode('utf8'))
    basename = hasher.hexdigest()
    filename = basename + ".json"
    filepath = data_dir / filename

    data_dir.makedirs_p()
    text = json.dumps(data, cls=DateTimeEncoder)
    filepath.write_text(text)
python
{ "resource": "" }
q15347
Pa11yPipeline.process_item
train
def process_item(self, item, spider):
    """
    Use the Pa11y command line tool to get an a11y report.
    """
    config_file = write_pa11y_config(item)
    args = [
        self.pa11y_path,
        item["url"],
        '--config={file}'.format(file=config_file.name),
    ]
    for flag, value in self.cli_flags.items():
        args.append("--{flag}={value}".format(flag=flag, value=value))

    retries_remaining = 3
    while retries_remaining:
        logline = " ".join(args)
        if retries_remaining != 3:
            logline += u" # (retry {num})".format(num=3 - retries_remaining)
        spider.logger.info(logline)

        proc = sp.Popen(
            args, shell=False,
            stdout=sp.PIPE, stderr=sp.PIPE,
        )
        stdout, stderr = proc.communicate()
        if proc.returncode in (0, 2):
            # `pa11y` ran successfully!
            # Return code 0 means no a11y errors.
            # Return code 2 means `pa11y` identified a11y errors.
            # Either way, we're done, so break out of the `while` loop
            break
        else:
            # `pa11y` did _not_ run successfully!
            # We sometimes get the error "Truffler timed out":
            # truffler is what accesses the web page for `pa11y`.
            # https://www.npmjs.com/package/truffler
            # If this is the error, we can resolve it just by trying
            # again, so decrement retries_remaining and start over.
            retries_remaining -= 1

    if retries_remaining == 0:
        raise DropItem(
            u"Couldn't get pa11y results for {url}. Error:\n{err}".format(
                url=item['url'],
                err=stderr,
            )
        )

    pa11y_results = load_pa11y_results(stdout, spider, item['url'])
    check_title_match(item['page_title'], pa11y_results, spider.logger)
    track_pa11y_stats(pa11y_results, spider)
    os.remove(config_file.name)
    write_pa11y_results(item, pa11y_results, Path(spider.data_dir))
    return item
python
{ "resource": "" }
q15348
watched_extension
train
def watched_extension(extension):
    """Return True if the given extension is one of the watched extensions"""
    for ext in hamlpy.VALID_EXTENSIONS:
        if extension.endswith('.' + ext):
            return True
    return False
python
{ "resource": "" }
q15349
_watch_folder
train
def _watch_folder(folder, destination, compiler_args):
    """Compares "modified" timestamps against the "compiled" dict, calls
    compiler if necessary."""
    for dirpath, dirnames, filenames in os.walk(folder):
        for filename in filenames:
            # Ignore filenames starting with ".#" for Emacs compatibility
            if watched_extension(filename) and not filename.startswith('.#'):
                fullpath = os.path.join(dirpath, filename)
                subfolder = os.path.relpath(dirpath, folder)
                mtime = os.stat(fullpath).st_mtime

                # Create subfolders in target directory if they don't exist
                compiled_folder = os.path.join(destination, subfolder)
                if not os.path.exists(compiled_folder):
                    os.makedirs(compiled_folder)

                compiled_path = _compiled_path(compiled_folder, filename)
                if (not fullpath in compiled or
                        compiled[fullpath] < mtime or
                        not os.path.isfile(compiled_path)):
                    compile_file(fullpath, compiled_path, compiler_args)
                    compiled[fullpath] = mtime
python
{ "resource": "" }
q15350
compile_file
train
# NOTE: this function is Python 2 source (print statements, old-style
# except clause), kept as written.
def compile_file(fullpath, outfile_name, compiler_args):
    """Calls HamlPy compiler."""
    if Options.VERBOSE:
        print '%s %s -> %s' % (strftime("%H:%M:%S"), fullpath, outfile_name)
    try:
        if Options.DEBUG:
            print "Compiling %s -> %s" % (fullpath, outfile_name)
        haml_lines = codecs.open(fullpath, 'r', encoding='utf-8').read().splitlines()
        compiler = hamlpy.Compiler(compiler_args)
        output = compiler.process_lines(haml_lines)
        outfile = codecs.open(outfile_name, 'w', encoding='utf-8')
        outfile.write(output)
    except Exception, e:
        # import traceback
        print "Failed to compile %s -> %s\nReason:\n%s" % (fullpath, outfile_name, e)
python
{ "resource": "" }
q15351
DuplicatesPipeline.process_item
train
def process_item(self, item, spider):  # pylint: disable=unused-argument
    """
    Stops processing item if we've already seen this URL before.
    """
    url = self.clean_url(item["url"])
    if self.is_sequence_start_page(url):
        url = url.parent
    if url in self.urls_seen:
        raise DropItem(u"Dropping duplicate url {url}".format(url=item["url"]))
    else:
        self.urls_seen.add(url)
        return item
python
{ "resource": "" }
q15352
DropDRFPipeline.process_item
train
def process_item(self, item, spider):  # pylint: disable=unused-argument
    "Check for DRF urls."
    url = URLObject(item["url"])
    if url.path.startswith("/api/"):
        raise DropItem(u"Dropping DRF url {url}".format(url=url))
    else:
        return item
python
{ "resource": "" }
q15353
get_csrf_token
train
def get_csrf_token(response):
    """
    Extract the CSRF token out of the "Set-Cookie" header of a response.
    """
    cookie_headers = [
        h.decode('ascii') for h in response.headers.getlist("Set-Cookie")
    ]
    if not cookie_headers:
        return None
    csrf_headers = [
        h for h in cookie_headers if h.startswith("csrftoken=")
    ]
    if not csrf_headers:
        return None
    match = re.match("csrftoken=([^ ;]+);", csrf_headers[-1])
    return match.group(1)
python
{ "resource": "" }
q15354
load_pa11y_ignore_rules
train
def load_pa11y_ignore_rules(file=None, url=None):  # pylint: disable=redefined-builtin
    """
    Load the pa11y ignore rules from the given file or URL.
    """
    if not file and not url:
        return None
    if file:
        file = Path(file)
        if not file.isfile():
            msg = (
                u"pa11y_ignore_rules_file specified, but file does not exist! {file}"
            ).format(file=file)
            raise ValueError(msg)
        return yaml.safe_load(file.text())
    # must be URL
    resp = requests.get(url)
    if not resp.ok:
        msg = (
            u"pa11y_ignore_rules_url specified, but failed to fetch URL. status={status}"
        ).format(status=resp.status_code)
        err = RuntimeError(msg)
        err.response = resp
        raise err
    return yaml.safe_load(resp.text)
python
{ "resource": "" }
q15355
EdxSpider.handle_error
train
def handle_error(self, failure):
    """
    Provides basic error information for bad requests.
    If the error was an HttpError or DNSLookupError, it
    prints more specific information.
    """
    self.logger.error(repr(failure))

    if failure.check(HttpError):
        response = failure.value.response
        self.logger.error(u'HttpError on %s', response.url)
        self.logger.error(u'HttpError Code: %s', response.status)
        if response.status in (401, 403):
            # If the error is from invalid login, tell the user
            self.logger.error(
                "Credentials failed. Either add/update the current credentials "
                "or remove them to enable auto auth"
            )
    elif failure.check(DNSLookupError):
        request = failure.request
        self.logger.error(u'DNSLookupError on %s', request.url)
python
{ "resource": "" }
q15356
EdxSpider.parse_item
train
def parse_item(self, response):
    """
    Get basic information about a page, so that it can be passed to the
    `pa11y` tool for further testing.

    @url https://www.google.com/
    @returns items 1 1
    @returns requests 0 0
    @scrapes url request_headers accessed_at page_title
    """
    # if we got redirected to a login page, then login
    if URLObject(response.url).path == LOGIN_HTML_PATH:
        reqs = self.handle_unexpected_redirect_to_login_page(response)
        for req in reqs:
            yield req

    title = response.xpath("//title/text()").extract_first()
    if title:
        title = title.strip()

    # `response.request.headers` is a dictionary where the key is the
    # header name, and the value is a *list*, containing one item,
    # which is the header value. We need to get rid of this list, and
    # just have key-value pairs. (This list probably exists in case the
    # same header is sent multiple times, but that's not happening in
    # this case, and the list construct is getting in the way.)
    #
    # We also need to convert bytes to ASCII. In practice, headers can
    # only contain ASCII characters: see
    # http://stackoverflow.com/questions/5423223/how-to-send-non-english-unicode-string-using-http-header
    request_headers = {key.decode('ascii'): value[0].decode('ascii')
                       for key, value in response.request.headers.items()}

    item = A11yItem(
        url=response.url,
        request_headers=request_headers,
        accessed_at=datetime.utcnow(),
        page_title=title,
    )
    yield item
python
{ "resource": "" }
q15357
EdxSpider.handle_unexpected_redirect_to_login_page
train
def handle_unexpected_redirect_to_login_page(self, response):
    """
    This method is called if the crawler has been unexpectedly logged
    out. If that happens, and the crawler requests a page that requires
    a logged-in user, the crawler will be redirected to a login page,
    with the originally-requested URL as the `next` query parameter.

    This method simply causes the crawler to log back in using the
    saved email and password credentials. We rely on the fact that the
    login page will redirect the user to the URL in the `next` query
    parameter if the login is successful -- this will allow the crawl
    to resume where it left off.

    This method is very much like the `get_initial_login()` method,
    but the callback is `self.after_login` instead of
    `self.after_initial_login`.
    """
    next_url = URLObject(response.url).query_dict.get("next")
    login_url = (
        URLObject("http://")
        .with_hostname(self.domain)
        .with_port(self.port)
        .with_path(LOGIN_API_PATH)
    )
    if next_url:
        login_url = login_url.set_query_param("next", next_url)

    credentials = {
        "email": self.login_email,
        "password": self.login_password,
    }
    headers = {
        b"X-CSRFToken": get_csrf_token(response),
    }
    yield scrapy.FormRequest(
        login_url,
        formdata=credentials,
        headers=headers,
        callback=self.after_login,
        errback=self.handle_error,
    )
python
{ "resource": "" }
q15358
Element._escape_attribute_quotes
train
def _escape_attribute_quotes(self, v):
    '''
    Escapes quotes with a backslash, except those inside a Django tag
    '''
    escaped = []
    inside_tag = False
    for i, _ in enumerate(v):
        if v[i:i + 2] == '{%':
            inside_tag = True
        elif v[i:i + 2] == '%}':
            inside_tag = False

        if v[i] == self.attr_wrapper and not inside_tag:
            escaped.append('\\')

        escaped.append(v[i])

    return ''.join(escaped)
python
{ "resource": "" }
q15359
RootNode.add_child
train
def add_child(self, child):
    '''Add child node, and copy all options to it'''
    super(RootNode, self).add_child(child)
    child.attr_wrapper = self.attr_wrapper
python
{ "resource": "" }
q15360
ElementNode._render_before
train
def _render_before(self, element):
    '''Render opening tag and inline content'''
    start = ["%s<%s" % (self.spaces, element.tag)]
    if element.id:
        start.append(" id=%s" % self.element.attr_wrap(self.replace_inline_variables(element.id)))
    if element.classes:
        start.append(" class=%s" % self.element.attr_wrap(self.replace_inline_variables(element.classes)))
    if element.attributes:
        start.append(' ' + self.replace_inline_variables(element.attributes))

    content = self._render_inline_content(self.element.inline_content)

    if element.nuke_inner_whitespace and content:
        content = content.strip()

    if element.self_close and not content:
        start.append(" />")
    elif content:
        start.append(">%s" % (content))
    elif self.children:
        start.append(">%s" % (self.render_newlines()))
    else:
        start.append(">")

    return ''.join(start)
python
{ "resource": "" }
q15361
ElementNode._render_after
train
def _render_after(self, element):
    '''Render closing tag'''
    if element.inline_content:
        return "</%s>%s" % (element.tag, self.render_newlines())
    elif element.self_close:
        return self.render_newlines()
    elif self.children:
        return "%s</%s>\n" % (self.spaces, element.tag)
    else:
        return "</%s>\n" % (element.tag)
python
{ "resource": "" }
q15362
Simplenote.authenticate
train
def authenticate(self, user, password):
    """ Method to get simplenote auth token

    Arguments:
        - user (string):     simplenote email address
        - password (string): simplenote password

    Returns:
        Simplenote API token as string
    """
    request = Request(AUTH_URL)
    request.add_header('X-Simperium-API-Key', API_KEY)
    if sys.version_info < (3, 3):
        request.add_data(json.dumps({'username': user, 'password': password}))
    else:
        request.data = json.dumps({'username': user, 'password': password}).encode()
    try:
        res = urllib2.urlopen(request).read()
        token = json.loads(res.decode('utf-8'))["access_token"]
    except HTTPError:
        raise SimplenoteLoginFailed('Login to Simplenote API failed!')
    except IOError:
        # no connection exception
        token = None
    return token
python
{ "resource": "" }
q15363
Simplenote.get_token
train
def get_token(self):
    """ Method to retrieve an auth token.

    The cached global token is looked up and returned if it exists.
    If it is `None` a new one is requested and returned.

    Returns:
        Simplenote API token as string
    """
    if self.token is None:
        self.token = self.authenticate(self.username, self.password)
    try:
        return str(self.token, 'utf-8')
    except TypeError:
        return self.token
python
{ "resource": "" }
q15364
Simplenote.get_note
train
def get_note(self, noteid, version=None):
    """ Method to get a specific note

    Arguments:
        - noteid (string): ID of the note to get
        - version (int): optional version of the note to get

    Returns:
        A tuple `(note, status)`

        - note (dict): note object
        - status (int): 0 on success and -1 otherwise
    """
    # request note
    params_version = ""
    if version is not None:
        params_version = '/v/' + str(version)
    params = '/i/%s%s' % (str(noteid), params_version)
    request = Request(DATA_URL + params)
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
    except HTTPError as e:
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    except IOError as e:
        return e, -1
    note = json.loads(response.read().decode('utf-8'))
    note = self.__add_simplenote_api_fields(
        note, noteid, int(response.info().get("X-Simperium-Version")))
    # Sort tags
    # For early versions of notes, tags not always available
    if "tags" in note:
        note["tags"] = sorted(note["tags"])
    return note, 0
python
{ "resource": "" }
q15365
Simplenote.update_note
train
def update_note(self, note):
    """ Method to update a specific note object. If the note object
    does not have a "key" field, a new note is created.

    Arguments
        - note (dict): note object to update

    Returns:
        A tuple `(note, status)`

        - note (dict): note object
        - status (int): 0 on success and -1 otherwise
    """
    # determine whether to create a new note or update an existing one
    # Also need to add/remove key field to keep simplenote.py consistency
    if "key" in note:
        # Then we already have a noteid we need to remove before passing
        # to the Simperium API
        noteid = note.pop("key", None)
    else:
        # Adding a new note
        noteid = uuid.uuid4().hex

    # TODO: Set a ccid?
    # ccid = uuid.uuid4().hex
    if "version" in note:
        version = note.pop("version", None)
        url = '%s/i/%s/v/%s?response=1' % (DATA_URL, noteid, version)
    else:
        url = '%s/i/%s?response=1' % (DATA_URL, noteid)
    # TODO: Could do with being consistent here. Everywhere else is
    # Request(DATA_URL+params)
    note = self.__remove_simplenote_api_fields(note)
    request = Request(url, data=json.dumps(note).encode('utf-8'))
    request.add_header(self.header, self.get_token())
    request.add_header('Content-Type', 'application/json')
    response = ""
    try:
        response = urllib2.urlopen(request)
    except HTTPError as e:
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    except IOError as e:
        return e, -1
    note = json.loads(response.read().decode('utf-8'))
    note = self.__add_simplenote_api_fields(
        note, noteid, int(response.info().get("X-Simperium-Version")))
    return note, 0
python
{ "resource": "" }
q15366
Simplenote.add_note
train
def add_note(self, note):
    """ Wrapper method to add a note

    The method can be passed the note as a dict with the `content`
    property set, which is then directly sent to the web service for
    creation. Alternatively, only the body as string can also be
    passed. In this case the parameter is used as `content` for the new
    note.

    Arguments:
        - note (dict or string): the note to add

    Returns:
        A tuple `(note, status)`

        - note (dict): the newly created note
        - status (int): 0 on success and -1 otherwise
    """
    if type(note) == str:
        return self.update_note({"content": note})
    elif (type(note) == dict) and "content" in note:
        return self.update_note(note)
    else:
        return "No string or valid note.", -1
python
{ "resource": "" }
q15367
Simplenote.get_note_list
train
def get_note_list(self, data=True, since=None, tags=[]):
    """ Method to get the note list

    The method can be passed optional arguments to limit the list to
    notes containing a certain tag, or only updated since a certain
    Simperium cursor. If omitted a list of all notes is returned.

    By default data objects are returned. If data is set to false only
    keys/ids and versions are returned. An empty data object is
    inserted for compatibility.

    Arguments:
        - tags=[] list of tags as string: return notes that have at
          least one of these tags
        - since=cursor Simperium cursor as string: return only changes
          since this cursor
        - data=True If false only return keys/ids and versions

    Returns:
        A tuple `(notes, status)`

        - notes (list): A list of note objects with all properties set
          except `content`.
        - status (int): 0 on success and -1 otherwise
    """
    # initialize data
    status = 0
    ret = []
    response_notes = {}
    notes = {"index": []}

    # get the note index
    params = '/index?limit=%s' % (str(NOTE_FETCH_LENGTH))
    if since is not None:
        params += '&since=%s' % (since)
    # Fetching data is the default
    if data:
        params += '&data=true'

    # perform initial HTTP request
    request = Request(DATA_URL + params)
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
        response_notes = json.loads(response.read().decode('utf-8'))
        # re-write for v1 consistency
        note_objects = []
        for n in response_notes["index"]:
            # If data=False then we can't do all of this bit; we just
            # have id and version, so add an empty data object.
            if not data:
                n['d'] = {}
            note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])
            note_objects.append(note_object)
        notes["index"].extend(note_objects)
    except HTTPError as e:
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    except IOError as e:
        return e, -1

    # get additional notes if bookmark was set in response
    while "mark" in response_notes:
        params += '&mark=%s' % response_notes["mark"]
        # perform the actual HTTP request
        request = Request(DATA_URL + params)
        request.add_header(self.header, self.get_token())
        try:
            response = urllib2.urlopen(request)
            response_notes = json.loads(response.read().decode('utf-8'))
            # re-write for v1 consistency
            note_objects = []
            for n in response_notes["index"]:
                if not data:
                    n['d'] = {}
                note_object = n['d']
                note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])
                note_objects.append(note_object)
            notes["index"].extend(note_objects)
        except HTTPError as e:
            if e.code == 401:
                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
            else:
                return e, -1
        except IOError as e:
            return e, -1

    note_list = notes["index"]
    self.current = response_notes["current"]
    # Can only filter for tags at the end, once all notes have been
    # retrieved.
    if (len(tags) > 0):
        note_list = [n for n in note_list
                     if (len(set(n["tags"]).intersection(tags)) > 0)]
    return note_list, status
python
{ "resource": "" }
q15368
Simplenote.trash_note
train
def trash_note(self, note_id):
    """ Method to move a note to the trash

    Arguments:
        - note_id (string): key of the note to trash

    Returns:
        A tuple `(note, status)`

        - note (dict): the newly created note or an error message
        - status (int): 0 on success and -1 otherwise
    """
    # get note
    note, status = self.get_note(note_id)
    if (status == -1):
        return note, status
    # set deleted property, but only if not already trashed
    # TODO: A 412 is ok, that's unmodified. Should handle this in
    # update_note and then not worry about checking here
    if not note["deleted"]:
        note["deleted"] = True
        note["modificationDate"] = time.time()
        # update note
        return self.update_note(note)
    else:
        return note, 0
python
{ "resource": "" }
q15369
Simplenote.delete_note
train
def delete_note(self, note_id):
    """ Method to permanently delete a note

    Arguments:
        - note_id (string): key of the note to trash

    Returns:
        A tuple `(note, status)`

        - note (dict): an empty dict or an error message
        - status (int): 0 on success and -1 otherwise
    """
    # notes have to be trashed before deletion
    note, status = self.trash_note(note_id)
    if (status == -1):
        return note, status

    params = '/i/%s' % (str(note_id))
    request = Request(url=DATA_URL + params, method='DELETE')
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
    # HTTPError must be caught before IOError, since it is a subclass
    # of it and would otherwise never be reached.
    except HTTPError as e:
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    except IOError as e:
        return e, -1
    return {}, 0
python
{ "resource": "" }
q15370
BearerAuthentication.authenticate_credentials
train
def authenticate_credentials(self, token): """ Validate the bearer token against the OAuth provider. Arguments: token (str): Access token to validate Returns: (tuple): tuple containing: user (User): User associated with the access token access_token (str): Access token Raises: AuthenticationFailed: The user is inactive, or retrieval of user info failed. """ try: user_info = self.get_user_info(token) except UserInfoRetrievalFailed: msg = 'Failed to retrieve user info. Unable to authenticate.' logger.error(msg) raise exceptions.AuthenticationFailed(msg) user, __ = get_user_model().objects.get_or_create(username=user_info['username'], defaults=user_info) if not user.is_active: raise exceptions.AuthenticationFailed('User inactive or deleted.') return user, token
python
{ "resource": "" }
q15371
BearerAuthentication.get_user_info
train
def get_user_info(self, token): """ Retrieves the user info from the OAuth provider. Arguments: token (str): OAuth2 access token. Returns: dict Raises: UserInfoRetrievalFailed: Retrieval of user info from the remote server failed. """ url = self.get_user_info_url() try: headers = {'Authorization': 'Bearer {}'.format(token)} response = requests.get(url, headers=headers) except requests.RequestException: logger.exception('Failed to retrieve user info due to a request exception.') raise UserInfoRetrievalFailed if response.status_code == 200: return self.process_user_info_response(response.json()) else: msg = 'Failed to retrieve user info. Server [{server}] responded with status [{status}].'.format( server=url, status=response.status_code ) raise UserInfoRetrievalFailed(msg)
python
{ "resource": "" }
q15372
BearerAuthentication.process_user_info_response
train
def process_user_info_response(self, response):
        """ Process the user info response data.

        By default, this simply maps the edX user info values (OIDC-style
        claims such as `preferred_username` and `given_name`) to the
        Django-friendly names shown in the example output below. If your
        provider returns different fields, you should sub-class this class
        and override this method.

        .. code-block:: python

            {
                "username": "jdoe",
                "email": "jdoe@example.com",
                "first_name": "Jane",
                "last_name": "Doe"
            }

        Arguments:
            response (dict): User info data

        Returns:
            dict
        """
        mapping = (
            ('username', 'preferred_username'),
            ('email', 'email'),
            ('last_name', 'family_name'),
            ('first_name', 'given_name'),
        )

        return {dest: response[source] for dest, source in mapping}
python
{ "resource": "" }
q15373
JwtAuthentication.authenticate_credentials
train
def authenticate_credentials(self, payload):
        """Get or create an active user with the username contained in the payload."""
        username = payload.get('preferred_username') or payload.get('username')
        if username is None:
            raise exceptions.AuthenticationFailed('JWT must include a preferred_username or username claim!')
        else:
            try:
                user, __ = get_user_model().objects.get_or_create(username=username)
                attributes_updated = False
                for claim, attr in self.get_jwt_claim_attribute_map().items():
                    payload_value = payload.get(claim)

                    if getattr(user, attr) != payload_value and payload_value is not None:
                        setattr(user, attr, payload_value)
                        attributes_updated = True
                if attributes_updated:
                    user.save()
            except Exception:  # a bare except would also swallow SystemExit/KeyboardInterrupt
                msg = 'User retrieval failed.'
                logger.exception(msg)
                raise exceptions.AuthenticationFailed(msg)

        return user
python
{ "resource": "" }
q15374
EnsureJWTAuthSettingsMiddleware._includes_base_class
train
def _includes_base_class(self, iter_classes, base_class):
        """
        Returns whether any class in iter_classes is a subclass of the given base_class.
        """
        return any(
            issubclass(auth_class, base_class)
            for auth_class in iter_classes
        )
python
{ "resource": "" }
q15375
EnsureJWTAuthSettingsMiddleware._add_missing_jwt_permission_classes
train
def _add_missing_jwt_permission_classes(self, view_class):
        """
        Adds the permission classes that should exist for JWT-based
        authentication, if needed.
        """
        view_permissions = list(getattr(view_class, 'permission_classes', []))

        # Not all permissions are classes; some will be ConditionalPermission
        # objects from the rest_condition library. So we have to crawl all those
        # and expand them to see if our target classes are inside the
        # conditionals somewhere.
        permission_classes = []
        classes_to_add = []
        while view_permissions:
            permission = view_permissions.pop()

            if not hasattr(permission, 'perms_or_conds'):
                permission_classes.append(permission)
            else:
                for child in getattr(permission, 'perms_or_conds', []):
                    view_permissions.append(child)

        for perm_class in self._required_permission_classes:
            if not self._includes_base_class(permission_classes, perm_class):
                log.warning(
                    u"The view %s allows Jwt Authentication but needs to include the %s permission class (adding it for you)",
                    view_class.__name__,
                    perm_class.__name__,
                )
                classes_to_add.append(perm_class)

        if classes_to_add:
            view_class.permission_classes += tuple(classes_to_add)
python
{ "resource": "" }
q15376
JwtAuthCookieMiddleware.process_request
train
def process_request(self, request): """ Reconstitute the full JWT and add a new cookie on the request object. """ use_jwt_cookie_requested = request.META.get(USE_JWT_COOKIE_HEADER) header_payload_cookie = request.COOKIES.get(jwt_cookie_header_payload_name()) signature_cookie = request.COOKIES.get(jwt_cookie_signature_name()) if not use_jwt_cookie_requested: metric_value = 'not-requested' elif header_payload_cookie and signature_cookie: # Reconstitute JWT auth cookie if split cookies are available and jwt cookie # authentication was requested by the client. request.COOKIES[jwt_cookie_name()] = '{}{}{}'.format( header_payload_cookie, JWT_DELIMITER, signature_cookie, ) metric_value = 'success' elif header_payload_cookie or signature_cookie: # Log unexpected case of only finding one cookie. if not header_payload_cookie: log_message, metric_value = self._get_missing_cookie_message_and_metric( jwt_cookie_header_payload_name() ) if not signature_cookie: log_message, metric_value = self._get_missing_cookie_message_and_metric( jwt_cookie_signature_name() ) log.warning(log_message) else: metric_value = 'missing-both' monitoring.set_custom_metric('request_jwt_cookie', metric_value)
python
{ "resource": "" }
q15377
_set_token_defaults
train
def _set_token_defaults(token):
    """
    Returns an updated token that includes default values for fields
    that were introduced since the token was created by checking its version number.
    """
    def _verify_version(jwt_version):
        supported_version = Version(
            settings.JWT_AUTH.get('JWT_SUPPORTED_VERSION', JwtTokenVersion.default_latest_supported)
        )
        if jwt_version.major > supported_version.major:
            logger.info('Token decode failed due to unsupported JWT version number [%s]', str(jwt_version))
            # interpolate the version into the message; passing it as a second
            # positional argument would leave the placeholder unformatted
            raise jwt.InvalidTokenError('JWT version number [%s] is unsupported' % str(jwt_version))

    def _get_and_set_version(token):
        """ Tokens didn't always contain a version number so we default to a nominal starting number. """
        if 'version' not in token:
            token['version'] = str(JwtTokenVersion.starting_version)
        return Version(token['version'])

    def _set_is_restricted(token):
        """
        We can safely default to False since all "restricted" tokens created
        prior to the addition of the `is_restricted` flag were always created
        as expired tokens. Expired tokens would not validate and so would not
        get this far into the decoding process.
        # TODO: ARCH-166
        """
        if 'is_restricted' not in token:
            token['is_restricted'] = False

    def _set_filters(token):
        """
        We can safely default to an empty list of filters since previously
        created tokens were either "restricted" (always expired) or had full
        access.
        # TODO: ARCH-166
        """
        if 'filters' not in token:
            token['filters'] = []

    token_version = _get_and_set_version(token)
    _verify_version(token_version)
    _set_is_restricted(token)
    _set_filters(token)
    return token
python
{ "resource": "" }
q15378
_get_signing_jwk_key_set
train
def _get_signing_jwk_key_set(jwt_issuer): """ Returns a JWK Keyset containing all active keys that are configured for verifying signatures. """ key_set = KEYS() # asymmetric keys signing_jwk_set = settings.JWT_AUTH.get('JWT_PUBLIC_SIGNING_JWK_SET') if signing_jwk_set: key_set.load_jwks(signing_jwk_set) # symmetric key key_set.add({'key': jwt_issuer['SECRET_KEY'], 'kty': 'oct'}) return key_set
python
{ "resource": "" }
q15379
paginate_search_results
train
def paginate_search_results(object_class, search_results, page_size, page):
    """
    Takes edx-search results and returns a Page object populated
    with db objects for that page.

    :param object_class: Model class to use when querying the db for objects.
    :param search_results: edX-search results.
    :param page_size: Number of results per page.
    :param page: Page number.
    :return: Paginator object with model objects
    """
    paginator = Paginator(search_results['results'], page_size)

    # This code is taken from within the GenericAPIView#paginate_queryset method.
    # It is common pagination code that would otherwise be duplicated here.
    try:
        page_number = paginator.validate_number(page)
    except InvalidPage:
        if page == 'last':
            page_number = paginator.num_pages
        else:
            raise Http404("Page is not 'last', nor can it be converted to an int.")
    try:
        paged_results = paginator.page(page_number)
    except InvalidPage as exception:
        raise Http404(
            "Invalid page {page_number}: {message}".format(
                page_number=page_number,
                message=str(exception)
            )
        )

    search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
    queryset = object_class.objects.filter(pk__in=search_queryset_pks)

    def ordered_objects(primary_key):
        """ Returns database object matching the search result object """
        for obj in queryset:
            if obj.pk == primary_key:
                return obj

    # map over the search results and get a list of database objects in the same order
    object_results = list(map(ordered_objects, search_queryset_pks))
    paged_results.object_list = object_results

    return paged_results
python
{ "resource": "" }
q15380
DefaultPagination.get_paginated_response
train
def get_paginated_response(self, data): """ Annotate the response with pagination information. """ return Response({ 'next': self.get_next_link(), 'previous': self.get_previous_link(), 'count': self.page.paginator.count, 'num_pages': self.page.paginator.num_pages, 'current_page': self.page.number, 'start': (self.page.number - 1) * self.get_page_size(self.request), 'results': data })
python
{ "resource": "" }
q15381
NamespacedPageNumberPagination.get_paginated_response
train
def get_paginated_response(self, data): """ Annotate the response with pagination information """ metadata = { 'next': self.get_next_link(), 'previous': self.get_previous_link(), 'count': self.get_result_count(), 'num_pages': self.get_num_pages(), } if isinstance(data, dict): if 'results' not in data: raise TypeError(u'Malformed result dict') data['pagination'] = metadata else: data = { 'results': data, 'pagination': metadata, } return Response(data)
python
{ "resource": "" }
q15382
get_decoded_jwt
train
def get_decoded_jwt(request): """ Grab jwt from jwt cookie in request if possible. Returns a decoded jwt dict if it can be found. Returns None if the jwt is not found. """ jwt_cookie = request.COOKIES.get(jwt_cookie_name(), None) if not jwt_cookie: return None return jwt_decode_handler(jwt_cookie)
python
{ "resource": "" }
q15383
SessionAuthenticationAllowInactiveUser.authenticate
train
def authenticate(self, request): """Authenticate the user, requiring a logged-in account and CSRF. This is exactly the same as the `SessionAuthentication` implementation, with the `user.is_active` check removed. Args: request (HttpRequest) Returns: Tuple of `(user, token)` Raises: PermissionDenied: The CSRF token check failed. """ # Get the underlying HttpRequest object request = request._request # pylint: disable=protected-access user = getattr(request, 'user', None) # Unauthenticated, CSRF validation not required # This is where regular `SessionAuthentication` checks that the user is active. # We have removed that check in this implementation. # But we added a check to prevent anonymous users since we require a logged-in account. if not user or user.is_anonymous: return None self.enforce_csrf(request) # CSRF passed with authenticated user return (user, None)
python
{ "resource": "" }
q15384
JwtHasContentOrgFilterForRequestedCourse.has_permission
train
def has_permission(self, request, view): """ Ensure that the course_id kwarg provided to the view contains one of the organizations specified in the content provider filters in the JWT used to authenticate. """ course_key = CourseKey.from_string(view.kwargs.get('course_id')) jwt_filters = decode_jwt_filters(request.auth) for filter_type, filter_value in jwt_filters: if filter_type == 'content_org' and filter_value == course_key.org: return True log.warning( u"Permission JwtHasContentOrgFilterForRequestedCourse: no filter found for %s.", course_key.org, ) return False
python
{ "resource": "" }
q15385
JwtHasUserFilterForRequestedUser.has_permission
train
def has_permission(self, request, view): """ If the JWT has a user filter, verify that the filtered user value matches the user in the URL. """ user_filter = self._get_user_filter(request) if not user_filter: # no user filters are present in the token to limit access return True username_param = get_username_param(request) allowed = user_filter == username_param if not allowed: log.warning( u"Permission JwtHasUserFilterForRequestedUser: user_filter %s doesn't match username %s.", user_filter, username_param, ) return allowed
python
{ "resource": "" }
q15386
RequestMetricsMiddleware.process_response
train
def process_response(self, request, response): """ Add metrics for various details of the request. """ self._set_request_auth_type_metric(request) self._set_request_user_agent_metrics(request) self._set_request_referer_metric(request) self._set_request_user_id_metric(request) return response
python
{ "resource": "" }
q15387
RequestMetricsMiddleware._set_request_user_id_metric
train
def _set_request_user_id_metric(self, request): """ Add request_user_id metric Metrics: request_user_id """ if hasattr(request, 'user') and hasattr(request.user, 'id') and request.user.id: monitoring.set_custom_metric('request_user_id', request.user.id)
python
{ "resource": "" }
q15388
RequestMetricsMiddleware._set_request_referer_metric
train
def _set_request_referer_metric(self, request): """ Add metric 'request_referer' for http referer. """ if 'HTTP_REFERER' in request.META and request.META['HTTP_REFERER']: monitoring.set_custom_metric('request_referer', request.META['HTTP_REFERER'])
python
{ "resource": "" }
q15389
RequestMetricsMiddleware._set_request_user_agent_metrics
train
def _set_request_user_agent_metrics(self, request):
        """
        Add metrics for the request's user agent.

        Metrics:
             request_user_agent
             request_client_name: The client name from edx-rest-api-client calls.
        """
        if 'HTTP_USER_AGENT' in request.META and request.META['HTTP_USER_AGENT']:
            user_agent = request.META['HTTP_USER_AGENT']
            monitoring.set_custom_metric('request_user_agent', user_agent)
            # Example agent string from edx-rest-api-client:
            #    python-requests/2.9.1 edx-rest-api-client/1.7.2 ecommerce
            # See https://github.com/edx/edx-rest-api-client/commit/692903c30b157f7a4edabc2f53aae1742db3a019
            user_agent_parts = user_agent.split()
            if len(user_agent_parts) == 3 and user_agent_parts[1].startswith('edx-rest-api-client/'):
                monitoring.set_custom_metric('request_client_name', user_agent_parts[2])
python
{ "resource": "" }
q15390
RequestMetricsMiddleware._set_request_auth_type_metric
train
def _set_request_auth_type_metric(self, request): """ Add metric 'request_auth_type' for the authentication type used. NOTE: This is a best guess at this point. Possible values include: no-user unauthenticated jwt/bearer/other-token-type session-or-unknown (catch all) """ if 'HTTP_AUTHORIZATION' in request.META and request.META['HTTP_AUTHORIZATION']: token_parts = request.META['HTTP_AUTHORIZATION'].split() # Example: "JWT eyJhbGciO..." if len(token_parts) == 2: auth_type = token_parts[0].lower() # 'jwt' or 'bearer' (for example) else: auth_type = 'other-token-type' elif not hasattr(request, 'user') or not request.user: auth_type = 'no-user' elif not request.user.is_authenticated: auth_type = 'unauthenticated' else: auth_type = 'session-or-unknown' monitoring.set_custom_metric('request_auth_type', auth_type)
python
{ "resource": "" }
q15391
jsonrpc_request
train
def jsonrpc_request(method, identifier, params=None): """Produce a JSONRPC request.""" return '{}\r\n'.format(json.dumps({ 'id': identifier, 'method': method, 'params': params or {}, 'jsonrpc': '2.0' })).encode()
python
{ "resource": "" }
q15392
SnapcastProtocol.handle_data
train
def handle_data(self, data): """Handle JSONRPC data.""" if 'id' in data: self.handle_response(data) else: self.handle_notification(data)
python
{ "resource": "" }
q15393
SnapcastProtocol.handle_response
train
def handle_response(self, data): """Handle JSONRPC response.""" identifier = data.get('id') self._buffer[identifier]['data'] = data.get('result') self._buffer[identifier]['flag'].set()
python
{ "resource": "" }
q15394
SnapcastProtocol.handle_notification
train
def handle_notification(self, data): """Handle JSONRPC notification.""" if data.get('method') in self._callbacks: self._callbacks.get(data.get('method'))(data.get('params'))
python
{ "resource": "" }
q15395
SnapcastProtocol.request
train
def request(self, method, params):
        """Send a JSONRPC request."""
        identifier = random.randint(1, 1000)
        self._transport.write(jsonrpc_request(method, identifier, params))
        self._buffer[identifier] = {'flag': asyncio.Event()}
        yield from self._buffer[identifier]['flag'].wait()
        result = self._buffer[identifier]['data']
        # drop the whole buffer entry so completed requests don't accumulate
        del self._buffer[identifier]
        return result
python
{ "resource": "" }
q15396
Snapserver.start
train
def start(self): """Initiate server connection.""" yield from self._do_connect() _LOGGER.info('connected to snapserver on %s:%s', self._host, self._port) status = yield from self.status() self.synchronize(status) self._on_server_connect()
python
{ "resource": "" }
q15397
Snapserver._do_connect
train
def _do_connect(self): """Perform the connection to the server.""" _, self._protocol = yield from self._loop.create_connection( lambda: SnapcastProtocol(self._callbacks), self._host, self._port)
python
{ "resource": "" }
q15398
Snapserver._reconnect_cb
train
def _reconnect_cb(self):
        """Callback to reconnect to the server."""
        @asyncio.coroutine
        def try_reconnect():
            """Actual coroutine to try to reconnect or reschedule."""
            try:
                yield from self._do_connect()
            except IOError:
                self._loop.call_later(SERVER_RECONNECT_DELAY,
                                      self._reconnect_cb)
        asyncio.ensure_future(try_reconnect())
python
{ "resource": "" }
q15399
Snapserver._transact
train
def _transact(self, method, params=None): """Wrap requests.""" result = yield from self._protocol.request(method, params) return result
python
{ "resource": "" }