code (string)
signature (string)
docstring (string)
loss_without_docstring (float64)
loss_with_docstring (float64)
factor (float64)
left_height = self.left.height() if self.left else 0
right_height = self.right.height() if self.right else 0

if abs(left_height - right_height) > 1:
    return False

return all(c.is_balanced for c, _ in self.children)
def is_balanced(self)
Returns True if the (sub)tree is balanced. The tree is balanced if the heights of both subtrees differ by at most 1.
2.575415
2.502291
1.029223
return math.pow(self.data[axis] - point[axis], 2)
def axis_dist(self, point, axis)
Squared distance at the given axis between the current Node and the given point
6.504995
6.255476
1.039888
r = range(self.dimensions)
return sum([self.axis_dist(point, i) for i in r])
def dist(self, point)
Squared distance between the current Node and the given point
7.521041
8.18065
0.91937
if k < 1:
    raise ValueError("k must be greater than 0.")

if dist is None:
    get_dist = lambda n: n.dist(point)
else:
    get_dist = lambda n: dist(n.data, point)

results = []

self._search_node(point, k, results, get_dist, itertools.count())

# We sort the final result by the distance in the tuple
# (<KdNode>, distance).
return [(node, -d) for d, _, node in sorted(results, reverse=True)]
def search_knn(self, point, k, dist=None)
Return the k nearest neighbors of point and their distances. point must be an actual point, not a node. k is the number of results to return. The actual results can be less (if there aren't more nodes to return) or more in case of equal distances. dist is a distance function, expecting two points and returning a distance value. Distance values can be any comparable type. The result is an ordered list of (node, distance) tuples.
4.678294
4.358473
1.073379
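A minimal usage sketch of the k-nearest-neighbour search above, assuming the `kdtree` package's `create` helper; the points and names are illustrative only:

    import kdtree  # assumed package providing the KDNode methods shown above

    tree = kdtree.create([(1, 2), (3, 4), (5, 6)])

    # Two nearest neighbours of (2, 2); each entry is a (node, distance) tuple.
    for node, distance in tree.search_knn((2, 2), 2):
        print(node.data, distance)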
return next(iter(self.search_knn(point, 1, dist)), None)
def search_nn(self, point, dist=None)
Search the nearest node of the given point. point must be an actual point, not a node. The nearest node to the point is returned. If a location of an actual node is used, the Node with this location will be returned (not its neighbor). dist is a distance function, expecting two points and returning a distance value. Distance values can be any comparable type. The result is a (node, distance) tuple.
7.791165
8.192165
0.951051
results = []
get_dist = lambda n: n.dist(point)

self._search_nn_dist(point, distance, results, get_dist)
return results
def search_nn_dist(self, point, distance, best=None)
Search the n nearest nodes of the given point which are within given distance. point must be a location, not a node. A list containing the n nearest nodes to the point within the distance will be returned.
5.415734
5.309953
1.019921
if not self:
    return True

if self.left and self.data[self.axis] < self.left.data[self.axis]:
    return False

if self.right and self.data[self.axis] > self.right.data[self.axis]:
    return False

return all(c.is_valid() for c, _ in self.children) or self.is_leaf
def is_valid(self)
Checks recursively if the tree is valid. It is valid if each node splits correctly.
3.386107
2.652533
1.276556
max_key = lambda child_parent: child_parent[0].data[axis]

# we don't know our parent, so we include None
me = [(self, None)] if self else []

child_max = [c.extreme_child(sel_func, axis) for c, _ in self.children]
# insert self for unknown parents
child_max = [(c, p if p is not None else self) for c, p in child_max]

candidates = me + child_max

if not candidates:
    return None, None

return sel_func(candidates, key=max_key)
def extreme_child(self, sel_func, axis)
Returns a child of the subtree and its parent. The child is selected by sel_func which is either min or max (or a different function with similar semantics).
6.214001
6.133456
1.013132
encrypted = self.vault.encrypt(text)

if stream:
    stream.write(encrypted)
else:
    return encrypted
def dump_raw(self, text, stream=None)
Encrypt raw data and write to stream.
4.577292
3.482022
1.31455
yaml_text = yaml.dump(
    data,
    default_flow_style=False,
    allow_unicode=True)
return self.dump_raw(yaml_text, stream=stream)
def dump(self, data, stream=None)
Encrypt data and print to stdout or write to stream.
3.448926
3.588394
0.961134
if isinstance(item, MenuItem):
    if name not in c.items:
        c.items[name] = []
    c.items[name].append(item)
    c.sorted[name] = False
def add_item(c, name, item)
add_item adds MenuItems to the menu identified by 'name'
3.525186
2.986345
1.180435
# we don't need to do this more than once
if c.loaded:
    return

# Fetch all installed app names
app_names = settings.INSTALLED_APPS
if apps:
    app_names = [
        app_config.name
        for app_config in apps.get_app_configs()
    ]

# loop through our INSTALLED_APPS
for app in app_names:
    # skip any django apps
    if app.startswith("django."):
        continue

    menu_module = '%s.menus' % app
    try:
        __import__(menu_module, fromlist=["menu", ])
    except ImportError:
        pass

c.loaded = True
def load_menus(c)
load_menus loops through INSTALLED_APPS and loads the menus.py files from them.
4.003386
3.295053
1.214969
for name in c.items:
    if not c.sorted[name]:
        c.items[name].sort(key=lambda x: x.weight)
        c.sorted[name] = True
def sort_menus(c)
sort_menus goes through the items and sorts them based on their weight
4.056
3.304929
1.227258
# make sure we're loaded & sorted
c.load_menus()
c.sort_menus()

if name is None:
    # special case, process all menus
    items = {}
    for name in c.items:
        items[name] = c.process(request, name)
    return items

if name not in c.items:
    return []

items = copy.deepcopy(c.items[name])
curitem = None
for item in items:
    item.process(request)
    if item.visible:
        item.selected = False
        if item.match_url(request):
            if curitem is None or len(curitem.url) < len(item.url):
                curitem = item

if curitem is not None:
    curitem.selected = True

# return only visible items
visible = [
    item
    for item in items
    if item.visible
]

# determine if we should apply 'selected' to parents when one of their
# children is the 'selected' menu
if getattr(settings, 'MENU_SELECT_PARENTS', False):
    def is_child_selected(item):
        for child in item.children:
            if child.selected or is_child_selected(child):
                return True

    for item in visible:
        if is_child_selected(item):
            item.selected = True

return visible
def process(c, request, name=None)
process uses the current request to determine which menus should be visible, which are selected, etc.
3.207525
3.049696
1.051752
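A short sketch of how add_item, load_menus and process fit together, assuming a django-simple-menu style API; the app name and URLs are hypothetical:

    # myapp/menus.py -- discovered by load_menus() via "<app>.menus"
    from menu import Menu, MenuItem

    Menu.add_item("main", MenuItem("Home", "/"))
    Menu.add_item("main", MenuItem("Reports", "/reports/", weight=50))

    # Later, e.g. in a context processor, resolve visibility/selection per request:
    # visible_items = Menu.process(request, name="main")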
if callable(self.check_func):
    self.visible = self.check_func(request)
def check(self, request)
Evaluate if we should be visible for this request
6.560938
4.1369
1.585955
# if we're not visible we return since we don't need to do anymore processing
self.check(request)
if not self.visible:
    return

# evaluate our title
if callable(self.title):
    self.title = self.title(request)

# if no title is set turn it into a slug
if self.slug is None:
    # in python3 we don't need to convert to unicode, in python2 slugify
    # requires a unicode string
    if sys.version_info > (3, 0):
        self.slug = slugify(self.title)
    else:
        self.slug = slugify(unicode(self.title))

# evaluate children
if callable(self.children):
    children = list(self.children(request))
else:
    children = list(self.children)

for child in children:
    child.parent = self
    child.process(request)

self.children = [
    child
    for child in children
    if child.visible
]
self.children.sort(key=lambda child: child.weight)

# if we have no children and MENU_HIDE_EMPTY then we are not visible and should return
hide_empty = getattr(settings, 'MENU_HIDE_EMPTY', False)
if hide_empty and len(self.children) == 0:
    self.visible = False
    return

# find out if one of our children is selected, and mark it as such
curitem = None
for item in self.children:
    item.selected = False
    if item.match_url(request):
        if curitem is None or len(curitem.url) < len(item.url):
            curitem = item

if curitem is not None:
    curitem.selected = True
def process(self, request)
process determines if this item should be visible, if it's selected, etc.
3.225998
3.132533
1.029837
matched = False

if self.exact_url:
    if re.match("%s$" % (self.url,), request.path):
        matched = True
elif re.match("%s" % self.url, request.path):
    matched = True

return matched
def match_url(self, request)
match_url determines whether this item's URL matches the request path (i.e. whether it should be selected).
3.418036
3.161682
1.081081
if not proto:
    defer.returnValue(None)

reply = yield self.__send_ismaster(proto, timeout=self.initialDelay)

# Handle the reply from the "ismaster" query. The reply contains
# configuration information about the peer.

# Make sure we got a result document.
if len(reply.documents) != 1:
    raise OperationFailure("TxMongo: invalid document length.")

# Get the configuration document from the reply.
config = reply.documents[0].decode()

# Make sure the command was successful.
if not config.get("ok"):
    code = config.get("code")
    msg = "TxMongo: " + config.get("err", "Unknown error")
    raise OperationFailure(msg, code)

# Check that the replicaSet matches.
set_name = config.get("setName")
expected_set_name = self.uri["options"].get("replicaset")
if expected_set_name and (expected_set_name != set_name):
    # Log the invalid replica set failure.
    msg = "TxMongo: Mongo instance does not match requested replicaSet."
    raise ConfigurationError(msg)

# Track max bson object size limit.
proto.max_bson_size = config.get("maxBsonObjectSize", DEFAULT_MAX_BSON_SIZE)
proto.max_write_batch_size = config.get("maxWriteBatchSize", DEFAULT_MAX_WRITE_BATCH_SIZE)

proto.set_wire_versions(config.get("minWireVersion", 0),
                        config.get("maxWireVersion", 0))

# Track the other hosts in the replica set.
hosts = config.get("hosts")
if isinstance(hosts, list) and hosts:
    for host in hosts:
        if ':' not in host:
            host = (host, 27017)
        else:
            host = host.split(':', 1)
            host[1] = int(host[1])
            host = tuple(host)
        if host not in self.__allnodes:
            self.__allnodes.append(host)

# Check if this node is the master.
ismaster = config.get("ismaster")
if not ismaster:
    msg = "TxMongo: MongoDB host `%s` is not master." % config.get('me')
    raise AutoReconnect(msg)
def configure(self, proto)
Configures the protocol using the information gathered from the remote Mongo instance. Such information may contain the max BSON document size, replica set configuration, and the master status of the instance.
3.426878
3.330848
1.02883
if self.instance:
    return defer.succeed(self.instance)

def on_cancel(d):
    self.__notify_ready.remove(d)

df = defer.Deferred(on_cancel)
self.__notify_ready.append(df)
return df
def notifyReady(self)
Returns a deferred that will fire when the factory has created a protocol that can be used to communicate with a Mongo server. Note that this will not fire until we have connected to a Mongo master, unless slaveOk was specified in the Mongo URI connection options.
4.614636
4.432972
1.04098
if not self.continueTrying:
    msg = "TxMongo: Abandoning {0} on explicit request.".format(connector)
    log.msg(msg)
    return

if connector is None:
    if self.connector is None:
        raise ValueError("TxMongo: No additional connector to retry.")
    else:
        connector = self.connector

delay = False
self.__index += 1

if self.__index >= len(self.__allnodes):
    self.__index = 0
    delay = True

connector.host, connector.port = self.__allnodes[self.__index]

if delay:
    self.retry(connector)
else:
    connector.connect()
def retryNextHost(self, connector=None)
Have this connector connect again, to the next host in the configured list of hosts.
4.895991
4.90996
0.997155
d = defer.Deferred()
self.__indexes_created_defer.chainDeferred(d)
return d
def indexes_created(self)
Returns a deferred that fires upon creation of this GridFS instance's indexes.
10.278375
5.700747
1.802987
grid_file = GridIn(self.__collection, **kwargs)

def _finally(result):
    return grid_file.close().addCallback(lambda _: result)

return grid_file.write(data)\
    .addBoth(_finally)\
    .addCallback(lambda _: grid_file._id)
def put(self, data, **kwargs)
Put data in GridFS as a new file. Equivalent to doing:

    >>> f = new_file(**kwargs)
    >>> try:
    >>>     f.write(data)
    >>> finally:
    >>>     f.close()

`data` can be either an instance of :class:`str` or a file-like object providing a :meth:`read` method. Any keyword arguments will be passed through to the created file - see :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the ``"_id"`` of the created file.

:Parameters:
    - `data`: data to be written as a file.
    - `**kwargs` (optional): keyword arguments for file creation

.. versionadded:: 1.6
7.509575
6.549639
1.146563
def ok(doc):
    if doc is None:
        raise NoFile("TxMongo: no file in gridfs with _id {0}".format(repr(file_id)))
    return GridOut(self.__collection, doc)

return self.__collection.files.find_one({"_id": file_id}).addCallback(ok)
def get(self, file_id)
Get a file from GridFS by ``"_id"``. Returns an instance of :class:`~gridfs.grid_file.GridOut`, which provides a file-like interface for reading. :Parameters: - `file_id`: ``"_id"`` of the file to get .. versionadded:: 1.6
7.393576
6.609485
1.118631
query = {"filename": filename} skip = abs(version) if version < 0: skip -= 1 myorder = DESCENDING("uploadDate") else: myorder = ASCENDING("uploadDate") def ok(cursor): if cursor: return GridOut(self.__collection, cursor[0]) raise NoFile("no version %d for filename %r" % (version, filename)) return self.__files.find(query, filter=filter.sort(myorder), limit=1, skip=skip)\ .addCallback(ok)
def get_version(self, filename=None, version=-1)
Get a file from GridFS by ``"filename"``. Returns a version of the file in GridFS whose filename matches `filename` and whose metadata fields match the supplied keyword arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. Version numbering is a convenience atop the GridFS API provided by MongoDB. If more than one file matches the query (either by `filename` alone, by metadata fields, or by a combination of both), then version ``-1`` will be the most recently uploaded matching file, ``-2`` the second most recently uploaded, etc. Version ``0`` will be the first version uploaded, ``1`` the second version, etc. So if three versions have been uploaded, then version ``0`` is the same as version ``-3``, version ``1`` is the same as version ``-2``, and version ``2`` is the same as version ``-1``. Note that searching by random (unindexed) meta data is not supported here. Raises :class:`~gridfs.errors.NoFile` if no such version of that file exists. :Parameters: - `filename`: ``"filename"`` of the file to get, or `None` - `version` (optional): version of the file to get (defaults to -1, the most recent version uploaded)
7.249096
5.409749
1.340006
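To illustrate the version numbering described in the docstring, a hedged sketch assuming a txmongo GridFS instance named `gfs` inside an inlineCallbacks generator, with a hypothetical file uploaded three times:

    # version 0 / -3 -> first upload, version 1 / -2 -> second, version 2 / -1 -> newest
    newest = yield gfs.get_version("report.txt", version=-1)
    oldest = yield gfs.get_version("report.txt", version=0)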
def ok(doc):
    if doc is None:
        raise NoFile("TxMongo: no file in gridfs with filename {0}".format(repr(filename)))
    return GridOut(self.__collection, doc)

return self.__files.find_one({"filename": filename},
                             filter=filter.sort(DESCENDING("uploadDate"))).addCallback(ok)
def get_last_version(self, filename)
Get a file from GridFS by ``"filename"``. Returns the most recently uploaded file in GridFS with the name `filename` as an instance of :class:`~gridfs.grid_file.GridOut`. Raises :class:`~gridfs.errors.NoFile` if no such file exists. An index on ``{filename: 1, uploadDate: -1}`` will automatically be created when this method is called the first time. :Parameters: - `filename`: ``"filename"`` of the file to get .. versionadded:: 1.6
12.689199
9.158095
1.385572
return defer.DeferredList([
    self.__files.remove({"_id": file_id}, safe=True),
    self.__chunks.remove({"files_id": file_id})
])
def delete(self, file_id)
Delete a file from GridFS by ``"_id"``. Removes all data belonging to the file with ``"_id"``: `file_id`. .. warning:: Any processes/threads reading from the file while this method is executing will likely see an invalid/corrupt file. Care should be taken to avoid concurrent reads to a file while it is being deleted. :Parameters: - `file_id`: ``"_id"`` of the file to delete .. versionadded:: 1.6
5.705453
5.892206
0.968305
if isinstance(command, (bytes, unicode)):
    command = SON([(command, value)])

options = kwargs.copy()
command.update(options)

def on_ok(response):
    if check:
        msg = "TxMongo: command {0} on namespace {1} failed with '%s'".format(repr(command), ns)
        _check_command_response(response, msg, allowable_errors)
    return response

ns = self["$cmd"].with_options(codec_options=codec_options)
return ns.find_one(command, _deadline=_deadline).addCallback(on_ok)
def command(self, command, value=1, check=True, allowable_errors=None, codec_options=DEFAULT_CODEC_OPTIONS, _deadline=None, **kwargs)
command(command, value=1, check=True, allowable_errors=None, codec_options=DEFAULT_CODEC_OPTIONS)
4.575407
4.508698
1.014796
if isinstance(name_or_collection, Collection):
    name = name_or_collection._collection_name
elif isinstance(name_or_collection, (bytes, unicode)):
    name = name_or_collection
else:
    raise TypeError("TxMongo: name must be an instance of basestring or txmongo.Collection")

return self.command("drop", unicode(name), allowable_errors=["ns not found"],
                    _deadline=_deadline)
def drop_collection(self, name_or_collection, _deadline=None)
drop_collection(name_or_collection)
3.689171
3.604266
1.023557
def ok(results):
    names = [r["name"] for r in results]
    names = [n[len(str(self)) + 1:] for n in names
             if n.startswith(str(self) + ".")]
    names = [n for n in names if "$" not in n]
    return names

return self["system.namespaces"].find(_deadline=_deadline).addCallback(ok)
def collection_names(self, _deadline=None)
collection_names()
4.409422
4.328563
1.01868
if not isinstance(name, (bytes, unicode)):
    raise TypeError("TxMongo: name must be an instance of basestring.")

if not isinstance(password, (bytes, unicode)):
    raise TypeError("TxMongo: password must be an instance of basestring.")

return self.connection.authenticate(self, name, password, mechanism)
def authenticate(self, name, password, mechanism="DEFAULT")
Send an authentication command for this database. Mostly stolen from pymongo.
3.578768
2.971269
1.204457
@wraps(func)
def _timeout(*args, **kwargs):
    now = time()
    deadline = kwargs.pop("deadline", None)
    seconds = kwargs.pop("timeout", None)

    if deadline is None and seconds is not None:
        deadline = now + seconds

    if deadline is not None and deadline < now:
        raise TimeExceeded("TxMongo: run time exceeded by {0}s.".format(now - deadline))

    kwargs['_deadline'] = deadline

    raw_d = func(*args, **kwargs)
    if deadline is None:
        return raw_d

    if seconds is None and deadline is not None and deadline - now > 0:
        seconds = deadline - now

    timeout_d = defer.Deferred()
    times_up = reactor.callLater(seconds, timeout_d.callback, None)

    def on_ok(result):
        if timeout_d.called:
            raw_d.cancel()
            raise TimeExceeded("TxMongo: run time of {0}s exceeded.".format(seconds))
        else:
            times_up.cancel()
            return result[0]

    def on_fail(failure):
        failure.trap(defer.FirstError)
        assert failure.value.index == 0
        times_up.cancel()
        failure.value.subFailure.raiseException()

    return defer.DeferredList([raw_d, timeout_d],
                              fireOnOneCallback=True,
                              fireOnOneErrback=True,
                              consumeErrors=True).addCallbacks(on_ok, on_fail)

return _timeout
def timeout(func)
Decorator to add timeout to Deferred calls
3.036285
2.947057
1.030277
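A sketch of how such a decorator is typically used (the decorated method name is hypothetical); the wrapper consumes `timeout=` or `deadline=` keyword arguments from the caller and races the wrapped Deferred against a timer:

    @timeout
    def find_one(self, *args, **kwargs):
        ...

    # Caller side: fail with TimeExceeded if the call takes longer than 5 seconds.
    d = collection.find_one({"_id": 1}, timeout=5)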
def getter(self):
    if closed_only and not self._closed:
        raise AttributeError(
            "TxMongo: can only get {0} on a closed file.".format(repr(field_name)))
    return self._file.get(field_name, None)

def setter(self, value):
    if self._closed:
        raise AttributeError(
            "TxMongo: cannot set {0} on a closed file.".format(repr(field_name)))
    self._file[field_name] = value

if read_only:
    docstring += "\n\nThis attribute is read-only."
elif not closed_only:
    docstring = "%s\n\n%s" % (docstring, "This attribute can only be "
                              "set before :meth:`close` has been called.")
else:
    docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
                              "can only be read after :meth:`close` "
                              "has been called.")

if not read_only and not closed_only:
    return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _create_property(field_name, docstring, read_only=False, closed_only=False)
Helper for creating properties to read/write to files.
2.581299
2.496779
1.033852
if not data:
    return defer.succeed(None)

assert (len(data) <= self.chunk_size)

chunk = {"files_id": self._file["_id"],
         "n": self._chunk_number,
         "data": Binary(data)}

def ok(_):
    self._chunk_number += 1
    self._position += len(data)

# Continue writing after the insert completes (non-blocking)
return self._chunks.insert(chunk).addCallback(ok)
def __flush_data(self, data)
Flush `data` to a chunk.
6.16514
5.660551
1.089141
def ok(_):
    self._buffer.close()
    self._buffer = StringIO()

return self.__flush_data(self._buffer.getvalue()).addCallback(ok)
def __flush_buffer(self)
Flush the buffer contents out to a chunk.
7.207005
6.227593
1.15727
def on_md5(md5):
    self._file["md5"] = md5
    self._file["length"] = self._position
    self._file["uploadDate"] = datetime.datetime.utcnow()
    return self._coll.files.insert(self._file)

return self.__flush_buffer()\
    .addCallback(lambda _: self._coll.filemd5(self._id))\
    .addCallback(on_md5)
def __flush(self)
Flush the file to the database.
5.451887
5.16956
1.054613
if self._closed:
    return defer.succeed(None)

def ok(_):
    self._closed = True

return self.__flush().addCallback(ok)
def close(self)
Flush the file and close it. A closed file cannot be written any more. Calling :meth:`close` more than once is allowed.
6.950022
5.876427
1.182695
if self._closed:
    raise ValueError("TxMongo: cannot write to a closed file.")

try:
    # file-like
    read = data.read
except AttributeError:
    # string
    if not isinstance(data, (bytes, unicode)):
        raise TypeError("TxMongo: can only write strings or file-like objects.")

    if isinstance(data, unicode):
        try:
            data = data.encode(self.encoding)
        except AttributeError:
            raise TypeError("TxMongo: must specify an encoding for file in "
                            "order to write {0}".format(data))

    read = StringIO(data).read

def do_write(_=None):
    to_write = read(self.chunk_size)
    if to_write and len(to_write) == self.chunk_size:
        return self.__flush_data(to_write).addCallback(do_write)
    else:
        self._buffer.write(to_write)

if self._buffer.tell() > 0:
    # Make sure to flush only when _buffer is complete
    space = self.chunk_size - self._buffer.tell()

    if space:
        to_write = read(space)
        self._buffer.write(to_write)
        if len(to_write) < space:
            return defer.succeed(None)  # EOF or incomplete

    return self.__flush_buffer().addCallback(do_write)
else:
    return defer.maybeDeferred(do_write)
def write(self, data)
Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of :class:`str` or a file-like object. :Parameters: - `data`: string of bytes or file-like object to be written to the file
3.344804
3.382987
0.988713
iterator = iter(sequence)

def iterate(_=None):
    try:
        return self.write(next(iterator)).addCallback(iterate)
    except StopIteration:
        return

return defer.maybeDeferred(iterate)
def writelines(self, sequence)
Write a sequence of strings to the file. Does not add separators.
5.702615
7.166471
0.795735
if not size:
    return defer.succeed(None)

remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
    size = remainder

class State(object):
    pass

state = State()
state.data = self.__buffer
state.chunk_number = (len(state.data) + self.__position) / self.chunk_size

def iterate(_=None):
    if len(state.data) < size:
        return self.__chunks.find_one({"files_id": self._id, "n": state.chunk_number})\
            .addCallback(process).addCallback(iterate)
    return defer.succeed(None)

def process(chunk):
    if not chunk:
        raise CorruptGridFile("TxMongo: no chunk #{0}".format(state.chunk_number))

    if not state.data:
        state.data += chunk["data"][self.__position % self.chunk_size:]
    else:
        state.data += chunk["data"]

    state.chunk_number += 1

def done(_):
    self.__position += size
    to_return = state.data[:size]
    self.__buffer = state.data[size:]
    return to_return

return iterate().addCallback(done)
def read(self, size=-1)
Read at most `size` bytes from the file (less if there isn't enough data). The bytes are returned as an instance of :class:`str`. If `size` is negative or omitted all data is read. :Parameters: - `size` (optional): the number of bytes to read
3.769559
4.087155
0.922294
if whence == os.SEEK_SET:
    new_pos = pos
elif whence == os.SEEK_CUR:
    new_pos = self.__position + pos
elif whence == os.SEEK_END:
    new_pos = int(self.length) + pos
else:
    raise IOError(22, "TxMongo: invalid value for `whence`")

if new_pos < 0:
    raise IOError(22, "TxMongo: invalid value for `pos` - must be positive")

self.__position = new_pos
def seek(self, pos, whence=os.SEEK_SET)
Set the current position of this file. :Parameters: - `pos`: the position (or offset if using relative positioning) to seek to - `whence` (optional): where to seek from. :attr:`os.SEEK_SET` (``0``) for absolute file positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end.
2.550063
2.935817
0.868604
content_type = self.__queue_item.response.headers.get('content-type')
scrapers = self.__get_all_scrapers()
new_requests = []

for scraper in scrapers:
    instance = scraper(self.__options, self.__queue_item)
    if self.__content_type_matches(content_type, instance.content_types):
        new_requests.extend(instance.get_requests())

return new_requests
def get_new_requests(self)
Retrieve all the new requests that were found in this request. Returns: list(:class:`nyawc.http.Request`): A list of request objects.
3.861614
3.921652
0.984691
request_by_method = getattr(requests, method)

return request_by_method(
    url=url,
    data=data,
    auth=auth,
    cookies=cookies,
    headers=headers,
    proxies=proxies,
    timeout=timeout,
    verify=verify,
    allow_redirects=True,
    stream=False
)
def __make_request(self, url, method, data, auth, cookies, headers, proxies, timeout, verify)
Execute a request with the given data. Args: url (str): The URL to call. method (str): The method (e.g. `get` or `post`). data (str): The data to call the URL with. auth (obj): The authentication class. cookies (obj): The cookie dict. headers (obj): The header dict. proxies (obj): The proxies dict. timeout (int): The request timeout in seconds. verify (mixed): SSL verification. Returns: obj: The response object.
2.129529
2.36537
0.900294
modules_strings = self.__get_all_scrapers_modules()
modules = []

for module_string in modules_strings:
    module = importlib.import_module("nyawc.scrapers." + module_string)
    modules.append(getattr(module, module_string))

return modules
def __get_all_scrapers(self)
Find all available scraper references. Returns: list(obj): The scraper references.
3.63086
3.430768
1.058323
modules = []

file = os.path.realpath(__file__)
folder = os.path.dirname(file)

for filename in os.listdir(folder + "/../scrapers"):
    if filename.endswith("Scraper.py") and not filename.startswith("Base"):
        modules.append(filename[:-3])

return modules
def __get_all_scrapers_modules(self)
Find all available scraper modules. Returns: list(obj): The scraper modules.
3.07473
3.271123
0.939962
if content_type is None:
    return False

if content_type in available_content_types:
    return True

for available_content_type in available_content_types:
    if available_content_type in content_type:
        return True

return False
def __content_type_matches(self, content_type, available_content_types)
Check if the given content type matches one of the available content types. Args: content_type (str): The given content type. available_content_types list(str): All the available content types. Returns: bool: True if a match was found, False otherwise.
1.957742
2.075971
0.943049
host = self.queue_item.response.url
content = self.queue_item.response.text

found_requests = []

for expression in self.__expressions:
    matches = re.findall(expression["raw"], content)

    for match in matches:
        found_url = match[expression["group"]]
        absolute_url = URLHelper.make_absolute(host, found_url)
        found_requests.append(Request(absolute_url))

return found_requests
def derived_get_requests(self)
Get all the new requests that were found in the response. Returns: list(:class:`nyawc.http.Request`): A list of new requests that were found.
4.684167
4.347755
1.077376
for route in self.__routing_options.routes:
    if re.compile(route).match(crawled_request.url):
        count_key = str(route) + crawled_request.method

        if count_key in self.__routing_count.keys():
            self.__routing_count[count_key] += 1
        else:
            self.__routing_count[count_key] = 1

        break
def increase_route_count(self, crawled_request)
Increase the count that determines how many times a URL of a certain route has been crawled. Args: crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route.
2.810361
2.786498
1.008564
for route in self.__routing_options.routes:
    if re.compile(route).match(scraped_request.url):
        count_key = str(route) + scraped_request.method

        if count_key in self.__routing_count.keys():
            return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold

return False
def is_treshold_reached(self, scraped_request)
Check if similar requests to the given request have already been crawled X times, where X is the minimum threshold amount from the options. Args: scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum threshold. Returns: bool: True if the threshold was reached, False otherwise.
4.65749
4.33694
1.073912
queue_item = QueueItem(request, Response(request.url))
self.add(queue_item)
return queue_item
def add_request(self, request)
Add a request to the queue. Args: request (:class:`nyawc.http.Request`): The request to add. Returns: :class:`nyawc.QueueItem`: The created queue item.
6.667989
6.774953
0.984212
queue_item = QueueItem(request, Response(request.url))
key = queue_item.get_hash()

for status in QueueItem.STATUSES:
    if key in self.__get_var("items_" + status).keys():
        return True

return False
def has_request(self, request)
Check if the given request already exists in the queue. Args: request (:class:`nyawc.http.Request`): The request to check. Returns: bool: True if already exists, False otherwise.
8.1699
8.081882
1.010891
hash_key = queue_item.get_hash()
items = self.__get_var("items_" + queue_item.status)

if hash_key in items.keys():
    return

items[queue_item.get_hash()] = queue_item
self.count_total += 1
def add(self, queue_item)
Add a request/response pair to the queue. Args: queue_item (:class:`nyawc.QueueItem`): The queue item to add.
5.435595
5.666378
0.959272
items = self.__get_var("items_" + queue_item.status)

del items[queue_item.get_hash()]
self.count_total -= 1

queue_item.status = status
self.add(queue_item)
def move(self, queue_item, status)
Move a request/response pair to another status. Args: queue_item (:class:`nyawc.QueueItem`): The queue item to move status (str): The new status of the queue item.
7.326881
7.561086
0.969025
for status in from_statuses:
    from_status_items = self.__get_var("items_" + status)
    self.__set_var("items_" + status, OrderedDict())

    to_status_items = self.__get_var("items_" + to_status)
    to_status_items.update(from_status_items)
def move_bulk(self, from_statuses, to_status)
Move a bulk of request/response pairs to another status Args: from_statuses list(str): The statuses to move from to_status (str): The status to move to
3.443129
3.505889
0.982099
items = self.get_all(status)

if items:
    return list(items.items())[0][1]

return None
def get_first(self, status)
Get the first item in the queue that has the given status. Args: status (str): return the first item with this status. Returns: :class:`nyawc.QueueItem`: The first queue item with the given status.
5.596979
5.970871
0.937381
count_remaining = len(self.items_queued) + len(self.items_in_progress)
percentage_remaining = 100 / self.count_total * count_remaining

return 100 - percentage_remaining
def get_progress(self)
Get the progress of the queue in percentage (float). Returns: float: The 'finished' progress in percentage.
5.252195
4.388335
1.196854
if input_type in RandomInputHelper.cache:
    return RandomInputHelper.cache[input_type]

types = {
    "text": RandomInputHelper.get_random_value,
    "hidden": RandomInputHelper.get_random_value,
    "search": RandomInputHelper.get_random_value,
    "color": RandomInputHelper.get_random_color,
    "week": {"function": RandomInputHelper.get_random_value, "params": [2, ["1234"]]},
    "password": RandomInputHelper.get_random_password,
    "number": RandomInputHelper.get_random_number,
    "tel": RandomInputHelper.get_random_telephonenumber,
    "url": RandomInputHelper.get_random_url,
    "textarea": RandomInputHelper.get_random_text,
    "email": RandomInputHelper.get_random_email
}

if types.get(input_type) is None:
    return ""

if type(types.get(input_type)) is dict:
    generator = types.get(input_type)
    value = generator.get("function")(*generator.get("params"))
else:
    value = types.get(input_type)()

RandomInputHelper.cache[input_type] = value

return value
def get_for_type(input_type="text")
Get a random string for the given html input type Args: input_type (str): The input type (e.g. email). Returns: str: The (cached) random value.
2.314624
2.159777
1.071696
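A few illustrative calls, assuming the RandomInputHelper class shown above; the returned strings are random on first use and then served from the per-type cache:

    RandomInputHelper.get_for_type("email")    # random address, e.g. "abcdef@ghijkl.com", then cached
    RandomInputHelper.get_for_type("password") # random password from get_random_password()
    RandomInputHelper.get_for_type("unknown")  # "" (no generator registered for this type)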
return "".join(random.choice("".join(character_sets)) for i in range(length))
def get_random_value(length=10, character_sets=[string.ascii_uppercase, string.ascii_lowercase])
Get a random string with the given length. Args: length (int): The length of the string to return. character_sets list(str): The character sets to use. Returns: str: The random string.
4.245186
6.465545
0.656586
email = [
    RandomInputHelper.get_random_value(6, [string.ascii_lowercase]),
    "@",
    RandomInputHelper.get_random_value(6, [string.ascii_lowercase]),
    ".",
    ltd
]

return "".join(email)
def get_random_email(ltd="com")
Get a random email address with the given ltd. Args: ltd (str): The ltd to use (e.g. com). Returns: str: The random email.
5.483788
6.080677
0.901839
password = []

password.append(RandomInputHelper.get_random_value(4, [string.ascii_lowercase]))
password.append(RandomInputHelper.get_random_value(2, [string.digits]))
password.append(RandomInputHelper.get_random_value(2, ["$&*@!"]))
password.append(RandomInputHelper.get_random_value(4, [string.ascii_uppercase]))

return "".join(password)
def get_random_password()
Get a random password that complies with most of the requirements. Note: This random password is not strong and not "really" random, and should only be used for testing purposes. Returns: str: The random password.
3.19399
3.167707
1.008297
url = [ "https://", RandomInputHelper.get_random_value(8, [string.ascii_lowercase]), ".", ltd ] return "".join(url)
def get_random_url(ltd="com")
Get a random url with the given ltd. Args: ltd (str): The ltd to use (e.g. com). Returns: str: The random url.
10.600211
10.21723
1.037484
phone = [
    RandomInputHelper.get_random_value(3, "123456789"),
    RandomInputHelper.get_random_value(3, "12345678"),
    "".join(map(str, random.sample(range(10), 4)))
]

return "-".join(phone)
def get_random_telephonenumber()
Get a random 10 digit phone number that complies with most of the requirements. Returns: str: The random telephone number.
3.84479
3.994735
0.962465
request.auth = copy.deepcopy(options.identity.auth)
request.cookies = copy.deepcopy(options.identity.cookies)
request.headers = copy.deepcopy(options.identity.headers)
request.proxies = copy.deepcopy(options.identity.proxies)
request.timeout = copy.copy(options.performance.request_timeout)

if parent_queue_item != None:
    for cookie in parent_queue_item.request.cookies:
        request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)

    for cookie in parent_queue_item.response.cookies:
        request.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)

if options.misc.verify_ssl_certificates and options.misc.trusted_certificates:
    request.verify = options.misc.trusted_certificates
else:
    request.verify = options.misc.verify_ssl_certificates
def patch_with_options(request, options, parent_queue_item=None)
Patch the given request with the given options (e.g. user agent). Args: request (:class:`nyawc.http.Request`): The request to patch. options (:class:`nyawc.Options`): The options to patch the request with. parent_queue_item (:class:`nyawc.QueueItem`): The parent queue item object (request/response pair) if exists.
2.225917
2.178249
1.021884
if not URLHelper.is_parsable(queue_item.request.url):
    return False

if not URLHelper.is_parsable(new_request.url):
    return False

if scope.request_methods:
    if not queue_item.request.method in scope.request_methods:
        return False

if scope.protocol_must_match:
    if URLHelper.get_protocol(queue_item.request.url) != URLHelper.get_protocol(new_request.url):
        return False

if scope.subdomain_must_match:
    current_subdomain = URLHelper.get_subdomain(queue_item.request.url)
    new_subdomain = URLHelper.get_subdomain(new_request.url)

    www_matches = False

    if current_subdomain == "www" and new_subdomain == "":
        www_matches = True

    if new_subdomain == "www" and current_subdomain == "":
        www_matches = True

    if not www_matches and current_subdomain != new_subdomain:
        return False

if scope.hostname_must_match:
    if URLHelper.get_hostname(queue_item.request.url) != URLHelper.get_hostname(new_request.url):
        return False

if scope.tld_must_match:
    if URLHelper.get_tld(queue_item.request.url) != URLHelper.get_tld(new_request.url):
        return False

return True
def complies_with_scope(queue_item, new_request, scope)
Check if the new request complies with the crawling scope. Args: queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request. new_request (:class:`nyawc.http.Request`): The request to check. scope (:class:`nyawc.Options.OptionsScope`): The scope to check. Returns: bool: True if it complies, False otherwise.
1.806709
1.755558
1.029137
header = []
path = URLHelper.get_path(queue_item.request.url)

for cookie in queue_item.request.cookies:
    root_path = cookie.path == "" or cookie.path == "/"
    if path.startswith(cookie.path) or root_path:
        header.append(cookie.name + "=" + cookie.value)

return "&".join(header)
def get_cookie_header(queue_item)
Convert a requests cookie jar to a HTTP request cookie header value. Args: queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request. Returns: str: The HTTP cookie header value.
3.656591
3.669639
0.996444
if self.response is not None:
    if self.__response_soup is None:
        result = BeautifulSoup(self.response.text, "lxml")
        if self.decomposed:
            return result
        else:
            self.__response_soup = BeautifulSoup(self.response.text, "lxml")

return self.__response_soup
def get_soup_response(self)
Get the response as a cached BeautifulSoup container. Returns: obj: The BeautifulSoup container.
3.7929
3.673846
1.032406
if self.__index_hash:
    return self.__index_hash

key = self.request.method

key += URLHelper.get_protocol(self.request.url)
key += URLHelper.get_subdomain(self.request.url)
key += URLHelper.get_hostname(self.request.url)
key += URLHelper.get_tld(self.request.url)
key += URLHelper.get_path(self.request.url)

key += str(URLHelper.get_ordered_params(self.request.url))

if self.request.data is not None:
    key += str(self.request.data.keys())

self.__index_hash = key

return self.__index_hash
def get_hash(self)
Generate and return the dict index hash of the given queue item. Note: Cookies should not be included in the hash calculation because otherwise requests are crawled multiple times with e.g. different session keys, causing infinite crawling recursion. Note: At this moment the keys do not actually get hashed since it works perfectly without and since hashing the keys requires us to built hash collision management. Returns: str: The hash of the given queue item.
2.708275
2.591057
1.045239
host = self.queue_item.response.url
soup = self.queue_item.get_soup_response()
found_requests = []

for form in soup.find_all("form"):
    found_requests.append(self.__get_request(host, form))

return found_requests
def derived_get_requests(self)
Get all the new requests that were found in the response. Returns: list(:class:`nyawc.http.Request`): A list of new requests that were found.
5.086092
4.715065
1.07869
url = URLHelper.make_absolute(host, self.__trim_grave_accent(soup["action"])) if soup.has_attr("action") else host
method_original = soup["method"] if soup.has_attr("method") else "get"
method = "post" if method_original.lower() == "post" else "get"
data = self.__get_form_data(soup)

return Request(url, method, data)
def __get_request(self, host, soup)
Build a request from the given soup form. Args: host str: The URL of the current queue item. soup (obj): The BeautifulSoup form. Returns: :class:`nyawc.http.Request`: The new Request.
4.182581
4.238312
0.986851
if href.startswith("`"): href = href[1:] if href.endswith("`"): href = href[:-1] return href
def __trim_grave_accent(self, href)
Trim grave accents manually (because BeautifulSoup doesn't support it). Args: href (str): The BeautifulSoup href value. Returns: str: The BeautifulSoup href value without grave accents.
3.229738
3.144723
1.027034
elements = self.__get_valid_form_data_elements(soup)
form_data = self.__get_default_form_data_input(elements)

callback = self.options.callbacks.form_before_autofill
action = callback(self.queue_item, elements, form_data)

if action == CrawlerActions.DO_AUTOFILL_FORM:
    self.__autofill_form_data(form_data, elements)

return form_data
def __get_form_data(self, soup)
Build a form data dict from the given form. Args: soup (obj): The BeautifulSoup form. Returns: obj: The form data (key/value).
5.686065
6.123408
0.928579
elements = []

for element in soup.find_all(["input", "button", "textarea", "select"]):
    if element.has_attr("name"):
        elements.append(element)

return elements
def __get_valid_form_data_elements(self, soup)
Get all valid form input elements. Note: An element is valid when the value can be updated client-side and the element has a name attribute. Args: soup (obj): The BeautifulSoup form. Returns: list(obj): Soup elements.
2.693062
3.005477
0.896051
form_data = OrderedDict()

for element in elements:
    default_value = self.__get_default_value_from_element(element)

    if default_value is False:
        continue

    form_data[element["name"]] = default_value

return form_data
def __get_default_form_data_input(self, elements)
Get the default form data {key: value} for the given elements. Args: elements list(obj): Soup elements. Returns: obj: The {key: value} form data
2.607909
3.277344
0.795738
for element in elements:
    if not element["name"] in form_data:
        continue

    if not len(form_data[element["name"]]) is 0:
        continue

    if element.name == "textarea":
        form_data[element["name"]] = RandomInputHelper.get_for_type("textarea")
        continue

    if element.has_attr("type"):
        form_data[element["name"]] = RandomInputHelper.get_for_type(element["type"])
def __autofill_form_data(self, form_data, elements)
Autofill empty form data with random data. Args: form_data (obj): The {key: value} form data elements list(obj): Soup elements. Returns: obj: The {key: value}
3.307606
3.360987
0.984118
if element.name == "select": options = element.find_all("option") is_multiple = element.has_attr("multiple") selected_options = [ option for option in options if option.has_attr("selected") ] if not selected_options and options: selected_options = [options[0]] selected_values = [] if is_multiple: for option in selected_options: value = option["value"] if option.has_attr("value") else option.string selected_values.append(value) return selected_values elif len(selected_options) >= 1: if selected_options[0].has_attr("value"): return selected_options[0]["value"] else: return selected_options[0].string return "" if element.name == "textarea": return element.string if element.string is not None else "" if element.name == "input" and element.has_attr("type"): if element["type"] in ("checkbox", "radio"): if not element.has_attr("checked"): return False if element.has_attr("value"): return element["value"] else: return "on" if element.has_attr("value"): return element["value"] return ""
def __get_default_value_from_element(self, element)
Get the default value of a form element. Args: element (obj): The soup element. Returns: str: The default value.
1.856887
1.863762
0.996311
# Python 3.4 and lower do not remove folder traversal strings.
# This was fixed in 3.5 (https://docs.python.org/3/whatsnew/3.5.html#urllib)
while relative.startswith('/../') or relative.startswith('../'):
    relative = relative[3:]

base_parsed = urlparse(base)
new_path = base_parsed.path.rsplit('/', 1)[0]
base_parsed = base_parsed._replace(path=new_path)
base = base_parsed.geturl()

return urljoin(base, relative)
def make_absolute(base, relative)
Make the given (relative) URL absolute. Args: base (str): The absolute URL the relative url was found on. relative (str): The (possibly relative) url to make absolute. Returns: str: The absolute URL.
3.683499
3.524456
1.045125
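A few hedged example calls sketching the behaviour of make_absolute above (inputs are hypothetical; results follow standard urljoin semantics after the base path's last segment is dropped and leading traversal segments are trimmed):

    URLHelper.make_absolute("https://example.com/", "page.html")  # -> "https://example.com/page.html"
    URLHelper.make_absolute("https://example.com/a/b", "/c")      # -> "https://example.com/c"
    URLHelper.make_absolute("https://example.com/", "../../c")    # traversal trimmed -> "https://example.com/c"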
if data is None:
    return url

url_parts = list(urlparse(url))

query = OrderedDict(parse_qsl(url_parts[4], keep_blank_values=True))
query.update(data)

url_parts[4] = URLHelper.query_dict_to_string(query)

return urlunparse(url_parts)
def append_with_data(url, data)
Append the given URL with the given data OrderedDict. Args: url (str): The URL to append. data (obj): The key value OrderedDict to append to the URL. Returns: str: The new URL.
2.748872
2.427991
1.132159
try:
    parsed = urlparse(url)
    URLHelper.__cache[url] = parsed
    return True
except:
    return False
def is_parsable(url)
Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it. Args: url (str): The URL to check. Returns: bool: True if parsable, False otherwise.
7.526734
6.09575
1.234751
if url not in URLHelper.__cache:
    URLHelper.__cache[url] = urlparse(url)

return URLHelper.__cache[url].scheme
def get_protocol(url)
Get the protocol (e.g. http, https or ftp) of the given URL. Args: url (str): The URL to get the protocol from. Returns: str: The URL protocol
6.368019
6.367061
1.000151
if url not in URLHelper.__cache:
    URLHelper.__cache[url] = urlparse(url)

return ".".join(URLHelper.__cache[url].netloc.split(".")[:-2])
def get_subdomain(url)
Get the subdomain of the given URL. Args: url (str): The URL to get the subdomain from. Returns: str: The subdomain(s)
5.213286
5.448757
0.956785
if url not in URLHelper.__cache:
    URLHelper.__cache[url] = urlparse(url)

parts = URLHelper.__cache[url].netloc.split(".")

if len(parts) == 1:
    return parts[0]
else:
    return ".".join(parts[-2:-1])
def get_hostname(url)
Get the hostname of the given URL. Args: url (str): The URL to get the hostname from. Returns: str: The hostname
3.433876
3.560876
0.964335
if url not in URLHelper.__cache:
    URLHelper.__cache[url] = urlparse(url)

parts = URLHelper.__cache[url].netloc.split(".")

if len(parts) == 1:
    return ""
else:
    return parts[-1]
def get_tld(url)
Get the tld of the given URL. Args: url (str): The URL to get the tld from. Returns: str: The tld
3.951863
3.939599
1.003113
if url not in URLHelper.__cache:
    URLHelper.__cache[url] = urlparse(url)

return URLHelper.__cache[url].path
def get_path(url)
Get the path (e.g /page/23) of the given URL. Args: url (str): The URL to get the path from. Returns: str: The path
6.145988
6.268276
0.980491
if url not in URLHelper.__cache:
    URLHelper.__cache[url] = urlparse(url)

params = URLHelper.query_string_to_dict(URLHelper.__cache[url].query)

return OrderedDict(sorted(params.items()))
def get_ordered_params(url)
Get the query parameters of the given URL in alphabetical order. Args: url (str): The URL to get the query parameters from. Returns: str: The query parameters
4.206127
4.559997
0.922397
query_params = []

for key, value in query.items():
    query_params.append(key + "=" + value)

return "&".join(query_params)
def query_dict_to_string(query)
Convert an OrderedDict to a query string. Args: query (obj): The key value object with query params. Returns: str: The query string. Note: This method does the same as urllib.parse.urlencode except that it doesn't actually encode the values.
2.773487
3.151934
0.879932
query_params = {}

for key_value in query.split("&"):
    key_value_pair = key_value.split("=", 1)

    key = key_value_pair[0] if len(key_value_pair) >= 1 else ""
    value = key_value_pair[1] if len(key_value_pair) == 2 else ""

    query_params[key] = value

return query_params
def query_string_to_dict(query)
Convert a string to a query dict. Args: query (str): The query string. Returns: obj: The key value object with query params. Note: This method does the same as urllib.parse.parse_qsl except that it doesn't actually decode the values.
1.917354
1.9785
0.969095
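A quick round-trip sketch of the two query helpers above; note that, unlike urllib, no percent-encoding or decoding is performed (inputs are illustrative):

    URLHelper.query_string_to_dict("a=1&b=hello world&c")
    # -> {"a": "1", "b": "hello world", "c": ""}

    URLHelper.query_dict_to_string(OrderedDict([("a", "1"), ("b", "hello world")]))
    # -> "a=1&b=hello world"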
if PackageHelper.__version:
    return PackageHelper.__version

PackageHelper.__version = "Unknown"

# If this is a GIT clone without install, use the ``.semver`` file.
file = os.path.realpath(__file__)
folder = os.path.dirname(file)

try:
    semver = open(folder + "/../../.semver", "r")
    PackageHelper.__version = semver.read().rstrip()
    semver.close()
    return PackageHelper.__version
except:
    pass

# If the package was installed, get the version number via Python's distribution details.
try:
    distribution = pkg_resources.get_distribution(PackageHelper.get_alias())
    if distribution.version:
        PackageHelper.__version = distribution.version
        return PackageHelper.__version
except:
    pass

return PackageHelper.__version
def get_version()
Get the version number of this package. Returns: str: The version number (major.minor.patch). Note: When this package is installed, the version number will be available through the package resource details. Otherwise this method will look for a ``.semver`` file. Note: In rare cases corrupt installs can cause the version number to be unknown. In this case the version number will be set to the string "Unknown".
4.143041
3.498452
1.18425
# The PyPi description does not support the SVG file type.
contents = contents.replace(".svg?pypi=png.from.svg", ".png")

# Convert ``<br class="title">`` to a H1 title
asterisks_length = len(PackageHelper.get_name())
asterisks = "*" * asterisks_length
title = asterisks + "\n" + PackageHelper.get_name() + "\n" + asterisks
contents = re.sub(r"(\.\. raw\:\: html\n)(\n {2,4})(\<br class=\"title\"\>)", title, contents)

# The PyPi description does not support raw HTML
contents = re.sub(r"(\.\. raw\:\: html\n)((\n {2,4})([A-Za-z0-9<>\ =\"\/])*)*", "", contents)

return contents
def rst_to_pypi(contents)
Convert the given GitHub RST contents to PyPi RST contents (since some RST directives are not available in PyPi). Args: contents (str): The GitHub compatible RST contents. Returns: str: The PyPi compatible RST contents.
7.302912
7.353862
0.993072
attributes = { "src": True, "href": True, "link": True, "script": True, "url": True } host = self.queue_item.response.url soup = self.queue_item.get_soup_response() base_element = soup.find("base", href=True) elements = soup.select("[{}]".format("],[".join(attributes.keys()))) # Always use the URL from the base element if it exists. # https://www.w3schools.com/tags/tag_base.asp if base_element: host = URLHelper.make_absolute(host, base_element["href"]) found_requests = [] for element in elements: for attribute in attributes.keys(): if not element.has_attr(attribute): continue found_url = self.__trim_grave_accent(element[attribute]) if URLHelper.is_mailto(found_url): continue absolute_url = URLHelper.make_absolute(host, found_url) found_requests.append(Request(absolute_url)) return found_requests
def derived_get_requests(self)
Get all the new requests that were found in the response. Returns: list(:class:`nyawc.http.Request`): A list of new requests that were found.
3.900145
3.64743
1.069286
try:
    self.__options.callbacks.request_in_thread_before_start(self.__queue_item)
except Exception as e:
    print(e)

new_requests = []
failed = False

try:
    handler = Handler(self.__options, self.__queue_item)
    new_requests = handler.get_new_requests()

    try:
        self.__queue_item.response.raise_for_status()
    except Exception:
        if self.__queue_item.request.parent_raised_error:
            failed = True
        else:
            for new_request in new_requests:
                new_request.parent_raised_error = True
except Exception as e:
    failed = True

    error_message = "Setting status of '{}' to '{}' because of an HTTP error.".format(
        self.__queue_item.request.url,
        QueueItem.STATUS_ERRORED
    )

    DebugHelper.output(self.__options, error_message)
    DebugHelper.output(self.__options, e)

    try:
        self.__options.callbacks.request_on_error(self.__queue_item, str(e))
    except Exception as e:
        print(e)

for new_request in new_requests:
    new_request.parent_url = self.__queue_item.request.url

try:
    self.__options.callbacks.request_in_thread_after_finish(self.__queue_item)
except Exception as e:
    print(e)

with self.__callback_lock:
    self.__callback(self.__queue_item, new_requests, failed)
def run(self)
Executes the HTTP call. Note: If this and the parent handler raised an error, the queue item status will be set to errored instead of finished. This is to prevent e.g. 404 recursion.
3.08565
2.905339
1.062062
HTTPRequestHelper.patch_with_options(request, self.__options)
self.queue.add_request(request)

self.__crawler_start()
def start_with(self, request)
Start the crawler using the given request. Args: request (:class:`nyawc.http.Request`): The startpoint for the crawler.
18.238308
17.79678
1.024809
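A minimal end-to-end sketch of starting the crawler with start_with, assuming nyawc's public classes; the target URL is a placeholder:

    from nyawc.Options import Options
    from nyawc.Crawler import Crawler
    from nyawc.http.Request import Request

    options = Options()
    crawler = Crawler(options)

    # Queue the first request and block until the queue is exhausted.
    crawler.start_with(Request("https://example.com/"))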
self.__should_spawn_new_requests = False

in_progress_count = len(self.queue.get_all(QueueItem.STATUS_IN_PROGRESS))

while in_progress_count < self.__options.performance.max_threads:
    if self.__spawn_new_request():
        in_progress_count += 1
    else:
        break

if in_progress_count == 0:
    self.__crawler_stop()
def __spawn_new_requests(self)
Spawn new requests until the max threads option value is reached. Note: If no new requests were spawned and there are no requests in progress the crawler will stop crawling.
4.862092
4.041793
1.202954
first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)
if first_in_line is None:
    return False

while self.routing.is_treshold_reached(first_in_line.request):
    self.queue.move(first_in_line, QueueItem.STATUS_CANCELLED)

    first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)
    if first_in_line is None:
        return False

self.__request_start(first_in_line)
return True
def __spawn_new_request(self)
Spawn the first queued request if there is one available. Returns: bool: True if a new request was spawned, false otherwise.
3.470909
3.150207
1.101803
try:
    self.__options.callbacks.crawler_before_start()
except Exception as e:
    print(e)
    print(traceback.format_exc())

self.__spawn_new_requests()

while not self.__stopped:
    if self.__should_stop:
        self.__crawler_stop()

    if self.__should_spawn_new_requests:
        self.__spawn_new_requests()

    time.sleep(0.1)
def __crawler_start(self)
Spawn the first X queued requests, where X is the max threads option. Note: The main thread will sleep until the crawler is finished. This enables quitting the application using sigints (see http://stackoverflow.com/a/11816038/2491049). Note: `__crawler_stop()` and `__spawn_new_requests()` are called here on the main thread to prevent thread recursion and deadlocks.
3.776561
2.986784
1.264424
if self.__stopping:
    return

self.__stopping = True

self.__wait_for_current_threads()

self.queue.move_bulk([
    QueueItem.STATUS_QUEUED,
    QueueItem.STATUS_IN_PROGRESS
], QueueItem.STATUS_CANCELLED)

self.__crawler_finish()

self.__stopped = True
def __crawler_stop(self)
Mark the crawler as stopped. Note: If :attr:`__stopped` is True, the main thread will be stopped. Every piece of code that gets executed after :attr:`__stopped` is True could cause Thread exceptions and or race conditions.
6.965574
6.438364
1.081886
try:
    self.__options.callbacks.crawler_after_finish(self.queue)
except Exception as e:
    print(e)
    print(traceback.format_exc())
def __crawler_finish(self)
Called when the crawler is finished because there are no queued requests left or it was stopped.
6.148385
5.421159
1.134146
try:
    action = self.__options.callbacks.request_before_start(self.queue, queue_item)
except Exception as e:
    action = None
    print(e)
    print(traceback.format_exc())

if action == CrawlerActions.DO_STOP_CRAWLING:
    self.__should_stop = True

if action == CrawlerActions.DO_SKIP_TO_NEXT:
    self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
    self.__should_spawn_new_requests = True

if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
    self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS)

    thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item)
    self.__threads[queue_item.get_hash()] = thread

    thread.daemon = True
    thread.start()
def __request_start(self, queue_item)
Execute the request in given queue item. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape.
3.407667
3.479395
0.979385
if self.__stopping:
    return

del self.__threads[queue_item.get_hash()]

if request_failed:
    new_queue_items = []
    self.queue.move(queue_item, QueueItem.STATUS_ERRORED)
else:
    self.routing.increase_route_count(queue_item.request)
    new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)
    self.queue.move(queue_item, QueueItem.STATUS_FINISHED)

try:
    action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)
except Exception as e:
    action = None
    print(e)
    print(traceback.format_exc())

queue_item.decompose()

if action == CrawlerActions.DO_STOP_CRAWLING:
    self.__should_stop = True

if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
    self.__should_spawn_new_requests = True
def __request_finish(self, queue_item, new_requests, request_failed=False)
Called when the crawler finished the given queue item. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished. new_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request. request_failed (bool): True if the request failed (if needs to be moved to errored).
3.790911
3.602904
1.052182
new_queue_items = []

for scraped_request in scraped_requests:
    HTTPRequestHelper.patch_with_options(scraped_request, self.__options, queue_item)

    if not HTTPRequestHelper.complies_with_scope(queue_item, scraped_request, self.__options.scope):
        continue

    if self.queue.has_request(scraped_request):
        continue

    scraped_request.depth = queue_item.request.depth + 1

    if self.__options.scope.max_depth is not None:
        if scraped_request.depth > self.__options.scope.max_depth:
            continue

    new_queue_item = self.queue.add_request(scraped_request)
    new_queue_items.append(new_queue_item)

return new_queue_items
def __add_scraped_requests_to_queue(self, queue_item, scraped_requests)
Convert the scraped requests to queue items, return them and also add them to the queue. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished. scraped_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request. Returns: list(:class:`nyawc.QueueItem`): The new queue items.
2.745219
2.74312
1.000765
requests = self.derived_get_requests()

for request in requests:
    request.url = URLHelper.remove_hash(request.url)

return requests
def get_requests(self)
Get all the new requests that were found in the response. Returns: list(:class:`nyawc.http.Request`): A list of new requests that were found.
7.734612
7.625189
1.01435
if not options.misc.debug:
    requests.packages.urllib3.disable_warnings(
        requests.packages.urllib3.exceptions.InsecureRequestWarning
    )
def setup(options)
Initialize debug/logging in third party libraries correctly. Args: options (:class:`nyawc.Options`): The options to use for the current crawling runtime.
4.789866
4.278188
1.119602