_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q38800
SiteRootMixin.locateChild
train
def locateChild(self, context, segments):
    """
    Return a statically defined child or a child defined by a site root
    plugin or an avatar from guard.
    """
    req = IRequest(context)
    viewer = IWebViewer(self.store, None)
    result = self.siteProduceResource(req, segments, viewer)
    if result is None:
        return NotFound
    return result
python
{ "resource": "" }
q38801
SiteRootMixin.siteProduceResource
train
def siteProduceResource(self, req, segments, webViewer):
    """
    Retrieve a child resource and segments from rootChild_ methods on this
    object and SiteRootPlugins.

    @return: a 2-tuple of (resource, segments), suitable for return from
    locateChild.

    @param req: an L{IRequest} provider.
    @param segments: a tuple of L{str}s, the segments from the request.
    @param webViewer: an L{IWebViewer}, to be propagated through the child
    lookup process.
    """
    # rootChild_* is not the same as child_, because its signature is
    # different.  Maybe this should be done some other way.
    handler = getattr(self, 'rootChild_' + segments[0], None)
    if handler:
        resource = handler(req, webViewer)
        if resource is not None:
            return resource, segments[1:]
    for plugin in self.store.powerupsFor(ISiteRootPlugin):
        produce = getattr(plugin, 'produceResource', None)
        if produce is None:
            # Legacy plugin interface: only takes the segments.
            result = plugin.resourceFactory(segments)
        else:
            result = produce(req, segments, webViewer)
        if result is not None:
            return result
    return None
python
{ "resource": "" }
q38802
VirtualHostWrapper.subdomain
train
def subdomain(self, hostname):
    """
    Determine of which known domain the given hostname is a subdomain.

    @return: A two-tuple giving the subdomain part and the domain part or
        C{None} if the domain is not a subdomain of any known domain.
    """
    # Discard any port number that may be attached to the hostname.
    host = hostname.split(":")[0]
    for domain in getDomainNames(self.siteStore):
        suffix = "." + domain
        if host.endswith(suffix):
            sub = host[:-len(suffix)]
            # "www" is treated as the bare domain, not a subdomain.
            if sub != "www":
                return sub, domain
    return None
python
{ "resource": "" }
q38803
_webTranslator
train
def _webTranslator(store, fallback): """ Discover a web translator based on an Axiom store and a specified default. Prefer the specified default. This is an implementation detail of various initializers in this module which require an L{IWebTranslator} provider. Some of those initializers did not previously require a webTranslator, so this function will issue a L{UserWarning} if no L{IWebTranslator} powerup exists for the given store and no fallback is provided. @param store: an L{axiom.store.Store} @param fallback: a provider of L{IWebTranslator}, or None @return: 'fallback', if it is provided, or the L{IWebTranslator} powerup on 'store'. """ if fallback is None: fallback = IWebTranslator(store, None) if fallback is None: warnings.warn( "No IWebTranslator plugin when creating Scrolltable - broken " "configuration, now deprecated! Try passing webTranslator " "keyword argument.", category=DeprecationWarning, stacklevel=4) return fallback
python
{ "resource": "" }
q38804
_ScrollableBase.resort
train
def resort(self, columnName):
    """
    Re-sort the table.

    @param columnName: the name of the column to sort by.  This is a string
    because it is passed from the browser.
    """
    chosen = self.columns[columnName]
    if chosen is None:
        raise Unsortable('column %r has no sort attribute' % (columnName,))
    if self.currentSortColumn is chosen:
        # Same column selected again: flip the direction.
        self.isAscending = not self.isAscending
    else:
        # New column selected: sort ascending by it.
        self.currentSortColumn = chosen
        self.isAscending = True
    return self.isAscending
python
{ "resource": "" }
q38805
InequalityModel.inequalityQuery
train
def inequalityQuery(self, constraint, count, isAscending):
    """
    Perform a query to obtain some rows from the table represented by this
    model, at the behest of a networked client.

    @param constraint: an additional constraint to apply to the query.
    @type constraint: L{axiom.iaxiom.IComparison}.

    @param count: the maximum number of rows to return.
    @type count: C{int}

    @param isAscending: a boolean describing whether the query should be
    yielding ascending or descending results.
    @type isAscending: C{bool}

    @return: an query which will yield some results from this model.
    @rtype: L{axiom.iaxiom.IQuery}
    """
    # Combine the caller's constraint with the model's base constraint.
    base = self.baseConstraint
    if base is not None:
        constraint = base if constraint is None else AND(base, constraint)
    # Sort on the current column, using storeID as a stable tie-breaker.
    attr = self.currentSortColumn.sortAttribute()
    tiebreak = self.itemType.storeID
    if isAscending:
        sort = (attr.ascending, tiebreak.ascending)
    else:
        sort = (attr.descending, tiebreak.descending)
    return self.store.query(
        self.itemType, constraint, sort=sort, limit=count).distinct()
python
{ "resource": "" }
q38806
InequalityModel.rowsAfterValue
train
def rowsAfterValue(self, value, count):
    """
    Retrieve some rows at or after a given sort-column value.

    @param value: Starting value in the index for the current sort column
        at which to start returning results.  Rows with a column value for
        the current sort column which is greater than or equal to this
        value will be returned.
    @type value: Some type compatible with the current sort column, or
        None, to specify the beginning of the data.

    @param count: The maximum number of rows to return.
    @type count: C{int}

    @return: A list of row data, ordered by the current sort column,
        beginning at C{value} and containing at most C{count} elements.
    """
    if value is None:
        constraint = None
    else:
        comparable = self._toComparableValue(value)
        constraint = self.currentSortColumn.sortAttribute() >= comparable
    return self.constructRows(self.inequalityQuery(constraint, count, True))
python
{ "resource": "" }
q38807
InequalityModel.rowsBeforeValue
train
def rowsBeforeValue(self, value, count):
    """
    Retrieve display data for rows with sort-column values less than the
    given value.

    @type value: Some type compatible with the current sort column.
    @param value: Starting value in the index for the current sort column
        at which to start returning results.  Rows with a column value for
        the current sort column which is less than this value will be
        returned.

    @type count: C{int}
    @param count: The number of rows to return.

    @return: A list of row data, ordered by the current sort column, ending
        at C{value} and containing at most C{count} elements.
    """
    if value is None:
        constraint = None
    else:
        comparable = self._toComparableValue(value)
        constraint = self.currentSortColumn.sortAttribute() < comparable
    # The query runs descending; flip the rows back into ascending order.
    rows = self.constructRows(self.inequalityQuery(constraint, count, False))
    rows.reverse()
    return rows
python
{ "resource": "" }
q38808
InequalityModel.rowsBeforeItem
train
def rowsBeforeItem(self, item, count):
    """
    The inverse of rowsAfterItem.

    @param item: the L{Item} to request rows before.
    @type item: this L{InequalityModel}'s L{itemType} attribute.

    @param count: The maximum number of rows to return.
    @type count: L{int}

    @return: A list of row data, ordered by the current sort column, ending
        immediately before C{item}.
    """
    currentSortAttribute = self.currentSortColumn.sortAttribute()
    # Read the sort-column value off the item via the attribute descriptor.
    value = currentSortAttribute.__get__(item, type(item))
    # First, collect rows that share this exact sort value but precede the
    # item in the storeID tie-break order (query runs descending).
    firstQuery = self.inequalityQuery(
        AND(currentSortAttribute == value,
            self.itemType.storeID < item.storeID),
        count, False)
    results = self.constructRows(firstQuery)
    count -= len(results)
    if count:
        # Then fill the remainder with rows whose sort value is strictly
        # smaller.
        secondQuery = self.inequalityQuery(currentSortAttribute < value,
                                           count, False)
        results.extend(self.constructRows(secondQuery))
    # Both queries ran descending; reverse to restore ascending order.
    return results[::-1]
python
{ "resource": "" }
q38809
IndexingModel.requestRowRange
train
def requestRowRange(self, rangeBegin, rangeEnd):
    """
    Retrieve display data for the given range of rows.

    @type rangeBegin: C{int}
    @param rangeBegin: The index of the first row to retrieve.

    @type rangeEnd: C{int}
    @param rangeEnd: The index of the last row to retrieve.

    @return: A C{list} of C{dict}s giving row data.
    """
    query = self.performQuery(rangeBegin, rangeEnd)
    return self.constructRows(query)
python
{ "resource": "" }
q38810
IndexingModel.getTableMetadata
train
def getTableMetadata(self):
    """
    Retrieve a description of the various properties of this scrolltable.

    @return: A sequence containing 5 elements.  They are, in order, a list
        of the names of the columns present, a mapping of column names to
        two-tuples of their type and a boolean indicating their
        sortability, the total number of rows in the scrolltable, the name
        of the default sort column, and a boolean indicating whether or not
        the current sort order is ascending.
    """
    coltypes = {}
    for (colname, column) in self.columns.iteritems():
        sortable = column.sortAttribute() is not None
        coltype = column.getType()
        if coltype is not None:
            # Column types are sent to the browser, so decode to unicode.
            coltype = unicode(coltype, 'ascii')
        coltypes[colname] = (coltype, sortable)
    csc = None
    if self.currentSortColumn:
        csc = unicode(
            self.currentSortColumn.sortAttribute().attrname, 'ascii')
    return [self.columnNames, coltypes, self.requestCurrentSize(), csc,
            self.isAscending]
python
{ "resource": "" }
q38811
ScrollableView.constructRows
train
def constructRows(self, items):
    """
    Build row objects that are serializable using Athena for sending to the
    client.

    @param items: an iterable of objects compatible with my columns'
    C{extractValue} methods.

    @return: a list of dictionaries, where each dictionary has a string key
    for each column name in my list of columns.
    """
    rows = []
    for item in items:
        row = {}
        for (colname, col) in self.columns.iteritems():
            row[colname] = col.extractValue(self, item)
        # Attach a link for the row when the item is linkable.
        link = self.linkToItem(item)
        if link is not None:
            row[u'__id__'] = link
        rows.append(row)
    return rows
python
{ "resource": "" }
q38812
ScrollingElement.getInitialArguments
train
def getInitialArguments(self):
    """
    Return the constructor arguments required for the JavaScript client
    class, Mantissa.ScrollTable.ScrollTable.

    @return: a 3-tuple of::

        - The unicode attribute ID of my current sort column
        - A list of dictionaries with 'name' and 'type' keys which are
          strings describing the name and type of all the columns in this
          table.
        - A bool indicating whether the sort direction is initially
          ascending.
    """
    column = IColumn(self.currentSortColumn)
    return [column.attributeID.decode('ascii'),
            self._getColumnList(),
            self.isAscending]
python
{ "resource": "" }
q38813
printChannelColRow
train
def printChannelColRow(campaign, ra, dec):
    """Prints the channel, col, row for a given campaign and coordinate."""
    fov = fields.getKeplerFov(campaign)
    channel, col, row = fov.getChannelColRow(ra, dec)
    message = "Position in C{}: channel {}, col {:.0f}, row {:.0f}.".format(
        campaign, int(channel), col, row)
    print(message)
python
{ "resource": "" }
q38814
findCampaigns
train
def findCampaigns(ra, dec):
    """Returns a list of the campaigns that cover a given position.

    Parameters
    ----------
    ra, dec : float, float
        Position in decimal degrees (J2000).

    Returns
    -------
    campaigns : list of int
        A list of the campaigns that cover the given position.
    """
    # Temporarily disable the logger to avoid the preliminary field warnings
    logger.disabled = True
    try:
        campaigns_visible = [
            c for c in fields.getFieldNumbers()
            if onSiliconCheck(ra, dec, fields.getKeplerFov(c))
        ]
    finally:
        # Re-enable the logger.  BUG FIX: the previous code set this back to
        # True, which left the logger permanently disabled; the try/finally
        # also guarantees re-enabling even if a field lookup raises.
        logger.disabled = False
    return campaigns_visible
python
{ "resource": "" }
q38815
findCampaignsByName
train
def findCampaignsByName(target): """Returns a list of the campaigns that cover a given target. Parameters ---------- target : str Name of the celestial object. Returns ------- campaigns : list of int A list of the campaigns that cover the given target name. ra, dec : float, float Resolved coordinates in decimal degrees (J2000). Exceptions ---------- Raises an ImportError if AstroPy is not installed. Raises a ValueError if `name` cannot be resolved to coordinates. """ # Is AstroPy (optional dependency) installed? try: from astropy.coordinates import SkyCoord from astropy.coordinates.name_resolve import NameResolveError from astropy.utils.data import conf conf.remote_timeout = 90 except ImportError: print('Error: AstroPy needs to be installed for this feature.') sys.exit(1) # Translate the target name into celestial coordinates try: crd = SkyCoord.from_name(target) except NameResolveError: raise ValueError('Could not find coordinates ' 'for target "{0}".'.format(target)) # Find the campaigns with visibility return findCampaigns(crd.ra.deg, crd.dec.deg), crd.ra.deg, crd.dec.deg
python
{ "resource": "" }
q38816
K2findCampaigns_csv_main
train
def K2findCampaigns_csv_main(args=None):
    """Exposes K2findCampaigns-csv to the command line.

    Reads a CSV table of either 'ra,dec,kepmag' rows or single 'name' rows
    and writes a '<input>-K2findCampaigns.csv' file listing the campaigns
    that cover each entry.

    Parameters
    ----------
    args : list of str, optional
        Command-line arguments; parsed by argparse when None.
    """
    parser = argparse.ArgumentParser(
        description="Check which objects listed in a CSV table "
                    "are (or were) observable by NASA's K2 mission.")
    parser.add_argument('input_filename', nargs=1, type=str,
                        help="Path to a comma-separated table containing "
                             "columns 'ra,dec,kepmag' (decimal degrees) "
                             "or 'name'.")
    args = parser.parse_args(args)
    input_fn = args.input_filename[0]
    output_fn = input_fn + '-K2findCampaigns.csv'
    # First, try assuming the file has the classic "ra,dec,kepmag" format
    try:
        ra, dec, kepmag = parse_file(input_fn, exit_on_error=False)
        campaigns = np.array([findCampaigns(ra[idx], dec[idx])
                              for idx in range(len(ra))])
        output = np.array([ra, dec, kepmag, campaigns])
        print("Writing {0}".format(output_fn))
        np.savetxt(output_fn, output.T, delimiter=', ',
                   fmt=['%10.10f', '%10.10f', '%10.2f', '%s'])
    # If this fails, assume the file has a single "name" column
    except ValueError:
        # Use context managers so the files are closed even on error
        # (the previous code leaked the input file handle).
        with open(input_fn, "r") as infile:
            names = [line.strip() for line in infile if line.strip()]
        print("Writing {0}".format(output_fn))
        with open(output_fn, "w") as output:
            for target in names:
                try:
                    campaigns, ra, dec = findCampaignsByName(target)
                except ValueError:
                    # Unresolvable name: record an empty campaign list.
                    campaigns = []
                output.write("{0}, {1}\n".format(target, campaigns))
                output.flush()
python
{ "resource": "" }
q38817
mock_import
train
def mock_import(do_not_mock=None, **mock_kwargs):
    """
    Mocks import statements by ignoring ImportErrors and replacing the
    missing module with a Mock.

    :param str|unicode|list[str|unicode] do_not_mock: names of modules that
        should exists, and an ImportError could be raised for.
    :param mock_kwargs: kwargs for MagicMock object.
    :return: patch object
    """
    do_not_mock = _to_list(do_not_mock)

    def try_import(module_name, *args, **kwargs):
        try:
            return _builtins_import(module_name, *args, **kwargs)
        except:  # intentionally catch all exceptions
            for prefix in do_not_mock:
                if _match(module_name, prefix):
                    # This is a module we need to import,
                    # so we raise the exception instead of mocking it
                    raise
            # Mock external module so we can peacefully create our client
            return mock.MagicMock(**mock_kwargs)

    return mock.patch('six.moves.builtins.__import__', try_import)
python
{ "resource": "" }
q38818
doapi.request
train
def request(self, url, params=None, data=None, method='GET'):
    """
    Perform an HTTP request and return the response body as a decoded JSON
    value.

    :param str url: the URL to make the request of.  If ``url`` begins with
        a forward slash, :attr:`endpoint` is prepended to it; otherwise,
        ``url`` is treated as an absolute URL.
    :param dict params: parameters to add to the URL's query string
    :param data: a value to send in the body of the request.  If ``data``
        is not a string, it will be serialized as JSON before sending;
        either way, the :mailheader:`Content-Type` header of the request
        will be set to :mimetype:`application/json`.  A ``data`` value of
        `None` means "Don't send any data"; to send an actual `None` value,
        convert it to JSON (i.e., the string ``"null"``) first.
    :param str method: the HTTP method to use: ``"GET"``, ``"POST"``,
        ``"PUT"``, or ``"DELETE"`` (case-insensitive); default: ``"GET"``
    :return: a decoded JSON value, or `None` if no data was returned
    :rtype: `list` or `dict` (depending on the request) or `None`
    :raises ValueError: if ``method`` is an invalid value
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if url.startswith('/'):
        url = self.endpoint + url
    kwargs = {
        "headers": {"Authorization": "Bearer " + self.api_token},
        "params": {} if params is None else params,
        "timeout": self.timeout,
    }
    if data is not None:
        if not isinstance(data, string_types):
            data = json.dumps(data, cls=DOEncoder)
        kwargs["data"] = data
        kwargs["headers"]["Content-Type"] = "application/json"
    method = method.upper()
    requester = {
        'GET': self.session.get,
        'POST': self.session.post,
        'PUT': self.session.put,
        'DELETE': self.session.delete,
    }.get(method)
    if requester is None:
        raise ValueError('Unrecognized HTTP method: ' + repr(method))
    r = requester(url, **kwargs)
    self.last_response = r
    self.last_meta = None
    if not r.ok:
        raise DOAPIError(r)
    if r.text.strip():
        # Even when returning "no content", the API can still return
        # whitespace, so only decode when there is a real body.
        response = r.json()
        try:
            self.last_meta = response["meta"]
        except (KeyError, TypeError):
            pass
        return response
python
{ "resource": "" }
q38819
doapi.paginate
train
def paginate(self, url, key, params=None):
    """
    Fetch a sequence of paginated resources from the API endpoint.

    The initial request to ``url`` and all subsequent requests must respond
    with a JSON object; the field specified by ``key`` must be a list,
    whose elements will be yielded, and the next request will be made to
    the URL in the ``.links.pages.next`` field until the responses no
    longer contain that field.

    :param str url: the URL to make the initial request of.  If ``url``
        begins with a forward slash, :attr:`endpoint` is prepended to it;
        otherwise, ``url`` is treated as an absolute URL.
    :param str key: the field on each page containing a list of values to
        yield
    :param dict params: parameters to add to the initial URL's query
        string.  A ``"per_page"`` parameter may be included to override the
        default :attr:`per_page` setting.
    :rtype: generator of decoded JSON values
    :raises ValueError: if a response body is not an object or ``key`` is
        not one of its keys
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if params is None:
        params = {}
    if self.per_page is not None and "per_page" not in params:
        params = dict(params, per_page=self.per_page)
    next_url = url
    first = True
    while next_url is not None:
        if first:
            page = self.request(next_url, params=params)
            first = False
        else:
            page = self.request(next_url)
        try:
            chunk = page[key]
        except (KeyError, TypeError):
            raise ValueError('{0!r}: not a key of the response body'
                             .format(key))
        for obj in chunk:
            yield obj
        try:
            next_url = page["links"]["pages"]["next"]
        except KeyError:
            next_url = None
python
{ "resource": "" }
q38820
doapi.fetch_all_droplets
train
def fetch_all_droplets(self, tag_name=None):
    r"""
    Returns a generator that yields all of the droplets belonging to the
    account.

    .. versionchanged:: 0.2.0
        ``tag_name`` parameter added

    :param tag_name: if non-`None`, only droplets with the given tag are
        returned
    :type tag_name: string or `Tag`
    :rtype: generator of `Droplet`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if tag_name is None:
        params = {}
    else:
        params = {"tag_name": str(tag_name)}
    return map(self._droplet,
               self.paginate('/v2/droplets', 'droplets', params=params))
python
{ "resource": "" }
q38821
doapi.create_droplet
train
def create_droplet(self, name, image, size, region, ssh_keys=None,
                   backups=None, ipv6=None, private_networking=None,
                   user_data=None, **kwargs):
    """
    Create a new droplet.  All fields other than ``name``, ``image``,
    ``size``, and ``region`` are optional and will be omitted from the API
    request if not specified.

    The returned `Droplet` object will represent the droplet at the moment
    of creation; the actual droplet may not be active yet and may not have
    even been assigned an IP address.  To wait for the droplet to activate,
    use the `Droplet`'s :meth:`~Droplet.wait` method.

    :param str name: a name for the droplet
    :param image: the image ID, slug, or `Image` object representing the
        base image to use for the droplet
    :type image: integer, string, or `Image`
    :param size: the slug or `Size` object representing the size of the new
        droplet
    :type size: string or `Size`
    :param region: the slug or `Region` object representing the region in
        which to create the droplet
    :type region: string or `Region`
    :param iterable ssh_keys: an iterable of SSH key resource IDs, SSH key
        fingerprints, and/or `SSHKey` objects specifying the public keys to
        add to the new droplet's :file:`/root/.ssh/authorized_keys` file
    :param bool backups: whether to enable automatic backups on the new
        droplet
    :param bool ipv6: whether to enable IPv6 on the new droplet
    :param bool private_networking: whether to enable private networking
        for the new droplet
    :param str user_data: a string of user data/metadata for the droplet
    :param kwargs: additional fields to include in the API request
    :return: the new droplet resource
    :rtype: Droplet
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if isinstance(image, Image):
        image = image.id
    data = {
        "name": name,
        "image": image,
        "size": str(size),
        "region": str(region),
    }
    if ssh_keys is not None:
        data["ssh_keys"] = [k._id if isinstance(k, SSHKey) else k
                            for k in ssh_keys]
    # Optional flags are only sent when explicitly specified.
    for field, fieldvalue in (("backups", backups),
                              ("ipv6", ipv6),
                              ("private_networking", private_networking),
                              ("user_data", user_data)):
        if fieldvalue is not None:
            data[field] = fieldvalue
    data.update(kwargs)
    response = self.request('/v2/droplets', method='POST', data=data)
    return self._droplet(response["droplet"])
python
{ "resource": "" }
q38822
doapi.create_multiple_droplets
train
def create_multiple_droplets(self, names, image, size, region,
                             ssh_keys=None, backups=None, ipv6=None,
                             private_networking=None, user_data=None,
                             **kwargs):
    r"""
    Create multiple new droplets at once with the same image, size, etc.,
    differing only in name.  All fields other than ``names``, ``image``,
    ``size``, and ``region`` are optional and will be omitted from the API
    request if not specified.

    The returned `Droplet` objects will represent the droplets at the
    moment of creation; the actual droplets may not be active yet and may
    not have even been assigned IP addresses.  To wait for the droplets to
    activate, use their :meth:`~Droplet.wait` method or `wait_droplets`.

    :param names: the names for the new droplets
    :type names: list of strings
    :param image: the image ID, slug, or `Image` object representing the
        base image to use for the droplets
    :type image: integer, string, or `Image`
    :param size: the slug or `Size` object representing the size of the new
        droplets
    :type size: string or `Size`
    :param region: the slug or `Region` object representing the region in
        which to create the droplets
    :type region: string or `Region`
    :param iterable ssh_keys: an iterable of SSH key resource IDs, SSH key
        fingerprints, and/or `SSHKey` objects specifying the public keys to
        add to the new droplets' :file:`/root/.ssh/authorized_keys` files
    :param bool backups: whether to enable automatic backups on the new
        droplets
    :param bool ipv6: whether to enable IPv6 on the new droplets
    :param bool private_networking: whether to enable private networking
        for the new droplets
    :param str user_data: a string of user data/metadata for the droplets
    :param kwargs: additional fields to include in the API request
    :return: the new droplet resources
    :rtype: list of `Droplet`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if isinstance(image, Image):
        image = image.id
    data = {
        "names": names,
        "image": image,
        "size": str(size),
        "region": str(region),
    }
    if ssh_keys is not None:
        data["ssh_keys"] = [k._id if isinstance(k, SSHKey) else k
                            for k in ssh_keys]
    # Optional flags are only sent when explicitly specified.
    for field, fieldvalue in (("backups", backups),
                              ("ipv6", ipv6),
                              ("private_networking", private_networking),
                              ("user_data", user_data)):
        if fieldvalue is not None:
            data[field] = fieldvalue
    data.update(kwargs)
    response = self.request('/v2/droplets', method='POST', data=data)
    return list(map(self._droplet, response["droplets"]))
python
{ "resource": "" }
q38823
doapi.fetch_all_droplet_neighbors
train
def fetch_all_droplet_neighbors(self):
    r"""
    Returns a generator of all sets of multiple droplets that are running
    on the same physical hardware.

    :rtype: generator of lists of `Droplet`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    pages = self.paginate('/v2/reports/droplet_neighbors', 'neighbors')
    for neighborhood in pages:
        yield [self._droplet(drop) for drop in neighborhood]
python
{ "resource": "" }
q38824
doapi.wait_actions
train
def wait_actions(self, actions, wait_interval=None, wait_time=None):
    r"""
    Poll the server periodically until all actions in ``actions`` have
    either completed or errored out, yielding each `Action`'s final value
    as it ends.

    If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any
    remaining in-progress actions) is raised.

    If a `KeyboardInterrupt` is caught, any remaining actions are returned
    immediately without waiting for completion.

    .. versionchanged:: 0.2.0
        Raises `WaitTimeoutError` on timeout

    :param iterable actions: an iterable of `Action`\ s and/or other values
        that are acceptable arguments to :meth:`fetch_action`
    :param number wait_interval: how many seconds to sleep between
        requests; defaults to :attr:`wait_interval` if not specified or
        `None`
    :param number wait_time: the total number of seconds after which the
        method will raise an error if any actions have not yet completed,
        or a negative number to wait indefinitely; defaults to
        :attr:`wait_time` if not specified or `None`
    :rtype: generator of `Action`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    :raises WaitTimeoutError: if ``wait_time`` is exceeded
    """
    resources = map(self._action, actions)
    # Wait for each action's "done" attribute to become True.
    return self._wait(resources, "done", True, wait_interval, wait_time)
python
{ "resource": "" }
q38825
doapi.create_ssh_key
train
def create_ssh_key(self, name, public_key, **kwargs):
    """
    Add a new SSH public key resource to the account.

    :param str name: the name to give the new SSH key resource
    :param str public_key: the text of the public key to register, in the
        form used by :file:`authorized_keys` files
    :param kwargs: additional fields to include in the API request
    :return: the new SSH key resource
    :rtype: SSHKey
    :raises DOAPIError: if the API endpoint replies with an error
    """
    payload = dict(kwargs, name=name, public_key=public_key)
    response = self.request('/v2/account/keys', method='POST', data=payload)
    return self._ssh_key(response["ssh_key"])
python
{ "resource": "" }
q38826
doapi.fetch_all_images
train
def fetch_all_images(self, type=None, private=None): # pylint: disable=redefined-builtin
    r"""
    Returns a generator that yields all of the images available to the
    account.

    :param type: the type of images to fetch: ``"distribution"``,
        ``"application"``, or all (`None`); default: `None`
    :type type: string or None
    :param bool private: whether to only return the user's private images;
        default: return all images
    :rtype: generator of `Image`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    query = {}
    if type is not None:
        query["type"] = type
    if private is not None:
        # The API expects the literal strings 'true'/'false'.
        if private:
            query["private"] = 'true'
        else:
            query["private"] = 'false'
    images = self.paginate('/v2/images', 'images', params=query)
    return map(self._image, images)
python
{ "resource": "" }
q38827
doapi.create_domain
train
def create_domain(self, name, ip_address, **kwargs):
    """
    Add a new domain name resource to the account.

    Note that this method does not actually register a new domain name; it
    merely configures DigitalOcean's nameservers to provide DNS resolution
    for the domain.  See `How To Set Up a Host Name with DigitalOcean
    <https://www.digitalocean.com/community/tutorials/how-to-set-up-a-host-name-with-digitalocean>`_
    for more information.

    :param str name: the domain name to add
    :param ip_address: the IP address to which the domain should point
    :type ip_address: string or `FloatingIP`
    :param kwargs: additional fields to include in the API request
    :return: the new domain resource
    :rtype: Domain
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if isinstance(ip_address, FloatingIP):
        ip_address = ip_address.ip
    payload = {"name": name, "ip_address": ip_address}
    payload.update(kwargs)
    response = self.request('/v2/domains', method='POST', data=payload)
    return self._domain(response["domain"])
python
{ "resource": "" }
q38828
doapi.create_floating_ip
train
def create_floating_ip(self, droplet_id=None, region=None, **kwargs):
    """
    Create a new floating IP assigned to a droplet or reserved to a region.
    Either ``droplet_id`` or ``region`` must be specified, but not both.

    The returned `FloatingIP` object will represent the IP at the moment of
    creation; if the IP address is supposed to be assigned to a droplet,
    the assignment may not have been completed at the time the object is
    returned.  To wait for the assignment to complete, use the
    `FloatingIP`'s :meth:`~FloatingIP.wait_for_action` method.

    :param droplet_id: the droplet to assign the floating IP to as either
        an ID or a `Droplet` object
    :type droplet_id: integer or `Droplet`
    :param region: the region to reserve the floating IP to as either a
        slug or a `Region` object
    :type region: string or `Region`
    :param kwargs: additional fields to include in the API request
    :return: the new floating IP
    :rtype: FloatingIP
    :raises TypeError: if both ``droplet_id`` & ``region`` or neither of
        them are defined
    :raises DOAPIError: if the API endpoint replies with an error
    """
    # Exactly one of the two targets must be given; both-or-neither is an
    # error.
    if (droplet_id is None) == (region is None):
        ### TODO: Is TypeError the right type of error?
        raise TypeError('Exactly one of "droplet_id" and "region" must be'
                        ' specified')
    if droplet_id is not None:
        if isinstance(droplet_id, Droplet):
            droplet_id = droplet_id.id
        payload = {"droplet_id": droplet_id}
    else:
        if isinstance(region, Region):
            region = region.slug
        payload = {"region": region}
    payload.update(kwargs)
    response = self.request('/v2/floating_ips', method='POST', data=payload)
    return self._floating_ip(response["floating_ip"])
python
{ "resource": "" }
q38829
doapi._wait
train
def _wait(self, objects, attr, value, wait_interval=None, wait_time=None):
    r"""
    Calls the ``fetch`` method of each object in ``objects``
    periodically until the ``attr`` attribute of each one equals
    ``value``, yielding the final state of each object as soon as it
    satisfies the condition.

    If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any
    remaining in-progress objects) is raised.

    If a `KeyboardInterrupt` is caught, any remaining objects are
    returned immediately without waiting for completion.

    .. versionchanged:: 0.2.0
        Raises `WaitTimeoutError` on timeout

    :param iterable objects: an iterable of `Resource`\ s with ``fetch``
        methods
    :param string attr: the attribute to watch
    :param value: the value of ``attr`` to wait for
    :param number wait_interval: how many seconds to sleep between
        requests; defaults to :attr:`wait_interval` if not specified or
        `None`
    :param number wait_time: the total number of seconds after which the
        method will raise an error if any objects have not yet
        completed, or a negative number to wait indefinitely; defaults
        to :attr:`wait_time` if not specified or `None`
    :rtype: generator
    :raises DOAPIError: if the API endpoint replies with an error
    :raises WaitTimeoutError: if ``wait_time`` is exceeded
    """
    objects = list(objects)
    if not objects:
        return
    if wait_interval is None:
        wait_interval = self.wait_interval
    # Resolve the effective wait_time *before* comparing it with 0; the
    # original compared a possibly-None wait_time against 0 first,
    # which is a TypeError on Python 3.
    if wait_time is None:
        wait_time = self.wait_time
    if wait_time is None or wait_time < 0:
        end_time = None
    else:
        end_time = time() + wait_time
    while end_time is None or time() < end_time:
        loop_start = time()
        next_objs = []
        for o in objects:
            obj = o.fetch()
            if getattr(obj, attr, None) == value:
                yield obj
            else:
                next_objs.append(obj)
        objects = next_objs
        if not objects:
            break
        # Sleep for the remainder of the polling interval, but never
        # past the overall deadline.
        loop_end = time()
        time_left = wait_interval - (loop_end - loop_start)
        if end_time is not None:
            time_left = min(time_left, end_time - loop_end)
        if time_left > 0:
            try:
                sleep(time_left)
            except KeyboardInterrupt:
                # Bail out early, handing back whatever is unfinished.
                for o in objects:
                    yield o
                return
    if objects:
        raise WaitTimeoutError(objects, attr, value, wait_interval,
                               wait_time)
python
{ "resource": "" }
q38830
min_tasks_per_node
train
def min_tasks_per_node(queue_id):
    """
    Return the minimum number of tasks to request for the given queue.

    Used when requesting non-exclusive use, since a Grid Engine
    parallel environment may enforce a fixed number of tasks per node
    (its ``allocation_rule``).

    :param queue_id: ``"<parallel_env>:<queue_name>"`` identifier
    :return: the larger of 1 and the PE's integer allocation rule
    """
    parallel_env = queue_id.split(':')[0]
    tasks = 1
    pe_tasks = tasks
    with os.popen('qconf -sp ' + parallel_env) as f:
        for line in f:
            if line.split(' ')[0] == 'allocation_rule':
                # The allocation rule is not always an integer (it can
                # be e.g. "$fill_up"); keep the default in that case.
                # The original used a bare ``except`` around the whole
                # loop and a non-raw regex string.
                try:
                    pe_tasks = int(re.split(r'\W+', line)[1])
                except (ValueError, IndexError):
                    pass
    return max(tasks, pe_tasks)
python
{ "resource": "" }
q38831
check_shape
train
def check_shape(meth):
    """
    Decorator for larray magic methods that raises ``ValueError`` when
    the operand is an array whose shape differs from this array's.
    """
    @wraps(meth)
    def checked(self, val):
        # Only array-like operands carry a shape worth validating;
        # scalars broadcast trivially.
        if isinstance(val, (larray, numpy.ndarray)):
            if val.shape != self._shape:
                raise ValueError("shape mismatch: objects cannot be broadcast to a single shape")
        return meth(self, val)
    return checked
python
{ "resource": "" }
q38832
partial_shape
train
def partial_shape(addr, full_shape):
    """
    Calculate the size of the sub-array represented by `addr`
    """
    def size(x, max):
        # An integer index selects a single element, so it contributes
        # no axis to the resulting shape (signalled by None).
        if isinstance(x, (int, long, numpy.integer)):
            return None
        elif isinstance(x, slice):
            y = min(max, x.stop or max)  # slice limits can go past the bounds
            return 1 + (y - (x.start or 0) - 1) // (x.step or 1)
        elif isinstance(x, collections.Sized):
            if hasattr(x, 'dtype') and x.dtype == bool:
                # A boolean mask selects as many elements as are True.
                return x.sum()
            else:
                return len(x)
        else:
            raise TypeError("Unsupported index type %s" % type(x))
    addr = full_address(addr, full_shape)
    if isinstance(addr, numpy.ndarray) and addr.dtype == bool:
        # Whole-array boolean mask: selection flattens to one dimension.
        return (addr.sum(),)
    elif all(isinstance(x, collections.Sized) for x in addr):
        # Fancy indexing with a sequence on every axis yields a 1-D
        # result whose length matches the index sequences.
        return (len(addr[0]),)
    else:
        shape = [size(x, max) for (x, max) in zip(addr, full_shape)]
        # Drop the axes collapsed by integer indices.
        return tuple([x for x in shape if x is not None])
python
{ "resource": "" }
q38833
_build_ufunc
train
def _build_ufunc(func):
    """Return a ufunc that works with lazy arrays"""
    def wrapped(x):
        # Lazy arrays defer the function application; anything else is
        # evaluated immediately.
        if isinstance(x, larray):
            lazy_copy = deepcopy(x)
            lazy_copy.apply(func)
            return lazy_copy
        return func(x)
    return wrapped
python
{ "resource": "" }
q38834
larray.is_homogeneous
train
def is_homogeneous(self):
    """True if all the elements of the array are the same."""
    # The base value is homogeneous when it is a plain scalar, or an
    # instance of this array's dtype.
    hom_base = isinstance(self.base_value, (int, long, numpy.integer, float, bool)) \
               or type(self.base_value) == self.dtype \
               or (isinstance(self.dtype, type) and isinstance(self.base_value, self.dtype))
    # Any lazy-array operands applied on top must themselves be
    # homogeneous for the overall result to be.
    hom_ops = all(obj.is_homogeneous for f, obj in self.operations if isinstance(obj, larray))
    return hom_base and hom_ops
python
{ "resource": "" }
q38835
larray._partially_evaluate
train
def _partially_evaluate(self, addr, simplify=False):
    """
    Return part of the lazy array.

    Dispatches on the type of ``base_value``: scalar, ndarray, sparse
    matrix, callable, lazily-evaluatable object, or vectorized
    iterator.  The selected base values then have any stored
    operations applied before being returned.
    """
    if self.is_homogeneous:
        if simplify:
            base_val = self.base_value
        else:
            base_val = self._homogeneous_array(addr) * self.base_value
    elif isinstance(self.base_value, (int, long, numpy.integer, float, bool)):
        base_val = self._homogeneous_array(addr) * self.base_value
    elif isinstance(self.base_value, numpy.ndarray):
        base_val = self.base_value[addr]
    elif have_scipy and sparse.issparse(self.base_value):  # For sparse matrices larr[2, :]
        base_val = self.base_value[addr]
    elif callable(self.base_value):
        # The base value is a function of array indices.
        indices = self._array_indices(addr)
        base_val = self.base_value(*indices)
        if isinstance(base_val, numpy.ndarray) and base_val.shape == (1,):
            base_val = base_val[0]
    elif hasattr(self.base_value, "lazily_evaluate"):
        base_val = self.base_value.lazily_evaluate(addr, shape=self._shape)
    elif isinstance(self.base_value, VectorizedIterable):
        # Draw exactly as many values as the sub-array needs.
        partial_shape = self._partial_shape(addr)
        if partial_shape:
            n = reduce(operator.mul, partial_shape)
        else:
            n = 1
        base_val = self.base_value.next(n)
        # note that the array contents will depend on the order of access to elements
        if n == 1:
            base_val = base_val[0]
        elif partial_shape and base_val.shape != partial_shape:
            base_val = base_val.reshape(partial_shape)
    elif isinstance(self.base_value, collections.Iterator):
        raise NotImplementedError("coming soon...")
    else:
        raise ValueError("invalid base value for array (%s)" % self.base_value)
    return self._apply_operations(base_val, addr, simplify=simplify)
python
{ "resource": "" }
q38836
larray.check_bounds
train
def check_bounds(self, addr):
    """
    Check whether the given address is within the array bounds.

    Raises IndexError, TypeError or ValueError on invalid addresses;
    returns nothing on success.
    """
    def is_boolean_array(arr):
        return hasattr(arr, 'dtype') and arr.dtype == bool
    def check_axis(x, size):
        # Derive the lowest and highest index the component can touch,
        # then range-check both against the axis size.
        if isinstance(x, (int, long, numpy.integer)):
            lower = upper = x
        elif isinstance(x, slice):
            lower = x.start or 0
            upper = min(x.stop or size - 1, size - 1)  # slices are allowed to go past the bounds
        elif isinstance(x, collections.Sized):
            if is_boolean_array(x):
                lower = 0
                upper = x.size - 1
            else:
                if len(x) == 0:
                    raise ValueError("Empty address component (address was %s)" % str(addr))
                if hasattr(x, "min"):
                    lower = x.min()
                else:
                    lower = min(x)
                if hasattr(x, "max"):
                    upper = x.max()
                else:
                    upper = max(x)
        else:
            raise TypeError("Invalid array address: %s (element of type %s)" % (str(addr), type(x)))
        if (lower < -size) or (upper >= size):
            raise IndexError("Index out of bounds")
    full_addr = self._full_address(addr)
    if isinstance(addr, numpy.ndarray) and addr.dtype == bool:
        # A boolean mask must fit inside the array's own shape.
        if len(addr.shape) > len(self._shape):
            raise IndexError("Too many indices for array")
        for xmax, size in zip(addr.shape, self._shape):
            upper = xmax - 1
            if upper >= size:
                raise IndexError("Index out of bounds")
    else:
        for i, size in zip(full_addr, self._shape):
            check_axis(i, size)
python
{ "resource": "" }
q38837
larray.evaluate
train
def evaluate(self, simplify=False, empty_val=0): """ Return the lazy array as a real NumPy array. If the array is homogeneous and ``simplify`` is ``True``, return a single numerical value. """ # need to catch the situation where a generator-based larray is evaluated a second time if self.is_homogeneous: if simplify: x = self.base_value else: x = self.base_value * numpy.ones(self._shape, dtype=self.dtype) elif isinstance(self.base_value, (int, long, numpy.integer, float, bool, numpy.bool_)): x = self.base_value * numpy.ones(self._shape, dtype=self.dtype) elif isinstance(self.base_value, numpy.ndarray): x = self.base_value elif callable(self.base_value): x = numpy.array(numpy.fromfunction(self.base_value, shape=self._shape, dtype=int), dtype=self.dtype) elif hasattr(self.base_value, "lazily_evaluate"): x = self.base_value.lazily_evaluate(shape=self._shape) elif isinstance(self.base_value, VectorizedIterable): x = self.base_value.next(self.size) if x.shape != self._shape: x = x.reshape(self._shape) elif have_scipy and sparse.issparse(self.base_value): # For sparse matrices if empty_val!=0: x = self.base_value.toarray((sparse.csc_matrix)) x = numpy.where(x, x, numpy.nan) else: x = self.base_value.toarray((sparse.csc_matrix)) elif isinstance(self.base_value, collections.Iterator): x = numpy.fromiter(self.base_value, dtype=self.dtype or float, count=self.size) if x.shape != self._shape: x = x.reshape(self._shape) else: raise ValueError("invalid base value for array") return self._apply_operations(x, simplify=simplify)
python
{ "resource": "" }
q38838
is_closest_date_parameter
train
def is_closest_date_parameter(task, param_name):
    """
    Report whether ``param_name`` on ``task`` is a closest-date
    parameter, i.e. whether its parameter object carries a
    ``use_closest_date`` attribute.  Unknown names yield False.
    """
    match = next((obj for name, obj in task.get_params()
                  if name == param_name), None)
    return match is not None and hasattr(match, 'use_closest_date')
python
{ "resource": "" }
q38839
delistify
train
def delistify(x):
    """
    Build a slug from a parameter value: lists are sanitised (single
    quotes stripped), sorted, and dash-joined; any other value is
    returned unchanged.
    """
    if not isinstance(x, list):
        return x
    cleaned = [item.replace("'", "") for item in x]
    return '-'.join(sorted(cleaned))
python
{ "resource": "" }
q38840
BaseTask.effective_task_id
train
def effective_task_id(self):
    """
    Return the task id, with the date parameter replaced by the closest
    available date when the task uses a closest-date parameter.

    Works on a copy of ``param_kwargs``: the original implementation
    wrote the closest date back into the shared parameter dict,
    mutating the task's state as a side effect.
    """
    params = dict(self.param_kwargs)
    if 'date' in params and is_closest_date_parameter(self, 'date'):
        params['date'] = self.closest()
        task_id_parts = sorted(['%s=%s' % (k, str(v)) for k, v in params.items()])
        return '%s(%s)' % (self.task_family, ', '.join(task_id_parts))
    else:
        return self.task_id
python
{ "resource": "" }
q38841
BaseTask.taskdir
train
def taskdir(self):
    """Directory under which all artefacts of this task family live."""
    parts = (self.BASE, self.TAG, self.task_family)
    return os.path.join(*parts)
python
{ "resource": "" }
q38842
MockTask.run
train
def run(self):
    """Copy the fixture file into place, so the task has some output."""
    fixture_target = luigi.LocalTarget(path=self.fixture)
    fixture_target.copy(self.output().path)
python
{ "resource": "" }
q38843
Dispatcher.get_listeners
train
def get_listeners(self, name):
    """Return just the callables registered under ``name``."""
    # Each entry is a (listener, priority) pair; drop the priority.
    return [callback for callback, _priority in self.listeners[name]]
python
{ "resource": "" }
q38844
Dispatcher.add_listener
train
def add_listener(self, name, listener, priority=0):
    """Register ``listener`` under ``name``, highest priority first."""
    entries = self.listeners.setdefault(name, [])
    entries.append((listener, priority))
    # Keep the list ordered so dispatch runs high-priority listeners
    # before low-priority ones.
    entries.sort(key=lambda entry: entry[1], reverse=True)
python
{ "resource": "" }
q38845
dump_etree
train
def dump_etree(data, container=None, nsmap=None, attribs=None):
    """Convert dictionary to Simple Dublin Core XML as ElementTree.

    :param data: Dictionary.
    :param container: Name (include namespace) of container element.
    :param nsmap: Namespace mapping for lxml.
    :param attribs: Default attributes for container element.
    :returns: LXML ElementTree.
    """
    # Fall back to the module-level defaults for any omitted argument.
    return dump_etree_helper(container or container_element,
                             data,
                             rules,
                             nsmap or ns,
                             attribs or container_attribs)
python
{ "resource": "" }
q38846
rule_factory
train
def rule_factory(plural, singular):
    """Element rule factory.

    Builds and registers a rule named ``plural`` that yields one
    Dublin Core ``singular`` element per non-empty value.
    """
    @rules.rule(plural)
    def f(path, values):
        for v in values:
            if v:
                # Clark-notation tag: {namespace}localname.
                elem = etree.Element(
                    '{{http://purl.org/dc/elements/1.1/}}{0}'.format(singular))
                elem.text = v
                yield elem
    # Give the generated rule a meaningful name for introspection.
    f.__name__ = plural
    return f
python
{ "resource": "" }
q38847
RemoteStatsCollector.startReceivingBoxes
train
def startReceivingBoxes(self, sender):
    """
    Start observing log events for stat events to send.
    """
    AMP.startReceivingBoxes(self, sender)
    # Hook into the twisted log stream; _emit forwards stat events.
    log.addObserver(self._emit)
python
{ "resource": "" }
q38848
RemoteStatsCollector.stopReceivingBoxes
train
def stopReceivingBoxes(self, reason):
    """
    Stop observing log events.
    """
    AMP.stopReceivingBoxes(self, reason)
    # Mirror of startReceivingBoxes: detach the observer added there.
    log.removeObserver(self._emit)
python
{ "resource": "" }
q38849
normalize_filename
train
def normalize_filename(filename):
    """
    Replace URL-special characters with underscores and shorten the
    name if it is too long (prefixing an MD5 digest for uniqueness).

    Fixes two bugs in the original:

    * the pattern ``"/|\\|;|:|\\?|="`` escaped the alternation bar
      instead of matching a backslash, so backslashes survived and
      literal ``|`` was stripped; a character class is used instead
      (``|`` is kept in the class for backward compatibility)
    * ``hashlib.md5`` requires bytes on Python 3
    """
    filename = re.sub(r"[/\\;:?=|]", "_", filename)
    if len(filename) > 150:
        prefix = hashlib.md5(filename.encode('utf-8')).hexdigest()
        filename = prefix + filename[-140:]
    return filename
python
{ "resource": "" }
q38850
build_local_filename
train
def build_local_filename(download_url=None, filename=None, decompress=False):
    """
    Determine which local filename to use, based on the file's source
    URL, an optional desired filename, and whether a compression suffix
    needs to be removed.
    """
    assert download_url or filename, "Either filename or URL must be specified"
    # With no explicit filename, derive one from the URL: an md5 digest
    # prefix plus the joined path components, then sanitise it.
    if not filename:
        digest = hashlib.md5(download_url.encode('utf-8')).hexdigest()
        head_and_tail = split(download_url)
        filename = normalize_filename(digest + "." + "_".join(head_and_tail))
    if decompress:
        base, ext = splitext(filename)
        if ext in (".gz", ".zip"):
            filename = base
    return filename
python
{ "resource": "" }
q38851
compare
train
def compare(file1, file2):
    """Return True when the two files have identical contents.

    Each argument may be an open file handle or an accessible file
    path.  Both files are read fully into memory, so only use this on
    small files.
    """
    if isinstance(file1, six.string_types):  # pragma: no branch
        file1 = open(file1, 'r', True)
    if isinstance(file2, six.string_types):  # pragma: no branch
        file2 = open(file2, 'r', True)
    return file1.read() == file2.read()
python
{ "resource": "" }
q38852
sha1sum
train
def sha1sum(f):
    """Return the SHA-1 hash of the contents of file `f`, in hex format.

    The file is read in 512 KiB chunks so arbitrarily large files can
    be hashed, and is closed afterwards (the original leaked the file
    handle).
    """
    h = hashlib.sha1()
    with open(f, 'rb') as fp:
        # iter(callable, sentinel): read until an empty chunk (EOF).
        for block in iter(lambda: fp.read(512 * 1024), b''):
            h.update(block)
    return h.hexdigest()
python
{ "resource": "" }
q38853
safe_copyfile
train
def safe_copyfile(src, dest):
    """Atomically copy ``src`` to ``dest``.

    Copies into a temporary file in the destination directory and then
    renames it over ``dest``, so readers never observe a half-written
    file.  The original never closed either file object, meaning the
    rename could happen before buffered data was flushed; it also left
    the temporary file behind on failure.
    """
    fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest))
    try:
        # Close (and therefore flush) the destination before copystat
        # and the final rename.
        with open(src, 'rb') as fsrc, os.fdopen(fd, 'wb') as fdst:
            shutil.copyfileobj(fsrc, fdst)
        shutil.copystat(src, tmpname)
        os.rename(tmpname, dest)
    except Exception:
        # Don't leave the temporary file behind on failure.
        if os.path.exists(tmpname):
            os.unlink(tmpname)
        raise
python
{ "resource": "" }
q38854
sphericalAngSep
train
def sphericalAngSep(ra0, dec0, ra1, dec1, radians=False):
    """
    Compute the great-circle angular separation between two points on
    the sky, using the haversine formula (numerically stable for small
    separations, unlike the plain spherical law of cosines).

    Coordinates are (ra, dec) pairs in degrees unless ``radians`` is
    True; the result is returned in the same units.
    """
    if radians == False:
        ra0, dec0, ra1, dec1 = [np.radians(v) for v in (ra0, dec0, ra1, dec1)]
    hav = haversine(dec1 - dec0)
    hav += np.cos(dec0) * np.cos(dec1) * haversine(ra1 - ra0)
    # Guard against round-off pushing the arcsin argument past 1.
    separation = 2 * np.arcsin(min(1, np.sqrt(hav)))
    if radians == False:
        separation = np.degrees(separation)
    return separation
python
{ "resource": "" }
q38855
haversine
train
def haversine(x):
    """Return the haversine of an angle.

    haversine(x) = sin(x/2)**2, where x is an angle in radians.
    """
    half_sine = np.sin(x * 0.5)
    return half_sine * half_sine
python
{ "resource": "" }
q38856
linkTo
train
def linkTo(sharedProxyOrItem):
    """
    Generate the path part of a URL to link to a share item or its
    proxy.

    @param sharedProxy: a L{sharing.SharedProxy} or L{sharing.Share}

    @return: a URL object, which when converted to a string will look
    something like '/users/user@host/shareID'.

    @rtype: L{nevow.url.URL}

    @raise: L{RuntimeError} if the store that the C{sharedProxyOrItem}
    is stored in is not accessible via the web, for example due to the
    fact that the store has no L{LoginMethod} objects to indicate who
    it is owned by.
    """
    # Unwrap a proxy to find the store the underlying item lives in.
    if isinstance(sharedProxyOrItem, sharing.SharedProxy):
        userStore = sharing.itemFromProxy(sharedProxyOrItem).store
    else:
        userStore = sharedProxyOrItem.store
    appStore = isAppStore(userStore)
    if appStore:
        # This code-path should be fixed by #2703; PublicWeb is deprecated.
        from xmantissa.publicweb import PublicWeb
        substore = userStore.parent.getItemByID(userStore.idInParent)
        pw = userStore.parent.findUnique(PublicWeb, PublicWeb.application == substore)
        path = [pw.prefixURL.encode('ascii')]
    else:
        # Use the first internal login method's localpart as the user
        # path segment.
        for lm in userbase.getLoginMethods(userStore):
            if lm.internal:
                path = ['users', lm.localpart.encode('ascii')]
                break
        else:
            raise RuntimeError(
                "Shared item is in a user store with no"
                " internal username -- can't generate a link.")
    if (sharedProxyOrItem.shareID == getDefaultShareID(userStore)):
        # Default shares get a trailing-slash URL; the share ID itself
        # is injected later by _ShareURL.child().
        shareID = sharedProxyOrItem.shareID
        path.append('')
    else:
        shareID = None
        path.append(sharedProxyOrItem.shareID)
    return _ShareURL(shareID, scheme='', netloc='', pathsegs=path)
python
{ "resource": "" }
q38857
_ShareURL.child
train
def child(self, path):
    """
    Override the base implementation to inject the share ID our
    constructor was passed.
    """
    if self._shareID is not None:
        # Insert the share ID segment exactly once: rebind self to the
        # child URL carrying the share ID, clear the marker on that new
        # object, then fall through to normal child handling.
        self = url.URL.child(self, self._shareID)
        self._shareID = None
    return url.URL.child(self, path)
python
{ "resource": "" }
q38858
_ShareURL.cloneURL
train
def cloneURL(self, scheme, netloc, pathsegs, querysegs, fragment):
    """
    Override the base implementation to pass along the share ID our
    constructor was passed.
    """
    cls = self.__class__
    return cls(self._shareID, scheme, netloc, pathsegs, querysegs, fragment)
python
{ "resource": "" }
q38859
_set_autocommit
train
def _set_autocommit(connection): """Make sure a connection is in autocommit mode.""" if hasattr(connection.connection, "autocommit"): if callable(connection.connection.autocommit): connection.connection.autocommit(True) else: connection.connection.autocommit = True elif hasattr(connection.connection, "set_isolation_level"): connection.connection.set_isolation_level(0)
python
{ "resource": "" }
q38860
_patch_static_handler
train
def _patch_static_handler(handler):
    """Patch in support for static files serving if supported and
    enabled; returns None on Django versions older than 1.3.
    """
    if django.VERSION[:2] >= (1, 3):
        from django.contrib.staticfiles.handlers import StaticFilesHandler
        return StaticFilesHandler(handler)
python
{ "resource": "" }
q38861
SeleniumPlugin._inject_selenium
train
def _inject_selenium(self, test):
    """
    Injects a selenium instance into the method.
    """
    from django.conf import settings
    test_case = get_test_case_class(test)
    test_case.selenium_plugin_started = True
    # Provide some reasonable default values
    sel = selenium(
        getattr(settings, "SELENIUM_HOST", "localhost"),
        int(getattr(settings, "SELENIUM_PORT", 4444)),
        getattr(settings, "SELENIUM_BROWSER_COMMAND", "*chrome"),
        getattr(settings, "SELENIUM_URL_ROOT", "http://127.0.0.1:8000/"))
    try:
        sel.start()
    except socket.error:
        # No selenium server reachable: skip unless tests are forced.
        if getattr(settings, "FORCE_SELENIUM_TESTS", False):
            raise
        else:
            raise SkipTest("Selenium server not available.")
    else:
        test_case.selenium_started = True
        # Only works on method test cases, because we obviously need
        # self.
        if isinstance(test.test, nose.case.MethodTestCase):
            test.test.test.im_self.selenium = sel
        elif isinstance(test.test, TestCase):
            test.test.run.im_self.selenium = sel
        else:
            raise SkipTest("Test skipped because it's not a method.")
python
{ "resource": "" }
q38862
StoppableWSGIServer.server_bind
train
def server_bind(self):
    """Bind server to socket.

    Overridden to store server name and set timeout.
    """
    try:
        HTTPServer.server_bind(self)
    except Exception as e:
        # Re-wrap any bind failure so callers only have to handle
        # WSGIServerException.  NOTE(review): the original traceback is
        # discarded by this re-raise.
        raise WSGIServerException(e)
    self.setup_environ()
    # Short socket timeout so the serving loop can notice shutdown
    # requests promptly.
    self.socket.settimeout(1)
python
{ "resource": "" }
q38863
AsyncMethodCall.result
train
def result(self, wait=False):
    """
    Gets the result of the method call.

    If the call was successful, return the result, otherwise, reraise
    the exception.

    :param wait: Block until the result is available, or just get the
                 result.
    :raises: RuntimeError when called and the result is not yet
             available.
    """
    if wait:
        self._async_resp.wait()
    if not self.finished():
        raise RuntimeError("Result is not ready yet")
    # Unpack the raw JSON-RPC style response into a Result object,
    # keeping a reference to the originating request.
    raw_response = self._async_resp.get()
    return Result(result=raw_response["result"], error=raw_response["error"],
                  id=raw_response["id"], method_call=self.request)
python
{ "resource": "" }
q38864
Extension.post_build
train
def post_build(self, container_builder, container):
    """
    This method make sure the flask configuration is fine, and check
    the if ioc.extra.jinja2 service is available. If so, the flask
    instance will use this service, by keeping the flask template
    loader and the one registered at the jinja2
    """
    app = container.get('ioc.extra.flask.app')
    app.config.update(container_builder.parameters.get('ioc.extra.flask.app.config'))
    if container.has('ioc.extra.jinja2'):
        # This must be an instance of jinja.ChoiceLoader
        # This code replace the flask specific jinja configuration to use
        # the one provided by the ioc.extra.jinja2 code
        jinja2 = container.get('ioc.extra.jinja2')
        jinja2.loader.loaders.append(app.create_global_jinja_loader())
        # Copy flask's globals/filters over, but never overwrite names
        # already registered on the shared environment.
        for name, value in app.jinja_env.globals.items():
            if name not in jinja2.globals:
                jinja2.globals[name] = value
        for name, value in app.jinja_env.filters.items():
            if name not in jinja2.filters:
                jinja2.filters[name] = value
        app.jinja_env = jinja2
python
{ "resource": "" }
q38865
Mongrel2Handler.start
train
def start(self):
    """
    Start delivering incoming requests to the receive callback.

    Calling this more than once per handler is a programming error.
    """
    assert not self._started
    stream = self._listening_stream
    stream.on_recv(self._recv_callback)
    self._started = True
python
{ "resource": "" }
q38866
Mongrel2Handler._create_listening_stream
train
def _create_listening_stream(self, pull_addr):
    """
    Create a stream listening for Requests. The `self._recv_callback`
    method is associated with incoming requests.
    """
    # PULL socket: Mongrel2 fans requests out to connected handlers.
    sock = self._zmq_context.socket(zmq.PULL)
    sock.connect(pull_addr)
    stream = ZMQStream(sock, io_loop=self.io_loop)
    return stream
python
{ "resource": "" }
q38867
Mongrel2Handler._create_sending_stream
train
def _create_sending_stream(self, pub_addr):
    """
    Create a `ZMQStream` for sending responses back to Mongrel2.
    """
    sock = self._zmq_context.socket(zmq.PUB)
    # The identity lets Mongrel2 route responses from this handler.
    sock.setsockopt(zmq.IDENTITY, self.sender_id)
    sock.connect(pub_addr)
    stream = ZMQStream(sock, io_loop=self.io_loop)
    return stream
python
{ "resource": "" }
q38868
Mongrel2Handler._recv_callback
train
def _recv_callback(self, msg):
    """
    Method is called when there is a message coming from a Mongrel2
    server. This message should be a valid Request String.
    """
    # msg is a multipart zmq message; the first frame carries the
    # Mongrel2 request payload.
    m2req = MongrelRequest.parse(msg[0])
    # The connection drives the request lifecycle itself; no reference
    # is kept here.
    MongrelConnection(m2req, self._sending_stream, self.request_callback,
                      no_keep_alive=self.no_keep_alive, xheaders=self.xheaders)
python
{ "resource": "" }
q38869
MongrelConnection._begin_request
train
def _begin_request(self):
    """
    Actually start executing this request.

    Builds an HTTPRequest from the Mongrel2 headers and dispatches it,
    handling disconnect notifications and Mongrel2's two-phase file
    upload protocol.
    """
    headers = self.m2req.headers
    self._request = HTTPRequest(connection=self,
                                method=headers.get("METHOD"),
                                uri=self.m2req.path,
                                version=headers.get("VERSION"),
                                headers=headers,
                                remote_ip=headers.get("x-forwarded-for"))
    if len(self.m2req.body) > 0:
        self._request.body = self.m2req.body
    if self.m2req.is_disconnect():
        # Client went away before we could serve anything.
        self.finish()
    elif headers.get("x-mongrel2-upload-done", None):
        # there has been a file upload.
        # Only dispatch when the start/done markers agree, i.e. this is
        # the completion message for the upload we were told about.
        expected = headers.get("x-mongrel2-upload-start", "BAD")
        upload = headers.get("x-mongrel2-upload-done", None)
        if expected == upload:
            self.request_callback(self._request)
    elif headers.get("x-mongrel2-upload-start", None):
        # this is just a notification that a file upload has started. Do
        # nothing for now!
        pass
    else:
        self.request_callback(self._request)
python
{ "resource": "" }
q38870
MongrelConnection.finish
train
def finish(self):
    """
    Mark the current request finished; when keep-alive is not in
    effect, send the empty terminating message and drop the request.
    """
    assert self._request, "Request closed"
    self._request_finished = True
    if self.m2req.should_close() or self.no_keep_alive:
        self._send("")
        self._request = None
python
{ "resource": "" }
q38871
MongrelConnection._send
train
def _send(self, msg): """ Raw send to the given connection ID at the given uuid, mostly used internally. """ uuid = self.m2req.sender conn_id = self.m2req.conn_id header = "%s %d:%s," % (uuid, len(str(conn_id)), str(conn_id)) zmq_message = header + ' ' + msg self.stream.send(zmq_message)
python
{ "resource": "" }
q38872
as_completed
train
def as_completed(*async_result_wrappers):
    """
    Yields results as they become available from asynchronous method
    calls.

    Example usage ::

        async_calls = [service.call_method_async("do_stuff", (x,))
                       for x in range(25)]
        for async_call in gemstone.as_completed(*async_calls):
            print("just finished with result ", async_call.result())

    :param async_result_wrappers: :py:class:`gemstone.client.structs.AsyncMethodCall` instances.
    :return: a generator that yields items as soon they results become available.

    .. versionadded:: 0.5.0
    """
    import time
    for item in async_result_wrappers:
        if not isinstance(item, AsyncMethodCall):
            raise TypeError("Got non-AsyncMethodCall object: {}".format(item))
    pending = list(async_result_wrappers)
    while pending:
        finished = [item for item in pending if item.finished()]
        if not finished:
            # Sleep briefly instead of busy-spinning: the original
            # looped with a bare ``continue`` and pinned a CPU core.
            time.sleep(0.001)
            continue
        for item in finished:
            pending.remove(item)
            yield item
python
{ "resource": "" }
q38873
dynamic_load
train
def dynamic_load(module_or_member):
    """
    Dynamically loads a module, or a member of a module.

    If ``module_or_member`` is something like ``"a.b.c"``, performs
    ``from a.b import c``; a plain ``"a"`` performs ``import a``.

    :param module_or_member: dotted path of a module or module member
        to import.
    :return: the imported module, or the named member of the module.
    :raises AttributeError: if the module has no such member.

    Bug fixed vs. the original: members with a falsy value (e.g.
    ``os.O_RDONLY == 0`` or an empty string) were wrongly rejected with
    AttributeError because of a truthiness check on the result.
    """
    module_path, _, member_name = module_or_member.rpartition(".")
    if not module_path:
        # No dot: the whole string names a module.
        return importlib.import_module(member_name)
    module = importlib.import_module(module_path)
    try:
        return getattr(module, member_name)
    except AttributeError:
        raise AttributeError("{} has no attribute {}".format(module, member_name))
python
{ "resource": "" }
q38874
BooleanField.encode
train
def encode(cls, value):
    """
    Convert a boolean into its redis wire representation: ``b'1'`` for
    True, an empty byte-string for False.

    Note that ``1 == True`` and ``0 == False``, so plain 0/1 ints are
    accepted as well.

    :param value: bool
    :return: bytes
    :raises InvalidValue: if value is not boolean-equivalent
    """
    if value not in (True, False):
        raise InvalidValue('not a boolean')
    return b'1' if value else b''
python
{ "resource": "" }
q38875
FloatField.encode
train
def encode(cls, value):
    """
    Encode a floating point number for storage in redis.

    Accepts anything that round-trips exactly through ``float`` (so
    plain ints pass too) and returns its ``repr`` string.

    :param value: float
    :return: str
    :raises InvalidValue: if value is not float-equivalent
    """
    try:
        is_float_like = float(value) + 0 == value
    except (TypeError, ValueError):
        is_float_like = False
    if is_float_like:
        return repr(value)
    raise InvalidValue('not a float')
python
{ "resource": "" }
q38876
IntegerField.encode
train
def encode(cls, value):
    """
    Turn an integer into its string representation for redis.

    Values are accepted when they compare equal to their ``int``
    coercion (so ``3.0`` and ``True`` pass, ``3.5`` does not).

    :param value: int
    :return: str
    :raises InvalidValue: if value is not integer-equivalent
    """
    try:
        as_int = int(value)
        is_int_like = as_int + 0 == value
    except (TypeError, ValueError):
        is_int_like = False
    if is_int_like:
        return repr(as_int)
    raise InvalidValue('not an int')
python
{ "resource": "" }
q38877
TextField.encode
train
def encode(cls, value):
    """
    Take a valid unicode string and turn it into utf-8 bytes.

    :param value: unicode, str
    :return: bytes
    :raises InvalidValue: if value is not text
    """
    # Coerce then compare: values whose unicode() form differs from the
    # original (e.g. numbers) are rejected rather than silently cast.
    coerced = unicode(value)
    if coerced == value:
        return coerced.encode(cls._encoding)
    raise InvalidValue('not text')
python
{ "resource": "" }
q38878
AsciiField.encode
train
def encode(cls, value):
    """
    Encode an ascii-only text value as bytes for redis.

    :param value: unicode/str matching ``cls.PATTERN``
    :return: bytes
    :raises InvalidValue: if the value is not ascii text
    """
    # Must be genuine text (round-trips through unicode) AND match the
    # field's ascii pattern.
    coerced = unicode(value)
    if coerced == value and cls.PATTERN.match(coerced):
        return coerced.encode(cls._encoding)
    raise InvalidValue('not ascii')
python
{ "resource": "" }
q38879
BinaryField.encode
train
def encode(cls, value):
    """
    Pass binary data through to redis without re-encoding it.

    :param value: bytes (or bytes-equivalent, e.g. bytearray)
    :return: bytes
    :raises InvalidValue: if value does not round-trip through
        ``bytes()``
    """
    try:
        coerced = bytes(value)
        if coerced == value:
            return coerced
    except (TypeError, UnicodeError):
        pass
    raise InvalidValue('not binary')
python
{ "resource": "" }
q38880
ListField.encode
train
def encode(cls, value):
    """
    Serialise a list as a JSON byte-string for storage in redis.

    :param value: list
    :return: bytes
    :raises InvalidValue: if value is not list-equivalent or not
        JSON-serialisable
    """
    try:
        as_list = list(value)
        if as_list == value:
            return json.dumps(as_list).encode(cls._encoding)
    except TypeError:
        pass
    raise InvalidValue('not a list')
python
{ "resource": "" }
q38881
ListField.decode
train
def decode(cls, value):
    """
    Turn a utf-8 encoded JSON byte-string from redis back into a list.

    ``None`` passes through unchanged; values that are not
    byte-strings fall back to a plain ``list()`` coercion.

    :param value: bytes or None
    :return: list or None
    """
    if value is None:
        return None
    try:
        return list(json.loads(value.decode(cls._encoding)))
    except (TypeError, AttributeError):
        return list(value)
python
{ "resource": "" }
q38882
DictField.encode
train
def encode(cls, value):
    """
    Serialise a dict as a JSON byte-string to be written into redis.

    :param value: dict
    :return: bytes
    :raises InvalidValue: if value is not dict-equivalent or not
        JSON-serialisable
    """
    try:
        as_dict = dict(value)
        if as_dict == value:
            return json.dumps(as_dict).encode(cls._encoding)
    except (TypeError, ValueError):
        pass
    raise InvalidValue('not a dict')
python
{ "resource": "" }
q38883
StringListField.encode
train
def encode(cls, value):
    """
    Join a list of strings into a comma-separated utf-8 byte-string.

    An empty list encodes as ``None`` (the value is simply absent).

    :param value: list of str
    :return: bytes or None
    :raises InvalidValue: if value is not a list of strings
    """
    try:
        as_strings = [str(v) for v in value]
        if as_strings == value:
            # Empty lists are stored as an absent value.
            return ",".join(as_strings).encode(cls._encoding) if len(value) > 0 else None
    except TypeError:
        pass
    raise InvalidValue('not a list of strings')
python
{ "resource": "" }
q38884
swap_twitter_subject
train
def swap_twitter_subject(subject, body):
    """
    For 'Tweet from ...' subjects, replace the subject with the first
    meaningful body line — the one following a line ending in
    ``, HH:MM]]``.  All other subjects pass through untouched; the body
    is always returned unchanged.
    """
    if subject.startswith('Tweet from'):
        lines = body.split('\n')
        for position, line in enumerate(lines):
            if re.match(r'.*, ?\d{2}:\d{2}]]', line) is not None:
                try:
                    subject = lines[position + 1]
                except IndexError:
                    # Timestamp line was the last line; keep the subject.
                    pass
                break
    return subject, body
python
{ "resource": "" }
q38885
trial
train
def trial(log_dir=None,
          upload_dir=None,
          sync_period=None,
          trial_prefix="",
          param_map=None,
          init_logging=True):
    """
    Generates a trial within a with context.

    Only one trial may be active in the current context at a time.
    """
    global _trial  # pylint: disable=global-statement
    if _trial:
        # TODO: would be nice to stack crawl at creation time to report
        # where that initial trial was created, and that creation line
        # info is helpful to keep around anyway.
        raise ValueError("A trial already exists in the current context")
    local_trial = Trial(
        log_dir=log_dir,
        upload_dir=upload_dir,
        sync_period=sync_period,
        trial_prefix=trial_prefix,
        param_map=param_map,
        # Bug fix: the original hard-coded True here, silently ignoring
        # the caller's ``init_logging`` argument.
        init_logging=init_logging)
    try:
        _trial = local_trial
        _trial.start()
        yield local_trial
    finally:
        # Always tear the trial down, even if the body raised.
        _trial = None
        local_trial.close()
python
{ "resource": "" }
q38886
index
train
def index(index, length):
    """Normalise an index against a sequence length.

    Negative indices count from the end of the sequence (the usual
    Python convention); the returned index is always in
    ``[0, length)``.

    :param index: The index, can be positive or negative.
    :param length: The length of the sequence to index.
    :raises: IndexError
    """
    normalised = index + length if index < 0 else index
    if not 0 <= normalised < length:
        raise IndexError()
    return normalised
python
{ "resource": "" }
q38887
range_to_numeric
train
def range_to_numeric(ranges):
    """Converts a sequence of string ranges to a sequence of floats.

    E.g.::

        >>> range_to_numeric(['1 uV', '2 mV', '1 V'])
        [1E-6, 0.002, 1.0]
    """
    values, units = zip(*(r.split() for r in ranges))
    # The common unit is the longest shared *suffix* of the unit
    # strings, found by prefix-matching their reversals.
    unit = os.path.commonprefix([u[::-1] for u in units])
    # What remains in front of the unit is the SI prefix.
    prefixes = (u[:-len(unit)] for u in units)
    return [float(v) * SI_PREFIX[p] for v, p in zip(values, prefixes)]
python
{ "resource": "" }
q38888
AutoRange.range
train
def range(self, value):
    """Estimates an appropriate sensitivity range.

    Appends ``abs(value)`` to the rolling buffer, averages the buffer and
    picks the first range whose scaled value exceeds the mean (falling back
    to the largest range). If a mapping is configured, the estimate is
    translated through it.
    """
    self._buffer.append(abs(value))
    mean = sum(self._buffer) / len(self._buffer)
    estimate = self.ranges[-1]
    for candidate in self.ranges:
        if mean < self.scale * candidate:
            estimate = candidate
            break
    if self._mapping:
        return self._mapping[estimate]
    return estimate
python
{ "resource": "" }
q38889
Context.add
train
def add(self, schema, data):
    """
    Stage ``data`` as a set of statements, based on the given
    ``schema`` definition, and return the URI of the staged node.
    """
    binding = self.get_binding(schema, data)
    uri, triples = triplify(binding)
    # Stage every generated triple into this context's local graph.
    stage = self.graph.add
    for triple in triples:
        stage(triple)
    return uri
python
{ "resource": "" }
q38890
Context.save
train
def save(self):
    """
    Transfer the statements in this context over to the main store.
    """
    if self.parent.buffered:
        # Buffered mode: push all staged triples to the parent store in a
        # single SPARQL 1.1 Update, scoped to this context's named graph.
        # NOTE(review): the N-Triples serialization is interpolated directly
        # into the query string; assumes the serializer output is safe to
        # embed — confirm against the store's update parser.
        query = """ INSERT DATA { GRAPH %s { %s } } """
        query = query % (self.identifier.n3(), self.graph.serialize(format='nt'))
        self.parent.graph.update(query)
        # Drop the local staging graph now that it has been transferred.
        self.flush()
    else:
        # Unbuffered mode: statements were written through already;
        # presumably this finalizes/commits provenance metadata — TODO confirm.
        self.meta.generate()
python
{ "resource": "" }
q38891
Context.delete
train
def delete(self):
    """
    Delete all statements matching the current context identifier from
    the main store.
    """
    if not self.parent.buffered:
        # Unbuffered: wipe the local graph in place.
        self.graph.remove((None, None, None))
        return
    # Buffered: clear this context's named graph on the parent store, then
    # discard the local staging graph.
    query = 'CLEAR SILENT GRAPH %s ;' % self.identifier.n3()
    self.parent.graph.update(query)
    self.flush()
python
{ "resource": "" }
q38892
Transport.read_bytes
train
def read_bytes(self, num_bytes):
    """Reads at most `num_bytes`.

    Serves from the internal buffer when possible; a non-empty buffer
    shorter than the request is returned whole (so fewer bytes than
    requested may come back). An empty buffer triggers a low-level read
    followed by a retry.
    """
    available = len(self._buffer)
    if available > num_bytes:
        # Buffer holds more than requested: hand out a slice, keep the rest.
        chunk = self._buffer[:num_bytes]
        self._buffer = self._buffer[num_bytes:]
        return chunk
    if available > 0:
        # Hand out everything we have; may be shorter than num_bytes.
        chunk, self._buffer = self._buffer, bytearray()
        return chunk
    # Buffer empty: pull from the transport and retry, which caps the
    # result at num_bytes.
    self._buffer += self.__read__(num_bytes)
    return self.read_bytes(num_bytes)
python
{ "resource": "" }
q38893
Transport.read_until
train
def read_until(self, delimiter):
    """Reads until the delimiter is found.

    Returns the buffered bytes preceding the delimiter; the delimiter is
    consumed but not returned. Refills the buffer from the transport and
    retries while the delimiter is absent.
    """
    if delimiter not in self._buffer:
        self._buffer += self.__read__(self._max_bytes)
        return self.read_until(delimiter)
    head, _sep, tail = self._buffer.partition(delimiter)
    self._buffer = tail
    return head
python
{ "resource": "" }
q38894
LinuxGpib.close
train
def close(self):
    """Closes the gpib transport.

    Takes the board/device offline via ibonl(0); a no-op when already
    closed. The handle is cleared only after the status check passes.
    """
    if self._device is None:
        return
    status = self._lib.ibonl(self._device, 0)
    self._check_status(status)
    self._device = None
python
{ "resource": "" }
q38895
LinuxGpib.trigger
train
def trigger(self):
    """Triggers the device.

    Sends a GET (group execute trigger) command byte to the device and
    checks the returned status word.
    """
    status = self._lib.ibtrg(self._device)
    self._check_status(status)
python
{ "resource": "" }
q38896
LinuxGpib._check_status
train
def _check_status(self, ibsta):
    """Checks ibsta value.

    Raises Timeout when the 0x4000 (TIMO) bit is set; otherwise raises
    Error, carrying the current error status, when the 0x8000 (ERR) bit
    is set. Timeout takes precedence when both bits are set.
    """
    if ibsta & 0x4000:
        raise LinuxGpib.Timeout()
    if ibsta & 0x8000:
        raise LinuxGpib.Error(self.error_status)
python
{ "resource": "" }
q38897
DownloadPage.run
train
def run(self):
    """ Just run wget quietly.

    Downloads the task URL into a temporary file and moves it into the
    final target location.
    """
    downloaded = shellout('wget -q "{url}" -O {output}', url=self.url)
    luigi.LocalTarget(downloaded).move(self.output().path)
python
{ "resource": "" }
q38898
DownloadPage.output
train
def output(self):
    """ Use the digest version, since URL can be ugly.

    Returns the local HTML target whose filename is derived from a digest
    of the task parameters.
    """
    target_path = self.path(digest=True, ext='html')
    return luigi.LocalTarget(path=target_path)
python
{ "resource": "" }
q38899
JsonPage.run
train
def run(self):
    """ Construct the document id from the date and the url.

    Reads the downloaded page, decodes it leniently as UTF-8, and writes a
    JSON document with a stable SHA-1 id derived from date and url.
    """
    with self.input().open() as handle:
        content = handle.read().decode('utf-8', 'ignore')
    # Key order matches the original insertion order (_id, content, url, date).
    document = {
        '_id': hashlib.sha1('%s:%s' % (self.date, self.url)).hexdigest(),
        'content': content,
        'url': self.url,
        'date': unicode(self.date),
    }
    with self.output().open('w') as output:
        output.write(json.dumps(document))
python
{ "resource": "" }