_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33800
arrow_get
train
def arrow_get(string):
    """Parse an ICS-style timestamp string into an Arrow object.

    ICS uses ISO 8601 without dashes or colons, i.e. not ISO 8601 at all,
    so strings lacking dashes are parsed with an explicit format chosen by
    string length from DATE_FORMATS.
    """
    # Normalize date separators first.
    if '/' in string:
        string = string.replace('/', '-')
    # A dash-containing string is assumed to already be proper ISO 8601.
    if '-' in string:
        return arrow.get(string)
    # Strip a trailing UTC designator and pick a format by length.
    string = string.rstrip('Z')
    return arrow.get(string, DATE_FORMATS[len(string)])
python
{ "resource": "" }
q33801
parse_duration
train
def parse_duration(line):
    """Return a timedelta object from a string in the DURATION property format.

    Raises parse.ParseError on malformed input. Each unit designator
    (W/D/H/M/S) may appear at most once; a consumed designator is removed
    from its lookup table.
    """
    DAYS, SECS = {'D': 1, 'W': 7}, {'S': 1, 'M': 60, 'H': 3600}
    sign, i = 1, 0
    # Optional leading sign.
    if line[i] in '-+':
        if line[i] == '-':
            sign = -1
        i += 1
    # The designator 'P' is mandatory.
    if line[i] != 'P':
        raise parse.ParseError()
    i += 1
    days, secs = 0, 0
    while i < len(line):
        # 'T' separates the date part from the time part.
        if line[i] == 'T':
            i += 1
            if i == len(line):
                break
        j = i
        while line[j].isdigit():
            j += 1
        if i == j:
            raise parse.ParseError()
        val = int(line[i:j])
        if line[j] in DAYS:
            days += val * DAYS[line[j]]
            DAYS.pop(line[j])
        elif line[j] in SECS:
            secs += val * SECS[line[j]]
            SECS.pop(line[j])
        else:
            raise parse.ParseError()
        i = j + 1
    return timedelta(sign * days, sign * secs)
python
{ "resource": "" }
q33802
timedelta_to_duration
train
def timedelta_to_duration(dt):
    """Return a string according to the DURATION property format from a
    timedelta object (weeks/days, then a 'T'-prefixed time part)."""
    days, secs = dt.days, dt.seconds
    res = 'P'
    # Date part: weeks then remaining days.
    if days // 7:
        res += str(days // 7) + 'W'
        days %= 7
    if days:
        res += str(days) + 'D'
    # Time part, only emitted when there are seconds to encode.
    if secs:
        res += 'T'
        if secs // 3600:
            res += str(secs // 3600) + 'H'
            secs %= 3600
        if secs // 60:
            res += str(secs // 60) + 'M'
            secs %= 60
        if secs:
            res += str(secs) + 'S'
    return res
python
{ "resource": "" }
q33803
Event.end
train
def end(self): """Get or set the end of the event. | Will return an :class:`Arrow` object. | May be set to anything that :func:`Arrow.get` understands. | If set to a non null value, removes any already existing duration. | Setting to None will have unexpected behavior if begin is not None. | Must not be set to an inferior value than self.begin. """ if self._duration: # if end is duration defined # return the beginning + duration return self.begin + self._duration elif self._end_time: # if end is time defined if self.all_day: return self._end_time else: return self._end_time elif self._begin: # if end is not defined if self.all_day: return self._begin + timedelta(days=1) else: # instant event return self._begin else: return None
python
{ "resource": "" }
q33804
Event.duration
train
def duration(self):
    """Get or set the duration of the event.

    |  Will return a timedelta object.
    |  May be set to anything that timedelta() understands.
    |  May be set with a dict ({"days":2, "hours":6}).
    |  If set to a non null value, removes any already existing end time.
    """
    if self._duration:
        return self._duration
    if self.end:
        # The clever getter for `end` also takes care of all-day events.
        return self.end - self.begin
    # Event has neither start, nor end, nor duration.
    return None
python
{ "resource": "" }
q33805
Event.make_all_day
train
def make_all_day(self):
    """Transforms self to an all-day event spanning every day from the
    begin day to the end day."""
    if self.all_day:
        # Already an all-day event; nothing to do.
        return
    begin_day = self.begin.floor('day')
    end_day = self.end.floor('day')
    self._begin = begin_day
    if begin_day == end_day:
        # A one-day event needs no explicit _end_time.
        self._end_time = None
    else:
        self._end_time = end_day + timedelta(days=1)
    self._duration = None
    self._begin_precision = 'day'
python
{ "resource": "" }
q33806
Event.join
train
def join(self, other, *args, **kwarg):
    """Create a new event which covers the time range of two intersecting events.

    All extra parameters are passed to the Event constructor.

    Args:
        other: the other event
    Returns:
        a new Event instance
    Raises:
        ValueError: if the two events do not intersect.
    """
    event = Event(*args, **kwarg)
    if not self.intersects(other):
        raise ValueError('Cannot join {} with {}: they don\'t intersect.'.format(self, other))
    # Take the earlier begin and the later end of the pair.
    event.begin = other.begin if self.starts_within(other) else self.begin
    event.end = other.end if self.ends_within(other) else self.end
    return event
python
{ "resource": "" }
q33807
timezone
train
def timezone(calendar, vtimezones):
    """Receives a list of VTIMEZONE blocks, parses them and adds them to
    calendar._timezones."""
    for vtimezone in vtimezones:
        remove_x(vtimezone)  # Remove non standard lines from the block
        fake_file = StringIO()
        fake_file.write(str(vtimezone))  # Represent the block as a string
        fake_file.seek(0)
        timezones = tzical(fake_file)  # tzical does not like strings
        # A tzical object may contain multiple timezones.
        for key in timezones.keys():
            calendar._timezones[key] = timezones.get(key)
python
{ "resource": "" }
q33808
Todo.due
train
def due(self):
    """Get or set the end of the todo.

    |  Will return an :class:`Arrow` object.
    |  May be set to anything that :func:`Arrow.get` understands.
    |  If set to a non null value, removes any already existing duration.
    |  Setting to None will have unexpected behavior if begin is not None.
    |  Must not be set to an inferior value than self.begin.
    """
    if self._duration:
        # Due is duration-defined: beginning + duration.
        return self.begin + self._duration
    if self._due_time:
        # Due is time-defined.
        return self._due_time
    return None
python
{ "resource": "" }
q33809
Todo.duration
train
def duration(self):
    """Get or set the duration of the todo.

    |  Will return a timedelta object.
    |  May be set to anything that timedelta() understands.
    |  May be set with a dict ({"days":2, "hours":6}).
    |  If set to a non null value, removes any already existing end time.
    """
    if self._duration:
        return self._duration
    if self.due:
        return self.due - self.begin
    # Todo has neither due, nor start and duration.
    return None
python
{ "resource": "" }
q33810
APIAuth.login
train
def login(self, email, password, android_id):
    """Authenticate to Google with the provided credentials.

    Args:
        email (str): The account to use.
        password (str): The account password.
        android_id (str): An identifier for this client.
    Raises:
        LoginException: If there was a problem logging in.
    """
    self._email = email
    self._android_id = android_id
    res = gpsoauth.perform_master_login(self._email, password, self._android_id)
    # A missing 'Token' key signals a failed master login.
    if 'Token' not in res:
        raise exception.LoginException(res.get('Error'), res.get('ErrorDetail'))
    self._master_token = res['Token']
    self.refresh()
    return True
python
{ "resource": "" }
q33811
APIAuth.load
train
def load(self, email, master_token, android_id):
    """Authenticate to Google with the provided master token.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        android_id (str): An identifier for this client.
    Raises:
        LoginException: If there was a problem logging in.
    """
    self._email = email
    self._android_id = android_id
    self._master_token = master_token
    # Obtain an OAuth token from the stored master token.
    self.refresh()
    return True
python
{ "resource": "" }
q33812
APIAuth.refresh
train
def refresh(self):
    """Refresh the OAuth token.

    Returns:
        string: The auth token.
    Raises:
        LoginException: If there was a problem refreshing the OAuth token.
    """
    res = gpsoauth.perform_oauth(
        self._email, self._master_token, self._android_id,
        service=self._scopes,
        app='com.google.android.keep',
        client_sig='38918a453d07199354f8b19af05ec6562ced5788'
    )
    if 'Auth' not in res:
        # NOTE(review): if 'Auth' is absent but 'Token' IS present, execution
        # falls through to res['Auth'] below and raises KeyError instead of
        # LoginException — confirm whether that case is intended/possible.
        if 'Token' not in res:
            raise exception.LoginException(res.get('Error'))
    self._auth_token = res['Auth']
    return self._auth_token
python
{ "resource": "" }
q33813
APIAuth.logout
train
def logout(self):
    """Log out of the account by discarding all stored credentials."""
    self._master_token = None
    self._auth_token = None
    self._email = None
    self._android_id = None
python
{ "resource": "" }
q33814
API.send
train
def send(self, **req_kwargs):
    """Send an authenticated request to a Google API.

    Automatically retries if the access token has expired.

    Args:
        **req_kwargs: Arbitrary keyword arguments to pass to Requests.
    Return:
        dict: The parsed JSON response.
    Raises:
        APIException: If the server returns an error.
        LoginException: If :py:meth:`login` has not been called.
    """
    attempts = 0
    while True:
        response = self._send(**req_kwargs).json()
        if 'error' not in response:
            break
        error = response['error']
        # Only a 401 (expired token) is retried; anything else is fatal.
        if error['code'] != 401:
            raise exception.APIException(error['code'], error)
        if attempts >= self.RETRY_CNT:
            raise exception.APIException(error['code'], error)
        logger.info('Refreshing access token')
        self._auth.refresh()
        attempts += 1
    return response
python
{ "resource": "" }
q33815
API._send
train
def _send(self, **req_kwargs): """Send an authenticated request to a Google API. Args: **req_kwargs: Arbitrary keyword arguments to pass to Requests. Return: requests.Response: The raw response. Raises: LoginException: If :py:meth:`login` has not been called. """ auth_token = self._auth.getAuthToken() if auth_token is None: raise exception.LoginException('Not logged in') req_kwargs.setdefault('headers', { 'Authorization': 'OAuth ' + auth_token }) return self._session.request(**req_kwargs)
python
{ "resource": "" }
q33816
MediaAPI.get
train
def get(self, blob):
    """Get the canonical link to a media blob.

    Args:
        blob (gkeepapi.node.Blob): The blob.
    Returns:
        str: A link to the media.
    """
    # The server answers with a redirect; the Location header is the link.
    url = self._base_url + blob.parent.server_id + '/' + blob.server_id + '?s=0'
    response = self._send(url=url, method='GET', allow_redirects=False)
    return response.headers.get('Location')
python
{ "resource": "" }
q33817
RemindersAPI.create
train
def create(self):
    """Create a new reminder.

    Returns:
        The parsed server response.
    """
    params = {}
    return self.send(
        url=self._base_url + 'create',
        method='POST',
        json=params
    )
python
{ "resource": "" }
q33818
RemindersAPI.list
train
def list(self, master=True):
    """List current reminders.

    Args:
        master (bool): When True, list only master reminders; otherwise list
            recurrence instances inside a one-year-back / one-day-ahead window.
    Returns:
        The parsed server response.
    """
    params = {}
    params.update(self.static_params)
    if master:
        params.update({
            "recurrenceOptions": {
                "collapseMode": "MASTER_ONLY",
            },
            "includeArchived": True,
            "includeDeleted": False,
        })
    else:
        # Window: 365 days back to 1 day ahead, in epoch milliseconds.
        current_time = time.time()
        start_time = int((current_time - (365 * 24 * 60 * 60)) * 1000)
        end_time = int((current_time + (24 * 60 * 60)) * 1000)
        params.update({
            "recurrenceOptions": {
                "collapseMode": "INSTANCES_ONLY",
                "recurrencesOnly": True,
            },
            "includeArchived": False,
            "includeCompleted": False,
            "includeDeleted": False,
            "dueAfterMs": start_time,
            "dueBeforeMs": end_time,
            "recurrenceId": [],
        })
    return self.send(
        url=self._base_url + 'list',
        method='POST',
        json=params
    )
python
{ "resource": "" }
q33819
RemindersAPI.history
train
def history(self, storage_version):
    """Get reminder changes since the given storage version.

    Args:
        storage_version: Reminder storage version to diff against.
    Returns:
        The parsed server response.
    """
    params = {
        "storageVersion": storage_version,
        "includeSnoozePresetUpdates": True,
    }
    params.update(self.static_params)
    return self.send(
        url=self._base_url + 'history',
        method='POST',
        json=params
    )
python
{ "resource": "" }
q33820
RemindersAPI.update
train
def update(self):
    """Sync up changes to reminders.

    Returns:
        The parsed server response.
    """
    params = {}
    return self.send(
        url=self._base_url + 'update',
        method='POST',
        json=params
    )
python
{ "resource": "" }
q33821
Keep.login
train
def login(self, username, password, state=None, sync=True):
    """Authenticate to Google with the provided credentials & sync.

    Args:
        username (str): The account to use.
        password (str): The account password.
        state (dict): Serialized state to load.
        sync (bool): Whether to sync after loading.
    Raises:
        LoginException: If there was a problem logging in.
    """
    auth = APIAuth(self.OAUTH_SCOPES)
    # The MAC address doubles as the android device id.
    ret = auth.login(username, password, get_mac())
    if ret:
        self.load(auth, state, sync)
    return ret
python
{ "resource": "" }
q33822
Keep.resume
train
def resume(self, email, master_token, state=None, sync=True):
    """Authenticate to Google with the provided master token & sync.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        state (dict): Serialized state to load.
        sync (bool): Whether to sync after loading.
    Raises:
        LoginException: If there was a problem logging in.
    """
    auth = APIAuth(self.OAUTH_SCOPES)
    ret = auth.load(email, master_token, android_id=get_mac())
    if ret:
        self.load(auth, state, sync)
    return ret
python
{ "resource": "" }
q33823
Keep.dump
train
def dump(self):
    """Serialize note data.

    Returns:
        dict: Serialized state (keep version, labels and nodes).
    """
    # Find all nodes manually, as the Keep object isn't aware of new
    # ListItems until they've been synced to the server.
    nodes = []
    for node in self.all():
        nodes.append(node)
        nodes.extend(node.children)
    return {
        'keep_version': self._keep_version,
        'labels': [label.save(False) for label in self.labels()],
        'nodes': [node.save(False) for node in nodes]
    }
python
{ "resource": "" }
q33824
Keep.restore
train
def restore(self, state):
    """Unserialize saved note data.

    Args:
        state (dict): Serialized state to load.
    """
    # Wipe current state before loading the snapshot.
    self._clear()
    self._parseUserInfo({'labels': state['labels']})
    self._parseNodes(state['nodes'])
    self._keep_version = state['keep_version']
python
{ "resource": "" }
q33825
Keep.get
train
def get(self, node_id):
    """Get a note with the given ID.

    Args:
        node_id (str): The note ID.
    Returns:
        gkeepapi.node.TopLevelNode: The Note or None if not found.
    """
    # Look up by local id first, then fall back to the server-id mapping.
    root_children = self._nodes[_node.Root.ID]
    return root_children.get(node_id) or \
        root_children.get(self._sid_map.get(node_id))
python
{ "resource": "" }
q33826
Keep.find
train
def find(self, query=None, func=None, labels=None, colors=None, pinned=None, archived=None, trashed=False):  # pylint: disable=too-many-arguments
    """Find Notes based on the specified criteria.

    Args:
        query (Union[_sre.SRE_Pattern, str, None]): A str or regular expression to match against the title and text.
        func (Union[callable, None]): A filter function.
        labels (Union[List[str], None]): A list of label ids or objects to match. An empty list matches notes with no labels.
        colors (Union[List[str], None]): A list of colors to match.
        pinned (Union[bool, None]): Whether to match pinned notes.
        archived (Union[bool, None]): Whether to match archived notes.
        trashed (Union[bool, None]): Whether to match trashed notes.
    Return:
        List[gkeepapi.node.TopLevelNode]: Results.
    """
    if labels is not None:
        # Accept Label objects as well as raw label ids.
        labels = [i.id if isinstance(i, _node.Label) else i for i in labels]
    return (node for node in self.all() if
        (query is None or (
            (isinstance(query, six.string_types) and
                (query in node.title or query in node.text)) or
            (isinstance(query, Pattern) and (
                query.search(node.title) or query.search(node.text)
            ))
        )) and
        (func is None or func(node)) and
        (labels is None or
            (not labels and not node.labels.all()) or
            (any((node.labels.get(i) is not None for i in labels)))
        ) and
        (colors is None or node.color in colors) and
        (pinned is None or node.pinned == pinned) and
        (archived is None or node.archived == archived) and
        (trashed is None or node.trashed == trashed)
    )
python
{ "resource": "" }
q33827
Keep.findLabel
train
def findLabel(self, query, create=False):
    """Find a label with the given name.

    Args:
        query (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name.
        create (bool): Whether to create the label if it doesn't exist (only if query is a str).
    Returns:
        Union[gkeepapi.node.Label, None]: The label.
    """
    is_str = isinstance(query, six.string_types)
    if is_str:
        # Label names match case-insensitively.
        query = query.lower()
    for label in self._labels.values():
        if (is_str and query == label.name.lower()) or \
                (isinstance(query, Pattern) and query.search(label.name)):
            return label
    return self.createLabel(query) if create and is_str else None
python
{ "resource": "" }
q33828
Keep.deleteLabel
train
def deleteLabel(self, label_id):
    """Deletes a label.

    Args:
        label_id (str): Label id.
    """
    if label_id not in self._labels:
        return
    label = self._labels[label_id]
    label.delete()
    # Detach the label from every note that carries it.
    for node in self.all():
        node.labels.remove(label)
python
{ "resource": "" }
q33829
Keep.sync
train
def sync(self, resync=False):
    """Sync the local Keep tree with the server. If resyncing, local changes
    will be detroyed. Otherwise, local changes to notes, labels and reminders
    will be detected and synced up.

    Args:
        resync (bool): Whether to resync data.
    Raises:
        SyncException: If there is a consistency issue.
    """
    if resync:
        self._clear()

    # Phase 1: pull reminders until the server reports no newer version.
    while True:
        logger.debug('Starting reminder sync: %s', self._reminder_version)
        changes = self._reminders_api.list()
        if 'task' in changes:
            self._parseTasks(changes['task'])
        self._reminder_version = changes['storageVersion']
        logger.debug('Finishing sync: %s', self._reminder_version)
        history = self._reminders_api.history(self._reminder_version)
        if self._reminder_version == history['highestStorageVersion']:
            break

    # Phase 2: push dirty nodes/labels and pull note changes until the
    # server stops truncating.
    while True:
        logger.debug('Starting keep sync: %s', self._keep_version)
        labels_updated = any((i.dirty for i in self._labels.values()))
        changes = self._keep_api.changes(
            target_version=self._keep_version,
            nodes=[i.save() for i in self._findDirtyNodes()],
            labels=[i.save() for i in self._labels.values()] if labels_updated else None,
        )
        if changes.get('forceFullResync'):
            raise exception.ResyncRequiredException('Full resync required')
        if changes.get('upgradeRecommended'):
            raise exception.UpgradeRecommendedException('Upgrade recommended')
        if 'userInfo' in changes:
            self._parseUserInfo(changes['userInfo'])
        if 'nodes' in changes:
            self._parseNodes(changes['nodes'])
        self._keep_version = changes['toVersion']
        logger.debug('Finishing sync: %s', self._keep_version)
        if not changes['truncated']:
            break

    if _node.DEBUG:
        self._clean()
python
{ "resource": "" }
q33830
Keep._clean
train
def _clean(self):
    """Recursively check that all nodes are reachable."""
    # Walk the tree from the root, collecting every reachable node id.
    found_ids = {}
    nodes = [self._nodes[_node.Root.ID]]
    while nodes:
        node = nodes.pop()
        found_ids[node.id] = None
        nodes = nodes + node.children
    # Registered but unreachable.
    for node_id in self._nodes:
        if node_id in found_ids:
            continue
        logger.error('Dangling node: %s', node_id)
    # Reachable but never registered.
    for node_id in found_ids:
        if node_id in self._nodes:
            continue
        logger.error('Unregistered node: %s', node_id)
python
{ "resource": "" }
q33831
from_json
train
def from_json(raw):
    """Helper to construct a node from a dict.

    Args:
        raw (dict): Raw node representation.
    Returns:
        Node: A Node object or None.
    """
    ncls = None
    _type = raw.get('type')
    try:
        ncls = _type_map[NodeType(_type)]
    except (KeyError, ValueError) as e:
        # Unknown node type: warn, or raise a ParseException in DEBUG mode.
        logger.warning('Unknown node type: %s', _type)
        if DEBUG:
            raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e)
        return None
    node = ncls()
    node.load(raw)
    return node
python
{ "resource": "" }
q33832
Element.save
train
def save(self, clean=True):
    """Serialize into raw representation. Clears the dirty bit by default.

    Args:
        clean (bool): Whether to clear the dirty bit.
    Returns:
        dict: Raw.
    """
    ret = {}
    if clean:
        self._dirty = False
    else:
        # Preserve the dirty flag in the serialized form instead.
        ret['_dirty'] = self._dirty
    return ret
python
{ "resource": "" }
q33833
NodeAnnotations.from_json
train
def from_json(cls, raw):
    """Helper to construct an annotation from a dict.

    Args:
        raw (dict): Raw annotation representation.
    Returns:
        Node: An Annotation object or None.
    """
    # Dispatch on the discriminating key present in the raw dict.
    bcls = None
    if 'webLink' in raw:
        bcls = WebLink
    elif 'topicCategory' in raw:
        bcls = Category
    elif 'taskAssist' in raw:
        bcls = TaskAssist
    elif 'context' in raw:
        bcls = Context
    if bcls is None:
        logger.warning('Unknown annotation type: %s', raw.keys())
        return None
    annotation = bcls()
    annotation.load(raw)
    return annotation
python
{ "resource": "" }
q33834
NodeAnnotations.links
train
def links(self):
    """Get all links.

    Returns:
        list[gkeepapi.node.WebLink]: A list of links.
    """
    return [annotation for annotation in self._annotations.values()
            if isinstance(annotation, WebLink)]
python
{ "resource": "" }
q33835
NodeAnnotations.append
train
def append(self, annotation):
    """Add an annotation.

    Args:
        annotation (gkeepapi.node.Annotation): An Annotation object.
    Returns:
        gkeepapi.node.Annotation: The Annotation.
    """
    self._annotations[annotation.id] = annotation
    self._dirty = True
    return annotation
python
{ "resource": "" }
q33836
NodeAnnotations.remove
train
def remove(self, annotation):
    """Removes an annotation.

    Args:
        annotation (gkeepapi.node.Annotation): An Annotation object.
    """
    if annotation.id in self._annotations:
        del self._annotations[annotation.id]
    # Marked dirty even when the annotation was absent (matches original).
    self._dirty = True
python
{ "resource": "" }
q33837
NodeCollaborators.add
train
def add(self, email):
    """Add a collaborator.

    Args:
        email (str): Collaborator email address.
    """
    # Record a pending share request unless the email is already tracked.
    if email not in self._collaborators:
        self._collaborators[email] = ShareRequestValue.Add
    self._dirty = True
python
{ "resource": "" }
q33838
NodeCollaborators.remove
train
def remove(self, email):
    """Remove a Collaborator.

    Args:
        email (str): Collaborator email address.
    """
    if email in self._collaborators:
        if self._collaborators[email] == ShareRequestValue.Add:
            # A not-yet-synced add request can simply be dropped.
            del self._collaborators[email]
        else:
            # Otherwise queue an explicit removal for the server.
            self._collaborators[email] = ShareRequestValue.Remove
    self._dirty = True
python
{ "resource": "" }
q33839
NodeCollaborators.all
train
def all(self):
    """Get all collaborators.

    Returns:
        List[str]: Collaborators.
    """
    # Include active roles plus pending add requests; exclude removals.
    return [email for email, action in self._collaborators.items()
            if action in [RoleValue.Owner, RoleValue.User, ShareRequestValue.Add]]
python
{ "resource": "" }
q33840
NodeLabels.add
train
def add(self, label):
    """Add a label.

    Args:
        label (gkeepapi.node.Label): The Label object.
    """
    self._labels[label.id] = label
    self._dirty = True
python
{ "resource": "" }
q33841
NodeLabels.remove
train
def remove(self, label):
    """Remove a label.

    Args:
        label (gkeepapi.node.Label): The Label object.
    """
    # Tombstone the entry (set to None) rather than deleting the key.
    if label.id in self._labels:
        self._labels[label.id] = None
    self._dirty = True
python
{ "resource": "" }
q33842
TimestampsMixin.touch
train
def touch(self, edited=False):
    """Mark the node as dirty.

    Args:
        edited (bool): Whether to set the edited time.
    """
    self._dirty = True
    now = datetime.datetime.utcnow()
    self.timestamps.updated = now
    if edited:
        self.timestamps.edited = now
python
{ "resource": "" }
q33843
TimestampsMixin.trashed
train
def trashed(self):
    """Get the trashed state.

    Returns:
        bool: Whether this item is trashed.
    """
    # A trashed timestamp later than the epoch sentinel means "trashed".
    ts = self.timestamps.trashed
    return ts is not None and ts > NodeTimestamps.int_to_dt(0)
python
{ "resource": "" }
q33844
TimestampsMixin.deleted
train
def deleted(self):
    """Get the deleted state.

    Returns:
        bool: Whether this item is deleted.
    """
    # A deleted timestamp later than the epoch sentinel means "deleted".
    ts = self.timestamps.deleted
    return ts is not None and ts > NodeTimestamps.int_to_dt(0)
python
{ "resource": "" }
q33845
Node.text
train
def text(self, value):
    """Set the text value.

    Args:
        value (str): Text value.
    """
    self._text = value
    # Record the edit time, then mark the node dirty/edited.
    self.timestamps.edited = datetime.datetime.utcnow()
    self.touch(True)
python
{ "resource": "" }
q33846
Node.append
train
def append(self, node, dirty=True):
    """Add a new child node.

    Args:
        node (gkeepapi.Node): Node to add.
        dirty (bool): Whether this node should be marked dirty.
    Returns:
        gkeepapi.Node: The added node.
    """
    self._children[node.id] = node
    node.parent = self
    if dirty:
        self.touch()
    return node
python
{ "resource": "" }
q33847
Node.remove
train
def remove(self, node, dirty=True):
    """Remove the given child node.

    Args:
        node (gkeepapi.Node): Node to remove.
        dirty (bool): Whether this node should be marked dirty.
    """
    if node.id in self._children:
        # Detach before deleting so the child no longer references us.
        self._children[node.id].parent = None
        del self._children[node.id]
    if dirty:
        self.touch()
python
{ "resource": "" }
q33848
List.add
train
def add(self, text, checked=False, sort=None):
    """Add a new item to the list.

    Args:
        text (str): The text.
        checked (bool): Whether this item is checked.
        sort (int): Item id for sorting.
    Returns:
        gkeepapi.node.ListItem: The created item.
    """
    node = ListItem(parent_id=self.id, parent_server_id=self.server_id)
    node.checked = checked
    node.text = text
    if sort is not None:
        node.sort = sort
    self.append(node, True)
    self.touch(True)
    return node
python
{ "resource": "" }
q33849
List.items_sort
train
def items_sort(cls, items):
    """Sort list items, taking into account parent items.

    Args:
        items (list[gkeepapi.node.ListItem]): Items to sort.
    Returns:
        list[gkeepapi.node.ListItem]: Sorted items.
    """
    # Stdlib equivalent of six.moves.zip_longest (no third-party dependency).
    from itertools import zip_longest

    class t(tuple):
        """Tuple with element-based sorting"""
        def __cmp__(self, other):
            # Element-wise numeric compare; a missing element (None) sorts
            # after a present one.
            for a, b in zip_longest(self, other):
                if a != b:
                    if a is None:
                        return 1
                    if b is None:
                        return -1
                    return a - b
            return 0

        def __lt__(self, other):
            return self.__cmp__(other) < 0

        # BUG FIX: these two were misspelled __gt_ / __ge_ in the original,
        # so '>' and '>=' silently fell back to tuple's lexicographic
        # comparison instead of the numeric __cmp__ semantics.
        def __gt__(self, other):
            return self.__cmp__(other) > 0

        def __le__(self, other):
            return self.__cmp__(other) <= 0

        def __ge__(self, other):
            return self.__cmp__(other) >= 0

        def __eq__(self, other):
            return self.__cmp__(other) == 0

        def __ne__(self, other):
            return self.__cmp__(other) != 0

    def key_func(x):
        # Indented items sort within their parent item's key.
        if x.indented:
            return t((int(x.parent_item.sort), int(x.sort)))
        return t((int(x.sort), ))

    return sorted(items, key=key_func, reverse=True)
python
{ "resource": "" }
q33850
ListItem.add
train
def add(self, text, checked=False, sort=None):
    """Add a new sub item to the list. This item must already be attached to a list.

    Args:
        text (str): The text.
        checked (bool): Whether this item is checked.
        sort (int): Item id for sorting.
    Returns:
        The created sub item.
    """
    if self.parent is None:
        raise exception.InvalidException('Item has no parent')
    # Create on the parent list, then indent under this item.
    node = self.parent.add(text, checked, sort)
    self.indent(node)
    return node
python
{ "resource": "" }
q33851
ListItem.indent
train
def indent(self, node, dirty=True):
    """Indent an item. Does nothing if the target has subitems.

    Args:
        node (gkeepapi.node.ListItem): Item to indent.
        dirty (bool): Whether this node should be marked dirty.
    """
    # An item that itself has subitems cannot be indented.
    if node.subitems:
        return
    self._subitems[node.id] = node
    node.super_list_item_id = self.id
    node.parent_item = self
    if dirty:
        node.touch(True)
python
{ "resource": "" }
q33852
ListItem.dedent
train
def dedent(self, node, dirty=True):
    """Dedent an item. Does nothing if the target is not indented under this item.

    Args:
        node (gkeepapi.node.ListItem): Item to dedent.
        dirty (bool): Whether this node should be marked dirty.
    """
    if node.id not in self._subitems:
        return
    del self._subitems[node.id]
    node.super_list_item_id = None
    node.parent_item = None
    if dirty:
        node.touch(True)
python
{ "resource": "" }
q33853
Blob.from_json
train
def from_json(cls, raw):
    """Helper to construct a blob from a dict.

    Args:
        raw (dict): Raw blob representation.
    Returns:
        NodeBlob: A NodeBlob object or None.
    """
    if raw is None:
        return None
    bcls = None
    _type = raw.get('type')
    try:
        bcls = cls._blob_type_map[BlobType(_type)]
    except (KeyError, ValueError) as e:
        # Unknown blob type: warn, or raise a ParseException in DEBUG mode.
        logger.warning('Unknown blob type: %s', _type)
        if DEBUG:
            raise_from(exception.ParseException('Parse error for %s' % (_type), raw), e)
        return None
    blob = bcls()
    blob.load(raw)
    return blob
python
{ "resource": "" }
q33854
Google.check_prompt_code
train
def check_prompt_code(response):
    """Sometimes there is an additional numerical code on the response page
    that needs to be selected on the prompt from a list of multiple choice.
    Print it if it's there."""
    num_code = response.find("div", {"jsname": "EKvSSd"})
    if num_code:
        print("numerical code for prompt: {}".format(num_code.string))
python
{ "resource": "" }
q33855
get_short_module_name
train
def get_short_module_name(module_name, obj_name):
    """Get the shortest possible module name from which obj_name can be
    imported while still resolving to the same object.

    NOTE: uses exec() on the module/object names; callers must not pass
    untrusted input.
    """
    scope = {}
    try:
        # Find out what the real object is supposed to be.
        exec('from %s import %s' % (module_name, obj_name), scope, scope)
        real_obj = scope[obj_name]
    except Exception:
        return module_name

    parts = module_name.split('.')
    short_name = module_name
    # Try progressively shorter prefixes, longest first.
    for i in range(len(parts) - 1, 0, -1):
        short_name = '.'.join(parts[:i])
        scope = {}
        try:
            exec('from %s import %s' % (short_name, obj_name), scope, scope)
            # Ensure shortened object is the same as what we expect.
            assert real_obj is scope[obj_name]
        except Exception:
            # Libraries can throw all sorts of exceptions...
            # fall back to the last working module name.
            short_name = '.'.join(parts[:(i + 1)])
            break
    return short_name
python
{ "resource": "" }
q33856
identify_names
train
def identify_names(filename):
    """Builds a codeobj summary by identifying and resolving used names."""
    node, _ = parse_source_file(filename)
    if node is None:
        return {}

    # Collect matches from the code (AST) and from the docstrings.
    finder = NameFinder()
    finder.visit(node)
    names = list(finder.get_mapping())
    names += extract_object_names_from_docs(filename)

    example_code_obj = collections.OrderedDict()
    for name, full_name in names:
        # Skip duplicates (e.g. in both docstring and code).
        if name in example_code_obj:
            continue
        # name is as written in file (e.g. np.asarray);
        # full_name includes resolved import path (e.g. numpy.asarray).
        splitted = full_name.rsplit('.', 1)
        if len(splitted) == 1:
            # Module without attribute: not useful for backreferences.
            continue
        module, attribute = splitted
        module_short = get_short_module_name(module, attribute)
        example_code_obj[name] = {
            'name': attribute,
            'module': module,
            'module_short': module_short,
        }
    return example_code_obj
python
{ "resource": "" }
q33857
scan_used_functions
train
def scan_used_functions(example_file, gallery_conf):
    """Save variables so we can later add links to the documentation.

    Returns the set of backreference names used by the example that belong
    to the documented module.
    """
    example_code_obj = identify_names(example_file)
    if example_code_obj:
        # Persist the mapping next to the example for later processing.
        codeobj_fname = example_file[:-3] + '_codeobj.pickle.new'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
        _replace_md5(codeobj_fname)

    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith(gallery_conf['doc_module']))
    return backrefs
python
{ "resource": "" }
q33858
_thumbnail_div
train
def _thumbnail_div(target_dir, src_dir, fname, snippet, is_backref=False, check=True):
    """Generates RST to place a thumbnail in a gallery."""
    thumb, _ = _find_image_ext(
        os.path.join(target_dir, 'images', 'thumb',
                     'sphx_glr_%s_thumb.png' % fname[:-3]))
    if check and not os.path.isfile(thumb):
        # This means we have done something wrong in creating our thumbnail!
        raise RuntimeError('Could not find internal sphinx-gallery thumbnail '
                           'file:\n%s' % (thumb,))
    thumb = os.path.relpath(thumb, src_dir)
    full_dir = os.path.relpath(target_dir, src_dir)
    # Inside rst files forward slash defines paths.
    thumb = thumb.replace(os.sep, "/")
    ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
    return template.format(snippet=escape(snippet), thumbnail=thumb, ref_name=ref_name)
python
{ "resource": "" }
q33859
write_backreferences
train
def write_backreferences(seen_backrefs, gallery_conf, target_dir, fname, snippet):
    """Writes down back reference files, which include a thumbnail list of
    examples using a certain module."""
    if gallery_conf['backreferences_dir'] is None:
        return
    example_file = os.path.join(target_dir, fname)
    backrefs = scan_used_functions(example_file, gallery_conf)
    for backref in backrefs:
        include_path = os.path.join(gallery_conf['src_dir'],
                                    gallery_conf['backreferences_dir'],
                                    '%s.examples.new' % backref)
        seen = backref in seen_backrefs
        # Append to an existing file, or create it with a heading first.
        with codecs.open(include_path, 'a' if seen else 'w',
                         encoding='utf-8') as ex_file:
            if not seen:
                heading = '\n\nExamples using ``%s``' % backref
                ex_file.write(heading + '\n')
                ex_file.write('^' * len(heading) + '\n')
            ex_file.write(_thumbnail_div(target_dir, gallery_conf['src_dir'],
                                         fname, snippet, is_backref=True))
            seen_backrefs.add(backref)
python
{ "resource": "" }
q33860
finalize_backreferences
train
def finalize_backreferences(seen_backrefs, gallery_conf):
    """Replace backref files only if necessary."""
    logger = sphinx_compatibility.getLogger('sphinx-gallery')
    if gallery_conf['backreferences_dir'] is None:
        return

    for backref in seen_backrefs:
        path = os.path.join(gallery_conf['src_dir'],
                            gallery_conf['backreferences_dir'],
                            '%s.examples.new' % backref)
        if os.path.isfile(path):
            _replace_md5(path)
        else:
            # Missing file: log at the configured severity.
            level = gallery_conf['log_level'].get('backreference_missing', 'warning')
            func = getattr(logger, level)
            func('Could not find backreferences file: %s' % (path,))
            func('The backreferences are likely to be erroneous '
                 'due to file system case insensitivity.')
python
{ "resource": "" }
q33861
jupyter_notebook_skeleton
train
def jupyter_notebook_skeleton():
    """Return the boilerplate dict for an empty Jupyter notebook."""
    major = sys.version_info[0]
    kernelspec = {
        "display_name": "Python " + str(major),
        "language": "python",
        "name": "python" + str(major),
    }
    language_info = {
        "codemirror_mode": {
            "name": "ipython",
            "version": major,
        },
        "file_extension": ".py",
        "mimetype": "text/x-python",
        "name": "python",
        "nbconvert_exporter": "python",
        "pygments_lexer": "ipython" + str(major),
        "version": '{0}.{1}.{2}'.format(*sys.version_info[:3]),
    }
    return {
        "cells": [],
        "metadata": {
            "kernelspec": kernelspec,
            "language_info": language_info,
        },
        "nbformat": 4,
        "nbformat_minor": 0,
    }
python
{ "resource": "" }
q33862
directive_fun
train
def directive_fun(match, directive):
    """Render a matched RST directive as a Bootstrap alert ``<div>``."""
    alert_class = {"note": "info", "warning": "danger"}[directive]
    body = match.group(1).strip()
    return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>'
            .format(alert_class, directive.capitalize(), body))
python
{ "resource": "" }
q33863
rst2md
train
def rst2md(text):
    """Converts the RST text from the examples docstrigs and comments
    into markdown text for the Jupyter notebooks"""

    # ``=====`` over/under-lined titles become a single '# ' markdown heading.
    top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
    text = re.sub(top_heading, r'# \1', text)

    # ``.. math::`` directives (with their indented continuation lines)
    # become LaTeX align environments, which MathJax renders in notebooks.
    math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
    text = re.sub(math_eq,
                  lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
                      match.group(1).strip()),
                  text)
    # Inline :math:`...` roles become $...$.
    inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
    text = re.sub(inline_math, r'$\1$', text)

    # warning/note admonitions become HTML alert boxes via directive_fun.
    directives = ('warning', 'note')
    for directive in directives:
        directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)'
                                  % directive, flags=re.M)
        text = re.sub(directive_re,
                      partial(directive_fun, directive=directive), text)

    # Drop RST link targets; Markdown has no equivalent.
    links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
    text = re.sub(links, '', text)

    # Downgrade :ref:`...` roles to plain backtick code spans.
    refs = re.compile(r':ref:`')
    text = re.sub(refs, '`', text)

    # Remove ``.. contents::`` directives (and their option lines) entirely.
    contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
                          flags=re.M)
    text = re.sub(contents, '', text)

    # ``.. image::`` directives become Markdown image syntax, keeping the
    # :alt: text when present and discarding the other options.
    images = re.compile(
        r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
        flags=re.M)
    text = re.sub(
        images, lambda match: '![{1}]({0})\n'.format(
            match.group(1).strip(), (match.group(2) or '').strip()), text)

    return text
python
{ "resource": "" }
q33864
jupyter_notebook
train
def jupyter_notebook(script_blocks, gallery_conf):
    """Build a Jupyter notebook dict from parsed script blocks.

    Parameters
    ----------
    script_blocks : list
        Script execution cells.
    gallery_conf : dict
        The sphinx-gallery configuration dictionary.
    """
    first_cell = gallery_conf.get("first_notebook_cell", "%matplotlib inline")
    notebook = jupyter_notebook_skeleton()
    # A configured value of None suppresses the leading setup cell entirely.
    if first_cell is not None:
        add_code_cell(notebook, first_cell)
    fill_notebook(notebook, script_blocks)
    return notebook
python
{ "resource": "" }
q33865
add_code_cell
train
def add_code_cell(work_notebook, code):
    """Append one code cell containing ``code`` to the notebook.

    Parameters
    ----------
    code : str
        Cell content
    """
    work_notebook["cells"].append({
        "cell_type": "code",
        "execution_count": None,
        "metadata": {"collapsed": False},
        "outputs": [],
        # nbformat stores cell sources as a list of strings.
        "source": [code.strip()],
    })
python
{ "resource": "" }
q33866
fill_notebook
train
def fill_notebook(work_notebook, script_blocks):
    """Append a notebook cell for every parsed script block.

    Parameters
    ----------
    script_blocks : list
        Each list element should be a tuple of (label, content, lineno).
    """
    for label, content, _lineno in script_blocks:
        if label == 'code':
            add_code_cell(work_notebook, content)
        else:
            # Text blocks become markdown; the trailing newline keeps
            # consecutive markdown cells separated when rendered.
            add_markdown_cell(work_notebook, content + '\n')
python
{ "resource": "" }
q33867
save_notebook
train
def save_notebook(work_notebook, write_file):
    """Serialize the notebook dict to ``write_file`` as indented JSON."""
    serialized = json.dumps(work_notebook, indent=2)
    with open(write_file, 'w') as handle:
        handle.write(serialized)
python
{ "resource": "" }
q33868
python_to_jupyter_cli
train
def python_to_jupyter_cli(args=None, namespace=None):
    """Exposes the jupyter notebook renderer to the command line

    Takes the same arguments as ArgumentParser.parse_args
    """
    from . import gen_gallery  # To avoid circular import
    parser = argparse.ArgumentParser(
        description='Sphinx-Gallery Notebook converter')
    parser.add_argument('python_src_file', nargs='+',
                        help='Input Python file script to convert. '
                        'Supports multiple files and shell wildcards'
                        ' (e.g. *.py)')
    args = parser.parse_args(args, namespace)

    for src_file in args.python_src_file:
        file_conf, blocks = split_code_and_text_blocks(src_file)
        print('Converting {0}'.format(src_file))
        # Deep-copy the default configuration so one file's conversion
        # cannot mutate the shared module-level dict for the next file.
        gallery_conf = copy.deepcopy(gen_gallery.DEFAULT_GALLERY_CONF)
        example_nb = jupyter_notebook(blocks, gallery_conf)
        save_notebook(example_nb, replace_py_ipynb(src_file))
python
{ "resource": "" }
q33869
_import_matplotlib
train
def _import_matplotlib(): """Import matplotlib safely.""" # make sure that the Agg backend is set before importing any # matplotlib import matplotlib matplotlib.use('agg') matplotlib_backend = matplotlib.get_backend().lower() if matplotlib_backend != 'agg': raise ValueError( "Sphinx-Gallery relies on the matplotlib 'agg' backend to " "render figures and write them to files. You are " "currently using the {} backend. Sphinx-Gallery will " "terminate the build now, because changing backends is " "not well supported by matplotlib. We advise you to move " "sphinx_gallery imports before any matplotlib-dependent " "import. Moving sphinx_gallery imports at the top of " "your conf.py file should fix this issue" .format(matplotlib_backend)) import matplotlib.pyplot as plt return matplotlib, plt
python
{ "resource": "" }
q33870
matplotlib_scraper
train
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
    """Scrape Matplotlib images.

    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery
    **kwargs : dict
        Additional keyword arguments to pass to
        :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
        The ``format`` kwarg in particular is used to set the file extension
        of the output file (currently only 'png' and 'svg' are supported).

    Returns
    -------
    rst : str
        The ReSTructuredText that will be rendered to HTML containing
        the images. This is often produced by :func:`figure_rst`.
    """
    matplotlib, plt = _import_matplotlib()
    image_path_iterator = block_vars['image_path_iterator']
    image_paths = list()
    # One output file per currently-open figure, consuming paths from the
    # shared iterator so numbering stays consistent across scrapers.
    for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
        if 'format' in kwargs:
            image_path = '%s.%s' % (os.path.splitext(image_path)[0],
                                    kwargs['format'])
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        fig = plt.figure(fig_num)
        # Forward non-default face/edge colors to savefig, unless the caller
        # already specified them explicitly in kwargs.
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr) and \
                    attr not in kwargs:
                kwargs[attr] = fig_attr
        fig.savefig(image_path, **kwargs)
        image_paths.append(image_path)
    # Close everything so figures from this block do not leak into the next.
    plt.close('all')
    return figure_rst(image_paths, gallery_conf['src_dir'])
python
{ "resource": "" }
q33871
mayavi_scraper
train
def mayavi_scraper(block, block_vars, gallery_conf):
    """Scrape Mayavi images.

    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    rst : str
        The ReSTructuredText that will be rendered to HTML containing
        the images. This is often produced by :func:`figure_rst`.
    """
    from mayavi import mlab
    image_path_iterator = block_vars['image_path_iterator']
    image_paths = list()
    e = mlab.get_engine()
    # One screenshot per open Mayavi scene, consuming output paths from the
    # shared iterator so numbering stays consistent across scrapers.
    for scene, image_path in zip(e.scenes, image_path_iterator):
        mlab.savefig(image_path, figure=scene)
        # make sure the image is not too large
        scale_image(image_path, image_path, 850, 999)
        image_paths.append(image_path)
    # Close all scenes so they do not leak into the next code block.
    mlab.close(all=True)
    return figure_rst(image_paths, gallery_conf['src_dir'])
python
{ "resource": "" }
q33872
_find_image_ext
train
def _find_image_ext(path, number=None):
    """Find an image, tolerant of different file extensions."""
    if number is not None:
        path = path.format(number)
    base = os.path.splitext(path)[0]
    # Try every known extension; fall back to 'png' when none exists on disk.
    for ext in _KNOWN_IMG_EXTS:
        candidate = '%s.%s' % (base, ext)
        if os.path.isfile(candidate):
            return candidate, ext
    return '%s.png' % (base,), 'png'
python
{ "resource": "" }
q33873
save_figures
train
def save_figures(block, block_vars, gallery_conf):
    """Save all open figures of the example code-block.

    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    images_rst : str
        rst code to embed the images in the document.
    """
    image_path_iterator = block_vars['image_path_iterator']
    all_rst = u''
    prev_count = len(image_path_iterator)
    for scraper in gallery_conf['image_scrapers']:
        rst = scraper(block, block_vars, gallery_conf)
        if not isinstance(rst, basestring):
            raise TypeError('rst from scraper %r was not a string, '
                            'got type %s:\n%r'
                            % (scraper, type(rst), rst))
        # NOTE(review): prev_count is fixed before the loop, so images from
        # earlier scrapers are re-verified for each later scraper — harmless
        # but possibly unintended; confirm against upstream.
        n_new = len(image_path_iterator) - prev_count
        for ii in range(n_new):
            current_path, _ = _find_image_ext(
                image_path_iterator.paths[prev_count + ii])
            if not os.path.isfile(current_path):
                raise RuntimeError('Scraper %s did not produce expected image:'
                                   '\n%s' % (scraper, current_path))
        all_rst += rst
    return all_rst
python
{ "resource": "" }
q33874
figure_rst
train
def figure_rst(figure_list, sources_dir):
    """Generate RST for a list of PNG filenames.

    Depending on whether we have one or more figures, we use a
    single rst call to 'image' or a horizontal list.

    Parameters
    ----------
    figure_list : list
        List of strings of the figures' absolute paths.
    sources_dir : str
        absolute path of Sphinx documentation sources

    Returns
    -------
    images_rst : str
        rst code to embed the images in the document
    """
    # Normalize every path: relative to the sources dir, forward slashes,
    # and no leading slash (RST image paths are doc-relative).
    rel_paths = []
    for figure_path in figure_list:
        rel = os.path.relpath(figure_path, sources_dir)
        rel_paths.append(rel.replace(os.sep, '/').lstrip('/'))

    if not rel_paths:
        return ""
    if len(rel_paths) == 1:
        return SINGLE_IMAGE % rel_paths[0]
    images_rst = HLIST_HEADER
    for rel in rel_paths:
        images_rst += HLIST_IMAGE_TEMPLATE % rel
    return images_rst
python
{ "resource": "" }
q33875
_reset_seaborn
train
def _reset_seaborn(gallery_conf, fname): """Reset seaborn.""" # Horrible code to 'unload' seaborn, so that it resets # its default when is load # Python does not support unloading of modules # https://bugs.python.org/issue9072 for module in list(sys.modules.keys()): if 'seaborn' in module: del sys.modules[module]
python
{ "resource": "" }
q33876
python_zip
train
def python_zip(file_list, gallery_path, extension='.py'):
    """Stores all files in file_list into an zip file

    Parameters
    ----------
    file_list : list
        Holds all the file names to be included in zip file
    gallery_path : str
        path to where the zipfile is stored
    extension : str
        '.py' or '.ipynb' In order to deal with downloads of python
        sources and jupyter notebooks the file extension from files in
        file_list will be removed and replace with the value of this
        variable while generating the zip file

    Returns
    -------
    zipname : str
        zip file name, written as `target_dir_{python,jupyter}.zip`
        depending on the extension
    """
    zipname = os.path.basename(os.path.normpath(gallery_path))
    zipname += '_python' if extension == '.py' else '_jupyter'
    zipname = os.path.join(gallery_path, zipname + '.zip')
    # Write to a '.new' sibling first; _replace_md5 swaps it in only when the
    # content changed, so unchanged archives keep their mtime.
    zipname_new = zipname + '.new'
    with zipfile.ZipFile(zipname_new, mode='w') as zipf:
        for fname in file_list:
            # Swap the extension so one file list serves both the .py and
            # the .ipynb archive.
            file_src = os.path.splitext(fname)[0] + extension
            zipf.write(file_src, os.path.relpath(file_src, gallery_path))
    _replace_md5(zipname_new)
    return zipname
python
{ "resource": "" }
q33877
list_downloadable_sources
train
def list_downloadable_sources(target_dir):
    """Return paths of all Python source files in ``target_dir``.

    Parameters
    ----------
    target_dir : str
        path to the directory where python source file are

    Returns
    -------
    list
        list of paths to all Python source files in `target_dir`
    """
    sources = []
    for entry in os.listdir(target_dir):
        if entry.endswith('.py'):
            sources.append(os.path.join(target_dir, entry))
    return sources
python
{ "resource": "" }
q33878
generate_zipfiles
train
def generate_zipfiles(gallery_dir):
    """
    Collects all Python source files and Jupyter notebooks in
    gallery_dir and makes zipfiles of them

    Parameters
    ----------
    gallery_dir : str
        path of the gallery to collect downloadable sources

    Return
    ------
    download_rst: str
        RestructuredText to include download buttons to the generated files
    """
    # Gather sources from the gallery root plus its immediate
    # sub-galleries (one level deep only).
    listdir = list_downloadable_sources(gallery_dir)
    for directory in sorted(os.listdir(gallery_dir)):
        if os.path.isdir(os.path.join(gallery_dir, directory)):
            target_dir = os.path.join(gallery_dir, directory)
            listdir.extend(list_downloadable_sources(target_dir))
    py_zipfile = python_zip(listdir, gallery_dir)
    jy_zipfile = python_zip(listdir, gallery_dir, ".ipynb")

    def rst_path(filepath):
        # RST paths always use forward slashes.
        return filepath.replace(os.sep, '/')

    dw_rst = CODE_ZIP_DOWNLOAD.format(os.path.basename(py_zipfile),
                                      rst_path(py_zipfile),
                                      os.path.basename(jy_zipfile),
                                      rst_path(jy_zipfile))
    return dw_rst
python
{ "resource": "" }
q33879
codestr2rst
train
def codestr2rst(codestr, lang='python', lineno=None):
    """Return reStructuredText code block from code string"""
    if lineno is not None:
        if LooseVersion(sphinx.__version__) >= '1.3':
            # Sphinx only starts numbering from the first non-empty line.
            blank_lines = codestr.count('\n', 0, -len(codestr.lstrip()))
            lineno = ' :lineno-start: {0}\n'.format(lineno + blank_lines)
        else:
            # Older Sphinx lacks :lineno-start:; fall back to plain numbering.
            lineno = ' :linenos:\n'
    else:
        lineno = ''
    code_directive = "\n.. code-block:: {0}\n{1}\n".format(lang, lineno)
    # Directive bodies must be indented relative to the directive line.
    indented_block = indent(codestr, ' ' * 4)
    return code_directive + indented_block
python
{ "resource": "" }
q33880
md5sum_is_current
train
def md5sum_is_current(src_file):
    """Check whether ``src_file`` matches the digest stored in its
    ``.md5`` sibling file (False when no digest has been recorded)."""
    actual = get_md5sum(src_file)
    checksum_path = src_file + '.md5'
    if os.path.exists(checksum_path):
        with open(checksum_path, 'r') as fid:
            recorded = fid.read()
        return actual == recorded
    return False
python
{ "resource": "" }
q33881
save_thumbnail
train
def save_thumbnail(image_path_template, src_file, file_conf, gallery_conf): """Generate and Save the thumbnail image Parameters ---------- image_path_template : str holds the template where to save and how to name the image src_file : str path to source python file gallery_conf : dict Sphinx-Gallery configuration dictionary """ # read specification of the figure to display as thumbnail from main text thumbnail_number = file_conf.get('thumbnail_number', 1) if not isinstance(thumbnail_number, int): raise TypeError( 'sphinx_gallery_thumbnail_number setting is not a number, ' 'got %r' % (thumbnail_number,)) thumbnail_image_path, ext = _find_image_ext(image_path_template, thumbnail_number) thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb') if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) base_image_name = os.path.splitext(os.path.basename(src_file))[0] thumb_file = os.path.join(thumb_dir, 'sphx_glr_%s_thumb.%s' % (base_image_name, ext)) if src_file in gallery_conf['failing_examples']: img = os.path.join(glr_path_static(), 'broken_example.png') elif os.path.exists(thumbnail_image_path): img = thumbnail_image_path elif not os.path.exists(thumb_file): # create something to replace the thumbnail img = os.path.join(glr_path_static(), 'no_image.png') img = gallery_conf.get("default_thumb_file", img) else: return if ext == 'svg': copyfile(img, thumb_file) else: scale_image(img, thumb_file, *gallery_conf["thumbnail_size"])
python
{ "resource": "" }
q33882
_memory_usage
train
def _memory_usage(func, gallery_conf): """Get memory usage of a function call.""" if gallery_conf['show_memory']: from memory_profiler import memory_usage assert callable(func) mem, out = memory_usage(func, max_usage=True, retval=True, multiprocess=True) mem = mem[0] else: out = func() mem = 0 return out, mem
python
{ "resource": "" }
q33883
_get_memory_base
train
def _get_memory_base(gallery_conf):
    """Get the base amount of memory used by running a Python process."""
    if not gallery_conf['show_memory']:
        memory_base = 0
    else:
        # There might be a cleaner way to do this at some point
        from memory_profiler import memory_usage
        # NOTE(review): Windows gets a longer sleep/timeout, presumably
        # because process startup is slower there — confirm.
        sleep, timeout = (1, 2) if sys.platform == 'win32' else (0.5, 1)
        proc = subprocess.Popen(
            [sys.executable, '-c',
             'import time, sys; time.sleep(%s); sys.exit(0)' % sleep],
            close_fds=True)
        memories = memory_usage(proc, interval=1e-3, timeout=timeout)
        kwargs = dict(timeout=timeout) if sys.version_info >= (3, 5) else {}
        proc.communicate(**kwargs)
        # On OSX sometimes the last entry can be None
        memories = [mem for mem in memories if mem is not None] + [0.]
        memory_base = max(memories)
    return memory_base
python
{ "resource": "" }
q33884
execute_code_block
train
def execute_code_block(compiler, block, example_globals,
                       script_vars, gallery_conf):
    """Executes the code block of the example file"""
    blabel, bcontent, lineno = block
    # If example is not suitable to run, skip executing its blocks
    if not script_vars['execute_script'] or blabel == 'text':
        # Keep memory_delta aligned with the number of blocks.
        script_vars['memory_delta'].append(0)
        return ''

    cwd = os.getcwd()
    # Redirect output to stdout and
    orig_stdout = sys.stdout
    src_file = script_vars['src_file']

    # First cd in the original example dir, so that any file
    # created by the example get created in this directory
    my_stdout = MixedEncodingStringIO()
    os.chdir(os.path.dirname(src_file))

    sys_path = copy.deepcopy(sys.path)
    sys.path.append(os.getcwd())
    sys.stdout = LoggingTee(my_stdout, logger, src_file)

    try:
        dont_inherit = 1
        # Compile to an AST first so line numbers can be shifted to match
        # the block's position in the original source file.
        code_ast = compile(bcontent, src_file, 'exec',
                           ast.PyCF_ONLY_AST | compiler.flags, dont_inherit)
        ast.increment_lineno(code_ast, lineno - 1)
        # don't use unicode_literals at the top of this file or you get
        # nasty errors here on Py2.7
        _, mem = _memory_usage(_exec_once(
            compiler(code_ast, src_file, 'exec'), example_globals),
            gallery_conf)
    except Exception:
        sys.stdout.flush()
        sys.stdout = orig_stdout
        except_rst = handle_exception(sys.exc_info(), src_file, script_vars,
                                      gallery_conf)
        # python2.7: Code was read in bytes needs decoding to utf-8
        # unless future unicode_literals is imported in source which
        # make ast output unicode strings
        if hasattr(except_rst, 'decode') and not \
                isinstance(except_rst, unicode):
            except_rst = except_rst.decode('utf-8')
        code_output = u"\n{0}\n\n\n\n".format(except_rst)
        # still call this even though we won't use the images so that
        # figures are closed
        save_figures(block, script_vars, gallery_conf)
        mem = 0
    else:
        sys.stdout.flush()
        sys.stdout = orig_stdout
        sys.path = sys_path
        os.chdir(cwd)

        my_stdout = my_stdout.getvalue().strip().expandtabs()
        if my_stdout:
            stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4))
        else:
            stdout = ''
        images_rst = save_figures(block, script_vars, gallery_conf)

        code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout)
    finally:
        # Runs after both branches; restoring here again is idempotent and
        # guarantees cwd/sys.path/stdout are reset even on failure.
        os.chdir(cwd)
        sys.path = sys_path
        sys.stdout = orig_stdout

    script_vars['memory_delta'].append(mem)
    return code_output
python
{ "resource": "" }
q33885
executable_script
train
def executable_script(src_file, gallery_conf):
    """Validate if script has to be run according to gallery configuration

    Parameters
    ----------
    src_file : str
        path to python script
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    bool
        True if script has to be executed
    """
    filename_pattern = gallery_conf.get('filename_pattern')
    # Bug fix: previously the raw ``re.search`` result (a match object or
    # None) leaked out; coerce to a real bool to match the documented
    # return type. Truthiness is unchanged, so callers are unaffected.
    return bool(re.search(filename_pattern, src_file)
                and gallery_conf['plot_gallery'])
python
{ "resource": "" }
q33886
execute_script
train
def execute_script(script_blocks, script_vars, gallery_conf):
    """Execute and capture output from python script already in block structure

    Parameters
    ----------
    script_blocks : list
        (label, content, line_number)
        List where each element is a tuple with the label ('text' or 'code'),
        the corresponding content string of block and the leading line number
    script_vars : dict
        Configuration and run time variables
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery

    Returns
    -------
    output_blocks : list
        List of strings where each element is the restructured text
        representation of the output of each block
    time_elapsed : float
        Time elapsed during execution
    """
    example_globals = {
        # A lot of examples contains 'print(__doc__)' for example in
        # scikit-learn so that running the example prints some useful
        # information. Because the docstring has been separated from
        # the code blocks in sphinx-gallery, __doc__ is actually
        # __builtin__.__doc__ in the execution context and we do not
        # want to print it
        '__doc__': '',
        # Examples may contain if __name__ == '__main__' guards
        # for in example scikit-learn if the example uses multiprocessing
        '__name__': '__main__',
        # Don't ever support __file__: Issues #166 #212
    }

    # Save argv so the example cannot permanently clobber it.
    argv_orig = sys.argv[:]
    if script_vars['execute_script']:
        # We want to run the example without arguments. See
        # https://github.com/sphinx-gallery/sphinx-gallery/pull/252
        # for more details.
        sys.argv[0] = script_vars['src_file']
        sys.argv[1:] = []

    t_start = time()
    gc.collect()
    _, memory_start = _memory_usage(lambda: None, gallery_conf)
    compiler = codeop.Compile()
    # include at least one entry to avoid max() ever failing
    script_vars['memory_delta'] = [memory_start]
    output_blocks = [execute_code_block(compiler, block,
                                        example_globals,
                                        script_vars, gallery_conf)
                     for block in script_blocks]
    time_elapsed = time() - t_start
    script_vars['memory_delta'] = (  # actually turn it into a delta now
        max(script_vars['memory_delta']) - memory_start)

    sys.argv = argv_orig

    # Write md5 checksum if the example was meant to run (no-plot
    # shall not cache md5sum) and has built correctly
    if script_vars['execute_script']:
        with open(script_vars['target_file'] + '.md5', 'w') as file_checksum:
            file_checksum.write(get_md5sum(script_vars['target_file']))
        gallery_conf['passing_examples'].append(script_vars['src_file'])

    return output_blocks, time_elapsed
python
{ "resource": "" }
q33887
rst_blocks
train
def rst_blocks(script_blocks, output_blocks, file_conf, gallery_conf): """Generates the rst string containing the script prose, code and output Parameters ---------- script_blocks : list (label, content, line_number) List where each element is a tuple with the label ('text' or 'code'), the corresponding content string of block and the leading line number output_blocks : list List of strings where each element is the restructured text representation of the output of each block file_conf : dict File-specific settings given in source file comments as: ``# sphinx_gallery_<name> = <value>`` gallery_conf : dict Contains the configuration of Sphinx-Gallery Returns ------- out : str rst notebook """ # A simple example has two blocks: one for the # example introduction/explanation and one for the code is_example_notebook_like = len(script_blocks) > 2 example_rst = u"" # there can be unicode content for (blabel, bcontent, lineno), code_output in \ zip(script_blocks, output_blocks): if blabel == 'code': if not file_conf.get('line_numbers', gallery_conf.get('line_numbers', False)): lineno = None code_rst = codestr2rst(bcontent, lang=gallery_conf['lang'], lineno=lineno) + '\n' if is_example_notebook_like: example_rst += code_rst example_rst += code_output else: example_rst += code_output if 'sphx-glr-script-out' in code_output: # Add some vertical space after output example_rst += "\n\n|\n\n" example_rst += code_rst else: block_separator = '\n\n' if not bcontent.endswith('\n') else '\n' example_rst += bcontent + block_separator return example_rst
python
{ "resource": "" }
q33888
save_rst_example
train
def save_rst_example(example_rst, example_file, time_elapsed,
                     memory_used, gallery_conf):
    """Saves the rst notebook to example_file including header & footer

    Parameters
    ----------
    example_rst : str
        rst containing the executed file content
    example_file : str
        Filename with full path of python example file in documentation folder
    time_elapsed : float
        Time elapsed in seconds while executing file
    memory_used : float
        Additional memory used during the run.
    gallery_conf : dict
        Sphinx-Gallery configuration dictionary
    """
    # The cross-reference label is the doc-relative path with separators
    # replaced, making it unique across galleries.
    ref_fname = os.path.relpath(example_file, gallery_conf['src_dir'])
    ref_fname = ref_fname.replace(os.path.sep, "_")

    binder_conf = check_binder_conf(gallery_conf.get('binder'))

    binder_text = (" or run this example in your browser via Binder"
                   if len(binder_conf) else "")
    example_rst = (".. note::\n"
                   " :class: sphx-glr-download-link-note\n\n"
                   " Click :ref:`here <sphx_glr_download_{0}>` "
                   "to download the full example code{1}\n"
                   ".. rst-class:: sphx-glr-example-title\n\n"
                   ".. _sphx_glr_{0}:\n\n"
                   ).format(ref_fname, binder_text) + example_rst

    # Only report timing above the configured threshold to avoid noise.
    if time_elapsed >= gallery_conf["min_reported_time"]:
        time_m, time_s = divmod(time_elapsed, 60)
        example_rst += TIMING_CONTENT.format(time_m, time_s)
    if gallery_conf['show_memory']:
        example_rst += ("**Estimated memory usage:** {0: .0f} MB\n\n"
                        .format(memory_used))

    # Generate a binder URL if specified
    binder_badge_rst = ''
    if len(binder_conf) > 0:
        binder_badge_rst += gen_binder_rst(example_file, binder_conf,
                                           gallery_conf)

    fname = os.path.basename(example_file)
    example_rst += CODE_DOWNLOAD.format(fname,
                                        replace_py_ipynb(fname),
                                        binder_badge_rst,
                                        ref_fname)
    example_rst += SPHX_GLR_SIG

    # Write to a '.new' sibling first; _replace_md5 swaps it in only when
    # the content actually changed.
    write_file_new = re.sub(r'\.py$', '.rst.new', example_file)
    with codecs.open(write_file_new, 'w', encoding="utf-8") as f:
        f.write(example_rst)
    # in case it wasn't in our pattern, only replace the file if it's
    # still stale.
    _replace_md5(write_file_new)
python
{ "resource": "" }
q33889
get_data
train
def get_data(url, gallery_dir):
    """Persistent dictionary usage to retrieve the search indexes

    Results of ``_get_data(url)`` are cached in a :mod:`shelve` database
    stored under ``gallery_dir`` so repeated builds avoid re-fetching.
    """
    # shelve keys need to be str in python 2
    if sys.version_info[0] == 2 and isinstance(url, unicode):
        url = url.encode('utf-8')

    cached_file = os.path.join(gallery_dir, 'searchindex')
    search_index = shelve.open(cached_file)
    try:
        if url in search_index:
            data = search_index[url]
        else:
            data = _get_data(url)
            search_index[url] = data
    finally:
        # Bug fix: previously the shelve handle leaked when _get_data (or
        # the lookup) raised; always close it.
        search_index.close()

    return data
python
{ "resource": "" }
q33890
parse_sphinx_docopts
train
def parse_sphinx_docopts(index):
    """
    Parse the Sphinx index for documentation options.

    Parameters
    ----------
    index : str
        The Sphinx index page

    Returns
    -------
    docopts : dict
        The documentation options from the page.
    """
    # Locate the JavaScript object literal assigned to DOCUMENTATION_OPTIONS.
    marker = index.find('var DOCUMENTATION_OPTIONS')
    if marker < 0:
        raise ValueError(
            'Documentation options could not be found in index.')
    start = index.find('{', marker)
    if start < 0:
        raise ValueError(
            'Documentation options could not be found in index.')
    end = index.find('};', start)
    if end < 0:
        raise ValueError(
            'Documentation options could not be found in index.')

    docopts = {}
    for line in index[start + 1:end].strip().splitlines():
        key, value = line.split(':', 1)
        key = key.strip().strip('"')
        value = value.strip()
        if value[-1] == ',':
            value = value[:-1].rstrip()
        # Decode the JavaScript literal: quoted string, boolean, or int.
        if value[0] in '"\'':
            value = value[1:-1]
        elif value == 'false':
            value = False
        elif value == 'true':
            value = True
        else:
            try:
                value = int(value)
            except ValueError:
                # In Sphinx 1.7.5, URL_ROOT is a JavaScript fragment.
                # Ignoring this entry since URL_ROOT is not used
                # elsewhere.
                # https://github.com/sphinx-gallery/sphinx-gallery/issues/382
                continue
        docopts[key] = value
    return docopts
python
{ "resource": "" }
q33891
embed_code_links
train
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code"""
    # Skip entirely if the build already failed.
    if exception is not None:
        return

    # No need to waste time embedding hyperlinks when not running the examples
    # XXX: also at the time of writing this fixes make html-noplot
    # for some reason I don't fully understand
    if not app.builder.config.plot_gallery:
        return

    # XXX: Whitelist of builders for which it makes sense to embed
    # hyperlinks inside the example html. Note that the link embedding
    # require searchindex.js to exist for the links to the local doc
    # and there does not seem to be a good way of knowing which
    # builders creates a searchindex.js.
    if app.builder.name not in ['html', 'readthedocs']:
        return

    logger.info('embedding documentation hyperlinks...', color='white')

    gallery_conf = app.config.sphinx_gallery_conf

    gallery_dirs = gallery_conf['gallery_dirs']
    # The config accepts either a single directory or a list of them.
    if not isinstance(gallery_dirs, list):
        gallery_dirs = [gallery_dirs]

    for gallery_dir in gallery_dirs:
        _embed_code_links(app, gallery_conf, gallery_dir)
python
{ "resource": "" }
q33892
SphinxDocLinkResolver._get_link
train
def _get_link(self, cobj):
    """Get a valid link, False if not found"""
    fullname = cobj['module_short'] + '.' + cobj['name']
    try:
        # NOTE(review): searchindex 'objects' appears to map
        # module -> name -> (file_idx, objtype_idx, prio?, anchor);
        # confirm against the Sphinx searchindex format.
        value = self._searchindex['objects'][cobj['module_short']]
        match = value[cobj['name']]
    except KeyError:
        link = False
    else:
        fname_idx = match[0]
        objname_idx = str(match[1])
        anchor = match[3]

        fname = self._searchindex['filenames'][fname_idx]
        # In 1.5+ Sphinx seems to have changed from .rst.html to only
        # .html extension in converted files. Find this from the options.
        ext = self._docopts.get('FILE_SUFFIX', '.rst.html')
        fname = os.path.splitext(fname)[0] + ext
        if self._is_windows:
            fname = fname.replace('/', '\\')
            link = os.path.join(self.doc_url, fname)
        else:
            link = posixpath.join(self.doc_url, fname)

        # Anchor encoding: '' means "use the dotted name", '-' means
        # "prefix with the object-type name".
        if anchor == '':
            anchor = fullname
        elif anchor == '-':
            anchor = (self._searchindex['objnames'][objname_idx][1] + '-' +
                      fullname)

        link = link + '#' + anchor

    return link
python
{ "resource": "" }
q33893
SphinxDocLinkResolver.resolve
train
def resolve(self, cobj, this_url):
    """Resolve the link to the documentation, returns None if not found

    Parameters
    ----------
    cobj : dict
        Dict with information about the "code object" for which we are
        resolving a link.
        cobj['name'] : function or class name (str)
        cobj['module_short'] : shortened module name (str)
        cobj['module'] : module name (str)
    this_url: str
        URL of the current page. Needed to construct relative URLs
        (only used if relative=True in constructor).

    Returns
    -------
    link : str or None
        The link (URL) to the documentation.
    """
    full_name = cobj['module_short'] + '.' + cobj['name']
    link = self._link_cache.get(full_name, None)
    if link is None:
        # we don't have it cached
        link = self._get_link(cobj)
        # cache it for the future; False is cached too, marking a name as
        # known-unresolvable so we don't retry it.
        self._link_cache[full_name] = link

    if link is False or link is None:
        # failed to resolve
        return None

    if self.relative:
        link = os.path.relpath(link, start=this_url)
        if self._is_windows:
            # replace '\' with '/' so it on the web
            link = link.replace('\\', '/')

        # for some reason, the relative link goes one directory too high up
        link = link[3:]

    return link
python
{ "resource": "" }
q33894
glr_path_static
train
def glr_path_static():
    """Return the absolute path to Sphinx-Gallery's packaged static files."""
    package_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(package_dir, '_static'))
python
{ "resource": "" }
q33895
gen_binder_url
train
def gen_binder_url(fpath, binder_conf, gallery_conf):
    """Generate a Binder URL according to the configuration in conf.py.

    Parameters
    ----------
    fpath: str
        The path to the `.py` file for which a Binder badge will be
        generated.
    binder_conf: dict or None
        The Binder configuration dictionary. See `gen_binder_rst` for
        details.
    gallery_conf : dict
        Sphinx-Gallery configuration; ``gallery_conf['src_dir']`` is used
        to compute the notebook path relative to the sources.

    Returns
    -------
    binder_url : str
        A URL that can be used to direct the user to the live Binder
        environment.
    """
    # Build the URL
    fpath_prefix = binder_conf.get('filepath_prefix')
    link_base = binder_conf.get('notebooks_dir')

    # We want to keep the relative path to sub-folders
    relative_link = os.path.relpath(fpath, gallery_conf['src_dir'])
    path_link = os.path.join(
        link_base, replace_py_ipynb(relative_link))

    # In case our website is hosted in a sub-folder
    if fpath_prefix is not None:
        path_link = '/'.join([fpath_prefix.strip('/'), path_link])

    # Make sure we have the right slashes (in case we're on Windows)
    path_link = path_link.replace(os.path.sep, '/')

    # Create the URL (fixed: a previous redundant assignment of
    # binder_conf['binderhub_url'] to binder_url was dead code and removed)
    binder_url = '/'.join([binder_conf['binderhub_url'],
                           'v2', 'gh',
                           binder_conf['org'],
                           binder_conf['repo'],
                           binder_conf['branch']])

    if binder_conf.get('use_jupyter_lab', False) is True:
        binder_url += '?urlpath=lab/tree/{}'.format(path_link)
    else:
        binder_url += '?filepath={}'.format(path_link)
    return binder_url
python
{ "resource": "" }
q33896
gen_binder_rst
train
def gen_binder_rst(fpath, binder_conf, gallery_conf):
    """Generate the RST + link for the Binder badge.

    Parameters
    ----------
    fpath: str
        The path to the `.py` file for which a Binder badge will be
        generated.
    binder_conf: dict or None
        If a dictionary it must have the following keys:

        'binderhub_url'
            The URL of the BinderHub instance that's running a Binder
            service.
        'org'
            The GitHub organization to which the documentation will be
            pushed.
        'repo'
            The GitHub repository to which the documentation will be
            pushed.
        'branch'
            The Git branch on which the documentation exists (e.g.,
            gh-pages).
        'dependencies'
            A list of paths to dependency files that match the
            Binderspec.

    Returns
    -------
    rst : str
        The reStructuredText for the Binder badge that links to this
        file.
    """
    conf = check_binder_conf(binder_conf)
    url = gen_binder_url(fpath, conf, gallery_conf)
    badge = (
        "\n"
        " .. container:: binder-badge\n\n"
        " .. image:: https://mybinder.org/badge_logo.svg\n"
        " :target: {}\n"
        " :width: 150 px\n")
    return badge.format(url)
python
{ "resource": "" }
q33897
copy_binder_files
train
def copy_binder_files(app, exception):
    """Copy all Binder requirements and notebooks files."""
    # Bail out on build failures and on builders that don't produce HTML.
    if exception is not None:
        return
    if app.builder.name not in ('html', 'readthedocs'):
        return

    gallery_conf = app.config.sphinx_gallery_conf
    binder_conf = check_binder_conf(gallery_conf.get('binder'))
    # Nothing to do when Binder integration is not configured.
    if not binder_conf:
        return

    logger.info('copying binder requirements...', color='white')
    _copy_binder_reqs(app, binder_conf)
    _copy_binder_notebooks(app)
python
{ "resource": "" }
q33898
_copy_binder_reqs
train
def _copy_binder_reqs(app, binder_conf): """Copy Binder requirements files to a "binder" folder in the docs.""" path_reqs = binder_conf.get('dependencies') for path in path_reqs: if not os.path.exists(os.path.join(app.srcdir, path)): raise ValueError(("Couldn't find the Binder requirements file: {}, " "did you specify the path correctly?".format(path))) binder_folder = os.path.join(app.outdir, 'binder') if not os.path.isdir(binder_folder): os.makedirs(binder_folder) # Copy over the requirements to the output directory for path in path_reqs: shutil.copy(os.path.join(app.srcdir, path), binder_folder)
python
{ "resource": "" }
q33899
_remove_ipynb_files
train
def _remove_ipynb_files(path, contents): """Given a list of files in `contents`, remove all files named `ipynb` or directories named `images` and return the result. Used with the `shutil` "ignore" keyword to filter out non-ipynb files.""" contents_return = [] for entry in contents: if entry.endswith('.ipynb'): # Don't include ipynb files pass elif (entry != "images") and os.path.isdir(os.path.join(path, entry)): # Don't include folders not called "images" pass else: # Keep everything else contents_return.append(entry) return contents_return
python
{ "resource": "" }