_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q31200
normalize
train
def normalize(dt, tz):
    """Return *dt* normalized to the timezone *tz*.

    *tz* may be either a ``tzinfo`` instance or a timezone name; a name
    is resolved through ``pytz.timezone`` first. The localized datetime
    is then normalized to the target timezone (fixing the UTC offset,
    which matters around DST transitions with pytz zones).
    """
    zone = tz if isinstance(tz, tzinfo) else pytz.timezone(tz)
    return zone.normalize(dt)
python
{ "resource": "" }
q31201
Delorean.shift
train
def shift(self, timezone):
    """
    Shifts the timezone from the current timezone to the specified
    timezone associated with the Delorean object, modifying the
    Delorean object and returning the modified object.

    .. testsetup::

        from datetime import datetime
        from delorean import Delorean

    .. doctest::

        >>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
        >>> d.shift('UTC')
        Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')

    :param timezone: a timezone name accepted by ``pytz.timezone``
    :raises DeloreanInvalidTimezone: if the name is not a known timezone
    """
    try:
        self._tzinfo = pytz.timezone(timezone)
    except pytz.UnknownTimeZoneError:
        raise DeloreanInvalidTimezone('Provide a valid timezone')
    # normalize() corrects the UTC offset after astimezone(), which is
    # required with pytz timezones around DST transitions
    self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
    self._tzinfo = self._dt.tzinfo
    return self
python
{ "resource": "" }
q31202
Delorean.epoch
train
def epoch(self):
    """
    Returns the total seconds since epoch associated with
    the Delorean object.

    .. testsetup::

        from datetime import datetime
        from delorean import Delorean

    .. doctest::

        >>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
        >>> d.epoch
        1420099200.0
    """
    # Unix epoch as an aware UTC datetime
    epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
    # convert the stored datetime to UTC before taking the difference
    now_sec = pytz.utc.normalize(self._dt)
    delta_sec = now_sec - epoch_sec
    return get_total_second(delta_sec)
python
{ "resource": "" }
q31203
Delorean.replace
train
def replace(self, **kwargs):
    """
    Returns a new Delorean object after applying replace on the
    existing datetime object.

    .. testsetup::

        from datetime import datetime
        from delorean import Delorean

    .. doctest::

        >>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
        >>> d.replace(hour=8)
        Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
    """
    replaced_dt = self._dt.replace(**kwargs)
    return Delorean(datetime=replaced_dt, timezone=self.timezone)
python
{ "resource": "" }
q31204
Delorean.format_datetime
train
def format_datetime(self, format='medium', locale='en_US'):
    """
    Return a date string formatted to the given pattern.

    .. testsetup::

        from delorean import Delorean

    .. doctest::

        >>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
        >>> d.format_datetime(locale='en_US')
        u'Jan 1, 2015, 12:30:00 PM'

        >>> d.format_datetime(format='long', locale='de_DE')
        u'1. Januar 2015 12:30:00 -0800'

    :param format: one of "full", "long", "medium", "short", or a custom datetime pattern
    :param locale: a locale identifier
    """
    # delegates to the module-level format_datetime helper (presumably
    # babel.dates.format_datetime -- TODO confirm against imports)
    return format_datetime(self._dt, format=format, locale=locale)
python
{ "resource": "" }
q31205
mask_phone_number
train
def mask_phone_number(number):
    """Mask a phone number so only the first 3 and last 2 digits are visible.

    Example: ``+31 * ******58``

    :param number: str or phonenumber object
    :return: str
    """
    text = (format_phone_number(number)
            if isinstance(number, phonenumbers.PhoneNumber)
            else number)
    return phone_mask.sub('*', text)
python
{ "resource": "" }
q31206
PhoneDevice.generate_challenge
train
def generate_challenge(self):
    """
    Sends the current TOTP token to `self.number` using `self.method`.
    """
    # BUGFIX: the docstring above was previously placed *after* the local
    # import, making it a dead string expression rather than a docstring.
    # local import to avoid circular import
    from two_factor.utils import totp_digits

    no_digits = totp_digits()
    token = str(totp(self.bin_key, digits=no_digits)).zfill(no_digits)
    if self.method == 'call':
        make_call(device=self, token=token)
    else:
        send_sms(device=self, token=token)
python
{ "resource": "" }
q31207
AdminSiteOTPRequiredMixin.login
train
def login(self, request, extra_context=None):
    """
    Redirects to the site login page for the given HttpRequest.
    """
    redirect_to = request.POST.get(REDIRECT_FIELD_NAME,
                                   request.GET.get(REDIRECT_FIELD_NAME))
    # fall back to the configured post-login URL when the requested
    # target is missing or unsafe (open-redirect protection)
    target_is_safe = redirect_to and is_safe_url(
        url=redirect_to, allowed_hosts=[request.get_host()])
    if not target_is_safe:
        redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
    return redirect_to_login(redirect_to)
python
{ "resource": "" }
q31208
class_view_decorator
train
def class_view_decorator(function_decorator):
    """Convert a function-based decorator into a class-based-view decorator.

    We can't subclass `View` as that breaks inheritance (`super` in
    particular), so `dispatch` is monkey-patched instead.
    From: http://stackoverflow.com/a/8429311/58107
    """
    def apply_to_view(View):
        wrapped = method_decorator(function_decorator)
        View.dispatch = wrapped(View.dispatch)
        return View
    return apply_to_view
python
{ "resource": "" }
q31209
IdempotentSessionWizardView.is_step_visible
train
def is_step_visible(self, step):
    """
    Return whether the given `step` should be included in the wizard;
    it is included if either the form is idempotent or not filled in
    before.
    """
    idempotent = self.idempotent_dict.get(step, True)
    already_validated = step in self.storage.validated_step_data
    return idempotent or not already_validated
python
{ "resource": "" }
q31210
IdempotentSessionWizardView.post
train
def post(self, *args, **kwargs):
    """
    Check if the current step is still available. It might not be if
    conditions have changed.
    """
    # Bail out to the last valid step if the current one disappeared
    # (e.g. a condition toggled it off) before doing any processing.
    if self.steps.current not in self.steps.all:
        logger.warning("Current step '%s' is no longer valid, returning "
                       "to last valid step in the wizard.",
                       self.steps.current)
        return self.render_goto_step(self.steps.all[-1])

    # -- Duplicated code from upstream
    # Look for a wizard_goto_step element in the posted data which
    # contains a valid step name. If one was found, render the requested
    # form. (This makes stepping back a lot easier).
    wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
    if wizard_goto_step and wizard_goto_step in self.get_form_list():
        return self.render_goto_step(wizard_goto_step)

    # Check if form was refreshed
    management_form = ManagementForm(self.request.POST, prefix=self.prefix)
    if not management_form.is_valid():
        raise SuspiciousOperation(_('ManagementForm data is missing or has been tampered with'))

    form_current_step = management_form.cleaned_data['current_step']
    if (form_current_step != self.steps.current and
            self.storage.current_step is not None):
        # form refreshed, change current step
        self.storage.current_step = form_current_step
    # -- End duplicated code from upstream

    # This is different from the first check, as this checks
    # if the new step is available. See issue #65.
    if self.steps.current not in self.steps.all:
        logger.warning("Requested step '%s' is no longer valid, returning "
                       "to last valid step in the wizard.",
                       self.steps.current)
        return self.render_goto_step(self.steps.all[-1])

    return super(IdempotentSessionWizardView, self).post(*args, **kwargs)
python
{ "resource": "" }
q31211
IdempotentSessionWizardView.process_step
train
def process_step(self, form):
    """
    Stores the validated data for `form` and cleans out validated forms
    for next steps, as those might be affected by the current step. Note
    that this behaviour is relied upon by the `LoginView` to prevent users
    from bypassing the `TokenForm` by going steps back and changing
    credentials.
    """
    step = self.steps.current

    # If the form is not-idempotent (cannot be validated multiple times),
    # the cleaned data should be stored; marking the form as validated.
    self.storage.validated_step_data[step] = form.cleaned_data

    # It is assumed that earlier steps affect later steps; so even though
    # those forms might not be idempotent, we'll remove the validated data
    # to force re-entry.
    # form_list = self.get_form_list(idempotent=False)
    form_list = self.get_form_list()
    keys = list(form_list.keys())
    key = keys.index(step) + 1
    # drop cached validation for every step *after* the current one
    for next_step in keys[key:]:
        self.storage.validated_step_data.pop(next_step, None)

    return super(IdempotentSessionWizardView, self).process_step(form)
python
{ "resource": "" }
q31212
download
train
def download(query, num_results): """ downloads HTML after google search """ # https://stackoverflow.com/questions/11818362/how-to-deal-with-unicode-string-in-url-in-python3 name = quote(query) name = name.replace(' ','+') url = 'http://www.google.com/search?q=' + name if num_results != 10: url += '&num=' + str(num_results) # adding this param might hint Google towards a bot req = request.Request(url, headers={ 'User-Agent' : choice(user_agents), # 'Referer': 'google.com' }) try: response = request.urlopen(req) except Exception: # catch connection issues # may also catch 503 rate limit exceed print('ERROR\n') traceback.print_exc() return '' # response.read is bytes in Py 3 if isPython2: # trick: decode unicode as early as possible data = response.read().decode('utf8', errors='ignore') else: data = str(response.read(), 'utf-8', errors='ignore') # print(data) return data
python
{ "resource": "" }
q31213
convert_unicode
train
def convert_unicode(text):
    """ converts unicode HTML to real Unicode """
    if isPython2:
        h = HTMLParser()
        s = h.unescape(text)
    else:
        try:
            # html.unescape, available on Python 3.4+
            s = unescape(text)
        except Exception:
            # Python 3.3 and below
            # https://stackoverflow.com/a/2360639/2295672
            s = HTMLParser().unescape(text)
    return s
python
{ "resource": "" }
q31214
permission_required
train
def permission_required(perm, login_url=None):
    """Replacement for django.contrib.auth.decorators.permission_required
    that returns 403 Forbidden if the user is already logged in.
    """
    def has_permission(user):
        return user.has_perm(perm)
    return user_passes_test(has_permission, login_url=login_url)
python
{ "resource": "" }
q31215
CASBackend.authenticate
train
def authenticate(self, request, ticket, service):
    """Verifies CAS ticket and gets or creates User object.

    Returns the authenticated user, or None when the ticket, username or
    attribute checks fail.
    """
    client = get_cas_client(service_url=service, request=request)
    username, attributes, pgtiou = client.verify_ticket(ticket)
    if attributes and request:
        request.session['attributes'] = attributes

    # Optionally take the username from a CAS attribute rather than the
    # ticket's uid (not applicable to the SAML protocol variant).
    if settings.CAS_USERNAME_ATTRIBUTE != 'uid' and settings.CAS_VERSION != 'CAS_2_SAML_1_0':
        if attributes:
            username = attributes.get(settings.CAS_USERNAME_ATTRIBUTE)
        else:
            return None

    if not username:
        return None
    user = None
    username = self.clean_username(username)

    if attributes:
        reject = self.bad_attributes_reject(request, username, attributes)
        if reject:
            return None

        # If we can, we rename the attributes as described in the settings file
        # Existing attributes will be overwritten
        for cas_attr_name, req_attr_name in settings.CAS_RENAME_ATTRIBUTES.items():
            if cas_attr_name in attributes and cas_attr_name is not req_attr_name:
                attributes[req_attr_name] = attributes[cas_attr_name]
                attributes.pop(cas_attr_name)

    UserModel = get_user_model()

    # Note that this could be accomplished in one try-except clause, but
    # instead we use get_or_create when creating unknown users since it has
    # built-in safeguards for multiple threads.
    if settings.CAS_CREATE_USER:
        user_kwargs = {
            UserModel.USERNAME_FIELD: username
        }
        if settings.CAS_CREATE_USER_WITH_ID:
            user_kwargs['id'] = self.get_user_id(attributes)

        user, created = UserModel._default_manager.get_or_create(**user_kwargs)
        if created:
            user = self.configure_user(user)
    else:
        created = False
        try:
            if settings.CAS_LOCAL_NAME_FIELD:
                user_kwargs = {
                    settings.CAS_LOCAL_NAME_FIELD: username
                }
                user = UserModel._default_manager.get(**user_kwargs)
            else:
                user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            pass

    if not self.user_can_authenticate(user):
        return None

    if pgtiou and settings.CAS_PROXY_CALLBACK and request:
        request.session['pgtiou'] = pgtiou

    if settings.CAS_APPLY_ATTRIBUTES_TO_USER and attributes:
        # If we are receiving None for any values which cannot be NULL
        # in the User model, set them to an empty string instead.
        # Possibly it would be desirable to let these throw an error
        # and push the responsibility to the CAS provider or remove
        # them from the dictionary entirely instead. Handling these
        # is a little ambiguous.
        user_model_fields = UserModel._meta.fields
        for field in user_model_fields:
            # Handle null -> '' conversions mentioned above
            if not field.null:
                try:
                    if attributes[field.name] is None:
                        attributes[field.name] = ''
                except KeyError:
                    continue
            # Coerce boolean strings into true booleans
            if field.get_internal_type() == 'BooleanField':
                try:
                    boolean_value = attributes[field.name] == 'True'
                    attributes[field.name] = boolean_value
                except KeyError:
                    continue

        user.__dict__.update(attributes)

        # If we are keeping a local copy of the user model we
        # should save these attributes which have a corresponding
        # instance in the DB.
        if settings.CAS_CREATE_USER:
            user.save()

    # send the `cas_user_authenticated` signal
    cas_user_authenticated.send(
        sender=self,
        user=user,
        created=created,
        username=username,
        attributes=attributes,
        pgtiou=pgtiou,
        ticket=ticket,
        service=service,
        request=request
    )
    return user
python
{ "resource": "" }
q31216
CASBackend.get_user_id
train
def get_user_id(self, attributes):
    """
    For use when CAS_CREATE_USER_WITH_ID is True. Will raise
    ImproperlyConfigured exceptions when a user_id cannot be accessed.
    This is important because we shouldn't create Users with
    automatically assigned ids if we are trying to keep User primary
    key's in sync.
    """
    if attributes:
        user_id = attributes.get('id')
        if user_id:
            return user_id
        raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but "
                                   "`'id'` is not part of attributes.")
    raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but "
                               "no attributes were provided")
python
{ "resource": "" }
q31217
CASBackend.clean_username
train
def clean_username(self, username):
    """
    Clean the "username" prior to using it to get or create the user
    object, and return the cleaned value.

    By default, changes the username case according to
    `settings.CAS_FORCE_CHANGE_USERNAME_CASE`.
    """
    username_case = settings.CAS_FORCE_CHANGE_USERNAME_CASE
    if username_case == 'lower':
        return username.lower()
    if username_case == 'upper':
        return username.upper()
    if username_case is not None:
        raise ImproperlyConfigured(
            "Invalid value for the CAS_FORCE_CHANGE_USERNAME_CASE setting. "
            "Valid values are `'lower'`, `'upper'`, and `None`.")
    return username
python
{ "resource": "" }
q31218
get_service_url
train
def get_service_url(request, redirect_to=None):
    """Generates application django service URL for CAS"""
    # an explicit proxied root overrides protocol/host detection
    if hasattr(django_settings, 'CAS_ROOT_PROXIED_AS'):
        service = django_settings.CAS_ROOT_PROXIED_AS + request.path
    else:
        protocol = get_protocol(request)
        host = request.get_host()
        service = urllib_parse.urlunparse(
            (protocol, host, request.path, '', '', ''),
        )
    # when the "next" URL is not stored server-side, carry it in the
    # service URL's query string instead
    if not django_settings.CAS_STORE_NEXT:
        if '?' in service:
            service += '&'
        else:
            service += '?'
        service += urllib_parse.urlencode({
            REDIRECT_FIELD_NAME: redirect_to or get_redirect_url(request)
        })
    return service
python
{ "resource": "" }
q31219
ProxyGrantingTicket.retrieve_pt
train
def retrieve_pt(cls, request, service):
    """`request` should be the current HttpRequest object
    `service` a string representing the service for which we want to
    retrieve a ticket.

    The function returns a Proxy Ticket or raises `ProxyError`.
    """
    try:
        pgt = cls.objects.get(
            user=request.user,
            session_key=request.session.session_key).pgt
    except cls.DoesNotExist:
        raise ProxyError(
            "INVALID_TICKET",
            "No proxy ticket found for this HttpRequest object"
        )
    else:
        client = get_cas_client(service_url=service, request=request)
        try:
            return client.get_proxy_ticket(pgt)
        # change CASError to ProxyError nicely
        except CASError as error:
            raise ProxyError(*error.args)
        # just embed other errors
        except Exception as e:
            raise ProxyError(e)
python
{ "resource": "" }
q31220
DummyStateMachine.reset
train
def reset(self, document, parent, level):
    """Reset the state of state machine.

    After reset, self and self.state can be used to
    passed to docutils.parsers.rst.Directive.run

    Parameters
    ----------
    document: docutils document
        Current document of the node.
    parent: parent node
        Parent node that will be used to interpret role and directives.
    level: int
        Current section level.
    """
    self.language = languages.get_language(
        document.settings.language_code)
    # setup memo
    self.memo.document = document
    self.memo.reporter = document.reporter
    self.memo.language = self.language
    self.memo.section_level = level
    # setup inliner (lazily created and cached on the memo)
    if self.memo.inliner is None:
        self.memo.inliner = Inliner()
        self.memo.inliner.init_customizations(document.settings)
    inliner = self.memo.inliner
    inliner.reporter = document.reporter
    inliner.document = document
    inliner.language = self.language
    inliner.parent = parent
    # setup self
    self.document = document
    self.reporter = self.memo.reporter
    self.node = parent
    self.state.runtime_init()
    self.input_lines = document['source']
python
{ "resource": "" }
q31221
DummyStateMachine.run_directive
train
def run_directive(self, name, arguments=None, options=None, content=None):
    """Generate directive node given arguments.

    Parameters
    ----------
    name : str
        name of directive.
    arguments : list
        list of positional arguments.
    options : dict
        key value arguments.
    content : content
        content of the directive

    Returns
    -------
    node : docutil Node
        Node generated by the arguments.
    """
    # avoid mutable default arguments: normalize None to fresh containers
    if options is None:
        options = {}
    if content is None:
        content = []
    if arguments is None:
        arguments = []
    direc, _ = directive(name, self.language, self.document)
    direc = direc(name=name,
                  arguments=arguments,
                  options=options,
                  content=content,
                  lineno=self.node.line,
                  content_offset=0,
                  block_text='Dummy BlockText',
                  state=self.state,
                  state_machine=self)
    return direc.run()
python
{ "resource": "" }
q31222
DummyStateMachine.run_role
train
def run_role(self, name, options=None, content=None):
    """Generate a role node.

    Parameters
    ----------
    name : str
        name of the role to run.
    options : dict
        key value arguments.
    content : content
        content of the directive

    Returns
    -------
    node : docutil Node
        Node generated by the arguments.
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    role_fn, _ = role(name, self.language,
                      self.node.line, self.reporter)
    vec, _ = role_fn(name,
                     rawtext=str(content),
                     text=str(content),
                     lineno=self.node.line,
                     inliner=self.memo.inliner,
                     options=options,
                     content=content)
    assert len(vec) == 1, 'only support one list in role'
    return vec[0]
python
{ "resource": "" }
q31223
CommonMarkParser.default_depart
train
def default_depart(self, mdnode):
    """Default node depart handler.

    When leaving a container node that had a matching ``visit_<type>``
    handler, step back up to its parent element; otherwise warn that the
    container was skipped.
    """
    if not mdnode.is_container():
        return
    handler_name = 'visit_{0}'.format(mdnode.t)
    if hasattr(self, handler_name):
        self.current_node = self.current_node.parent
    else:
        warn("Container node skipped: type={0}".format(mdnode.t))
python
{ "resource": "" }
q31224
CommonMarkParser.depart_heading
train
def depart_heading(self, _):
    """Finish establishing section

    Wrap up title node, but stick in the section node. Add the section
    names based on all the text nodes added to the title.
    """
    assert isinstance(self.current_node, nodes.title)
    # The title node has a tree of text nodes, use the whole thing to
    # determine the section id and names
    text = self.current_node.astext()
    if self.translate_section_name:
        text = self.translate_section_name(text)
    name = nodes.fully_normalize_name(text)
    section = self.current_node.parent
    section['names'].append(name)
    self.document.note_implicit_target(section, section)
    # leave the title: subsequent nodes attach to the section
    self.current_node = section
python
{ "resource": "" }
q31225
AutoStructify.parse_ref
train
def parse_ref(self, ref):
    """Analyze the ref block, and return the information needed.

    Parameters
    ----------
    ref : nodes.reference

    Returns
    -------
    result : tuple of (str, str, str)
        The returned result is tuple of (title, uri, docpath).
        title is the display title of the ref.
        uri is the html uri of to the ref after resolve.
        docpath is the absolute document path to the document, if
        the target corresponds to an internal document, this can be None
    """
    title = None
    if len(ref.children) == 0:
        title = ref['name'] if 'name' in ref else None
    elif isinstance(ref.children[0], nodes.Text):
        title = ref.children[0].astext()
    uri = ref['refuri']
    # external URLs are passed through untouched
    if uri.find('://') != -1:
        return (title, uri, None)
    anchor = None
    arr = uri.split('#')
    if len(arr) == 2:
        anchor = arr[1]
    # malformed (multiple '#') or pure-anchor links are left as-is
    if len(arr) > 2 or len(arr[0]) == 0:
        return (title, uri, None)
    uri = arr[0]
    abspath = os.path.abspath(os.path.join(self.file_dir, uri))
    relpath = os.path.relpath(abspath, self.root_dir)
    suffix = abspath.rsplit('.', 1)
    if len(suffix) == 2 and suffix[1] in AutoStructify.suffix_set and (
            os.path.exists(abspath) and abspath.startswith(self.root_dir)):
        # replace the path separator if running on non-UNIX environment
        if os.path.sep != '/':
            relpath = relpath.replace(os.path.sep, '/')
        docpath = '/' + relpath.rsplit('.', 1)[0]
        # rewrite suffix to html, this is suboptimal
        uri = docpath + '.html'
        if anchor is None:
            return (title, uri, docpath)
        else:
            return (title, uri + '#' + anchor, None)
    else:
        # use url resolver
        if self.url_resolver:
            uri = self.url_resolver(relpath)
        if anchor:
            uri += '#' + anchor
        return (title, uri, None)
python
{ "resource": "" }
q31226
AutoStructify.auto_toc_tree
train
def auto_toc_tree(self, node):  # pylint: disable=too-many-branches
    """Try to convert a list block to toctree in rst.

    This function detects whether the list matches the condition and
    returns a converted toctree node. The matching condition:
    the list only contains one level, and only contains references.

    Parameters
    ----------
    node: nodes.Sequential
        A list node in the doctree

    Returns
    -------
    tocnode: docutils node
        The converted toc tree node, None if conversion is not possible.
    """
    if not self.config['enable_auto_toc_tree']:
        return None
    # when auto_toc_tree_section is set
    # only auto generate toctree under the specified section title
    sec = self.config['auto_toc_tree_section']
    if sec is not None:
        if node.parent is None:
            return None
        title = None
        if isinstance(node.parent, nodes.section):
            child = node.parent.first_child_matching_class(nodes.title)
            if child is not None:
                title = node.parent.children[child]
        elif isinstance(node.parent, nodes.paragraph):
            child = node.parent.parent.first_child_matching_class(nodes.title)
            if child is not None:
                title = node.parent.parent.children[child]
        if not title:
            return None
        if title.astext().strip() != sec:
            return None

    # 0 for bullet lists, 1 for enumerated lists; anything else bails out
    numbered = None
    if isinstance(node, nodes.bullet_list):
        numbered = 0
    elif isinstance(node, nodes.enumerated_list):
        numbered = 1

    if numbered is None:
        return None
    refs = []
    for nd in node.children[:]:
        assert isinstance(nd, nodes.list_item)
        if len(nd.children) != 1:
            return None
        par = nd.children[0]
        if not isinstance(par, nodes.paragraph):
            return None
        if len(par.children) != 1:
            return None
        ref = par.children[0]
        if isinstance(ref, addnodes.pending_xref):
            ref = ref.children[0]
        if not isinstance(ref, nodes.reference):
            return None
        title, uri, docpath = self.parse_ref(ref)
        if title is None or uri.startswith('#'):
            return None
        if docpath:
            refs.append((title, docpath))
        else:
            refs.append((title, uri))
    self.state_machine.reset(self.document,
                             node.parent,
                             self.current_level)
    return self.state_machine.run_directive(
        'toctree',
        options={'maxdepth': 1, 'numbered': numbered},
        content=['%s <%s>' % (k, v) for k, v in refs])
python
{ "resource": "" }
q31227
AutoStructify.auto_inline_code
train
def auto_inline_code(self, node):
    """Try to automatically generate nodes for inline literals.

    Parameters
    ----------
    node : nodes.literal
        Original codeblock node
    Returns
    -------
    tocnode: docutils node
        The converted toc tree node, None if conversion is not possible.
    """
    assert isinstance(node, nodes.literal)
    if len(node.children) != 1:
        return None
    content = node.children[0]
    if not isinstance(content, nodes.Text):
        return None
    content = content.astext().strip()
    # ``$...$`` delimiters mark inline math
    if content.startswith('$') and content.endswith('$'):
        if not self.config['enable_inline_math']:
            return None
        content = content[1:-1]
        self.state_machine.reset(self.document,
                                 node.parent,
                                 self.current_level)
        return self.state_machine.run_role('math', content=content)
    else:
        return None
python
{ "resource": "" }
q31228
AutoStructify.auto_code_block
train
def auto_code_block(self, node):
    """Try to automatically generate nodes for codeblock syntax.

    Parameters
    ----------
    node : nodes.literal_block
        Original codeblock node
    Returns
    -------
    tocnode: docutils node
        The converted toc tree node, None if conversion is not possible.
    """
    assert isinstance(node, nodes.literal_block)
    original_node = node
    if 'language' not in node:
        return None
    self.state_machine.reset(self.document,
                             node.parent,
                             self.current_level)
    content = node.rawsource.split('\n')
    language = node['language']
    if language == 'math':
        if self.config['enable_math']:
            return self.state_machine.run_directive(
                'math', content=content)
    elif language == 'eval_rst':
        if self.config['enable_eval_rst']:
            # allow embed non section level rst
            node = nodes.section()
            self.state_machine.state.nested_parse(
                StringList(content, source=original_node.source),
                0, node=node, match_titles=True)
            return node.children[:]
    else:
        # FIX: pattern is now a raw string -- '\w' in a plain string
        # literal is an invalid escape sequence (DeprecationWarning
        # today, a SyntaxError in future Python versions).
        match = re.search(r'[ ]?[\w_-]+::.*', language)
        if match:
            parser = Parser()
            new_doc = new_document(None, self.document.settings)
            newsource = u'.. ' + match.group(0) + '\n' + node.rawsource
            parser.parse(newsource, new_doc)
            return new_doc.children[:]
        else:
            return self.state_machine.run_directive(
                'code-block',
                arguments=[language],
                content=content)
    return None
python
{ "resource": "" }
q31229
AutoStructify.find_replace
train
def find_replace(self, node):
    """Try to find replace node for current node.

    Parameters
    ----------
    node : docutil node
        Node to find replacement for.

    Returns
    -------
    nodes : node or list of node
        The replacement nodes of current node.
        Returns None if no replacement can be found.
    """
    if isinstance(node, nodes.Sequential):
        return self.auto_toc_tree(node)
    if isinstance(node, nodes.literal_block):
        return self.auto_code_block(node)
    if isinstance(node, nodes.literal):
        return self.auto_inline_code(node)
    return None
python
{ "resource": "" }
q31230
AutoStructify.apply
train
def apply(self):
    """Apply the transformation by configuration."""
    source = self.document['source']
    self.reporter.info('AutoStructify: %s' % source)
    # only transform markdowns
    if not source.endswith(tuple(self.config['commonmark_suffixes'])):
        return
    self.url_resolver = self.config['url_resolver']
    assert callable(self.url_resolver)
    # per-document state used by the auto_* helpers
    self.state_machine = DummyStateMachine()
    self.current_level = 0
    self.file_dir = os.path.abspath(os.path.dirname(self.document['source']))
    self.root_dir = os.path.abspath(self.document.settings.env.srcdir)
    self.traverse(self.document)
python
{ "resource": "" }
q31231
correction
train
def correction(sentence, pos):
    "Most probable spelling correction for word."
    # NOTE(review): despite the docstring, on success this returns a
    # *list* of ranked candidates; only the no-candidate fallback
    # returns a single word.
    word = sentence[pos]
    cands = candidates(word)
    if not cands:
        # retry with the (presumably) relaxed candidate search
        cands = candidates(word, False)
    if not cands:
        return word
    # rank by probability given the sentence context, best first
    cands = sorted(cands, key=lambda w: P(w, sentence, pos), reverse=True)
    # NOTE(review): c[0] takes the first element of each candidate --
    # this assumes candidates() yields sequences such as (word, ...)
    # tuples; if it yields plain strings this keeps only the first
    # character. TODO confirm against candidates()'s return type.
    cands = [c[0] for c in cands]
    return cands
python
{ "resource": "" }
q31232
graph_from_dot_file
train
def graph_from_dot_file(path, encoding=None):
    """Load graphs from DOT file at `path`.

    @param path: to DOT file
    @param encoding: as passed to `io.open`.
        For example, `'utf-8'`.

    @return: Graphs that result from parsing.
    @rtype: `list` of `pydot.Dot`
    """
    with io.open(path, 'rt', encoding=encoding) as source:
        dot_data = source.read()
    if not PY3:
        # ensure a unicode object on Python 2
        dot_data = unicode(dot_data)
    return graph_from_dot_data(dot_data)
python
{ "resource": "" }
q31233
Node.to_string
train
def to_string(self): """Return string representation of node in DOT language.""" # RMF: special case defaults for node, edge and graph properties. # node = quote_if_necessary(self.obj_dict['name']) node_attr = list() for attr in sorted(self.obj_dict['attributes']): value = self.obj_dict['attributes'][attr] if value == '': value = '""' if value is not None: node_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) ) else: node_attr.append( attr ) # No point in having nodes setting any defaults if the don't set # any attributes... # if node in ('graph', 'node', 'edge') and len(node_attr) == 0: return '' node_attr = ', '.join(node_attr) if node_attr: node += ' [' + node_attr + ']' return node + ';'
python
{ "resource": "" }
q31234
Graph.add_node
train
def add_node(self, graph_node):
    """Adds a node object to the graph.

    It takes a node object as its only argument and
    returns None.
    """
    if not isinstance(graph_node, Node):
        raise TypeError(
            'add_node() received ' +
            'a non node class object: ' + str(graph_node))
    node = self.get_node(graph_node.get_name())
    if not node:
        # first node with this name: start a fresh list
        self.obj_dict['nodes'][graph_node.get_name()] = [
            graph_node.obj_dict]
        #self.node_dict[graph_node.get_name()] = graph_node.attributes
        graph_node.set_parent_graph(self.get_parent_graph())
    else:
        # another node with the same name already exists; append
        self.obj_dict['nodes'][graph_node.get_name()].append(
            graph_node.obj_dict)
    graph_node.set_sequence(self.get_next_sequence_number())
python
{ "resource": "" }
q31235
Graph.del_node
train
def del_node(self, name, index=None):
    """Delete a node from the graph.

    Given a node's name all node(s) with that same name
    will be deleted if 'index' is not specified or set
    to None.
    If there are several nodes with that same name and
    'index' is given, only the node in that position
    will be deleted.

    'index' should be an integer specifying the position
    of the node to delete. If index is larger than the
    number of nodes with that name, no action is taken.

    If nodes are deleted it returns True. If no action
    is taken it returns False.
    """
    # accept either a Node instance or a plain name string
    if isinstance(name, Node):
        name = name.get_name()

    if name not in self.obj_dict['nodes']:
        return False

    if index is None:
        # no index given: remove every node with this name
        del self.obj_dict['nodes'][name]
        return True

    # BUGFIX: an out-of-range index previously fell through to the
    # delete-all branch, deleting every node with this name and
    # contradicting the documented contract ("no action is taken").
    if index < len(self.obj_dict['nodes'][name]):
        del self.obj_dict['nodes'][name][index]
        return True
    return False
python
{ "resource": "" }
q31236
Graph.get_edge
train
def get_edge(self, src_or_list, dst=None):
    """Retrieved an edge from the graph.

    Given an edge's source and destination the corresponding
    Edge instance(s) will be returned.

    If one or more edges exist with that source and destination
    a list of Edge instances is returned.
    An empty list is returned otherwise.
    """
    # either a (src, dst) pair in the first argument, or two arguments
    if isinstance(src_or_list, (list, tuple)) and dst is None:
        edge_points = tuple(src_or_list)
        edge_points_reverse = (edge_points[1], edge_points[0])
    else:
        edge_points = (src_or_list, dst)
        edge_points_reverse = (dst, src_or_list)
    match = list()
    # undirected graphs ('graph') also match the reversed direction
    if edge_points in self.obj_dict['edges'] or (
            self.get_top_graph_type() == 'graph' and
            edge_points_reverse in self.obj_dict['edges']):
        edges_obj_dict = self.obj_dict['edges'].get(
            edge_points,
            self.obj_dict['edges'].get(edge_points_reverse, None))
        for edge_obj_dict in edges_obj_dict:
            match.append(
                Edge(edge_points[0], edge_points[1],
                     obj_dict=edge_obj_dict))
    return match
python
{ "resource": "" }
q31237
Graph.get_subgraph
train
def get_subgraph(self, name):
    """Retrieved a subgraph from the graph.

    Given a subgraph's name the corresponding
    Subgraph instance will be returned.

    If one or more subgraphs exist with the same name, a list of
    Subgraph instances is returned.
    An empty list is returned otherwise.
    """
    found = list()
    for obj_dict_list in self.obj_dict['subgraphs'].get(name, []):
        found.append(Subgraph(obj_dict=obj_dict_list))
    return found
python
{ "resource": "" }
q31238
Graph.get_subgraph_list
train
def get_subgraph_list(self):
    """Get the list of Subgraph instances.

    This method returns the list of Subgraph instances
    in the graph.
    """
    return [
        Subgraph(obj_dict=obj_d)
        for obj_dict_list in self.obj_dict['subgraphs'].values()
        for obj_d in obj_dict_list
    ]
python
{ "resource": "" }
q31239
Dot.create
train
def create(self, prog=None, format='ps', encoding=None):
    """Creates and returns a binary image for the graph.

    create will write the graph to a temporary dot file in the
    encoding specified by `encoding` and process it with the
    program given by 'prog' (which defaults to 'twopi'), reading
    the binary image output and return it as:

    - `str` of bytes in Python 2
    - `bytes` in Python 3

    There's also the preferred possibility of using:

        create_'format'(prog='program')

    which are automatically defined for all the supported formats,
    for example:

    - `create_ps()`
    - `create_gif()`
    - `create_dia()`

    If 'prog' is a list, instead of a string,
    then the fist item is expected to be the program name,
    followed by any optional command-line arguments for it:

        [ 'twopi', '-Tdot', '-s10' ]

    @param prog: either:

    - name of GraphViz executable that
    can be found in the `$PATH`, or

    - absolute path to GraphViz executable.

    If you have added GraphViz to the `$PATH` and
    use its executables as installed
    (without renaming any of them)
    then their names are:

    - `'dot'`
    - `'twopi'`
    - `'neato'`
    - `'circo'`
    - `'fdp'`
    - `'sfdp'`

    On Windows, these have the notorious ".exe" extension that,
    only for the above strings, will be added automatically.

    The `$PATH` is inherited from `os.env['PATH']` and
    passed to `subprocess.Popen` using the `env` argument.

    If you haven't added GraphViz to your `$PATH` on Windows,
    then you may want to give the absolute path to the
    executable (for example, to `dot.exe`) in `prog`.
    """
    if prog is None:
        prog = self.prog
    assert prog is not None

    # 'prog' may carry extra command-line arguments as a list/tuple.
    if isinstance(prog, (list, tuple)):
        prog, args = prog[0], prog[1:]
    else:
        args = []

    # temp file: the dot source is written here for graphviz to read.
    tmp_fd, tmp_name = tempfile.mkstemp()
    os.close(tmp_fd)
    self.write(tmp_name, encoding=encoding)
    tmp_dir = os.path.dirname(tmp_name)

    # For each of the image files...
    for img in self.shape_files:
        # Get its data
        f = open(img, 'rb')
        f_data = f.read()
        f.close()

        # And copy it under a file with the same name in
        # the temporary directory
        f = open(os.path.join(tmp_dir, os.path.basename(img)), 'wb')
        f.write(f_data)
        f.close()

    arguments = ['-T{}'.format(format), ] + args + [tmp_name]

    try:
        stdout_data, stderr_data, process = call_graphviz(
            program=prog,
            arguments=arguments,
            working_dir=tmp_dir,
        )
    except OSError as e:
        if e.errno == errno.ENOENT:
            # Rewrite the OS error into a friendlier "not in path" message.
            args = list(e.args)
            args[1] = '"{prog}" not found in path.'.format(
                prog=prog)
            raise OSError(*args)
        else:
            raise

    # clean file litter: the copied shape files and the temp dot file.
    for img in self.shape_files:
        os.unlink(os.path.join(tmp_dir, os.path.basename(img)))

    os.unlink(tmp_name)

    if process.returncode != 0:
        message = (
            '"{prog}" with args {arguments} returned code: {code}\n\n'
            'stdout, stderr:\n {out}\n{err}\n'
        ).format(
            prog=prog,
            arguments=arguments,
            code=process.returncode,
            out=stdout_data,
            err=stderr_data,
        )
        print(message)

    assert process.returncode == 0, process.returncode

    return stdout_data
python
{ "resource": "" }
q31240
SchemaError.code
train
def code(self): """ Removes duplicates values in auto and error list. parameters. """ def uniq(seq): """ Utility function that removes duplicate. """ seen = set() seen_add = seen.add # This way removes duplicates while preserving the order. return [x for x in seq if x not in seen and not seen_add(x)] data_set = uniq(i for i in self.autos if i is not None) error_list = uniq(i for i in self.errors if i is not None) if error_list: return "\n".join(error_list) return "\n".join(data_set)
python
{ "resource": "" }
q31241
Schema._dict_key_priority
train
def _dict_key_priority(s): """Return priority for a given key object.""" if isinstance(s, Hook): return _priority(s._schema) - 0.5 if isinstance(s, Optional): return _priority(s._schema) + 0.5 return _priority(s)
python
{ "resource": "" }
q31242
Schema._prepend_schema_name
train
def _prepend_schema_name(self, message): """ If a custom schema name has been defined, prepends it to the error message that gets raised when a schema error occurs. """ if self._name: message = "{0!r} {1!s}".format(self._name, message) return message
python
{ "resource": "" }
q31243
Schema.json_schema
train
def json_schema(self, schema_id=None, is_main_schema=True): """Generate a draft-07 JSON schema dict representing the Schema. This method can only be called when the Schema's value is a dict. This method must be called with a schema_id. Calling it without one is used in a recursive context for sub schemas.""" Schema = self.__class__ s = self._schema i = self._ignore_extra_keys flavor = _priority(s) if flavor != DICT and is_main_schema: raise ValueError("The main schema must be a dict.") if flavor == TYPE: # Handle type return {"type": {int: "integer", float: "number", bool: "boolean"}.get(s, "string")} elif flavor == ITERABLE and len(s) == 1: # Handle arrays of a single type or dict schema return {"type": "array", "items": Schema(s[0]).json_schema(is_main_schema=False)} elif isinstance(s, Or): # Handle Or values values = [Schema(or_key).json_schema(is_main_schema=False) for or_key in s._args] any_of = [] for value in values: if value not in any_of: any_of.append(value) return {"anyOf": any_of} if flavor != DICT: # If not handled, do not check return {} if is_main_schema and not schema_id: raise ValueError("schema_id is required.") # Handle dict required_keys = [] expanded_schema = {} for key in s: if isinstance(key, Hook): continue if isinstance(s[key], Schema): sub_schema = s[key] else: sub_schema = Schema(s[key], ignore_extra_keys=i) sub_schema_json = sub_schema.json_schema(is_main_schema=False) is_optional = False if isinstance(key, Optional): key = key._schema is_optional = True if isinstance(key, str): if not is_optional: required_keys.append(key) expanded_schema[key] = sub_schema_json elif isinstance(key, Or): for or_key in key._args: expanded_schema[or_key] = sub_schema_json schema_dict = { "type": "object", "properties": expanded_schema, "required": required_keys, "additionalProperties": i, } if is_main_schema: schema_dict.update({"id": schema_id, "$schema": "http://json-schema.org/draft-07/schema#"}) return schema_dict
python
{ "resource": "" }
q31244
Files.put
train
def put(self, filename, data): """Create or update the specified file with the provided data. """ # Open the file for writing on the board and write chunks of data. self._pyboard.enter_raw_repl() self._pyboard.exec_("f = open('{0}', 'wb')".format(filename)) size = len(data) # Loop through and write a buffer size chunk of data at a time. for i in range(0, size, BUFFER_SIZE): chunk_size = min(BUFFER_SIZE, size - i) chunk = repr(data[i : i + chunk_size]) # Make sure to send explicit byte strings (handles python 2 compatibility). if not chunk.startswith("b"): chunk = "b" + chunk self._pyboard.exec_("f.write({0})".format(chunk)) self._pyboard.exec_("f.close()") self._pyboard.exit_raw_repl()
python
{ "resource": "" }
q31245
cli
train
def cli(port, baud, delay): """ampy - Adafruit MicroPython Tool Ampy is a tool to control MicroPython boards over a serial connection. Using ampy you can manipulate files on the board's internal filesystem and even run scripts. """ global _board # On Windows fix the COM port path name for ports above 9 (see comment in # windows_full_port_name function). if platform.system() == "Windows": port = windows_full_port_name(port) _board = pyboard.Pyboard(port, baudrate=baud, rawdelay=delay)
python
{ "resource": "" }
q31246
get
train
def get(remote_file, local_file): """ Retrieve a file from the board. Get will download a file from the board and print its contents or save it locally. You must pass at least one argument which is the path to the file to download from the board. If you don't specify a second argument then the file contents will be printed to standard output. However if you pass a file name as the second argument then the contents of the downloaded file will be saved to that file (overwriting anything inside it!). For example to retrieve the boot.py and print it out run: ampy --port /board/serial/port get boot.py Or to get main.py and save it as main.py locally run: ampy --port /board/serial/port get main.py main.py """ # Get the file contents. board_files = files.Files(_board) contents = board_files.get(remote_file) # Print the file out if no local file was provided, otherwise save it. if local_file is None: print(contents.decode("utf-8")) else: local_file.write(contents)
python
{ "resource": "" }
q31247
mkdir
train
def mkdir(directory, exists_okay): """ Create a directory on the board. Mkdir will create the specified directory on the board. One argument is required, the full path of the directory to create. Note that you cannot recursively create a hierarchy of directories with one mkdir command, instead you must create each parent directory with separate mkdir command calls. For example to make a directory under the root called 'code': ampy --port /board/serial/port mkdir /code """ # Run the mkdir command. board_files = files.Files(_board) board_files.mkdir(directory, exists_okay=exists_okay)
python
{ "resource": "" }
q31248
ls
train
def ls(directory, long_format, recursive): """List contents of a directory on the board. Can pass an optional argument which is the path to the directory. The default is to list the contents of the root, /, path. For example to list the contents of the root run: ampy --port /board/serial/port ls Or to list the contents of the /foo/bar directory on the board run: ampy --port /board/serial/port ls /foo/bar Add the -l or --long_format flag to print the size of files (however note MicroPython does not calculate the size of folders and will show 0 bytes): ampy --port /board/serial/port ls -l /foo/bar """ # List each file/directory on a separate line. board_files = files.Files(_board) for f in board_files.ls(directory, long_format=long_format, recursive=recursive): print(f)
python
{ "resource": "" }
q31249
put
train
def put(local, remote):
    """Put a file or folder and its contents on the board.

    Put will upload a local file or folder  to the board.  If the file already
    exists on the board it will be overwritten with no warning!  You must pass
    at least one argument which is the path to the local file/folder to
    upload.  If the item to upload is a folder then it will be copied to the
    board recursively with its entire child structure.  You can pass a second
    optional argument which is the path and name of the file/folder to put to
    on the connected board.

    For example to upload a main.py from the current directory to the board's
    root run:

      ampy --port /board/serial/port put main.py

    Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py
    in the board's root run:

      ampy --port /board/serial/port put ./foo/board_boot.py boot.py

    To upload a local folder adafruit_library and all of its child files/folders
    as an item under the board's root run:

      ampy --port /board/serial/port put adafruit_library

    Or to put a local folder adafruit_library on the board under the path
    /lib/adafruit_library on the board run:

      ampy --port /board/serial/port put adafruit_library /lib/adafruit_library
    """
    # Use the local filename if no remote filename is provided.
    if remote is None:
        remote = os.path.basename(os.path.abspath(local))
    # Check if path is a folder and do recursive copy of everything inside it.
    # Otherwise it's a file and should simply be copied over.
    if os.path.isdir(local):
        # Directory copy, create the directory and walk all children to copy
        # over the files.
        board_files = files.Files(_board)
        for parent, child_dirs, child_files in os.walk(local):
            # Create board filesystem absolute path to parent directory.
            remote_parent = posixpath.normpath(
                posixpath.join(remote, os.path.relpath(parent, local))
            )
            try:
                # Create remote parent directory.
                board_files.mkdir(remote_parent)
                # Loop through all the files and put them on the board too.
                for filename in child_files:
                    with open(os.path.join(parent, filename), "rb") as infile:
                        remote_filename = posixpath.join(remote_parent,
                                                         filename)
                        board_files.put(remote_filename, infile.read())
            except files.DirectoryExistsError:
                # Ignore errors for directories that already exist.
                pass
    else:
        # File copy, open the file and copy its contents to the board.
        # Put the file on the board.
        with open(local, "rb") as infile:
            board_files = files.Files(_board)
            board_files.put(remote, infile.read())
python
{ "resource": "" }
q31250
rmdir
train
def rmdir(remote_folder, missing_okay): """Forcefully remove a folder and all its children from the board. Remove the specified folder from the board's filesystem. Must specify one argument which is the path to the folder to delete. This will delete the directory and ALL of its children recursively, use with caution! For example to delete everything under /adafruit_library from the root of a board run: ampy --port /board/serial/port rmdir adafruit_library """ # Delete the provided file/directory on the board. board_files = files.Files(_board) board_files.rmdir(remote_folder, missing_okay=missing_okay)
python
{ "resource": "" }
q31251
run
train
def run(local_file, no_output): """Run a script and print its output. Run will send the specified file to the board and execute it immediately. Any output from the board will be printed to the console (note that this is not a 'shell' and you can't send input to the program). Note that if your code has a main or infinite loop you should add the --no-output option. This will run the script and immediately exit without waiting for the script to finish and print output. For example to run a test.py script and print any output after it finishes: ampy --port /board/serial/port run test.py Or to run test.py and not wait for it to finish: ampy --port /board/serial/port run --no-output test.py """ # Run the provided file and print its output. board_files = files.Files(_board) try: output = board_files.run(local_file, not no_output) if output is not None: print(output.decode("utf-8"), end="") except IOError: click.echo( "Failed to find or read input file: {0}".format(local_file), err=True )
python
{ "resource": "" }
q31252
GenericESCPOS.kick_drawer
train
def kick_drawer(self, port=0, **kwargs): """Kick drawer connected to the given port. In this implementation, cash drawers are identified according to the port in which they are connected. This relation between drawers and ports does not exists in the ESC/POS specification and it is just a design decision to normalize cash drawers handling. From the user application perspective, drawers are simply connected to port 0, 1, 2, and so on. If printer does not have this feature then no exception should be raised. :param int number: The port number to kick drawer (default is ``0``). :raises CashDrawerException: If given port does not exists. """ if self.hardware_features.get(feature.CASHDRAWER_PORTS, False): # if feature is available assume at least one port is available max_ports = self.hardware_features.get( feature.CASHDRAWER_AVAILABLE_PORTS, 1) if port not in range(max_ports): raise CashDrawerException('invalid cash drawer port: {!r} ' '(available ports are {!r})'.format( port, range(max_ports))) return self._kick_drawer_impl(port=port, **kwargs)
python
{ "resource": "" }
q31253
get_baudrates
train
def get_baudrates(): """ Returns supported baud rates in a Django-like choices tuples. """ baudrates = [] s = pyserial.Serial() for name, value in s.getSupportedBaudrates(): baudrates.append((value, name,)) return tuple(baudrates)
python
{ "resource": "" }
q31254
get_databits
train
def get_databits(): """ Returns supported byte sizes in a Django-like choices tuples. """ databits = [] s = pyserial.Serial() for name, value in s.getSupportedByteSizes(): databits.append((value, name,)) return tuple(databits)
python
{ "resource": "" }
q31255
get_stopbits
train
def get_stopbits(): """ Returns supported stop bit lengths in a Django-like choices tuples. """ stopbits = [] s = pyserial.Serial() for name, value in s.getSupportedStopbits(): stopbits.append((value, name,)) return tuple(stopbits)
python
{ "resource": "" }
q31256
get_parities
train
def get_parities(): """ Returns supported parities in a Django-like choices tuples. """ parities = [] s = pyserial.Serial() for name, value in s.getSupportedParities(): parities.append((value, name,)) return tuple(parities)
python
{ "resource": "" }
q31257
SerialSettings.get_connection
train
def get_connection(self, **kwargs): """Return a serial connection implementation suitable for the specified protocol. Raises ``RuntimeError`` if there is no implementation for the given protocol. .. warn:: This may be a little bit confusing since there is no effective connection but an implementation of a connection pattern. """ if self.is_rtscts(): return RTSCTSConnection(self, **kwargs) if self.is_dsrdtr(): return DSRDTRConnection(self, **kwargs) else: raise RuntimeError('Serial protocol "%s" is not available.' % ( self.protocol))
python
{ "resource": "" }
q31258
SerialConnection.write
train
def write(self, data): """Write data to serial port.""" for chunk in chunks(data, 512): self.wait_to_write() self.comport.write(chunk) self.comport.flush()
python
{ "resource": "" }
q31259
SerialConnection.read
train
def read(self): """Read data from serial port and returns a ``bytearray``.""" data = bytearray() while True: incoming_bytes = self.comport.inWaiting() if incoming_bytes == 0: break else: content = self.comport.read(size=incoming_bytes) data.extend(bytearray(content)) return data
python
{ "resource": "" }
q31260
FileConnection.write
train
def write(self, data): """Print any command sent in raw format. :param bytes data: arbitrary code to be printed. """ self.device.write(data) if self.auto_flush: self.flush()
python
{ "resource": "" }
q31261
is_value_in
train
def is_value_in(constants_group, value): """ Checks whether value can be found in the given constants group, which in turn, should be a Django-like choices tuple. """ for const_value, label in constants_group: if const_value == value: return True return False
python
{ "resource": "" }
q31262
run_interactive
train
def run_interactive(query, editor=None, just_count=False, default_no=False): """ Asks the user about each patch suggested by the result of the query. @param query An instance of the Query class. @param editor Name of editor to use for manual intervention, e.g. 'vim' or 'emacs'. If omitted/None, defaults to $EDITOR environment variable. @param just_count If true: don't run normally. Just print out number of places in the codebase where the query matches. """ global yes_to_all # Load start from bookmark, if appropriate. bookmark = _load_bookmark() if bookmark: print('Resume where you left off, at %s (y/n)? ' % str(bookmark), end=' ') sys.stdout.flush() if (_prompt(default='y') == 'y'): query.start_position = bookmark # Okay, enough of this foolishness of computing start and end. # Let's ask the user about some one line diffs! print('Searching for first instance...') suggestions = query.generate_patches() if just_count: for count, _ in enumerate(suggestions): terminal.terminal_move_to_beginning_of_line() print(count, end=" ") sys.stdout.flush() # since print statement ends in comma print() return for patch in suggestions: _save_bookmark(patch.start_position) _ask_about_patch(patch, editor, default_no) print('Searching...') _delete_bookmark() if yes_to_all: terminal.terminal_clear() print( "You MUST indicate in your code review:" " \"codemod with 'Yes to all'\"." "Make sure you and other people review the changes.\n\n" "With great power, comes great responsibility." )
python
{ "resource": "" }
q31263
Query.get_all_patches
train
def get_all_patches(self, dont_use_cache=False): """ Computes a list of all patches matching this query, though ignoreing self.start_position and self.end_position. @param dont_use_cache If False, and get_all_patches has been called before, compute the list computed last time. """ if not dont_use_cache and self._all_patches_cache is not None: return self._all_patches_cache print( 'Computing full change list (since you specified a percentage)...' ), sys.stdout.flush() # since print statement ends in comma endless_query = self.clone() endless_query.start_position = endless_query.end_position = None self._all_patches_cache = list(endless_query.generate_patches()) return self._all_patches_cache
python
{ "resource": "" }
q31264
Query.compute_percentile
train
def compute_percentile(self, percentage): """ Returns a Position object that represents percentage%-far-of-the-way through the larger task, as specified by this query. @param percentage a number between 0 and 100. """ all_patches = self.get_all_patches() return all_patches[ int(len(all_patches) * percentage / 100) ].start_position
python
{ "resource": "" }
q31265
Query.generate_patches
train
def generate_patches(self): """ Generates a list of patches for each file underneath self.root_directory that satisfy the given conditions given query conditions, where patches for each file are suggested by self.suggestor. """ start_pos = self.start_position or Position(None, None) end_pos = self.end_position or Position(None, None) path_list = Query._walk_directory(self.root_directory) path_list = Query._sublist(path_list, start_pos.path, end_pos.path) path_list = ( path for path in path_list if Query._path_looks_like_code(path) and (self.path_filter(path)) or (self.inc_extensionless and helpers.is_extensionless(path)) ) for path in path_list: try: lines = list(open(path)) except (IOError, UnicodeDecodeError): # If we can't open the file--perhaps it's a symlink whose # destination no loner exists--then short-circuit. continue for patch in self.suggestor(lines): if path == start_pos.path: if patch.start_line_number < start_pos.line_number: continue # suggestion is pre-start_pos if path == end_pos.path: if patch.end_line_number >= end_pos.line_number: break # suggestion is post-end_pos old_lines = lines[ patch.start_line_number:patch.end_line_number] if patch.new_lines is None or patch.new_lines != old_lines: patch.path = path yield patch # re-open file, in case contents changed lines[:] = list(open(path))
python
{ "resource": "" }
q31266
Query._walk_directory
train
def _walk_directory(root_directory): """ Generates the paths of all files that are ancestors of `root_directory`. """ paths = [os.path.join(root, name) for root, dirs, files in os.walk(root_directory) # noqa for name in files] paths.sort() return paths
python
{ "resource": "" }
q31267
matches_extension
train
def matches_extension(path, extension): """ Returns True if path has the given extension, or if the last path component matches the extension. Supports Unix glob matching. >>> matches_extension("./www/profile.php", "php") True >>> matches_extension("./scripts/menu.js", "html") False >>> matches_extension("./LICENSE", "LICENSE") True """ _, ext = os.path.splitext(path) if ext == '': # If there is no extension, grab the file name and # compare it to the given extension. return os.path.basename(path) == extension else: # If the is an extension, drop the leading period and # compare it to the extension. return fnmatch.fnmatch(ext[1:], extension)
python
{ "resource": "" }
q31268
path_filter
train
def path_filter(extensions, exclude_paths=None): """ Returns a function that returns True if a filepath is acceptable. @param extensions An array of strings. Specifies what file extensions should be accepted by the filter. If None, we default to the Unix glob `*` and match every file extension. @param exclude_paths An array of strings which represents filepaths that should never be accepted by the filter. Unix shell-style wildcards are supported. @return function A filter function that will only return True when a filepath is acceptable under the above conditions. >>> list(map(path_filter(extensions=['js', 'php']), ... ['./profile.php', './q.jjs'])) [True, False] >>> list(map(path_filter(extensions=['*'], ... exclude_paths=['html']), ... ['./html/x.php', './lib/y.js'])) [False, True] >>> list(map(path_filter(extensions=['js', 'BUILD']), ... ['./a.js', './BUILD', './profile.php'])) [True, True, False] >>> list(map(path_filter(extensions=['js'], ... exclude_paths=['*/node_modules/*']), ... ['./a.js', './tools/node_modules/dep.js'])) [True, False] """ exclude_paths = exclude_paths or [] def the_filter(path): if not any(matches_extension(path, extension) for extension in extensions): return False if exclude_paths: for excluded in exclude_paths: if (path.startswith(excluded) or path.startswith('./' + excluded) or fnmatch.fnmatch(path, excluded)): return False return True return the_filter
python
{ "resource": "" }
q31269
_terminal_use_capability
train
def _terminal_use_capability(capability_name): """ If the terminal supports the given capability, output it. Return whether it was output. """ curses.setupterm() capability = curses.tigetstr(capability_name) if capability: sys.stdout.write(_unicode(capability)) return bool(capability)
python
{ "resource": "" }
q31270
Countries.get_option
train
def get_option(self, option): """ Get a configuration option, trying the options attribute first and falling back to a Django project setting. """ value = getattr(self, option, None) if value is not None: return value return getattr(settings, "COUNTRIES_{0}".format(option.upper()))
python
{ "resource": "" }
q31271
Countries.countries
train
def countries(self): """ Return the a dictionary of countries, modified by any overriding options. The result is cached so future lookups are less work intensive. """ if not hasattr(self, "_countries"): only = self.get_option("only") if only: only_choices = True if not isinstance(only, dict): for item in only: if isinstance(item, six.string_types): only_choices = False break if only and only_choices: self._countries = dict(only) else: # Local import so that countries aren't loaded into memory # until first used. from django_countries.data import COUNTRIES self._countries = dict(COUNTRIES) if self.get_option("common_names"): self._countries.update(self.COMMON_NAMES) override = self.get_option("override") if override: self._countries.update(override) self._countries = dict( (code, name) for code, name in self._countries.items() if name is not None ) if only and not only_choices: countries = {} for item in only: if isinstance(item, six.string_types): countries[item] = self._countries[item] else: key, value = item countries[key] = value self._countries = countries self.countries_first = [] first = self.get_option("first") or [] for code in first: code = self.alpha2(code) if code in self._countries: self.countries_first.append(code) return self._countries
python
{ "resource": "" }
q31272
Countries.translate_pair
train
def translate_pair(self, code): """ Force a country to the current activated translation. :returns: ``CountryTuple(code, translated_country_name)`` namedtuple """ name = self.countries[code] if code in self.OLD_NAMES: # Check if there's an older translation available if there's no # translation for the newest name. with override(None): source_name = force_text(name) name = force_text(name) if name == source_name: for old_name in self.OLD_NAMES[code]: with override(None): source_old_name = force_text(old_name) old_name = force_text(old_name) if old_name != source_old_name: name = old_name break else: name = force_text(name) return CountryTuple(code, name)
python
{ "resource": "" }
q31273
Countries.alpha2
train
def alpha2(self, code): """ Return the two letter country code when passed any type of ISO 3166-1 country code. If no match is found, returns an empty string. """ code = force_text(code).upper() if code.isdigit(): lookup_code = int(code) def find(alt_codes): return alt_codes[1] == lookup_code elif len(code) == 3: lookup_code = code def find(alt_codes): return alt_codes[0] == lookup_code else: find = None if find: code = None for alpha2, alt_codes in self.alt_codes.items(): if find(alt_codes): code = alpha2 break if code in self.countries: return code return ""
python
{ "resource": "" }
q31274
Countries.name
train
def name(self, code): """ Return the name of a country, based on the code. If no match is found, returns an empty string. """ code = self.alpha2(code) if code not in self.countries: return "" return self.translate_pair(code)[1]
python
{ "resource": "" }
q31275
Countries.by_name
train
def by_name(self, country, language="en"): """ Fetch a country's ISO3166-1 two letter country code from its name. An optional language parameter is also available. Warning: This depends on the quality of the available translations. If no match is found, returns an empty string. ..warning:: Be cautious about relying on this returning a country code (especially with any hard-coded string) since the ISO names of countries may change over time. """ with override(language): for code, name in self: if name.lower() == country.lower(): return code if code in self.OLD_NAMES: for old_name in self.OLD_NAMES[code]: if old_name.lower() == country.lower(): return code return ""
python
{ "resource": "" }
q31276
Countries.alpha3
train
def alpha3(self, code): """ Return the ISO 3166-1 three letter country code matching the provided country code. If no match is found, returns an empty string. """ code = self.alpha2(code) try: return self.alt_codes[code][0] except KeyError: return ""
python
{ "resource": "" }
q31277
Countries.numeric
train
def numeric(self, code, padded=False): """ Return the ISO 3166-1 numeric country code matching the provided country code. If no match is found, returns ``None``. :param padded: Pass ``True`` to return a 0-padded three character string, otherwise an integer will be returned. """ code = self.alpha2(code) try: num = self.alt_codes[code][1] except KeyError: return None if padded: return "%03d" % num return num
python
{ "resource": "" }
q31278
LazyChoicesMixin.choices
train
def choices(self): """ When it's time to get the choices, if it was a lazy then figure it out now and memoize the result. """ if isinstance(self._choices, Promise): self._choices = list(self._choices) return self._choices
python
{ "resource": "" }
q31279
check_ioc_countries
train
def check_ioc_countries(verbosity=1): """ Check if all IOC codes map to ISO codes correctly """ from django_countries.data import COUNTRIES if verbosity: # pragma: no cover print("Checking if all IOC codes map correctly") for key in ISO_TO_IOC: assert COUNTRIES.get(key), "No ISO code for %s" % key if verbosity: # pragma: no cover print("Finished checking IOC codes")
python
{ "resource": "" }
q31280
self_generate
train
def self_generate(output_filename, filename="iso3166-1.csv"): # pragma: no cover """ The following code can be used for self-generation of this file. It requires a UTF-8 CSV file containing the short ISO name and two letter country code as the first two columns. """ import csv import re countries = [] alt_codes = [] with open(filename, "r") as csv_file: for row in csv.reader(csv_file): name = row[0].rstrip("*") name = re.sub(r"\(the\)", "", name) if name: countries.append((name, row[1])) alt_codes.append((row[1], row[2], int(row[3]))) with open(__file__, "r") as source_file: contents = source_file.read() # Write countries. bits = re.match("(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)", contents, re.DOTALL).groups() country_list = [] for name, code in countries: name = name.replace('"', r"\"").strip() country_list.append(' "{code}": _("{name}"),'.format(name=name, code=code)) content = bits[0] content += "\n".join(country_list) # Write alt codes. alt_bits = re.match( "(.*\nALT_CODES = \{\n)(.*)(\n\}.*)", bits[2], re.DOTALL ).groups() alt_list = [] for code, code3, codenum in alt_codes: name = name.replace('"', r"\"").strip() alt_list.append( ' "{code}": ("{code3}", {codenum}),'.format( code=code, code3=code3, codenum=codenum ) ) content += alt_bits[0] content += "\n".join(alt_list) content += alt_bits[2] # Generate file. with open(output_filename, "w") as output_file: output_file.write(content) return countries
python
{ "resource": "" }
q31281
LazyChoicesMixin._set_choices
train
def _set_choices(self, value):
    """
    Also update the widget's choices.

    Delegates to the parent setter, then mirrors the new value onto
    ``self.widget`` so the field and its widget never disagree.
    """
    super(LazyChoicesMixin, self)._set_choices(value)
    self.widget.choices = value
python
{ "resource": "" }
q31282
CountryField.deconstruct
train
def deconstruct(self):
    """Return the field's deconstruction for migrations.

    ``choices`` is dropped because it is the country list and not user
    editable; ``blank_label`` is omitted since it has no database effect.
    """
    name, path, args, kwargs = super(CountryField, self).deconstruct()
    del kwargs["choices"]
    if self.multiple:
        # ``multiple`` determines the stored length, so it must survive.
        kwargs["multiple"] = self.multiple
    if self.countries is not countries:
        # Only record a custom countries class; the default is implied.
        kwargs["countries"] = self.countries.__class__
    return name, path, args, kwargs
python
{ "resource": "" }
q31283
CountryField.validate
train
def validate(self, value, model_instance):
    """Validate *value*, with custom handling for multi-country fields.

    Single-value fields defer entirely to the parent implementation.
    For multiple countries, every selected code must be a valid choice,
    and a non-blank field must not hold an empty value.
    """
    if not self.multiple:
        return super(CountryField, self).validate(value, model_instance)
    if not self.editable:
        # Non-editable fields are never validated.
        return
    if value:
        valid_codes = {option_key for option_key, _label in self.choices}
        for code in value:
            if code not in valid_codes:
                raise exceptions.ValidationError(
                    self.error_messages["invalid_choice"],
                    code="invalid_choice",
                    params={"value": code},
                )
    if not self.blank and value in self.empty_values:
        raise exceptions.ValidationError(self.error_messages["blank"], code="blank")
python
{ "resource": "" }
q31284
CountryField.value_to_string
train
def value_to_string(self, obj):
    """Serialize this field's value from *obj* into its prepped string form."""
    return self.get_prep_value(self.value_from_object(obj))
python
{ "resource": "" }
q31285
_pquery
train
def _pquery(scheduler, data, ndata, ndim, leafsize, x, nx, d, i, k, eps, p,
            dub, ierr):
    """Worker: query a cKDTree over the chunks handed out by *scheduler*.

    The arrays arrive as multiprocessing shared-memory blocks and are
    re-wrapped as numpy views; results are written in place into ``d``
    (distances) and ``i`` (indices).  Any failure bumps the shared
    ``ierr`` counter instead of propagating, so the parent can detect it.
    """
    try:
        _data = shmem_as_nparray(data).reshape((ndata, ndim))
        _x = shmem_as_nparray(x).reshape((nx, ndim))
        _d = shmem_as_nparray(d).reshape((nx, k))
        _i = shmem_as_nparray(i).reshape((nx, k))
        kdtree = cKDTree(_data, leafsize=leafsize)
        for s in scheduler:
            d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p,
                                        distance_upper_bound=dub)
            m_d = d_out.shape[0]
            m_i = i_out.shape[0]
            _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1)
    except Exception:
        # A bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
        # only genuine errors should be counted.
        ierr.value += 1
python
{ "resource": "" }
q31286
cKDTree_MP.pquery
train
def pquery(self, x_list, k=1, eps=0, p=2, distance_upper_bound=np.inf):
    """Query the K-D tree in parallel across all available CPUs.

    Args:
        x_list: sequence of query points (converted to a 2-D array).
        k: number of nearest neighbours per point.
        eps, p, distance_upper_bound: forwarded to ``cKDTree.query``.

    Returns:
        Tuple ``(distances, indices)`` as freshly copied numpy arrays.

    Raises:
        RuntimeError: if any worker process reported an error.
    """
    x = np.array(x_list)
    nx, mx = x.shape
    shmem_x = mp.Array(ctypes.c_double, nx * mx)
    shmem_d = mp.Array(ctypes.c_double, nx * k)
    shmem_i = mp.Array(ctypes.c_double, nx * k)
    _x = shmem_as_nparray(shmem_x).reshape((nx, mx))
    _d = shmem_as_nparray(shmem_d).reshape((nx, k))
    _i = shmem_as_nparray(shmem_i)
    if k != 1:
        _i = _i.reshape((nx, k))
    _x[:, :] = x
    nprocs = num_cpus()
    scheduler = Scheduler(nx, nprocs)
    ierr = mp.Value(ctypes.c_int, 0)
    query_args = (scheduler, self.shmem_data, self.n, self.m, self.leafsize,
                  shmem_x, nx, shmem_d, shmem_i, k, eps, p,
                  distance_upper_bound, ierr)
    # BUGFIX: the start/join loops previously reused ``p`` as their loop
    # variable, shadowing the Minkowski-norm parameter ``p`` above.
    pool = [mp.Process(target=_pquery, args=query_args)
            for _ in range(nprocs)]
    for proc in pool:
        proc.start()
    for proc in pool:
        proc.join()
    if ierr.value != 0:
        raise RuntimeError('%d errors in worker processes' % (ierr.value))
    return _d.copy(), _i.astype(int).copy()
python
{ "resource": "" }
q31287
singleton
train
def singleton(cls):
    """Class decorator ensuring at most one instance of *cls* ever exists.

    The first call constructs the instance (forwarding keyword args);
    every subsequent call returns that same cached object, ignoring
    any new keyword arguments.
    """
    cache = {}

    def getinstance(**kwargs):
        """Return the cached instance, creating it on first use."""
        try:
            return cache[cls]
        except KeyError:
            cache[cls] = cls(**kwargs)
            return cache[cls]

    return getinstance
python
{ "resource": "" }
q31288
rel_path
train
def rel_path(filename):
    """Return *filename* resolved relative to this module's directory."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(os.getcwd(), module_dir, filename)
python
{ "resource": "" }
q31289
get
train
def get(geo_coord, mode=2, verbose=True):
    """Reverse-geocode a single ``(lat, lon)`` tuple of floats.

    Raises:
        TypeError: if *geo_coord* is not a tuple whose first element
            is a float.
    """
    valid = isinstance(geo_coord, tuple) and isinstance(geo_coord[0], float)
    if not valid:
        raise TypeError('Expecting a tuple')
    _rg = RGeocoder(mode=mode, verbose=verbose)
    return _rg.query([geo_coord])[0]
python
{ "resource": "" }
q31290
search
train
def search(geo_coords, mode=2, verbose=True):
    """Reverse-geocode one coordinate tuple or a list/tuple of them."""
    if not isinstance(geo_coords, (tuple, list)):
        raise TypeError('Expecting a tuple or a tuple/list of tuples')
    if not isinstance(geo_coords[0], tuple):
        # A single bare coordinate was passed; wrap it for the query.
        geo_coords = [geo_coords]
    _rg = RGeocoder(mode=mode, verbose=verbose)
    return _rg.query(geo_coords)
python
{ "resource": "" }
q31291
Printer.art_msg
train
def art_msg(self, arttag, colorname, file=sys.stdout):
    """Emit the calendar-border art glyph for *arttag* in *colorname*."""
    glyph = self.art[arttag]
    self.msg(glyph, colorname, file=file)
python
{ "resource": "" }
q31292
GoogleCalendarInterface._cal_monday
train
def _cal_monday(self, day_num): """Shift the day number if we're doing cal monday, or cal_weekend is false, since that also means we're starting on day 1 """ if self.options['cal_monday'] or not self.options['cal_weekend']: day_num -= 1 if day_num < 0: day_num = 6 return day_num
python
{ "resource": "" }
q31293
GoogleCalendarInterface.QuickAddEvent
train
def QuickAddEvent(self, event_text, reminders=None):
    """Create an event via the Google Calendar ``quickAdd`` endpoint.

    Args:
        event_text: natural-language description of the event (required).
        reminders: optional list of reminder strings; when given (or when
            default reminders are disabled) the event is patched with
            explicit reminder overrides.

    Returns:
        The created (and possibly patched) event resource.

    Raises:
        GcalcliError: if *event_text* is empty or more than one calendar
            is selected.
    """
    if not event_text:
        raise GcalcliError('event_text is required for a quickAdd')

    if len(self.cals) != 1:
        # TODO: get a better name for this exception class
        # and use it elsewhere
        raise GcalcliError('You must only specify a single calendar\n')

    new_event = self._retry_with_backoff(
        self.get_cal_service()
            .events()
            .quickAdd(
                calendarId=self.cals[0]['id'],
                text=event_text
            )
    )

    if reminders or not self.options['default_reminders']:
        rem = {}
        rem['reminders'] = {'useDefault': False, 'overrides': []}
        # BUGFIX: ``reminders`` may be None when default reminders are
        # merely disabled; iterating None raised TypeError before.
        for r in reminders or []:
            n, m = utils.parse_reminder(r)
            rem['reminders']['overrides'].append({'minutes': n, 'method': m})

        new_event = self._retry_with_backoff(
            self.get_cal_service()
                .events()
                .patch(
                    calendarId=self.cals[0]['id'],
                    eventId=new_event['id'],
                    body=rem
                )
        )

    if self.details.get('url'):
        hlink = new_event['htmlLink']
        self.printer.msg('New event added: %s\n' % hlink, 'green')

    return new_event
python
{ "resource": "" }
q31294
GoogleCalendarInterface.Remind
train
def Remind(self, minutes, command, use_reminders=False):
    """Spawn *command* with a message listing events starting soon.

    Checks for events between now and now + *minutes*.  With
    *use_reminders*, an event is only included once at least one of its
    reminder offsets has been reached.  Any literal ``%s`` argument in
    *command* is replaced by the message; the command is exec'd in a
    forked child process.
    """
    # perform a date query for now + minutes + slip
    start = self.now
    end = (start + timedelta(minutes=(minutes + 5)))

    event_list = self._search_for_events(start, end, None)

    message = ''

    for event in event_list:
        # skip this event if it already started
        # XXX maybe add a 2+ minute grace period here...
        if event['s'] < self.now:
            continue

        # not sure if 'reminders' always in event
        if use_reminders and 'reminders' in event \
                and 'overrides' in event['reminders']:
            if all(event['s'] - timedelta(minutes=r['minutes']) > self.now
                    for r in event['reminders']['overrides']):
                # don't remind if all reminders haven't arrived yet
                continue

        if self.options.get('military'):
            tmp_time_str = event['s'].strftime('%H:%M')
        else:
            tmp_time_str = \
                event['s'].strftime('%I:%M').lstrip('0') + \
                event['s'].strftime('%p').lower()

        message += '%s %s\n' % \
            (tmp_time_str, self._valid_title(event).strip())

    if not message:
        return

    cmd = shlex.split(command)

    # Substitute the message for any literal '%s' argument
    # (enumerate replaces the old zip(range(len(cmd)), cmd) idiom).
    for idx, arg in enumerate(cmd):
        if arg == '%s':
            cmd[idx] = message

    pid = os.fork()
    if not pid:
        os.execvp(cmd[0], cmd)
python
{ "resource": "" }
q31295
color_validator
train
def color_validator(input_str):
    """Accept only the colors supported by the Google Calendar API.

    The empty string is also allowed (meaning "no override").

    Returns:
        The validated input string.

    Raises:
        ValidationError: if *input_str* is not a recognized color.
    """
    # BUGFIX: this previously used ``assert`` for validation, which is
    # stripped under ``python -O``; use an explicit membership test.
    if input_str in VALID_OVERRIDE_COLORS + ['']:
        return input_str
    raise ValidationError(
        'Expected colors are: ' +
        ', '.join(color for color in VALID_OVERRIDE_COLORS) +
        '. (Ctrl-C to exit)\n')
python
{ "resource": "" }
q31296
reminder_validator
train
def reminder_validator(input_str):
    """Validate a reminder spec against REMINDER_REGEX.

    The literal string ``'.'`` is also accepted (sentinel for "done").

    Raises:
        ValidationError: if *input_str* matches neither form.
    """
    if input_str == '.' or re.match(REMINDER_REGEX, input_str):
        return input_str
    raise ValidationError('Expected format: <number><w|d|h|m> '
                          '<popup|email|sms>. (Ctrl-C to exit)\n')
python
{ "resource": "" }
q31297
BaseAPI.registration_id_chunks
train
def registration_id_chunks(self, registration_ids):
    """Yield successive chunks of at most ``FCM_MAX_RECIPIENTS`` ids.

    Args:
        registration_ids (list): FCM device registration IDs.

    Yields:
        list: slices of *registration_ids*, each no longer than
        ``self.FCM_MAX_RECIPIENTS`` (the FCM per-request limit).
    """
    # The Python 2 ``xrange`` shim was removed; ``range`` is lazy on 3.x.
    step = self.FCM_MAX_RECIPIENTS
    for start in range(0, len(registration_ids), step):
        yield registration_ids[start:start + step]
python
{ "resource": "" }
q31298
BaseAPI.json_dumps
train
def json_dumps(self, data):
    """Serialize *data* to compact, sorted-key JSON as UTF-8 bytes.

    Args:
        data (dict or list): payload to serialize.

    Returns:
        bytes: UTF-8 encoded JSON with no whitespace separators.
    """
    text = json.dumps(
        data,
        separators=(',', ':'),
        sort_keys=True,
        cls=self.json_encoder,
        ensure_ascii=False,
    )
    return text.encode('utf8')
python
{ "resource": "" }
q31299
BaseAPI.registration_info_request
train
def registration_info_request(self, registration_id):
    """Request registration info for *registration_id* from FCM.

    Args:
        registration_id: id to be checked.

    Returns:
        The HTTP response from the info endpoint (details included).
    """
    url = self.INFO_END_POINT + registration_id
    return self.requests_session.get(url, params={'details': 'true'})
python
{ "resource": "" }