_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q23800
Provider._delete_record
train
def _delete_record(self, identifier=None, rtype=None, name=None, content=None): """Deletes an existing record""" to_delete_ids = list() if identifier: to_delete_ids.append(identifier) else: for record in self._list_records(rtype=rtype, name=name, content=content): to_delete_ids.append(record["id"]) for to_delete_id in to_delete_ids: self._request_delete_dns_record_by_id(to_delete_id) return True
python
{ "resource": "" }
q23801
Provider._create_request_record
train
def _create_request_record(self, identifier, rtype, name, content, ttl, priority): # pylint: disable=too-many-arguments """Creates record for Subreg API calls""" record = collections.OrderedDict() # Mandatory content # Just for update - not for creation if identifier is not None: record['id'] = identifier record['type'] = rtype # Just for creation - not for update if name is not None: record['name'] = self._relative_name(name) # Optional content if content is not None: record['content'] = content if ttl is not None: record['ttl'] = ttl if priority is not None: record['prio'] = priority return record
python
{ "resource": "" }
q23802
Provider._create_response_record
train
def _create_response_record(self, response): """Creates record for lexicon API calls""" record = dict() record['id'] = response['id'] record['type'] = response['type'] record['name'] = self._full_name(response['name']) if 'content' in response: record['content'] = response['content'] or "" if 'ttl' in response: record['ttl'] = response['ttl'] if 'prio' in response: record['priority'] = response['prio'] return record
python
{ "resource": "" }
q23803
Provider._full_name
train
def _full_name(self, record_name): """Returns full domain name of a sub-domain name""" # Handle None and empty strings if not record_name: return self.domain return super(Provider, self)._full_name(record_name)
python
{ "resource": "" }
q23804
Provider._relative_name
train
def _relative_name(self, record_name): """Returns sub-domain of a domain name""" # Handle None and empty strings as None if not record_name: return None subdomain = super(Provider, self)._relative_name(record_name) return subdomain if subdomain else None
python
{ "resource": "" }
q23805
Provider._list_records_internal
train
def _list_records_internal(self, identifier=None, rtype=None, name=None, content=None): """Lists all records by the specified criteria""" response = self._request_get_dns_zone() if 'records' in response: # Interpret empty string as None because zeep does so too content_check = content if content != "" else None name_check = self._relative_name(name) # Stringize the identifier to prevent any rtype differences identifier_check = str( identifier) if identifier is not None else None filtered_records = [ record for record in response['records'] if ( identifier is None or str( record['id']) == identifier_check) and ( rtype is None or record['type'] == rtype) and ( name is None or record['name'] == name_check) and ( content is None or ( 'content' in record and record['content'] == content_check))] records = [self._create_response_record( filtered_record) for filtered_record in filtered_records] else: records = [] return records
python
{ "resource": "" }
q23806
Provider._guess_record
train
def _guess_record(self, rtype, name=None, content=None): """Tries to find existing unique record by type, name and content""" records = self._list_records_internal( identifier=None, rtype=rtype, name=name, content=content) if len(records) == 1: return records[0] if len(records) > 1: raise Exception( 'Identifier was not provided and several existing ' 'records match the request for {0}/{1}'.format(rtype, name)) raise Exception( 'Identifier was not provided and no existing records match ' 'the request for {0}/{1}'.format(rtype, name))
python
{ "resource": "" }
q23807
Provider._request_login
train
def _request_login(self, login, password): """Sends Login request""" return self._request_internal("Login", login=login, password=password)
python
{ "resource": "" }
q23808
Provider._request_add_dns_record
train
def _request_add_dns_record(self, record): """Sends Add_DNS_Record request""" return self._request_internal("Add_DNS_Record", domain=self.domain, record=record)
python
{ "resource": "" }
q23809
Provider._request_modify_dns_record
train
def _request_modify_dns_record(self, record): """Sends Modify_DNS_Record request""" return self._request_internal("Modify_DNS_Record", domain=self.domain, record=record)
python
{ "resource": "" }
q23810
Provider._request_delete_dns_record_by_id
train
def _request_delete_dns_record_by_id(self, identifier): """Sends Delete_DNS_Record request""" return self._request_internal("Delete_DNS_Record", domain=self.domain, record={'id': identifier})
python
{ "resource": "" }
q23811
Provider._request_internal
train
def _request_internal(self, command, **kwargs): """Make request parse response""" args = dict(kwargs) if self.ssid: args['ssid'] = self.ssid method = getattr(self.api, command) response = method(**args) if response and 'status' in response: if response['status'] == 'error': raise SubregError( message=response['error']['errormsg'], major=response['error']['errorcode']['major'], minor=response['error']['errorcode']['minor'] ) if response['status'] == 'ok': return response['data'] if 'data' in response else dict() raise Exception("Invalid status found in SOAP response") raise Exception('Invalid response')
python
{ "resource": "" }
q23812
Provider._authenticate
train
def _authenticate(self):
    """
    The Namecheap API is a little difficult to work with. Originally
    this method called PyNamecheap's `domains_getList`, which is
    connected to an API endpoint that only lists domains registered
    under the authenticating account. However, an authenticating
    Namecheap user may be permissioned to manage the DNS of additional
    domains. Namecheap's API does not offer a way to list these domains.

    This approach to detecting permission relies on some implementation
    details of the Namecheap API and the PyNamecheap module:

    * If the user does not own the domain, or is not permissioned to
      manage it in any way, Namecheap will return an error status, which
      PyNamecheap will instantly catch and raise.
    * If a non-error response is returned, the XML payload is analyzed.
      If the user owns the domain it immediately returns valid.
      Otherwise we look for "All" Modification rights, or the hosts-edit
      permission.

    This is not feature complete and most-likely misses multiple
    scenarios where:
    * a user is privileged to manage the domain DNS, but via another "right"
    * a user is privileged to manage the domain, but DNS is not configured

    Important Note:
    * the Namecheap API has inconsistent use of capitalization with
      strings and a case-insensitive match should be made. e.g. the
      following appear in a payload: `False` and 'false', 'OK' and 'Ok'.

    TODO:
    * check payload for PremiumDNS
        <PremiumDnsSubscription>
            <IsActive>false</IsActive>
        </PremiumDnsSubscription>
    * check payload for other types of DNS
        <DnsDetails ProviderType="FREE" IsUsingOurDNS="true" HostCount="5"
                    EmailType="No Email Service" DynamicDNSStatus="false"
                    IsFailover="false">
            <Nameserver>dns1.registrar-servers.com</Nameserver>
            <Nameserver>dns2.registrar-servers.com</Nameserver>
        </DnsDetails>
    """
    extra_payload = {'DomainName': self.domain, }
    try:
        xml = self.client._call('namecheap.domains.getInfo', extra_payload)  # pylint: disable=protected-access
    except namecheap.ApiError as err:
        # this will happen if there is an API connection error
        # OR if the user is not permissioned to manage this domain
        # OR the API request came from a not whitelisted IP
        # we should print the error, so people know how to correct it.
        raise Exception('Authentication failed: `%s`' % str(err))
    xpath = './/{%(ns)s}CommandResponse/{%(ns)s}DomainGetInfoResult' % {
        'ns': namecheap.NAMESPACE}
    domain_info = xml.find(xpath)

    def _check_hosts_permission():
        # this shouldn't happen
        if domain_info is None:
            return False
        # do they own the domain?
        if domain_info.attrib['IsOwner'].lower() == 'true':
            return True
        # look for rights
        xpath_alt = ('.//{%(ns)s}CommandResponse/{%(ns)s}DomainGetInfoResult'
                     '/{%(ns)s}Modificationrights' % {'ns': namecheap.NAMESPACE})
        rights_info = xml.find(xpath_alt)
        if rights_info is None:
            return False
        # all rights
        if rights_info.attrib['All'].lower() == 'true':
            return True
        # BUGFIX: Element.getchildren() was deprecated and removed in
        # Python 3.9; iterating the Element directly is the supported
        # equivalent and yields the same children.
        for right in rights_info:
            if right.attrib['Type'].lower() == 'hosts':
                # we're only looking at hosts, so we can exit now
                return right.text.lower() == 'ok'
        return None

    permissioned = _check_hosts_permission()
    if not permissioned:
        raise Exception('The domain {} is not controlled by this Namecheap '
                        'account'.format(self.domain))
    # FIXME What is this for?
    self.domain_id = self.domain
python
{ "resource": "" }
q23813
Provider._convert_to_namecheap
train
def _convert_to_namecheap(self, record): """ converts from lexicon format record to namecheap format record, suitable to sending through the api to namecheap""" name = record['name'] if name.endswith('.'): name = name[:-1] short_name = name[:name.find(self.domain) - 1] processed_record = { 'Type': record['type'], 'Name': short_name, 'TTL': record['ttl'], 'Address': record['content'], 'HostId': record['id'] } return processed_record
python
{ "resource": "" }
q23814
Provider._convert_to_lexicon
train
def _convert_to_lexicon(self, record): """ converts from namecheap raw record format to lexicon format record """ name = record['Name'] if self.domain not in name: name = "{}.{}".format(name, self.domain) processed_record = { 'type': record['Type'], 'name': '{0}.{1}'.format(record['Name'], self.domain), 'ttl': record['TTL'], 'content': record['Address'], 'id': record['HostId'] } return processed_record
python
{ "resource": "" }
q23815
Provider._authenticate
train
def _authenticate(self): """An innocent call to check that the credentials are okay.""" response = self._get("/v1/domains/{0}".format(self.domain)) self.domain_id = response["domain"]["id"]
python
{ "resource": "" }
q23816
Provider._create_record
train
def _create_record(self, rtype, name, content): """Create record if doesnt already exist with same content""" # check if record already exists existing_records = self._list_records(rtype, name, content) if len(existing_records) >= 1: return True record = { "record_type": rtype, "name": self._relative_name(name), "content": content, } if self._get_lexicon_option("ttl"): record["ttl"] = self._get_lexicon_option("ttl") if self._get_lexicon_option("priority"): record["prio"] = self._get_lexicon_option("priority") payload = self._post( "/v1/domains/{0}/records".format(self.domain), {"record": record}, ) status = "id" in payload.get("record", {}) LOGGER.debug("create_record: %s", status) return status
python
{ "resource": "" }
q23817
Provider._list_records
train
def _list_records(self, rtype=None, name=None, content=None):
    """List all records.

    record_type and name are used to filter within the query itself;
    content is filtered afterwards. An empty list is returned if no
    records are found.
    """
    filter_query = {}
    if rtype:
        filter_query["record_type"] = rtype
    if name:
        filter_query["name"] = self._relative_name(name)
    payload = self._get(
        "/v1/domains/{0}/records".format(self.domain),
        query_params=filter_query,
    )
    records = []
    for data in payload:
        record = data["record"]
        # The API cannot filter on content, so do it client-side.
        if content and record["content"] != content:
            continue
        rname = self.domain if record["name"] == "" else ".".join((record["name"], self.domain))
        processed_record = {
            "type": record["record_type"],
            "name": rname,
            "ttl": record["ttl"],
            "content": record["content"],
            "id": record["id"],
        }
        if record["prio"]:
            processed_record["options"] = {"mx": {"priority": record["prio"]}}
        records.append(processed_record)
    LOGGER.debug("list_records: %s", records)
    return records
python
{ "resource": "" }
q23818
Provider._delete_record
train
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """Delete an existing record.

    If the record doesn't exist, does nothing.
    """
    if identifier:
        identifiers = [identifier]
    else:
        # Resolve matching records when no explicit id was supplied.
        identifiers = [record["id"] for record in self._list_records(rtype, name, content)]
    LOGGER.debug("delete_records: %s", identifiers)
    for record_id in identifiers:
        self._delete(
            "/v1/domains/{0}/records/{1}".format(self.domain, record_id)
        )
        LOGGER.debug("delete_record: %s", record_id)
    LOGGER.debug("delete_record: %s", True)
    return True
python
{ "resource": "" }
q23819
provider_parser
train
def provider_parser(subparser):
    """Configure provider parser for auto provider"""
    subparser.description = ''' Provider 'auto' enables the Lexicon provider auto-discovery. Based on the nameservers declared for the given domain, Lexicon will try to find the DNS provider holding the DNS zone if it is supported. Actual DNS zone read/write operations will be delegated to the provider found: every environment variable or command line specific to this provider can be passed to Lexicon and will be processed accordingly. '''
    subparser.add_argument("--mapping-override",
                           metavar="[DOMAIN]:[PROVIDER], ...",
                           help="comma separated list of elements in the form of "
                                "[DOMAIN]:[PROVIDER] to authoritatively map a "
                                "particular domain to a particular provider")
    # Explore and load the arguments available for every provider into the 'auto' provider.
    for provider_name, provider_module in AVAILABLE_PROVIDERS.items():
        parser = argparse.ArgumentParser(add_help=False)
        provider_module.provider_parser(parser)
        # Re-register each option as --{provider}-{option} with a namespaced
        # dest so provider options don't collide with each other.
        for action in parser._actions:  # pylint: disable=protected-access
            action.option_strings = [re.sub(
                r'^--(.*)$', r'--{0}-\1'.format(provider_name), option)
                for option in action.option_strings]
            action.dest = 'auto_{0}_{1}'.format(provider_name, action.dest)
            subparser._add_action(action)
python
{ "resource": "" }
q23820
generate_base_provider_parser
train
def generate_base_provider_parser():
    """Build the base argument parser shared by all dns providers."""
    base_parser = argparse.ArgumentParser(add_help=False)
    # Positional arguments — their order is part of the CLI contract.
    base_parser.add_argument('action', help='specify the action to take', default='list',
                             choices=['create', 'list', 'update', 'delete'])
    base_parser.add_argument('domain', help='specify the domain, supports subdomains as well')
    base_parser.add_argument('type', help='specify the entry type', default='TXT',
                             choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
    # Optional record selectors and attributes.
    base_parser.add_argument('--name', help='specify the record name')
    base_parser.add_argument('--content', help='specify the record content')
    base_parser.add_argument('--ttl', type=int, help='specify the record time-to-live')
    base_parser.add_argument('--priority', help='specify the record priority')
    base_parser.add_argument('--identifier',
                             help='specify the record for update or delete actions')
    base_parser.add_argument('--log_level', help='specify the log level', default='ERROR',
                             choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'])
    base_parser.add_argument('--output',
                             help=('specify the type of output: by default a formatted table (TABLE), '
                                   'a formatted table without header (TABLE-NO-HEADER), '
                                   'a JSON string (JSON) or no output (QUIET)'),
                             default='TABLE',
                             choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET'])
    return base_parser
python
{ "resource": "" }
q23821
generate_cli_main_parser
train
def generate_cli_main_parser():
    """Using all providers available, generate a parser that will be used by Lexicon CLI"""
    parser = argparse.ArgumentParser(
        description='Create, Update, Delete, List DNS entries')
    parser.add_argument('--version', help='show the current version of lexicon',
                        action='version', version='%(prog)s {0}'
                        .format(discovery.lexicon_version()))
    parser.add_argument('--delegated', help='specify the delegated domain')
    parser.add_argument('--config-dir', default=os.getcwd(),
                        help='specify the directory where to search lexicon.yml and '
                             'lexicon_[provider].yml configuration files '
                             '(default: current directory).')
    subparsers = parser.add_subparsers(
        dest='provider_name', help='specify the DNS provider to use')
    subparsers.required = True
    # One sub-command per discovered provider, each inheriting the shared
    # base provider arguments.
    for provider, available in discovery.find_providers().items():
        provider_module = importlib.import_module(
            'lexicon.providers.' + provider)
        provider_parser = getattr(provider_module, 'provider_parser')
        subparser = subparsers.add_parser(provider,
                                          help='{0} provider'.format(provider),
                                          parents=[generate_base_provider_parser()])
        provider_parser(subparser)
        # Providers with missing optional dependencies stay listed but warn.
        if not available:
            subparser.epilog = ('WARNING: some required dependencies for this provider are not '
                                'installed. Please install lexicon[{0}] first before using it.'
                                .format(provider))
    return parser
python
{ "resource": "" }
q23822
Provider._create_record
train
def _create_record(self, rtype, name, content):
    """
    Create a resource record. If a record already exists with the same
    content, do nothing.
    """
    result = False
    name = self._relative_name(name)
    # TODO: should assert that this is an int
    ttl = self.ttl if self.ttl else None
    with localzone.manage(self.filename, self.origin, autosave=True) as zone:
        if zone.add_record(name, rtype, content, ttl=ttl):  # pylint: disable=no-member
            result = True
    LOGGER.debug("create_record: %s", result)
    return result
python
{ "resource": "" }
q23823
Provider._list_records
train
def _list_records(self, rtype=None, name=None, content=None):
    """
    Return a list of records matching the supplied params. If no params are
    provided, then return all zone records. If no records are found, return
    an empty list.
    """
    if name:
        name = self._relative_name(name)
    # With no type filter, query every record type.
    filter_query = {"rdtype": rtype if rtype else "ANY", "name": name, "content": content}
    with localzone.manage(self.filename, self.origin, autosave=True) as zone:
        records = zone.find_record(**filter_query)  # pylint: disable=no-member
    result = []
    for record in records:
        entry = {
            "type": record.rdtype,
            "name": self._full_name(record.name),
            "ttl": record.ttl,
            "content": record.content,
            "id": record.hashid,
        }
        if entry["type"] == "TXT":
            # Strip the quoting around TXT payloads.
            entry["content"] = entry["content"].replace('"', "")
        result.append(entry)
    LOGGER.debug("list_records: %s", result)
    return result
python
{ "resource": "" }
q23824
Provider._update_record
train
def _update_record(self, identifier, rtype=None, name=None, content=None):
    """
    Update a record. Returns `False` if no matching record is found.
    """
    result = False
    # TODO: some providers allow content-based updates without supplying an
    # ID, and therefore `identifier` is here optional. If we don't receive
    # an ID, look it up.
    if not identifier and rtype and name:
        candidates = self._list_records(rtype, name)
        if len(candidates) == 1:
            identifier = candidates[0]["id"]
    if identifier and content:
        with localzone.manage(self.filename, self.origin, autosave=True) as zone:
            if zone.update_record(identifier, content):  # pylint: disable=no-member
                result = True
    LOGGER.debug("update_record: %s", result)
    return result
python
{ "resource": "" }
q23825
set_random_state
train
def set_random_state(state):
    """Force both factory.fuzzy's and faker's random generators into `state`."""
    # Mark the shared generator as explicitly seeded.
    randgen.state_set = True
    faker.generator.random.setstate(state)
    randgen.setstate(state)
python
{ "resource": "" }
q23826
reseed_random
train
def reseed_random(seed):
    """Reseed factory.fuzzy's random generator from `seed`."""
    # Derive a full generator state from the seed, then force-apply it.
    set_random_state(random.Random(seed).getstate())
python
{ "resource": "" }
q23827
get_model
train
def get_model(app, model):
    """Wrapper around django's get_model, resolved lazily on first use."""
    if 'get_model' not in _LAZY_LOADS:
        _lazy_load_get_model()
    return _LAZY_LOADS['get_model'](app, model)
python
{ "resource": "" }
q23828
_lazy_load_get_model
train
def _lazy_load_get_model():
    """Lazy loading of get_model.

    get_model loads django.conf.settings, which may fail if the settings
    haven't been configured yet.
    """
    if django is None:
        # Django isn't installed: surface the original import failure
        # at call time instead of at module import.
        def _get_model(app, model):
            raise import_failure
        _LAZY_LOADS['get_model'] = _get_model
    else:
        from django import apps as django_apps
        _LAZY_LOADS['get_model'] = django_apps.apps.get_model
python
{ "resource": "" }
q23829
DjangoModelFactory._get_or_create
train
def _get_or_create(cls, model_class, *args, **kwargs):
    """Create an instance of the model through objects.get_or_create."""
    manager = cls._get_manager(model_class)
    assert 'defaults' not in cls._meta.django_get_or_create, (
        "'defaults' is a reserved keyword for get_or_create "
        "(in %s._meta.django_get_or_create=%r)"
        % (cls, cls._meta.django_get_or_create))
    # Split the declared lookup fields out of kwargs; every remaining
    # kwarg becomes part of the 'defaults' used only on creation.
    key_fields = {}
    for field in cls._meta.django_get_or_create:
        if field not in kwargs:
            raise errors.FactoryError(
                "django_get_or_create - "
                "Unable to find initialization value for '%s' in factory %s"
                % (field, cls.__name__))
        key_fields[field] = kwargs.pop(field)
    key_fields['defaults'] = kwargs
    try:
        instance, _created = manager.get_or_create(*args, **key_fields)
    except IntegrityError:
        # get_or_create lost a race or hit a uniqueness constraint not
        # covered by the lookup fields: retry as a plain get.
        try:
            instance = manager.get(**cls._original_params)
        except manager.model.DoesNotExist:
            raise ValueError(
                "django_get_or_create - Unable to create a new object "
                "due an IntegrityError raised based on "
                "your model's uniqueness constraints. "
                "DoesNotExist: Unable to find an existing object based on "
                "the fields specified in your factory instance.")
    return instance
python
{ "resource": "" }
q23830
FileField.generate
train
def generate(self, step, params):
    """Generate the django File for this field."""
    # Recurse into a DictFactory so params may depend on one another.
    resolved = step.recurse(base.DictFactory, params, force_sequence=step.sequence)
    name, content = self._make_content(resolved)
    return django_files.File(content.file, name)
python
{ "resource": "" }
q23831
import_object
train
def import_object(module_name, attribute_name):
    """Import an object from its absolute path.

    Example:
    >>> import_object('datetime', 'datetime')
    <type 'datetime.datetime'>
    """
    # Py2 compatibility: force str (i.e bytes) when importing.
    attr = str(attribute_name)
    module = __import__(str(module_name), {}, {}, [attr], 0)
    return getattr(module, attr)
python
{ "resource": "" }
q23832
sort_ordered_objects
train
def sort_ordered_objects(items, getter=lambda x: x):
    """Sort an iterable of OrderedBase instances.

    Args:
        items (iterable): the objects to sort
        getter (callable or None): a function to extract the OrderedBase
            instance from an object.

    Examples:
        >>> sort_ordered_objects([x, y, z])
        >>> sort_ordered_objects(v.items(), getter=lambda e: e[1])
    """
    def _creation_order(obj):
        # Objects without a creation counter sort first (-1).
        return getattr(getter(obj), OrderedBase.CREATION_COUNTER_FIELD, -1)
    return sorted(items, key=_creation_order)
python
{ "resource": "" }
q23833
resolve_attribute
train
def resolve_attribute(name, bases, default=None):
    """Find the first definition of an attribute according to MRO order."""
    # Walk the bases in order and return the first one defining `name`.
    return next(
        (getattr(base, name) for base in bases if hasattr(base, name)),
        default,
    )
python
{ "resource": "" }
q23834
use_strategy
train
def use_strategy(new_strategy):
    """Force the use of a different strategy.

    This is an alternative to setting default_strategy in the class definition.
    """
    # Class decorator: mutate the factory's meta and hand the class back.
    def decorator(klass):
        klass._meta.strategy = new_strategy
        return klass
    return decorator
python
{ "resource": "" }
q23835
FactoryOptions._build_default_options
train
def _build_default_options(self):
    """Provide the default value for all allowed fields.

    Custom FactoryOptions classes should override this method
    to update() its return value.
    """
    # (name, default, inherited-by-subfactories)
    specs = [
        ('model', None, True),
        ('abstract', False, False),
        ('strategy', enums.CREATE_STRATEGY, True),
        ('inline_args', (), True),
        ('exclude', (), True),
        ('rename', {}, True),
    ]
    return [OptionDefault(name, default, inherit=inherit)
            for name, default, inherit in specs]
python
{ "resource": "" }
q23836
FactoryOptions._get_counter_reference
train
def _get_counter_reference(self): """Identify which factory should be used for a shared counter.""" if (self.model is not None and self.base_factory is not None and self.base_factory._meta.model is not None and issubclass(self.model, self.base_factory._meta.model)): return self.base_factory._meta.counter_reference else: return self
python
{ "resource": "" }
q23837
FactoryOptions._initialize_counter
train
def _initialize_counter(self): """Initialize our counter pointer. If we're the top-level factory, instantiate a new counter Otherwise, point to the top-level factory's counter. """ if self._counter is not None: return if self.counter_reference is self: self._counter = _Counter(seq=self.factory._setup_next_sequence()) else: self.counter_reference._initialize_counter() self._counter = self.counter_reference._counter
python
{ "resource": "" }
q23838
FactoryOptions._is_declaration
train
def _is_declaration(self, name, value): """Determines if a class attribute is a field value declaration. Based on the name and value of the class attribute, return ``True`` if it looks like a declaration of a default field value, ``False`` if it is private (name starts with '_') or a classmethod or staticmethod. """ if isinstance(value, (classmethod, staticmethod)): return False elif enums.get_builder_phase(value): # All objects with a defined 'builder phase' are declarations. return True return not name.startswith("_")
python
{ "resource": "" }
q23839
FactoryOptions._check_parameter_dependencies
train
def _check_parameter_dependencies(self, parameters): """Find out in what order parameters should be called.""" # Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies. # deep_revdeps: set of fields a field depend indirectly upon deep_revdeps = collections.defaultdict(set) # Actual, direct dependencies deps = collections.defaultdict(set) for name, parameter in parameters.items(): if isinstance(parameter, declarations.Parameter): field_revdeps = parameter.get_revdeps(parameters) if not field_revdeps: continue deep_revdeps[name] = set.union(*(deep_revdeps[dep] for dep in field_revdeps)) deep_revdeps[name] |= set(field_revdeps) for dep in field_revdeps: deps[dep].add(name) # Check for cyclical dependencies cyclic = [name for name, field_deps in deep_revdeps.items() if name in field_deps] if cyclic: raise errors.CyclicDefinitionError( "Cyclic definition detected on %r; Params around %s" % (self.factory, ', '.join(cyclic))) return deps
python
{ "resource": "" }
q23840
BaseFactory.reset_sequence
train
def reset_sequence(cls, value=None, force=False):
    """Reset the sequence counter.

    Args:
        value (int or None): the new 'next' sequence value; if None,
            recompute the next value from _setup_next_sequence().
        force (bool): whether to force-reset parent sequence counters
            in a factory inheritance chain.
    """
    # Delegate to the options object, which owns the (possibly shared) counter.
    cls._meta.reset_sequence(value, force=force)
python
{ "resource": "" }
q23841
BaseFactory.attributes
train
def attributes(cls, create=False, extra=None):
    """Build a dict of attribute values, respecting declaration order.

    The process is:
    - Handle 'orderless' attributes, overriding defaults with provided
      kwargs when applicable
    - Handle ordered attributes, overriding them with provided kwargs
      when applicable; the current list of computed attributes is
      available to the currently processed object.
    """
    warnings.warn(
        "Usage of Factory.attributes() is deprecated.",
        DeprecationWarning,
        stacklevel=2,
    )
    attrs = cls._meta.pre_declarations.as_dict()
    attrs.update(extra or {})
    from . import helpers
    return helpers.make_factory(dict, **attrs)
python
{ "resource": "" }
q23842
BaseFactory.declarations
train
def declarations(cls, extra_defs=None):
    """Retrieve a copy of the declared attributes.

    Args:
        extra_defs (dict): additional definitions to insert into the
            retrieved DeclarationDict.
    """
    warnings.warn(
        "Factory.declarations is deprecated; use Factory._meta.pre_declarations instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    merged = cls._meta.pre_declarations.as_dict()
    merged.update(extra_defs or {})
    return merged
python
{ "resource": "" }
q23843
BaseFactory._generate
train
def _generate(cls, strategy, params):
    """Generate the object.

    Args:
        params (dict): attributes to use for generating the object
        strategy: the strategy to use
    """
    # Abstract factories have no model and cannot be instantiated.
    if cls._meta.abstract:
        raise errors.FactoryError(
            "Cannot generate instances of abstract factory %(f)s; "
            "Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract "
            "is either not set or False." % dict(f=cls.__name__))
    return builder.StepBuilder(cls._meta, params, strategy).build()
python
{ "resource": "" }
q23844
BaseFactory.build_batch
train
def build_batch(cls, size, **kwargs):
    """Build a batch of instances of the given class, with overriden attrs.

    Args:
        size (int): the number of instances to build

    Returns:
        object list: the built instances
    """
    batch = []
    for _ in range(size):
        batch.append(cls.build(**kwargs))
    return batch
python
{ "resource": "" }
q23845
BaseFactory.create_batch
train
def create_batch(cls, size, **kwargs):
    """Create a batch of instances of the given class, with overriden attrs.

    Args:
        size (int): the number of instances to create

    Returns:
        object list: the created instances
    """
    batch = []
    for _ in range(size):
        batch.append(cls.create(**kwargs))
    return batch
python
{ "resource": "" }
q23846
BaseFactory.stub_batch
train
def stub_batch(cls, size, **kwargs):
    """Stub a batch of instances of the given class, with overriden attrs.

    Args:
        size (int): the number of instances to stub

    Returns:
        object list: the stubbed instances
    """
    batch = []
    for _ in range(size):
        batch.append(cls.stub(**kwargs))
    return batch
python
{ "resource": "" }
q23847
make_factory
train
def make_factory(klass, **kwargs):
    """Create a new, simple factory for the given class."""
    name = '%sFactory' % klass.__name__

    class Meta:
        model = klass

    kwargs['Meta'] = Meta
    parent = kwargs.pop('FACTORY_CLASS', base.Factory)
    # Build the factory class through base.Factory's own metaclass.
    metaclass = type(base.Factory)
    factory_class = metaclass.__new__(metaclass, name, (parent,), kwargs)
    factory_class.__name__ = name
    factory_class.__doc__ = 'Auto-generated factory for class %s' % klass
    return factory_class
python
{ "resource": "" }
q23848
generate_batch
train
def generate_batch(klass, strategy, size, **kwargs):
    """Create a factory for the given class, and generate instances."""
    factory_class = make_factory(klass, **kwargs)
    return factory_class.generate_batch(strategy, size)
python
{ "resource": "" }
q23849
simple_generate_batch
train
def simple_generate_batch(klass, create, size, **kwargs):
    """Create a factory for the given class, and simple_generate instances."""
    factory_class = make_factory(klass, **kwargs)
    return factory_class.simple_generate_batch(create, size)
python
{ "resource": "" }
q23850
deepgetattr
train
def deepgetattr(obj, name, default=_UNSPECIFIED):
    """Try to retrieve the given attribute of an object, digging on '.'.

    This is an extended getattr, digging deeper if '.' is found.

    Args:
        obj (object): the object of which an attribute should be read
        name (str): the name of an attribute to look up.
        default (object): the default value to use if the attribute wasn't found

    Returns:
        the attribute pointed to by 'name', splitting on '.'.

    Raises:
        AttributeError: if obj has no 'name' attribute.
    """
    try:
        head, dot, tail = name.partition('.')
        if dot:
            # Recurse one level down the dotted path.
            return deepgetattr(getattr(obj, head), tail, default)
        return getattr(obj, name)
    except AttributeError:
        if default is _UNSPECIFIED:
            raise
        return default
python
{ "resource": "" }
q23851
ContainerAttribute.evaluate
train
def evaluate(self, instance, step, extra):
    """Evaluate the current ContainerAttribute.

    Args:
        instance: the (lazy stub of the) object being constructed.
        step: the current build step; step.chain holds the lazy stubs of
            the factories being evaluated in a chain, each item being a
            future field of the next one.
    """
    # Strip the current instance from the chain of containers.
    containers = step.chain[1:]
    if self.strict and not containers:
        raise TypeError(
            "A ContainerAttribute in 'strict' mode can only be used "
            "within a SubFactory.")
    return self.function(instance, containers)
python
{ "resource": "" }
q23852
DeclarationSet.join
train
def join(cls, root, subkey):
    """Rebuild a full declaration name from its components.

    Inverse of ``split``: for every string x, ``join(*split(x)) == x``.
    A ``None`` subkey means the name had no sub-component.
    """
    return root if subkey is None else enums.SPLITTER.join((root, subkey))
python
{ "resource": "" }
q23853
StepBuilder.build
train
def build(self, parent_step=None, force_sequence=None): """Build a factory instance.""" # TODO: Handle "batch build" natively pre, post = parse_declarations( self.extras, base_pre=self.factory_meta.pre_declarations, base_post=self.factory_meta.post_declarations, ) if force_sequence is not None: sequence = force_sequence elif self.force_init_sequence is not None: sequence = self.force_init_sequence else: sequence = self.factory_meta.next_sequence() step = BuildStep( builder=self, sequence=sequence, parent_step=parent_step, ) step.resolve(pre) args, kwargs = self.factory_meta.prepare_arguments(step.attributes) instance = self.factory_meta.instantiate( step=step, args=args, kwargs=kwargs, ) postgen_results = {} for declaration_name in post.sorted(): declaration = post[declaration_name] unrolled_context = declaration.declaration.unroll_context( instance=instance, step=step, context=declaration.context, ) postgen_context = PostGenerationContext( value_provided='' in unrolled_context, value=unrolled_context.get(''), extra={k: v for k, v in unrolled_context.items() if k != ''}, ) postgen_results[declaration_name] = declaration.declaration.call( instance=instance, step=step, context=postgen_context, ) self.factory_meta.use_postgeneration_results( instance=instance, step=step, results=postgen_results, ) return instance
python
{ "resource": "" }
q23854
StepBuilder.recurse
train
def recurse(self, factory_meta, extras):
    """Recurse into a sub-factory call.

    Returns a new builder of the same class, carrying this builder's
    strategy over to the sub-factory.
    """
    builder_class = self.__class__
    return builder_class(factory_meta, extras, strategy=self.strategy)
python
{ "resource": "" }
q23855
parse
train
def parse(code, mode='exec', **exception_kwargs):
    """Parse an expression into AST

    :param code: source string to parse.
    :param mode: compile mode, as for the builtin ``compile``.
    :param exception_kwargs: extra keyword arguments forwarded to the
     raised ``SyntaxException`` (e.g. source/line information).
    """
    try:
        return _ast_util.parse(code, '<unknown>', mode)
    except Exception:
        # Re-raise any parse failure as a Mako SyntaxException, embedding
        # the original exception type/message plus a snippet of the code.
        raise exceptions.SyntaxException(
            "(%s) %s (%r)" % (
                compat.exception_as().__class__.__name__,
                compat.exception_as(),
                code[0:50]
            ), **exception_kwargs)
python
{ "resource": "" }
q23856
tokenize
train
def tokenize(expr):
    """
    Parse a string expression into a list of tokens that can be used as a
    path into a Python datastructure.

    String tokens are dict keys; ``[idx]`` segments become integer tokens
    for list indexing.  A backslash escapes the next character so ``.``,
    ``[`` and ``]`` can appear literally inside a key.

    :param expr: path expression such as ``"a[0].b"``.
    :return: list of str/int tokens, e.g. ``['a', 0, 'b']``.
    """
    tokens = []
    escape = False
    cur_token = ''
    for c in expr:
        # Idiomatic truthiness checks replace `escape == True` and
        # `len(cur_token) > 0`; behavior is unchanged.
        if escape:
            # Previous char was a backslash: take this char literally.
            cur_token += c
            escape = False
        elif c == '\\':
            # Next char will be escaped.
            escape = True
        elif c == '[':
            # Start of an index token (list); flush any pending key.
            if cur_token:
                tokens.append(cur_token)
                cur_token = ''
        elif c == ']':
            # End of an index token; indices are stored as ints.
            if cur_token:
                tokens.append(int(cur_token))
                cur_token = ''
        elif c == '.':
            # End of a key token. Next token defaults to a key (dict).
            if cur_token:
                tokens.append(cur_token)
                cur_token = ''
        else:
            # Append char to the current token name.
            cur_token += c
    if cur_token:
        tokens.append(cur_token)
    return tokens
python
{ "resource": "" }
q23857
compile
train
def compile(node, uri, filename=None, default_filters=None, buffer_filters=None, imports=None, future_imports=None, source_encoding=None, generate_magic_comment=True, disable_unicode=False, strict_undefined=False, enable_loop=True, reserved_names=frozenset()): """Generate module source code given a parsetree node, uri, and optional source filename""" # if on Py2K, push the "source_encoding" string to be # a bytestring itself, as we will be embedding it into # the generated source and we don't want to coerce the # result into a unicode object, in "disable_unicode" mode if not compat.py3k and isinstance(source_encoding, compat.text_type): source_encoding = source_encoding.encode(source_encoding) buf = util.FastEncodingBuffer() printer = PythonPrinter(buf) _GenerateRenderMethod(printer, _CompileContext(uri, filename, default_filters, buffer_filters, imports, future_imports, source_encoding, generate_magic_comment, disable_unicode, strict_undefined, enable_loop, reserved_names), node) return buf.getvalue()
python
{ "resource": "" }
q23858
mangle_mako_loop
train
def mangle_mako_loop(node, printer):
    """converts a for loop into a context manager wrapped around a for loop
    when access to the `loop` variable has been detected in the for loop body

    :param node: a ControlLine node for a ``for`` loop.
    :param printer: the code printer to emit setup lines into.
    :return: the (possibly rewritten) loop source text.
    """
    loop_variable = LoopVariable()
    node.accept_visitor(loop_variable)
    if loop_variable.detected:
        # Flag the last child so code generation knows to pop the loop
        # context when the block ends.
        node.nodes[-1].has_loop_context = True
        match = _FOR_LOOP.match(node.text)
        if match:
            printer.writelines(
                'loop = __M_loop._enter(%s)' % match.group(2),
                'try:'
                #'with __M_loop(%s) as loop:' % match.group(2)
            )
            # Re-point the for loop at the wrapped iterable.
            text = 'for %s in loop:' % match.group(1)
        else:
            raise SyntaxError("Couldn't apply loop context: %s" % node.text)
    else:
        # No ``loop`` usage detected: leave the source untouched.
        text = node.text
    return text
python
{ "resource": "" }
q23859
_GenerateRenderMethod.write_render_callable
train
def write_render_callable(self, node, name, args, buffered, filtered,
                          cached):
    """write a top-level render callable.

    this could be the main render() method or that of a top-level def.

    :param name: name of the generated function.
    :param args: argument expression strings for its signature.
    :param buffered/filtered/cached: flags affecting body setup/teardown.
    """
    if self.in_def:
        decorator = node.decorator
        if decorator:
            self.printer.writeline(
                "@runtime._decorate_toplevel(%s)" % decorator)

    self.printer.start_source(node.lineno)
    self.printer.writelines(
        "def %s(%s):" % (name, ','.join(args)),
        # push new frame, assign current frame to __M_caller
        "__M_caller = context.caller_stack._push_frame()",
        "try:"
    )
    if buffered or filtered or cached:
        self.printer.writeline("context._push_buffer()")

    self.identifier_stack.append(
        self.compiler.identifiers.branch(self.node))
    if (not self.in_def or self.node.is_block) and '**pageargs' in args:
        self.identifier_stack[-1].argument_declared.add('pageargs')

    # When at top level with assignments/arguments, mirror them into a
    # __M_locals dict so nested defs can see them via the context.
    if not self.in_def and (
        len(self.identifiers.locally_assigned) > 0 or
        len(self.identifiers.argument_declared) > 0
    ):
        self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
                               ','.join([
                                   "%s=%s" % (x, x) for x in
                                   self.identifiers.argument_declared
                               ]))

    self.write_variable_declares(self.identifiers, toplevel=True)

    for n in self.node.nodes:
        n.accept_visitor(self)

    self.write_def_finish(self.node, buffered, filtered, cached)
    self.printer.writeline(None)
    self.printer.write_blanks(2)
    if cached:
        self.write_cache_decorator(
            node, name,
            args, buffered,
            self.identifiers, toplevel=True)
python
{ "resource": "" }
q23860
_GenerateRenderMethod.write_namespaces
train
def write_namespaces(self, namespaces):
    """write the module-level namespace-generating callable.

    Emits ``_mako_get_namespace`` (lazy lookup with caching on the
    context) and ``_mako_generate_namespaces`` (one block per declared
    namespace).
    """
    self.printer.writelines(
        "def _mako_get_namespace(context, name):",
        "try:",
        "return context.namespaces[(__name__, name)]",
        "except KeyError:",
        "_mako_generate_namespaces(context)",
        "return context.namespaces[(__name__, name)]",
        None, None
    )
    self.printer.writeline("def _mako_generate_namespaces(context):")

    for node in namespaces.values():
        if 'import' in node.attributes:
            self.compiler.has_ns_imports = True
        self.printer.start_source(node.lineno)
        if len(node.nodes):
            # The namespace body declares defs/blocks: wrap them in a
            # local make_namespace() that returns the exported callables.
            self.printer.writeline("def make_namespace():")
            export = []
            identifiers = self.compiler.identifiers.branch(node)
            self.in_def = True

            class NSDefVisitor(object):
                def visitDefTag(s, node):
                    s.visitDefOrBase(node)

                def visitBlockTag(s, node):
                    s.visitDefOrBase(node)

                def visitDefOrBase(s, node):
                    if node.is_anonymous:
                        raise exceptions.CompileException(
                            "Can't put anonymous blocks inside "
                            "<%namespace>",
                            **node.exception_kwargs
                        )
                    self.write_inline_def(node, identifiers, nested=False)
                    export.append(node.funcname)

            vis = NSDefVisitor()
            for n in node.nodes:
                n.accept_visitor(vis)
            self.printer.writeline("return [%s]" % (','.join(export)))
            self.printer.writeline(None)
            self.in_def = False
            callable_name = "make_namespace()"
        else:
            callable_name = "None"

        # Choose the namespace flavor based on the tag's attributes.
        if 'file' in node.parsed_attributes:
            self.printer.writeline(
                "ns = runtime.TemplateNamespace(%r,"
                " context._clean_inheritance_tokens(),"
                " templateuri=%s, callables=%s, "
                " calling_uri=_template_uri)" % (
                    node.name,
                    node.parsed_attributes.get('file', 'None'),
                    callable_name,
                )
            )
        elif 'module' in node.parsed_attributes:
            self.printer.writeline(
                "ns = runtime.ModuleNamespace(%r,"
                " context._clean_inheritance_tokens(),"
                " callables=%s, calling_uri=_template_uri,"
                " module=%s)" % (
                    node.name,
                    callable_name,
                    node.parsed_attributes.get(
                        'module', 'None')
                )
            )
        else:
            self.printer.writeline(
                "ns = runtime.Namespace(%r,"
                " context._clean_inheritance_tokens(),"
                " callables=%s, calling_uri=_template_uri)" % (
                    node.name,
                    callable_name,
                )
            )
        if eval(node.attributes.get('inheritable', "False")):
            self.printer.writeline("context['self'].%s = ns" % (node.name))

        self.printer.writeline(
            "context.namespaces[(__name__, %s)] = ns" % repr(node.name))
        self.printer.write_blanks(1)
    if not len(namespaces):
        # No namespaces declared: keep the generated function syntactically
        # valid.
        self.printer.writeline("pass")
    self.printer.writeline(None)
python
{ "resource": "" }
q23861
_GenerateRenderMethod.write_variable_declares
train
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
    """write variable declarations at the top of a function.

    the variable declarations are in the form of callable definitions for
    defs and/or name lookup within the function's context argument. the
    names declared are based on the names that are referenced in the
    function body, which don't otherwise have any explicit assignment
    operation. names that are assigned within the body are assumed to be
    locally-scoped variables and are not separately declared.

    for def callable definitions, if the def is a top-level callable then
    a 'stub' callable is generated which wraps the current Context into a
    closure. if the def is not top-level, it is fully rendered as a local
    closure.

    :param identifiers: the _Identifiers scope state for this function.
    :param toplevel: True when writing the module-level render body.
    :param limit: optional set restricting which names are declared.
    """
    # collection of all defs available to us in this scope
    comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
    to_write = set()

    # write "context.get()" for all variables we are going to
    # need that arent in the namespace yet
    to_write = to_write.union(identifiers.undeclared)

    # write closure functions for closures that we define
    # right here
    to_write = to_write.union(
        [c.funcname for c in identifiers.closuredefs.values()])

    # remove identifiers that are declared in the argument
    # signature of the callable
    to_write = to_write.difference(identifiers.argument_declared)

    # remove identifiers that we are going to assign to.
    # in this way we mimic Python's behavior,
    # i.e. assignment to a variable within a block
    # means that variable is now a "locally declared" var,
    # which cannot be referenced beforehand.
    to_write = to_write.difference(identifiers.locally_declared)

    # ``loop`` is reserved when the loop context is enabled and is set up
    # separately below rather than fetched from the context.
    if self.compiler.enable_loop:
        has_loop = "loop" in to_write
        to_write.discard("loop")
    else:
        has_loop = False

    # if a limiting set was sent, constraint to those items in that list
    # (this is used for the caching decorator)
    if limit is not None:
        to_write = to_write.intersection(limit)

    if toplevel and getattr(self.compiler, 'has_ns_imports', False):
        self.printer.writeline("_import_ns = {}")
        self.compiler.has_imports = True
        for ident, ns in self.compiler.namespaces.items():
            if 'import' in ns.attributes:
                self.printer.writeline(
                    "_mako_get_namespace(context, %r)."
                    "_populate(_import_ns, %r)" % (
                        ident,
                        re.split(r'\s*,\s*', ns.attributes['import'])
                    ))

    if has_loop:
        self.printer.writeline(
            'loop = __M_loop = runtime.LoopStack()'
        )

    for ident in to_write:
        if ident in comp_idents:
            # Name refers to a def/block: write a stub or a full closure.
            comp = comp_idents[ident]
            if comp.is_block:
                if not comp.is_anonymous:
                    self.write_def_decl(comp, identifiers)
                else:
                    self.write_inline_def(comp, identifiers, nested=True)
            else:
                if comp.is_root():
                    self.write_def_decl(comp, identifiers)
                else:
                    self.write_inline_def(comp, identifiers, nested=True)
        elif ident in self.compiler.namespaces:
            # Name refers to a declared namespace: look it up lazily.
            self.printer.writeline(
                "%s = _mako_get_namespace(context, %r)" % (ident, ident)
            )
        else:
            # Plain context variable, honoring ns imports and the
            # strict_undefined setting.
            if getattr(self.compiler, 'has_ns_imports', False):
                if self.compiler.strict_undefined:
                    self.printer.writelines(
                        "%s = _import_ns.get(%r, UNDEFINED)" % (ident, ident),
                        "if %s is UNDEFINED:" % ident,
                        "try:",
                        "%s = context[%r]" % (ident, ident),
                        "except KeyError:",
                        "raise NameError(\"'%s' is not defined\")" % ident,
                        None, None
                    )
                else:
                    self.printer.writeline(
                        "%s = _import_ns.get(%r, context.get(%r, UNDEFINED))"
                        % (ident, ident, ident))
            else:
                if self.compiler.strict_undefined:
                    self.printer.writelines(
                        "try:",
                        "%s = context[%r]" % (ident, ident),
                        "except KeyError:",
                        "raise NameError(\"'%s' is not defined\")" % ident,
                        None
                    )
                else:
                    self.printer.writeline(
                        "%s = context.get(%r, UNDEFINED)" % (ident, ident)
                    )

    self.printer.writeline("__M_writer = context.writer()")
python
{ "resource": "" }
q23862
_GenerateRenderMethod.write_def_decl
train
def write_def_decl(self, node, identifiers):
    """write a locally-available callable referencing a top-level def

    The stub closes over the current context (optionally augmented with
    ``__M_locals``) and forwards to the module-level ``render_<name>``.
    """
    funcname = node.funcname
    namedecls = node.get_argument_expressions()
    nameargs = node.get_argument_expressions(as_call=True)

    # At top level with local assignments/arguments, pass a context view
    # that includes __M_locals so the def sees enclosing names.
    if not self.in_def and (
            len(self.identifiers.locally_assigned) > 0 or
            len(self.identifiers.argument_declared) > 0):
        nameargs.insert(0, 'context._locals(__M_locals)')
    else:
        nameargs.insert(0, 'context')
    self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
    self.printer.writeline(
        "return render_%s(%s)" % (funcname, ",".join(nameargs)))
    self.printer.writeline(None)
python
{ "resource": "" }
q23863
_GenerateRenderMethod.write_inline_def
train
def write_inline_def(self, node, identifiers, nested):
    """write a locally-available def callable inside an enclosing def.

    :param node: the def/block parse-tree node.
    :param identifiers: enclosing scope state; re-branched for this def.
    :param nested: whether this def is nested inside another def.
    """
    namedecls = node.get_argument_expressions()

    decorator = node.decorator
    if decorator:
        self.printer.writeline(
            "@runtime._decorate_inline(context, %s)" % decorator)
    self.printer.writeline(
        "def %s(%s):" % (node.funcname, ",".join(namedecls)))
    filtered = len(node.filter_args.args) > 0
    # buffered/cached come from template attributes as Python literals.
    buffered = eval(node.attributes.get('buffered', 'False'))
    cached = eval(node.attributes.get('cached', 'False'))
    self.printer.writelines(
        # push new frame, assign current frame to __M_caller
        "__M_caller = context.caller_stack._push_frame()",
        "try:"
    )
    if buffered or filtered or cached:
        self.printer.writelines(
            "context._push_buffer()",
        )

    identifiers = identifiers.branch(node, nested=nested)

    self.write_variable_declares(identifiers)

    self.identifier_stack.append(identifiers)
    for n in node.nodes:
        n.accept_visitor(self)
    self.identifier_stack.pop()

    self.write_def_finish(node, buffered, filtered, cached)
    self.printer.writeline(None)
    if cached:
        self.write_cache_decorator(node, node.funcname,
                                   namedecls, False, identifiers,
                                   inline=True, toplevel=False)
python
{ "resource": "" }
q23864
_GenerateRenderMethod.write_def_finish
train
def write_def_finish(self, node, buffered, filtered, cached,
                     callstack=True):
    """write the end section of a rendering function, either outermost or
    inline.

    this takes into account if the rendering function was filtered,
    buffered, etc. and closes the corresponding try: block if any, and
    writes code to retrieve captured content, apply filters, send proper
    return value.
    """
    # Plain case: nothing captured, just return and pop the caller frame.
    if not buffered and not cached and not filtered:
        self.printer.writeline("return ''")
        if callstack:
            self.printer.writelines(
                "finally:",
                "context.caller_stack._pop_frame()",
                None
            )

    if buffered or filtered or cached:
        if buffered or cached:
            # in a caching scenario, don't try to get a writer
            # from the context after popping; assume the caching
            # implemenation might be using a context with no
            # extra buffers
            self.printer.writelines(
                "finally:",
                "__M_buf = context._pop_buffer()"
            )
        else:
            self.printer.writelines(
                "finally:",
                "__M_buf, __M_writer = context._pop_buffer_and_writer()"
            )

        if callstack:
            self.printer.writeline("context.caller_stack._pop_frame()")

        # Build the expression that yields the captured content, applying
        # def-level filters and then buffer filters as configured.
        s = "__M_buf.getvalue()"
        if filtered:
            s = self.create_filter_callable(node.filter_args.args, s,
                                            False)
        self.printer.writeline(None)
        if buffered and not cached:
            s = self.create_filter_callable(self.compiler.buffer_filters,
                                            s, False)
        if buffered or cached:
            self.printer.writeline("return %s" % s)
        else:
            self.printer.writelines(
                "__M_writer(%s)" % s,
                "return ''"
            )
python
{ "resource": "" }
q23865
_GenerateRenderMethod.write_cache_decorator
train
def write_cache_decorator(self, node_or_pagetag, name,
                          args, buffered, identifiers,
                          inline=False, toplevel=False):
    """write a post-function decorator to replace a rendering
    callable with a cached version of itself.

    The original callable is saved as ``__M_<name>`` and a wrapper of the
    same name fetches/creates the cached value through the context cache.
    """
    self.printer.writeline("__M_%s = %s" % (name, name))
    cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
                                                     repr(name))

    # Collect cache_* attributes, page-tag defaults first so the node's
    # own attributes override them; cache_key is handled separately.
    cache_args = {}
    if self.compiler.pagetag is not None:
        cache_args.update(
            (
                pa[6:],
                self.compiler.pagetag.parsed_attributes[pa]
            )
            for pa in self.compiler.pagetag.parsed_attributes
            if pa.startswith('cache_') and pa != 'cache_key'
        )
    cache_args.update(
        (
            pa[6:],
            node_or_pagetag.parsed_attributes[pa]
        ) for pa in node_or_pagetag.parsed_attributes
        if pa.startswith('cache_') and pa != 'cache_key'
    )
    if 'timeout' in cache_args:
        cache_args['timeout'] = int(eval(cache_args['timeout']))

    self.printer.writeline("def %s(%s):" % (name, ','.join(args)))

    # form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
    pass_args = [
        "%s=%s" % ((a.split('=')[0],) * 2) if '=' in a else a
        for a in args
    ]

    self.write_variable_declares(
        identifiers,
        toplevel=toplevel,
        limit=node_or_pagetag.undeclared_identifiers()
    )
    if buffered:
        s = "context.get('local')."\
            "cache._ctx_get_or_create("\
            "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % (
                cachekey, name, ','.join(pass_args),
                ''.join(["%s=%s, " % (k, v)
                         for k, v in cache_args.items()]),
                name
            )
        # apply buffer_filters
        s = self.create_filter_callable(self.compiler.buffer_filters, s,
                                        False)
        self.printer.writelines("return " + s, None)
    else:
        self.printer.writelines(
            "__M_writer(context.get('local')."
            "cache._ctx_get_or_create("
            "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" % (
                cachekey, name, ','.join(pass_args),
                ''.join(["%s=%s, " % (k, v)
                         for k, v in cache_args.items()]),
                name,
            ),
            "return ''",
            None
        )
python
{ "resource": "" }
q23866
_GenerateRenderMethod.create_filter_callable
train
def create_filter_callable(self, args, target, is_expression):
    """write a filter-applying expression based on the filters
    present in the given filter names, adjusting for the global
    'default' filter aliases as needed.

    :param args: list of filter name strings (possibly with call args).
    :param target: the expression string the filters wrap.
    :param is_expression: True for ${...} expressions, which also receive
     the page-tag and default filters unless 'n' disables them.
    :return: the nested call expression, innermost filter applied first.
    """
    def locate_encode(name):
        # Map a filter alias to its implementation reference.
        if re.match(r'decode\..+', name):
            return "filters." + name
        elif self.compiler.disable_unicode:
            return filters.NON_UNICODE_ESCAPES.get(name, name)
        else:
            return filters.DEFAULT_ESCAPES.get(name, name)

    if 'n' not in args:
        if is_expression:
            if self.compiler.pagetag:
                args = self.compiler.pagetag.filter_args.args + args
            if self.compiler.default_filters:
                args = self.compiler.default_filters + args
    for e in args:
        # if filter given as a function, get just the identifier portion
        if e == 'n':
            continue
        m = re.match(r'(.+?)(\(.*\))', e)
        if m:
            ident, fargs = m.group(1, 2)
            f = locate_encode(ident)
            e = f + fargs
        else:
            e = locate_encode(e)
            assert e is not None
        target = "%s(%s)" % (e, target)
    return target
python
{ "resource": "" }
q23867
_Identifiers.branch
train
def branch(self, node, **kwargs):
    """create a new Identifiers for a new Node, with this Identifiers
    as the parent.

    :param node: the parse-tree node the child scope is built for.
    :param kwargs: forwarded to the ``_Identifiers`` constructor
     (e.g. ``nested=True``).
    """
    return _Identifiers(self.compiler, node, self, **kwargs)
python
{ "resource": "" }
q23868
_Identifiers.check_declared
train
def check_declared(self, node):
    """Fold *node*'s identifier usage into this scope state.

    Names the node references without declaring become ``undeclared``
    (unless already known, or the reserved name ``context``); names the
    node declares become ``locally_declared``.
    """
    # Names already visible in this scope; neither set is mutated while
    # the first loop runs, so the union can be computed once up front.
    known = self.declared.union(self.locally_declared)
    for ident in node.undeclared_identifiers():
        if ident != 'context' and ident not in known:
            self.undeclared.add(ident)
    self.locally_declared.update(node.declared_identifiers())
python
{ "resource": "" }
q23869
HostsParser._parse_line_entry
train
def _parse_line_entry(self, line, type):
    """
    Parse a section entry line into its components. In case of a 'vars'
    section, the first field will be None. Otherwise, the first field
    will be the unexpanded host or group name the variables apply to.

    For example:

        [production:children]
        frontend  purpose="web"    # The line we process

    Returns:
        ('frontend', {'purpose': 'web'})

    For example:

        [production:vars]
        purpose="web"              # The line we process

    Returns:
        (None, {'purpose': 'web'})

    Undocumented feature:

        [prod:vars]
        json_like_vars=[{'name': 'htpasswd_auth'}]

    Returns:
        (None, {'name': 'htpasswd_auth'})
    """
    name = None
    key_values = {}
    if type == 'vars':
        key_values = self._parse_line_vars(line)
    else:
        # Shell-style tokenizing handles quoted values with spaces; the
        # first token is the host/group name, the rest are key=value vars.
        tokens = shlex.split(line.strip())
        name = tokens.pop(0)
        try:
            key_values = self._parse_vars(tokens)
        except ValueError:
            # Unparseable vars: warn and keep the name with no vars,
            # mirroring lenient Ansible behavior.
            self.log.warning("Unsupported vars syntax. Skipping line: {0}".format(line))
            return (name, {})
    return (name, key_values)
python
{ "resource": "" }
q23870
HostsParser._parse_vars
train
def _parse_vars(self, tokens): """ Given an iterable of tokens, returns variables and their values as a dictionary. For example: ['dtap=prod', 'comment=some comment'] Returns: {'dtap': 'prod', 'comment': 'some comment'} """ key_values = {} for token in tokens: if token.startswith('#'): # End parsing if we encounter a comment, which lasts # until the end of the line. break else: k, v = token.split('=', 1) key = k.strip() key_values[key] = v.strip() return key_values
python
{ "resource": "" }
q23871
HostsParser._get_distinct_hostnames
train
def _get_distinct_hostnames(self): """ Return a set of distinct hostnames found in the entire inventory. """ hostnames = [] for section in self.sections: hostnames.extend(self._group_get_hostnames(section['name'])) return set(hostnames)
python
{ "resource": "" }
q23872
HostsParser._apply_section
train
def _apply_section(self, section, hosts): """ Recursively find all the hosts that belong in or under a section and add the section's group name and variables to every host. """ # Add the current group name to each host that this section covers. if section['name'] is not None: for hostname in self._group_get_hostnames(section['name']): hosts[hostname]['groups'].add(section['name']) # Apply variables func_map = { "hosts": self._apply_section_hosts, "children": self._apply_section_children, "vars": self._apply_section_vars, } func = func_map[section['type']] func(section, hosts)
python
{ "resource": "" }
q23873
HostsParser._apply_section_hosts
train
def _apply_section_hosts(self, section, hosts):
    """
    Add the variables for each entry in a 'hosts' section to the hosts
    belonging to that entry.

    :param hosts: mapping of hostname -> host record, mutated in place.
    """
    for entry in section['entries']:
        # An entry name may be a host range expression; expand it first.
        for hostname in self.expand_hostdef(entry['name']):
            if hostname not in hosts:
                # Expanded host or child host or something else refers to a
                # host that isn't actually defined. Ansible skips this, so
                # we will too.
                continue
            host = hosts[hostname]
            for var_key, var_val in entry['hostvars'].items():
                host['hostvars'][var_key] = var_val
python
{ "resource": "" }
q23874
HostsParser._apply_section_children
train
def _apply_section_children(self, section, hosts): """ Add the variables for each entry in a 'children' section to the hosts belonging to that entry. """ for entry in section['entries']: for hostname in self._group_get_hostnames(entry['name']): host = hosts[hostname] for var_key, var_val in entry['hostvars'].items(): host['hostvars'][var_key] = var_val
python
{ "resource": "" }
q23875
HostsParser._group_get_hostnames
train
def _group_get_hostnames(self, group_name):
    """
    Recursively fetch a list of each unique hostname that belongs in or
    under the group. This includes hosts in children groups.

    :param group_name: the inventory group to walk.
    :return: list of hostnames (may contain duplicates across branches).
    """
    hostnames = []
    # Direct members from the group's 'hosts' section, with host-range
    # expressions expanded.
    hosts_section = self._get_section(group_name, 'hosts')
    if hosts_section:
        for entry in hosts_section['entries']:
            hostnames.extend(self.expand_hostdef(entry['name']))
    # Members inherited from child groups, recursively.
    children_section = self._get_section(group_name, 'children')
    if children_section:
        for entry in children_section['entries']:
            hostnames.extend(self._group_get_hostnames(entry['name']))
    return hostnames
python
{ "resource": "" }
q23876
HostsParser._get_section
train
def _get_section(self, name, type): """ Find and return a section with `name` and `type` """ for section in self.sections: if section['name'] == name and section['type'] == type: return section return None
python
{ "resource": "" }
q23877
DynInvParser._get_host
train
def _get_host(self, hostname): """ Get an existing host or otherwise initialize a new empty one. """ if hostname not in self.hosts: self.hosts[hostname] = { 'groups': set(), 'hostvars': {} } return self.hosts[hostname]
python
{ "resource": "" }
q23878
DynInvParser._parse_meta
train
def _parse_meta(self, meta):
    """
    Parse the _meta element from a dynamic host inventory output,
    merging each host's variables into the parser's host records.

    :param meta: the '_meta' dict; 'hostvars' maps hostname -> var dict.
    """
    for hostname, hostvars in meta.get('hostvars', {}).items():
        for var_key, var_val in hostvars.items():
            # _get_host lazily creates the record; note a host with an
            # empty hostvars dict is intentionally never registered.
            self._get_host(hostname)['hostvars'][var_key] = var_val
python
{ "resource": "" }
q23879
dump
train
def dump(node):
    """
    A very verbose representation of the node passed. This is useful for
    debugging purposes.

    :param node: an AST node; a TypeError is raised for anything else.
    :return: a string like ``Name(id='x', ctx=Load())``.
    """
    def _format(node):
        # Recursively render nodes, lists of nodes, and plain values.
        if isinstance(node, AST):
            return '%s(%s)' % (node.__class__.__name__,
                               ', '.join('%s=%s' % (a, _format(b))
                                         for a, b in iter_fields(node)))
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)
python
{ "resource": "" }
q23880
fix_missing_locations
train
def fix_missing_locations(node):
    """
    Some nodes require a line number and the column offset.  Without that
    information the compiler will abort the compilation.  Because it can be
    a dull task to add appropriate line numbers and column offsets when
    adding new nodes this function can help.  It copies the line number and
    column offset of the parent node to the child nodes without this
    information.

    Unlike `copy_location` this works recursive and won't touch nodes that
    already have a location information.

    :return: the same *node*, mutated in place.
    """
    def _fix(node, lineno, col_offset):
        # Inherit the parent's position only where the node supports the
        # attribute but doesn't carry a value yet.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    _fix(node, 1, 0)
    return node
python
{ "resource": "" }
q23881
increment_lineno
train
def increment_lineno(node, n=1):
    """
    Increment the line numbers of all nodes by `n` if they have line number
    attributes.  This is useful to "move code" to a different location in a
    file.

    :param node: root AST node; it and all descendants are adjusted.
    :param n: offset to add to each existing ``lineno``.
    """
    # Iterate the node itself plus all descendants.  The previous
    # implementation looped over ``zip((node,), walk(node))``, which yields
    # a tuple instead of AST nodes and crashed on ``._attributes``; the
    # stdlib ``ast.increment_lineno`` iterates walk() directly like this.
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
python
{ "resource": "" }
q23882
iter_fields
train
def iter_fields(node):
    """Yield ``(name, value)`` pairs for each field actually set on *node*.

    Fields listed in ``_fields`` but absent from the node are skipped.
    """
    # CPython 2.5 compat: ``_fields`` may be missing entirely or None.
    for name in getattr(node, '_fields', None) or ():
        try:
            yield name, getattr(node, name)
        except AttributeError:
            # Declared but unset on this particular node: skip it.
            continue
python
{ "resource": "" }
q23883
iter_child_nodes
train
def iter_child_nodes(node):
    """Iterate over all child nodes or a node.

    Yields each field value that is itself an AST node, plus each AST
    node inside list-valued fields; non-node values are skipped.
    """
    for name, field in iter_fields(node):
        if isinstance(field, AST):
            yield field
        elif isinstance(field, list):
            for item in field:
                if isinstance(item, AST):
                    yield item
python
{ "resource": "" }
q23884
get_docstring
train
def get_docstring(node):
    """
    Return the docstring for the given node or `None` if no docstring can
    be found.  If the node provided does not accept docstrings a
    `TypeError` will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # A docstring is a string literal as the first statement of the body;
    # implicitly returns None otherwise.
    if node.body and isinstance(node.body[0], Str):
        return node.body[0].s
python
{ "resource": "" }
q23885
walk
train
def walk(node):
    """
    Iterate over all nodes.  This is useful if you only want to modify
    nodes in place and don't care about the context or the order the nodes
    are returned.

    Yields *node* itself first, then descendants in breadth-first order.
    """
    from collections import deque
    todo = deque([node])
    while todo:
        node = todo.popleft()
        # Enqueue children before yielding, giving BFS traversal.
        todo.extend(iter_child_nodes(node))
        yield node
python
{ "resource": "" }
q23886
Lexer.match_reg
train
def match_reg(self, reg):
    """match the given regular expression object to the current text
    position.

    if a match occurs, update the current text and line position.

    :param reg: a compiled regular expression object.
    :return: the match object, or None on no match.
    """
    mp = self.match_position

    match = reg.match(self.text, self.match_position)
    if match:
        (start, end) = match.span()
        # Always advance at least one character so zero-width matches
        # cannot stall the lexer.
        if end == start:
            self.match_position = end + 1
        else:
            self.match_position = end
        self.matched_lineno = self.lineno
        lines = re.findall(r"\n", self.text[mp:self.match_position])
        # Scan backwards to the previous newline to compute the 1-based
        # column position where the match started.
        cp = mp - 1
        while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
            cp -= 1
        self.matched_charpos = mp - cp
        self.lineno += len(lines)
        #print "MATCHED:", match.group(0), "LINE START:",
        # self.matched_lineno, "LINE END:", self.lineno
    #print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
    #        (match and "TRUE" or "FALSE")
    return match
python
{ "resource": "" }
q23887
Lexer.match_comment
train
def match_comment(self):
    """Match the multiline (``<%doc>...</%doc>``) version of a comment.

    Appends a Comment node with the enclosed text and returns True on a
    match, False otherwise.
    """
    match = self.match(r"<%doc>(.*?)</%doc>", re.S)
    if match is None:
        return False
    self.append_node(parsetree.Comment, match.group(1))
    return True
python
{ "resource": "" }
q23888
extract
train
def extract(fileobj, keywords, comment_tags, options):
    """Extract messages from Mako templates.

    :param fileobj: the file-like object the messages should be extracted
                    from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    # Kept as a generator so the extractor is only constructed when the
    # caller starts iterating (Babel extraction-method protocol).
    extractor = BabelMakoExtractor(keywords, comment_tags, options)
    for message in extractor(fileobj):
        yield message
python
{ "resource": "" }
q23889
ControlLine.is_ternary
train
def is_ternary(self, keyword):
    """Return True if *keyword* is a ternary continuation of this
    ControlLine's own keyword (e.g. ``elif``/``else`` for ``if``,
    ``except``/``finally`` for ``try``, ``else`` for ``for``)."""
    ternary_map = {
        'if': set(['else', 'elif']),
        'try': set(['except', 'finally']),
        'for': set(['else']),
    }
    return keyword in ternary_map.get(self.keyword, [])
python
{ "resource": "" }
q23890
Cache.set
train
def set(self, key, value, **kw):
    """Place a value in the cache.

    :param key: the value's key.
    :param value: the value.
    :param \**kw: cache configuration arguments.
    """
    # Normalize the per-call configuration before handing off to the
    # backend implementation.
    cache_kw = self._get_cache_kw(kw, None)
    self.impl.set(key, value, **cache_kw)
python
{ "resource": "" }
q23891
Cache.get
train
def get(self, key, **kw):
    """Retrieve a value from the cache.

    :param key: the value's key.
    :param \**kw: cache configuration arguments.  The backend is
     configured using these arguments upon first request.  Subsequent
     requests that use the same series of configuration values will use
     that same backend.
    """
    cache_kw = self._get_cache_kw(kw, None)
    return self.impl.get(key, **cache_kw)
python
{ "resource": "" }
q23892
Cache.invalidate
train
def invalidate(self, key, **kw):
    """Invalidate a value in the cache.

    :param key: the value's key.
    :param \**kw: cache configuration arguments.  The backend is
     configured using these arguments upon first request.  Subsequent
     requests that use the same series of configuration values will use
     that same backend.
    """
    cache_kw = self._get_cache_kw(kw, None)
    self.impl.invalidate(key, **cache_kw)
python
{ "resource": "" }
q23893
supports_caller
train
def supports_caller(func):
    """Apply a caller_stack compatibility decorator to a plain
    Python function.

    See the example in :ref:`namespaces_python_modules`.
    """
    def wrap_stackframe(context, *args, **kwargs):
        # Mirror the frame push/pop that compiled template defs perform,
        # guaranteeing the pop even when *func* raises.
        stack = context.caller_stack
        stack._push_frame()
        try:
            return func(context, *args, **kwargs)
        finally:
            stack._pop_frame()
    return wrap_stackframe
python
{ "resource": "" }
q23894
capture
train
def capture(context, callable_, *args, **kwargs):
    """Execute the given template def, capturing the output into a buffer.

    See the example in :ref:`namespaces_python_modules`.

    :param context: the runtime Context to capture output from.
    :param callable_: the template def to invoke.
    :return: the captured output as a string.
    :raises RuntimeException: if *callable_* is not callable.
    """
    if not compat.callable(callable_):
        raise exceptions.RuntimeException(
            "capture() function expects a callable as "
            "its argument (i.e. capture(func, *args, **kwargs))"
        )
    # Push a fresh buffer; pop it in finally so the context's buffer
    # stack stays balanced even if the callable raises.
    context._push_buffer()
    try:
        callable_(*args, **kwargs)
    finally:
        buf = context._pop_buffer()
    return buf.getvalue()
python
{ "resource": "" }
q23895
_include_file
train
def _include_file(context, uri, calling_uri, **kwargs):
    """locate the template from the given uri and include it in
    the current output."""
    included = _lookup_template(context, uri, calling_uri)
    # Build the "self" namespace for the included template, stripping
    # inheritance-related tokens from the caller's context first.
    callable_, ctx = _populate_self_namespace(
        context._clean_inheritance_tokens(), included)
    forwarded = _kwargs_for_include(callable_, context._data, **kwargs)
    callable_(ctx, **forwarded)
python
{ "resource": "" }
q23896
_inherit_from
train
def _inherit_from(context, uri, calling_uri):
    """called by the _inherit method in template modules to set up
    the inheritance chain at the start of a template's execution.

    Returns ``None`` when there is nothing to inherit from, a value
    produced by the module's ``_mako_inherit`` hook when that hook
    returns something truthy, or a ``(callable_, context)`` pair for
    the parent template otherwise.
    """
    if uri is None:
        # No parent declared; caller renders the template directly.
        return None
    template = _lookup_template(context, uri, calling_uri)
    self_ns = context['self']
    # Walk to the bottom of the existing inheritance chain so the new
    # parent namespace is attached beneath any inheritance already set up.
    ih = self_ns
    while ih.inherits is not None:
        ih = ih.inherits
    # The child namespace becomes 'next' in the parent's context.
    lclcontext = context._locals({'next': ih})
    ih.inherits = TemplateNamespace("self:%s" % template.uri,
                                    lclcontext,
                                    template=template,
                                    populate_self=False)
    # Expose the parent namespace both as 'parent' in the outer context
    # and as 'local' in the parent's own context.
    context._data['parent'] = lclcontext._data['local'] = ih.inherits
    # Optional module-level hook: a truthy return short-circuits the
    # normal inheritance result.
    callable_ = getattr(template.module, '_mako_inherit', None)
    if callable_ is not None:
        ret = callable_(template, lclcontext)
        if ret:
            return ret
    # Optional hook to pre-populate namespaces into the child context.
    gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
    if gen_ns is not None:
        gen_ns(context)
    return (template.callable_, lclcontext)
python
{ "resource": "" }
q23897
_render
train
def _render(template, callable_, args, data, as_unicode=False):
    """create a Context and return the string output of the given
    template and template callable."""
    # Select an output buffer: unicode passthrough, raw bytestring
    # passthrough, or an encoding buffer using the template's settings.
    if as_unicode:
        output_buf = util.FastEncodingBuffer(as_unicode=True)
    elif template.bytestring_passthrough:
        output_buf = compat.StringIO()
    else:
        output_buf = util.FastEncodingBuffer(
            as_unicode=as_unicode,
            encoding=template.output_encoding,
            errors=template.encoding_errors)
    ctx = Context(output_buf, **data)
    ctx._outputting_as_unicode = as_unicode
    ctx._set_with_template(template)
    _render_context(template, callable_, ctx, *args,
                    **_kwargs_for_callable(callable_, data))
    return ctx._pop_buffer().getvalue()
python
{ "resource": "" }
q23898
_exec_template
train
def _exec_template(callable_, context, args=None, kwargs=None):
    """execute a rendering callable given the callable, a
    Context, and optional explicit arguments

    the contextual Template will be located if it exists, and
    the error handling options specified on that Template will
    be interpreted here.
    """
    template = context._with_template
    # Only wrap the call in error handling when the owning template has
    # opted in via format_exceptions or a custom error_handler.
    if template is not None and \
        (template.format_exceptions or template.error_handler):
        try:
            callable_(context, *args, **kwargs)
        except Exception:
            _render_error(template, context, compat.exception_as())
        except:
            # Bare except: catches whatever the first handler did not.
            # NOTE(review): sys.exc_info()[0] is the exception *class*,
            # not the instance, so _render_error receives the type here
            # while the branch above passes the instance — confirm this
            # asymmetry is intended by _render_error's contract.
            e = sys.exc_info()[0]
            _render_error(template, context, e)
    else:
        callable_(context, *args, **kwargs)
python
{ "resource": "" }
q23899
Context._push_writer
train
def _push_writer(self):
    """push a capturing buffer onto this Context and return
    the new writer function."""
    # A fresh buffer captures subsequent output until popped; callers
    # write through the returned bound ``write`` method.
    new_buf = util.FastEncodingBuffer()
    self._buffer_stack.append(new_buf)
    return new_buf.write
python
{ "resource": "" }