_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q30000
HiveServer2Cursor.execute
train
def execute(self, operation, parameters=None, configuration=None): """Synchronously execute a SQL query. Blocks until results are available. Parameters ---------- operation : str The SQL query to execute. parameters : str, optional Parameters to be bound to variables in the SQL query, if any. Impyla supports all DB API `paramstyle`s, including `qmark`, `numeric`, `named`, `format`, `pyformat`. configuration : dict of str keys and values, optional Configuration overlay for this query. Returns ------- NoneType Results are available through a call to `fetch*`. """ # PEP 249 self.execute_async(operation, parameters=parameters, configuration=configuration) log.debug('Waiting for query to finish') self._wait_to_finish() # make execute synchronous log.debug('Query finished')
python
{ "resource": "" }
q30001
HiveServer2Cursor.execute_async
train
def execute_async(self, operation, parameters=None, configuration=None):
    """Asynchronously execute a SQL query.

    Immediately returns after query is sent to the HS2 server.  Poll with
    `is_executing`. A call to `fetch*` will block.

    Parameters
    ----------
    operation : str
        The SQL query to execute.
    parameters : str, optional
        Parameters to be bound to variables in the SQL query, if any.
        Impyla supports all DB API `paramstyle`s, including `qmark`,
        `numeric`, `named`, `format`, `pyformat`.
    configuration : dict of str keys and values, optional
        Configuration overlay for this query.

    Returns
    -------
    NoneType
        Results are available through a call to `fetch*`.
    """
    log.debug('Executing query %s', operation)

    # The actual work is wrapped in a nested callable so the caller
    # (self._execute_async) controls when/where it runs; parameter binding
    # is deferred until execution time.
    def op():
        if parameters:
            # _bind_parameters interpolates DB API paramstyle placeholders
            self._last_operation_string = _bind_parameters(operation,
                                                           parameters)
        else:
            self._last_operation_string = operation
        # note: this `op` is a local of the closure, distinct from the
        # enclosing function object also named `op`
        op = self.session.execute(self._last_operation_string,
                                  configuration,
                                  run_async=True)
        self._last_operation = op

    self._execute_async(op)
python
{ "resource": "" }
q30002
HiveServer2Cursor._get_sleep_interval
train
def _get_sleep_interval(self, start_time): """Returns a step function of time to sleep in seconds before polling again. Maximum sleep is 1s, minimum is 0.1s""" elapsed = time.time() - start_time if elapsed < 0.05: return 0.01 elif elapsed < 1.0: return 0.05 elif elapsed < 10.0: return 0.1 elif elapsed < 60.0: return 0.5 return 1.0
python
{ "resource": "" }
q30003
HiveServer2Cursor.fetchcbatch
train
def fetchcbatch(self):
    '''Return a CBatch object of any data currently in the buffer or
    if no data currently in buffer then fetch a batch'''
    # Columnar fetching is an optional server capability.
    if not self._last_operation.is_columnar:
        raise NotSupportedError("Server does not support columnar "
                                "fetching")
    if not self.has_result_set:
        raise ProgrammingError(
            "Trying to fetch results on an operation with no results.")
    if len(self._buffer) > 0:
        # Drain the buffer first: hand back what we already have and
        # replace it with an empty Batch.
        log.debug('fetchcbatch: buffer has data in. Returning it and wiping buffer')
        batch = self._buffer
        self._buffer = Batch()
        return batch
    elif self._last_operation_active:
        log.debug('fetchcbatch: buffer empty and op is active => fetching '
                  'more data')
        batch = (self._last_operation.fetch(
            self.description,
            self.buffersize,
            convert_types=self.convert_types))
        # An empty fetch means the result set is exhausted.
        if len(batch) == 0:
            return None
        return batch
    else:
        # Operation finished and nothing buffered: no more data.
        return None
python
{ "resource": "" }
q30004
HiveServer2Cursor.fetchcolumnar
train
def fetchcolumnar(self):
    """Execute a fetchall-style operation, returning a list of CBatches."""
    # Block until the query has finished executing server-side.
    self._wait_to_finish()
    if not self._last_operation.is_columnar:
        raise NotSupportedError("Server does not support columnar "
                                "fetching")
    batches = []
    while True:
        fetched = self._last_operation.fetch(
            self.description, self.buffersize,
            convert_types=self.convert_types)
        # An empty batch signals the end of the result set.
        if len(fetched) == 0:
            return batches
        batches.append(fetched)
python
{ "resource": "" }
q30005
Client.setOption
train
def setOption(self, key, value):
    """
    Sets an option

    Parameters:
     - key
     - value
    """
    # Thrift-generated RPC stub: send the request, then block on the reply.
    self.send_setOption(key, value)
    self.recv_setOption()
python
{ "resource": "" }
q30006
Client.fetch
train
def fetch(self, query_id, start_over, fetch_size):
    """
    Get the results of a query. This is non-blocking. Caller should check
    Results.ready to determine if the results are in yet. The call requests
    the batch size of fetch.

    Parameters:
     - query_id
     - start_over
     - fetch_size
    """
    # Thrift-generated RPC stub: send the request, then return the reply.
    self.send_fetch(query_id, start_over, fetch_size)
    return self.recv_fetch()
python
{ "resource": "" }
q30007
ImpalaDDLCompiler.post_create_table
train
def post_create_table(self, table):
    """Build table-level CREATE options (Impala dialect extensions).

    Reads the ``impala_partition_by``, ``impala_stored_as`` and
    ``impala_table_properties`` keyword options off the table and renders
    them as trailing CREATE TABLE clauses.
    """
    kwargs = table.kwargs
    opts = []
    if 'impala_partition_by' in kwargs:
        opts.append('PARTITION BY %s' % kwargs.get('impala_partition_by'))
    if 'impala_stored_as' in kwargs:
        opts.append('STORED AS %s' % kwargs.get('impala_stored_as'))
    if 'impala_table_properties' in kwargs:
        props = kwargs.get('impala_table_properties', {})
        pairs = ["'{0}' = '{1}'".format(name, value)
                 for name, value in props.items()]
        opts.append('TBLPROPERTIES (%s)' % ', '.join(pairs))
    # Leading newline separates the options from the column list.
    return '\n%s' % '\n'.join(opts)
python
{ "resource": "" }
q30008
_get_table_schema_hack
train
def _get_table_schema_hack(cursor, table): """Get the schema of table by talking to Impala table must be a string (incl possible db name) """ # get the schema of the query result via a LIMIT 0 hack cursor.execute('SELECT * FROM %s LIMIT 0' % table) schema = [tup[:2] for tup in cursor.description] cursor.fetchall() # resets the state of the cursor and closes operation return schema
python
{ "resource": "" }
q30009
respond
train
def respond(request, code):
    """
    Responds to the request with the given response code.
    If ``next`` is in the form, it will redirect instead.
    """
    next_url = request.GET.get('next', request.POST.get('next'))
    if next_url:
        return HttpResponseRedirect(next_url)
    # Build a throwaway HttpResponse subclass that carries the status code.
    response_cls = type('Response%d' % code, (HttpResponse, ),
                        {'status_code': code})
    return response_cls()
python
{ "resource": "" }
q30010
follow_unfollow
train
def follow_unfollow(request, content_type_id, object_id, flag, do_follow=True, actor_only=True):
    """
    Creates or deletes the follow relationship between ``request.user`` and the
    actor defined by ``content_type_id``, ``object_id``.
    """
    ctype = get_object_or_404(ContentType, pk=content_type_id)
    instance = get_object_or_404(ctype.model_class(), pk=object_id)

    # If flag was omitted in url, None will pass to flag keyword argument
    flag = flag or ''

    if not do_follow:
        actions.unfollow(request.user, instance, flag=flag)
        return respond(request, 204)  # NO CONTENT
    actions.follow(request.user, instance, actor_only=actor_only, flag=flag)
    return respond(request, 201)  # CREATED
python
{ "resource": "" }
q30011
followers
train
def followers(request, content_type_id, object_id, flag):
    """
    Creates a listing of ``User``s that follow the actor defined by
    ``content_type_id``, ``object_id``.
    """
    ctype = get_object_or_404(ContentType, pk=content_type_id)
    instance = get_object_or_404(ctype.model_class(), pk=object_id)
    context = {
        # Empty string means "any flag" in the followers lookup.
        'followers': models.followers(instance, flag=flag or ''),
        'actor': instance,
    }
    return render(request, 'actstream/followers.html', context)
python
{ "resource": "" }
q30012
ActionManager.public
train
def public(self, *args, **kwargs):
    """
    Only return public actions
    """
    # Force the public flag; all other caller-supplied filters pass through.
    kwargs['public'] = True
    return self.filter(*args, **kwargs)
python
{ "resource": "" }
q30013
ActionManager.actor
train
def actor(self, obj, **kwargs):
    """
    Stream of most recent actions where obj is the actor.
    Keyword arguments will be passed to Action.objects.filter
    """
    check(obj)  # presumably validates obj is a registered actstream model
    return obj.actor_actions.public(**kwargs)
python
{ "resource": "" }
q30014
ActionManager.target
train
def target(self, obj, **kwargs):
    """
    Stream of most recent actions where obj is the target.
    Keyword arguments will be passed to Action.objects.filter
    """
    check(obj)  # presumably validates obj is a registered actstream model
    return obj.target_actions.public(**kwargs)
python
{ "resource": "" }
q30015
ActionManager.action_object
train
def action_object(self, obj, **kwargs):
    """
    Stream of most recent actions where obj is the action_object.
    Keyword arguments will be passed to Action.objects.filter
    """
    check(obj)  # presumably validates obj is a registered actstream model
    return obj.action_object_actions.public(**kwargs)
python
{ "resource": "" }
q30016
ActionManager.model_actions
train
def model_actions(self, model, **kwargs):
    """
    Stream of most recent actions by any particular model
    """
    check(model)
    ctype = ContentType.objects.get_for_model(model)
    # Match the model's content type appearing in any of the three roles.
    role_q = (Q(target_content_type=ctype) |
              Q(action_object_content_type=ctype) |
              Q(actor_content_type=ctype))
    return self.public(role_q, **kwargs)
python
{ "resource": "" }
q30017
ActionManager.any
train
def any(self, obj, **kwargs):
    """
    Stream of most recent actions where obj is the actor OR target OR
    action_object.
    """
    check(obj)
    ctype = ContentType.objects.get_for_model(obj)
    # Build one Q per role; field names follow the '<role>_content_type' /
    # '<role>_object_id' naming scheme (incl. action_object_object_id).
    clauses = [Q(**{'%s_content_type' % role: ctype,
                    '%s_object_id' % role: obj.pk})
               for role in ('actor', 'target', 'action_object')]
    return self.public(clauses[0] | clauses[1] | clauses[2], **kwargs)
python
{ "resource": "" }
q30018
ActionManager.user
train
def user(self, obj, with_user_activity=False, follow_flag=None, **kwargs):
    """Create a stream of the most recent actions by objects that the user is
    following."""
    q = Q()
    qs = self.public()

    # No user means no follows: empty stream.
    if not obj:
        return qs.none()

    check(obj)

    # Optionally include the user's own actions in the stream.
    if with_user_activity:
        q = q | Q(
            actor_content_type=ContentType.objects.get_for_model(obj),
            actor_object_id=obj.pk
        )

    follows = apps.get_model('actstream', 'follow').objects.filter(user=obj)
    if follow_flag:
        follows = follows.filter(flag=follow_flag)

    content_types = ContentType.objects.filter(
        pk__in=follows.values('content_type_id')
    )

    # Nothing followed and own activity not requested: empty stream.
    if not (content_types.exists() or with_user_activity):
        return qs.none()

    for content_type in content_types:
        object_ids = follows.filter(content_type=content_type)
        # Actor matches always count; target/action_object matches only
        # count for follows created with actor_only=False.
        q = q | Q(
            actor_content_type=content_type,
            actor_object_id__in=object_ids.values('object_id')
        ) | Q(
            target_content_type=content_type,
            target_object_id__in=object_ids.filter(
                actor_only=False).values('object_id')
        ) | Q(
            action_object_content_type=content_type,
            action_object_object_id__in=object_ids.filter(
                actor_only=False).values('object_id')
        )

    return qs.filter(q, **kwargs)
python
{ "resource": "" }
q30019
FollowManager.for_object
train
def for_object(self, instance, flag=''):
    """
    Filter to a specific instance.
    """
    check(instance)
    ctype_pk = ContentType.objects.get_for_model(instance).pk
    qs = self.filter(content_type=ctype_pk, object_id=instance.pk)
    # Empty flag means "any flag".
    return qs.filter(flag=flag) if flag else qs
python
{ "resource": "" }
q30020
FollowManager.is_following
train
def is_following(self, user, instance, flag=''):
    """
    Check if a user is following an instance.
    """
    # Anonymous / missing users never follow anything.
    if not user or user.is_anonymous:
        return False
    qs = self.for_object(instance)
    if flag:
        qs = qs.filter(flag=flag)
    return qs.filter(user=user).exists()
python
{ "resource": "" }
q30021
setup_generic_relations
train
def setup_generic_relations(model_class):
    """
    Set up GenericRelations for actionable models.
    """
    Action = apps.get_model('actstream', 'action')

    if Action is None:
        raise RegistrationError(
            'Unable get actstream.Action. Potential circular imports '
            'in initialisation. Try moving actstream app to come after the '
            'apps which have models to register in the INSTALLED_APPS setting.'
        )

    related_attr_name = 'related_query_name'
    related_attr_value = 'actions_with_%s' % label(model_class)

    relations = {}
    # One reverse relation per action role (actor/target/action_object).
    for field in ('actor', 'target', 'action_object'):
        attr = '%s_actions' % field
        attr_value = '%s_as_%s' % (related_attr_value, field)
        kwargs = {
            'content_type_field': '%s_content_type' % field,
            'object_id_field': '%s_object_id' % field,
            related_attr_name: attr_value
        }
        rel = GenericRelation('actstream.Action', **kwargs)
        rel.contribute_to_class(model_class, attr)
        relations[field] = rel
        # @@@ I'm not entirely sure why this works
        setattr(Action, attr_value, None)
    return relations
python
{ "resource": "" }
q30022
stream
train
def stream(func):
    """
    Stream decorator to be applied to methods of an ``ActionManager``
    subclass

    Syntax::

        from actstream.decorators import stream
        from actstream.managers import ActionManager

        class MyManager(ActionManager):
            @stream
            def foobar(self, ...):
                ...
    """
    @wraps(func)
    def wrapped(manager, *args, **kwargs):
        # _offset/_limit are slicing hints consumed here, not passed through.
        offset, limit = kwargs.pop('_offset', None), kwargs.pop('_limit', None)
        qs = func(manager, *args, **kwargs)
        # The wrapped method may return Q filters as a dict or a list/tuple
        # instead of a queryset; convert via manager.public().
        if isinstance(qs, dict):
            qs = manager.public(**qs)
        elif isinstance(qs, (list, tuple)):
            qs = manager.public(*qs)
        # NOTE(review): `limit` is used as an absolute slice end, not a
        # count relative to offset — confirm callers expect this.
        if offset or limit:
            qs = qs[offset:limit]
        return qs.fetch_generic_relations()
    return wrapped
python
{ "resource": "" }
q30023
follow_url
train
def follow_url(parser, token):
    """
    Renders the URL of the follow view for a particular actor instance ::

        <a href="{% follow_url other_user %}">follow / stop following</a>
        <a href="{% follow_url other_user 'watching' %}">watch / stop watching</a>

    Accepts an optional quoted flag as second argument.
    """
    bits = token.split_contents()
    if len(bits) > 3:
        raise TemplateSyntaxError("Accepted format {% follow_url [instance] %} or {% follow_url [instance] [flag] %}")
    if len(bits) == 2:
        return DisplayActivityFollowUrl(bits[1])
    # Third bit is a quoted flag literal; strip the surrounding quotes.
    return DisplayActivityFollowUrl(bits[1], flag=bits[2][1:-1])
python
{ "resource": "" }
q30024
follow_all_url
train
def follow_all_url(parser, token):
    """
    Renders the URL to follow an object as both actor and target ::

        <a href="{% follow_all_url other_user %}">follow / stop following</a>
        <a href="{% follow_all_url other_user 'watching' %}">watch / stop watching</a>

    Accepts an optional quoted flag as second argument.
    """
    bits = token.split_contents()
    if len(bits) > 3:
        raise TemplateSyntaxError(
            "Accepted format {% follow_all_url [instance] %} or {% follow_url [instance] [flag] %}"
        )
    if len(bits) == 2:
        return DisplayActivityFollowUrl(bits[1], actor_only=False)
    # Third bit is a quoted flag literal; strip the surrounding quotes.
    return DisplayActivityFollowUrl(bits[1], actor_only=False,
                                    flag=bits[2][1:-1])
python
{ "resource": "" }
q30025
actor_url
train
def actor_url(parser, token):
    """
    Renders the URL for a particular actor instance ::

        <a href="{% actor_url request.user %}">View your actions</a>
        <a href="{% actor_url another_user %}">{{ another_user }}'s actions</a>
    """
    bits = token.split_contents()
    if len(bits) == 2:
        return DisplayActivityActorUrl(*bits[1:])
    raise TemplateSyntaxError("Accepted format "
                              "{% actor_url [actor_instance] %}")
python
{ "resource": "" }
q30026
AsNode.handle_token
train
def handle_token(cls, parser, token):
    """
    Class method to parse and return a Node.

    Supports ``{% tag arg... %}`` and ``{% tag arg... as var %}`` forms;
    raises TemplateSyntaxError when the argument count does not match
    ``cls.args_count``.
    """
    tag_error = "Accepted formats {%% %(tagname)s %(args)s %%} or " \
                "{%% %(tagname)s %(args)s as [var] %%}"
    bits = token.split_contents()
    args_count = len(bits) - 1
    if args_count >= 2 and bits[-2] == 'as':
        as_var = bits[-1]
        args_count -= 2
    else:
        as_var = None
    if args_count != cls.args_count:
        # BUG FIX: join a list of N '[arg]' placeholders so the message
        # reads '[arg] [arg] ...'. The old code joined a single-element
        # list containing '[arg]' repeated, yielding '[arg][arg]...' with
        # no separating spaces.
        arg_list = ' '.join(['[arg]'] * cls.args_count)
        raise TemplateSyntaxError(tag_error % {'tagname': bits[0],
                                               'args': arg_list})
    args = [parser.compile_filter(tkn) for tkn in bits[1:args_count + 1]]
    return cls(args, varname=as_var)
python
{ "resource": "" }
q30027
follow
train
def follow(user, obj, send_action=True, actor_only=True, flag='', **kwargs):
    """
    Creates a relationship allowing the object's activities to appear in the
    user's stream.

    Returns the created ``Follow`` instance.

    If ``send_action`` is ``True`` (the default) then a
    ``<user> started following <object>`` action signal is sent.
    Extra keyword arguments are passed to the action.send call.

    If ``actor_only`` is ``True`` (the default) then only actions where the
    object is the actor will appear in the user's activity stream. Set to
    ``False`` to also include actions where this object is the action_object
    or the target.

    If ``flag`` not an empty string then the relationship would marked by
    this flag.

    Example::

        follow(request.user, group, actor_only=False)
        follow(request.user, group, actor_only=False, flag='liking')
    """
    check(obj)
    follow_model = apps.get_model('actstream', 'follow')
    instance, created = follow_model.objects.get_or_create(
        user=user, object_id=obj.pk, flag=flag,
        content_type=ContentType.objects.get_for_model(obj),
        actor_only=actor_only
    )
    # Only emit the signal for a newly created relationship.
    if send_action and created:
        if not flag:
            verb = _('started following')
        else:
            verb = _('started %s' % flag)
        action.send(user, verb=verb, target=obj, **kwargs)
    return instance
python
{ "resource": "" }
q30028
unfollow
train
def unfollow(user, obj, send_action=False, flag=''):
    """
    Removes a "follow" relationship.

    Set ``send_action`` to ``True`` (``False`` is default) to also send a
    ``<user> stopped following <object>`` action signal.

    Pass a string value to ``flag`` to determine which type of "follow"
    relationship you want to remove.

    Example::

        unfollow(request.user, other_user)
        unfollow(request.user, other_user, flag='watching')
    """
    check(obj)
    qs = apps.get_model('actstream', 'follow').objects.filter(
        user=user, object_id=obj.pk,
        content_type=ContentType.objects.get_for_model(obj)
    )

    # Without a flag, every follow relationship for this object is removed.
    if flag:
        qs = qs.filter(flag=flag)
    qs.delete()

    if send_action:
        if not flag:
            action.send(user, verb=_('stopped following'), target=obj)
        else:
            action.send(user, verb=_('stopped %s' % flag), target=obj)
python
{ "resource": "" }
q30029
is_following
train
def is_following(user, obj, flag=''):
    """
    Checks if a "follow" relationship exists.

    Returns True if exists, False otherwise.

    Pass a string value to ``flag`` to determine which type of "follow"
    relationship you want to check.

    Example::

        is_following(request.user, group)
        is_following(request.user, group, flag='liking')
    """
    check(obj)
    filters = {
        'user': user,
        'object_id': obj.pk,
        'content_type': ContentType.objects.get_for_model(obj),
    }
    qs = apps.get_model('actstream', 'follow').objects.filter(**filters)
    if flag:
        qs = qs.filter(flag=flag)
    return qs.exists()
python
{ "resource": "" }
q30030
action_handler
train
def action_handler(verb, **kwargs):
    """
    Handler function to create Action instance upon action signal call.
    """
    kwargs.pop('signal', None)
    actor = kwargs.pop('sender')

    # We must store the untranslated string.
    # If verb is an ugettext_lazyed string, fetch the original string
    if hasattr(verb, '_proxy____args'):
        verb = verb._proxy____args[0]

    newaction = apps.get_model('actstream', 'action')(
        actor_content_type=ContentType.objects.get_for_model(actor),
        actor_object_id=actor.pk,
        verb=text_type(verb),
        public=bool(kwargs.pop('public', True)),
        description=kwargs.pop('description', None),
        timestamp=kwargs.pop('timestamp', now())
    )

    # target and action_object are optional generic relations.
    for opt in ('target', 'action_object'):
        obj = kwargs.pop(opt, None)
        if obj is not None:
            check(obj)
            setattr(newaction, '%s_object_id' % opt, obj.pk)
            setattr(newaction, '%s_content_type' % opt,
                    ContentType.objects.get_for_model(obj))
    # Any leftover kwargs are stashed in the JSON data field when enabled.
    if settings.USE_JSONFIELD and len(kwargs):
        newaction.data = kwargs
    newaction.save(force_insert=True)
    return newaction
python
{ "resource": "" }
q30031
AbstractActivityStream.items
train
def items(self, *args, **kwargs):
    """
    Returns a queryset of Actions to use based on the stream method and
    object.
    """
    # get_stream() yields a stream callable; apply it to the resolved object.
    return self.get_stream()(self.get_object(*args, **kwargs))
python
{ "resource": "" }
q30032
AbstractActivityStream.get_uri
train
def get_uri(self, action, obj=None, date=None):
    """
    Returns an RFC3987 IRI ID for the given object, action and date.
    """
    if date is None:
        date = action.timestamp
    # 'tag:' URIs embed the domain, the date and a site-relative URL.
    day = datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    domain = Site.objects.get_current().domain
    return 'tag:%s,%s:%s' % (domain, day, self.get_url(action, obj, False))
python
{ "resource": "" }
q30033
AbstractActivityStream.get_url
train
def get_url(self, action, obj=None, domain=True):
    """
    Returns an RFC3987 IRI for a HTML representation of the given object,
    action.

    If domain is true, the current site's domain will be added.
    """
    if not obj:
        url = reverse('actstream_detail', None, (action.pk,))
    elif hasattr(obj, 'get_absolute_url'):
        # The object knows its own canonical URL.
        url = obj.get_absolute_url()
    else:
        ctype = ContentType.objects.get_for_model(obj)
        url = reverse('actstream_actor', None, (ctype.pk, obj.pk))
    if not domain:
        return url
    return add_domain(Site.objects.get_current().domain, url)
python
{ "resource": "" }
q30034
AbstractActivityStream.format
train
def format(self, action):
    """
    Returns a formatted dictionary for the given action.
    """
    entry = {
        'id': self.get_uri(action),
        'url': self.get_url(action),
        'verb': action.verb,
        'published': rfc3339_date(action.timestamp),
        'actor': self.format_actor(action),
        'title': text_type(action),
    }
    # Optional fields are added only when present on the action.
    if action.description:
        entry['content'] = action.description
    if action.target:
        entry['target'] = self.format_target(action)
    if action.action_object:
        entry['object'] = self.format_action_object(action)
    return entry
python
{ "resource": "" }
q30035
AbstractActivityStream.format_item
train
def format_item(self, action, item_type='actor'):
    """
    Returns a formatted dictionary for an individual item based on the action
    and item_type.
    """
    # item_type names an attribute of the action: actor/target/action_object.
    obj = getattr(action, item_type)
    ctype = ContentType.objects.get_for_model(obj)
    return {
        'id': self.get_uri(action, obj),
        'url': self.get_url(action, obj),
        'objectType': ctype.name,
        'displayName': text_type(obj)
    }
python
{ "resource": "" }
q30036
ActivityStreamsBaseFeed.item_extra_kwargs
train
def item_extra_kwargs(self, action):
    """
    Returns an extra keyword arguments dictionary that is used with
    the `add_item` call of the feed generator. Add the 'content' field
    of the 'Entry' item, to be used by the custom feed generator.
    """
    data = self.format(action)
    data.pop('title', None)
    # Rename to the activity-streams field names the generator expects.
    for new_key, old_key in (('uri', 'url'), ('activity:verb', 'verb')):
        data[new_key] = data.pop(old_key)
    return data
python
{ "resource": "" }
q30037
Statement._principals
train
def _principals(self): """Extracts all principals from IAM statement. Should handle these cases: "Principal": "value" "Principal": ["value"] "Principal": { "AWS": "value" } "Principal": { "AWS": ["value", "value"] } "Principal": { "Service": "value" } "Principal": { "Service": ["value", "value"] } Return: Set of principals """ principals = set() principal = self.statement.get("Principal", None) if not principal: # It is possible not to define a principal, AWS ignores these statements. return principals if isinstance(principal, dict): if 'AWS' in principal: self._add_or_extend(principal['AWS'], principals) if 'Service' in principal: self._add_or_extend(principal['Service'], principals) else: self._add_or_extend(principal, principals) return principals
python
{ "resource": "" }
q30038
extract_audioclip_samples
train
def extract_audioclip_samples(d) -> dict:
    """
    Extract all the sample data from an AudioClip and convert it from FSB5
    if needed.

    Parameters
    ----------
    d : AudioClip
        Object with ``data`` (raw FSB5 bytes, may be falsy) and ``name``
        (used to build output file names).

    Returns
    -------
    dict
        Mapping of output filename -> rebuilt sample. Empty when the clip
        has no data (eg. StreamedResource not available). Samples that
        fail to rebuild are skipped with a warning.

    Raises
    ------
    RuntimeError
        If the optional ``fsb5`` dependency is not installed.
    """
    if not d.data:
        # eg. StreamedResource not available
        return {}

    try:
        from fsb5 import FSB5
    except ImportError as e:
        # Chain the original ImportError so the root cause stays visible.
        raise RuntimeError("python-fsb5 is required to extract AudioClip") from e

    ret = {}
    af = FSB5(d.data)
    for i, sample in enumerate(af.samples):
        # First sample keeps the bare clip name; later ones get a suffix.
        if i > 0:
            filename = "%s-%i.%s" % (d.name, i, af.get_sample_extension())
        else:
            filename = "%s.%s" % (d.name, af.get_sample_extension())
        try:
            sample = af.rebuild_sample(sample)
        except ValueError as e:
            print("WARNING: Could not extract %r (%s)" % (d, e))
            continue
        ret[filename] = sample
    return ret
python
{ "resource": "" }
q30039
use_kwargs
train
def use_kwargs(args, locations=None, inherit=None, apply=None, **kwargs):
    """Inject keyword arguments from the specified webargs arguments into the
    decorated view function.

    Usage:

    .. code-block:: python

        from marshmallow import fields

        @use_kwargs({'name': fields.Str(), 'category': fields.Str()})
        def get_pets(**kwargs):
            return Pet.query.filter_by(**kwargs).all()

    :param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>`
        objects, :class:`Schema <marshmallow.Schema>`, or a callable which
        accepts a request and returns a :class:`Schema <marshmallow.Schema>`
    :param locations: Default request locations to parse
    :param inherit: Inherit args from parent classes
    :param apply: Parse request with specified args
    """
    # locations travels alongside the other parser kwargs.
    kwargs.update({'locations': locations})

    def wrapper(func):
        annotate(func, 'args', [{'args': args, 'kwargs': kwargs}],
                 inherit=inherit, apply=apply)
        return activate(func)
    return wrapper
python
{ "resource": "" }
q30040
marshal_with
train
def marshal_with(schema, code='default', description='', inherit=None, apply=None):
    """Marshal the return value of the decorated view function using the
    specified schema.

    Usage:

    .. code-block:: python

        class PetSchema(Schema):
            class Meta:
                fields = ('name', 'category')

        @marshal_with(PetSchema)
        def get_pet(pet_id):
            return Pet.query.filter(Pet.id == pet_id).one()

    :param schema: :class:`Schema <marshmallow.Schema>` class or instance, or `None`
    :param code: Optional HTTP response code
    :param description: Optional response description
    :param inherit: Inherit schemas from parent classes
    :param apply: Marshal response with specified schema
    """
    def wrapper(func):
        # Response schemas are keyed by HTTP status code (or 'default').
        response_opts = {
            code: {'schema': schema or {}, 'description': description},
        }
        annotate(func, 'schemas', [response_opts], inherit=inherit,
                 apply=apply)
        return activate(func)
    return wrapper
python
{ "resource": "" }
q30041
doc
train
def doc(inherit=None, **kwargs):
    """Annotate the decorated view function or class with the specified Swagger
    attributes.

    Usage:

    .. code-block:: python

        @doc(tags=['pet'], description='a pet store')
        def get_pet(pet_id):
            return Pet.query.filter(Pet.id == pet_id).one()

    :param inherit: Inherit Swagger documentation from parent classes
    """
    def wrapper(func):
        # All remaining keyword arguments become the Swagger doc payload.
        annotate(func, 'docs', [kwargs], inherit=inherit)
        return activate(func)
    return wrapper
python
{ "resource": "" }
q30042
wrap_with
train
def wrap_with(wrapper_cls):
    """Use a custom `Wrapper` to apply annotations to the decorated function.

    :param wrapper_cls: Custom `Wrapper` subclass
    """
    def wrapper(func):
        # Record the wrapper class as an annotation; activate() applies it.
        annotate(func, 'wrapper', [{'wrapper': wrapper_cls}])
        return activate(func)
    return wrapper
python
{ "resource": "" }
q30043
get_available_FIELD_transitions
train
def get_available_FIELD_transitions(instance, field):
    """
    List of transitions available in current model state
    with all conditions met
    """
    curr_state = field.get_state(instance)
    # Transitions are registered per concrete model class.
    for transition in field.transitions[instance.__class__].values():
        meta = transition._django_fsm
        if meta.has_transition(curr_state) and meta.conditions_met(instance, curr_state):
            yield meta.get_transition(curr_state)
python
{ "resource": "" }
q30044
get_available_user_FIELD_transitions
train
def get_available_user_FIELD_transitions(instance, user, field):
    """
    List of transitions available in current model state
    with all conditions met and user have rights on it
    """
    # Filter the condition-checked transitions down to those the user may run.
    for transition in get_available_FIELD_transitions(instance, field):
        if not transition.has_perm(instance, user):
            continue
        yield transition
python
{ "resource": "" }
q30045
transition
train
def transition(field, source='*', target=None, on_error=None, conditions=[], permission=None, custom={}):
    """
    Method decorator to mark allowed transitions.

    Set target to None if current state needs to be validated and
    has not changed after the function call.

    NOTE(review): ``conditions=[]`` and ``custom={}`` are mutable defaults;
    they appear to only be read here, but confirm add_transition never
    mutates them.
    """
    def inner_transition(func):
        # Reuse an existing FSMMeta when the method is decorated more than
        # once (e.g. several @transition decorators stacked on one method).
        wrapper_installed, fsm_meta = True, getattr(func, '_django_fsm', None)
        if not fsm_meta:
            wrapper_installed = False
            fsm_meta = FSMMeta(field=field, method=func)
            setattr(func, '_django_fsm', fsm_meta)

        # source may be a single state or a collection of states.
        if isinstance(source, (list, tuple, set)):
            for state in source:
                func._django_fsm.add_transition(func, state, target, on_error, conditions, permission, custom)
        else:
            func._django_fsm.add_transition(func, source, target, on_error, conditions, permission, custom)

        @wraps(func)
        def _change_state(instance, *args, **kwargs):
            return fsm_meta.field.change_state(instance, func, *args, **kwargs)

        # Only install the state-changing wrapper once, on the first
        # decoration; later decorations return the original function.
        if not wrapper_installed:
            return _change_state

        return func
    return inner_transition
python
{ "resource": "" }
q30046
can_proceed
train
def can_proceed(bound_method, check_conditions=True): """ Returns True if model in state allows to call bound_method Set ``check_conditions`` argument to ``False`` to skip checking conditions. """ if not hasattr(bound_method, '_django_fsm'): im_func = getattr(bound_method, 'im_func', getattr(bound_method, '__func__')) raise TypeError('%s method is not transition' % im_func.__name__) meta = bound_method._django_fsm im_self = getattr(bound_method, 'im_self', getattr(bound_method, '__self__')) current_state = meta.field.get_state(im_self) return meta.has_transition(current_state) and ( not check_conditions or meta.conditions_met(im_self, current_state))
python
{ "resource": "" }
q30047
has_transition_perm
train
def has_transition_perm(bound_method, user):
    """
    Returns True if model in state allows to call bound_method and user have
    rights on it
    """
    if not hasattr(bound_method, '_django_fsm'):
        # Support both py2 (im_func) and py3 (__func__) method objects.
        im_func = getattr(bound_method, 'im_func',
                          getattr(bound_method, '__func__'))
        raise TypeError('%s method is not transition' % im_func.__name__)

    meta = bound_method._django_fsm
    instance = getattr(bound_method, 'im_self',
                       getattr(bound_method, '__self__'))
    state = meta.field.get_state(instance)

    # Transition must exist, its conditions must pass, and the user must
    # hold the transition's permission.
    return (meta.has_transition(state) and
            meta.conditions_met(instance, state) and
            meta.has_transition_perm(instance, state, user))
python
{ "resource": "" }
q30048
FSMMeta.has_transition
train
def has_transition(self, state):
    """
    Lookup if any transition exists from current model state using
    current method
    """
    transitions = self.transitions
    # Exact state match or the '*' wildcard always qualify.
    if state in transitions or '*' in transitions:
        return True
    # The '+' wildcard matches any state except the transition's own target.
    if '+' in transitions:
        return transitions['+'].target != state
    return False
python
{ "resource": "" }
q30049
FSMMeta.conditions_met
train
def conditions_met(self, instance, state):
    """
    Check if all conditions have been met
    """
    transition = self.get_transition(state)

    # No transition from this state: conditions trivially unmet.
    if transition is None:
        return False
    # No conditions registered: trivially met.
    if transition.conditions is None:
        return True
    return all(condition(instance) for condition in transition.conditions)
python
{ "resource": "" }
q30050
Silverstripe._convert_to_folder
train
def _convert_to_folder(self, packages):
    """
    Silverstripe's page contains a list of composer packages. This function
    converts those to folder names. These may be different due to
    installer-name.

    Implemented exponential backoff in order to prevent packager from being
    overly sensitive about the number of requests I was making.

    @see: https://github.com/composer/installers#custom-install-names
    @see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7
    """
    url = 'http://packagist.org/p/%s.json'
    with ThreadPoolExecutor(max_workers=12) as executor:
        # Fan out one packagist metadata request per package.
        futures = []
        for package in packages:
            future = executor.submit(self._get, url, package)
            futures.append({
                'future': future,
                'package': package
            })

        folders = []
        for i, future in enumerate(futures, start=1):
            r = future['future'].result()
            package = future['package']
            if not 'installer-name' in r.text:
                # Default composer behaviour: folder is the package name
                # after the vendor prefix.
                folder_name = package.split('/')[1]
            else:
                # Crude JSON scrape: tokenize the response and take the
                # value following the 'installer-name' key.
                splat = list(filter(None,
                                    re.split(r'[^a-zA-Z0-9-_.,]', r.text)))
                folder_name = splat[splat.index('installer-name') + 1]

            if not folder_name in folders:
                folders.append(folder_name)
            else:
                print("Folder %s is duplicated (current %s, previous %s)"
                      % (folder_name, package, folders.index(folder_name)))

            # Progress indicator every 25 packages.
            if i % 25 == 0:
                print("Done %s." % i)

    return folders
python
{ "resource": "" }
q30051
BasePluginInternal._general_init
train
def _general_init(self, opts, out=None): """ Initializes a variety of variables depending on user input. @return: a tuple containing a boolean value indicating whether progressbars should be hidden, functionality and enabled functionality. """ self.session = Session() if out: self.out = out else: self.out = self._output(opts) is_cms_plugin = self._meta.label != "scan" if is_cms_plugin: self.vf = VersionsFile(self.versions_file) # http://stackoverflow.com/questions/23632794/in-requests-library-how-can-i-avoid-httpconnectionpool-is-full-discarding-con try: a = requests.adapters.HTTPAdapter(pool_maxsize=5000) self.session.mount('http://', a) self.session.mount('https://', a) self.session.cookies.set_policy(BlockAll()) except AttributeError: old_req = """Running a very old version of requests! Please `pip install -U requests`.""" self.out.warn(old_req) self.session.verify = False self.session.headers['User-Agent'] = self.DEFAULT_UA debug_requests = opts['debug_requests'] if debug_requests: hide_progressbar = True opts['threads_identify'] = 1 opts['threads_scan'] = 1 opts['threads_enumerate'] = 1 self.session = RequestsLogger(self.session) else: if opts['hide_progressbar']: hide_progressbar = True else: hide_progressbar = False functionality = self._functionality(opts) enabled_functionality = self._enabled_functionality(functionality, opts) return (hide_progressbar, functionality, enabled_functionality)
python
{ "resource": "" }
q30052
BasePluginInternal.url_scan
train
def url_scan(self, url, opts, functionality, enabled_functionality, hide_progressbar): """ This is the main function called whenever a URL needs to be scanned. This is called when a user specifies an individual CMS, or after CMS identification has taken place. This function is called for individual hosts specified by `-u` or for individual lines specified by `-U`. @param url: this parameter can either be a URL or a (url, host_header) tuple. The url, if a string, can be in the format of url + " " + host_header. @param opts: options object as returned by self._options(). @param functionality: as returned by self._general_init. @param enabled_functionality: as returned by self._general_init. @param hide_progressbar: whether to hide the progressbar. @return: results dictionary. """ self.out.debug('base_plugin_internal.url_scan -> %s' % str(url)) if isinstance(url, tuple): url, host_header = url else: url, host_header = self._process_host_line(url) url = common.repair_url(url) if opts['follow_redirects']: url, host_header = self.determine_redirect(url, host_header, opts) need_sm = opts['enumerate'] in ['a', 'p', 't'] if need_sm and (self.can_enumerate_plugins or self.can_enumerate_themes): scanning_method = opts['method'] if not scanning_method: scanning_method = self.determine_scanning_method(url, opts['verb'], opts['timeout'], self._generate_headers(host_header)) else: scanning_method = None enumerating_all = opts['enumerate'] == 'a' result = {} for enumerate in enabled_functionality: enum = functionality[enumerate] if common.shutdown: continue # Get the arguments for the function. kwargs = dict(enum['kwargs']) kwargs['url'] = url kwargs['hide_progressbar'] = hide_progressbar if enumerate in ['themes', 'plugins']: kwargs['scanning_method'] = scanning_method kwargs['headers'] = self._generate_headers(host_header) # Call to the respective functions occurs here. 
finds, is_empty = enum['func'](**kwargs) result[enumerate] = {'finds': finds, 'is_empty': is_empty} return result
python
{ "resource": "" }
q30053
BasePluginInternal._determine_redirect
train
def _determine_redirect(self, url, verb, timeout=15, headers={}): """ Internal redirect function, focuses on HTTP and worries less about application-y stuff. @param url: the url to check @param verb: the verb, e.g. head, or get. @param timeout: the time, in seconds, that requests should wait before throwing an exception. @param headers: a set of headers as expected by requests. @return: the url that needs to be scanned. It may be equal to the url parameter if no redirect is needed. """ requests_verb = getattr(self.session, verb) r = requests_verb(url, timeout=timeout, headers=headers, allow_redirects=False) redirect = 300 <= r.status_code < 400 url_new = url if redirect: redirect_url = r.headers['Location'] url_new = redirect_url relative_redirect = not redirect_url.startswith('http') if relative_redirect: url_new = url base_redir = base_url(redirect_url) base_supplied = base_url(url) same_base = base_redir == base_supplied if same_base: url_new = url return url_new
python
{ "resource": "" }
q30054
BasePluginInternal.enumerate_version_changelog
train
def enumerate_version_changelog(self, url, versions_estimated, timeout=15, headers={}): """ If we have a changelog in store for this CMS, this function will be called, and a changelog will be used for narrowing down which version is installed. If the changelog's version is outside our estimated range, it is discarded. @param url: the url to check against. @param versions_estimated: the version other checks estimate the installation is. @param timeout: the number of seconds to wait before expiring a request. @param headers: headers to pass to requests.get() """ changelogs = self.vf.changelogs_get() ch_hash = None for ch_url in changelogs: try: ch_hash = self.enumerate_file_hash(url, file_url=ch_url, timeout=timeout, headers=headers) except RuntimeError: pass ch_version = self.vf.changelog_identify(ch_hash) if ch_version in versions_estimated: return [ch_version] else: return versions_estimated
python
{ "resource": "" }
q30055
BasePluginInternal._enumerate_plugin_if
train
def _enumerate_plugin_if(self, found_list, verb, threads, imu_list, hide_progressbar, timeout=15, headers={}): """ Finds interesting urls within a plugin folder which respond with 200 OK. @param found_list: as returned in self.enumerate. E.g. [{'name': 'this_exists', 'url': 'http://adhwuiaihduhaknbacnckajcwnncwkakncw.com/sites/all/modules/this_exists/'}] @param verb: the verb to use. @param threads: the number of threads to use. @param imu_list: Interesting module urls. @param hide_progressbar: whether to display a progressbar. @param timeout: timeout in seconds for http requests. @param headers: custom headers as expected by requests. """ if not hide_progressbar: p = ProgressBar(sys.stderr, len(found_list) * len(imu_list), name="IMU") requests_verb = getattr(self.session, verb) with ThreadPoolExecutor(max_workers=threads) as executor: futures = [] for i, found in enumerate(found_list): found_list[i]['imu'] = [] for imu in imu_list: interesting_url = found['url'] + imu[0] future = executor.submit(requests_verb, interesting_url, timeout=timeout, headers=headers) futures.append({ 'url': interesting_url, 'future': future, 'description': imu[1], 'i': i }) for f in futures: if common.shutdown: f['future'].cancel() continue r = f['future'].result() if r.status_code == 200: found_list[f['i']]['imu'].append({ 'url': f['url'], 'description': f['description'] }) if not hide_progressbar: p.increment_progress() if not hide_progressbar: p.hide() return found_list
python
{ "resource": "" }
q30056
BasePluginInternal.cms_identify
train
def cms_identify(self, url, timeout=15, headers={}): """ Function called when attempting to determine if a URL is identified as being this particular CMS. @param url: the URL to attempt to identify. @param timeout: number of seconds before a timeout occurs on a http connection. @param headers: custom HTTP headers as expected by requests. @return: a boolean value indiciating whether this CMS is identified as being this particular CMS. """ self.out.debug("cms_identify") if isinstance(self.regular_file_url, str): rfu = [self.regular_file_url] else: rfu = self.regular_file_url is_cms = False for regular_file_url in rfu: try: hash = self.enumerate_file_hash(url, regular_file_url, timeout, headers) except RuntimeError: continue hash_exists = self.vf.has_hash(hash) if hash_exists: is_cms = True break return is_cms
python
{ "resource": "" }
q30057
BasePluginInternal.resume_forward
train
def resume_forward(self, fh, resume, file_location, error_log): """ Forwards `fh` n lines, where n lines is the amount of lines we should skip in order to resume our previous scan, if resume is required by the user. @param fh: fh to advance. @param file_location: location of the file handler in disk. @param error_log: location of the error_log in disk. """ if resume: if not error_log: raise CannotResumeException("--error-log not provided.") skip_lines = self.resume(file_location, error_log) for _ in range(skip_lines): next(fh)
python
{ "resource": "" }
q30058
StandardOutput.result
train
def result(self, result, functionality): """ For the final result of the scan. @param result: as returned by BasePluginInternal.url_scan @param functionality: functionality as returned by BasePluginInternal._general_init """ for enumerate in result: # The host is a special header, we must not attempt to display it. if enumerate == "host" or enumerate == "cms_name": continue result_ind = result[enumerate] finds = result_ind['finds'] is_empty = result_ind['is_empty'] template_str = functionality[enumerate]['template'] template_params = { 'noun': enumerate, 'Noun': enumerate.capitalize(), 'items': finds, 'empty': is_empty, } self.echo(template(template_str, template_params))
python
{ "resource": "" }
q30059
StandardOutput.warn
train
def warn(self, msg, whitespace_strp=True): """ For things that have gone seriously wrong but don't merit a program halt. Outputs to stderr, so JsonOutput does not need to override. @param msg: warning to output. @param whitespace_strp: whether to strip whitespace. """ if self.errors_display: if whitespace_strp: msg = strip_whitespace(msg) if not self.log_to_file: msg = colors['warn'] + "[+] " + msg + colors['endc'] else: msg = "[" + time.strftime("%c") + "] " + msg self.print(msg, file=self.error_log)
python
{ "resource": "" }
q30060
RequestsLogger._print
train
def _print(self, method, *args, **kwargs): """ Output format affects integration tests. @see: IntegrationTests.mock_output """ sess_method = getattr(self._session, method) try: headers = kwargs['headers'] except KeyError: headers = {} tpl = '[%s] %s %s' print(tpl % (method, args[0], headers), end=' ') try: r = sess_method(*args, **kwargs) except: e = sys.exc_info() e_str = "%s: %s" % (e[0], e[1]) print("FAILED (%s)" % e_str) raise if method == "get" and r.status_code == 200: hsh = hashlib.md5(r.content).hexdigest() else: hsh = "" print(r.status_code, hsh) return r
python
{ "resource": "" }
q30061
repair_url
train
def repair_url(url): """ Fixes URL. @param url: url to repair. @param out: instance of StandardOutput as defined in this lib. @return: Newline characters are stripped from the URL string. If the url string parameter does not start with http, it prepends http:// If the url string parameter does not end with a slash, appends a slash. If the url contains a query string, it gets removed. """ url = url.strip('\n') if not re.match(r"^http", url): url = "http://" + url if "?" in url: url, _ = url.split('?') if not url.endswith("/"): return url + "/" else : return url
python
{ "resource": "" }
q30062
exc_handle
train
def exc_handle(url, out, testing): """ Handle exception. If of a determinate subset, it is stored into a file as a single type. Otherwise, full stack is stored. Furthermore, if testing, stack is always shown. @param url: url which was being scanned when exception was thrown. @param out: Output object, usually self.out. @param testing: whether we are currently running unit tests. """ quiet_exceptions = [ConnectionError, ReadTimeout, ConnectTimeout, TooManyRedirects] type, value, _ = sys.exc_info() if type not in quiet_exceptions or testing: exc = traceback.format_exc() exc_string = ("Line '%s' raised:\n" % url) + exc out.warn(exc_string, whitespace_strp=False) if testing: print(exc) else: exc_string = "Line %s '%s: %s'" % (url, type, value) out.warn(exc_string)
python
{ "resource": "" }
q30063
tail
train
def tail(f, window=20): """ Returns the last `window` lines of file `f` as a list. @param window: the number of lines. """ if window == 0: return [] BUFSIZ = 1024 f.seek(0, 2) bytes = f.tell() size = window + 1 block = -1 data = [] while size > 0 and bytes > 0: if bytes - BUFSIZ > 0: # Seek back one whole BUFSIZ f.seek(block * BUFSIZ, 2) # read BUFFER data.insert(0, f.read(BUFSIZ).decode('utf-8', errors='ignore')) else: # file too small, start from begining f.seek(0,0) # only read what was not read data.insert(0, f.read(bytes).decode('utf-8', errors='ignore')) linesFound = data[0].count('\n') size -= linesFound bytes -= BUFSIZ block -= 1 return ''.join(data).splitlines()[-window:]
python
{ "resource": "" }
q30064
process_host_line
train
def process_host_line(line): """ Processes a line and determines whether it is a tab-delimited CSV of url and host. Strips all strings. @param line: the line to analyse. @param opts: the options dictionary to modify. @return: a tuple containing url, and host header if any change is required. Otherwise, line, null is returned. """ if not line: return None, None host = None if _line_contains_host(line): url, host = re.split(SPLIT_PATTERN, line.strip()) else: url = line.strip() return url, host
python
{ "resource": "" }
q30065
instances_get
train
def instances_get(opts, plugins, url_file_input, out): """ Creates and returns an ordered dictionary containing instances for all available scanning plugins, sort of ordered by popularity. @param opts: options as returned by self._options. @param plugins: plugins as returned by plugins_util.plugins_base_get. @param url_file_input: boolean value which indicates whether we are scanning an individual URL or a file. This is used to determine kwargs required. @param out: self.out """ instances = OrderedDict() preferred_order = ['wordpress', 'joomla', 'drupal'] for cms_name in preferred_order: for plugin in plugins: plugin_name = plugin.__name__.lower() if cms_name == plugin_name: instances[plugin_name] = instance_get(plugin, opts, url_file_input, out) for plugin in plugins: plugin_name = plugin.__name__.lower() if plugin_name not in preferred_order: instances[plugin_name] = instance_get(plugin, opts, url_file_input, out) return instances
python
{ "resource": "" }
q30066
instance_get
train
def instance_get(plugin, opts, url_file_input, out): """ Return an instance dictionary for an individual plugin. @see Scan._instances_get. """ inst = plugin() hp, func, enabled_func = inst._general_init(opts, out) name = inst._meta.label kwargs = { 'hide_progressbar': hp, 'functionality': func, 'enabled_functionality': enabled_func } if url_file_input: del kwargs['hide_progressbar'] return { 'inst': inst, 'kwargs': kwargs }
python
{ "resource": "" }
q30067
result_anything_found
train
def result_anything_found(result): """ Interim solution for the fact that sometimes determine_scanning_method can legitimately return a valid scanning method, but it results that the site does not belong to a particular CMS. @param result: the result as passed to Output.result() @return: whether anything was found. """ keys = ['version', 'themes', 'plugins', 'interesting urls'] anything_found = False for k in keys: if k not in result: continue else: if not result[k]['is_empty']: anything_found = True return anything_found
python
{ "resource": "" }
q30068
VersionsFile.update
train
def update(self, sums): """ Update self.et with the sums as returned by VersionsX.sums_get @param sums: {'version': {'file1':'hash1'}} """ for version in sums: hashes = sums[version] for filename in hashes: hsh = hashes[filename] file_xpath = './files/*[@url="%s"]' % filename try: file_add = self.root.findall(file_xpath)[0] except IndexError: raise ValueError("Attempted to update element '%s' which doesn't exist" % filename) # Do not add duplicate, equal hashes. if not self.version_exists(file_add, version, hsh): new_ver = ET.SubElement(file_add, 'version') new_ver.attrib = { 'md5': hsh, 'nb': version }
python
{ "resource": "" }
q30069
github_tags_newer
train
def github_tags_newer(github_repo, versions_file, update_majors): """ Get new tags from a github repository. Cannot use github API because it doesn't support chronological ordering of tags. @param github_repo: the github repository, e.g. 'drupal/drupal/'. @param versions_file: the file path where the versions database can be found. @param update_majors: major versions to update. If you want to update the 6.x and 7.x branch, you would supply a list which would look like ['6', '7'] @return: a boolean value indicating whether an update is needed @raise MissingMajorException: A new version from a newer major branch is exists, but will not be downloaded due to it not being in majors. """ github_repo = _github_normalize(github_repo) vf = VersionsFile(versions_file) current_highest = vf.highest_version_major(update_majors) tags_url = '%s%stags' % (GH, github_repo) resp = requests.get(tags_url) bs = BeautifulSoup(resp.text, 'lxml') gh_versions = [] for header in bs.find_all('h4'): tag = header.findChild('a') if not tag: continue # Ignore learn more header. gh_versions.append(tag.text.strip()) newer = _newer_tags_get(current_highest, gh_versions) return len(newer) > 0
python
{ "resource": "" }
q30070
_check_newer_major
train
def _check_newer_major(current_highest, versions): """ Utility function for checking whether a new version exists and is not going to be updated. This is undesirable because it could result in new versions existing and not being updated. Raising is prefering to adding the new version manually because that allows maintainers to check whether the new version works. @param current_highest: as returned by VersionsFile.highest_version_major() @param versions: a list of versions. @return: void @raise MissingMajorException: A new version from a newer major branch is exists, but will not be downloaded due to it not being in majors. """ for tag in versions: update_majors = list(current_highest.keys()) example_version_str = current_highest[update_majors[0]] if _tag_is_rubbish(tag, example_version_str): continue major = tag[0:len(update_majors[0])] if major not in current_highest: higher_version_present = False for major_highest in current_highest: if version_gt(major_highest, major): higher_version_present = True break if not higher_version_present: msg = 'Failed updating: Major %s has a new version and is not going to be updated.' % major raise MissingMajorException(msg)
python
{ "resource": "" }
q30071
_newer_tags_get
train
def _newer_tags_get(current_highest, versions): """ Returns versions from versions which are greater than than the highest version in each major. If a newer major is present in versions which is not present on current_highest, an exception will be raised. @param current_highest: as returned by VersionsFile.highest_version_major() @param versions: a list of versions. @return: a list of versions. @raise MissingMajorException: A new version from a newer major branch is exists, but will not be downloaded due to it not being in majors. """ newer = [] for major in current_highest: highest_version = current_highest[major] for version in versions: version = version.lstrip('v') if version.startswith(major) and version_gt(version, highest_version): newer.append(version) _check_newer_major(current_highest, versions) return newer
python
{ "resource": "" }
q30072
github_repo_new
train
def github_repo_new(repo_url, plugin_name, versions_file, update_majors): """ Convenience method which creates GitRepo and returns the created instance, as well as a VersionsFile and tags which need to be updated. @param repo_url: the github repository path, e.g. 'drupal/drupal/' @param plugin_name: the current plugin's name (for namespace purposes). @param versions_file: the path in disk to this plugin's versions.xml. Note that this path must be relative to the directory where the droopescan module is installed. @param update_majors: major versions to update. If you want to update the 6.x and 7.x branch, you would supply a list which would look like ['6', '7'] @return: a tuple containing (GitRepo, VersionsFile, GitRepo.tags_newer()) """ gr = github_repo(repo_url, plugin_name) vf = v.VersionsFile(versions_file) new_tags = gr.tags_newer(vf, update_majors) return gr, vf, new_tags
python
{ "resource": "" }
q30073
hashes_get
train
def hashes_get(versions_file, base_path): """ Gets hashes for currently checked out version. @param versions_file: a common.VersionsFile instance to check against. @param base_path: where to look for files. e.g. './.update-workspace/silverstripe/' @return: checksums {'file1': 'hash1'} """ files = versions_file.files_get_all() result = {} for f in files: try: result[f] = functions.md5_file(base_path + f) except IOError: # Not all files exist for all versions. pass return result
python
{ "resource": "" }
q30074
file_mtime
train
def file_mtime(file_path): """ Returns the file modified time. This is with regards to the last modification the file has had in the droopescan repo, rather than actual file modification time in the filesystem. @param file_path: file path relative to the executable. @return datetime.datetime object. """ if not os.path.isfile(file_path): raise IOError('File "%s" does not exist.' % file_path) ut = subprocess.check_output(['git', 'log', '-1', '--format=%ct', file_path]).strip() return datetime.fromtimestamp(int(ut))
python
{ "resource": "" }
q30075
modules_get
train
def modules_get(url_tpl, per_page, css, max_modules=2000, pagination_type=PT.normal): """ Gets a list of modules. Note that this function can also be used to get themes. @param url_tpl: a string such as https://drupal.org/project/project_module?page=%s. %s will be replaced with the page number. @param per_page: how many items there are per page. @param css: the elements matched by this selector will be returned by the iterator. @param max_modules: absolute maximum modules we will attempt to request. @param pagination_type: type of pagination. See the PaginationType enum for more information. @return: bs4.element.Tag @see: http://www.crummy.com/software/BeautifulSoup/bs4/doc/#css-selectors @see: http://www.crummy.com/software/BeautifulSoup/bs4/doc/#tag """ page = 0 elements = False done_so_far = 0 max_potential_pages = max_modules / per_page print("Maximum pages: %s." % max_potential_pages) stop = False while elements == False or len(elements) == per_page: url = url_tpl % page r = requests.get(url) bs = BeautifulSoup(r.text, 'lxml') elements = bs.select(css) for element in elements: yield element done_so_far += 1 if done_so_far >= max_modules: stop = True break if stop: break if pagination_type == PT.normal: print('Finished parsing page %s.' % page) page += 1 elif pagination_type == PT.skip: print('Finished parsing page %s.' % (page / per_page)) page += per_page else: assert False
python
{ "resource": "" }
q30076
GitRepo.init
train
def init(self): """ Performs a clone or a fetch, depending on whether the repository has been previously cloned or not. """ if os.path.isdir(self.path): self.fetch() else: self.clone()
python
{ "resource": "" }
q30077
GitRepo.clone
train
def clone(self): """ Clones a directory based on the clone_url and plugin_name given to the constructor. The clone will be located at self.path. """ base_dir = '/'.join(self.path.split('/')[:-2]) try: os.makedirs(base_dir, 0o700) except OSError: # Raises an error exception if the leaf directory already exists. pass self._cmd(['git', 'clone', self._clone_url, self.path], cwd=os.getcwd())
python
{ "resource": "" }
q30078
GitRepo.tags_newer
train
def tags_newer(self, versions_file, majors): """ Checks this git repo tags for newer versions. @param versions_file: a common.VersionsFile instance to check against. @param majors: a list of major branches to check. E.g. ['6', '7'] @raise RuntimeError: no newer tags were found. @raise MissingMajorException: A new version from a newer major branch is exists, but hasn't been downloaded due to it not being in majors. """ highest = versions_file.highest_version_major(majors) all = self.tags_get() newer = _newer_tags_get(highest, all) if len(newer) == 0: raise RuntimeError("No new tags found.") return newer
python
{ "resource": "" }
q30079
get_rfu
train
def get_rfu(): """ Returns a list of al "regular file urls" for all plugins. """ global _rfu if _rfu: return _rfu plugins = plugins_base_get() rfu = [] for plugin in plugins: if isinstance(plugin.regular_file_url, str): rfu.append(plugin.regular_file_url) else: rfu += plugin.regular_file_url _rfu = rfu return rfu
python
{ "resource": "" }
q30080
plugin_get_rfu
train
def plugin_get_rfu(plugin): """ Returns "regular file urls" for a particular plugin. @param plugin: plugin class. """ if isinstance(plugin.regular_file_url, str): rfu = [plugin.regular_file_url] else: rfu = plugin.regular_file_url return rfu
python
{ "resource": "" }
q30081
plugin_get
train
def plugin_get(name): """ Return plugin class. @param name: the cms label. """ plugins = plugins_base_get() for plugin in plugins: if plugin.Meta.label == name: return plugin raise RuntimeError('CMS "%s" not known.' % name)
python
{ "resource": "" }
q30082
PhoneNumberField.get_prep_value
train
def get_prep_value(self, value): """ Perform preliminary non-db specific value checks and conversions. """ if value: if not isinstance(value, PhoneNumber): value = to_python(value) if value.is_valid(): format_string = getattr(settings, "PHONENUMBER_DB_FORMAT", "E164") fmt = PhoneNumber.format_map[format_string] value = value.format_as(fmt) else: value = self.get_default() return super(PhoneNumberField, self).get_prep_value(value)
python
{ "resource": "" }
q30083
unique_to_each
train
def unique_to_each(*iterables): """Return the elements from each of the input iterables that aren't in the other input iterables. For example, suppose you have a set of packages, each with a set of dependencies:: {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} If you remove one package, which dependencies can also be removed? If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) [['A'], ['C'], ['D']] If there are duplicates in one input iterable that aren't in the others they will be duplicated in the output. Input order is preserved:: >>> unique_to_each("mississippi", "missouri") [['p', 'p'], ['o', 'u', 'r']] It is assumed that the elements of each iterable are hashable. """ pool = [list(it) for it in iterables] counts = Counter(chain.from_iterable(map(set, pool))) uniques = {element for element in counts if counts[element] == 1} return [list(filter(uniques.__contains__, it)) for it in pool]
python
{ "resource": "" }
q30084
interleave_longest
train
def interleave_longest(*iterables): """Return a new iterable yielding from each iterable in turn, skipping any that are exhausted. >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) [1, 4, 6, 2, 5, 7, 3, 8] This function produces the same output as :func:`roundrobin`, but may perform better for some inputs (in particular when the number of iterables is large). """ i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) return (x for x in i if x is not _marker)
python
{ "resource": "" }
q30085
unique_everseen
train
def unique_everseen(iterable, key=None): """ Yield unique elements, preserving order. >>> list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] Sequences with a mix of hashable and unhashable items can be used. The function will be slower (i.e., `O(n^2)`) for unhashable items. Remember that ``list`` objects are unhashable - you can use the *key* parameter to transform the list to a tuple (which is hashable) to avoid a slowdown. >>> iterable = ([1, 2], [2, 3], [1, 2]) >>> list(unique_everseen(iterable)) # Slow [[1, 2], [2, 3]] >>> list(unique_everseen(iterable, key=tuple)) # Faster [[1, 2], [2, 3]] Similary, you may want to convert unhashable ``set`` objects with ``key=frozenset``. For ``dict`` objects, ``key=lambda x: frozenset(x.items())`` can be used. """ seenset = set() seenset_add = seenset.add seenlist = [] seenlist_add = seenlist.append if key is None: for element in iterable: try: if element not in seenset: seenset_add(element) yield element except TypeError: if element not in seenlist: seenlist_add(element) yield element else: for element in iterable: k = key(element) try: if k not in seenset: seenset_add(k) yield element except TypeError: if k not in seenlist: seenlist_add(k) yield element
python
{ "resource": "" }
q30086
unique_justseen
train
def unique_justseen(iterable, key=None): """Yields elements in order, ignoring serial duplicates >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] """ return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
python
{ "resource": "" }
q30087
random_product
train
def random_product(*args, **kwds): """Draw an item at random from each of the input iterables. >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP ('c', 3, 'Z') If *repeat* is provided as a keyword argument, that many items will be drawn from each iterable. >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP ('a', 2, 'd', 3) This equivalent to taking a random selection from ``itertools.product(*args, **kwarg)``. """ pools = [tuple(pool) for pool in args] * kwds.get('repeat', 1) return tuple(choice(pool) for pool in pools)
python
{ "resource": "" }
q30088
run
train
def run(input, conf, filepath=None): """Lints a YAML source. Returns a generator of LintProblem objects. :param input: buffer, string or stream to read from :param conf: yamllint configuration object """ if conf.is_file_ignored(filepath): return () if isinstance(input, (type(b''), type(u''))): # compat with Python 2 & 3 return _run(input, conf, filepath) elif hasattr(input, 'read'): # Python 2's file or Python 3's io.IOBase # We need to have everything in memory to parse correctly content = input.read() return _run(content, conf, filepath) else: raise TypeError('input should be a string or a stream')
python
{ "resource": "" }
q30089
get_line_indent
train
def get_line_indent(token): """Finds the indent of the line the token starts in.""" start = token.start_mark.buffer.rfind('\n', 0, token.start_mark.pointer) + 1 content = start while token.start_mark.buffer[content] == ' ': content += 1 return content - start
python
{ "resource": "" }
q30090
get_real_end_line
train
def get_real_end_line(token): """Finds the line on which the token really ends. With pyyaml, scalar tokens often end on a next line. """ end_line = token.end_mark.line + 1 if not isinstance(token, yaml.ScalarToken): return end_line pos = token.end_mark.pointer - 1 while (pos >= token.start_mark.pointer - 1 and token.end_mark.buffer[pos] in string.whitespace): if token.end_mark.buffer[pos] == '\n': end_line -= 1 pos -= 1 return end_line
python
{ "resource": "" }
q30091
comments_between_tokens
train
def comments_between_tokens(token1, token2):
    """Find all comments between two tokens"""
    # Select the slice of the original buffer lying between the two tokens.
    if token2 is None:
        # No following token: scan from the end of token1 to end of input.
        buf = token1.end_mark.buffer[token1.end_mark.pointer:]
    elif (token1.end_mark.line == token2.start_mark.line and
            not isinstance(token1, yaml.StreamStartToken) and
            not isinstance(token2, yaml.StreamEndToken)):
        # Both tokens sit on the same line (and neither is a stream
        # delimiter), so no comment can appear between them.
        return
    else:
        buf = token1.end_mark.buffer[token1.end_mark.pointer:
                                     token2.start_mark.pointer]

    # Running position of the current line inside the original buffer:
    # 1-based line/column numbers plus an absolute pointer.
    line_no = token1.end_mark.line + 1
    column_no = token1.end_mark.column + 1
    pointer = token1.end_mark.pointer

    # Each yielded Comment is linked to the one found just before it, so
    # consumers can walk runs of consecutive comment lines.
    comment_before = None
    for line in buf.split('\n'):
        pos = line.find('#')
        if pos != -1:
            comment = Comment(line_no, column_no + pos,
                              token1.end_mark.buffer, pointer + pos,
                              token1, token2, comment_before)
            yield comment

            comment_before = comment

        # Advance past this line and its '\n'; every subsequent line starts
        # at column 1.
        pointer += len(line) + 1
        line_no += 1
        column_no = 1
python
{ "resource": "" }
q30092
token_or_comment_or_line_generator
train
def token_or_comment_or_line_generator(buffer):
    """Generator that mixes tokens and lines, ordering them by line number"""
    tokens = token_or_comment_generator(buffer)
    lines = line_generator(buffer)

    pending_token = next(tokens, None)
    pending_line = next(lines, None)

    # Classic two-way merge: emit whichever pending item comes first,
    # preferring the token on ties.
    while pending_token is not None or pending_line is not None:
        line_first = (pending_token is None or
                      (pending_line is not None and
                       pending_token.line_no > pending_line.line_no))
        if line_first:
            yield pending_line
            pending_line = next(lines, None)
        else:
            yield pending_token
            pending_token = next(tokens, None)
python
{ "resource": "" }
q30093
ExportMigrations
train
def ExportMigrations():
    """Exports counts of unapplied migrations.

    This is meant to be called during app startup, ideally by
    django_prometheus.apps.AppConfig.
    """
    # Deferred import: MigrationExecutor asserts at import time that the app
    # registry is ready, which is not the case when django_prometheus itself
    # is imported. By the time this runs (AppConfig.ready()), it is.
    from django.db.migrations.executor import MigrationExecutor

    # With DATABASES = {} Django installs a dummy backend that raises on any
    # real use; exporting stats against it would crash the app on startup,
    # so bail out when the "default" connection is that dummy wrapper.
    using_dummy_database = ('default' in connections and
                            type(connections['default']) == DatabaseWrapper)
    if using_dummy_database:
        return

    for alias in connections.databases:
        ExportMigrationsForDatabase(alias, MigrationExecutor(connections[alias]))
python
{ "resource": "" }
q30094
ExportingCursorWrapper
train
def ExportingCursorWrapper(cursor_class, alias, vendor):
    """Returns a CursorWrapper class that knows its database's alias and
    vendor name.
    """
    def _count_errors():
        # Fresh context manager per call: counts raised exceptions by type,
        # tagged with this database's alias and vendor.
        return ExceptionCounterByType(errors_total, extra_labels={
            'alias': alias, 'vendor': vendor})

    class CursorWrapper(cursor_class):
        """Extends the base CursorWrapper to count events."""

        def execute(self, *args, **kwargs):
            execute_total.labels(alias, vendor).inc()
            with _count_errors():
                return super(CursorWrapper, self).execute(*args, **kwargs)

        def executemany(self, query, param_list, *args, **kwargs):
            batch_size = len(param_list)
            execute_total.labels(alias, vendor).inc(batch_size)
            execute_many_total.labels(alias, vendor).inc(batch_size)
            with _count_errors():
                return super(CursorWrapper, self).executemany(
                    query, param_list, *args, **kwargs)

    return CursorWrapper
python
{ "resource": "" }
q30095
ExportModelOperationsMixin
train
def ExportModelOperationsMixin(model_name):
    """Returns a mixin for models to export counters for lifecycle operations.

    Usage:
      class User(ExportModelOperationsMixin('user'), Model):
          ...
    """
    # Touch each counter once so the label's time series exists before the
    # first real operation; avoids gaps in the aggregated data.
    for counter in (model_inserts, model_updates, model_deletes):
        counter.labels(model_name)

    class Mixin(object):
        def _do_insert(self, *args, **kwargs):
            model_inserts.labels(model_name).inc()
            return super(Mixin, self)._do_insert(*args, **kwargs)

        def _do_update(self, *args, **kwargs):
            model_updates.labels(model_name).inc()
            return super(Mixin, self)._do_update(*args, **kwargs)

        def delete(self, *args, **kwargs):
            model_deletes.labels(model_name).inc()
            return super(Mixin, self).delete(*args, **kwargs)

    return Mixin
python
{ "resource": "" }
q30096
SetupPrometheusEndpointOnPort
train
def SetupPrometheusEndpointOnPort(port, addr=''):
    """Exports Prometheus metrics on an HTTPServer running in its own thread.

    The server runs on the given port and is by default listenning on
    all interfaces. This HTTPServer is fully independent of Django and
    its stack. This offers the advantage that even if Django becomes
    unable to respond, the HTTPServer will continue to function and
    export metrics. However, this also means that the features
    offered by Django (like middlewares or WSGI) can't be used.

    Caveat: when Django runs with its auto-reloader enabled (the default;
    disable with `manage.py runserver --noreload`), it forks and executes
    manage.py twice. That is merely wasteful until a port must be opened,
    as we do here — then it breaks. The reloader child advertises itself
    via the RUN_MAIN environment variable, so we refuse to start the
    exporter in that configuration.
    """
    running_under_autoreloader = os.environ.get('RUN_MAIN') == 'true'
    assert not running_under_autoreloader, (
        'The thread-based exporter can\'t be safely used when django\'s '
        'autoreloader is active. Use the URL exporter, or start django '
        'with --noreload. See documentation/exports.md.')
    prometheus_client.start_http_server(port, addr=addr)
python
{ "resource": "" }
q30097
SetupPrometheusEndpointOnPortRange
train
def SetupPrometheusEndpointOnPortRange(port_range, addr=''):
    """Like SetupPrometheusEndpointOnPort, but tries several ports.

    This is useful when you're running Django as a WSGI application
    with multiple processes and you want Prometheus to discover all
    workers. Each worker will grab a port and you can use Prometheus
    to aggregate across workers.

    port_range may be any iterable object that contains a list of
    ports. Typically this would be an xrange of contiguous ports.

    As soon as one port is found that can serve, use this one and stop
    trying. If no port in the range is free, returns silently without
    exporting.

    The same caveats regarding autoreload apply. Do not use this when
    Django's autoreloader is active.
    """
    assert os.environ.get('RUN_MAIN') != 'true', (
        'The thread-based exporter can\'t be safely used when django\'s '
        'autoreloader is active. Use the URL exporter, or start django '
        'with --noreload. See documentation/exports.md.')
    for port in port_range:
        try:
            httpd = HTTPServer((addr, port), prometheus_client.MetricsHandler)
        except (OSError, socket.error):
            # Python 2 raises socket.error, in Python 3 socket.error is an
            # alias for OSError. Either way, the port is unavailable.
            continue  # Try next port
        thread = PrometheusEndpointServer(httpd)
        thread.daemon = True
        thread.start()
        # Fix: pass the port as a lazy logging argument instead of eagerly
        # %-formatting the message; output is identical, but formatting is
        # deferred until the record is actually emitted.
        logger.info('Exporting Prometheus /metrics/ on port %s', port)
        return
python
{ "resource": "" }
q30098
SetupPrometheusExportsFromConfig
train
def SetupPrometheusExportsFromConfig():
    """Exports metrics so Prometheus can collect them."""
    addr = getattr(settings, 'PROMETHEUS_METRICS_EXPORT_ADDRESS', '')
    # A configured port range takes precedence over a single port.
    port_range = getattr(
        settings, 'PROMETHEUS_METRICS_EXPORT_PORT_RANGE', None)
    if port_range:
        SetupPrometheusEndpointOnPortRange(port_range, addr)
        return
    port = getattr(settings, 'PROMETHEUS_METRICS_EXPORT_PORT', None)
    if port:
        SetupPrometheusEndpointOnPort(port, addr)
python
{ "resource": "" }
q30099
_get_path
train
def _get_path(share_name=None, directory_name=None, file_name=None):
    '''
    Creates the path to access a file resource.

    share_name:
        Name of share.
    directory_name:
        The path to the directory.
    file_name:
        Name of file.
    '''
    # Without a share there is nothing to address but the root; directory
    # and file names are only honored when a share is present (a file name
    # may follow the share directly when no directory is given).
    if not share_name:
        return '/'
    segments = [_str(share_name)]
    if directory_name:
        segments.append(_str(directory_name))
    if file_name:
        segments.append(_str(file_name))
    return '/' + '/'.join(segments)
python
{ "resource": "" }