Dataset columns: sentence1 (string, 52 to 3.87M chars), sentence2 (string, 1 to 47.2k chars), label (string, 1 class: "entailment").
def gt(min_value,    # type: Any
       strict=False  # type: bool
       ):
    """
    'Greater than' validation_function generator. Returns a validation_function to check that
    x >= min_value (strict=False, default) or x > min_value (strict=True).

    :param min_value: minimum value for x
    :param strict: Boolean flag to switch between x >= min_value (strict=False) and x > min_value (strict=True)
    :return:
    """
    if strict:
        def gt_(x):
            if x > min_value:
                return True
            else:
                # raise Failure('x > ' + str(min_value) + ' does not hold for x=' + str(x))
                # '{val} is not strictly greater than {ref}'
                raise TooSmall(wrong_value=x, min_value=min_value, strict=True)
    else:
        def gt_(x):
            if x >= min_value:
                return True
            else:
                # raise Failure('x >= ' + str(min_value) + ' does not hold for x=' + str(x))
                # '{val} is not greater than {ref}'
                raise TooSmall(wrong_value=x, min_value=min_value, strict=False)

    gt_.__name__ = '{}greater_than_{}'.format('strictly_' if strict else '', min_value)
    return gt_
'Greater than' validation_function generator. Returns a validation_function to check that x >= min_value (strict=False, default) or x > min_value (strict=True).

:param min_value: minimum value for x
:param strict: Boolean flag to switch between x >= min_value (strict=False) and x > min_value (strict=True)
:return:
entailment
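A minimal usage sketch of the generated validator (the `TooSmall` failure class comes from the surrounding validation library and is assumed importable); `lt` in the next record behaves symmetrically:

```python
check = gt(0, strict=True)   # check.__name__ == 'strictly_greater_than_0'

check(5)   # returns True
check(0)   # raises TooSmall, since 0 is not strictly greater than 0
```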
def lt(max_value,    # type: Any
       strict=False  # type: bool
       ):
    """
    'Lesser than' validation_function generator. Returns a validation_function to check that
    x <= max_value (strict=False, default) or x < max_value (strict=True).

    :param max_value: maximum value for x
    :param strict: Boolean flag to switch between x <= max_value (strict=False) and x < max_value (strict=True)
    :return:
    """
    if strict:
        def lt_(x):
            if x < max_value:
                return True
            else:
                # raise Failure('x < ' + str(max_value) + ' does not hold for x=' + str(x))
                # '{val} is not strictly lesser than {ref}'
                raise TooBig(wrong_value=x, max_value=max_value, strict=True)
    else:
        def lt_(x):
            if x <= max_value:
                return True
            else:
                # raise Failure('x <= ' + str(max_value) + ' does not hold for x=' + str(x))
                # '{val} is not lesser than {ref}'
                raise TooBig(wrong_value=x, max_value=max_value, strict=False)

    lt_.__name__ = '{}lesser_than_{}'.format('strictly_' if strict else '', max_value)
    return lt_
'Lesser than' validation_function generator. Returns a validation_function to check that x <= max_value (strict=False, default) or x < max_value (strict=True).

:param max_value: maximum value for x
:param strict: Boolean flag to switch between x <= max_value (strict=False) and x < max_value (strict=True)
:return:
entailment
def between(min_val,          # type: Any
            max_val,          # type: Any
            open_left=False,  # type: bool
            open_right=False  # type: bool
            ):
    """
    'Is between' validation_function generator. Returns a validation_function to check that
    min_val <= x <= max_val (default). The open_left and open_right flags make the corresponding
    side strict. For example setting open_left=True will enforce min_val < x <= max_val.

    :param min_val: minimum value for x
    :param max_val: maximum value for x
    :param open_left: Boolean flag to turn the left inequality to strict mode
    :param open_right: Boolean flag to turn the right inequality to strict mode
    :return:
    """
    if open_left and open_right:
        def between_(x):
            if (min_val < x) and (x < max_val):
                return True
            else:
                # raise Failure('{} < x < {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=True,
                                 max_value=max_val, right_strict=True)
    elif open_left:
        def between_(x):
            if (min_val < x) and (x <= max_val):
                return True
            else:
                # raise Failure('between: {} < x <= {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=True,
                                 max_value=max_val, right_strict=False)
    elif open_right:
        def between_(x):
            if (min_val <= x) and (x < max_val):
                return True
            else:
                # raise Failure('between: {} <= x < {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=False,
                                 max_value=max_val, right_strict=True)
    else:
        def between_(x):
            if (min_val <= x) and (x <= max_val):
                return True
            else:
                # raise Failure('between: {} <= x <= {} does not hold for x={}'.format(min_val, max_val, x))
                raise NotInRange(wrong_value=x, min_value=min_val, left_strict=False,
                                 max_value=max_val, right_strict=False)

    between_.__name__ = 'between_{}_and_{}'.format(min_val, max_val)
    return between_
'Is between' validation_function generator. Returns a validation_function to check that min_val <= x <= max_val (default). The open_left and open_right flags make the corresponding side strict. For example setting open_left=True will enforce min_val < x <= max_val.

:param min_val: minimum value for x
:param max_val: maximum value for x
:param open_left: Boolean flag to turn the left inequality to strict mode
:param open_right: Boolean flag to turn the right inequality to strict mode
:return:
entailment
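A short usage sketch of the half-open case (again assuming the library's `NotInRange` failure class is available):

```python
check = between(0, 10, open_right=True)   # validates 0 <= x < 10

check(0)    # returns True
check(10)   # raises NotInRange (right bound is strict)
```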
def _sub_process_main(started_event: Event,
                      channel_name: str,
                      connection: Connection,
                      consumer_configuration: BrightsideConsumerConfiguration,
                      consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
                      command_processor_factory: Callable[[str], CommandProcessor],
                      mapper_func: Callable[[BrightsideMessage], Request]) -> None:
    """
    This is the main method for the sub-process. Everything we need to create the message pump and
    channel must be passed in as parameters that can be pickled, because they will be serialized
    into this process when we run. The data should be value types, not reference types, as we will
    receive a copy of the original. Inter-process communication is signalled by the event - to
    indicate startup - and the pipeline - to facilitate a sentinel or stop message.

    :param started_event: Used by the sub-process to signal that it is ready
    :param channel_name: The name we want to give the channel to the broker for identification
    :param connection: The 'broker' connection
    :param consumer_configuration: How to configure our consumer of messages from the channel
    :param consumer_factory: Callback to create the consumer. User code, as we don't know what
        consumer library they want to use. Arame? Something else?
    :param command_processor_factory: Callback to register subscribers, policies, and task queues,
        then build the command processor. User code that provides us with their requests and handlers
    :param mapper_func: We need to map between messages on the wire and our handlers
    :return:
    """
    logger = logging.getLogger(__name__)
    consumer = consumer_factory(connection, consumer_configuration, logger)
    channel = Channel(name=channel_name, consumer=consumer,
                      pipeline=consumer_configuration.pipeline)

    # TODO: Fix defaults that need passed-in config values
    command_processor = command_processor_factory(channel_name)
    message_pump = MessagePump(command_processor=command_processor, channel=channel,
                               mapper_func=mapper_func, timeout=500,
                               unacceptable_message_limit=None, requeue_count=None)
    logger.debug("Starting the message pump for %s", channel_name)
    message_pump.run(started_event)
This is the main method for the sub-process. Everything we need to create the message pump and channel must be passed in as parameters that can be pickled, because they will be serialized into this process when we run. The data should be value types, not reference types, as we will receive a copy of the original. Inter-process communication is signalled by the event - to indicate startup - and the pipeline - to facilitate a sentinel or stop message.

:param started_event: Used by the sub-process to signal that it is ready
:param channel_name: The name we want to give the channel to the broker for identification
:param connection: The 'broker' connection
:param consumer_configuration: How to configure our consumer of messages from the channel
:param consumer_factory: Callback to create the consumer. User code, as we don't know what consumer library they want to use. Arame? Something else?
:param command_processor_factory: Callback to register subscribers, policies, and task queues, then build the command processor. User code that provides us with their requests and handlers
:param mapper_func: We need to map between messages on the wire and our handlers
:return:
entailment
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the script output.
    """
    app_conf = dci_config.generate_conf()
    url = app_conf['SQLALCHEMY_DATABASE_URI']
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
    )

    with context.begin_transaction():
        context.run_migrations()
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
entailment
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine and associate a connection with the context.
    """
    app_conf = dci_config.generate_conf()
    connectable = dci_config.get_engine(app_conf)

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
        )

        with context.begin_transaction():
            context.run_migrations()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
entailment
def reject():
    """Sends a 401 reject response that enables basic auth."""
    # note: a space is needed at the end of the first fragment, otherwise the
    # implicit string concatenation produces "URL.Please"
    auth_message = ('Could not verify your access level for that URL. '
                    'Please login with proper credentials.')
    auth_message = json.dumps({'_status': 'Unauthorized',
                               'message': auth_message})

    headers = {'WWW-Authenticate': 'Basic realm="Login required"'}
    return flask.Response(auth_message, 401, headers=headers,
                          content_type='application/json')
Sends a 401 reject response that enables basic auth.
entailment
def modify(self, **kwargs):
    """Modify a contact. Returns status message.

    Optional Parameters:

    * name -- Contact name
        Type: String
    * email -- Contact email address
        Type: String
    * cellphone -- Cellphone number, without the country code part. In some countries you are
        supposed to exclude leading zeroes. (Requires countrycode and countryiso)
        Type: String
    * countrycode -- Cellphone country code (Requires cellphone and countryiso)
        Type: String
    * countryiso -- Cellphone country ISO code. For example: US (USA), GB (Britain) or SE (Sweden)
        (Requires cellphone and countrycode)
        Type: String
    * defaultsmsprovider -- Default SMS provider
        Type: String ['clickatell', 'bulksms', 'esendex', 'cellsynt']
    * directtwitter -- Send tweets as direct messages
        Type: Boolean
        Default: True
    * twitteruser -- Twitter user
        Type: String
    """
    # Warn user about unhandled parameters
    for key in kwargs:
        if key not in ['email', 'cellphone', 'countrycode', 'countryiso',
                       'defaultsmsprovider', 'directtwitter', 'twitteruser', 'name']:
            sys.stderr.write("'%s'" % key + ' is not a valid argument ' +
                             'of <PingdomContact>.modify()\n')

    response = self.pingdom.request('PUT', 'notification_contacts/%s' % self.id, kwargs)
    return response.json()['message']
Modify a contact. Returns status message.

Optional Parameters:

* name -- Contact name (Type: String)
* email -- Contact email address (Type: String)
* cellphone -- Cellphone number, without the country code part. In some countries you are supposed to exclude leading zeroes. (Requires countrycode and countryiso) (Type: String)
* countrycode -- Cellphone country code (Requires cellphone and countryiso) (Type: String)
* countryiso -- Cellphone country ISO code. For example: US (USA), GB (Britain) or SE (Sweden) (Requires cellphone and countrycode) (Type: String)
* defaultsmsprovider -- Default SMS provider (Type: String ['clickatell', 'bulksms', 'esendex', 'cellsynt'])
* directtwitter -- Send tweets as direct messages (Type: Boolean, Default: True)
* twitteruser -- Twitter user (Type: String)
entailment
def _establish_connection(self, conn: BrokerConnection) -> None:
    """
    We don't use a pool here. We only have one consumer connection per process, so we get no value
    from a pool, and we want to use a heartbeat to keep the consumer connection alive, which does
    not work with a pool.

    :return: the connection to the transport
    """
    try:
        self._logger.debug("Establishing connection.")
        self._conn = conn.ensure_connection(max_retries=3)
        self._logger.debug('Got connection: %s', conn.as_uri())
    except kombu_exceptions.OperationalError as oe:
        self._logger.error("Error connecting to RMQ, could not retry %s", oe)
        # Try to clean up the mess
        if self._conn is not None:
            self._conn.close()
        else:
            conn.close()
We don't use a pool here. We only have one consumer connection per process, so we get no value from a pool, and we want to use a heartbeat to keep the consumer connection alive, which does not work with a pool.

:return: the connection to the transport
entailment
def run_heartbeat_continuously(self) -> threading.Event:
    """
    For a long-running handler, there is a danger that we do not send a heartbeat message or any
    activity on the connection whilst we are running the handler. With a default heartbeat of 30s,
    for example, there is a risk that a handler which takes more than 15s will fail to send the
    heartbeat in time and then the broker will reset the connection. So we spin up another thread,
    where the user has marked the handler as long-running.

    :return: an event to cancel the thread
    """
    cancellation_event = threading.Event()

    # Effectively a no-op if we are not actually a long-running handler
    if not self._is_long_running_handler:
        return cancellation_event

    self._logger.debug("Running long running handler on %s", self._conn)

    def _send_heartbeat(cnx: BrokerConnection, period: int, logger: logging.Logger) -> None:
        while not cancellation_event.is_set():
            cnx.heartbeat_check()
            time.sleep(period)
        logger.debug("Signalled to exit long-running handler heartbeat")

    heartbeat_thread = threading.Thread(target=_send_heartbeat,
                                        args=(self._conn, 1, self._logger),
                                        daemon=True)
    self._logger.debug("Begin heartbeat thread for %s", self._conn)
    heartbeat_thread.start()
    self._logger.debug("Heartbeat running on thread for %s", self._conn)
    return cancellation_event
For a long-running handler, there is a danger that we do not send a heartbeat message or any activity on the connection whilst we are running the handler. With a default heartbeat of 30s, for example, there is a risk that a handler which takes more than 15s will fail to send the heartbeat in time and then the broker will reset the connection. So we spin up another thread, where the user has marked the handler as long-running.

:return: an event to cancel the thread
entailment
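The pattern above generalizes to any periodic keep-alive: a daemon thread loops until a shared `threading.Event` is set. A self-contained sketch of that pattern, with illustrative names that are not part of the code above:

```python
import threading
import time

def run_periodic(task, period):
    """Run task every `period` seconds on a daemon thread until the returned event is set."""
    cancel = threading.Event()

    def _loop():
        while not cancel.is_set():
            task()
            time.sleep(period)

    threading.Thread(target=_loop, daemon=True).start()
    return cancel

cancel = run_periodic(lambda: print("beat"), 1.0)
time.sleep(3.5)
cancel.set()  # the loop exits after its current sleep
```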
def _check(user, topic):
    """If the topic has its export_control set to True then all the teams under the product team
    can access the topic's resources.

    :param user:
    :param topic:
    :return: True if check is ok, False otherwise
    """
    # if export_control then check the team is associated to the product, i.e.:
    #   - the current user belongs to the product's team
    #   OR
    #   - the product's team belongs to the user's parent teams
    if topic['export_control']:
        product = v1_utils.verify_existence_and_get(topic['product_id'], models.PRODUCTS)
        return (user.is_in_team(product['team_id']) or
                product['team_id'] in user.parent_teams_ids)
    return False
If the topic has its export_control set to True then all the teams under the product team can access the topic's resources.

:param user:
:param topic:
:return: True if check is ok, False otherwise
entailment
def get_stream_or_content_from_request(request):
    """Ensure the proper content is uploaded.

    The stream might already have been consumed by the authentication process. Hence
    flask.request.stream might not be readable and would return an improper value. This method
    checks if the stream has already been consumed, and if so retrieves the data from
    flask.request.data where it has been stored.
    """
    if request.stream.tell():
        logger.info('Request stream already consumed. '
                    'Storing file content using in-memory data.')
        return request.data
    else:
        logger.info('Storing file content using request stream.')
        return request.stream
Ensure the proper content is uploaded.

The stream might already have been consumed by the authentication process. Hence flask.request.stream might not be readable and would return an improper value. This method checks if the stream has already been consumed, and if so retrieves the data from flask.request.data where it has been stored.
entailment
def xpath_on_node(self, node, xpath, **kwargs):
    """
    Return result of performing the given XPath query on the given node.

    All known namespace prefix-to-URI mappings in the document are automatically included in the
    XPath invocation. If an empty/default namespace (i.e. None) is defined, this is converted to
    the prefix name '_' so it can be used despite empty namespace prefixes being unsupported by
    XPath.
    """
    if isinstance(node, etree._ElementTree):
        # Document node lxml.etree._ElementTree has no nsmap, lookup root
        root = self.get_impl_root(node)
        namespaces_dict = root.nsmap.copy()
    else:
        namespaces_dict = node.nsmap.copy()
    if 'namespaces' in kwargs:
        namespaces_dict.update(kwargs['namespaces'])
    # Empty namespace prefix is not supported, convert to '_' prefix
    if None in namespaces_dict:
        default_ns_uri = namespaces_dict.pop(None)
        namespaces_dict['_'] = default_ns_uri
    # Include XMLNS namespace if it's not already defined
    if 'xmlns' not in namespaces_dict:
        namespaces_dict['xmlns'] = nodes.Node.XMLNS_URI
    return node.xpath(xpath, namespaces=namespaces_dict)
Return result of performing the given XPath query on the given node. All known namespace prefix-to-URI mappings in the document are automatically included in the XPath invocation. If an empty/default namespace (i.e. None) is defined, this is converted to the prefix name '_' so it can be used despite empty namespace prefixes being unsupported by XPath.
entailment
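The None-to-'_' prefix remapping can be demonstrated in isolation with plain lxml (a standalone illustration, not part of the adapter class above):

```python
from lxml import etree

doc = etree.fromstring('<root xmlns="urn:example"><item/></root>')
nsmap = dict(doc.nsmap)         # {None: 'urn:example'}
nsmap['_'] = nsmap.pop(None)    # XPath rejects empty prefixes, so remap to '_'
print(doc.xpath('//_:item', namespaces=nsmap))   # [<Element {urn:example}item at ...>]
```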
def _is_ns_in_ancestor(self, node, name, value):
    """
    Return True if the given namespace name/value is defined in an ancestor of the given node,
    meaning that the given node need not have its own attributes to apply that namespacing.
    """
    curr_node = self.get_node_parent(node)
    while curr_node.__class__ == etree._Element:
        if (hasattr(curr_node, 'nsmap')
                and curr_node.nsmap.get(name) == value):
            return True
        for n, v in curr_node.attrib.items():
            if v == value and '{%s}' % nodes.Node.XMLNS_URI in n:
                return True
        curr_node = self.get_node_parent(curr_node)
    return False
Return True if the given namespace name/value is defined in an ancestor of the given node, meaning that the given node need not have its own attributes to apply that namespacing.
entailment
def generic_annotate(qs_model, generic_qs_model, aggregator, gfk_field=None, alias='score'):
    """
    Find blog entries with the most comments:

        qs = generic_annotate(Entry.objects.public(), Comment.objects.public(), Count('comments__id'))
        for entry in qs:
            print entry.title, entry.score

    Find the highest rated foods:

        generic_annotate(Food, Rating, Avg('ratings__rating'), alias='avg')
        for food in qs:
            print food.name, '- average rating:', food.avg

    .. note::
        In both of the above examples it is assumed that a GenericRelation exists on Entry to
        Comment (named "comments") and also on Food to Rating (named "ratings"). If a
        GenericRelation does *not* exist, the query will still return correct results but the
        code path will be different as it will use the fallback method.

    .. warning::
        If the underlying column type differs between the qs_model's primary key and the
        generic_qs_model's foreign key column, it will use the fallback method, which can
        correctly CAST.

    :param qs_model: A model or a queryset of objects you want to perform annotation on, e.g. blog entries
    :param generic_qs_model: A model or queryset containing a GFK, e.g. comments
    :param aggregator: an aggregation, from django.db.models, e.g. Count('id') or Avg('rating')
    :param gfk_field: explicitly specify the field w/the gfk
    :param alias: attribute name to use for annotation
    """
    return fallback_generic_annotate(qs_model, generic_qs_model, aggregator, gfk_field, alias)
Find blog entries with the most comments:

    qs = generic_annotate(Entry.objects.public(), Comment.objects.public(), Count('comments__id'))
    for entry in qs:
        print entry.title, entry.score

Find the highest rated foods:

    generic_annotate(Food, Rating, Avg('ratings__rating'), alias='avg')
    for food in qs:
        print food.name, '- average rating:', food.avg

.. note:: In both of the above examples it is assumed that a GenericRelation exists on Entry to Comment (named "comments") and also on Food to Rating (named "ratings"). If a GenericRelation does *not* exist, the query will still return correct results but the code path will be different as it will use the fallback method.

.. warning:: If the underlying column type differs between the qs_model's primary key and the generic_qs_model's foreign key column, it will use the fallback method, which can correctly CAST.

:param qs_model: A model or a queryset of objects you want to perform annotation on, e.g. blog entries
:param generic_qs_model: A model or queryset containing a GFK, e.g. comments
:param aggregator: an aggregation, from django.db.models, e.g. Count('id') or Avg('rating')
:param gfk_field: explicitly specify the field w/the gfk
:param alias: attribute name to use for annotation
entailment
def generic_aggregate(qs_model, generic_qs_model, aggregator, gfk_field=None):
    """
    Find total number of comments on blog entries:

        generic_aggregate(Entry.objects.public(), Comment.objects.public(), Count('comments__id'))

    Find the average rating for foods starting with 'a':

        a_foods = Food.objects.filter(name__startswith='a')
        generic_aggregate(a_foods, Rating, Avg('ratings__rating'))

    .. note::
        In both of the above examples it is assumed that a GenericRelation exists on Entry to
        Comment (named "comments") and also on Food to Rating (named "ratings"). If a
        GenericRelation does *not* exist, the query will still return correct results but the
        code path will be different as it will use the fallback method.

    .. warning::
        If the underlying column type differs between the qs_model's primary key and the
        generic_qs_model's foreign key column, it will use the fallback method, which can
        correctly CAST.

    :param qs_model: A model or a queryset of objects you want to perform annotation on, e.g. blog entries
    :param generic_qs_model: A model or queryset containing a GFK, e.g. comments
    :param aggregator: an aggregation, from django.db.models, e.g. Count('id') or Avg('rating')
    :param gfk_field: explicitly specify the field w/the gfk
    """
    return fallback_generic_aggregate(qs_model, generic_qs_model, aggregator, gfk_field)
Find total number of comments on blog entries:

    generic_aggregate(Entry.objects.public(), Comment.objects.public(), Count('comments__id'))

Find the average rating for foods starting with 'a':

    a_foods = Food.objects.filter(name__startswith='a')
    generic_aggregate(a_foods, Rating, Avg('ratings__rating'))

.. note:: In both of the above examples it is assumed that a GenericRelation exists on Entry to Comment (named "comments") and also on Food to Rating (named "ratings"). If a GenericRelation does *not* exist, the query will still return correct results but the code path will be different as it will use the fallback method.

.. warning:: If the underlying column type differs between the qs_model's primary key and the generic_qs_model's foreign key column, it will use the fallback method, which can correctly CAST.

:param qs_model: A model or a queryset of objects you want to perform annotation on, e.g. blog entries
:param generic_qs_model: A model or queryset containing a GFK, e.g. comments
:param aggregator: an aggregation, from django.db.models, e.g. Count('id') or Avg('rating')
:param gfk_field: explicitly specify the field w/the gfk
entailment
def generic_filter(generic_qs_model, filter_qs_model, gfk_field=None):
    """
    Only show me ratings made on foods that start with "a":

        a_foods = Food.objects.filter(name__startswith='a')
        generic_filter(Rating.objects.all(), a_foods)

    Only show me comments from entries that are marked as public:

        generic_filter(Comment.objects.public(), Entry.objects.public())

    :param generic_qs_model: A model or queryset containing a GFK, e.g. comments
    :param filter_qs_model: A model or a queryset of objects you want to restrict the generic_qs to
    :param gfk_field: explicitly specify the field w/the gfk
    """
    generic_qs = normalize_qs_model(generic_qs_model)
    filter_qs = normalize_qs_model(filter_qs_model)
    if not gfk_field:
        gfk_field = get_gfk_field(generic_qs.model)
    pk_field_type = get_field_type(filter_qs.model._meta.pk)
    gfk_field_type = get_field_type(generic_qs.model._meta.get_field(gfk_field.fk_field))
    if pk_field_type != gfk_field_type:
        return fallback_generic_filter(generic_qs, filter_qs, gfk_field)
    return generic_qs.filter(**{
        gfk_field.ct_field: ContentType.objects.get_for_model(filter_qs.model),
        '%s__in' % gfk_field.fk_field: filter_qs.values('pk'),
    })
Only show me ratings made on foods that start with "a":

    a_foods = Food.objects.filter(name__startswith='a')
    generic_filter(Rating.objects.all(), a_foods)

Only show me comments from entries that are marked as public:

    generic_filter(Comment.objects.public(), Entry.objects.public())

:param generic_qs_model: A model or queryset containing a GFK, e.g. comments
:param filter_qs_model: A model or a queryset of objects you want to restrict the generic_qs to
:param gfk_field: explicitly specify the field w/the gfk
entailment
def gen_etag():
    """Generate random etag based on MD5."""
    my_salt = gen_uuid()
    if six.PY2:
        my_salt = my_salt.decode('utf-8')
    elif six.PY3:
        my_salt = my_salt.encode('utf-8')
    md5 = hashlib.md5()
    md5.update(my_salt)
    return md5.hexdigest()
Generate random etag based on MD5.
entailment
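On Python 3 alone the six branching disappears; a hypothetical equivalent, assuming gen_uuid() simply wraps uuid4 (MD5 is acceptable here since the etag is not security-sensitive):

```python
import hashlib
import uuid

def gen_etag():
    """Generate a random etag based on MD5 (Python 3 only sketch)."""
    return hashlib.md5(uuid.uuid4().hex.encode('utf-8')).hexdigest()
```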
def delete(self):
    """Delete this email report."""
    response = self.pingdom.request('DELETE', 'reports.shared/%s' % self.id)
    return response.json()['message']
Delete this email report
entailment
def is_multiple_of(ref):
    """ Validates that x is a multiple of the reference (`x % ref == 0`) """
    def is_multiple_of_ref(x):
        if x % ref == 0:
            return True
        else:
            # raise Failure('x % {ref} == 0 does not hold for x={val}'.format(ref=ref, val=x))
            raise IsNotMultipleOf(wrong_value=x, ref=ref)

    is_multiple_of_ref.__name__ = 'is_multiple_of_{}'.format(ref)
    return is_multiple_of_ref
Validates that x is a multiple of the reference (`x % ref == 0`)
entailment
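Usage mirrors the other validation_function generators (assuming the library's `IsNotMultipleOf` failure class):

```python
check = is_multiple_of(3)   # check.__name__ == 'is_multiple_of_3'

check(9)    # returns True
check(10)   # raises IsNotMultipleOf
```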
def get_all_components(user, topic_id):
    """Get all components of a topic."""
    args = schemas.args(flask.request.args.to_dict())

    query = v1_utils.QueryBuilder(_TABLE, args, _C_COLUMNS)

    query.add_extra_condition(sql.and_(
        _TABLE.c.topic_id == topic_id,
        _TABLE.c.state != 'archived'))

    nb_rows = query.get_number_of_rows()
    rows = query.execute(fetchall=True)
    rows = v1_utils.format_result(rows, _TABLE.name, args['embed'], _EMBED_MANY)

    # Return only the components that have the export_control flag set to true
    # if user.is_not_super_admin():
    rows = [row for row in rows if row['export_control']]

    return flask.jsonify({'components': rows, '_meta': {'count': nb_rows}})
Get all components of a topic.
entailment
def get_component_types_from_topic(topic_id, db_conn=None):
    """Returns the component types of a topic."""
    db_conn = db_conn or flask.g.db_conn
    query = sql.select([models.TOPICS]).\
        where(models.TOPICS.c.id == topic_id)
    topic = db_conn.execute(query).fetchone()
    topic = dict(topic)
    return topic['component_types']
Returns the component types of a topic.
entailment
def get_component_types(topic_id, remoteci_id, db_conn=None):
    """Returns either the topic's component types or the rconfiguration's component types."""
    db_conn = db_conn or flask.g.db_conn
    rconfiguration = remotecis.get_remoteci_configuration(topic_id, remoteci_id,
                                                          db_conn=db_conn)

    # if there is no rconfiguration associated to the remoteci or no
    # component types then use the topic's one.
    if (rconfiguration is not None and
            rconfiguration['component_types'] is not None):
        component_types = rconfiguration['component_types']
    else:
        component_types = get_component_types_from_topic(topic_id, db_conn=db_conn)

    return component_types, rconfiguration
Returns either the topic's component types or the rconfiguration's component types.
entailment
def get_last_components_by_type(component_types, topic_id, db_conn=None):
    """For each component type of a topic, get the last one."""
    db_conn = db_conn or flask.g.db_conn
    schedule_components_ids = []
    for ct in component_types:
        where_clause = sql.and_(models.COMPONENTS.c.type == ct,
                                models.COMPONENTS.c.topic_id == topic_id,
                                models.COMPONENTS.c.export_control == True,  # noqa
                                models.COMPONENTS.c.state == 'active')
        query = (sql.select([models.COMPONENTS.c.id])
                 .where(where_clause)
                 .order_by(sql.desc(models.COMPONENTS.c.created_at)))
        cmpt_id = db_conn.execute(query).fetchone()

        if cmpt_id is None:
            msg = 'Component of type "%s" not found or not exported.' % ct
            raise dci_exc.DCIException(msg, status_code=412)
        cmpt_id = cmpt_id[0]

        if cmpt_id in schedule_components_ids:
            msg = ('Component types %s malformed: type %s duplicated.' %
                   (component_types, ct))
            raise dci_exc.DCIException(msg, status_code=412)
        schedule_components_ids.append(cmpt_id)
    return schedule_components_ids
For each component type of a topic, get the last one.
entailment
def verify_and_get_components_ids(topic_id, components_ids, component_types, db_conn=None):
    """Process some verifications of the provided component ids."""
    db_conn = db_conn or flask.g.db_conn
    if len(components_ids) != len(component_types):
        msg = 'The number of component ids does not match the number ' \
              'of component types %s' % component_types
        raise dci_exc.DCIException(msg, status_code=412)

    # get the components from their ids
    schedule_component_types = set()
    for c_id in components_ids:
        where_clause = sql.and_(models.COMPONENTS.c.id == c_id,
                                models.COMPONENTS.c.topic_id == topic_id,
                                models.COMPONENTS.c.export_control == True,  # noqa
                                models.COMPONENTS.c.state == 'active')
        query = (sql.select([models.COMPONENTS])
                 .where(where_clause))
        cmpt = db_conn.execute(query).fetchone()

        if cmpt is None:
            msg = 'Component id %s not found or not exported' % c_id
            raise dci_exc.DCIException(msg, status_code=412)

        cmpt = dict(cmpt)
        if cmpt['type'] in schedule_component_types:
            msg = ('Component types malformed: type %s duplicated.' %
                   cmpt['type'])
            raise dci_exc.DCIException(msg, status_code=412)
        schedule_component_types.add(cmpt['type'])
    return components_ids
Process some verifications of the provided component ids.
entailment
def retrieve_tags_from_component(user, c_id):
    """Retrieve all tags attached to a component."""
    JCT = models.JOIN_COMPONENTS_TAGS
    query = (sql.select([models.TAGS])
             .select_from(JCT.join(models.TAGS))
             .where(JCT.c.component_id == c_id))
    rows = flask.g.db_conn.execute(query)
    return flask.jsonify({'tags': rows,
                          '_meta': {'count': rows.rowcount}})
Retrieve all tags attached to a component.
entailment
def add_tag_for_component(user, c_id):
    """Add a tag on a specific component."""
    v1_utils.verify_existence_and_get(c_id, _TABLE)
    values = {
        'component_id': c_id
    }
    component_tagged = tags.add_tag_to_resource(values, models.JOIN_COMPONENTS_TAGS)
    return flask.Response(json.dumps(component_tagged), 201,
                          content_type='application/json')
Add a tag on a specific component.
entailment
def delete_tag_for_component(user, c_id, tag_id):
    """Delete a tag on a specific component."""
    # Todo : check c_id and tag_id exist in db
    # note: a plain Python `and` between two SQLAlchemy clauses does not build a
    # SQL AND (clause truthiness is undefined); sql.and_() is required here
    query = _TABLE_TAGS.delete().where(sql.and_(_TABLE_TAGS.c.tag_id == tag_id,
                                                _TABLE_TAGS.c.component_id == c_id))
    try:
        flask.g.db_conn.execute(query)
    except sa_exc.IntegrityError:
        raise dci_exc.DCICreationConflict(_TABLE_TAGS.c.tag_id, 'tag_id')

    return flask.Response(None, 204, content_type='application/json')
Delete a tag on a specific component.
entailment
def should_be_hidden_as_cause(exc):
    """ Used everywhere to decide if some exception type should be displayed or hidden as the
    cause of an error """
    # reduced traceback in case of HasWrongType (instance_of checks)
    from valid8.validation_lib.types import HasWrongType, IsWrongType
    return isinstance(exc, (HasWrongType, IsWrongType))
Used everywhere to decide if some exception type should be displayed or hidden as the cause of an error
entailment
def is_error_of_type(exc, ref_type):
    """
    Helper function to determine if some exception is of some type, by also looking at its
    declared __cause__

    :param exc:
    :param ref_type:
    :return:
    """
    if isinstance(exc, ref_type):
        return True
    elif hasattr(exc, '__cause__') and exc.__cause__ is not None:
        return is_error_of_type(exc.__cause__, ref_type)
    # neither the exception nor any of its chained causes match
    return False
Helper function to determine if some exception is of some type, by also looking at its declared __cause__

:param exc:
:param ref_type:
:return:
entailment
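A quick usage sketch showing how the `__cause__` chain is followed (Python 3 syntax; the exception classes here are illustrative):

```python
class Low(Exception): pass
class High(Exception): pass

try:
    try:
        raise Low()
    except Low as low:
        raise High() from low   # sets High.__cause__ to the Low instance
except High as exc:
    assert is_error_of_type(exc, High)   # direct match
    assert is_error_of_type(exc, Low)    # found by recursing into __cause__
```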
def _failure_raiser(validation_callable,  # type: Callable
                    failure_type=None,    # type: Type[WrappingFailure]
                    help_msg=None,        # type: str
                    **kw_context_args):
    # type: (...) -> Callable
    """
    Wraps the provided validation function so that in case of failure it raises the given
    failure_type or a WrappingFailure with the given help message.

    :param validation_callable:
    :param failure_type: an optional subclass of `WrappingFailure` that should be raised in case
        of failure, instead of `WrappingFailure`.
    :param help_msg: an optional string help message for the raised `WrappingFailure` (if no
        failure_type is provided)
    :param kw_context_args: optional context arguments for the custom failure message
    :return:
    """
    # check failure type
    if failure_type is not None and help_msg is not None:
        raise ValueError('Only one of failure_type and help_msg can be set at the same time')

    # convert mini-lambdas to functions if needed
    validation_callable = as_function(validation_callable)

    # create wrapper
    # option (a) use the `decorate()` helper method to preserve name and signature of the inner object
    # ==> NO, we want to support also non-function callable objects
    # option (b) simply create a wrapper manually
    def raiser(x):
        """ Wraps validation_callable to raise a failure_type_or_help_msg in case of failure """
        try:
            # perform validation
            res = validation_callable(x)
        except Exception as e:
            # no need to raise from e since the __cause__ is already set in the constructor:
            # we can safely commonalize
            res = e
        if not result_is_success(res):
            typ = failure_type or WrappingFailure
            exc = typ(wrapped_func=validation_callable, wrong_value=x, validation_outcome=res,
                      help_msg=help_msg, **kw_context_args)
            raise exc

    # set a name so that the error messages are more user-friendly
    # NO, do not include the callable type or error message in the name since it is only used in
    # error messages where they will appear anyway!
    # ---
    # if help_msg or failure_type:
    #     raiser.__name__ = 'failure_raiser({}, {})'.format(get_callable_name(validation_callable),
    #                                                       help_msg or failure_type.__name__)
    # else:
    #     raiser.__name__ = 'failure_raiser({})'.format(get_callable_name(validation_callable))
    # ---
    raiser.__name__ = get_callable_name(validation_callable)
    # Note: obviously this can hold as long as we do not check the name of this object in any
    # other context than raising errors. If we want to support this, then creating a callable
    # object with everything in the fields will be probably more appropriate so that error
    # messages will be able to display the inner name, while repr() will still say that this is
    # a failure raiser.
    # TODO consider transforming failure_raiser into a class (see comment above)
    return raiser
Wraps the provided validation function so that in case of failure it raises the given failure_type or a WrappingFailure with the given help message.

:param validation_callable:
:param failure_type: an optional subclass of `WrappingFailure` that should be raised in case of failure, instead of `WrappingFailure`.
:param help_msg: an optional string help message for the raised `WrappingFailure` (if no failure_type is provided)
:param kw_context_args: optional context arguments for the custom failure message
:return:
entailment
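A hedged usage sketch (assuming `WrappingFailure` and its helpers from the surrounding library; `is_positive` is illustrative):

```python
def is_positive(x):
    return x > 0

checker = _failure_raiser(is_positive, help_msg='x must be positive')
checker(2)    # returns None silently: the outcome was a success
checker(-1)   # raises WrappingFailure carrying wrong_value=-1 and the help message
```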
def _none_accepter(validation_callable  # type: Callable
                   ):
    # type: (...) -> Callable
    """
    Wraps the given validation callable to accept None values silently. When a None value is
    received by the wrapper, it is not passed to the validation_callable and instead this function
    will return True. When any other value is received the validation_callable is called as usual.

    Note: the created wrapper has the same name as the validation callable for more user-friendly
    error messages

    :param validation_callable:
    :return:
    """
    # option (a) use the `decorate()` helper method to preserve name and signature of the inner object
    # ==> NO, we want to support also non-function callable objects
    # option (b) simply create a wrapper manually
    def accept_none(x):
        if x is not None:
            # proceed with validation as usual
            return validation_callable(x)
        else:
            # value is None: skip validation
            return True

    # set a name so that the error messages are more user-friendly
    accept_none.__name__ = 'skip_on_none({})'.format(get_callable_name(validation_callable))
    return accept_none
Wraps the given validation callable to accept None values silently. When a None value is received by the wrapper, it is not passed to the validation_callable and instead this function will return True. When any other value is received the validation_callable is called as usual.

Note: the created wrapper has the same name as the validation callable for more user-friendly error messages

:param validation_callable:
:return:
entailment
def _none_rejecter(validation_callable  # type: Callable
                   ):
    # type: (...) -> Callable
    """
    Wraps the given validation callable to reject None values. When a None value is received by
    the wrapper, it is not passed to the validation_callable and instead this function will raise
    a WrappingFailure. When any other value is received the validation_callable is called as
    usual.

    :param validation_callable:
    :return:
    """
    # option (a) use the `decorate()` helper method to preserve name and signature of the inner object
    # ==> NO, we want to support also non-function callable objects
    # option (b) simply create a wrapper manually
    def reject_none(x):
        if x is not None:
            return validation_callable(x)
        else:
            raise ValueIsNone(wrong_value=x)

    # set a name so that the error messages are more user-friendly ==> NO! here we want to see the checker
    reject_none.__name__ = 'reject_none({})'.format(get_callable_name(validation_callable))
    return reject_none
Wraps the given validation callable to reject None values. When a None value is received by the wrapper, it is not passed to the validation_callable and instead this function will raise a WrappingFailure. When any other value is received the validation_callable is called as usual.

:param validation_callable:
:return:
entailment
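The two wrappers compose naturally with any validation callable; a short sketch (assuming the library's `ValueIsNone` failure class; `is_positive` is illustrative):

```python
def is_positive(x):
    return x > 0

tolerant = _none_accepter(is_positive)
tolerant(None)   # returns True: None short-circuits validation
tolerant(3)      # returns True: non-None values are validated as usual

strict = _none_rejecter(is_positive)
strict(None)     # raises ValueIsNone before is_positive is ever called
```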
def get_help_msg(self,
                 dotspace_ending=False,  # type: bool
                 **kwargs):
    # type: (...) -> str
    """
    The method used to get the formatted help message according to kwargs. By default it returns
    the 'help_msg' attribute, whether it is defined at the instance level or at the class level.

    The help message is formatted according to help_msg.format(**kwargs), and may be terminated
    with a dot and a space if dotspace_ending is set to True.

    :param dotspace_ending: True will append a dot and a space at the end of the message if it is
        not empty (default is False)
    :param kwargs: keyword arguments to format the help message
    :return: the formatted help message
    """
    context = self.get_context_for_help_msgs(kwargs)

    if self.help_msg is not None and len(self.help_msg) > 0:
        # create a copy because we will modify it
        context = copy(context)

        # first format if needed
        try:
            help_msg = self.help_msg
            variables = re.findall(r"{\S+}", help_msg)
            for v in set(variables):
                v = v[1:-1]
                if v in context and len(str(context[v])) > self.__max_str_length_displayed__:
                    new_name = '@@@@' + v + '@@@@'
                    help_msg = help_msg.replace('{' + v + '}', '{' + new_name + '}')
                    context[new_name] = "(too big for display)"
            help_msg = help_msg.format(**context)
        except KeyError as e:
            # no need to raise from e, __cause__ is set in the constructor
            raise HelpMsgFormattingException(self.help_msg, e, context)

        # then add a trailing dot and space if needed
        if dotspace_ending:
            return end_with_dot_space(help_msg)
        else:
            return help_msg
    else:
        return ''
The method used to get the formatted help message according to kwargs. By default it returns the 'help_msg' attribute, whether it is defined at the instance level or at the class level. The help message is formatted according to help_msg.format(**kwargs), and may be terminated with a dot and a space if dotspace_ending is set to True. :param dotspace_ending: True will append a dot and a space at the end of the message if it is not empty (default is False) :param kwargs: keyword arguments to format the help message :return: the formatted help message
entailment
def get_details(self):
    """ The function called to get the details appended to the help message when
    self.append_details is True """
    strval = str(self.wrong_value)
    if len(strval) > self.__max_str_length_displayed__:
        return '(Actual value is too big to be printed in this message)'
    else:
        return 'Wrong value: [{}]'.format(self.wrong_value)
The function called to get the details appended to the help message when self.append_details is True
entailment
def get_details(self):
    """ Overrides the method in Failure so as to add a few details about the wrapped function
    and outcome """
    if isinstance(self.validation_outcome, Exception):
        if isinstance(self.validation_outcome, Failure):
            # do not say again what was the value, it is already mentioned inside :)
            end_str = ''
        else:
            end_str = ' for value [{value}]'.format(value=self.wrong_value)

        contents = 'Function [{wrapped}] raised [{exception}: {details}]{end}.' \
                   ''.format(wrapped=get_callable_name(self.wrapped_func),
                             exception=type(self.validation_outcome).__name__,
                             details=self.validation_outcome, end=end_str)
    else:
        contents = 'Function [{wrapped}] returned [{result}] for value [{value}].' \
                   ''.format(wrapped=get_callable_name(self.wrapped_func),
                             result=self.validation_outcome, value=self.wrong_value)
    return contents
Overrides the method in Failure so as to add a few details about the wrapped function and outcome
entailment
def get_context_for_help_msgs(self, context_dict):
    """ We override this method from HelpMsgMixIn to replace wrapped_func with its name """
    context_dict = copy(context_dict)
    context_dict['wrapped_func'] = get_callable_name(context_dict['wrapped_func'])
    return context_dict
We override this method from HelpMsgMixIn to replace wrapped_func with its name
entailment
def validate_field(cls, field_name,
                   *validation_func,  # type: ValidationFuncs
                   **kwargs):
    # type: (...) -> Callable
    """
    A class decorator. It goes through all class variables and for all of those that are
    descriptors with a __set__, it wraps the descriptors' setter function with a `validate_arg`
    annotation.

    :param field_name:
    :param validation_func:
    :param help_msg:
    :param error_type:
    :param none_policy:
    :param kw_context_args:
    :return:
    """
    return decorate_cls_with_validation(cls, field_name, *validation_func, **kwargs)
A class decorator. It goes through all class variables and for all of those that are descriptors with a __set__, it wraps the descriptors' setter function with a `validate_arg` annotation.

:param field_name:
:param validation_func:
:param help_msg:
:param error_type:
:param none_policy:
:param kw_context_args:
:return:
entailment
def validate_io(f=DECORATED,
                none_policy=None,      # type: int
                _out_=None,            # type: ValidationFuncs
                **kw_validation_funcs  # type: ValidationFuncs
                ):
    """
    A function decorator to add input validation prior to the function execution. It should be
    called with named arguments: for each function arg name, provide a single validation function
    or a list of validation functions to apply. If validation fails, it will raise an
    InputValidationError with details about the function, the input name, and any further
    information available from the validation function(s).

    For example:

    ```
    def is_even(x):
        return x % 2 == 0

    def gt(a):
        def gt(x):
            return x >= a
        return gt

    @validate_io(a=[is_even, gt(1)], b=is_even)
    def myfunc(a, b):
        print('hello')
    ```

    will generate the equivalent of:

    ```
    def myfunc(a, b):
        gt1 = gt(1)
        if (is_even(a) and gt1(a)) and is_even(b):
            print('hello')
        else:
            raise InputValidationError(...)
    ```

    :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the
        various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
    :param _out_: a validation function or list of validation functions to apply to the function
        output. See kw_validation_funcs for details about the syntax.
    :param kw_validation_funcs: keyword arguments: for each of the function's input names, the
        validation function or list of validation functions to use. A validation function may be
        a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of
        several such elements. Nested lists are supported and indicate an implicit `and_` (such as
        the main list). Tuples indicate an implicit `_failure_raiser`.
        [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used
        instead of callables, they will be transformed to functions automatically.
    :return: the decorated function, that will perform input validation before executing the
        function's code everytime it is executed.
    """
    return decorate_several_with_validation(f, none_policy=none_policy, _out_=_out_,
                                            **kw_validation_funcs)
A function decorator to add input validation prior to the function execution. It should be called with named arguments: for each function arg name, provide a single validation function or a list of validation functions to apply. If validation fails, it will raise an InputValidationError with details about the function, the input name, and any further information available from the validation function(s).

For example:

    def is_even(x):
        return x % 2 == 0

    def gt(a):
        def gt(x):
            return x >= a
        return gt

    @validate_io(a=[is_even, gt(1)], b=is_even)
    def myfunc(a, b):
        print('hello')

will generate the equivalent of:

    def myfunc(a, b):
        gt1 = gt(1)
        if (is_even(a) and gt1(a)) and is_even(b):
            print('hello')
        else:
            raise InputValidationError(...)

:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param _out_: a validation function or list of validation functions to apply to the function output. See kw_validation_funcs for details about the syntax.
:param kw_validation_funcs: keyword arguments: for each of the function's input names, the validation function or list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically.
:return: the decorated function, that will perform input validation before executing the function's code everytime it is executed.
entailment
def validate_arg(f, arg_name,
                 *validation_func,  # type: ValidationFuncs
                 **kwargs
                 ):
    # type: (...) -> Callable
    """
    A decorator to apply function input validation for the given argument name, with the provided
    base validation function(s). You may use several such decorators on a given function as long
    as they are stacked on top of each other (no external decorator in the middle).

    :param arg_name:
    :param validation_func: the base validation function or list of base validation functions to
        use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a
        list of several such elements. Nested lists are supported and indicate an implicit `and_`
        (such as the main list). Tuples indicate an implicit `_failure_raiser`.
        [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used
        instead of callables, they will be transformed to functions automatically.
    :param error_type: a subclass of ValidationError to raise in case of validation failure. By
        default a ValidationError will be raised with the provided help_msg
    :param help_msg: an optional help message to be used in the raised error in case of
        validation failure.
    :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the
        various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
    :param kw_context_args: optional contextual information to store in the exception, and that
        may be also used to format the help message
    :return: a function decorator, able to transform a function into a function that will perform
        input validation before executing the function's code everytime it is executed.
    """
    return decorate_with_validation(f, arg_name, *validation_func, **kwargs)
A decorator to apply function input validation for the given argument name, with the provided base validation function(s). You may use several such decorators on a given function as long as they are stacked on top of each other (no external decorator in the middle).

:param arg_name:
:param validation_func: the base validation function or list of base validation functions to use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used to format the help message
:return: a function decorator, able to transform a function into a function that will perform input validation before executing the function's code everytime it is executed.
entailment
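Since `validate_arg` takes the target function as its first argument, it can also be applied a posteriori; a sketch with illustrative names:

```python
def is_even(x):
    return x % 2 == 0

def myfunc(a):
    return a

myfunc = validate_arg(myfunc, 'a', is_even, help_msg='a must be even')
myfunc(2)   # ok
myfunc(3)   # raises a ValidationError mentioning arg 'a' and the help message
```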
def validate_out(*validation_func,  # type: ValidationFuncs
                 **kwargs):
    # type: (...) -> Callable
    """
    A decorator to apply function output validation to this function's output, with the provided
    base validation function(s). You may use several such decorators on a given function as long
    as they are stacked on top of each other (no external decorator in the middle).

    :param validation_func: the base validation function or list of base validation functions to
        use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a
        list of several such elements. Nested lists are supported and indicate an implicit `and_`
        (such as the main list). Tuples indicate an implicit `_failure_raiser`.
        [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used
        instead of callables, they will be transformed to functions automatically.
    :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the
        various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
    :return: a function decorator, able to transform a function into a function that will perform
        input validation before executing the function's code everytime it is executed.
    """
    def decorate(f):
        return decorate_with_validation(f, _OUT_KEY, *validation_func, **kwargs)

    return decorate
A decorator to apply function output validation to this function's output, with the provided base validation function(s). You may use several such decorators on a given function as long as they are stacked on top of each other (no external decorator in the middle).

:param validation_func: the base validation function or list of base validation functions to use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:return: a function decorator, able to transform a function into a function that will perform input validation before executing the function's code everytime it is executed.
entailment
def decorate_cls_with_validation(cls,
                                 field_name,        # type: str
                                 *validation_func,  # type: ValidationFuncs
                                 **kwargs):
    # type: (...) -> Type[Any]
    """
    This method is equivalent to decorating a class with the `@validate_field` decorator but can
    be used a posteriori.

    :param cls: the class to decorate
    :param field_name: the name of the argument to validate or _OUT_KEY for output validation
    :param validation_func: the validation function or list of validation functions to use. A
        validation function may be a callable, a tuple(callable, help_msg_str), a
        tuple(callable, failure_type), or a list of several such elements. Nested lists are
        supported and indicate an implicit `and_` (such as the main list). Tuples indicate an
        implicit `_failure_raiser`.
        [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used
        instead of callables, they will be transformed to functions automatically.
    :param error_type: a subclass of ValidationError to raise in case of validation failure. By
        default a ValidationError will be raised with the provided help_msg
    :param help_msg: an optional help message to be used in the raised error in case of
        validation failure.
    :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the
        various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_REJECT`.
    :param kw_context_args: optional contextual information to store in the exception, and that
        may be also used to format the help message
    :return: the decorated function, that will perform input validation (using
        `_assert_input_is_valid`) before executing the function's code everytime it is executed.
    """
    error_type, help_msg, none_policy = pop_kwargs(kwargs, [('error_type', None),
                                                            ('help_msg', None),
                                                            ('none_policy', None)],
                                                   allow_others=True)
    # the rest of keyword arguments is used as context.
    kw_context_args = kwargs

    if not isclass(cls):
        raise TypeError('decorated cls should be a class')

    if hasattr(cls, field_name):
        # ** A class field with that name exists. Is it a descriptor?
        var = cls.__dict__[field_name]  # note: we cannot use getattr here
        if hasattr(var, '__set__') and callable(var.__set__):
            if isinstance(var, property):
                # *** OLD WAY which was losing type hints and default values (see var.__set__ signature) ***
                # properties are special beasts: their methods are method-wrappers (CPython) and
                # can not have properties so we have to create a wrapper (sic) before sending it
                # to the main wrapping function
                # def func(inst, value):
                #     var.__set__(inst, value)
                # *** NEW WAY: more elegant, use directly the setter provided by the user ***
                func = var.fset
                nb_args = 2
            elif ismethod(var.__set__):
                # bound method: normal. Let's access the underlying function
                func = var.__set__.__func__
                nb_args = 3
            else:
                # strange.. but let's try to continue
                func = var.__set__
                nb_args = 3

            # retrieve target function signature, check it and retrieve the 3rd param
            # since signature is "def __set__(self, obj, val)"
            func_sig = signature(func)
            if len(func_sig.parameters) != nb_args:
                raise ValueError("Class field '{}' is a valid class descriptor for class '{}' "
                                 "but it does not implement __set__ with the correct number of "
                                 "parameters, so it is not possible to add validation to it. See "
                                 "https://docs.python.org/3.6/howto/descriptor.html"
                                 "".format(field_name, cls.__name__))

            # extract the correct name
            descriptor_arg_name = list(func_sig.parameters.items())[-1][0]

            # do the same as in decorate_with_validation but with a class field validator
            # new_setter = decorate_with_validation(func, descriptor_arg_name, *validation_func,
            #                                       help_msg=help_msg, error_type=error_type,
            #                                       none_policy=none_policy,
            #                                       _clazz_field_name_=field_name, **kw_context_args)

            # -- create the new validator
            none_policy = none_policy or NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE
            new_validator = _create_function_validator(func, func_sig, descriptor_arg_name,
                                                       *validation_func, none_policy=none_policy,
                                                       error_type=error_type, help_msg=help_msg,
                                                       validated_class=cls,
                                                       validated_class_field_name=field_name,
                                                       **kw_context_args)

            # -- create the new setter with validation
            new_setter = decorate_with_validators(func, func_signature=func_sig,
                                                  **{descriptor_arg_name: new_validator})

            # replace the old one
            if isinstance(var, property):
                # properties are special beasts 2
                setattr(cls, field_name, var.setter(new_setter))
            else:
                # do not use type() for python 2 compat
                var.__class__.__set__ = new_setter

        elif (hasattr(var, '__get__') and callable(var.__get__)) \
                or (hasattr(var, '__delete__') and callable(var.__delete__)):
            # this is a descriptor but it does not have any setter method: impossible to validate
            raise ValueError("Class field '{}' is a valid class descriptor for class '{}' but it "
                             "does not implement __set__ so it is not possible to add validation "
                             "to it. See https://docs.python.org/3.6/howto/descriptor.html"
                             "".format(field_name, cls.__name__))
        else:
            # this is not a descriptor: unsupported
            raise ValueError("Class field '{}.{}' is not a valid class descriptor, see "
                             "https://docs.python.org/3.6/howto/descriptor.html"
                             "".format(cls.__name__, field_name))
    else:
        # ** No class field with that name exists
        # ? check for attrs ? > no specific need anymore, this is the same as annotating the constructor
        # if hasattr(cls, '__attrs_attrs__'): this was a proof of attrs-defined class

        # try to annotate the generated constructor
        try:
            init_func = cls.__init__
            if sys.version_info < (3, 0):
                try:
                    # python 2 - we have to access the inner `im_func`
                    init_func = cls.__init__.im_func
                except AttributeError:
                    pass
            cls.__init__ = decorate_with_validation(init_func, field_name, *validation_func,
                                                    help_msg=help_msg, _constructor_of_cls_=cls,
                                                    error_type=error_type,
                                                    none_policy=none_policy, **kw_context_args)
        except InvalidNameError:
            # the field was not found
            # TODO should we also check if a __setattr__ is defined ?
            # (for __setattr__ see https://stackoverflow.com/questions/15750522/class-properties-and-setattr/15751159)
            # finally raise an error
            raise ValueError("@validate_field definition exception: field '{}' can not be found "
                             "in class '{}', and it is also not an input argument of the "
                             "__init__ method.".format(field_name, cls.__name__))

    return cls
This method is equivalent to decorating a class with the `@validate_field` decorator but can be used a posteriori. :param cls: the class to decorate :param field_name: the name of the class field to validate :param validation_func: the validation function or list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (as does the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables; they will be transformed to functions automatically. :param error_type: a subclass of ValidationError to raise in case of validation failure. By default a ValidationError will be raised with the provided help_msg :param help_msg: an optional help message to be used in the raised error in case of validation failure. :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_REJECT`. :param kw_context_args: optional contextual information to store in the exception, and that may also be used to format the help message :return: the decorated class, whose validated field(s) will perform validation (using `_assert_input_is_valid`) every time they are set.
entailment
def decorate_several_with_validation(func, _out_=None, # type: ValidationFuncs none_policy=None, # type: int **validation_funcs # type: ValidationFuncs ): # type: (...) -> Callable """ This method is equivalent to applying `decorate_with_validation` once for each of the provided arguments of the function `func` as well as output `_out_`. validation_funcs keyword arguments are validation functions for each arg name. Note that this method is less flexible than decorate_with_validation since * it does not allow to associate a custom error message or error type with each validation. * the none_policy is the same for all inputs and outputs :param func: :param _out_: :param validation_funcs: :param none_policy: :return: a function decorated with validation for all of the listed arguments and output if provided. """ # add validation for output if provided if _out_ is not None: func = decorate_with_validation(func, _OUT_KEY, _out_, none_policy=none_policy) # add validation for each of the listed arguments for att_name, att_validation_funcs in validation_funcs.items(): func = decorate_with_validation(func, att_name, att_validation_funcs, none_policy=none_policy) return func
This method is equivalent to applying `decorate_with_validation` once for each of the provided arguments of the function `func` as well as the output `_out_`. The validation_funcs keyword arguments are validation functions for each arg name. Note that this method is less flexible than decorate_with_validation since * it does not allow associating a custom error message or error type with each validation. * the none_policy is the same for all inputs and outputs :param func: :param _out_: :param validation_funcs: :param none_policy: :return: a function decorated with validation for all of the listed arguments and output if provided.
entailment
def decorate_with_validation(func, arg_name, # type: str *validation_func, # type: ValidationFuncs **kwargs): # type: (...) -> Callable """ This method is the inner method used in `@validate_io`, `@validate_arg` and `@validate_out`. It can be used if you with to perform decoration manually without a decorator. :param func: :param arg_name: the name of the argument to validate or _OUT_KEY for output validation :param validation_func: the validation function or list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically. :param error_type: a subclass of ValidationError to raise in case of validation failure. By default a ValidationError will be raised with the provided help_msg :param help_msg: an optional help message to be used in the raised error in case of validation failure. :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_REJECT`. :param kw_context_args: optional contextual information to store in the exception, and that may be also used to format the help message :return: the decorated function, that will perform input validation (using `_assert_input_is_valid`) before executing the function's code everytime it is executed. """ error_type, help_msg, none_policy, _constructor_of_cls_ = pop_kwargs(kwargs, [('error_type', None), ('help_msg', None), ('none_policy', None), ('_constructor_of_cls_', None)], allow_others=True) # the rest of keyword arguments is used as context. kw_context_args = kwargs none_policy = none_policy or NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE # retrieve target function signature func_sig = signature(func) # create the new validator if _constructor_of_cls_ is None: # standard method: input validator new_validator = _create_function_validator(func, func_sig, arg_name, *validation_func, none_policy=none_policy, error_type=error_type, help_msg=help_msg, **kw_context_args) else: # class constructor: field validator new_validator = _create_function_validator(func, func_sig, arg_name, *validation_func, none_policy=none_policy, error_type=error_type, help_msg=help_msg, validated_class=_constructor_of_cls_, validated_class_field_name=arg_name, **kw_context_args) # decorate or update decorator with this new validator return decorate_with_validators(func, func_signature=func_sig, **{arg_name: new_validator})
This method is the inner method used in `@validate_io`, `@validate_arg` and `@validate_out`. It can be used if you wish to perform decoration manually without a decorator. :param func: :param arg_name: the name of the argument to validate or _OUT_KEY for output validation :param validation_func: the validation function or list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_` (as does the main list). Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables; they will be transformed to functions automatically. :param error_type: a subclass of ValidationError to raise in case of validation failure. By default a ValidationError will be raised with the provided help_msg :param help_msg: an optional help message to be used in the raised error in case of validation failure. :param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_REJECT`. :param kw_context_args: optional contextual information to store in the exception, and that may also be used to format the help message :return: the decorated function, that will perform input validation (using `_assert_input_is_valid`) before executing the function's code every time it is executed.
entailment
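A minimal usage sketch of `decorate_with_validation`, assuming the function above is importable from the valid8 package; the validated function, the lambda and the help_msg text below are illustrative, not from the source:

from valid8 import decorate_with_validation

def set_age(age):
    return age

# a posteriori decoration, equivalent to using the @validate_arg decorator
set_age = decorate_with_validation(set_age, 'age', lambda x: x >= 0,
                                   help_msg='age should be >= 0')

set_age(10)   # passes validation
set_age(-1)   # raises a ValidationError carrying the help message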
def _get_final_none_policy_for_validator(is_nonable, # type: bool none_policy # type: NoneArgPolicy ): """ Depending on the none_policy and on whether the target parameter is nonable, returns the corresponding NonePolicy :param is_nonable: :param none_policy: :return: """ if none_policy in {NonePolicy.VALIDATE, NonePolicy.SKIP, NonePolicy.FAIL}: none_policy_to_use = none_policy elif none_policy is NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE: none_policy_to_use = NonePolicy.SKIP if is_nonable else NonePolicy.VALIDATE elif none_policy is NoneArgPolicy.SKIP_IF_NONABLE_ELSE_FAIL: none_policy_to_use = NonePolicy.SKIP if is_nonable else NonePolicy.FAIL else: raise ValueError('Invalid none policy: ' + str(none_policy)) return none_policy_to_use
Depending on the none_policy and on whether the target parameter is nonable, returns the corresponding NonePolicy :param is_nonable: :param none_policy: :return:
entailment
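The branch logic above, spelled out as a small reference table (derived directly from the function):

# NonePolicy.VALIDATE / NonePolicy.SKIP / NonePolicy.FAIL  -> used as-is
# SKIP_IF_NONABLE_ELSE_VALIDATE, is_nonable=True           -> NonePolicy.SKIP
# SKIP_IF_NONABLE_ELSE_VALIDATE, is_nonable=False          -> NonePolicy.VALIDATE
# SKIP_IF_NONABLE_ELSE_FAIL,     is_nonable=True           -> NonePolicy.SKIP
# SKIP_IF_NONABLE_ELSE_FAIL,     is_nonable=False          -> NonePolicy.FAIL
# any other value                                          -> ValueError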
def decorate_with_validators(func, func_signature=None, # type: Signature **validators # type: Validator ): """ Utility method to decorate the provided function with the provided input and output Validator objects. Since this method takes Validator objects as argument, it is for advanced users. :param func: the function to decorate. It might already be decorated, this method will check it and wont create another wrapper in this case, simply adding the validators to the existing wrapper :param func_signature: the function's signature if it is already known (internal calls), otherwise it will be found again by inspection :param validators: a dictionary of arg_name (or _out_) => Validator or list of Validator :return: """ # first turn the dictionary values into lists only for arg_name, validator in validators.items(): if not isinstance(validator, list): validators[arg_name] = [validator] if hasattr(func, '__wrapped__') and hasattr(func.__wrapped__, '__validators__'): # ---- This function is already wrapped by our validation wrapper ---- # Update the dictionary of validators with the new validator(s) for arg_name, validator in validators.items(): for v in validator: if arg_name in func.__wrapped__.__validators__: func.__wrapped__.__validators__[arg_name].append(v) else: func.__wrapped__.__validators__[arg_name] = [v] # return the function, no need to wrap it further (it is already wrapped) return func else: # ---- This function is not yet wrapped by our validator. ---- # Store the dictionary of validators as an attribute of the function if hasattr(func, '__validators__'): raise ValueError('Function ' + str(func) + ' already has a defined __validators__ attribute, valid8 ' 'decorators can not be applied on it') else: try: func.__validators__ = validators except AttributeError: raise ValueError("Error - Could not add validators list to function '%s'" % func) # either reuse or recompute function signature func_signature = func_signature or signature(func) # create a wrapper with the same signature @wraps(func) def validating_wrapper(*args, **kwargs): """ This is the wrapper that will be called everytime the function is called """ # (a) Perform input validation by applying `_assert_input_is_valid` on all received arguments apply_on_each_func_args_sig(func, args, kwargs, func_signature, func_to_apply=_assert_input_is_valid, func_to_apply_params_dict=func.__validators__) # (b) execute the function as usual res = func(*args, **kwargs) # (c) validate output if needed if _OUT_KEY in func.__validators__: for validator in func.__validators__[_OUT_KEY]: validator.assert_valid(res) return res return validating_wrapper
Utility method to decorate the provided function with the provided input and output Validator objects. Since this method takes Validator objects as argument, it is for advanced users. :param func: the function to decorate. It might already be decorated; in that case this method will detect it and won't create another wrapper, simply adding the validators to the existing wrapper :param func_signature: the function's signature if it is already known (internal calls), otherwise it will be found again by inspection :param validators: a dictionary of arg_name (or _out_) => Validator or list of Validator :return:
entailment
def _assert_input_is_valid(input_value, # type: Any validators, # type: List[InputValidator] validated_func, # type: Callable input_name # type: str ): """ Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before executing the function. It simply delegates to the validators. The signature of this function is hardcoded to correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed. :param input_value: the value to validate :param validators: the list of Validator objects that will be applied on input_value :param validated_func: the function for which this validation is performed. This is not used since the Validator knows it already, but we should not change the signature here. :param input_name: the name of the function input that is being validated :return: Nothing """ for validator in validators: validator.assert_valid(input_name, input_value)
Called by the `validating_wrapper` in the first step (a) `apply_on_each_func_args` for each function input before executing the function. It simply delegates to the validators. The signature of this function is hardcoded to correspond to `apply_on_each_func_args`'s behaviour and should therefore not be changed. :param input_value: the value to validate :param validators: the list of Validator objects that will be applied on input_value :param validated_func: the function for which this validation is performed. This is not used since the Validator knows it already, but we should not change the signature here. :param input_name: the name of the function input that is being validated :return: Nothing
entailment
def get_what_txt(self): """ Overrides the base behaviour defined in ValidationError in order to add details about the function. :return: """ return 'input [{var}] for function [{func}]'.format(var=self.get_variable_str(), func=self.validator.get_validated_func_display_name())
Overrides the base behaviour defined in ValidationError in order to add details about the function. :return:
entailment
def get_what_txt(self): """ Overrides the base behaviour defined in ValidationError in order to add details about the class field. :return: """ return 'field [{field}] for class [{clazz}]'.format(field=self.get_variable_str(), clazz=self.validator.get_validated_class_display_name())
Overrides the base behaviour defined in ValidationError in order to add details about the class field. :return:
entailment
def generate_nonce_timestamp(): """ Generate unique nonce with counter, uuid and rng.""" global count rng = botan.rng().get(30) uuid4 = uuid.uuid4().bytes # 16 byte tmpnonce = (bytes(str(count).encode('utf-8'))) + uuid4 + rng nonce = tmpnonce[:41] # 41 byte (328 bit) count += 1 return nonce
Generate unique nonce with counter, uuid and rng.
entailment
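The nonce layout can be checked with a stand-in RNG; this sketch replaces the botan binding with os.urandom (an assumption for portability) but keeps the counter prefix, 16-byte UUID, 30 RNG bytes and the 41-byte truncation of the function above:

import os
import uuid

count = 0

def demo_nonce():
    global count
    rng = os.urandom(30)                        # 30 random bytes, botan stand-in
    u = uuid.uuid4().bytes                      # 16 bytes
    tmp = str(count).encode('utf-8') + u + rng  # counter prefix + uuid + rng
    count += 1
    return tmp[:41]                             # truncated to 41 bytes (328 bits)

assert len(demo_nonce()) == 41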
def dict_merge(*dict_list): """recursively merges dicts. Not just a simple a['key'] = b['key']: if both a and b have a key whose value is a dict, then dict_merge is called on both values and the result is stored in the returned dictionary. """ result = collections.defaultdict(dict) dicts_items = itertools.chain(*[six.iteritems(d or {}) for d in dict_list]) for key, value in dicts_items: src = result[key] if isinstance(src, dict) and isinstance(value, dict): result[key] = dict_merge(src, value) elif isinstance(src, dict) or isinstance(src, six.text_type): result[key] = value elif hasattr(src, "__iter__") and hasattr(value, "__iter__"): result[key] += value else: result[key] = value return dict(result)
recursively merges dicts. Not just a simple a['key'] = b['key']: if both a and b have a key whose value is a dict, then dict_merge is called on both values and the result is stored in the returned dictionary.
entailment
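Expected behaviour of dict_merge on nested dicts and lists, assuming the collections, itertools and six imports it relies on are in scope:

a = {'x': {'y': 1}, 'tags': ['a']}
b = {'x': {'z': 2}, 'tags': ['b'], 'new': 3}

merged = dict_merge(a, b)
# nested dicts are merged recursively, iterables are concatenated,
# and scalars are simply (re)assigned
assert merged == {'x': {'y': 1, 'z': 2}, 'tags': ['a', 'b'], 'new': 3}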
def schedule_jobs(user): """Dispatch jobs to remotecis. The remoteci can use this method to request a new job. Before a job is dispatched, the server will flag as 'killed' all the running jobs that were associated with the remoteci. This is because they will never be finished. """ values = schemas.job_schedule.post(flask.request.json) values.update({ 'id': utils.gen_uuid(), 'created_at': datetime.datetime.utcnow().isoformat(), 'updated_at': datetime.datetime.utcnow().isoformat(), 'etag': utils.gen_etag(), 'status': 'new', 'remoteci_id': user.id, 'user_agent': flask.request.environ.get('HTTP_USER_AGENT'), 'client_version': flask.request.environ.get( 'HTTP_CLIENT_VERSION' ), }) topic_id = values.pop('topic_id') topic_id_secondary = values.pop('topic_id_secondary') components_ids = values.pop('components_ids') # check remoteci remoteci = v1_utils.verify_existence_and_get(user.id, models.REMOTECIS) if remoteci['state'] != 'active': message = 'RemoteCI "%s" is disabled.' % remoteci['id'] raise dci_exc.DCIException(message, status_code=412) # check primary topic topic = v1_utils.verify_existence_and_get(topic_id, models.TOPICS) if topic['state'] != 'active': msg = 'Topic %s:%s not active.' % (topic_id, topic['name']) raise dci_exc.DCIException(msg, status_code=412) v1_utils.verify_team_in_topic(user, topic_id) # check secondary topic if topic_id_secondary: topic_secondary = v1_utils.verify_existence_and_get( topic_id_secondary, models.TOPICS) if topic_secondary['state'] != 'active': msg = 'Topic %s:%s not active.' % (topic_id_secondary, topic['name']) raise dci_exc.DCIException(msg, status_code=412) v1_utils.verify_team_in_topic(user, topic_id_secondary) dry_run = values.pop('dry_run') if dry_run: component_types = components.get_component_types_from_topic(topic_id) components_ids = components.get_last_components_by_type( component_types, topic_id ) return flask.Response( json.dumps({'components_ids': components_ids, 'job': None}), 201, content_type='application/json' ) remotecis.kill_existing_jobs(remoteci['id']) values = _build_job(topic_id, remoteci, components_ids, values, topic_id_secondary=topic_id_secondary) return flask.Response(json.dumps({'job': values}), 201, headers={'ETag': values['etag']}, content_type='application/json')
Dispatch jobs to remotecis. The remoteci can use this method to request a new job. Before a job is dispatched, the server will flag as 'killed' all the running jobs that were associated with the remoteci. This is because they will never be finished.
entailment
def create_new_update_job_from_an_existing_job(user, job_id): """Create a new job in the same topic as the job_id provided and associate the latest components of this topic.""" values = { 'id': utils.gen_uuid(), 'created_at': datetime.datetime.utcnow().isoformat(), 'updated_at': datetime.datetime.utcnow().isoformat(), 'etag': utils.gen_etag(), 'status': 'new' } original_job_id = job_id original_job = v1_utils.verify_existence_and_get(original_job_id, models.JOBS) if not user.is_in_team(original_job['team_id']): raise dci_exc.Unauthorized() # get the remoteci remoteci_id = str(original_job['remoteci_id']) remoteci = v1_utils.verify_existence_and_get(remoteci_id, models.REMOTECIS) values.update({'remoteci_id': remoteci_id}) # get the associated topic topic_id = str(original_job['topic_id']) v1_utils.verify_existence_and_get(topic_id, models.TOPICS) values.update({ 'user_agent': flask.request.environ.get('HTTP_USER_AGENT'), 'client_version': flask.request.environ.get( 'HTTP_CLIENT_VERSION' ), }) values = _build_job(topic_id, remoteci, [], values, update_previous_job_id=original_job_id) return flask.Response(json.dumps({'job': values}), 201, headers={'ETag': values['etag']}, content_type='application/json')
Create a new job in the same topic as the job_id provided and associate the latest components of this topic.
entailment
def create_new_upgrade_job_from_an_existing_job(user): """Create a new job in the 'next topic' of the topic of the provided job_id.""" values = schemas.job_upgrade.post(flask.request.json) values.update({ 'id': utils.gen_uuid(), 'created_at': datetime.datetime.utcnow().isoformat(), 'updated_at': datetime.datetime.utcnow().isoformat(), 'etag': utils.gen_etag(), 'status': 'new' }) original_job_id = values.pop('job_id') original_job = v1_utils.verify_existence_and_get(original_job_id, models.JOBS) if not user.is_in_team(original_job['team_id']): raise dci_exc.Unauthorized() # get the remoteci remoteci_id = str(original_job['remoteci_id']) remoteci = v1_utils.verify_existence_and_get(remoteci_id, models.REMOTECIS) values.update({'remoteci_id': remoteci_id}) # get the associated topic topic_id = str(original_job['topic_id']) topic = v1_utils.verify_existence_and_get(topic_id, models.TOPICS) values.update({ 'user_agent': flask.request.environ.get('HTTP_USER_AGENT'), 'client_version': flask.request.environ.get( 'HTTP_CLIENT_VERSION' ), }) next_topic_id = topic['next_topic_id'] if not next_topic_id: raise dci_exc.DCIException( "topic %s does not contains a next topic" % topic_id) # instantiate a new job in the next_topic_id # todo(yassine): make possible the upgrade to choose specific components values = _build_job(next_topic_id, remoteci, [], values, previous_job_id=original_job_id) return flask.Response(json.dumps({'job': values}), 201, headers={'ETag': values['etag']}, content_type='application/json')
Create a new job in the 'next topic' of the topic of the provided job_id.
entailment
def get_all_jobs(user, topic_id=None): """Get all jobs. If topic_id is not None, then return all the jobs with a topic pointed by topic_id. """ # get the diverse parameters args = schemas.args(flask.request.args.to_dict()) # build the query thanks to the QueryBuilder class query = v1_utils.QueryBuilder(_TABLE, args, _JOBS_COLUMNS) # add extra conditions for filtering # # If not admin nor rh employee then restrict the view to the team if user.is_not_super_admin() and not user.is_read_only_user(): query.add_extra_condition( sql.or_( _TABLE.c.team_id.in_(user.teams_ids), _TABLE.c.team_id.in_(user.child_teams_ids))) # # If topic_id not None, then filter by topic_id if topic_id is not None: query.add_extra_condition(_TABLE.c.topic_id == topic_id) # # Get only the non archived jobs query.add_extra_condition(_TABLE.c.state != 'archived') nb_rows = query.get_number_of_rows() rows = query.execute(fetchall=True) rows = v1_utils.format_result(rows, _TABLE.name, args['embed'], _EMBED_MANY) return flask.jsonify({'jobs': rows, '_meta': {'count': nb_rows}})
Get all jobs. If topic_id is not None, then return all the jobs attached to the topic pointed to by topic_id.
entailment
def update_job_by_id(user, job_id): """Update a job """ # get If-Match header if_match_etag = utils.check_and_get_etag(flask.request.headers) # get the diverse parameters values = schemas.job.put(flask.request.json) job = v1_utils.verify_existence_and_get(job_id, _TABLE) job = dict(job) if not user.is_in_team(job['team_id']): raise dci_exc.Unauthorized() # Update jobstate if needed status = values.get('status') if status and job.get('status') != status: jobstates.insert_jobstate(user, { 'status': status, 'job_id': job_id }) if status in models.FINAL_STATUSES: jobs_events.create_event(job_id, status, job['topic_id']) where_clause = sql.and_(_TABLE.c.etag == if_match_etag, _TABLE.c.id == job_id) values['etag'] = utils.gen_etag() query = _TABLE.update().returning(*_TABLE.columns).\ where(where_clause).values(**values) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIConflict('Job', job_id) return flask.Response( json.dumps({'job': result.fetchone()}), 200, headers={'ETag': values['etag']}, content_type='application/json' )
Update a job
entailment
def get_all_results_from_jobs(user, j_id): """Get all results from job. """ job = v1_utils.verify_existence_and_get(j_id, _TABLE) if not user.is_in_team(job['team_id']) and not user.is_read_only_user(): raise dci_exc.Unauthorized() # get testscases from tests_results query = sql.select([models.TESTS_RESULTS]). \ where(models.TESTS_RESULTS.c.job_id == job['id']) all_tests_results = flask.g.db_conn.execute(query).fetchall() results = [] for test_result in all_tests_results: test_result = dict(test_result) results.append({'filename': test_result['name'], 'name': test_result['name'], 'total': test_result['total'], 'failures': test_result['failures'], 'errors': test_result['errors'], 'skips': test_result['skips'], 'time': test_result['time'], 'regressions': test_result['regressions'], 'successfixes': test_result['successfixes'], 'success': test_result['success'], 'file_id': test_result['file_id']}) return flask.jsonify({'results': results, '_meta': {'count': len(results)}})
Get all results from job.
entailment
def get_tags_from_job(user, job_id): """Retrieve all tags attached to a job.""" job = v1_utils.verify_existence_and_get(job_id, _TABLE) if not user.is_in_team(job['team_id']) and not user.is_read_only_user(): raise dci_exc.Unauthorized() JTT = models.JOIN_JOBS_TAGS query = (sql.select([models.TAGS]) .select_from(JTT.join(models.TAGS)) .where(JTT.c.job_id == job_id)) rows = flask.g.db_conn.execute(query) return flask.jsonify({'tags': rows, '_meta': {'count': rows.rowcount}})
Retrieve all tags attached to a job.
entailment
def add_tag_to_job(user, job_id): """Add a tag to a job.""" job = v1_utils.verify_existence_and_get(job_id, _TABLE) if not user.is_in_team(job['team_id']): raise dci_exc.Unauthorized() values = { 'job_id': job_id } job_tagged = tags.add_tag_to_resource(values, models.JOIN_JOBS_TAGS) return flask.Response(json.dumps(job_tagged), 201, content_type='application/json')
Add a tag to a job.
entailment
def delete_tag_from_job(user, job_id, tag_id): """Delete a tag from a job.""" _JJT = models.JOIN_JOBS_TAGS job = v1_utils.verify_existence_and_get(job_id, _TABLE) if not user.is_in_team(job['team_id']): raise dci_exc.Unauthorized() v1_utils.verify_existence_and_get(tag_id, models.TAGS) query = _JJT.delete().where(sql.and_(_JJT.c.tag_id == tag_id, _JJT.c.job_id == job_id)) try: flask.g.db_conn.execute(query) except sa_exc.IntegrityError: raise dci_exc.DCICreationConflict('tag', 'tag_id') return flask.Response(None, 204, content_type='application/json')
Delete a tag from a job.
entailment
def _lookup_node_parent(self, node): """ Return the parent of the given node, based on an internal dictionary mapping child nodes to their parents; this is required since ElementTree doesn't make info about node ancestry/parentage available. """ # Basic caching of our internal ancestry dict to help performance if node not in self.CACHED_ANCESTRY_DICT: # Given node isn't in cached ancestry dictionary, rebuild this now ancestry_dict = dict( (c, p) for p in self._impl_document.getiterator() for c in p) self.CACHED_ANCESTRY_DICT = ancestry_dict return self.CACHED_ANCESTRY_DICT[node]
Return the parent of the given node, based on an internal dictionary mapping child nodes to their parents; this is required since ElementTree doesn't make info about node ancestry/parentage available.
entailment
def _is_node_an_element(self, node): """ Return True if the given node is an ElementTree Element, a fact that can be tricky to determine if the cElementTree implementation is used. """ # Try the simplest approach first, works for plain old ElementTree if isinstance(node, BaseET.Element): return True # For cElementTree we need to be more cunning (or find a better way) if hasattr(node, 'makeelement') and isinstance(node.tag, basestring): return True
Return True if the given node is an ElementTree Element, a fact that can be tricky to determine if the cElementTree implementation is used.
entailment
def xpath_on_node(self, node, xpath, **kwargs): """ Return result of performing the given XPath query on the given node. All known namespace prefix-to-URI mappings in the document are automatically included in the XPath invocation. If an empty/default namespace (i.e. None) is defined, this is converted to the prefix name '_' so it can be used despite empty namespace prefixes being unsupported by XPath. """ namespaces_dict = {} if 'namespaces' in kwargs: namespaces_dict.update(kwargs['namespaces']) # Empty namespace prefix is not supported, convert to '_' prefix if None in namespaces_dict: default_ns_uri = namespaces_dict.pop(None) namespaces_dict['_'] = default_ns_uri # If no default namespace URI defined, use root's namespace (if any) if not '_' in namespaces_dict: root = self.get_impl_root(node) qname, ns_uri, prefix, local_name = self._unpack_name( root.tag, root) if ns_uri: namespaces_dict['_'] = ns_uri # Include XMLNS namespace if it's not already defined if not 'xmlns' in namespaces_dict: namespaces_dict['xmlns'] = nodes.Node.XMLNS_URI return node.findall(xpath, namespaces_dict)
Return result of performing the given XPath query on the given node. All known namespace prefix-to-URI mappings in the document are automatically included in the XPath invocation. If an empty/default namespace (i.e. None) is defined, this is converted to the prefix name '_' so it can be used despite empty namespace prefixes being unsupported by XPath.
entailment
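A standalone illustration of the '_' stand-in prefix for the default namespace, using plain ElementTree outside the class above:

import xml.etree.ElementTree as ET

doc = ET.fromstring('<root xmlns="urn:example"><child/></root>')
# XPath has no syntax for an empty prefix, so the default namespace URI
# is bound to the '_' prefix instead, as in the method above
ns = {'_': 'urn:example'}
assert len(doc.findall('_:child', ns)) == 1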
def get_to_purge_archived_resources(user, table): """List the entries to be purged from the database. """ if user.is_not_super_admin(): raise dci_exc.Unauthorized() archived_resources = get_archived_resources(table) return flask.jsonify({table.name: archived_resources, '_meta': {'count': len(archived_resources)}})
List the entries to be purged from the database.
entailment
def purge_archived_resources(user, table): """Remove the entries to be purged from the database. """ if user.is_not_super_admin(): raise dci_exc.Unauthorized() where_clause = sql.and_( table.c.state == 'archived' ) query = table.delete().where(where_clause) flask.g.db_conn.execute(query) return flask.Response(None, 204, content_type='application/json')
Remove the entries to be purged from the database.
entailment
def refresh_api_secret(user, resource, table): """Refresh the resource API Secret. """ resource_name = table.name[0:-1] where_clause = sql.and_( table.c.etag == resource['etag'], table.c.id == resource['id'], ) values = { 'api_secret': signature.gen_secret(), 'etag': utils.gen_etag() } query = table.update().where(where_clause).values(**values) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIConflict(resource_name, resource['id']) res = flask.jsonify(({'id': resource['id'], 'etag': resource['etag'], 'api_secret': values['api_secret']})) res.headers.add_header('ETag', values['etag']) return res
Refresh the resource API Secret.
entailment
def npm(package_json, output_file, pinned_file): """Generate a package.json file.""" amd_build_deprecation_warning() try: version = get_distribution(current_app.name).version except DistributionNotFound: version = '' output = { 'name': current_app.name, 'version': make_semver(version) if version else version, 'dependencies': {}, } # Load base file if package_json: output = dict(output, **json.load(package_json)) # Iterate over bundles deps = extract_deps(current_app.extensions['invenio-assets'].env, click.echo) output['dependencies'].update(deps) # Load pinned dependencies if pinned_file: output['dependencies'].update( json.load(pinned_file).get('dependencies', {})) # Write to static folder if output file is not specified if output_file is None: if not os.path.exists(current_app.static_folder): os.makedirs(current_app.static_folder) output_file = open( os.path.join(current_app.static_folder, 'package.json'), 'w') click.echo('Writing {0}'.format(output_file.name)) json.dump(output, output_file, indent=4) output_file.close()
Generate a package.json file.
entailment
def getAnalyses(self, **kwargs): """Returns a list of the latest root cause analysis results for a specified check. Optional Parameters: * limit -- Limits the number of returned results to the specified quantity. Type: Integer Default: 100 * offset -- Offset for listing. (Requires limit.) Type: Integer Default: 0 * time_from -- Return only results with timestamp of first test greater or equal to this value. Format is UNIX timestamp. Type: Integer Default: 0 * time_to -- Return only results with timestamp of first test less or equal to this value. Format is UNIX timestamp. Type: Integer Default: Current Time Returned structure: [ { 'id' : <Integer> Analysis id 'timefirsttest' : <Integer> Time of test that initiated the confirmation test 'timeconfrimtest' : <Integer> Time of the confirmation test that perfromed the error analysis }, ... ] """ # 'from' is a reserved word, use time_from instead if kwargs.get('time_from'): kwargs['from'] = kwargs.get('time_from') del kwargs['time_from'] if kwargs.get('time_to'): kwargs['to'] = kwargs.get('time_to') del kwargs['time_to'] # Warn user about unhandled kwargs for key in kwargs: if key not in ['limit', 'offset', 'from', 'to']: sys.stderr.write('%s not a valid argument for analysis()\n' % key) response = self.pingdom.request('GET', 'analysis/%s' % self.id, kwargs) return [PingdomAnalysis(self, x) for x in response.json()['analysis']]
Returns a list of the latest root cause analysis results for a specified check. Optional Parameters: * limit -- Limits the number of returned results to the specified quantity. Type: Integer Default: 100 * offset -- Offset for listing. (Requires limit.) Type: Integer Default: 0 * time_from -- Return only results with timestamp of first test greater than or equal to this value. Format is UNIX timestamp. Type: Integer Default: 0 * time_to -- Return only results with timestamp of first test less than or equal to this value. Format is UNIX timestamp. Type: Integer Default: Current Time Returned structure: [ { 'id' : <Integer> Analysis id 'timefirsttest' : <Integer> Time of test that initiated the confirmation test 'timeconfrimtest' : <Integer> Time of the confirmation test that performed the error analysis }, ... ]
entailment
def getDetails(self): """Update check details, returns dictionary of details""" response = self.pingdom.request('GET', 'checks/%s' % self.id) self.__addDetails__(response.json()['check']) return response.json()['check']
Update check details, returns dictionary of details
entailment
def modify(self, **kwargs): """Modify settings for a check. The provided settings will overwrite previous values. Settings not provided will stay the same as before the update. To clear an existing value, provide an empty value. Please note that you cannot change the type of a check once it has been created. General parameters: * name -- Check name Type: String * host - Target host Type: String * paused -- Check should be paused Type: Boolean * resolution -- Check resolution time (in minutes) Type: Integer [1, 5, 15, 30, 60] * contactids -- Comma separated list of contact IDs Type: String * sendtoemail -- Send alerts as email Type: Boolean * sendtosms -- Send alerts as SMS Type: Boolean * sendtotwitter -- Send alerts through Twitter Type: Boolean * sendtoiphone -- Send alerts to iPhone Type: Boolean * sendtoandroid -- Send alerts to Android Type: Boolean * sendnotificationwhendown -- Send notification when check is down the given number of times Type: Integer * notifyagainevery -- Set how many results to wait for in between notices Type: Integer * notifywhenbackup -- Notify when back up again Type: Boolean * use_legacy_notifications -- Use old notifications instead of BeepManager Type: Boolean * probe_filters -- Can be one of region: NA, region: EU, region: APAC Type: String HTTP check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * shouldcontain -- Target site should contain this string. Cannot be combined with 'shouldnotcontain' Type: String * shouldnotcontain -- Target site should not contain this string. Cannot be combined with 'shouldcontain' Type: String * postdata -- Data that should be posted to the web page, for example submission data for a sign-up or login form. The data needs to be formatted in the same way as a web browser would send it to the web server Type: String * requestheader<NAME> -- Custom HTTP header, replace <NAME> with desired header name. Header in form: Header:Value Type: String HTTPCustom check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * additionalurls -- Colon-separated list of additonal URLS with hostname included Type: String TCP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String DNS check options: * expectedip -- Expected IP Type: String * nameserver -- Nameserver to check Type: String UDP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String SMTP check options: * port -- Target server port Type: Integer * auth -- Username and password for target SMTP authentication. 
Example: user:password Type: String * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean POP3 check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean IMAP check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean """ # Warn user about unhandled parameters for key in kwargs: if key not in ['paused', 'resolution', 'contactids', 'sendtoemail', 'sendtosms', 'sendtotwitter', 'sendtoiphone', 'sendnotificationwhendown', 'notifyagainevery', 'notifywhenbackup', 'created', 'type', 'hostname', 'status', 'lasterrortime', 'lasttesttime', 'url', 'encryption', 'port', 'auth', 'shouldcontain', 'shouldnotcontain', 'postdata', 'additionalurls', 'stringtosend', 'stringtoexpect', 'expectedip', 'nameserver', 'use_legacy_notifications', 'host', 'alert_policy', 'autoresolve', 'probe_filters']: sys.stderr.write("'%s'" % key + ' is not a valid argument of' + '<PingdomCheck>.modify()\n') # If one of the legacy parameters is used, it is required to set the legacy flag. # https://github.com/KennethWilke/PingdomLib/issues/12 if any([k for k in kwargs if k in legacy_notification_parameters]): if "use_legacy_notifications" in kwargs and kwargs["use_legacy_notifications"] != True: raise Exception("Cannot set legacy parameter when use_legacy_notifications is not True") kwargs["use_legacy_notifications"] = True response = self.pingdom.request("PUT", 'checks/%s' % self.id, kwargs) return response.json()['message']
Modify settings for a check. The provided settings will overwrite previous values. Settings not provided will stay the same as before the update. To clear an existing value, provide an empty value. Please note that you cannot change the type of a check once it has been created. General parameters: * name -- Check name Type: String * host - Target host Type: String * paused -- Check should be paused Type: Boolean * resolution -- Check resolution time (in minutes) Type: Integer [1, 5, 15, 30, 60] * contactids -- Comma separated list of contact IDs Type: String * sendtoemail -- Send alerts as email Type: Boolean * sendtosms -- Send alerts as SMS Type: Boolean * sendtotwitter -- Send alerts through Twitter Type: Boolean * sendtoiphone -- Send alerts to iPhone Type: Boolean * sendtoandroid -- Send alerts to Android Type: Boolean * sendnotificationwhendown -- Send notification when check is down the given number of times Type: Integer * notifyagainevery -- Set how many results to wait for in between notices Type: Integer * notifywhenbackup -- Notify when back up again Type: Boolean * use_legacy_notifications -- Use old notifications instead of BeepManager Type: Boolean * probe_filters -- Can be one of region: NA, region: EU, region: APAC Type: String HTTP check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * shouldcontain -- Target site should contain this string. Cannot be combined with 'shouldnotcontain' Type: String * shouldnotcontain -- Target site should not contain this string. Cannot be combined with 'shouldcontain' Type: String * postdata -- Data that should be posted to the web page, for example submission data for a sign-up or login form. The data needs to be formatted in the same way as a web browser would send it to the web server Type: String * requestheader<NAME> -- Custom HTTP header, replace <NAME> with desired header name. Header in form: Header:Value Type: String HTTPCustom check options: * url -- Target path on server Type: String * encryption -- Use SSL/TLS Type: Boolean * port -- Target server port Type: Integer * auth -- Username and password for HTTP authentication Example: user:password Type: String * additionalurls -- Colon-separated list of additonal URLS with hostname included Type: String TCP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String DNS check options: * expectedip -- Expected IP Type: String * nameserver -- Nameserver to check Type: String UDP check options: * port -- Target server port Type: Integer * stringtosend -- String to send Type: String * stringtoexpect -- String to expect in response Type: String SMTP check options: * port -- Target server port Type: Integer * auth -- Username and password for target SMTP authentication. Example: user:password Type: String * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean POP3 check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean IMAP check options: * port -- Target server port Type: Integer * stringtoexpect -- String to expect in response Type: String * encryption -- Use connection encryption Type: Boolean
entailment
def averages(self, **kwargs): """Get the average time / uptime value for a specified check and time period. Optional parameters: * time_from -- Start time of period. Format is UNIX timestamp Type: Integer Default: 0 * time_to -- End time of period. Format is UNIX timestamp Type: Integer Default: Current time * probes -- Filter to only use results from a list of probes. Format is a comma separated list of probe identifiers Type: String Default: All probes * includeuptime -- Include uptime information Type: Boolean Default: False * bycountry -- Split response times into country groups Type: Boolean Default: False * byprobe -- Split response times into probe groups Type: Boolean Default: False Returned structure: { 'responsetime' : { 'to' : <Integer> Start time of period 'from' : <Integer> End time of period 'avgresponse' : <Integer> Total average response time in milliseconds }, < More can be included with optional parameters > } """ # 'from' is a reserved word, use time_from instead if kwargs.get('time_from'): kwargs['from'] = kwargs.get('time_from') del kwargs['time_from'] if kwargs.get('time_to'): kwargs['to'] = kwargs.get('time_to') del kwargs['time_to'] # Warn user about unhandled parameters for key in kwargs: if key not in ['from', 'to', 'probes', 'includeuptime', 'bycountry', 'byprobe']: sys.stderr.write("'%s'" % key + ' is not a valid argument of' + '<PingdomCheck.averages()\n') response = self.pingdom.request('GET', 'summary.average/%s' % self.id, kwargs) return response.json()['summary']
Get the average time / uptime value for a specified check and time period. Optional parameters: * time_from -- Start time of period. Format is UNIX timestamp Type: Integer Default: 0 * time_to -- End time of period. Format is UNIX timestamp Type: Integer Default: Current time * probes -- Filter to only use results from a list of probes. Format is a comma separated list of probe identifiers Type: String Default: All probes * includeuptime -- Include uptime information Type: Boolean Default: False * bycountry -- Split response times into country groups Type: Boolean Default: False * byprobe -- Split response times into probe groups Type: Boolean Default: False Returned structure: { 'responsetime' : { 'to' : <Integer> Start time of period 'from' : <Integer> End time of period 'avgresponse' : <Integer> Total average response time in milliseconds }, < More can be included with optional parameters > }
entailment
def probes(self, fromtime, totime=None): """Get a list of probes that performed tests for a specified check during a specified period.""" args = {'from': fromtime} if totime: args['to'] = totime response = self.pingdom.request('GET', 'summary.probes/%s' % self.id, args) return response.json()['probes']
Get a list of probes that performed tests for a specified check during a specified period.
entailment
def publishPublicReport(self): """Activate public report for this check. Returns status message""" response = self.pingdom.request('PUT', 'reports.public/%s' % self.id) return response.json()['message']
Activate public report for this check. Returns status message
entailment
def removePublicReport(self): """Deactivate public report for this check. Returns status message""" response = self.pingdom.request('DELETE', 'reports.public/%s' % self.id) return response.json()['message']
Deactivate public report for this check. Returns status message
entailment
def extract_deps(bundles, log=None): """Extract the dependencies from the bundle and its sub-bundles.""" def _flatten(bundle): deps = [] if hasattr(bundle, 'npm'): deps.append(bundle.npm) for content in bundle.contents: if isinstance(content, BundleBase): deps.extend(_flatten(content)) return deps flatten_deps = [] for bundle in bundles: flatten_deps.extend(_flatten(bundle)) packages = defaultdict(list) for dep in flatten_deps: for pkg, version in dep.items(): packages[pkg].append(version) deps = {} for package, versions in packages.items(): deps[package] = semver.max_satisfying(versions, '*', True) if log and len(versions) > 1: log('Warn: {0} version {1} resolved to: {2}'.format( repr(package), versions, repr(deps[package]) )) return deps
Extract the dependencies from the bundle and its sub-bundles.
entailment
def make_semver(version_str): """Make a semantic version from Python PEP440 version. Semantic versions does not handle post-releases. """ v = parse_version(version_str) major = v._version.release[0] try: minor = v._version.release[1] except IndexError: minor = 0 try: patch = v._version.release[2] except IndexError: patch = 0 prerelease = [] if v._version.pre: prerelease.append(''.join(str(x) for x in v._version.pre)) if v._version.dev: prerelease.append(''.join(str(x) for x in v._version.dev)) prerelease = '.'.join(prerelease) # Create semver version = '{0}.{1}.{2}'.format(major, minor, patch) if prerelease: version += '-{0}'.format(prerelease) if v.local: version += '+{0}'.format(v.local) return version
Make a semantic version from a Python PEP440 version. Semantic versioning does not handle post-releases.
entailment
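Expected conversions, assuming the pkg_resources parse_version backend used above (these are the outputs the code should produce, not verified against a specific setuptools release):

assert make_semver('1.2.3') == '1.2.3'
assert make_semver('1.2') == '1.2.0'            # missing parts default to 0
assert make_semver('2.0rc1') == '2.0.0-rc1'     # pre-release becomes a prerelease tag
assert make_semver('1.0.dev1') == '1.0.0-dev1'  # dev releases too
assert make_semver('1.2.3+local') == '1.2.3+local'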
def get_max_size(pool, num_option, item_length): """ Calculate the max number of items that an option can store in the pool at a given time. This is to limit the pool size to POOL_SIZE Args: pool (dict): answer pool num_option (int): total number of options available for the question item_length (int): the length of one item Returns: int: the max number of items that an option can have """ max_items = POOL_SIZE / item_length # existing items plus the space reserved for the min size. If an option has 1 item, POOL_OPTION_MIN_SIZE - 1 spaces # are reserved. existing = POOL_OPTION_MIN_SIZE * num_option + sum([max(0, len(pool.get(i, {})) - 5) for i in xrange(num_option)]) return int(max_items - existing)
Calculate the max number of items that an option can store in the pool at a given time. This is to limit the pool size to POOL_SIZE Args: pool (dict): answer pool num_option (int): total number of options available for the question item_length (int): the length of one item Returns: int: the max number of items that an option can have
entailment
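A worked example with hypothetical constants; POOL_SIZE and POOL_OPTION_MIN_SIZE are module-level constants not shown here, and the hardcoded 5 in the sum is assumed to match POOL_OPTION_MIN_SIZE:

# assume POOL_SIZE = 100, POOL_OPTION_MIN_SIZE = 5, item_length = 1,
# 4 options, and a pool where option 0 already holds 8 items:
#
#   max_items = 100 / 1 = 100
#   existing  = 5 * 4 + (max(0, 8 - 5) + 0 + 0 + 0) = 20 + 3 = 23
#
# so get_max_size(pool, num_option=4, item_length=1) == 100 - 23 == 77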
def offer_answer(pool, answer, rationale, student_id, algo, options): """ submit a student answer to the answer pool The answer maybe selected to stay in the pool depending on the selection algorithm Args: pool (dict): answer pool Answer pool format: { option1_index: { 'student_id': { can store algorithm specific info here }, ... } option2_index: ... } answer (int): the option student selected rationale (str): the rationale text student_id (str): student identifier algo (str): the selection algorithm options (dict): the options available in the question Raises: UnknownChooseAnswerAlgorithm: when we don't know the algorithm """ if algo['name'] == 'simple': offer_simple(pool, answer, rationale, student_id, options) elif algo['name'] == 'random': offer_random(pool, answer, rationale, student_id, options) else: raise UnknownChooseAnswerAlgorithm()
Submit a student answer to the answer pool. The answer may be selected to stay in the pool depending on the selection algorithm Args: pool (dict): answer pool Answer pool format: { option1_index: { 'student_id': { can store algorithm specific info here }, ... } option2_index: ... } answer (int): the option the student selected rationale (str): the rationale text student_id (str): student identifier algo (str): the selection algorithm options (dict): the options available in the question Raises: UnknownChooseAnswerAlgorithm: when we don't know the algorithm
entailment
def offer_simple(pool, answer, rationale, student_id, options): """ The simple selection algorithm. This algorithm randomly selects an answer from the pool to discard and adds the new one when the pool reaches the limit """ existing = pool.setdefault(answer, {}) if len(existing) >= get_max_size(pool, len(options), POOL_ITEM_LENGTH_SIMPLE): student_id_to_remove = random.choice(existing.keys()) del existing[student_id_to_remove] existing[student_id] = {} pool[answer] = existing
The simple selection algorithm. This algorithm randomly selects an answer from the pool to discard and adds the new one when the pool reaches the limit
entailment
def offer_random(pool, answer, rationale, student_id, options): """ The random selection algorithm. The same as the simple algorithm """ offer_simple(pool, answer, rationale, student_id, options)
The random selection algorithm. The same as the simple algorithm
entailment
def validate_seeded_answers_simple(answers, options, algo): """ This validator checks if the answers includes all possible options Args: answers (str): the answers to be checked options (dict): all options that should exist in the answers algo (str): selection algorithm Returns: None if everything is good. Otherwise, the missing option error message. """ seen_options = {} for answer in answers: if answer: key = options[answer['answer']].get('text') if options[answer['answer']].get('image_url'): key += options[answer['answer']].get('image_url') seen_options.setdefault(key, 0) seen_options[key] += 1 missing_options = [] index = 1 for option in options: key = option.get('text') + option.get('image_url') if option.get('image_url') else option.get('text') if option.get('text') != 'n/a': if seen_options.get(key, 0) == 0: missing_options.append(_('Option ') + str(index)) index += 1 if missing_options: return {'seed_error': _('Missing option seed(s): ') + ', '.join(missing_options)} return None
This validator checks if the answers include all possible options Args: answers (list): the answers to be checked options (dict): all options that should exist in the answers algo (str): selection algorithm Returns: None if everything is good. Otherwise, the missing option error message.
entailment
def validate_seeded_answers(answers, options, algo): """ Validate answers based on the selection algorithm This is called when the instructor sets up the tool and provides seeded answers to the question. This function tries to validate that the instructor provided enough seeds for a given algorithm, e.g. we require 1 seed for each option in the simple algorithm and at least 1 seed for the random algorithm. Otherwise, the first student wouldn't be able to see any answers on the second step, where they are supposed to compare and review other students' answers. Args: answers (list): list of dicts that contain seeded answers options (dict): all options that should exist in the answers algo (str): selection algorithm Returns: None if successful, otherwise error message """ if algo['name'] == 'simple': return validate_seeded_answers_simple(answers, options, algo) elif algo['name'] == 'random': return validate_seeded_answers_random(answers) else: raise UnknownChooseAnswerAlgorithm()
Validate answers based on the selection algorithm This is called when the instructor sets up the tool and provides seeded answers to the question. This function tries to validate that the instructor provided enough seeds for a given algorithm, e.g. we require 1 seed for each option in the simple algorithm and at least 1 seed for the random algorithm. Otherwise, the first student wouldn't be able to see any answers on the second step, where they are supposed to compare and review other students' answers. Args: answers (list): list of dicts that contain seeded answers options (dict): all options that should exist in the answers algo (str): selection algorithm Returns: None if successful, otherwise error message
entailment
def get_other_answers(pool, seeded_answers, get_student_item_dict, algo, options): """ Select other student's answers from answer pool or seeded answers based on the selection algorithm Args: pool (dict): answer pool, format: { option1_index: { student_id: { can store algorithm specific info here } }, option2_index: { student_id: { ... } } } seeded_answers (list): seeded answers from instructor [ {'answer': 0, 'rationale': 'rationale A'}, {'answer': 1, 'rationale': 'rationale B'}, ] get_student_item_dict (callable): get student item dict function to return student item dict algo (str): selection algorithm options (dict): answer options for the question Returns: dict: answers based on the selection algorithm """ # "#" means the number of responses returned should be the same as the number of options. num_responses = len(options) \ if 'num_responses' not in algo or algo['num_responses'] == "#" \ else int(algo['num_responses']) if algo['name'] == 'simple': return get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses) elif algo['name'] == 'random': return get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses) else: raise UnknownChooseAnswerAlgorithm()
Select other student's answers from answer pool or seeded answers based on the selection algorithm Args: pool (dict): answer pool, format: { option1_index: { student_id: { can store algorithm specific info here } }, option2_index: { student_id: { ... } } } seeded_answers (list): seeded answers from instructor [ {'answer': 0, 'rationale': 'rationale A'}, {'answer': 1, 'rationale': 'rationale B'}, ] get_student_item_dict (callable): get student item dict function to return student item dict algo (str): selection algorithm options (dict): answer options for the question Returns: dict: answers based on the selection algorithm
entailment
def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses): """ Get answers from others with simple algorithm, which picks one answer for each option. Args: see `get_other_answers` num_responses (int): the number of responses to be returned. This value may not be respected if there is not enough answers to return Returns: dict: answers based on the selection algorithm """ ret = [] # clean up answers so that all keys are int pool = {int(k): v for k, v in pool.items()} total_in_pool = len(seeded_answers) merged_pool = convert_seeded_answers(seeded_answers) student_id = get_student_item_dict()['student_id'] # merge the dictionaries in the answer dictionary for key in pool: total_in_pool += len(pool[key]) # if student_id has value, we assume the student just submitted an answer. So removing it # from total number in the pool if student_id in pool[key].keys(): total_in_pool -= 1 if key in merged_pool: merged_pool[key].update(pool[key].items()) else: merged_pool[key] = pool[key] # remember which option+student_id is selected, so that we don't have duplicates in the result selected = [] # loop until we have enough answers to return while len(ret) < min(num_responses, total_in_pool): for option, students in merged_pool.items(): student = student_id i = 0 while (student == student_id or i > 100) and (str(option) + student) not in selected: # retry until we got a different one or after 100 retries # we are suppose to get a different student answer or a seeded one in a few tries # as we have at least one seeded answer for each option in the algo. And it is not # suppose to overflow i order to break the loop student = random.choice(students.keys()) i += 1 selected.append(str(option)+student) if student.startswith('seeded'): # seeded answer, get the rationale from local rationale = students[student] else: student_item = get_student_item_dict(student) submission = sas_api.get_answers_for_student(student_item) rationale = submission.get_rationale(0) ret.append({'option': option, 'rationale': rationale}) # check if we have enough answers if len(ret) >= min(num_responses, total_in_pool): break return {"answers": ret}
Get answers from others with the simple algorithm, which picks one answer for each option.

Args:
    see `get_other_answers`
    num_responses (int): the number of responses to be returned. This value may not be
        respected if there are not enough answers to return

Returns:
    dict: answers based on the selection algorithm
entailment
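A minimal sketch of the simple algorithm, assuming this module imports random; with an empty pool every answer comes from the seeded set, so the sas_api branch is never hit and a stub item-dict function suffices:

# fake_item_dict is a hypothetical stand-in for get_student_item_dict.
seeded = [{'answer': 0, 'rationale': 'rationale A'},
          {'answer': 1, 'rationale': 'rationale B'}]
fake_item_dict = lambda student=None: {'student_id': 'current-student'}
result = get_other_answers_simple({}, seeded, fake_item_dict, num_responses=2)
# result == {'answers': [{'option': 0, 'rationale': 'rationale A'},
#                        {'option': 1, 'rationale': 'rationale B'}]}
# (option order may vary with dict iteration order)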
def get_other_answers_random(pool, seeded_answers, get_student_item_dict, num_responses):
    """
    Get answers from others with the random algorithm, which randomly selects answers from
    the pool. A student may, for example, get three answers for option 1, or one answer for
    option 1 and two answers for option 2.

    Args:
        see `get_other_answers`
        num_responses (int): the number of responses to be returned. This value may not be
            respected if there are not enough answers to return

    Returns:
        dict: answers based on the selection algorithm
    """
    ret = []
    # clean up answers so that all keys are int
    pool = {int(k): v for k, v in pool.items()}
    seeded = {'seeded' + str(index): answer for index, answer in enumerate(seeded_answers)}
    # flatten the pool into a single list of student ids; copy the key views so
    # the list can be extended and shuffled safely
    merged_pool = list(seeded.keys())
    for key in pool:
        merged_pool += list(pool[key].keys())
    # shuffle
    random.shuffle(merged_pool)
    # get student identifier
    student_id = get_student_item_dict()['student_id']
    for student in merged_pool:
        if len(ret) >= num_responses:
            # have enough answers
            break
        elif student == student_id:
            # this is the student's own answer, so don't return it
            continue
        if student.startswith('seeded'):
            option = seeded[student]['answer']
            rationale = seeded[student]['rationale']
        else:
            student_item = get_student_item_dict(student)
            submission = sas_api.get_answers_for_student(student_item)
            rationale = submission.get_rationale(0)
            option = submission.get_vote(0)
        ret.append({'option': option, 'rationale': rationale})

    return {"answers": ret}
Get answers from others with the random algorithm, which randomly selects answers from the pool. A student may, for example, get three answers for option 1, or one answer for option 1 and two answers for option 2.

Args:
    see `get_other_answers`
    num_responses (int): the number of responses to be returned. This value may not be
        respected if there are not enough answers to return

Returns:
    dict: answers based on the selection algorithm
entailment
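For intuition, the pre-shuffle shape of merged_pool for some hypothetical data:

pool = {0: {'alice': {}}, 1: {'bob': {}, 'carol': {}}}
seeded_answers = [{'answer': 0, 'rationale': 'rationale A'}]
# seeded      -> {'seeded0': {'answer': 0, 'rationale': 'rationale A'}}
# merged_pool -> ['seeded0', 'alice', 'bob', 'carol']  (pre-shuffle order may vary)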
def convert_seeded_answers(answers):
    """
    Convert seeded answers into the format that can be merged into student answers.

    Args:
        answers (list): seeded answers

    Returns:
        dict: seeded answers with student answers format:
            {
                0: {
                    'seeded0': 'rationale A'
                },
                1: {
                    'seeded1': 'rationale B'
                }
            }
    """
    converted = {}
    for index, answer in enumerate(answers):
        converted.setdefault(answer['answer'], {})
        converted[answer['answer']]['seeded' + str(index)] = answer['rationale']

    return converted
Convert seeded answers into the format that can be merged into student answers.

Args:
    answers (list): seeded answers

Returns:
    dict: seeded answers with student answers format:
        {
            0: {
                'seeded0': 'rationale A'
            },
            1: {
                'seeded1': 'rationale B'
            }
        }
entailment
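A small worked example of the conversion (deterministic, so the output can be checked directly):

seeded = [{'answer': 0, 'rationale': 'rationale A'},
          {'answer': 1, 'rationale': 'rationale B'},
          {'answer': 0, 'rationale': 'rationale C'}]
convert_seeded_answers(seeded)
# -> {0: {'seeded0': 'rationale A', 'seeded2': 'rationale C'},
#     1: {'seeded1': 'rationale B'}}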
def upgradeProcessor1to2(oldProcessor): """ Batch processors stopped polling at version 2, so they no longer needed the idleInterval attribute. They also gained a scheduled attribute which tracks their interaction with the scheduler. Since they stopped polling, we also set them up as a timed event here to make sure that they don't silently disappear, never to be seen again: running them with the scheduler gives them a chance to figure out what's up and set up whatever other state they need to continue to run. Since this introduces a new dependency of all batch processors on a powerup for the IScheduler, install a Scheduler or a SubScheduler if one is not already present. """ newProcessor = oldProcessor.upgradeVersion( oldProcessor.typeName, 1, 2, busyInterval=oldProcessor.busyInterval) newProcessor.scheduled = extime.Time() s = newProcessor.store sch = iaxiom.IScheduler(s, None) if sch is None: if s.parent is None: # Only site stores have no parents. sch = Scheduler(store=s) else: # Substores get subschedulers. sch = SubScheduler(store=s) installOn(sch, s) # And set it up to run. sch.schedule(newProcessor, newProcessor.scheduled) return newProcessor
Batch processors stopped polling at version 2, so they no longer needed the idleInterval attribute. They also gained a scheduled attribute which tracks their interaction with the scheduler. Since they stopped polling, we also set them up as a timed event here to make sure that they don't silently disappear, never to be seen again: running them with the scheduler gives them a chance to figure out what's up and set up whatever other state they need to continue to run. Since this introduces a new dependency of all batch processors on a powerup for the IScheduler, install a Scheduler or a SubScheduler if one is not already present.
entailment
def processor(forType):
    """
    Create an Axiom Item type which is suitable to use as a batch processor for the given
    Axiom Item type.

    Processors created this way depend on a L{iaxiom.IScheduler} powerup on the store on
    which they are installed.

    @type forType: L{item.MetaItem}
    @param forType: The Axiom Item type for which to create a batch processor type.

    @rtype: L{item.MetaItem}
    @return: An Axiom Item type suitable for use as a batch processor. If such a type
    previously existed, it will be returned. Otherwise, a new type is created.
    """
    MILLI = 1000
    try:
        processor = _processors[forType]
    except KeyError:
        def __init__(self, *a, **kw):
            item.Item.__init__(self, *a, **kw)
            self.store.powerUp(self, iaxiom.IBatchProcessor)

        attrs = {
            '__name__': 'Batch_' + forType.__name__,

            '__module__': forType.__module__,

            '__init__': __init__,

            '__repr__': lambda self: '<Batch of %s #%d>' % (reflect.qual(self.workUnitType), self.storeID),

            'schemaVersion': 2,

            'workUnitType': forType,

            'scheduled': attributes.timestamp(doc="""
            The next time at which this processor is scheduled to run.
            """, default=None),

            'busyInterval': attributes.integer(doc="""
            The number of milliseconds to wait between runs while there is
            still work remaining.
            """, default=MILLI / 10),
            }
        _processors[forType] = processor = item.MetaItem(
            attrs['__name__'],
            (item.Item, _BatchProcessorMixin),
            attrs)

        registerUpgrader(
            upgradeProcessor1to2,
            _processors[forType].typeName,
            1, 2)
    return processor
Create an Axiom Item type which is suitable to use as a batch processor for the given Axiom Item type.

Processors created this way depend on a L{iaxiom.IScheduler} powerup on the store on which they are installed.

@type forType: L{item.MetaItem}
@param forType: The Axiom Item type for which to create a batch processor type.

@rtype: L{item.MetaItem}
@return: An Axiom Item type suitable for use as a batch processor. If such a type previously existed, it will be returned. Otherwise, a new type is created.
entailment
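A hedged usage sketch; MyWorkItem and siteStore are hypothetical stand-ins, not names from the source:

# MyWorkItemBatch = processor(MyWorkItem)   # creates the Batch_MyWorkItem type
# proc = MyWorkItemBatch(store=siteStore)   # __init__ powers it up as IBatchProcessor
# processor(MyWorkItem) is MyWorkItemBatch  # True: the type is cached in _processors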
def storeBatchServiceSpecialCase(st, pups): """ Adapt a L{Store} to L{IBatchService}. If C{st} is a substore, return a simple wrapper that delegates to the site store's L{IBatchService} powerup. Return C{None} if C{st} has no L{BatchProcessingControllerService}. """ if st.parent is not None: try: return _SubStoreBatchChannel(st) except TypeError: return None storeService = service.IService(st) try: return storeService.getServiceNamed("Batch Processing Controller") except KeyError: return None
Adapt a L{Store} to L{IBatchService}. If C{st} is a substore, return a simple wrapper that delegates to the site store's L{IBatchService} powerup. Return C{None} if C{st} has no L{BatchProcessingControllerService}.
entailment
def mark(self): """ Mark the unit of work as failed in the database and update the listener so as to skip it next time. """ self.reliableListener.lastRun = extime.Time() BatchProcessingError( store=self.reliableListener.store, processor=self.reliableListener.processor, listener=self.reliableListener.listener, item=self.workUnit, error=self.failure.getErrorMessage())
Mark the unit of work as failed in the database and update the listener so as to skip it next time.
entailment
def run(self): """ Try to run one unit of work through one listener. If there are more listeners or more work, reschedule this item to be run again in C{self.busyInterval} milliseconds, otherwise unschedule it. @rtype: L{extime.Time} or C{None} @return: The next time at which to run this item, used by the scheduler for automatically rescheduling, or None if there is no more work to do. """ now = extime.Time() if self.step(): self.scheduled = now + datetime.timedelta(milliseconds=self.busyInterval) else: self.scheduled = None return self.scheduled
Try to run one unit of work through one listener. If there are more listeners or more work, reschedule this item to be run again in C{self.busyInterval} milliseconds, otherwise unschedule it. @rtype: L{extime.Time} or C{None} @return: The next time at which to run this item, used by the scheduler for automatically rescheduling, or None if there is no more work to do.
entailment
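A sketch of the scheduler contract implied above (proc is a hypothetical processor instance):

# nextTime = proc.run()
# nextTime is None            # -> no work left, the scheduler drops the event
# nextTime == proc.scheduled  # -> True; run() records its own reschedule time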
def addReliableListener(self, listener, style=iaxiom.LOCAL): """ Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state. """ existing = self.store.findUnique(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.listener == listener), default=None) if existing is not None: return existing for work in self.store.query(self.workUnitType, sort=self.workUnitType.storeID.descending, limit=1): forwardMark = work.storeID backwardMark = work.storeID + 1 break else: forwardMark = 0 backwardMark = 0 if self.scheduled is None: self.scheduled = extime.Time() iaxiom.IScheduler(self.store).schedule(self, self.scheduled) return _ReliableListener(store=self.store, processor=self, listener=listener, forwardMark=forwardMark, backwardMark=backwardMark, style=style)
Add the given Item to the set which will be notified of Items available for processing. Note: Each Item is processed synchronously. Adding too many listeners to a single batch processor will cause the L{step} method to block while it sends notification to each listener. @param listener: An Item instance which provides a C{processItem} method. @return: An Item representing L{listener}'s persistent tracking state.
entailment
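A hedged sketch of listener registration; MyListener is a hypothetical Item whose processItem method does the work:

# listener = MyListener(store=s)
# tracker = proc.addReliableListener(listener)          # iaxiom.LOCAL by default
# proc.addReliableListener(listener) is tracker         # True: registration is idempotent
# proc.addReliableListener(other, style=iaxiom.REMOTE)  # processed by the batch process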
def removeReliableListener(self, listener): """ Remove a previously added listener. """ self.store.query(_ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.listener == listener)).deleteFromStore() self.store.query(BatchProcessingError, attributes.AND(BatchProcessingError.processor == self, BatchProcessingError.listener == listener)).deleteFromStore()
Remove a previously added listener.
entailment
def getReliableListeners(self): """ Return an iterable of the listeners which have been added to this batch processor. """ for rellist in self.store.query(_ReliableListener, _ReliableListener.processor == self): yield rellist.listener
Return an iterable of the listeners which have been added to this batch processor.
entailment
def getFailedItems(self): """ Return an iterable of two-tuples of listeners which raised an exception from C{processItem} and the item which was passed as the argument to that method. """ for failed in self.store.query(BatchProcessingError, BatchProcessingError.processor == self): yield (failed.listener, failed.item)
Return an iterable of two-tuples of listeners which raised an exception from C{processItem} and the item which was passed as the argument to that method.
entailment
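One possible way to inspect and manually retry failures; whether re-running processItem is safe depends on the listener, so treat this as illustrative only:

# for listener, workUnit in proc.getFailedItems():
#     listener.processItem(workUnit)   # manual retry outside the batch machinery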
def itemAdded(self): """ Called to indicate that a new item of the type monitored by this batch processor is being added to the database. If this processor is not already scheduled to run, this will schedule it. It will also start the batch process if it is not yet running and there are any registered remote listeners. """ localCount = self.store.query( _ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.style == iaxiom.LOCAL), limit=1).count() remoteCount = self.store.query( _ReliableListener, attributes.AND(_ReliableListener.processor == self, _ReliableListener.style == iaxiom.REMOTE), limit=1).count() if localCount and self.scheduled is None: self.scheduled = extime.Time() iaxiom.IScheduler(self.store).schedule(self, self.scheduled) if remoteCount: batchService = iaxiom.IBatchService(self.store, None) if batchService is not None: batchService.start()
Called to indicate that a new item of the type monitored by this batch processor is being added to the database. If this processor is not already scheduled to run, this will schedule it. It will also start the batch process if it is not yet running and there are any registered remote listeners.
entailment
def call(self, itemMethod): """ Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked. """ item = itemMethod.im_self method = itemMethod.im_func.func_name return self.batchController.getProcess().addCallback( CallItemMethod(storepath=item.store.dbdir, storeid=item.storeID, method=method).do)
Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked.
entailment
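A hedged sketch of an out-of-process invocation; svc and expensiveIndex are hypothetical:

# svc = iaxiom.IBatchService(someStore)
# d = svc.call(expensiveIndex.rebuild)  # bound Item method, runs in the batch process
# d.addCallback(lambda ignored: log.msg("rebuild finished"))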
def processWhileRunning(self): """ Run tasks until stopService is called. """ work = self.step() for result, more in work: yield result if not self.running: break if more: delay = 0.1 else: delay = 10.0 yield task.deferLater(reactor, delay, lambda: None)
Run tasks until stopService is called.
entailment
def getcols(sheetMatch=None, colMatch="Decay"):
    """find every matching column in every matching sheet and report where it lives,
    so the columns can later be pulled into a new sheet or book."""
    book = BOOK()
    if sheetMatch is None:
        matchingSheets = book.sheetNames
        print('all %d sheets selected' % (len(matchingSheets)))
    else:
        matchingSheets = [x for x in book.sheetNames if sheetMatch in x]
        print('%d of %d sheets selected matching "%s"' % (len(matchingSheets), len(book.sheetNames), sheetMatch))
    matchingSheetsWithCol = []
    for sheetName in matchingSheets:
        i = book.sheetNames.index(sheetName)  # index of that sheet
        for j, colName in enumerate(book.sheets[i].colDesc):
            if colMatch in colName:
                matchingSheetsWithCol.append((sheetName, j))
                break
        else:
            print("  no match in [%s]%s" % (book.bookName, sheetName))
    print("%d of %d of those have your column" % (len(matchingSheetsWithCol), len(matchingSheets)))
    for sheetName, colIndex in matchingSheetsWithCol:
        print("  [%s]%s (column %d)" % (book.bookName, sheetName, colIndex))
find every matching column in every matching sheet and report where it lives, so the columns can later be pulled into a new sheet or book.
entailment
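Example calls, assuming an open workbook reachable through BOOK(); results are printed rather than returned:

# getcols()                                        # every sheet, columns matching "Decay"
# getcols(sheetMatch="trace", colMatch="Current")  # only sheets whose name contains "trace"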
def upgradeStore(self, store): """ Recursively upgrade C{store}. """ self.upgradeEverything(store) upgradeExplicitOid(store) for substore in store.query(SubStore): print 'Upgrading: {!r}'.format(substore) self.upgradeStore(substore.open())
Recursively upgrade C{store}.
entailment