_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q35800
Canvas.get_appointment_groups
train
def get_appointment_groups(self, **kwargs):
    """
    List appointment groups.

    :calls: `GET /api/v1/appointment_groups \
    <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.appointment_group.AppointmentGroup`
    """
    from canvasapi.appointment_group import AppointmentGroup

    # Paginated endpoint: PaginatedList performs the requests lazily.
    request_params = combine_kwargs(**kwargs)
    return PaginatedList(
        AppointmentGroup,
        self.__requester,
        'GET',
        'appointment_groups',
        _kwargs=request_params
    )
python
{ "resource": "" }
q35801
Canvas.get_appointment_group
train
def get_appointment_group(self, appointment_group):
    """
    Return a single appointment group by ID.

    :calls: `GET /api/v1/appointment_groups/:id \
    <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_

    :param appointment_group: The ID of the appointment group.
    :type appointment_group:
        :class:`canvasapi.appointment_group.AppointmentGroup` or int

    :rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
    """
    from canvasapi.appointment_group import AppointmentGroup

    # Accept either an AppointmentGroup object or a bare ID.
    group_id = obj_or_id(
        appointment_group, "appointment_group", (AppointmentGroup,)
    )
    endpoint = 'appointment_groups/{}'.format(group_id)
    response = self.__requester.request('GET', endpoint)
    return AppointmentGroup(self.__requester, response.json())
python
{ "resource": "" }
q35802
Canvas.create_appointment_group
train
def create_appointment_group(self, appointment_group, **kwargs):
    """
    Create a new appointment group.

    :calls: `POST /api/v1/appointment_groups \
    <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.create>`_

    :param appointment_group: The attributes of the appointment group.
        Must contain both ``context_codes`` and ``title`` keys.
    :type appointment_group: `dict`
    :param title: The title of the appointment group.
    :type title: `str`

    :rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
    """
    from canvasapi.appointment_group import AppointmentGroup

    # Validate the dict payload with guard clauses; 'context_codes' is
    # checked first, matching the historical error ordering.
    if isinstance(appointment_group, dict):
        if 'context_codes' not in appointment_group:
            raise RequiredFieldMissing(
                "Dictionary with key 'context_codes' is missing."
            )
        if 'title' not in appointment_group:
            raise RequiredFieldMissing("Dictionary with key 'title' is missing.")
        kwargs['appointment_group'] = appointment_group

    response = self.__requester.request(
        'POST',
        'appointment_groups',
        _kwargs=combine_kwargs(**kwargs)
    )
    return AppointmentGroup(self.__requester, response.json())
python
{ "resource": "" }
q35803
Canvas.get_file
train
def get_file(self, file, **kwargs):
    """
    Return the standard attachment json object for a file.

    :calls: `GET /api/v1/files/:id \
    <https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_

    :param file: The object or ID of the file to retrieve.
    :type file: :class:`canvasapi.file.File` or int

    :rtype: :class:`canvasapi.file.File`
    """
    # Accept either a File object or a bare ID.
    file_id = obj_or_id(file, "file", (File,))
    endpoint = 'files/{}'.format(file_id)
    response = self.__requester.request(
        'GET', endpoint, _kwargs=combine_kwargs(**kwargs)
    )
    return File(self.__requester, response.json())
python
{ "resource": "" }
q35804
Canvas.get_folder
train
def get_folder(self, folder):
    """
    Return the details for a folder.

    :calls: `GET /api/v1/folders/:id \
    <https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_

    :param folder: The object or ID of the folder to retrieve.
    :type folder: :class:`canvasapi.folder.Folder` or int

    :rtype: :class:`canvasapi.folder.Folder`
    """
    # Accept either a Folder object or a bare ID.
    folder_id = obj_or_id(folder, "folder", (Folder,))
    response = self.__requester.request(
        'GET', 'folders/{}'.format(folder_id)
    )
    return Folder(self.__requester, response.json())
python
{ "resource": "" }
q35805
Canvas.get_outcome
train
def get_outcome(self, outcome):
    """
    Return the details of the outcome with the given ID.

    :calls: `GET /api/v1/outcomes/:id \
    <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_

    :param outcome: The outcome object or ID to return.
    :type outcome: :class:`canvasapi.outcome.Outcome` or int

    :returns: An Outcome object.
    :rtype: :class:`canvasapi.outcome.Outcome`
    """
    from canvasapi.outcome import Outcome

    # Accept either an Outcome object or a bare ID.
    outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
    endpoint = 'outcomes/{}'.format(outcome_id)
    response = self.__requester.request('GET', endpoint)
    return Outcome(self.__requester, response.json())
python
{ "resource": "" }
q35806
Canvas.get_root_outcome_group
train
def get_root_outcome_group(self):
    """
    Redirect to the root outcome group for the global context.

    :calls: `GET /api/v1/global/root_outcome_group \
    <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.redirect>`_

    :returns: The OutcomeGroup of the context.
    :rtype: :class:`canvasapi.outcome.OutcomeGroup`
    """
    from canvasapi.outcome import OutcomeGroup

    response = self.__requester.request('GET', 'global/root_outcome_group')
    return OutcomeGroup(self.__requester, response.json())
python
{ "resource": "" }
q35807
Canvas.get_outcome_group
train
def get_outcome_group(self, group):
    """
    Return the details of the outcome group with the given ID.

    :calls: `GET /api/v1/global/outcome_groups/:id \
    <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_

    :param group: The outcome group object or ID to return.
    :type group: :class:`canvasapi.outcome.OutcomeGroup` or int

    :returns: An outcome group object.
    :rtype: :class:`canvasapi.outcome.OutcomeGroup`
    """
    from canvasapi.outcome import OutcomeGroup

    # Accept either an OutcomeGroup object or a bare ID.
    group_id = obj_or_id(group, "group", (OutcomeGroup,))
    endpoint = 'global/outcome_groups/{}'.format(group_id)
    response = self.__requester.request('GET', endpoint)
    return OutcomeGroup(self.__requester, response.json())
python
{ "resource": "" }
q35808
Canvas.get_progress
train
def get_progress(self, progress, **kwargs):
    """
    Get a specific progress object.

    :calls: `GET /api/v1/progress/:id \
    <https://canvas.instructure.com/doc/api/progress.html#method.progress.show>`_

    :param progress: The object or ID of the progress to retrieve.
    :type progress: int, str or :class:`canvasapi.progress.Progress`

    :rtype: :class:`canvasapi.progress.Progress`
    """
    from canvasapi.progress import Progress

    # Accept either a Progress object or a bare ID.
    progress_id = obj_or_id(progress, "progress", (Progress,))
    endpoint = 'progress/{}'.format(progress_id)
    response = self.__requester.request(
        'GET', endpoint, _kwargs=combine_kwargs(**kwargs)
    )
    return Progress(self.__requester, response.json())
python
{ "resource": "" }
q35809
Canvas.get_announcements
train
def get_announcements(self, **kwargs):
    """
    List announcements.

    :calls: `GET /api/v1/announcements \
    <https://canvas.instructure.com/doc/api/announcements.html#method.announcements_api.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.discussion_topic.DiscussionTopic`
    """
    from canvasapi.discussion_topic import DiscussionTopic

    # Paginated endpoint: PaginatedList performs the requests lazily.
    request_params = combine_kwargs(**kwargs)
    return PaginatedList(
        DiscussionTopic,
        self.__requester,
        'GET',
        'announcements',
        _kwargs=request_params
    )
python
{ "resource": "" }
q35810
is_multivalued
train
def is_multivalued(value):
    """
    Determine whether the given value should be treated as a sequence of
    multiple values when used as a request parameter.

    In general anything iterable is multivalued: `list`, `tuple`,
    generators, and the iterable objects returned by `zip`,
    `itertools.chain`, etc.  A plain `int` is single-valued.

    `str` and `bytes` are special cases: although both are iterable, each
    is treated as a single value rather than a sequence of characters or
    bytes.
    """
    # Text/byte strings are iterable, but semantically a single value.
    is_stringlike = isinstance(value, (string_types, binary_type))
    return not is_stringlike and isinstance(value, Iterable)
python
{ "resource": "" }
q35811
combine_kwargs
train
def combine_kwargs(**kwargs):
    """
    Flatten keyword arguments made of nested dictionaries and lists into
    a list of properly-formatted parameter tuples for the Requester.

    :param kwargs: A dictionary containing keyword arguments to be
        flattened into properly-formatted parameters.
    :type kwargs: dict

    :returns: A list of tuples representing the flattened kwargs; each
        tuple is (key string, value).
    :rtype: `list` of `tuple`
    """
    flattened = []

    for keyword, value in kwargs.items():
        if isinstance(value, dict):
            # Nested dict: flatten each entry and prefix with the keyword.
            for sub_key, sub_value in value.items():
                flattened.extend(
                    ('{}{}'.format(keyword, suffix), final)
                    for suffix, final in flatten_kwarg(sub_key, sub_value)
                )
        elif is_multivalued(value):
            # Sequence: each element becomes its own bracketed parameter.
            for element in value:
                flattened.extend(
                    ('{}{}'.format(keyword, suffix), final)
                    for suffix, final in flatten_kwarg('', element)
                )
        else:
            # Scalar: passed through as-is under its keyword.
            flattened.append((text_type(keyword), value))

    return flattened
python
{ "resource": "" }
q35812
flatten_kwarg
train
def flatten_kwarg(key, obj):
    """
    Recursively flatten one fragment of a kwarg into (key-suffix, value)
    tuples.

    :param key: The partial keyword to add to the full keyword.
    :type key: str
    :param obj: The object to translate into a kwarg. If `dict`, each key
        is bracketed onto the keyword and recursed into. If `list` or
        `tuple`, empty brackets are appended and each element recursed
        into. Otherwise this is the base case.

    :returns: A list of tuples representing flattened kwargs; each tuple
        is (key-suffix string, value).
    :rtype: `list` of `tuple`
    """
    if isinstance(obj, dict):
        # Wrap this key in brackets (e.g. "[key]") and recurse per entry.
        result = []
        for sub_key, sub_obj in obj.items():
            result.extend(
                ('[{}]{}'.format(key, suffix), value)
                for suffix, value in flatten_kwarg(sub_key, sub_obj)
            )
        return result

    if is_multivalued(obj):
        # Append '][' to the key so the base case's surrounding brackets
        # produce the trailing "[]" (i.e. "[key][]").
        result = []
        for element in obj:
            result.extend(flatten_kwarg(key + '][', element))
        return result

    # Base case: a single (bracketed-key, value) pair.
    return [('[{}]'.format(text_type(key)), obj)]
python
{ "resource": "" }
q35813
get_institution_url
train
def get_institution_url(base_url):
    """
    Clean up a given base URL: strip trailing slashes and drop any
    '/api/v1' suffix (and everything after it).

    :param base_url: The base URL of the API.
    :type base_url: str
    :rtype: str
    """
    trimmed = base_url.rstrip('/')
    marker = trimmed.find('/api/v1')
    # Cut at the first '/api/v1' occurrence if present.
    return trimmed[:marker] if marker != -1 else trimmed
python
{ "resource": "" }
q35814
file_or_path
train
def file_or_path(file):
    """
    Open a file and return the handle if a path is given. If a file
    handle is given, return it directly.

    :param file: A file handle or path to a file.

    :returns: A tuple with the open file handle and whether the input
        was a path.
    :rtype: (file, bool)
    """
    # Already a file handle: hand it back untouched.
    if not isinstance(file, string_types):
        return file, False

    if not os.path.exists(file):
        raise IOError('File at path ' + file + ' does not exist.')
    # Path input: open in binary mode for upload.
    return open(file, 'rb'), True
python
{ "resource": "" }
q35815
CalendarEvent.delete
train
def delete(self, **kwargs):
    """
    Delete this calendar event.

    :calls: `DELETE /api/v1/calendar_events/:id \
    <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.destroy>`_

    :rtype: :class:`canvasapi.calendar_event.CalendarEvent`
    """
    endpoint = 'calendar_events/{}'.format(self.id)
    response = self._requester.request(
        'DELETE', endpoint, _kwargs=combine_kwargs(**kwargs)
    )
    # The API echoes the deleted event back.
    return CalendarEvent(self._requester, response.json())
python
{ "resource": "" }
q35816
CalendarEvent.edit
train
def edit(self, **kwargs):
    """
    Modify this calendar event.

    :calls: `PUT /api/v1/calendar_events/:id \
    <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.update>`_

    :rtype: :class:`canvasapi.calendar_event.CalendarEvent`
    """
    endpoint = 'calendar_events/{}'.format(self.id)
    response = self._requester.request(
        'PUT', endpoint, _kwargs=combine_kwargs(**kwargs)
    )
    payload = response.json()
    # A successful update echoes the event (including 'title'); sync this
    # object's attributes in that case before returning a fresh instance.
    if 'title' in payload:
        super(CalendarEvent, self).set_attributes(payload)
    return CalendarEvent(self._requester, payload)
python
{ "resource": "" }
q35817
_wrap_layer
train
def _wrap_layer(name, input_layer, build_func, dropout_rate=0.0, trainable=True):
    """Wrap a sub-layer with dropout, a residual connection and layer norm.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param build_func: A callable that takes the input tensor and
        generates the output tensor.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    output = build_func(input_layer)

    # Optional dropout on the sub-layer's output.
    if dropout_rate > 0.0:
        output = keras.layers.Dropout(
            rate=dropout_rate,
            name='%s-Dropout' % name,
        )(output)

    # Multi-input sub-layers take the residual from their first input.
    residual_source = input_layer[0] if isinstance(input_layer, list) else input_layer
    residual = keras.layers.Add(name='%s-Add' % name)([residual_source, output])

    return LayerNormalization(
        trainable=trainable,
        name='%s-Norm' % name,
    )(residual)
python
{ "resource": "" }
q35818
attention_builder
train
def attention_builder(name, head_num, activation, history_only, trainable=True):
    """Get a multi-head self-attention builder.

    :param name: Prefix of names for internal layers.
    :param head_num: Number of heads in multi-head self-attention.
    :param activation: Activation for multi-head self-attention.
    :param history_only: Only use history data.
    :param trainable: Whether the layer is trainable.
    :return: A callable that applies the attention layer to its input.
    """
    def build(x):
        attention = MultiHeadAttention(
            head_num=head_num,
            activation=activation,
            history_only=history_only,
            trainable=trainable,
            name=name,
        )
        return attention(x)

    return build
python
{ "resource": "" }
q35819
feed_forward_builder
train
def feed_forward_builder(name, hidden_dim, activation, trainable=True):
    """Get a position-wise feed-forward layer builder.

    :param name: Prefix of names for internal layers.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param activation: Activation for feed-forward layer.
    :param trainable: Whether the layer is trainable.
    :return: A callable that applies the feed-forward layer to its input.
    """
    def build(x):
        feed_forward = FeedForward(
            units=hidden_dim,
            activation=activation,
            trainable=trainable,
            name=name,
        )
        return feed_forward(x)

    return build
python
{ "resource": "" }
q35820
get_encoder_component
train
def get_encoder_component(name, input_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True):
    """Build one encoder block: self-attention followed by feed-forward,
    each wrapped with dropout, residual and normalization.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param head_num: Number of heads in multi-head self-attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head self-attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    attention_name = '%s-MultiHeadSelfAttention' % name
    feed_forward_name = '%s-FeedForward' % name

    # Self-attention sub-block.
    attended = _wrap_layer(
        name=attention_name,
        input_layer=input_layer,
        build_func=attention_builder(
            name=attention_name,
            head_num=head_num,
            activation=attention_activation,
            history_only=False,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
    # Position-wise feed-forward sub-block.
    return _wrap_layer(
        name=feed_forward_name,
        input_layer=attended,
        build_func=feed_forward_builder(
            name=feed_forward_name,
            hidden_dim=hidden_dim,
            activation=feed_forward_activation,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
python
{ "resource": "" }
q35821
get_decoder_component
train
def get_decoder_component(name, input_layer, encoded_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True):
    """Build one decoder block: masked self-attention, encoder-decoder
    (query) attention, then feed-forward — each wrapped with dropout,
    residual and normalization.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param encoded_layer: Encoded layer from encoder.
    :param head_num: Number of heads in multi-head self-attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head self-attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    self_attention_name = '%s-MultiHeadSelfAttention' % name
    query_attention_name = '%s-MultiHeadQueryAttention' % name
    feed_forward_name = '%s-FeedForward' % name

    # Masked self-attention over the decoder's own history.
    self_attended = _wrap_layer(
        name=self_attention_name,
        input_layer=input_layer,
        build_func=attention_builder(
            name=self_attention_name,
            head_num=head_num,
            activation=attention_activation,
            history_only=True,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
    # Query attention: decoder queries against encoder keys/values.
    query_attended = _wrap_layer(
        name=query_attention_name,
        input_layer=[self_attended, encoded_layer, encoded_layer],
        build_func=attention_builder(
            name=query_attention_name,
            head_num=head_num,
            activation=attention_activation,
            history_only=False,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
    # Position-wise feed-forward sub-block.
    return _wrap_layer(
        name=feed_forward_name,
        input_layer=query_attended,
        build_func=feed_forward_builder(
            name=feed_forward_name,
            hidden_dim=hidden_dim,
            activation=feed_forward_activation,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
python
{ "resource": "" }
q35822
get_encoders
train
def get_encoders(encoder_num, input_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True):
    """Stack ``encoder_num`` encoder components.

    :param encoder_num: Number of encoder components.
    :param input_layer: Input layer.
    :param head_num: Number of heads in multi-head self-attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head self-attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    output = input_layer
    # Components are named Encoder-1 .. Encoder-N.
    for index in range(1, encoder_num + 1):
        output = get_encoder_component(
            name='Encoder-%d' % index,
            input_layer=output,
            head_num=head_num,
            hidden_dim=hidden_dim,
            attention_activation=attention_activation,
            feed_forward_activation=feed_forward_activation,
            dropout_rate=dropout_rate,
            trainable=trainable,
        )
    return output
python
{ "resource": "" }
q35823
get_decoders
train
def get_decoders(decoder_num, input_layer, encoded_layer, head_num, hidden_dim, attention_activation=None, feed_forward_activation='relu', dropout_rate=0.0, trainable=True):
    """Stack ``decoder_num`` decoder components, each attending to the
    same encoder output.

    :param decoder_num: Number of decoder components.
    :param input_layer: Input layer.
    :param encoded_layer: Encoded layer from encoder.
    :param head_num: Number of heads in multi-head self-attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head self-attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    output = input_layer
    # Components are named Decoder-1 .. Decoder-N.
    for index in range(1, decoder_num + 1):
        output = get_decoder_component(
            name='Decoder-%d' % index,
            input_layer=output,
            encoded_layer=encoded_layer,
            head_num=head_num,
            hidden_dim=hidden_dim,
            attention_activation=attention_activation,
            feed_forward_activation=feed_forward_activation,
            dropout_rate=dropout_rate,
            trainable=trainable,
        )
    return output
python
{ "resource": "" }
q35824
get_model
train
def get_model(token_num,
              embed_dim,
              encoder_num,
              decoder_num,
              head_num,
              hidden_dim,
              attention_activation=None,
              feed_forward_activation='relu',
              dropout_rate=0.0,
              use_same_embed=True,
              embed_weights=None,
              embed_trainable=None,
              trainable=True):
    """Get full model without compilation.

    :param token_num: Number of distinct tokens.
    :param embed_dim: Dimension of token embedding.
    :param encoder_num: Number of encoder components.
    :param decoder_num: Number of decoder components.
    :param head_num: Number of heads in multi-head self-attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head self-attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param use_same_embed: Whether to use the same token embedding layer.
        `token_num`, `embed_weights` and `embed_trainable` should be lists
        of two elements if it is False.
    :param embed_weights: Initial weights of token embedding.
    :param embed_trainable: Whether the token embedding is trainable. It
        will automatically set to False if the given value is None when
        embedding weights has been provided.
    :param trainable: Whether the layers are trainable.
    :return: Keras model.
    """
    # Normalize scalar arguments into [encoder, decoder] pairs so the
    # rest of the function can treat both sides uniformly.
    if not isinstance(token_num, list):
        token_num = [token_num, token_num]
    encoder_token_num, decoder_token_num = token_num

    if not isinstance(embed_weights, list):
        embed_weights = [embed_weights, embed_weights]
    encoder_embed_weights, decoder_embed_weights = embed_weights
    # Keras' `weights` argument expects a list of arrays.
    if encoder_embed_weights is not None:
        encoder_embed_weights = [encoder_embed_weights]
    if decoder_embed_weights is not None:
        decoder_embed_weights = [decoder_embed_weights]

    if not isinstance(embed_trainable, list):
        embed_trainable = [embed_trainable, embed_trainable]
    encoder_embed_trainable, decoder_embed_trainable = embed_trainable
    # Default: an embedding is frozen exactly when pretrained weights
    # were supplied for it.
    if encoder_embed_trainable is None:
        encoder_embed_trainable = encoder_embed_weights is None
    if decoder_embed_trainable is None:
        decoder_embed_trainable = decoder_embed_weights is None

    if use_same_embed:
        # Shared embedding. NOTE(review): this branch uses the
        # encoder-side vocab size/weights/trainable flag for both sides,
        # so sharing presumes identical encoder/decoder vocabularies.
        encoder_embed_layer = decoder_embed_layer = EmbeddingRet(
            input_dim=encoder_token_num,
            output_dim=embed_dim,
            mask_zero=True,
            weights=encoder_embed_weights,
            trainable=encoder_embed_trainable,
            name='Token-Embedding',
        )
    else:
        encoder_embed_layer = EmbeddingRet(
            input_dim=encoder_token_num,
            output_dim=embed_dim,
            mask_zero=True,
            weights=encoder_embed_weights,
            trainable=encoder_embed_trainable,
            name='Encoder-Token-Embedding',
        )
        decoder_embed_layer = EmbeddingRet(
            input_dim=decoder_token_num,
            output_dim=embed_dim,
            mask_zero=True,
            weights=decoder_embed_weights,
            trainable=decoder_embed_trainable,
            name='Decoder-Token-Embedding',
        )

    # Encoder side: token embedding plus additive trigonometric position
    # encoding. EmbeddingRet returns a pair; element [0] is the embedded
    # sequence (the weight tensor is unused here).
    encoder_input = keras.layers.Input(shape=(None,), name='Encoder-Input')
    encoder_embed = TrigPosEmbedding(
        mode=TrigPosEmbedding.MODE_ADD,
        name='Encoder-Embedding',
    )(encoder_embed_layer(encoder_input)[0])
    encoded_layer = get_encoders(
        encoder_num=encoder_num,
        input_layer=encoder_embed,
        head_num=head_num,
        hidden_dim=hidden_dim,
        attention_activation=attention_activation,
        feed_forward_activation=feed_forward_activation,
        dropout_rate=dropout_rate,
        trainable=trainable,
    )

    # Decoder side: keep the embedding weight tensor so it can be passed
    # to the EmbeddingSim output layer below.
    decoder_input = keras.layers.Input(shape=(None,), name='Decoder-Input')
    decoder_embed, decoder_embed_weights = decoder_embed_layer(decoder_input)
    decoder_embed = TrigPosEmbedding(
        mode=TrigPosEmbedding.MODE_ADD,
        name='Decoder-Embedding',
    )(decoder_embed)
    decoded_layer = get_decoders(
        decoder_num=decoder_num,
        input_layer=decoder_embed,
        encoded_layer=encoded_layer,
        head_num=head_num,
        hidden_dim=hidden_dim,
        attention_activation=attention_activation,
        feed_forward_activation=feed_forward_activation,
        dropout_rate=dropout_rate,
        trainable=trainable,
    )

    # Output layer receives both the decoded sequence and the decoder
    # embedding weights (weight sharing with the token embedding).
    dense_layer = EmbeddingSim(
        trainable=trainable,
        name='Output',
    )([decoded_layer, decoder_embed_weights])
    return keras.models.Model(inputs=[encoder_input, decoder_input], outputs=dense_layer)
python
{ "resource": "" }
q35825
decode
train
def decode(model, tokens, start_token, end_token, pad_token, max_len=10000, max_repeat=10, max_repeat_block=10):
    """Decode with the given model and input tokens.

    :param model: The trained model.
    :param tokens: The input tokens of encoder.
    :param start_token: The token that represents the start of a sentence.
    :param end_token: The token that represents the end of a sentence.
    :param pad_token: The token that represents padding.
    :param max_len: Maximum length of decoded list.
    :param max_repeat: Maximum number of repeating blocks.
    :param max_repeat_block: Maximum length of the repeating block.
    :return: Decoded tokens.
    """
    # A single sample is a flat list; wrap it so the batched loop below
    # can treat everything uniformly, and unwrap again at the end.
    is_single = not isinstance(tokens[0], list)
    if is_single:
        tokens = [tokens]
    batch_size = len(tokens)
    # Each decoder input starts with just the start token; outputs[i]
    # stays None until sample i is finished.
    decoder_inputs = [[start_token] for _ in range(batch_size)]
    outputs = [None for _ in range(batch_size)]
    output_len = 1
    # Greedy decoding: loop until every sample has produced an output.
    while len(list(filter(lambda x: x is None, outputs))) > 0:
        output_len += 1
        # Re-batch only the unfinished samples; index_map maps positions
        # in the shrunken batch back to original sample indices.
        batch_inputs, batch_outputs = [], []
        max_input_len = 0
        index_map = {}
        for i in range(batch_size):
            if outputs[i] is None:
                index_map[len(batch_inputs)] = i
                batch_inputs.append(tokens[i][:])
                batch_outputs.append(decoder_inputs[i])
                max_input_len = max(max_input_len, len(tokens[i]))
        # Right-pad encoder inputs to a rectangular array.
        for i in range(len(batch_inputs)):
            batch_inputs[i] += [pad_token] * (max_input_len - len(batch_inputs[i]))
        predicts = model.predict([np.asarray(batch_inputs), np.asarray(batch_outputs)])
        for i in range(len(predicts)):
            # Greedy pick: argmax over the distribution at the last step.
            last_token = np.argmax(predicts[i][-1])
            decoder_inputs[index_map[i]].append(last_token)
            # A sample finishes on end token, length limit, or excessive
            # trailing repetition.
            # NOTE(review): `_get_max_suffix_repeat_times` is given the
            # whole `decoder_inputs` batch list, not just this sample's
            # sequence — confirm the helper expects a list of lists.
            if last_token == end_token or\
                    (max_len is not None and output_len >= max_len) or\
                    _get_max_suffix_repeat_times(decoder_inputs, max_repeat * max_repeat_block) >= max_repeat:
                outputs[index_map[i]] = decoder_inputs[index_map[i]]
    if is_single:
        outputs = outputs[0]
    return outputs
python
{ "resource": "" }
q35826
gelu
train
def gelu(x):
    """Tanh-based approximation of the gelu activation.

    See: https://arxiv.org/pdf/1606.08415.pdf
    """
    inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * K.pow(x, 3))
    return 0.5 * x * (1.0 + K.tanh(inner))
python
{ "resource": "" }
q35827
Env.prefixed
train
def prefixed(self, prefix):
    """Context manager for parsing envvars with a common prefix.

    Prefixes nest: entering with ``"B_"`` while ``"A_"`` is active makes
    the effective prefix ``"A_B_"``.  The previous prefix is restored on
    exit even if the body raises, so a failing ``with`` block cannot
    leave a stale prefix behind.
    """
    old_prefix = self._prefix
    if old_prefix is None:
        self._prefix = prefix
    else:
        self._prefix = "{}{}".format(old_prefix, prefix)
    try:
        yield self
    finally:
        # Bug fix: previously the prefix was only restored on normal
        # exit; an exception inside the block leaked the new prefix.
        self._prefix = old_prefix
python
{ "resource": "" }
q35828
Env.add_parser
train
def add_parser(self, name, func):
    """Register ``func`` as a parser method called ``name``.

    ``func`` must receive the input value for an environment variable.
    """
    method = _func2method(func, method_name=name)
    self.__parser_map__[name] = method
    return None
python
{ "resource": "" }
q35829
Env.parser_for
train
def parser_for(self, name):
    """Decorator that registers the decorated function as the parser
    method named ``name``.

    The decorated function must receive the input value for an
    environment variable.
    """
    def register(func):
        # Delegate registration, then return the function unchanged so
        # it remains usable at its definition site.
        self.add_parser(name, func)
        return func

    return register
python
{ "resource": "" }
q35830
Env.add_parser_from_field
train
def add_parser_from_field(self, name, field_cls):
    """Register a new parser method named ``name`` built from the given
    marshmallow ``Field`` class."""
    method = _field2method(field_cls, method_name=name)
    self.__parser_map__[name] = method
python
{ "resource": "" }
q35831
NMEAFile.open
train
def open(self, fp, mode='r'):
    """
    Open the NMEAFile at ``fp`` and cache the handle on the instance.

    :param fp: Path (or anything the builtin ``open`` accepts).
    :param mode: File mode, passed through unchanged.
    :returns: The open file object.
    """
    # ``open`` here resolves to the builtin, not this method.
    handle = open(fp, mode=mode)
    self._file = handle
    return handle
python
{ "resource": "" }
q35832
xml_records
train
def xml_records(filename):
    """
    Yield (xml, error) pairs for every record in the EVTX file at
    ``filename``.  On success the first element is the parsed element and
    the second is None; if parsing fails, the raw XML string is yielded
    together with the exception.

    @type filename str
    @rtype: generator of (etree.Element or str), (None or Exception)
    """
    with Evtx(filename) as evtx:
        for xml, record in evtx_file_xml_view(evtx.get_file_header()):
            try:
                parsed = to_lxml(xml)
            except etree.XMLSyntaxError as e:
                yield xml, e
            else:
                yield parsed, None
python
{ "resource": "" }
q35833
RootNode.template_instance
train
def template_instance(self):
    '''
    Parse the template instance node; used to compute the location of
    the template definition structure.

    Returns:
        TemplateInstanceNode: the template instance.
    '''
    instance_offset = self.offset()
    # When the low nibble of the first byte is 0xF, the instance node
    # starts 4 bytes further into the buffer.
    if (self.unpack_byte(0x0) & 0x0F) == 0x0F:
        instance_offset += 4
    return TemplateInstanceNode(self._buf, instance_offset, self._chunk, self)
python
{ "resource": "" }
q35834
RootNode.template
train
def template(self):
    '''
    Parse the template referenced by this root node.

    Note: the template structure is not guaranteed to lie within this
    root node's boundaries.

    Returns:
        TemplateNode: the template.
    '''
    instance = self.template_instance()
    # The template offset is relative to the chunk start.
    template_offset = self._chunk.offset() + instance.template_offset()
    return TemplateNode(self._buf, template_offset, self._chunk, instance)
python
{ "resource": "" }
q35835
evtx_chunk_xml_view
train
def evtx_chunk_xml_view(chunk):
    """
    Generate XML representations of the records in an EVTX chunk.

    Does not include the XML <?xml... header. Records are ordered by
    chunk.records().

    Args:
        chunk (Evtx.Chunk): the chunk to render.

    Yields:
        tuple[str, Evtx.Record]: the rendered XML document and the raw
            record.
    """
    for record in chunk.records():
        yield evtx_record_xml_view(record), record
python
{ "resource": "" }
q35836
evtx_file_xml_view
train
def evtx_file_xml_view(file_header):
    """
    Generate XML representations of the records in an EVTX file.

    Does not include the XML <?xml... header. Records are ordered by
    file_header.chunks(), and then by chunk.records().

    Args:
        file_header (Evtx.FileHeader): the file header to render.

    Yields:
        tuple[str, Evtx.Record]: the rendered XML document and the raw
            record.
    """
    for chunk in file_header.chunks():
        for record in chunk.records():
            yield evtx_record_xml_view(record), record
python
{ "resource": "" }
q35837
FileHeader.get_record
train
def get_record(self, record_num):
    """
    Get a Record by record number.

    @type record_num: int
    @param record_num: The record number of the record to fetch.
    @rtype Record or None
    @return The requested record, or None if it is not found.
    """
    for chunk in self.chunks():
        # Skip chunks whose record-number range cannot contain the target.
        in_range = (
            chunk.log_first_record_number()
            <= record_num
            <= chunk.log_last_record_number()
        )
        if not in_range:
            continue
        for record in chunk.records():
            if record.record_num() == record_num:
                return record
    return None
python
{ "resource": "" }
q35838
Record.data
train
def data(self):
    """
    Return the raw data block which makes up this record as a bytestring.

    @rtype str
    @return A copy of the slice of the backing buffer for this record.
    """
    start = self.offset()
    end = start + self.size()
    return self._buf[start:end]
python
{ "resource": "" }
q35839
Record.lxml
train
def lxml(self):
    '''
    Render the record into a parsed lxml document, useful for querying
    data from the record using xpath, etc.

    note: lxml must be installed.

    Returns:
        lxml.etree.ElementTree: the rendered and parsed xml document.

    Raises:
        ImportError: if lxml is not installed.
    '''
    import lxml.etree
    document = (e_views.XML_HEADER + self.xml()).encode('utf-8')
    return lxml.etree.fromstring(document)
python
{ "resource": "" }
q35840
UserList.get
train
def get(self):
    """List all users, paginated and optionally filtered by auth system."""
    # NOTE(review): 'page' has both a default and required=True — the
    # default would never apply if the argument is truly required; verify
    # against the reqparse behavior relied upon here.
    self.reqparse.add_argument('page', type=int, default=1, required=True)
    self.reqparse.add_argument('count', type=int, default=50, choices=[25, 50, 100])
    self.reqparse.add_argument('authSystem', type=str, default=None, action='append')
    args = self.reqparse.parse_args()

    qry = db.User.order_by(User.username)
    # Optional filter: restrict to the requested auth systems.
    if args['authSystem']:
        qry = qry.filter(User.auth_system.in_(args['authSystem']))

    # Count before applying limit/offset so the total reflects the
    # filtered set, not just the current page.
    total = qry.count()
    qry = qry.limit(args['count'])
    # Pages are 1-based; only apply an offset beyond the first page.
    if (args['page'] - 1) > 0:
        offset = (args['page'] - 1) * args['count']
        qry = qry.offset(offset)
    users = qry.all()

    return self.make_response({
        'users': [x.to_json() for x in users],
        'userCount': total,
        'authSystems': list(current_app.available_auth_systems.keys()),
        'activeAuthSystem': current_app.active_auth_system.name
    })
python
{ "resource": "" }
q35841
UserList.options
train
def options(self):
    """Return the metadata (roles and auth systems) needed for user creation."""
    return self.make_response({
        'roles': db.Role.all(),
        'authSystems': list(current_app.available_auth_systems.keys()),
        'activeAuthSystem': current_app.active_auth_system.name
    })
python
{ "resource": "" }
q35842
UserDetails.get
train
def get(self, user_id):
    """Return a single user along with the full list of roles."""
    user = db.User.find_one(User.user_id == user_id)
    roles = db.Role.all()

    if not user:
        return self.make_response(
            'Unable to find the user requested, might have been removed',
            HTTP.NOT_FOUND
        )

    return self.make_response({'user': user.to_json(), 'roles': roles}, HTTP.OK)
python
{ "resource": "" }
q35843
UserDetails.put
train
def put(self, user_id):
    """Update a user object.

    Replaces the user's role assignments with the roles named in the
    request. The built-in admin user cannot be modified.

    Args:
        user_id (`int`): ID of the user to update

    Returns:
        Flask response with a status message
    """
    self.reqparse.add_argument('roles', type=str, action='append')
    args = self.reqparse.parse_args()
    # BUG FIX: this is an update operation; it was audited as 'user.create'
    auditlog(event='user.update', actor=session['user'].username, data=args)

    user = db.User.find_one(User.user_id == user_id)
    if not user:
        return self.make_response('No such user found: {}'.format(user_id), HTTP.NOT_FOUND)

    if user.username == 'admin' and user.auth_system == 'builtin':
        return self.make_response('You cannot modify the built-in admin user', HTTP.FORBIDDEN)

    # The query already restricts to the requested role names. The old code
    # additionally tested `role in args['roles']`, comparing Role objects
    # against name strings — a check that (barring a custom Role.__eq__)
    # never matched and silently stripped all roles from the user.
    roles = db.Role.find(Role.name.in_(args['roles']))
    user.roles = list(roles)

    db.session.add(user)
    db.session.commit()

    return self.make_response({'message': 'User roles updated'}, HTTP.OK)
python
{ "resource": "" }
q35844
DomainHijackAuditor.return_resource_name
train
def return_resource_name(self, record, resource_type):
    """Strip the trailing AWS domain from a DNS record value.

    e.g. ``bucketname.s3.amazonaws.com`` returns ``bucketname``.

    Args:
        record (str): DNS record value
        resource_type (str): AWS resource type (currently only 's3' is parsed)

    Returns:
        `str` — the extracted resource name, or the original record when the
        type is unsupported or parsing fails
    """
    try:
        if resource_type == 's3':
            # Raw string fixes the invalid '\.' escape in the old pattern
            regex = re.compile(r'.*(\.(?:s3-|s3){1}(?:.*)?\.amazonaws\.com)')
            return record.replace(regex.match(record).group(1), '')
    except Exception as e:
        self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))
    # BUG FIX: unsupported resource types previously fell out of the try
    # block and returned None; now the caller always gets the record back
    return record
python
{ "resource": "" }
q35845
BaseAccount.to_json
train
def to_json(self, is_admin=False):
    """Serialize the account into a dict.

    Args:
        is_admin (`bool`): When true, include fields (type, enabled state,
            required roles and custom properties) meant for admins only

    Returns:
        `dict`
    """
    if not is_admin:
        return {
            'accountId': self.account_id,
            'accountName': self.account_name,
            'contacts': self.contacts
        }

    return {
        'accountId': self.account_id,
        'accountName': self.account_name,
        'accountType': self.account_type,
        'contacts': self.contacts,
        'enabled': self.enabled == 1,
        'requiredRoles': self.required_roles,
        'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}
    }
python
{ "resource": "" }
q35846
BaseAccount.get
train
def get(account):
    """Return the typed account object identified by `account`.

    Args:
        account (`int`, `str`): Unique ID of the account to load from database

    Returns:
        `Account` object if found, else None
    """
    row = Account.get(account)
    if not row:
        return None

    # Resolve the concrete plugin class for this account's type and wrap
    # the raw DB row in it
    type_name = AccountType.get(row.account_type_id).account_type
    plugin_cls = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], type_name)
    return plugin_cls(row)
python
{ "resource": "" }
q35847
BaseAccount.get_all
train
def get_all(cls, include_disabled=True):
    """Returns all accounts of the calling subclass' type

    Args:
        include_disabled (`bool`): Include disabled accounts. Default: `True`

    Returns:
        `dict` of account_id: typed account object
    """
    # Only meaningful on a concrete subclass, since the filter relies on
    # the subclass' account_type attribute
    if cls == BaseAccount:
        raise InquisitorError('get_all on BaseAccount is not supported')

    account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id

    # Enabled accounts sort first, then by type and name
    qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)
    if not include_disabled:
        qry = qry.filter(Account.enabled == 1)

    accounts = qry.find(Account.account_type_id == account_type_id)
    return {res.account_id: cls(res) for res in accounts}
python
{ "resource": "" }
q35848
BaseAccount.search
train
def search(*, include_disabled=True, account_ids=None, account_type_id=None, properties=None, return_query=False):
    """Search for accounts based on the provided filters

    Args:
        include_disabled (`bool`): Include disabled accounts (default: True)
        account_ids (`list` of `int`): List of account IDs (a single scalar is also accepted)
        account_type_id (`int`): Account Type ID to limit results to
        properties (`dict`): Property name/value pairs. A value may be a str or a list of
            strings, in which case a boolean OR search is performed on the values
        return_query (`bool`): Return the query object prior to executing it, allowing
            sub-classes to amend the search with extra conditions. The caller must then
            handle pagination on its own

    Returns:
        (`int`, `list` of `Account`) tuple, or `sqlalchemy.orm.Query` when return_query is set
    """
    qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)

    if not include_disabled:
        qry = qry.filter(Account.enabled == 1)

    if account_ids:
        # Accept a single scalar ID as a convenience
        if type(account_ids) not in (list, tuple):
            account_ids = [account_ids]
        qry = qry.filter(Account.account_id.in_(account_ids))

    if account_type_id:
        qry = qry.filter(Account.account_type_id == account_type_id)

    if properties:
        # Each property constraint joins the property table under a fresh
        # alias, so constraints AND across properties while values of one
        # property OR together
        for prop_name, value in properties.items():
            alias = aliased(AccountProperty)
            qry = qry.join(alias, Account.account_id == alias.account_id)
            if type(value) == list:
                where_clause = []
                for item in value:
                    where_clause.append(alias.value == item)
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        or_(*where_clause)
                    ).self_group()
                )
            else:
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        alias.value == value
                    ).self_group()
                )

    if return_query:
        return qry

    total = qry.count()
    return total, list(map(BaseAccount.get_typed_account, qry.all()))
python
{ "resource": "" }
q35849
SQSScheduler.execute_scheduler
train
def execute_scheduler(self):
    """Main entry point for the scheduler.

    Registers two periodic jobs — `schedule_jobs`, which queues the actual
    SQS work, and `process_status_queue`, which tracks job status as the
    workers execute — then runs the scheduler until interrupted.

    Returns:
        `None`
    """
    try:
        startup = {
            # Periodically queue new work for the workers
            'schedule_jobs': dict(
                func=self.schedule_jobs,
                trigger='interval',
                minutes=15,
                start_date=datetime.now() + timedelta(seconds=1)
            ),
            # Track status updates reported back by the workers; never allow
            # overlapping runs of the status processor
            'process_status_queue': dict(
                func=self.process_status_queue,
                trigger='interval',
                seconds=30,
                start_date=datetime.now() + timedelta(seconds=5),
                max_instances=1
            ),
        }
        for job_name, spec in startup.items():
            func = spec.pop('func')
            self.scheduler.add_job(func, name=job_name, **spec)

        self.scheduler.start()
    except KeyboardInterrupt:
        self.scheduler.shutdown()
python
{ "resource": "" }
q35850
SQSScheduler.list_current_jobs
train
def list_current_jobs(self):
    """Return the currently scheduled jobs, keyed by job name.

    The scheduler's own housekeeping jobs (`schedule_jobs` and
    `process_status_queue`) are excluded.

    Returns:
        `dict` of `str`: :obj:`apscheduler/job:Job`
    """
    internal = ('schedule_jobs', 'process_status_queue')
    return {
        job.name: job
        for job in self.scheduler.get_jobs()
        if job.name not in internal
    }
python
{ "resource": "" }
q35851
SQSScheduler.send_worker_queue_message
train
def send_worker_queue_message(self, *, batch_id, job_name, entry_point, worker_args, retry_count=0):
    """Send a message to the `worker_queue` for a worker to execute the requested job

    Args:
        batch_id (`str`): Unique ID of the batch the job belongs to
        job_name (`str`): Non-unique ID of the job, used to ensure the same job is
            only scheduled a single time per batch
        entry_point (`dict`): Entry point information for the worker to load the class
        worker_args (`dict`): Arguments required by the worker class (may be empty)
        retry_count (`int`): Number of times this job has been attempted. A job that
            fails to execute after 3 retries will be marked as failed

    Returns:
        `None`
    """
    try:
        job_id = str(uuid4())
        self.job_queue.send_message(
            MessageBody=json.dumps({
                'batch_id': batch_id,
                'job_id': job_id,
                'job_name': job_name,
                'entry_point': entry_point,
                'worker_args': worker_args,
            }),
            # FIFO queue attributes: dedupe on the job id, group by batch
            MessageDeduplicationId=job_id,
            MessageGroupId=batch_id,
            MessageAttributes={
                'RetryCount': {
                    'StringValue': str(retry_count),
                    'DataType': 'Number'
                }
            }
        )

        # Only create the tracking row on the initial attempt
        if retry_count == 0:
            job = SchedulerJob()
            job.job_id = job_id
            job.batch_id = batch_id
            job.status = SchedulerStatus.PENDING
            job.data = worker_args

            db.session.add(job)
            db.session.commit()
    # BUG FIX: a bare `except:` also swallowed SystemExit / KeyboardInterrupt
    except Exception:
        self.log.exception('Error when processing worker task')
python
{ "resource": "" }
q35852
SQSScheduler.send_status_message
train
def send_status_message(self, object_id, status):
    """Send a message to the `status_queue` to update a job's status.

    Returns `True` if the message was sent, else `False`

    Args:
        object_id (`str`): ID of the job that was executed
        status (:obj:`SchedulerStatus`): Status of the job

    Returns:
        `bool`
    """
    try:
        body = json.dumps({
            'id': object_id,
            'status': status
        })

        self.status_queue.send_message(
            MessageBody=body,
            MessageGroupId='job_status',
            # Dedupe on (id, status) so repeated reports of the same
            # transition only produce a single message
            MessageDeduplicationId=get_hash((object_id, status))
        )
        return True
    except Exception:
        # BUG FIX: failures were printed to stdout via print(ex); route them
        # through the scheduler's logger (used everywhere else in the class)
        self.log.exception('Failed sending status message for job {}'.format(object_id))
        return False
python
{ "resource": "" }
q35853
SQSScheduler.process_status_queue
train
def process_status_queue(self):
    """Process all messages in the `status_queue` and check for any batches
    that need to change status

    Returns:
        `None`
    """
    self.log.debug('Start processing status queue')

    # Drain the status queue, applying each reported status change
    while True:
        messages = self.status_queue.receive_messages(MaxNumberOfMessages=10)
        if not messages:
            break

        for message in messages:
            data = json.loads(message.body)
            job = SchedulerJob.get(data['id'])
            try:
                if job and job.update_status(data['status']):
                    db.session.commit()
            except SchedulerError as ex:
                # Late status reports for an already-finished job are benign
                if hasattr(ex, 'message') and ex.message == 'Attempting to update already completed job':
                    pass

            message.delete()

    # Close any batch that is now complete.
    # BUG FIX: the old code called open_batches.remove(batch) while
    # iterating over open_batches, which skips the element following each
    # removal; collect the batches that remain open into a new list instead.
    open_batches = db.SchedulerBatch.find(SchedulerBatch.status < SchedulerStatus.COMPLETED)
    still_open = []
    for batch in open_batches:
        open_jobs = [job for job in batch.jobs if job.status < SchedulerStatus.COMPLETED]
        if not open_jobs:
            batch.update_status(SchedulerStatus.COMPLETED)
            self.log.debug('Closed completed batch {}'.format(batch.batch_id))
        else:
            still_open.append(batch)
            started_jobs = [job for job in open_jobs if job.status > SchedulerStatus.PENDING]
            if batch.status == SchedulerStatus.PENDING and len(started_jobs) > 0:
                batch.update_status(SchedulerStatus.STARTED)
                self.log.debug('Started batch manually {}'.format(batch.batch_id))

    # Abort any batch (and its unfinished jobs) running for over two hours
    for batch in still_open:
        if batch.started < datetime.now() - timedelta(hours=2):
            self.log.warning('Closing a stale scheduler batch: {}'.format(batch.batch_id))
            for job in batch.jobs:
                if job.status < SchedulerStatus.COMPLETED:
                    job.update_status(SchedulerStatus.ABORTED)
            batch.update_status(SchedulerStatus.ABORTED)

    db.session.commit()
python
{ "resource": "" }
q35854
IAMAuditor.run
train
def run(self, *args, **kwargs):
    """Iterate through all enabled AWS accounts and apply roles and
    policies from Github

    Args:
        *args: Optional list of arguments
        **kwargs: Optional list of keyword arguments

    Returns:
        `None`
    """
    enabled_accounts = AWSAccount.get_all(include_disabled=False)
    self.manage_policies(list(enabled_accounts.values()))
python
{ "resource": "" }
q35855
IAMAuditor.get_policies_from_git
train
def get_policies_from_git(self):
    """Retrieve policies from the Git repo. Returns a dictionary containing
    all the roles and policies

    Returns:
        :obj:`dict` of `str`: `dict`
    """
    fldr = mkdtemp()
    try:
        url = 'https://{token}:x-oauth-basic@{server}/{repo}'.format(**{
            'token': self.dbconfig.get('git_auth_token', self.ns),
            'server': self.dbconfig.get('git_server', self.ns),
            'repo': self.dbconfig.get('git_repo', self.ns)
        })

        policies = {'GLOBAL': {}}
        if self.dbconfig.get('git_no_ssl_verify', self.ns, False):
            os.environ['GIT_SSL_NO_VERIFY'] = '1'

        repo = Repo.clone_from(url, fldr)

        for obj in repo.head.commit.tree:
            name, ext = os.path.splitext(obj.name)

            # Top-level *.json files are the global (standard) policies
            if ext == '.json':
                policies['GLOBAL'][name] = obj.data_stream.read()

            # The roles/ tree holds account/role specific policies
            if name == 'roles' and obj.type == 'tree':
                for account in obj.trees:
                    for role in account.trees:
                        role_policies = {
                            policy.name.replace('.json', ''): policy.data_stream.read()
                            for policy in role.blobs if policy.name.endswith('.json')
                        }

                        # BUG FIX: the old code merged with `+=`, which
                        # raises TypeError on dicts; merge via dict.update()
                        account_roles = policies.setdefault(account.name, {})
                        if role.name in account_roles:
                            account_roles[role.name].update(role_policies)
                        else:
                            account_roles[role.name] = role_policies

        return policies
    finally:
        # Always clean up the temporary clone directory
        if os.path.exists(fldr) and os.path.isdir(fldr):
            shutil.rmtree(fldr)
python
{ "resource": "" }
q35856
IAMAuditor.get_policies_from_aws
train
def get_policies_from_aws(client, scope='Local'):
    """Return all IAM policies of the given scope applied to an AWS account.

    Args:
        client (:obj:`boto3.session.Session`): A boto3 Session object
        scope (`str`): The policy scope to use. Default: Local

    Returns:
        :obj:`list` of `dict`
    """
    policies = []
    marker = None

    # Follow the pagination markers until the API reports the final page
    while True:
        if marker:
            response = client.list_policies(Marker=marker, Scope=scope)
        else:
            response = client.list_policies(Scope=scope)

        policies.extend(response['Policies'])

        if not response['IsTruncated']:
            break
        marker = response['Marker']

    return policies
python
{ "resource": "" }
q35857
IAMAuditor.get_roles
train
def get_roles(client):
    """Return all IAM roles for an account.

    Args:
        client (:obj:`boto3.session.Session`): A boto3 Session object

    Returns:
        :obj:`list` of `dict`
    """
    roles = []
    marker = None

    # Keep requesting pages until the API stops returning a truncation flag
    while True:
        if marker:
            response = client.list_roles(Marker=marker)
        else:
            response = client.list_roles()

        roles.extend(response['Roles'])

        if not response['IsTruncated']:
            break
        marker = response['Marker']

    return roles
python
{ "resource": "" }
q35858
IAMAuditor.create_policy
train
def create_policy(self, account, client, document, name, arn=None):
    """Create a new IAM policy.

    If the policy already exists (an `arn` is given), a new version is added
    and, if needed, the oldest version not in use is removed first. Returns
    a dictionary containing the policy or version information

    Args:
        account (:obj:`Account`): Account to create the policy on
        client (:obj:`boto3.client`): A boto3 client object
        document (`str`): Policy document
        name (`str`): Name of the policy to create / update
        arn (`str`): Optional ARN for the policy to update

    Returns:
        `dict`
    """
    if not arn and not name:
        raise ValueError('create_policy must be called with either arn or name in the argument list')

    if arn:
        response = client.list_policy_versions(PolicyArn=arn)

        # IAM allows at most 5 versions per policy; when at the limit,
        # remove the oldest version that is not the currently active one
        if len(response['Versions']) >= 5:
            version = [x for x in sorted(
                response['Versions'],
                key=lambda k: k['CreateDate']
            ) if not x['IsDefaultVersion']][0]

            self.log.info('Deleting oldest IAM Policy version {}/{}'.format(arn, version['VersionId']))
            client.delete_policy_version(PolicyArn=arn, VersionId=version['VersionId'])
            auditlog(
                event='iam.check_roles.delete_policy_version',
                actor=self.ns,
                data={
                    'account': account.account_name,
                    'policyName': name,
                    'policyArn': arn,
                    'versionId': version['VersionId']
                }
            )

        res = client.create_policy_version(
            PolicyArn=arn,
            PolicyDocument=document,
            SetAsDefault=True
        )
    else:
        res = client.create_policy(
            PolicyName=name,
            PolicyDocument=document
        )

    # Audit every create, whether it was a new policy or a new version
    auditlog(
        event='iam.check_roles.create_policy',
        actor=self.ns,
        data={
            'account': account.account_name,
            'policyName': name,
            'policyArn': arn
        }
    )

    return res
python
{ "resource": "" }
q35859
SlackNotifier.notify
train
def notify(self, subsystem, recipient, subject, body_html, body_text):
    """Deliver a notification to a Slack channel or user.

    Recipients are either channels (``#channel-name``) or direct messages
    (``@username-direct-message``).

    Args:
        subsystem (`str`): Name of the subsystem originating the notification
        recipient (`str`): Recipient
        subject (`str`): Subject / title of the notification
        body_html (`str`): HTML formatted version of the message, not used for this notifier
        body_text (`str`): Text formatted version of the message

    Returns:
        `None`
    """
    if not re.match(self.validation, recipient, re.I):
        raise ValueError('Invalid recipient provided')

    if recipient.startswith('#'):
        target_type = 'channel'
    elif '@' in recipient:
        target_type = 'user'
    else:
        self.log.error('Unknown contact type for Slack: {}'.format(recipient))
        return

    try:
        self._send_message(
            target_type=target_type,
            target=recipient,
            message=body_text,
            title=subject
        )
    except SlackError as ex:
        self.log.error('Failed sending message to {}: {}'.format(recipient, ex))
python
{ "resource": "" }
q35860
SlackNotifier.send_message
train
def send_message(contacts, message):
    """Send a Slack message to a list of contacts.

    Contacts are either channels (``#channel-name``) or direct messages
    (``@username-direct-message``). If the channel is a private group /
    channel, the bot must first be invited to it so it is allowed to post.

    Args:
        contacts (:obj:`list` of `str`, `str`): List of contacts
        message (str): Message to send

    Returns:
        `None`
    """
    if type(contacts) == str:
        contacts = [contacts]

    # De-duplicate so no recipient is messaged twice
    recipients = list(set(contacts))
    send_notification(
        subsystem='UNKNOWN',
        recipients=[NotificationContact('slack', contact) for contact in recipients],
        subject=None,
        body_html=message,
        body_text=message
    )
python
{ "resource": "" }
q35861
_register_default_option
train
def _register_default_option(nsobj, opt):
    """Ensure a default ConfigOption exists in the namespace.

    Creates the ConfigItem if it is missing; if it already exists, only the
    description is refreshed when it has drifted from the option definition.
    """
    item = ConfigItem.get(nsobj.namespace_prefix, opt.name)
    if item:
        if item.description != opt.description:
            logger.info('Updating description of {} / {}'.format(item.namespace_prefix, item.key))
            item.description = opt.description
            db.session.add(item)
        return

    logger.info('Adding {} ({}) = {} to {}'.format(
        opt.name,
        opt.type,
        opt.default_value,
        nsobj.namespace_prefix
    ))
    item = ConfigItem()
    item.namespace_prefix = nsobj.namespace_prefix
    item.key = opt.name
    item.value = opt.default_value
    item.type = opt.type
    item.description = opt.description
    nsobj.config_items.append(item)
python
{ "resource": "" }
q35862
_import_templates
train
def _import_templates(force=False):
    """Import templates from disk into database

    Reads all templates from disk and adds them to the database. By default,
    any template that has been modified by the user will not be updated.
    This can be changed by setting `force` to `True`, which causes all
    templates to be imported regardless of status

    Args:
        force (`bool`): Force overwrite any templates with local changes made. Default: `False`

    Returns:
        `None`
    """
    tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')
    disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files}
    db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}

    for name, template_file in disk_templates.items():
        with open(template_file, 'r') as f:
            body = f.read()
            disk_hash = get_hash(body)

        if name not in db_templates:
            template = Template()
            template.template_name = name
            template.template = body
            db.session.add(template)
            auditlog(
                event='template.import',
                actor='init',
                data={
                    'template_name': name,
                    'template': body
                }
            )
            logger.info('Imported template {}'.format(name))
        else:
            template = db_templates[name]
            db_hash = get_hash(template.template)
            # Only act on templates that actually changed on disk
            if db_hash != disk_hash:
                if force or not db_templates[name].is_modified:
                    # BUG FIX: the diff must be computed before overwriting
                    # template.template, otherwise the new body is diffed
                    # against itself and the audit log always records an
                    # empty diff
                    template_diff = diff(template.template, body)
                    template.template = body
                    db.session.add(template)

                    auditlog(
                        event='template.update',
                        actor='init',
                        data={
                            'template_name': name,
                            'template_diff': template_diff
                        }
                    )
                    logger.info('Updated template {}'.format(name))
                else:
                    logger.warning(
                        'Updated template available for {}. Will not import as it would'
                        ' overwrite user edited content and force is not enabled'.format(name)
                    )
python
{ "resource": "" }
q35863
initialize
train
def initialize():
    """Initialize the application configuration, adding any missing default configuration or roles

    Returns:
        `None`
    """
    global __initialized
    # Only run the bootstrap once per process
    if __initialized:
        return

    # Setup all the default base settings
    try:
        for data in DEFAULT_CONFIG_OPTIONS:
            nsobj = _get_config_namespace(data['prefix'], data['name'], sort_order=data['sort_order'])
            for opt in data['options']:
                _register_default_option(nsobj, opt)
            db.session.add(nsobj)

        # Iterate over all of our plugins and setup their defaults
        for ns, info in CINQ_PLUGINS.items():
            if info['name'] == 'commands':
                continue

            for entry_point in info['plugins']:
                _cls = entry_point.load()
                if hasattr(_cls, 'ns'):
                    ns_name = '{}: {}'.format(info['name'].capitalize(), _cls.name)
                    # Skip plugins whose `options` is still the abstract placeholder
                    if not isinstance(_cls.options, abstractproperty):
                        nsobj = _get_config_namespace(_cls.ns, ns_name)
                        if _cls.options:
                            for opt in _cls.options:
                                _register_default_option(nsobj, opt)
                        db.session.add(nsobj)

        # Create the default roles if they are missing and import any missing or updated templates,
        # if they havent been modified by the user
        _add_default_roles()
        _import_templates()

        db.session.commit()
        dbconfig.reload_data()
        __initialized = True
    except ProgrammingError as ex:
        # MySQL error 1146 (table doesn't exist) means the schema has not
        # been migrated yet
        if str(ex).find('1146') != -1:
            logging.getLogger('cloud_inquisitor').error(
                'Missing required tables, please make sure you run `cloud-inquisitor db upgrade`'
            )
python
{ "resource": "" }
q35864
before_request
train
def before_request():
    """Validate the session and, for mutating requests, the CSRF token.

    SAML and auth endpoints are exempt from both checks.

    Returns:
        `None`
    """
    if request.path.startswith(('/saml', '/auth')):
        return

    # Validate the session has the items we need
    if 'accounts' not in session:
        logger.debug('Missing \'accounts\' from session object, sending user to login page')
        return BaseView.make_unauth_response()

    # Require the CSRF token to be present if we are performing a change
    # action (add, delete or modify objects)
    if request.method in ('POST', 'PUT', 'DELETE',):
        if session['csrf_token'] != request.headers.get('X-Csrf-Token'):
            logger.info('CSRF Token is missing or incorrect, sending user to login page')
            abort(403)
python
{ "resource": "" }
q35865
after_request
train
def after_request(response):
    """Attach CORS headers to the outgoing response.

    Args:
        response (response): Flask response object

    Returns:
        The modified response object
    """
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE'),
    )
    for header, value in cors_headers:
        response.headers.add(header, value)

    return response
python
{ "resource": "" }
q35866
CINQFlask.register_auth_system
train
def register_auth_system(self, auth_system):
    """Register a given authentication system with the framework.

    Returns `True` if the `auth_system` is registered as the active auth
    system, else `False`

    Args:
        auth_system (:obj:`BaseAuthPlugin`): A subclass of the `BaseAuthPlugin` class to register

    Returns:
        `bool`
    """
    auth_system_settings = dbconfig.get('auth_system')

    # Make sure the system is listed in the stored 'available' set,
    # persisting the updated choice list if it was missing
    if auth_system.name not in auth_system_settings['available']:
        auth_system_settings['available'].append(auth_system.name)
        dbconfig.set('default', 'auth_system', DBCChoice(auth_system_settings))

    # Only the single enabled auth system gets activated and bootstrapped
    if auth_system.name == auth_system_settings['enabled'][0]:
        self.active_auth_system = auth_system
        auth_system().bootstrap()
        logger.debug('Registered {} as the active auth system'.format(auth_system.name))
        return True

    else:
        logger.debug('Not trying to load the {} auth system as it is disabled by config'.format(auth_system.name))
        return False
python
{ "resource": "" }
q35867
CINQFlask.register_menu_item
train
def register_menu_item(self, items):
    """Register a view's menu items in the application metadata.

    Items already present in their group are skipped; items referencing an
    unknown group are logged and dropped.

    Args:
        items (`list` of `MenuItem`): A list of `MenuItem`s

    Returns:
        `None`
    """
    for item in items:
        group = self.menu_items.get(item.group)
        if group is None:
            logger.warning('Tried registering menu item to unknown group {}'.format(item.group))
            continue

        # Only add the menu item if we don't already have it registered
        if item not in group['items']:
            group['items'].append(item)
python
{ "resource": "" }
q35868
CINQFlask.__register_types
train
def __register_types(self):
    """Build the `resource_type_id` -> class mapping from the resource
    type entry points.

    Returns:
        `None`
    """
    try:
        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.types']['plugins']:
            type_cls = entry_point.load()
            type_id = ResourceType.get(type_cls.resource_type).resource_type_id
            self.types[type_id] = type_cls
            logger.debug('Registered resource type {}'.format(type_cls.__name__))
    except SQLAlchemyError as ex:
        logger.warning('Failed loading type information: {}'.format(ex))
python
{ "resource": "" }
q35869
CINQFlask.__register_notifiers
train
def __register_notifiers(self):
    """Collect notifier metadata for the frontend.

    Returns:
        `dict` mapping notifier type to its recipient validation regex
    """
    loaded = (
        entry_point.load()
        for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins']
    )
    return {plugin.notifier_type: plugin.validation for plugin in loaded}
python
{ "resource": "" }
q35870
CINQApi.register_views
train
def register_views(self, app):
    """Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask

    Args:
        app (`CINQFlask`): CINQFlask object to register views for

    Returns:
        `None`
    """
    self.add_resource(LoginRedirectView, '/auth/login')
    self.add_resource(LogoutRedirectView, '/auth/logout')

    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:
        cls = entry_point.load()
        app.available_auth_systems[cls.name] = cls

        # Only the active auth system gets its views registered
        if app.register_auth_system(cls):
            for vcls in cls.views:
                self.add_resource(vcls, *vcls.URLS)
                logger.debug('Registered auth system view {} for paths: {}'.format(
                    cls.__name__,
                    ', '.join(vcls.URLS)
                ))

    # An auth system is mandatory; refuse to start without one
    if not app.active_auth_system:
        logger.error('No auth systems active, please enable an auth system and then start the system again')
        sys.exit(-1)

    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.views']['plugins']:
        view = entry_point.load()
        self.add_resource(view, *view.URLS)
        app.register_menu_item(view.MENU_ITEMS)

        logger.debug('Registered view {} for paths: {}'.format(view.__name__, ', '.join(view.URLS)))
python
{ "resource": "" }
q35871
DNSCollector.get_axfr_records
train
def get_axfr_records(self, server, domains):
    """Return a `list` of `dict`s containing the zones and their records, obtained from the DNS server

    Args:
        server: DNS server to perform the zone transfer (AXFR) against
        domains: Iterable of zone names to fetch

    Returns:
        :obj:`list` of `dict`
    """
    zones = []
    for zoneName in domains:
        try:
            zone = {
                'zone_id': get_resource_id('axfrz', zoneName),
                'name': zoneName,
                'source': 'AXFR',
                'comment': None,
                'tags': {},
                'records': []
            }

            z = dns_zone.from_xfr(query.xfr(server, zoneName))
            rdata_fields = ('name', 'ttl', 'rdata')
            for rr in [dict(zip(rdata_fields, x)) for x in z.iterate_rdatas()]:
                # Expand relative names to fully qualified record names
                record_name = rr['name'].derelativize(z.origin).to_text()
                zone['records'].append(
                    {
                        'id': get_resource_id('axfrr', record_name, ['{}={}'.format(k, str(v)) for k, v in rr.items()]),
                        'zone_id': zone['zone_id'],
                        'name': record_name,
                        'value': sorted([rr['rdata'].to_text()]),
                        'type': type_to_text(rr['rdata'].rdtype)
                    })

            # Only return zones that actually contain records
            if len(zone['records']) > 0:
                zones.append(zone)

        except Exception as ex:
            # Any zone failure aborts the whole collection run
            self.log.exception('Failed fetching DNS zone information for {}: {}'.format(zoneName, ex))
            raise

    return zones
python
{ "resource": "" }
q35872
DNSCollector.get_cloudflare_records
train
def get_cloudflare_records(self, *, account):
    """Return a `list` of `dict`s containing the zones and their records, obtained from the CloudFlare API

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object

    Returns:
        :obj:`list` of `dict`
    """
    zones = []

    for zobj in self.__cloudflare_list_zones(account=account):
        try:
            self.log.debug('Processing DNS zone CloudFlare/{}'.format(zobj['name']))
            zone = {
                'zone_id': get_resource_id('cfz', zobj['name']),
                'name': zobj['name'],
                'source': 'CloudFlare',
                'comment': None,
                'tags': {},
                'records': []
            }

            for record in self.__cloudflare_list_zone_records(account=account, zoneID=zobj['id']):
                zone['records'].append({
                    'id': get_resource_id('cfr', zobj['id'], ['{}={}'.format(k, v) for k, v in record.items()]),
                    'zone_id': zone['zone_id'],
                    'name': record['name'],
                    'value': record['value'],
                    'type': record['type']
                })

            # Skip zones with no records at all
            if len(zone['records']) > 0:
                zones.append(zone)
        except CloudFlareError:
            # A failing zone should not abort collection of the others
            self.log.exception('Failed getting records for CloudFlare zone {}'.format(zobj['name']))

    return zones
python
{ "resource": "" }
q35873
DNSCollector.__cloudflare_request
train
def __cloudflare_request(self, *, account, path, args=None):
    """Helper function to interact with the CloudFlare API.

    Args:
        account (:obj:`CloudFlareAccount`): CloudFlare Account object
        path (`str`): URL endpoint to communicate with
        args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume

    Returns:
        `dict`

    Raises:
        CloudFlareError: if the API responds with a non-200 status code
    """
    if not args:
        args = {}

    # Lazily create and cache one authenticated session per account
    if not self.cloudflare_initialized[account.account_id]:
        self.cloudflare_session[account.account_id] = requests.Session()
        self.cloudflare_session[account.account_id].headers.update({
            'X-Auth-Email': account.email,
            'X-Auth-Key': account.api_key,
            'Content-Type': 'application/json'
        })
        self.cloudflare_initialized[account.account_id] = True

    # Default to the maximum page size unless the caller overrides it
    if 'per_page' not in args:
        args['per_page'] = 100

    response = self.cloudflare_session[account.account_id].get(account.endpoint + path, params=args)
    if response.status_code != 200:
        raise CloudFlareError('Request failed: {}'.format(response.text))

    return response.json()
python
{ "resource": "" }
q35874
DNSCollector.__cloudflare_list_zones
train
def __cloudflare_list_zones(self, *, account, **kwargs):
    """Helper function to list all zones registered in the CloudFlare
    system. Returns a `list` of the zones

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object
        **kwargs (`dict`): Extra arguments to pass to the API endpoint

    Returns:
        `list` of `dict`
    """
    done = False
    zones = []
    page = 1

    while not done:
        kwargs['page'] = page
        response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)
        info = response['result_info']

        # CONSISTENCY FIX: use >= like __cloudflare_list_zone_records does;
        # a strict equality check can loop forever if the zone count shrinks
        # between requests and total_pages drops below the current page
        if 'total_pages' not in info or page >= info['total_pages']:
            done = True
        else:
            page += 1

        zones += response['result']

    return zones
python
{ "resource": "" }
q35875
DNSCollector.__cloudflare_list_zone_records
train
def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):
    """Helper function to list all records on a CloudFlare DNS Zone.

    Records sharing a name are merged into a single entry carrying a sorted
    list of values.

    Args:
        account (:obj:`CloudFlareAccount`): A CloudFlare Account object
        zoneID (`int`): Internal CloudFlare ID of the DNS zone
        **kwargs (`dict`): Additional arguments to be consumed by the API endpoint

    Returns:
        :obj:`list` of `dict`
    """
    done = False
    records = {}
    page = 1

    while not done:
        kwargs['page'] = page
        response = self.__cloudflare_request(
            account=account,
            path='/zones/{}/dns_records'.format(zoneID),
            args=kwargs
        )
        info = response['result_info']

        # Check if we have received all records, and if not iterate over the result set
        if 'total_pages' not in info or page >= info['total_pages']:
            done = True
        else:
            page += 1

        for record in response['result']:
            # Merge multi-value records (e.g. round-robin A records) under
            # a single name with a sorted value list
            if record['name'] in records:
                records[record['name']]['value'] = sorted(records[record['name']]['value'] + [record['content']])
            else:
                records[record['name']] = {
                    'name': record['name'],
                    'value': sorted([record['content']]),
                    'type': record['type']
                }

    return list(records.values())
python
{ "resource": "" }
q35876
CloudTrailAuditor.run
train
def run(self, *args, **kwargs):
    """Entry point for the scheduler

    Args:
        *args: Optional arguments
        **kwargs: Optional keyword arguments

    Returns:
        None
    """
    enabled_accounts = list(AWSAccount.get_all(include_disabled=False).values())

    # Resolve the destination bucket settings and ensure the bucket exists
    # with the expected policy before touching any account's trails
    acl_template = get_template('cloudtrail_s3_bucket_policy.json')
    bucket_name = self.dbconfig.get('bucket_name', self.ns)
    bucket_region = self.dbconfig.get('bucket_region', self.ns, 'us-west-2')
    bucket_account = AWSAccount.get(self.dbconfig.get('bucket_account', self.ns))
    CloudTrail.create_s3_bucket(bucket_name, bucket_region, bucket_account, acl_template)

    # Make sure every enabled account can deliver to the shared SQS queue
    self.validate_sqs_policy(enabled_accounts)

    for account in enabled_accounts:
        CloudTrail(account, bucket_name, bucket_region, self.log).run()
python
{ "resource": "" }
q35877
CloudTrailAuditor.validate_sqs_policy
train
def validate_sqs_policy(self, accounts):
    """Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue

    Args:
        accounts (`list` of :obj:`Account`): List of accounts

    Returns:
        `None`
    """
    queue_name = self.dbconfig.get('sqs_queue_name', self.ns)
    queue_region = self.dbconfig.get('sqs_queue_region', self.ns)
    queue_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns))

    sqs = get_aws_session(queue_account).client('sqs', region_name=queue_region)
    queue_url = sqs.get_queue_url(
        QueueName=queue_name,
        QueueOwnerAWSAccountId=queue_account.account_number
    )['QueueUrl']

    attribs = sqs.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['Policy'])
    policy = json.loads(attribs['Attributes']['Policy'])

    # The first statement's ArnEquals condition holds the SNS topic ARNs
    # allowed to publish; add any account whose topic is missing
    source_arns = policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']
    for account in accounts:
        arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, queue_name)
        if arn not in source_arns:
            self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))
            source_arns.append(arn)
            sqs.set_queue_attributes(QueueUrl=queue_url, Attributes={'Policy': json.dumps(policy)})
python
{ "resource": "" }
q35878
CloudTrail.run
train
def run(self):
    """Configures and enables a CloudTrail trail and logging on a single AWS Account.

    Has the capability to create both single region and multi-region trails. Will automatically create SNS
    topics, subscribe to SQS queues and turn on logging for the account in question, as well as reverting
    any manual changes to the trails if applicable.

    Returns:
        None
    """
    for aws_region in AWS_REGIONS:
        self.log.debug('Checking trails for {}/{}'.format(
            self.account.account_name,
            aws_region
        ))
        ct = self.session.client('cloudtrail', region_name=aws_region)
        trails = ct.describe_trails()

        if len(trails['trailList']) == 0:
            # No trails at all: only create one in the designated global region,
            # a multi-region trail there covers every other region
            if aws_region == self.global_ct_region:
                self.create_cloudtrail(aws_region)
        else:
            for trail in trails['trailList']:
                # Only touch trails we manage (our own name or the legacy 'Default')
                if trail['Name'] in ('Default', self.trail_name):
                    if not trail['IsMultiRegionTrail']:
                        if trail['Name'] == self.trail_name and self.global_ct_region == aws_region:
                            # Upgrade our single-region trail in the global
                            # region to a multi-region trail
                            ct.update_trail(
                                Name=trail['Name'],
                                IncludeGlobalServiceEvents=True,
                                IsMultiRegionTrail=True
                            )
                            auditlog(
                                event='cloudtrail.update_trail',
                                actor=self.ns,
                                data={
                                    'trailName': trail['Name'],
                                    'account': self.account.account_name,
                                    'region': aws_region,
                                    'changes': [
                                        {
                                            'setting': 'IsMultiRegionTrail',
                                            'oldValue': False,
                                            'newValue': True
                                        }
                                    ]
                                }
                            )
                        else:
                            # Bug fix: boto3 delete_trail requires the 'Name'
                            # keyword; the lowercase 'name' raised a
                            # ParamValidationError on this path
                            ct.delete_trail(Name=trail['Name'])
                            auditlog(
                                event='cloudtrail.delete_trail',
                                actor=self.ns,
                                data={
                                    'trailName': trail['Name'],
                                    'account': self.account.account_name,
                                    'region': aws_region,
                                    'reason': 'Incorrect region, name or not multi-regional'
                                }
                            )
                    else:
                        # Multi-region trails are only acted upon from their home region
                        if trail['HomeRegion'] == aws_region:
                            if self.global_ct_region != aws_region or trail['Name'] == 'Default':
                                ct.delete_trail(Name=trail['Name'])
                                auditlog(
                                    event='cloudtrail.delete_trail',
                                    actor=self.ns,
                                    data={
                                        'trailName': trail['Name'],
                                        'account': self.account.account_name,
                                        'region': aws_region,
                                        'reason': 'Incorrect name or region for multi-region trail'
                                    }
                                )

        # Re-read the trail list after any mutations and validate the
        # settings of the trail we manage in this region
        trails = ct.describe_trails()
        for trail in trails['trailList']:
            if trail['Name'] == self.trail_name and trail['HomeRegion'] == aws_region:
                self.validate_trail_settings(ct, aws_region, trail)
python
{ "resource": "" }
q35879
CloudTrail.validate_trail_settings
train
def validate_trail_settings(self, ct, aws_region, trail):
    """Validates logging, SNS and S3 settings for the global trail.

    Has the capability to:

    - start logging for the trail
    - create SNS topics & queues
    - configure or modify a S3 bucket for logging
    """
    trail_name = trail['Name']
    self.log.debug('Validating trail {}/{}/{}'.format(
        self.account.account_name,
        aws_region,
        trail_name
    ))

    # Logging must be enabled for the trail to be of any use
    if not ct.get_trail_status(Name=trail_name)['IsLogging']:
        self.log.warning('Logging is disabled for {}/{}/{}'.format(
            self.account.account_name,
            aws_region,
            trail_name
        ))
        self.start_logging(aws_region, trail_name)

    # SNS notifications feed the downstream SQS queue
    if not trail.get('SnsTopicName'):
        self.log.warning('SNS Notifications not enabled for {}/{}/{}'.format(
            self.account.account_name,
            aws_region,
            trail_name
        ))
        self.create_sns_topic(aws_region)
        self.enable_sns_notification(aws_region, trail_name)

    if not self.validate_sns_topic_subscription(aws_region):
        self.log.warning(
            'SNS Notification configured but not subscribed for {}/{}/{}'.format(
                self.account.account_name,
                aws_region,
                trail_name
            )
        )
        self.subscribe_sns_topic_to_sqs(aws_region)

    if trail['S3BucketName'] != self.bucket_name:
        # NOTE(review): this message reports the bucket name in the slot the
        # sibling messages use for the region — looks intentional (shows the
        # offending bucket) but worth confirming
        self.log.warning('CloudTrail is logging to an incorrect bucket for {}/{}/{}'.format(
            self.account.account_name,
            trail['S3BucketName'],
            trail_name
        ))
        self.set_s3_bucket(aws_region, trail_name, self.bucket_name)

    # Each account logs under its own key prefix in the shared bucket
    if trail.get('S3KeyPrefix') != self.account.account_name:
        self.log.warning('Missing or incorrect S3KeyPrefix for {}/{}/{}'.format(
            self.account.account_name,
            aws_region,
            trail_name
        ))
        self.set_s3_prefix(aws_region, trail_name)
python
{ "resource": "" }
q35880
CloudTrail.create_sns_topic
train
def create_sns_topic(self, region):
    """Creates an SNS topic if needed. Returns the ARN if the created SNS topic

    Args:
        region (str): Region name

    Returns:
        `str`
    """
    self.log.info('Creating SNS topic for {}/{}'.format(self.account, region))

    sns = self.session.client('sns', region_name=region)

    # create_topic is idempotent and returns the ARN of an existing topic
    topic_arn = sns.create_topic(Name=self.topic_name)['TopicArn']

    # Attach a policy allowing CloudTrail to publish to this topic
    policy = get_template('cloudtrail_sns_policy.json').render(
        region=region,
        account_id=self.account.account_number,
        topic_name=self.topic_name
    )
    sns.set_topic_attributes(TopicArn=topic_arn, AttributeName='Policy', AttributeValue=policy)

    auditlog(
        event='cloudtrail.create_sns_topic',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )

    return topic_arn
python
{ "resource": "" }
q35881
CloudTrail.validate_sns_topic_subscription
train
def validate_sns_topic_subscription(self, region):
    """Validates SQS subscription to the SNS topic. Returns `True` if subscribed or `False` if
    not subscribed or topic is missing

    Args:
        region (str): Name of AWS Region

    Returns:
        `bool`
    """
    topic_arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)
    sns = self.session.client('sns', region_name=region)

    try:
        subscriptions = sns.list_subscriptions_by_topic(TopicArn=topic_arn)['Subscriptions']
    except ClientError as ex:
        self.log.error('Failed to list subscriptions by topic in {} ({}): {}'.format(
            self.account.account_name,
            region,
            ex
        ))
        return False

    for subscription in subscriptions:
        if subscription['Endpoint'] != self.sqs_queue:
            continue

        # A subscription that was never confirmed does not deliver messages
        if subscription['SubscriptionArn'] == 'PendingConfirmation':
            self.log.warning('Subscription pending confirmation for {} in {}'.format(
                self.account.account_name,
                region
            ))
            return False

        return True

    return False
python
{ "resource": "" }
q35882
CloudTrail.subscribe_sns_topic_to_sqs
train
def subscribe_sns_topic_to_sqs(self, region):
    """Subscribe SQS to the SNS topic. Returns the ARN of the SNS Topic subscribed

    Args:
        region (`str`): Name of the AWS region

    Returns:
        `str`
    """
    topic_arn = 'arn:aws:sns:{}:{}:{}'.format(region, self.account.account_number, self.topic_name)
    topic = self.session.resource('sns', region_name=region).Topic(topic_arn)
    topic.subscribe(Protocol='sqs', Endpoint=self.sqs_queue)

    auditlog(
        event='cloudtrail.subscribe_sns_topic_to_sqs',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )

    return topic.attributes['TopicArn']
python
{ "resource": "" }
q35883
CloudTrail.create_cloudtrail
train
def create_cloudtrail(self, region):
    """Creates a new CloudTrail Trail

    Args:
        region (str): Name of the AWS region

    Returns:
        `None`
    """
    # The SNS topic must exist before the trail can reference it
    self.create_sns_topic(region)

    ct = self.session.client('cloudtrail', region_name=region)
    ct.create_trail(
        Name=self.trail_name,
        S3BucketName=self.bucket_name,
        S3KeyPrefix=self.account.account_name,
        IsMultiRegionTrail=True,
        IncludeGlobalServiceEvents=True,
        SnsTopicName=self.topic_name
    )
    self.subscribe_sns_topic_to_sqs(region)

    auditlog(
        event='cloudtrail.create_cloudtrail',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Created CloudTrail for {} in {} ({})'.format(self.account, region, self.bucket_name))
python
{ "resource": "" }
q35884
CloudTrail.enable_sns_notification
train
def enable_sns_notification(self, region, trailName):
    """Enable SNS notifications for a Trail

    Args:
        region (`str`): Name of the AWS region
        trailName (`str`): Name of the CloudTrail Trail

    Returns:
        `None`
    """
    client = self.session.client('cloudtrail', region_name=region)
    client.update_trail(Name=trailName, SnsTopicName=self.topic_name)

    auditlog(
        event='cloudtrail.enable_sns_notification',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Enabled SNS notifications for trail {} in {}/{}'.format(
        trailName,
        self.account.account_name,
        region
    ))
python
{ "resource": "" }
q35885
CloudTrail.start_logging
train
def start_logging(self, region, name):
    """Turn on logging for a CloudTrail Trail

    Args:
        region (`str`): Name of the AWS region
        name (`str`): Name of the CloudTrail Trail

    Returns:
        `None`
    """
    client = self.session.client('cloudtrail', region_name=region)
    client.start_logging(Name=name)

    auditlog(
        event='cloudtrail.start_logging',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Enabled logging for {} ({})'.format(name, region))
python
{ "resource": "" }
q35886
CloudTrail.set_s3_prefix
train
def set_s3_prefix(self, region, name):
    """Sets the S3 prefix for a CloudTrail Trail

    The prefix is always the account name, so each account's logs live under
    their own key space in the shared bucket.

    Args:
        region (`str`): Name of the AWS region
        name (`str`): Name of the CloudTrail Trail

    Returns:
        `None`
    """
    client = self.session.client('cloudtrail', region_name=region)
    client.update_trail(Name=name, S3KeyPrefix=self.account.account_name)

    auditlog(
        event='cloudtrail.set_s3_prefix',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format(
        self.account.account_name,
        region
    ))
python
{ "resource": "" }
q35887
CloudTrail.set_s3_bucket
train
def set_s3_bucket(self, region, name, bucketName):
    """Sets the S3 bucket location for logfile delivery

    Args:
        region (`str`): Name of the AWS region
        name (`str`): Name of the CloudTrail Trail
        bucketName (`str`): Name of the S3 bucket to deliver log files to

    Returns:
        `None`
    """
    client = self.session.client('cloudtrail', region_name=region)
    client.update_trail(Name=name, S3BucketName=bucketName)

    auditlog(
        event='cloudtrail.set_s3_bucket',
        actor=self.ns,
        data={
            'account': self.account.account_name,
            'region': region
        }
    )
    self.log.info('Updated S3BucketName to {} for {} in {}/{}'.format(
        bucketName,
        name,
        self.account.account_name,
        region
    ))
python
{ "resource": "" }
q35888
CloudTrail.create_s3_bucket
train
def create_s3_bucket(cls, bucket_name, bucket_region, bucket_account, template):
    """Creates the S3 bucket on the account specified as the destination account for log files

    Args:
        bucket_name (`str`): Name of the S3 bucket
        bucket_region (`str`): AWS Region for the bucket
        bucket_account (:obj:`Account`): Account to create the S3 bucket in
        template (:obj:`Template`): Jinja2 Template object for the bucket policy

    Returns:
        `None`
    """
    s3 = get_aws_session(bucket_account).client('s3', region_name=bucket_region)

    # Check to see if the bucket already exists and if we have access to it
    try:
        s3.head_bucket(Bucket=bucket_name)
    except ClientError as ex:
        status_code = ex.response['ResponseMetadata']['HTTPStatusCode']

        # Bucket exists and we do not have access
        if status_code == 403:
            raise Exception('Bucket {} already exists but we do not have access to it and so cannot continue'.format(
                bucket_name
            )) from ex

        # Bucket does not exist, lets create one
        elif status_code == 404:
            try:
                s3.create_bucket(
                    Bucket=bucket_name,
                    CreateBucketConfiguration={
                        'LocationConstraint': bucket_region
                    }
                )
                auditlog(
                    event='cloudtrail.create_s3_bucket',
                    actor=cls.ns,
                    data={
                        'account': bucket_account.account_name,
                        'bucket_region': bucket_region,
                        'bucket_name': bucket_name
                    }
                )
            except Exception as err:
                # Bug fix: chain the original exception so the real
                # create_bucket failure is not silently discarded; also fixes
                # the 'occured' typo in the message
                raise Exception('An error occurred while trying to create the bucket, cannot continue') from err

    try:
        bucket_acl = template.render(
            bucket_name=bucket_name,
            account_id=bucket_account.account_number
        )
        s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_acl)
    except Exception as ex:
        # NOTE(review): raising a Warning here is unusual but preserved —
        # callers may rely on catching Warning rather than Exception
        raise Warning('An error occurred while setting bucket policy: {}'.format(ex)) from ex
python
{ "resource": "" }
q35889
VPCFlowLogsAuditor.run
train
def run(self):
    """Main entry point for the auditor worker.

    Returns:
        `None`
    """
    # Only process accounts that are marked as enabled
    for account in list(AWSAccount.get_all(include_disabled=False).values()):
        self.log.debug('Updating VPC Flow Logs for {}'.format(account))
        self.session = get_aws_session(account)
        role_arn = self.confirm_iam_role(account)

        # Flow logs are configured per region
        for aws_region in AWS_REGIONS:
            try:
                candidates = [
                    vpc for vpc in VPC.get_all(account, aws_region).values()
                    if vpc.vpc_flow_logs_status != 'ACTIVE'
                ]
                for vpc in candidates:
                    if self.confirm_cw_log(account, aws_region, vpc.id):
                        self.create_vpc_flow_logs(account, aws_region, vpc.id, role_arn)
                    else:
                        self.log.info('Failed to confirm log group for {}/{}'.format(
                            account,
                            aws_region
                        ))
            except Exception:
                self.log.exception('Failed processing VPCs for {}/{}.'.format(
                    account,
                    aws_region
                ))

        db.session.commit()
python
{ "resource": "" }
q35890
VPCFlowLogsAuditor.confirm_iam_role
train
def confirm_iam_role(self, account):
    """Return the ARN of the IAM role used for VPC Flow Log delivery, creating the role if it is missing.

    Args:
        account (:obj:`Account`): Account where to locate the role

    Returns:
        `str`: The role ARN, or `None` if the lookup failed unexpectedly
    """
    try:
        iam = self.session.client('iam')
        return iam.get_role(RoleName=self.role_name)['Role']['Arn']
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchEntity':
            # Bug fix: the newly created role's ARN was previously discarded,
            # so callers received None on the first run for an account
            return self.create_iam_role(account)
        raise
    except Exception as e:
        self.log.exception('Failed validating IAM role for VPC Flow Log Auditing for {}'.format(e))
python
{ "resource": "" }
q35891
VPCFlowLogsAuditor.create_iam_role
train
def create_iam_role(self, account):
    """Create a new IAM role. Returns the ARN of the newly created role

    Args:
        account (:obj:`Account`): Account where to create the IAM role

    Returns:
        `str`
    """
    try:
        iam = self.session.client('iam')
        trust = get_template('vpc_flow_logs_iam_role_trust.json').render()
        policy = get_template('vpc_flow_logs_role_policy.json').render()

        role_arn = iam.create_role(
            Path='/',
            RoleName=self.role_name,
            AssumeRolePolicyDocument=trust
        )['Role']['Arn']

        # Use an inline policy rather than a managed one to avoid conflicts
        # and the managed-policy attachment limit
        iam.put_role_policy(
            RoleName=self.role_name,
            PolicyName='VpcFlowPolicy',
            PolicyDocument=policy
        )

        self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))
        auditlog(
            event='vpc_flow_logs.create_iam_role',
            actor=self.ns,
            data={
                'account': account.account_name,
                'roleName': self.role_name,
                'trustRelationship': trust,
                'inlinePolicy': policy
            }
        )
        return role_arn
    except Exception:
        self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))
python
{ "resource": "" }
q35892
VPCFlowLogsAuditor.confirm_cw_log
train
def confirm_cw_log(self, account, region, vpcname):
    """Ensure a CloudWatch log group named after the VPC exists, creating it if needed.

    Returns `True` if the log group exists or was created successfully.

    Args:
        account (:obj:`Account`): Account to create the log group in
        region (`str`): Region to create the log group in
        vpcname (`str`): Name of the VPC the log group is for

    Returns:
        `bool`
    """
    try:
        cw = self.session.client('logs', region)

        # Page through all existing log groups in the region
        token = None
        log_groups = []
        while True:
            result = cw.describe_log_groups() if not token else cw.describe_log_groups(nextToken=token)
            token = result.get('nextToken')
            log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])
            if not token:
                break

        if vpcname not in log_groups:
            cw.create_log_group(logGroupName=vpcname)

            cw_vpc = VPC.get(vpcname)
            cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)

            self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))
            auditlog(
                event='vpc_flow_logs.create_cw_log_group',
                actor=self.ns,
                data={
                    'account': account.account_name,
                    'region': region,
                    'log_group_name': vpcname,
                    'vpc': vpcname
                }
            )

        # Bug fix: previously returned None (falsy) when the log group
        # already existed, which the caller treated as a failure and so
        # skipped enabling flow logs for already-prepared VPCs
        return True
    except Exception:
        self.log.exception('Failed creating log group for {}/{}/{}.'.format(
            account,
            region,
            vpcname
        ))
        return False
python
{ "resource": "" }
q35893
VPCFlowLogsAuditor.create_vpc_flow_logs
train
def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):
    """Create a new VPC Flow log

    Args:
        account (:obj:`Account`): Account to create the flow in
        region (`str`): Region to create the flow in
        vpc_id (`str`): ID of the VPC to create the flow for
        iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group

    Returns:
        `None`
    """
    try:
        ec2 = self.session.client('ec2', region)
        ec2.create_flow_logs(
            ResourceIds=[vpc_id],
            ResourceType='VPC',
            TrafficType='ALL',
            LogGroupName=vpc_id,
            DeliverLogsPermissionArn=iam_role_arn
        )

        # Record the new state on our VPC resource so the auditor skips it next run
        vpc_resource = VPC.get(vpc_id)
        vpc_resource.set_property('vpc_flow_logs_status', 'ACTIVE')

        self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))
        auditlog(
            event='vpc_flow_logs.create_vpc_flow',
            actor=self.ns,
            data={
                'account': account.account_name,
                'region': region,
                'vpcId': vpc_id,
                'arn': iam_role_arn
            }
        )
    except Exception:
        self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(
            account,
            region,
            vpc_id
        ))
python
{ "resource": "" }
q35894
RequiredTagsAuditor.get_contacts
train
def get_contacts(self, issue):
    """Returns a list of contacts for an issue

    Args:
        issue (:obj:`RequiredTagsIssue`): Issue record

    Returns:
        `list` of `dict`
    """
    # If the resource has been deleted, just return an empty list, to trigger issue deletion without notification
    if not issue.resource:
        return []

    # Bug fix: copy the account's contact list before appending; the previous
    # code appended owner emails directly to issue.resource.account.contacts,
    # polluting the shared account contact list across issues
    contacts = list(issue.resource.account.contacts)

    try:
        resource_owners = issue.resource.get_owner_emails()
        # get_owner_emails may return something other than a list; only
        # iterate when it is actually a list
        if isinstance(resource_owners, list):
            for resource_owner in resource_owners:
                contacts.append({'type': 'email', 'value': resource_owner})
    except AttributeError:
        # Resource type does not implement get_owner_emails
        pass

    return contacts
python
{ "resource": "" }
q35895
RequiredTagsAuditor.get_actions
train
def get_actions(self, issues):
    """Returns a list of actions to executed

    Args:
        issues (`list` of :obj:`RequiredTagsIssue`): List of issues

    Returns:
        `list` of `dict`
    """
    pending = []
    try:
        for issue in issues:
            item = self.determine_action(issue)
            if item['action'] == AuditActions.IGNORE:
                continue
            item['owners'] = self.get_contacts(issue)
            pending.append(item)
    finally:
        # Discard any uncommitted session state left over from
        # determine_action — presumably defensive; confirm intent
        db.session.rollback()

    return pending
python
{ "resource": "" }
q35896
RequiredTagsAuditor.determine_alert
train
def determine_alert(self, action_schedule, issue_creation_time, last_alert):
    """Determine if we need to trigger an alert

    Args:
        action_schedule (`list`): A list contains the alert schedule
        issue_creation_time (`int`): Time we create the issue
        last_alert (`str`): Time we sent the last alert

    Returns:
        (`None` or `str`) None if no alert should be sent. Otherwise return the alert we should send
    """
    issue_age = time.time() - issue_creation_time
    last_alert_age = pytimeparse.parse(last_alert)

    # Map parsed offsets back to their original schedule strings so we can
    # return the matching schedule entry, then walk offsets in ascending order
    schedule_by_offset = {pytimeparse.parse(entry): entry for entry in action_schedule}

    for offset in sorted(schedule_by_offset):
        # Fire the first schedule entry that is newer than the last alert
        # and already due given the issue's age
        if last_alert_age < offset <= issue_age and last_alert_age != offset:
            return schedule_by_offset[offset]

    return None
python
{ "resource": "" }
q35897
RequiredTagsAuditor.determine_action
train
def determine_action(self, issue):
    """Determine the action we should take for the issue

    Picks between IGNORE / REMOVE / STOP / ALERT based on the issue's age
    and the per-resource-type alert schedule, and persists any last_alert
    update on the issue record.

    Args:
        issue: Issue to determine action for

    Returns:
        `dict`
    """
    resource_type = self.resource_types[issue.resource.resource_type_id]
    # Fall back to the wildcard schedule when the resource type has no
    # dedicated entry
    issue_alert_schedule = self.alert_schedule[resource_type] if \
        resource_type in self.alert_schedule \
        else self.alert_schedule['*']

    action_item = {
        'action': None,
        'action_description': None,
        'last_alert': issue.last_alert,
        'issue': issue,
        'resource': self.resource_classes[self.resource_types[issue.resource.resource_type_id]](issue.resource),
        'owners': [],
        'stop_after': issue_alert_schedule['stop'],
        'remove_after': issue_alert_schedule['remove'],
        'notes': issue.notes,
        'missing_tags': issue.missing_tags
    }

    # Age of the issue in seconds, compared against the parsed schedule offsets
    time_elapsed = time.time() - issue.created
    stop_schedule = pytimeparse.parse(issue_alert_schedule['stop'])
    remove_schedule = pytimeparse.parse(issue_alert_schedule['remove'])

    if self.collect_only:
        # Collection-only mode never acts on issues
        action_item['action'] = AuditActions.IGNORE
    elif remove_schedule and time_elapsed >= remove_schedule:
        action_item['action'] = AuditActions.REMOVE
        action_item['action_description'] = 'Resource removed'
        action_item['last_alert'] = remove_schedule
        if issue.update({'last_alert': remove_schedule}):
            db.session.add(issue.issue)
    elif stop_schedule and time_elapsed >= stop_schedule:
        action_item['action'] = AuditActions.STOP
        action_item['action_description'] = 'Resource stopped'
        action_item['last_alert'] = stop_schedule
        if issue.update({'last_alert': stop_schedule}):
            db.session.add(issue.issue)
    else:
        # Not yet due for stop/remove: see if an alert threshold was crossed
        alert_selection = self.determine_alert(
            issue_alert_schedule['alert'],
            issue.get_property('created').value,
            issue.get_property('last_alert').value
        )
        if alert_selection:
            action_item['action'] = AuditActions.ALERT
            action_item['action_description'] = '{} alert'.format(alert_selection)
            action_item['last_alert'] = alert_selection
            if issue.update({'last_alert': alert_selection}):
                db.session.add(issue.issue)
        else:
            action_item['action'] = AuditActions.IGNORE
    db.session.commit()

    return action_item
python
{ "resource": "" }
q35898
RequiredTagsAuditor.process_actions
train
def process_actions(self, actions):
    """Process the actions we want to take

    Executes each action (remove/stop/alert/fixed), updates or deletes the
    backing issue records, and builds a per-contact map of notifications
    to send.

    Args:
        actions (`list`): List of actions we want to take

    Returns:
        `list` of notifications
    """
    notices = {}
    # Dedup cache: owner email -> NotificationContact, so each contact gets
    # a single notices entry
    notification_contacts = {}
    for action in actions:
        resource = action['resource']
        action_status = ActionStatus.SUCCEED
        try:
            if action['action'] == AuditActions.REMOVE:
                action_status = self.process_action(
                    resource,
                    AuditActions.REMOVE
                )
                # The issue record is only deleted once the removal succeeded
                if action_status == ActionStatus.SUCCEED:
                    db.session.delete(action['issue'].issue)

            elif action['action'] == AuditActions.STOP:
                action_status = self.process_action(
                    resource,
                    AuditActions.STOP
                )
                if action_status == ActionStatus.SUCCEED:
                    action['issue'].update({
                        'missing_tags': action['missing_tags'],
                        'notes': action['notes'],
                        'last_alert': action['last_alert'],
                        'state': action['action']
                    })

            elif action['action'] == AuditActions.FIXED:
                # Fixed issues need no resource action, just record cleanup
                db.session.delete(action['issue'].issue)

            elif action['action'] == AuditActions.ALERT:
                action['issue'].update({
                    'missing_tags': action['missing_tags'],
                    'notes': action['notes'],
                    'last_alert': action['last_alert'],
                    'state': action['action']
                })
            db.session.commit()

            if action_status == ActionStatus.SUCCEED:
                # Dedup the owner dicts (tuple-of-items trick) and merge in
                # the always-notified permanent emails
                for owner in [
                    dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}
                ]:
                    if owner['value'] not in notification_contacts:
                        contact = NotificationContact(type=owner['type'], value=owner['value'])
                        notification_contacts[owner['value']] = contact
                        notices[contact] = {
                            'fixed': [],
                            'not_fixed': []
                        }
                    else:
                        contact = notification_contacts[owner['value']]

                    if action['action'] == AuditActions.FIXED:
                        notices[contact]['fixed'].append(action)
                    else:
                        notices[contact]['not_fixed'].append(action)
        except Exception as ex:
            # One failing resource must not abort processing of the rest
            self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(
                action['resource'].account.account_name,
                action['resource'].id,
                action['resource'],
                ex
            ))

    return notices
python
{ "resource": "" }
q35899
RequiredTagsAuditor.validate_tag
train
def validate_tag(self, key, value):
    """Check whether a tag value is valid

    Args:
        key: A tag key
        value: A tag value

    Returns:
        `(True or False)` A boolean indicating whether or not the value is valid
    """
    # Owner tags must hold a (possibly partial) email address
    if key == 'owner':
        return validate_email(value, self.partial_owner_match)

    # GDPR tags are restricted to the configured allow-list of values
    if key == self.gdpr_tag:
        return value in self.gdpr_tag_values

    # All other tags accept any value
    return True
python
{ "resource": "" }