_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q31300
BaseAPI.clean_registration_ids
train
def clean_registration_ids(self, registration_ids=None):
    """
    Checks registration ids and excludes inactive ids.

    Args:
        registration_ids (list, optional): list of ids to be cleaned

    Returns:
        list: cleaned (still active) registration ids
    """
    # Avoid a shared mutable default argument; treat None as "no ids supplied".
    if registration_ids is None:
        registration_ids = []
    valid_registration_ids = []
    for registration_id in registration_ids:
        # Ask FCM for details about the id; a 200 response means it is active.
        details = self.registration_info_request(registration_id)
        if details.status_code == 200:
            valid_registration_ids.append(registration_id)
    return valid_registration_ids
python
{ "resource": "" }
q31301
BaseAPI.get_registration_id_info
train
def get_registration_id_info(self, registration_id):
    """
    Look up the details FCM holds for a registration id.

    Args:
        registration_id: id to be checked

    Returns:
        dict: info about the registration id, or None if the id doesn't exist
    """
    result = self.registration_info_request(registration_id)
    return result.json() if result.status_code == 200 else None
python
{ "resource": "" }
q31302
BaseAPI.subscribe_registration_ids_to_topic
train
def subscribe_registration_ids_to_topic(self, registration_ids, topic_name):
    """
    Subscribes a list of registration ids to a topic.

    Args:
        registration_ids (list): ids to be subscribed
        topic_name (str): name of topic

    Returns:
        True: if operation succeeded

    Raises:
        InvalidDataError: data sent to server was incorrectly formatted
        FCMError: an error occured on the server
    """
    url = 'https://iid.googleapis.com/iid/v1:batchAdd'
    body = {
        'to': '/topics/' + topic_name,
        'registration_tokens': registration_ids,
    }
    response = self.requests_session.post(url, json=body)
    status = response.status_code
    if status == 200:
        return True
    if status == 400:
        # Server explains the formatting problem in the JSON body.
        raise InvalidDataError(response.json()['error'])
    raise FCMError()
python
{ "resource": "" }
q31303
BaseAPI.parse_responses
train
def parse_responses(self):
    """
    Parse the JSON responses sent back by the FCM server and aggregate the
    important return variables across all collected responses.

    Returns:
        dict: multicast_ids (list), success (int), failure (int),
            canonical_ids (int), results (list) and optional
            topic_message_id (str but None by default)

    Raises:
        FCMServerError: FCM is temporarily not available or returned an empty body
        AuthenticationError: error authenticating the sender account
        InvalidDataError: data passed to FCM was incorrectly structured
    """
    # Accumulator merged across every response collected by send_request.
    response_dict = {
        'multicast_ids': [],
        'success': 0,
        'failure': 0,
        'canonical_ids': 0,
        'results': [],
        'topic_message_id': None
    }
    for response in self.send_request_responses:
        if response.status_code == 200:
            # A 200 with an empty body still indicates a connection problem.
            if 'content-length' in response.headers and int(response.headers['content-length']) <= 0:
                raise FCMServerError("FCM server connection error, the response is empty")
            else:
                parsed_response = response.json()
                multicast_id = parsed_response.get('multicast_id', None)
                success = parsed_response.get('success', 0)
                failure = parsed_response.get('failure', 0)
                canonical_ids = parsed_response.get('canonical_ids', 0)
                results = parsed_response.get('results', [])
                message_id = parsed_response.get('message_id', None)  # for topic messages
                # Topic sends report a message_id instead of a success counter.
                if message_id:
                    success = 1
                if multicast_id:
                    response_dict['multicast_ids'].append(multicast_id)
                response_dict['success'] += success
                response_dict['failure'] += failure
                response_dict['canonical_ids'] += canonical_ids
                response_dict['results'].extend(results)
                # NOTE(review): only the last response's message_id survives —
                # earlier topic message ids are overwritten; confirm intended.
                response_dict['topic_message_id'] = message_id
        elif response.status_code == 401:
            raise AuthenticationError("There was an error authenticating the sender account")
        elif response.status_code == 400:
            raise InvalidDataError(response.text)
        else:
            raise FCMServerError("FCM server is temporarily unavailable")
    return response_dict
python
{ "resource": "" }
q31304
FCMNotification.notify_single_device
train
def notify_single_device(self,
                         registration_id=None,
                         message_body=None,
                         message_title=None,
                         message_icon=None,
                         sound=None,
                         condition=None,
                         collapse_key=None,
                         delay_while_idle=False,
                         time_to_live=None,
                         restricted_package_name=None,
                         low_priority=False,
                         dry_run=False,
                         data_message=None,
                         click_action=None,
                         badge=None,
                         color=None,
                         tag=None,
                         body_loc_key=None,
                         body_loc_args=None,
                         title_loc_key=None,
                         title_loc_args=None,
                         content_available=None,
                         android_channel_id=None,
                         timeout=5,
                         extra_notification_kwargs=None,
                         extra_kwargs=None):
    """
    Send push notification to a single device.

    Args:
        registration_id (str, optional): FCM device registration ID
        message_body (str, optional): Message string to display in the notification tray
        message_title (str, optional): Message title to display in the notification tray
        message_icon (str, optional): Icon that appears next to the notification
        sound (str, optional): Sound file name to play; "Default" for the device default
        condition (str, optional): Topic condition to deliver messages to
        collapse_key (str, optional): Identifier for a group of messages that can be
            collapsed so that only the last message gets sent when delivery can be resumed
        delay_while_idle (bool, optional): deprecated
        time_to_live (int, optional): How long (in seconds) the message should be kept in
            FCM storage if the device is offline; max (and FCM default) is 4 weeks
        restricted_package_name (str, optional): Name of package
        low_priority (bool, optional): Send notification with the low priority flag
        dry_run (bool, optional): If True no message is sent but the request is tested
        data_message (dict, optional): Custom key-value pairs
        click_action (str, optional): Action associated with a user click on the notification
        badge (str, optional): Badge of notification
        color (str, optional): Color of the icon
        tag (str, optional): Group notification by tag
        body_loc_key (str, optional): Key to the body string for localization
        body_loc_args (list, optional): Values replacing format specifiers in the body string
        title_loc_key (str, optional): Key to the title string for localization
        title_loc_args (list, optional): Values replacing format specifiers in the title string
        content_available (bool, optional): Inactive client app is awoken
        android_channel_id (str, optional): Notification channel id (Android 8.0+)
        timeout (int, optional): Time limit for the request
        extra_notification_kwargs (dict, optional): More notification keyword arguments
        extra_kwargs (dict, optional): More keyword arguments

    Returns:
        dict: Response from FCM server (`multicast_id`, `success`, `failure`,
            `canonical_ids`, `results`)

    Raises:
        AuthenticationError: If :attr:`api_key` is not set or provided or there is
            an error authenticating the sender
        FCMServerError: Internal server error or timeout error on the FCM server
        InvalidDataError: Invalid data provided
        InternalPackageError: Mostly from changes in the response of FCM, contact
            the project owner to resolve the issue
    """
    if registration_id is None:
        raise InvalidDataError('Invalid registration ID')
    # Avoid the shared-mutable-default pitfall (previously `extra_kwargs={}`).
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    # [registration_id] cos we're sending to a single device
    payload = self.parse_payload(
        registration_ids=[registration_id],
        message_body=message_body,
        message_title=message_title,
        message_icon=message_icon,
        sound=sound,
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        click_action=click_action,
        badge=badge,
        color=color,
        tag=tag,
        body_loc_key=body_loc_key,
        body_loc_args=body_loc_args,
        title_loc_key=title_loc_key,
        title_loc_args=title_loc_args,
        android_channel_id=android_channel_id,
        content_available=content_available,
        extra_notification_kwargs=extra_notification_kwargs,
        **extra_kwargs
    )
    self.send_request([payload], timeout)
    return self.parse_responses()
python
{ "resource": "" }
q31305
FCMNotification.single_device_data_message
train
def single_device_data_message(self,
                               registration_id=None,
                               condition=None,
                               collapse_key=None,
                               delay_while_idle=False,
                               time_to_live=None,
                               restricted_package_name=None,
                               low_priority=False,
                               dry_run=False,
                               data_message=None,
                               content_available=None,
                               android_channel_id=None,
                               timeout=5,
                               extra_notification_kwargs=None,
                               extra_kwargs=None):
    """
    Send a data-only push message (no notification payload) to a single device.

    Args:
        registration_id (str, optional): FCM device registration ID
        condition (str, optional): Topic condition to deliver messages to
        collapse_key (str, optional): Identifier for a group of messages that can be
            collapsed so that only the last message gets sent when delivery can be resumed
        delay_while_idle (bool, optional): deprecated
        time_to_live (int, optional): How long (in seconds) the message should be kept in
            FCM storage if the device is offline; max (and FCM default) is 4 weeks
        restricted_package_name (str, optional): Name of package
        low_priority (bool, optional): Send notification with the low priority flag
        dry_run (bool, optional): If True no message is sent but the request is tested
        data_message (dict, optional): Custom key-value pairs
        content_available (bool, optional): Inactive client app is awoken
        android_channel_id (str, optional): Notification channel id (Android 8.0+)
        timeout (int, optional): Time limit for the request
        extra_notification_kwargs (dict, optional): More notification keyword arguments
        extra_kwargs (dict, optional): More keyword arguments

    Returns:
        dict: Response from FCM server (`multicast_id`, `success`, `failure`,
            `canonical_ids`, `results`)

    Raises:
        AuthenticationError: If :attr:`api_key` is not set or provided or there is
            an error authenticating the sender
        FCMServerError: Internal server error or timeout error on the FCM server
        InvalidDataError: Invalid data provided
        InternalPackageError: Mostly from changes in the response of FCM, contact
            the project owner to resolve the issue
    """
    if registration_id is None:
        raise InvalidDataError('Invalid registration ID')
    # Avoid the shared-mutable-default pitfall (previously `extra_kwargs={}`).
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    # [registration_id] cos we're sending to a single device
    payload = self.parse_payload(
        registration_ids=[registration_id],
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        content_available=content_available,
        remove_notification=True,  # data-only: strip the notification payload
        android_channel_id=android_channel_id,
        extra_notification_kwargs=extra_notification_kwargs,
        **extra_kwargs
    )
    self.send_request([payload], timeout)
    return self.parse_responses()
python
{ "resource": "" }
q31306
FCMNotification.notify_multiple_devices
train
def notify_multiple_devices(self,
                            registration_ids=None,
                            message_body=None,
                            message_title=None,
                            message_icon=None,
                            sound=None,
                            condition=None,
                            collapse_key=None,
                            delay_while_idle=False,
                            time_to_live=None,
                            restricted_package_name=None,
                            low_priority=False,
                            dry_run=False,
                            data_message=None,
                            click_action=None,
                            badge=None,
                            color=None,
                            tag=None,
                            body_loc_key=None,
                            body_loc_args=None,
                            title_loc_key=None,
                            title_loc_args=None,
                            content_available=None,
                            android_channel_id=None,
                            timeout=5,
                            extra_notification_kwargs=None,
                            extra_kwargs=None):
    """
    Sends push notification to multiple devices; can send to over 1000 devices
    by splitting the id list into chunks (one request payload per chunk).

    Args:
        registration_ids (list, optional): FCM device registration IDs
        message_body (str, optional): Message string to display in the notification tray
        message_title (str, optional): Message title to display in the notification tray
        message_icon (str, optional): Icon that appears next to the notification
        sound (str, optional): Sound file name to play; "Default" for the device default
        condition (str, optional): Topic condition to deliver messages to
        collapse_key (str, optional): Identifier for a group of messages that can be
            collapsed so that only the last message gets sent when delivery can be resumed
        delay_while_idle (bool, optional): deprecated
        time_to_live (int, optional): How long (in seconds) the message should be kept in
            FCM storage if the device is offline; max (and FCM default) is 4 weeks
        restricted_package_name (str, optional): Name of package
        low_priority (bool, optional): Send notification with the low priority flag
        dry_run (bool, optional): If True no message is sent but the request is tested
        data_message (dict, optional): Custom key-value pairs
        click_action (str, optional): Action associated with a user click on the notification
        badge (str, optional): Badge of notification
        color (str, optional): Color of the icon
        tag (str, optional): Group notification by tag
        body_loc_key (str, optional): Key to the body string for localization
        body_loc_args (list, optional): Values replacing format specifiers in the body string
        title_loc_key (str, optional): Key to the title string for localization
        title_loc_args (list, optional): Values replacing format specifiers in the title string
        content_available (bool, optional): Inactive client app is awoken
        android_channel_id (str, optional): Notification channel id (Android 8.0+)
        timeout (int, optional): Time limit for the request
        extra_notification_kwargs (dict, optional): More notification keyword arguments
        extra_kwargs (dict, optional): More keyword arguments

    Returns:
        dict: Response from FCM server (`multicast_id`, `success`, `failure`,
            `canonical_ids`, `results`)

    Raises:
        AuthenticationError: If :attr:`api_key` is not set or provided or there is
            an error authenticating the sender
        FCMServerError: Internal server error or timeout error on the FCM server
        InvalidDataError: Invalid data provided
        InternalPackageError: JSON parsing error, mostly from changes in the response
            of FCM; create a new github issue to resolve it
    """
    if not isinstance(registration_ids, list):
        raise InvalidDataError('Invalid registration IDs (should be list)')
    # Avoid the shared-mutable-default pitfall (previously `extra_kwargs={}`).
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    payloads = []
    # One payload per chunk of registration ids (FCM caps ids per request);
    # use a distinct loop variable so the parameter is not rebound.
    for chunk in self.registration_id_chunks(registration_ids):
        payloads.append(self.parse_payload(
            registration_ids=chunk,
            message_body=message_body,
            message_title=message_title,
            message_icon=message_icon,
            sound=sound,
            condition=condition,
            collapse_key=collapse_key,
            delay_while_idle=delay_while_idle,
            time_to_live=time_to_live,
            restricted_package_name=restricted_package_name,
            low_priority=low_priority,
            dry_run=dry_run,
            data_message=data_message,
            click_action=click_action,
            badge=badge,
            color=color,
            tag=tag,
            body_loc_key=body_loc_key,
            body_loc_args=body_loc_args,
            title_loc_key=title_loc_key,
            title_loc_args=title_loc_args,
            content_available=content_available,
            android_channel_id=android_channel_id,
            extra_notification_kwargs=extra_notification_kwargs,
            **extra_kwargs
        ))
    self.send_request(payloads, timeout)
    return self.parse_responses()
python
{ "resource": "" }
q31307
FCMNotification.multiple_devices_data_message
train
def multiple_devices_data_message(self,
                                  registration_ids=None,
                                  condition=None,
                                  collapse_key=None,
                                  delay_while_idle=False,
                                  time_to_live=None,
                                  restricted_package_name=None,
                                  low_priority=False,
                                  dry_run=False,
                                  data_message=None,
                                  content_available=None,
                                  timeout=5,
                                  extra_notification_kwargs=None,
                                  extra_kwargs=None):
    """
    Sends a data-only push message to multiple devices; can send to over 1000
    devices by splitting the id list into chunks (one request payload per chunk).

    Args:
        registration_ids (list, optional): FCM device registration IDs
        condition (str, optional): Topic condition to deliver messages to
        collapse_key (str, optional): Identifier for a group of messages that can be
            collapsed so that only the last message gets sent when delivery can be resumed
        delay_while_idle (bool, optional): deprecated
        time_to_live (int, optional): How long (in seconds) the message should be kept in
            FCM storage if the device is offline; max (and FCM default) is 4 weeks
        restricted_package_name (str, optional): Name of package
        low_priority (bool, optional): Send notification with the low priority flag
        dry_run (bool, optional): If True no message is sent but the request is tested
        data_message (dict, optional): Custom key-value pairs
        content_available (bool, optional): Inactive client app is awoken
        timeout (int, optional): Time limit for the request
        extra_notification_kwargs (dict, optional): More notification keyword arguments
        extra_kwargs (dict, optional): More keyword arguments

    Returns:
        dict: Response from FCM server (`multicast_id`, `success`, `failure`,
            `canonical_ids`, `results`)

    Raises:
        AuthenticationError: If :attr:`api_key` is not set or provided or there is
            an error authenticating the sender
        FCMServerError: Internal server error or timeout error on the FCM server
        InvalidDataError: Invalid data provided
        InternalPackageError: JSON parsing error, mostly from changes in the response
            of FCM; create a new github issue to resolve it
    """
    if not isinstance(registration_ids, list):
        raise InvalidDataError('Invalid registration IDs (should be list)')
    # Avoid the shared-mutable-default pitfall (previously `extra_kwargs={}`).
    extra_kwargs = {} if extra_kwargs is None else extra_kwargs
    payloads = []
    # One payload per chunk of registration ids (FCM caps ids per request);
    # use a distinct loop variable so the parameter is not rebound.
    for chunk in self.registration_id_chunks(registration_ids):
        payloads.append(self.parse_payload(
            registration_ids=chunk,
            condition=condition,
            collapse_key=collapse_key,
            delay_while_idle=delay_while_idle,
            time_to_live=time_to_live,
            restricted_package_name=restricted_package_name,
            low_priority=low_priority,
            dry_run=dry_run,
            data_message=data_message,
            content_available=content_available,
            remove_notification=True,  # data-only: strip the notification payload
            extra_notification_kwargs=extra_notification_kwargs,
            **extra_kwargs
        ))
    self.send_request(payloads, timeout)
    return self.parse_responses()
python
{ "resource": "" }
q31308
get_keys
train
def get_keys(src, dst, keys):
    """
    Copies the value of keys from source object to dest object.

    :param src: mapping to read values from
    :param dst: mapping to write values to (updated in place)
    :param keys: keys to copy; missing keys are stored as None
    :return: None
    """
    for key in keys:
        # dict.get returns None for missing keys, matching the previous
        # "src[key] if key in src else None" in a single lookup; the old
        # commented-out no_camel() variant was dead code and is removed.
        dst[key] = src.get(key)
python
{ "resource": "" }
q31309
is_throttled
train
def is_throttled(e):
    """
    Determines whether the exception is due to API throttling.

    :param e: Exception raised
    :return: True if it's a throttling exception else False
    """
    # botocore-style errors carry a 'response' dict with Error/Code metadata;
    # return the boolean expression directly instead of `True if ... else False`.
    return bool(
        hasattr(e, 'response') and
        e.response is not None and
        'Error' in e.response and
        e.response['Error']['Code'] in ['Throttling', 'RequestLimitExceeded', 'ThrottlingException']
    )
python
{ "resource": "" }
q31310
EC2RegionConfig.parse_instance
train
def parse_instance(self, global_params, region, reservation):
    """
    Parse a single EC2 reservation and register each of its instances
    under the VPC it belongs to.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param reservation: EC2 reservation (contains the 'Instances' list)
    """
    for i in reservation['Instances']:
        instance = {}
        # A missing or empty VpcId means the instance runs in EC2-Classic.
        vpc_id = i['VpcId'] if 'VpcId' in i and i['VpcId'] else ec2_classic
        # Ensure a VPCConfig placeholder exists for this VPC before filing under it.
        manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
        instance['reservation_id'] = reservation['ReservationId']
        instance['id'] = i['InstanceId']
        get_name(i, instance, 'InstanceId')
        get_keys(i, instance, ['KeyName', 'LaunchTime', 'InstanceType', 'State', 'IamInstanceProfile', 'SubnetId'])
        # Network interfaces & security groups
        manage_dictionary(instance, 'network_interfaces', {})
        for eni in i['NetworkInterfaces']:
            nic = {}
            get_keys(eni, nic, ['Association', 'Groups', 'PrivateIpAddresses', 'SubnetId', 'Ipv6Addresses'])
            instance['network_interfaces'][eni['NetworkInterfaceId']] = nic
        self.vpcs[vpc_id].instances[i['InstanceId']] = instance
python
{ "resource": "" }
q31311
ElastiCacheRegionConfig.parse_cluster
train
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single ElastiCache cluster and file it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: ElastiCache cluster
    """
    cluster_name = cluster.pop('CacheClusterId')
    cluster['name'] = cluster_name
    # Must fetch info about the subnet group to retrieve the VPC ID...
    if 'CacheSubnetGroupName' in cluster:
        subnet_group = api_clients[region].describe_cache_subnet_groups(CacheSubnetGroupName = cluster['CacheSubnetGroupName'])['CacheSubnetGroups'][0]
        vpc_id = subnet_group['VpcId']
    else:
        # No subnet group means the cluster runs in EC2-Classic.
        vpc_id = ec2_classic
        subnet_group = None
    # Ensure a VPCConfig placeholder exists for this VPC.
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].clusters[cluster_name] = cluster
    if subnet_group:
        self.vpcs[vpc_id].subnet_groups[subnet_group['CacheSubnetGroupName']] = subnet_group
python
{ "resource": "" }
q31312
RedshiftRegionConfig.parse_cluster
train
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single Redshift cluster and file it under its VPC.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: Cluster
    """
    # Clusters without a VpcId run in EC2-Classic; pop with a default covers both cases.
    vpc_id = cluster.pop('VpcId', ec2_classic)
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    cluster_name = cluster.pop('ClusterIdentifier')
    cluster['name'] = cluster_name
    self.vpcs[vpc_id].clusters[cluster_name] = cluster
python
{ "resource": "" }
q31313
RedshiftRegionConfig.parse_parameter_group
train
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single Redshift parameter group and fetch all of its parameters.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param parameter_group: Parameter group
    """
    pg_name = parameter_group.pop('ParameterGroupName')
    # Name could be used as only letters digits or hyphens
    pg_id = self.get_non_aws_id(pg_name)
    parameter_group['name'] = pg_name
    parameter_group['parameters'] = {}
    # Follow pagination to collect every parameter of the group.
    fetched = handle_truncated_response(api_clients[region].describe_cluster_parameters,
                                        {'ParameterGroupName': pg_name},
                                        ['Parameters'])['Parameters']
    for entry in fetched:
        parameter_group['parameters'][entry['ParameterName']] = {
            'value': entry['ParameterValue'],
            'source': entry['Source'],
        }
    self.parameter_groups[pg_id] = parameter_group
python
{ "resource": "" }
q31314
Route53DomainsConfig.parse_domains
train
def parse_domains(self, domain, params):
    """Parse a single Route53Domains domain and index it by its non-AWS id."""
    name = domain.pop('DomainName')
    domain['name'] = name
    # TODO: Get Dnssec info when available
    # (would call params['api_client'].get_domain_detail(DomainName=name)
    #  and copy the 'Dnssec' key into the domain record)
    self.domains[self.get_non_aws_id(name)] = domain
python
{ "resource": "" }
q31315
Route53Config.parse_hosted_zones
train
def parse_hosted_zones(self, hosted_zone, params):
    """
    Parse a single Route53 hosted zone and fetch all of its record sets.

    (When resuming upon a throttling error, an already-fetched zone is simply
    re-parsed.)
    """
    zone_id = hosted_zone.pop('Id')
    hosted_zone['name'] = hosted_zone.pop('Name')
    # Pull every record set for the zone, following pagination, and merge
    # the result ('ResourceRecordSets') into the zone record.
    record_sets = handle_truncated_response(params['api_client'].list_resource_record_sets,
                                            {'HostedZoneId': zone_id},
                                            ['ResourceRecordSets'])
    hosted_zone.update(record_sets)
    self.hosted_zones[zone_id] = hosted_zone
python
{ "resource": "" }
q31316
CloudFormationRegionConfig.parse_stack
train
def parse_stack(self, global_params, region, stack):
    """
    Parse a single CloudFormation stack and fetch additional attributes.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param stack: Stack object returned by the CloudFormation API
    """
    stack['id'] = stack.pop('StackId')
    stack['name'] = stack.pop('StackName')
    # A stack policy is optional; store it parsed when present.
    policy_response = api_clients[region].get_stack_policy(StackName=stack['name'])
    if 'StackPolicyBody' in policy_response:
        stack['policy'] = json.loads(policy_response['StackPolicyBody'])
    self.stacks[stack['name']] = stack
python
{ "resource": "" }
q31317
SNSRegionConfig.parse_subscription
train
def parse_subscription(self, params, region, subscription):
    """
    Parse a single subscription and reference it in its corresponding topic.

    :param params: Global parameters (defaults to {})
    :param region: Name of the AWS region
    :param subscription: SNS Subscription
    """
    topic_name = subscription.pop('TopicArn').split(':')[-1]
    if topic_name not in self.topics:
        # Subscription points at a topic we did not fetch; ignore it.
        return
    topic = self.topics[topic_name]
    protocol = subscription.pop('Protocol')
    # Subscriptions are grouped by protocol under each topic.
    manage_dictionary(topic['subscriptions'], 'protocol', {})
    manage_dictionary(topic['subscriptions']['protocol'], protocol, [])
    topic['subscriptions']['protocol'][protocol].append(subscription)
    topic['subscriptions_count'] += 1
python
{ "resource": "" }
q31318
SNSRegionConfig.parse_topic
train
def parse_topic(self, params, region, topic):
    """
    Parse a single topic and fetch additional attributes.

    :param params: Global parameters (defaults to {})
    :param region: Name of the AWS region
    :param topic: SNS Topic
    """
    topic['arn'] = topic.pop('TopicArn')
    # ARN format: arn:partition:sns:region:account:name
    arn_parts = topic['arn'].split(':')
    topic['name'] = arn_parts[-1]
    # Use the region embedded in the ARN (the original code deliberately
    # shadowed the `region` argument with it) to pick the right API client.
    region = arn_parts[3]
    attributes = api_clients[region].get_topic_attributes(TopicArn=topic['arn'])['Attributes']
    # Plain string attributes
    for k in ['Owner', 'DisplayName']:
        topic[k] = attributes[k] if k in attributes else None
    # JSON-encoded attributes
    for k in ['Policy', 'DeliveryPolicy', 'EffectiveDeliveryPolicy']:
        topic[k] = json.loads(attributes[k]) if k in attributes else None
    # (the redundant second `topic['name']` assignment was removed)
    manage_dictionary(topic, 'subscriptions', {})
    manage_dictionary(topic, 'subscriptions_count', 0)
    self.topics[topic['name']] = topic
python
{ "resource": "" }
q31319
EMRRegionConfig.parse_cluster
train
def parse_cluster(self, global_params, region, cluster):
    """
    Parse a single EMR cluster.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param cluster: EMR cluster
    """
    cluster_id = cluster['Id']
    # The listing only returns a summary; fetch the full cluster description.
    described = api_clients[region].describe_cluster(ClusterId=cluster_id)['Cluster']
    described['id'] = described.pop('Id')
    described['name'] = described.pop('Name')
    # The EMR API won't disclose the VPC ID, so wait until all configs have been
    # fetched and look up the VPC based on the subnet ID.
    placeholder_vpc = 'TODO'
    manage_dictionary(self.vpcs, placeholder_vpc, VPCConfig(self.vpc_resource_types))
    self.vpcs[placeholder_vpc].clusters[cluster_id] = described
python
{ "resource": "" }
q31320
BaseConfig.fetch_all
train
def fetch_all(self, credentials, regions=None, partition_name='aws', targets=None):
    """
    Generic fetching function that iterates through all of the service's targets.

    :param credentials: AWS credentials used to connect to the service
    :param regions: Name of regions to fetch data from (all regions when empty)
    :param partition_name: AWS partition to connect to
    :param targets: Type of resources to be fetched; defaults to all.
    """
    global status, formatted_string
    # Avoid a mutable default argument for the region list.
    regions = [] if regions is None else regions
    # Initialize targets
    if not targets:
        targets = type(self).targets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    formatted_string = None
    # Connect to the service
    if self.service in ['s3']:
        # S3 namespace is global but APIs aren't....
        api_clients = {}
        for region in build_region_list(self.service, regions, partition_name):
            api_clients[region] = connect_service('s3', credentials, region, silent=True)
        api_client = api_clients[list(api_clients.keys())[0]]
    elif self.service == 'route53domains':
        # TODO: use partition's default region
        api_client = connect_service(self.service, credentials, 'us-east-1', silent=True)
    else:
        api_client = connect_service(self.service, credentials, silent=True)
    # Threading to fetch & parse resources (queue consumer)
    params = {'api_client': api_client}
    if self.service in ['s3']:
        params['api_clients'] = api_clients
    q = self._init_threading(self.__fetch_target, params, self.thread_config['parse'])
    # Threading to list resources (queue feeder)
    params = {'api_client': api_client, 'q': q}
    if self.service in ['s3']:
        params['api_clients'] = api_clients
    qt = self._init_threading(self.__fetch_service, params, self.thread_config['list'])
    # Init display
    self.fetchstatuslogger = FetchStatusLogger(targets)
    # Feed the queue with the requested targets
    for target in targets:
        qt.put(target)
    # Wait until both the feeder and the parser queues are drained
    qt.join()
    q.join()
    # Show completion and force newline (IAM handles its own display)
    if self.service != 'iam':
        self.fetchstatuslogger.show(True)
python
{ "resource": "" }
q31321
SESRegionConfig.parse_identitie
train
def parse_identitie(self, global_params, region, identity_name):
    """
    Parse a single SES identity and fetch additional attributes.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param identity_name: Name of the SES identity
    """
    api_client = api_clients[region]
    identity = {'name': identity_name, 'policies': {}}
    # Sending-authorization policies attached to the identity, parsed from JSON.
    policy_names = api_client.list_identity_policies(Identity=identity_name)['PolicyNames']
    if policy_names:
        raw_policies = api_client.get_identity_policies(Identity=identity_name, PolicyNames=policy_names)['Policies']
        for name, document in raw_policies.items():
            identity['policies'][name] = json.loads(document)
    # DKIM signing status for this identity.
    dkim = api_client.get_identity_dkim_attributes(Identities=[identity_name])['DkimAttributes'][identity_name]
    identity['DkimEnabled'] = dkim['DkimEnabled']
    identity['DkimVerificationStatus'] = dkim['DkimVerificationStatus']
    self.identities[self.get_non_aws_id(identity_name)] = identity
python
{ "resource": "" }
q31322
pass_conditions
train
def pass_conditions(all_info, current_path, conditions, unknown_as_pass_condition=False):
    """
    Pass all conditions?

    :param all_info: Full configuration data
    :param current_path: Current path within the configuration data
    :param conditions: List whose first element is a boolean operator
        ('and'/'or') and whose remaining elements are either simple
        conditions or nested condition lists
    :param unknown_as_pass_condition: Consider an undetermined condition as passed
    :return: True if the conditions pass, else False
    """
    if len(conditions) == 0:
        return True
    # Read the operator without mutating the caller's list: the previous
    # conditions.pop(0) destroyed the ruleset, so a second evaluation of the
    # same conditions object misbehaved.
    condition_operator = conditions[0]
    for condition in conditions[1:]:
        if condition[0] in condition_operators:
            # Nested condition list: evaluate recursively.
            res = pass_conditions(all_info, current_path, condition, unknown_as_pass_condition)
        else:
            # Conditions are formed as "path to value", "type of test", "value(s) for test"
            path_to_value, test_name, test_values = condition
            path_to_value = fix_path_string(all_info, current_path, path_to_value)
            target_obj = get_value_at(all_info, current_path, path_to_value)
            # A non-list test value may dynamically reference another value.
            if type(test_values) != list:
                dynamic_value = re_get_value_at.match(test_values)
                if dynamic_value:
                    test_values = get_value_at(all_info, current_path, dynamic_value.groups()[0], True)
            try:
                res = pass_condition(target_obj, test_name, test_values)
            except Exception as e:
                # Undetermined condition: pass/fail according to the flag.
                res = True if unknown_as_pass_condition else False
                printError('Unable to process testcase \'%s\' on value \'%s\', interpreted as %s.' % (test_name, str(target_obj), res))
                printException(e, True)
        # Short-circuit: 'and' fails on the first False, 'or' passes on the first True.
        if condition_operator == 'and' and not res:
            return False
        if condition_operator == 'or' and res:
            return True
    # Exhausted without short-circuiting: every 'and' condition passed,
    # no 'or' condition passed.
    return condition_operator != 'or'
python
{ "resource": "" }
q31323
DirectConnectRegionConfig.parse_connection
train
def parse_connection(self, global_params, region, connection):
    """
    Parse a single Direct Connect connection and store it by ID.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param connection:    Connection dictionary as returned by the AWS API
    """
    # Normalize attribute names to the lowercase convention used elsewhere
    for normalized_key, api_key in (('id', 'connectionId'), ('name', 'connectionName')):
        connection[normalized_key] = connection.pop(api_key)
    self.connections[connection['id']] = connection
python
{ "resource": "" }
q31324
format_listall_output
train
def format_listall_output(format_file, format_item_dir, format, rule, option_prefix = None, template = None, skip_options = False): """ Prepare listall output template :param format_file: :param format_item_dir: :param format: :param config: :param option_prefix: :param template: :param skip_options: :return: """ # Set the list of keys if printing from a file spec # _LINE_(whatever)_EOL_ # _ITEM_(resource)_METI_ # _KEY_(path_to_value) if format_file and os.path.isfile(format_file): if not template: with open(format_file, 'rt') as f: template = f.read() # Optional files if not skip_options: re_option = re.compile(r'(%_OPTION_\((.*?)\)_NOITPO_)') optional_files = re_option.findall(template) for optional_file in optional_files: if optional_file[1].startswith(option_prefix + '-'): with open(os.path.join(format_item_dir, optional_file[1].strip()), 'rt') as f: template = template.replace(optional_file[0].strip(), f.read()) # Include files if needed re_file = re.compile(r'(_FILE_\((.*?)\)_ELIF_)') while True: requested_files = re_file.findall(template) available_files = os.listdir(format_item_dir) if format_item_dir else [] for requested_file in requested_files: if requested_file[1].strip() in available_files: with open(os.path.join(format_item_dir, requested_file[1].strip()), 'rt') as f: template = template.replace(requested_file[0].strip(), f.read()) # Find items and keys to be printed re_line = re.compile(r'(_ITEM_\((.*?)\)_METI_)') re_key = re.compile(r'_KEY_\(*(.*?)\)', re.DOTALL|re.MULTILINE) # Remove the multiline ? lines = re_line.findall(template) for (i, line) in enumerate(lines): lines[i] = line + (re_key.findall(line[1]),) requested_files = re_file.findall(template) if len(requested_files) == 0: break elif format and format[0] == 'csv': keys = rule.keys line = ', '.join('_KEY_(%s)' % k for k in keys) lines = [ (line, line, keys) ] template = line return (lines, template)
python
{ "resource": "" }
q31325
generate_listall_output
train
def generate_listall_output(lines, resources, aws_config, template, arguments, nodup = False):
    """
    Format and print the output of ListAll.

    :param lines:      Line specs as (raw_directive, pattern, keys) tuples
    :param resources:  Dot-separated resource paths to render
    :param aws_config: Full AWS configuration dictionary
    :param template:   Template text containing the directives to substitute
    :param arguments:  Values substituted for the positional _ARG_N_ placeholders
    :param nodup:      NOTE(review): unused; output is always de-duplicated via set()
    :return:           The fully substituted template text
    """
    for line in lines:
        output = []
        for resource in resources:
            current_path = resource.split('.')
            outline = line[1]
            # Replace each _KEY_(path) token with the value found in the config
            for key in line[2]:
                outline = outline.replace('_KEY_('+key+')', get_value_at(aws_config['services'], current_path, key, True))
            output.append(outline)
        # Sorted and de-duplicated regardless of the nodup flag
        output = '\n'.join(line for line in sorted(set(output)))
        template = template.replace(line[0], output)
    # Substitute positional arguments: _ARG_0_, _ARG_1_, ...
    for (i, argument) in enumerate(arguments):
        template = template.replace('_ARG_%d_' % i, argument)
    return template
python
{ "resource": "" }
q31326
CloudTrailRegionConfig.parse_trail
train
def parse_trail(self, global_params, region, trail):
    """
    Parse a single CloudTrail trail.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param trail:         Trail dictionary as returned by the AWS API
    """
    trail_config = {}
    trail_config['name'] = trail.pop('Name')
    trail_id = self.get_non_aws_id(trail_config['name'])
    trail_details = None
    api_client = api_clients[region]
    # Do not duplicate entries for multiregion trails
    if 'IsMultiRegionTrail' in trail and trail['IsMultiRegionTrail'] and trail['HomeRegion'] != region:
        # Only keep a pointer to the trail's home-region entry
        for key in ['HomeRegion', 'TrailARN']:
            trail_config[key] = trail[key]
        trail_config['scout2_link'] = 'services.cloudtrail.regions.%s.trails.%s' % (trail['HomeRegion'], trail_id)
    else:
        for key in trail:
            trail_config[key] = trail[key]
        # Reference the logging bucket by its sanitized ID
        trail_config['bucket_id'] = self.get_non_aws_id(trail_config.pop('S3BucketName'))
        # Default the boolean flags the API may omit
        for key in ['IsMultiRegionTrail', 'LogFileValidationEnabled']:
            if key not in trail_config:
                trail_config[key] = False
        trail_details = api_client.get_trail_status(Name=trail['TrailARN'])
        # Copy status attributes, using None for anything the API did not return
        for key in ['IsLogging', 'LatestDeliveryTime', 'LatestDeliveryError', 'StartLoggingTime', 'StopLoggingTime', 'LatestNotificationTime', 'LatestNotificationError', 'LatestCloudWatchLogsDeliveryError', 'LatestCloudWatchLogsDeliveryTime']:
            trail_config[key] = trail_details[key] if key in trail_details else None
    # Data-event logging status is only resolved for trails owned by this region
    if trail_details:
        trail_config['wildcard_data_logging'] = self.data_logging_status(trail_config['name'], trail_details, api_client)
    self.trails[trail_id] = trail_config
python
{ "resource": "" }
q31327
IAMConfig.fetch_credential_report
train
def fetch_credential_report(self, credentials, ignore_exception = False):
    """
    Fetch and parse the IAM credential report for the account.

    The report is stored in self.credential_report as a dict keyed by the
    first CSV column (the user identifier), each value mapping CSV header
    names to that row's values.

    :param credentials:      AWS credentials used to connect to IAM
    :param ignore_exception: Silently return on failure; useful because report
                             generation is asynchronous and not always ready
    """
    iam_report = {}
    try:
        api_client = connect_service('iam', credentials, silent = True)
        response = api_client.generate_credential_report()
        # Report generation is asynchronous; bail out if it is not ready yet
        if response['State'] != 'COMPLETE':
            if not ignore_exception:
                printError('Failed to generate a credential report.')
            return
        report = api_client.get_credential_report()['Content']
        # The report content is CSV bytes: header row first, one row per user
        lines = report.splitlines()
        keys = lines[0].decode('utf-8').split(',')
        for line in lines[1:]:
            values = line.decode('utf-8').split(',')
            manage_dictionary(iam_report, values[0], {})
            for key, value in zip(keys, values):
                iam_report[values[0]][key] = value
        self.credential_report = iam_report
        self.fetchstatuslogger.counts['credential_report']['fetched'] = 1
    except Exception as e:
        if ignore_exception:
            return
        printError('Failed to download a credential report.')
        printException(e)
python
{ "resource": "" }
q31328
IAMConfig.parse_groups
train
def parse_groups(self, group, params): """ Parse a single IAM group and fetch additional information """ # When resuming upon throttling error, skip if already fetched if group['GroupName'] in self.groups: return api_client = params['api_client'] # Ensure consistent attribute names across resource types group['id'] = group.pop('GroupId') group['name'] = group.pop('GroupName') group['arn'] = group.pop('Arn') # Get group's members group['users'] = self.__fetch_group_users(api_client, group['name']); # Get inline policies policies = self.__get_inline_policies(api_client, 'group', group['id'], group['name']) if len(policies): group['inline_policies'] = policies group['inline_policies_count'] = len(policies) self.groups[group['id']] = group
python
{ "resource": "" }
q31329
IAMConfig.parse_policies
train
def parse_policies(self, fetched_policy, params):
    """
    Parse a single IAM managed policy and fetch additional information.

    :param fetched_policy: Policy dictionary as returned by the AWS API
    :param params:         Dict carrying the shared 'api_client'
    """
    api_client = params['api_client']
    policy = {}
    # Ensure consistent attribute names across resource types
    policy['name'] = fetched_policy.pop('PolicyName')
    policy['id'] = fetched_policy.pop('PolicyId')
    policy['arn'] = fetched_policy.pop('Arn')
    # Download version and document
    policy_version = api_client.get_policy_version(PolicyArn = policy['arn'], VersionId = fetched_policy['DefaultVersionId'])
    policy_version = policy_version['PolicyVersion']
    policy['PolicyDocument'] = policy_version['Document']
    # Get attached IAM entities
    policy['attached_to'] = {}
    attached_entities = handle_truncated_response(api_client.list_entities_for_policy, {'PolicyArn': policy['arn']}, ['PolicyGroups', 'PolicyRoles', 'PolicyUsers'])
    for entity_type in attached_entities:
        # 'PolicyGroups' -> 'groups', 'PolicyRoles' -> 'roles', ...
        resource_type = entity_type.replace('Policy', '').lower()
        if len(attached_entities[entity_type]):
            policy['attached_to'][resource_type] = []
        for entity in attached_entities[entity_type]:
            # 'PolicyGroups' -> 'GroupName', etc.
            name_field = entity_type.replace('Policy', '')[:-1] + 'Name'
            resource_name = entity[name_field]
            policy['attached_to'][resource_type].append({'name': resource_name})
    # Save policy
    self.policies[policy['id']] = policy
python
{ "resource": "" }
q31330
IAMConfig.fetch_password_policy
train
def fetch_password_policy(self, credentials):
    """
    Fetch the password policy that applies to all IAM users within the AWS account.

    When no policy exists, a synthetic "weakest possible" policy is stored so
    downstream rules can still evaluate it.

    :param credentials: AWS credentials used to connect to IAM
    """
    self.fetchstatuslogger.counts['password_policy']['discovered'] = 0
    self.fetchstatuslogger.counts['password_policy']['fetched'] = 0
    try:
        api_client = connect_service('iam', credentials, silent = True)
        self.password_policy = api_client.get_account_password_policy()['PasswordPolicy']
        # Normalize PasswordReusePrevention into a boolean flag plus the raw count
        if 'PasswordReusePrevention' not in self.password_policy:
            self.password_policy['PasswordReusePrevention'] = False
        else:
            self.password_policy['PreviousPasswordPrevented'] = self.password_policy['PasswordReusePrevention']
            self.password_policy['PasswordReusePrevention'] = True
        # There is a bug in the API: ExpirePasswords always returns false
        if 'MaxPasswordAge' in self.password_policy:
            self.password_policy['ExpirePasswords'] = True
        self.fetchstatuslogger.counts['password_policy']['discovered'] = 1
        self.fetchstatuslogger.counts['password_policy']['fetched'] = 1
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchEntity':
            # No policy configured: record the effective (permissive) defaults
            self.password_policy = {}
            self.password_policy['MinimumPasswordLength'] = '1' # As of 10/10/2016, 1-character passwords were authorized when no policy exists, even though the console displays 6
            self.password_policy['RequireUppercaseCharacters'] = False
            self.password_policy['RequireLowercaseCharacters'] = False
            self.password_policy['RequireNumbers'] = False
            self.password_policy['RequireSymbols'] = False
            self.password_policy['PasswordReusePrevention'] = False
            self.password_policy['ExpirePasswords'] = False
        else:
            raise e
    except Exception as e:
        printError(str(e))
python
{ "resource": "" }
q31331
IAMConfig.parse_roles
train
def parse_roles(self, fetched_role, params):
    """
    Parse a single IAM role and fetch additional data.

    :param fetched_role: Role dictionary as returned by the AWS API
    :param params:       Dict carrying the shared 'api_client'
    """
    role = {}
    role['instances_count'] = 'N/A'
    # When resuming upon throttling error, skip if already fetched
    # NOTE(review): self.roles is keyed by RoleId below, so this membership
    # test on RoleName likely never matches — TODO confirm intent.
    if fetched_role['RoleName'] in self.roles:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    role['id'] = fetched_role.pop('RoleId')
    role['name'] = fetched_role.pop('RoleName')
    role['arn'] = fetched_role.pop('Arn')
    # Get other attributes
    get_keys(fetched_role, role, [ 'CreateDate', 'Path'])
    # Get role policies
    policies = self.__get_inline_policies(api_client, 'role', role['id'], role['name'])
    if len(policies):
        role['inline_policies'] = policies
        role['inline_policies_count'] = len(policies)
    # Get instance profiles
    profiles = handle_truncated_response(api_client.list_instance_profiles_for_role, {'RoleName': role['name']}, ['InstanceProfiles'])
    manage_dictionary(role, 'instance_profiles', {})
    for profile in profiles['InstanceProfiles']:
        manage_dictionary(role['instance_profiles'], profile['InstanceProfileId'], {})
        role['instance_profiles'][profile['InstanceProfileId']]['arn'] = profile['Arn']
        role['instance_profiles'][profile['InstanceProfileId']]['name'] = profile['InstanceProfileName']
    # Get trust relationship
    role['assume_role_policy'] = {}
    role['assume_role_policy']['PolicyDocument'] = fetched_role.pop('AssumeRolePolicyDocument')
    # Save role
    self.roles[role['id']] = role
python
{ "resource": "" }
q31332
IAMConfig.parse_users
train
def parse_users(self, user, params):
    """
    Parse a single IAM user and fetch additional data.

    :param user:   User dictionary as returned by the AWS API
    :param params: Dict carrying the shared 'api_client'
    """
    # When resuming upon throttling error, skip if already fetched
    # NOTE(review): self.users is keyed by UserId below, so this membership
    # test on UserName likely never matches — TODO confirm intent.
    if user['UserName'] in self.users:
        return
    api_client = params['api_client']
    # Ensure consistent attribute names across resource types
    user['id'] = user.pop('UserId')
    user['name'] = user.pop('UserName')
    user['arn'] = user.pop('Arn')
    # Get inline policies
    policies = self.__get_inline_policies(api_client, 'user', user['id'], user['name'])
    if len(policies):
        user['inline_policies'] = policies
        user['inline_policies_count'] = len(policies)
    # Get group memberships (names only)
    user['groups'] = []
    groups = handle_truncated_response(api_client.list_groups_for_user, {'UserName': user['name']}, ['Groups'])['Groups']
    for group in groups:
        user['groups'].append(group['GroupName'])
    # A missing login profile simply means no console password is set
    try:
        user['LoginProfile'] = api_client.get_login_profile(UserName = user['name'])['LoginProfile']
    except Exception as e:
        pass
    user['AccessKeys'] = api_client.list_access_keys(UserName = user['name'])['AccessKeyMetadata']
    user['MFADevices'] = api_client.list_mfa_devices(UserName = user['name'])['MFADevices']
    # TODO: fetch users' signing certificates
    self.users[user['id']] = user
python
{ "resource": "" }
q31333
get_subnet_flow_logs_list
train
def get_subnet_flow_logs_list(current_config, subnet):
    """
    Return the flow logs that cover a given subnet.

    A flow log covers the subnet when its ResourceId matches either the
    subnet itself or the subnet's parent VPC.

    :param current_config: Region configuration holding the flow_logs dictionary
    :param subnet:         The subnet that the flow logs should cover
    :return:               List of matching flow log IDs
    """
    covering_resource_ids = (subnet['SubnetId'], subnet['VpcId'])
    return [flow_log_id for flow_log_id in current_config.flow_logs
            if current_config.flow_logs[flow_log_id]['ResourceId'] in covering_resource_ids]
python
{ "resource": "" }
q31334
VPCRegionConfig.parse_subnet
train
def parse_subnet(self, global_params, region, subnet):
    """
    Parse a subnet object and attach it to its parent VPC's configuration.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param subnet:        Subnet dictionary as returned by the AWS API
    """
    vpc_id = subnet['VpcId']
    # Ensure the parent VPC's config exists before attaching the subnet
    # (the original called manage_dictionary twice with identical arguments;
    # the redundant second call was removed)
    manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
    subnet_id = subnet['SubnetId']
    get_name(subnet, subnet, 'SubnetId')
    # set flow logs that cover this subnet (directly or via its VPC)
    subnet['flow_logs'] = get_subnet_flow_logs_list(self, subnet)
    # Save
    self.vpcs[vpc_id].subnets[subnet_id] = subnet
python
{ "resource": "" }
q31335
CloudWatchRegionConfig.parse_alarm
train
def parse_alarm(self, global_params, region, alarm):
    """
    Parse a single CloudWatch alarm.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param alarm:         Alarm dictionary as returned by the AWS API
    """
    alarm['arn'] = alarm.pop('AlarmArn')
    alarm['name'] = alarm.pop('AlarmName')
    # Drop verbose state/config metadata that is not needed in the report
    for noisy_key in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']:
        alarm.pop(noisy_key, None)
    # Index by a sanitized (non-AWS) ID derived from the ARN
    self.alarms[self.get_non_aws_id(alarm['arn'])] = alarm
python
{ "resource": "" }
q31336
get_attribute_at
train
def get_attribute_at(config, target_path, key, default_value=None):
    """
    Return the value stored under `key` in the dictionary reached by
    walking `target_path` down from `config`.

    :param config:        Root configuration dictionary
    :param target_path:   Sequence of keys leading to the target dictionary
    :param key:           Attribute to read at the target location
    :param default_value: Value returned when the attribute is absent
    :return:              The attribute value, or `default_value`
    """
    node = config
    for path_element in target_path:
        node = node[path_element]
    if key in node:
        return node[key]
    return default_value
python
{ "resource": "" }
q31337
get_value_at
train
def get_value_at(all_info, current_path, key, to_string=False):
    """
    Get value located at a given path.

    `key` is a dot-separated path specification; the special component 'id'
    is resolved against the corresponding element of `current_path`, and the
    special key 'this' refers to `current_path` itself.

    :param all_info:     Full configuration dictionary to walk
    :param current_path: Path of the resource currently being evaluated
    :param key:          Dot-separated path specification (or 'this'/'...id')
    :param to_string:    Return str(value) instead of the raw value
    :return:             The value found at the resolved path
    """
    keys = key.split('.')
    # Shortcut: a trailing 'id' component resolves directly from current_path
    if keys[-1] == 'id':
        target_obj = current_path[len(keys)-1]
    else:
        if key == 'this':
            target_path = current_path
        elif '.' in key:
            # Build the target path, substituting 'id' components with the
            # matching element of current_path
            target_path = []
            for i, key in enumerate(keys):
                if key == 'id':
                    target_path.append(current_path[i])
                else:
                    target_path.append(key)
            # Append any remaining components when key is deeper than current_path
            if len(keys) > len(current_path):
                target_path = target_path + keys[len(target_path):]
        else:
            # Single relative component: look it up under the current path
            target_path = copy.deepcopy(current_path)
            target_path.append(key)
        # Walk the resolved path down from the root
        target_obj = all_info
        for p in target_path:
            try:
                if type(target_obj) == list and type(target_obj[0]) == dict:
                    # List of dicts: the path component is a numeric index
                    target_obj = target_obj[int(p)]
                elif type(target_obj) == list:
                    # Plain list: the component itself is the value
                    target_obj = p
                elif p == '':
                    target_obj = target_obj
                else:
                    try:
                        target_obj = target_obj[p]
                    except Exception as e:
                        printInfo('Info: %s\n'
                                  'Path: %s\n'
                                  'Key: %s' % (str(all_info), str(current_path), str(key)))
                        printException(e)
                        raise Exception
            except Exception as e:
                printInfo('Info: %s\n'
                          'Path: %s\n'
                          'Key: %s' % (str(all_info), str(current_path), str(key)))
                printException(e)
                raise Exception
    if to_string:
        return str(target_obj)
    else:
        return target_obj
python
{ "resource": "" }
q31338
RuleDefinition.load
train
def load(self):
    """
    Load the definition of the rule, searching in the specified rule dirs
    first, then in the built-in definitions.

    Sets self.file_path to the resolved location and populates
    self.string_definition on success.

    :return: None
    """
    file_name_valid = False
    rule_type_valid = False
    # Look for a locally-defined rule
    for rule_dir in self.rule_dirs:
        file_path = os.path.join(rule_dir, self.file_name) if rule_dir else self.file_name
        if os.path.isfile(file_path):
            self.file_path = file_path
            file_name_valid = True
            break
    # Look for a built-in rule
    if not file_name_valid:
        # File names prefixed with a known rule type live directly under the data path
        for rule_type in self.rule_types:
            if self.file_name.startswith(rule_type):
                self.file_path = os.path.join(self.rules_data_path, self.file_name)
                rule_type_valid = True
                file_name_valid = True
                break
        if not rule_type_valid:
            # Otherwise probe each rule-type subdirectory
            for rule_type in self.rule_types:
                self.file_path = os.path.join(self.rules_data_path, rule_type, self.file_name)
                if os.path.isfile(self.file_path):
                    file_name_valid = True
                    break
        else:
            if os.path.isfile(self.file_path):
                file_name_valid = True
    if not file_name_valid:
        printError('Error: could not find %s' % self.file_name)
    else:
        try:
            with open(self.file_path, 'rt') as f:
                self.string_definition = f.read()
            self.load_from_string_definition()
        except Exception as e:
            printException(e)
            # Bug fix: the local `file_path` is unbound when the rule was
            # resolved from the built-in definitions; use self.file_path,
            # which is set on every successful resolution path.
            printError('Failed to load rule defined in %s' % self.file_path)
python
{ "resource": "" }
q31339
RegionalServiceConfig.init_region_config
train
def init_region_config(self, region):
    """
    Initialize the configuration object for a single region.

    :param region: Name of the region
    """
    region_config = self.region_config_class(region_name = region, resource_types = self.resource_types)
    self.regions[region] = region_config
python
{ "resource": "" }
q31340
RegionalServiceConfig.fetch_all
train
def fetch_all(self, credentials, regions = [], partition_name = 'aws', targets = None): """ Fetch all the configuration supported by Scout2 for a given service :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all. """ # Initialize targets # Tweak params realtargets = () if not targets: targets = self.targets for i, target in enumerate(targets['first_region']): params = self.tweak_params(target[3], credentials) realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),) targets['first_region'] = realtargets realtargets = () for i, target in enumerate(targets['other_regions']): params = self.tweak_params(target[3], credentials) realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),) targets['other_regions'] = realtargets printInfo('Fetching %s config...' % format_service_name(self.service)) self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True) api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower() # Init regions regions = build_region_list(api_service, regions, partition_name) # TODO: move this code within this class self.fetchstatuslogger.counts['regions']['discovered'] = len(regions) # Threading to fetch & parse resources (queue consumer) q = self._init_threading(self._fetch_target, {}, self.thread_config['parse']) # Threading to list resources (queue feeder) qr = self._init_threading(self._fetch_region, {'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': ()}, self.thread_config['list']) # Go for i, region in enumerate(regions): qr.put((region, targets['first_region'] if i == 0 else targets['other_regions'])) # Join qr.join() q.join() # Show completion and force newline self.fetchstatuslogger.show(True)
python
{ "resource": "" }
q31341
RegionalServiceConfig._init_threading
train
def _init_threading(self, function, params={}, num_threads=10): """ Initialize queue and threads :param function: :param params: :param num_threads: :return: """ q = Queue(maxsize=0) # TODO: find something appropriate for i in range(num_threads): worker = Thread(target=function, args=(q, params)) worker.setDaemon(True) worker.start() return q
python
{ "resource": "" }
q31342
RegionConfig.fetch_all
train
def fetch_all(self, api_client, fetchstatuslogger, q, targets):
    '''
    Make all API calls as defined in metadata.json

    :param api_client:        Low-level AWS API client for this region
    :param fetchstatuslogger: Logger tracking fetch progress counters
    :param q:                 Queue the fetched resources are pushed to
    :param targets:           A single target, or a list/tuple of targets;
                              None disables fetching entirely
    :return:
    '''
    self.fetchstatuslogger = fetchstatuslogger
    if targets is not None:
        if not isinstance(targets, (list, tuple)):
            # Bug fix: a single target must be wrapped in a tuple.
            # The original `tuple(targets,)` is just `tuple(targets)`, which
            # iterates the target (or raises TypeError on non-iterables)
            # instead of wrapping it.
            targets = (targets,)
        elif not isinstance(targets, tuple):
            targets = tuple(targets)
        for target in targets:
            self._fetch_targets(api_client, q, target)
python
{ "resource": "" }
q31343
SQSRegionConfig.parse_queue
train
def parse_queue(self, global_params, region, queue_url):
    """
    Parse a single queue and fetch additional attributes.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param queue_url:     URL of the AWS queue
    """
    queue = {'QueueUrl': queue_url}
    requested_attributes = ['CreatedTimestamp', 'Policy', 'QueueArn']
    attributes = api_clients[region].get_queue_attributes(QueueUrl = queue_url, AttributeNames = requested_attributes)['Attributes']
    queue['arn'] = attributes.pop('QueueArn')
    queue['CreatedTimestamp'] = attributes.get('CreatedTimestamp')
    # Policy documents are JSON strings; default to an empty statement list
    if 'Policy' in attributes:
        queue['Policy'] = json.loads(attributes['Policy'])
    else:
        queue['Policy'] = {'Statement': []}
    # The queue name is the last component of its ARN
    queue['name'] = queue['arn'].split(':')[-1]
    self.queues[queue['name']] = queue
python
{ "resource": "" }
q31344
get_s3_buckets
train
def get_s3_buckets(api_client, s3_info, s3_params):
    """
    List all available buckets and fetch their details in parallel.

    :param api_client: Dict of per-region S3 API clients
    :param s3_info:    Output dictionary receiving the bucket data
    :param s3_params:  Parameters: selected_regions, skipped_buckets,
                       checked_buckets, plus fields added here for workers
    :return:           The populated s3_info dictionary
    """
    manage_dictionary(s3_info, 'buckets', {})
    buckets = api_client[get_s3_list_region(s3_params['selected_regions'])].list_buckets()['Buckets']
    targets = []
    for b in buckets:
        # Abort if bucket is not of interest
        if (b['Name'] in s3_params['skipped_buckets']) or (len(s3_params['checked_buckets']) and b['Name'] not in s3_params['checked_buckets']):
            continue
        targets.append(b)
    s3_info['buckets_count'] = len(targets)
    # Make the clients and output dict available to the worker threads
    s3_params['api_clients'] = api_client
    s3_params['s3_info'] = s3_info
    thread_work(targets, get_s3_bucket, params = s3_params, num_threads = 30)
    show_status(s3_info)
    # Recount: workers may have skipped buckets (e.g. out-of-scope regions)
    s3_info['buckets_count'] = len(s3_info['buckets'])
    return s3_info
python
{ "resource": "" }
q31345
S3Config.parse_buckets
train
def parse_buckets(self, bucket, params):
    """
    Parse a single S3 bucket and fetch its security-relevant settings.

    TODO:
    - CORS
    - Lifecycle
    - Notification ?
    - Get bucket's policy

    :param bucket: Bucket dictionary as returned by list_buckets
    :param params: Parameters carrying the per-region 'api_clients'
    :return:
    """
    bucket['name'] = bucket.pop('Name')
    api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])]
    bucket['CreationDate'] = str(bucket['CreationDate'])
    bucket['region'] = get_s3_bucket_location(api_client, bucket['name'])
    # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland...
    if bucket['region'] == 'EU':
        bucket['region'] = 'eu-west-1'
    # h4ck :: S3 is global but region-aware...
    if bucket['region'] not in params['api_clients']:
        printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region']))
        self.buckets_count -= 1
        return
    # Use the client matching the bucket's own region for the detail calls
    api_client = params['api_clients'][bucket['region']]
    get_s3_bucket_logging(api_client, bucket['name'], bucket)
    get_s3_bucket_versioning(api_client, bucket['name'], bucket)
    get_s3_bucket_webhosting(api_client, bucket['name'], bucket)
    get_s3_bucket_default_encryption(api_client, bucket['name'], bucket)
    bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket)
    get_s3_bucket_policy(api_client, bucket['name'], bucket)
    get_s3_bucket_secure_transport(api_client, bucket['name'], bucket)
    # If requested, get key properties
    #if params['check_encryption'] or params['check_acls']:
    #    get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'],
    #                       params['check_acls'])
    bucket['id'] = self.get_non_aws_id(bucket['name'])
    self.buckets[bucket['id']] = bucket
python
{ "resource": "" }
q31346
Ruleset.load
train
def load(self, rule_type, quiet = False):
    """
    Open a JSON file defining a ruleset and load it into a Ruleset object.

    :param rule_type: Type of the rules being loaded (forwarded to handle_rule_versions)
    :param quiet:     Suppress the error message when the file does not exist
    :return:
    """
    if self.filename and os.path.exists(self.filename):
        try:
            with open(self.filename, 'rt') as f:
                ruleset = json.load(f)
                self.about = ruleset['about'] if 'about' in ruleset else ''
                self.rules = {}
                for filename in ruleset['rules']:
                    self.rules[filename] = []
                    for rule in ruleset['rules'][filename]:
                        self.handle_rule_versions(filename, rule_type, rule)
        except Exception as e:
            printException(e)
            printError('Error: ruleset file %s contains malformed JSON.' % self.filename)
            # NOTE(review): on failure self.rules becomes a list while the
            # success path makes it a dict — TODO confirm downstream handling.
            self.rules = []
            self.about = ''
    else:
        self.rules = []
        if not quiet:
            printError('Error: the file %s does not exist.' % self.filename)
python
{ "resource": "" }
q31347
Ruleset.handle_rule_versions
train
def handle_rule_versions(self, filename, rule_type, rule):
    """
    For each version of a rule found in the ruleset, append a new Rule object.

    :param filename:  Name of the file the rule was declared in
    :param rule_type: Type of the rule
    :param rule:      Raw rule dictionary, possibly carrying a 'versions' map
    """
    if 'versions' not in rule:
        # Single-version rule: register it as-is
        self.rules[filename].append(Rule(filename, rule_type, rule))
        return
    versions = rule.pop('versions')
    for version_key_suffix, version in versions.items():
        version['key_suffix'] = version_key_suffix
        # Merge the version-specific attributes over the base rule
        versioned_rule = dict(rule, **version)
        self.rules[filename].append(Rule(filename, rule_type, versioned_rule))
python
{ "resource": "" }
q31348
Ruleset.prepare_rules
train
def prepare_rules(self, attributes = [], ip_ranges = [], params = {}):
    """
    Update the ruleset's rules by duplicating fields as required by the
    HTML ruleset generator.

    Rules present in the definitions but absent from the ruleset are added
    as disabled placeholders so the generator can offer them.

    :param attributes: Attributes forwarded to Rule.set_definition
    :param ip_ranges:  IP ranges forwarded to Rule.set_definition
    :param params:     Parameters forwarded to Rule.set_definition
    :return:
    """
    for filename in self.rule_definitions:
        if filename in self.rules:
            for rule in self.rules[filename]:
                rule.set_definition(self.rule_definitions, attributes, ip_ranges, params)
        else:
            # Known definition not referenced by the ruleset: add it disabled
            self.rules[filename] = []
            new_rule = Rule(filename, self.rule_type, {'enabled': False, 'level': 'danger'})
            new_rule.set_definition(self.rule_definitions, attributes, ip_ranges, params)
            self.rules[filename].append(new_rule)
python
{ "resource": "" }
q31349
Ruleset.load_rule_definitions
train
def load_rule_definitions(self, ruleset_generator = False, rule_dirs = []): """ Load definition of rules declared in the ruleset :param services: :param ip_ranges: :param aws_account_id: :param generator: :return: """ # Load rules from JSON files self.rule_definitions = {} for rule_filename in self.rules: for rule in self.rules[rule_filename]: if not rule.enabled and not ruleset_generator: continue self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename, rule_dirs = rule_dirs) # In case of the ruleset generator, list all available built-in rules if ruleset_generator: rule_dirs.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/findings')) rule_filenames = [] for rule_dir in rule_dirs: rule_filenames += [f for f in os.listdir(rule_dir) if os.path.isfile(os.path.join(rule_dir, f))] for rule_filename in rule_filenames: if rule_filename not in self.rule_definitions: self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename)
python
{ "resource": "" }
q31350
prompt_4_yes_no
train
def prompt_4_yes_no(question):
    """
    Ask a question and prompt for yes or no, re-prompting until the answer
    is valid.

    :param question: Question to ask; answer is yes/no
    :return:         True for yes, False for no (:boolean)
    """
    while True:
        sys.stdout.write(question + ' (y/n)? ')
        try:
            # Python 2 compatibility: raw_input does not exist in Python 3
            choice = raw_input().lower()
        except NameError:
            # Bug fix: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/EOFError raised while waiting for input
            choice = input().lower()
        if choice == 'yes' or choice == 'y':
            return True
        elif choice == 'no' or choice == 'n':
            return False
        else:
            printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice)
python
{ "resource": "" }
q31351
RDSRegionConfig.parse_instance
train
def parse_instance(self, global_params, region, dbi):
    """
    Parse a single RDS instance.

    :param global_params: Parameters shared for all regions
    :param region:        Name of the AWS region
    :param dbi:           DB instance dictionary as returned by the AWS API
    """
    # Instances outside a VPC are grouped under the EC2-Classic placeholder
    vpc_id = dbi['DBSubnetGroup']['VpcId'] if 'DBSubnetGroup' in dbi and 'VpcId' in dbi['DBSubnetGroup'] and dbi['DBSubnetGroup']['VpcId'] else ec2_classic
    instance = {}
    instance['name'] = dbi.pop('DBInstanceIdentifier')
    # Copy the attributes of interest, None when absent
    for key in ['InstanceCreateTime', 'Engine', 'DBInstanceStatus', 'AutoMinorVersionUpgrade', 'DBInstanceClass', 'MultiAZ', 'Endpoint', 'BackupRetentionPeriod', 'PubliclyAccessible', 'StorageEncrypted', 'VpcSecurityGroups', 'DBSecurityGroups', 'DBParameterGroups', 'EnhancedMonitoringResourceArn', 'StorageEncrypted']:
        # parameter_groups , security_groups, vpc_security_groups
        instance[key] = dbi[key] if key in dbi else None
    # If part of a cluster, multi AZ information is only available via cluster information
    if 'DBClusterIdentifier' in dbi:
        api_client = api_clients[region]
        cluster = api_client.describe_db_clusters(DBClusterIdentifier = dbi['DBClusterIdentifier'])['DBClusters'][0]
        instance['MultiAZ'] = cluster['MultiAZ']
    # Save
    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].instances[instance['name']] = instance
python
{ "resource": "" }
q31352
preprocessing
train
def preprocessing(aws_config, ip_ranges = [], ip_ranges_name_key = None):
    """
    Tweak the AWS config to match cross-service resources and clean any
    fetching artifacts.

    :param aws_config:         Full AWS configuration dictionary (mutated in place)
    :param ip_ranges:          Known IP ranges used to annotate CIDRs
    :param ip_ranges_name_key: Key under which the range's display name is stored
    :return:
    """
    # Resolve cross-service references first
    map_all_sgs(aws_config)
    map_all_subnets(aws_config)
    set_emr_vpc_ids(aws_config)
    #parse_elb_policies(aws_config)
    # Various data processing calls
    add_security_group_name_to_ec2_grants(aws_config['services']['ec2'], aws_config['aws_account_id'])
    process_cloudtrail_trails(aws_config['services']['cloudtrail'])
    add_cidr_display_name(aws_config, ip_ranges, ip_ranges_name_key)
    merge_route53_and_route53domains(aws_config)
    match_instances_and_roles(aws_config)
    match_iam_policies_and_buckets(aws_config)
    # Preprocessing dictated by metadata
    process_metadata_callbacks(aws_config)
python
{ "resource": "" }
q31353
process_vpc_peering_connections_callback
train
def process_vpc_peering_connections_callback(aws_config, current_config, path, current_path, pc_id, callback_args):
    """
    Create a list of peering connection IDs in each VPC.

    :param aws_config:     Full AWS configuration dictionary
    :param current_config: Configuration of the peering connection being processed
    :param path:           Remaining path components (unused here)
    :param current_path:   Path of the peering connection within aws_config
    :param pc_id:          ID of the peering connection
    :param callback_args:  Extra arguments (unused here)
    :return:
    """
    # Determine which side of the peering belongs to this account
    info = 'AccepterVpcInfo' if current_config['AccepterVpcInfo']['OwnerId'] == aws_config['aws_account_id'] else 'RequesterVpcInfo'
    region = current_path[current_path.index('regions')+1]
    vpc_id = current_config[info]['VpcId']
    target = aws_config['services']['vpc']['regions'][region]['vpcs'][vpc_id]
    manage_dictionary(target, 'peering_connections', [])
    if pc_id not in target['peering_connections']:
        target['peering_connections'].append(pc_id)
    # VPC information for the peer'd VPC
    current_config['peer_info'] = copy.deepcopy(current_config['AccepterVpcInfo' if info == 'RequesterVpcInfo' else 'RequesterVpcInfo'])
    if 'PeeringOptions' in current_config['peer_info']:
        current_config['peer_info'].pop('PeeringOptions')
    # Prefer the organization account name over the raw owner ID when known
    if 'organization' in aws_config and current_config['peer_info']['OwnerId'] in aws_config['organization']:
        current_config['peer_info']['name'] = aws_config['organization'][current_config['peer_info']['OwnerId']]['Name']
    else:
        current_config['peer_info']['name'] = current_config['peer_info']['OwnerId']
python
{ "resource": "" }
q31354
go_to_and_do
train
def go_to_and_do(aws_config, current_config, path, current_path, callback, callback_args=None):
    """
    Recursively walk aws_config along the given path and execute a callback at
    each target value.

    :param aws_config:      Whole AWS configuration dictionary
    :param current_config:  Subtree currently being visited (defaults to aws_config)
    :param path:            List of keys still to traverse; dotted keys ("a.b") expand
                            to successive lookups. Consumed (popped) by this function.
    :param current_path:    Keys traversed so far (context for callbacks)
    :param callback:        Function invoked at each target value
    :param callback_args:   Optional extra arguments forwarded to the callback
    :return:                None
    """
    # Pre-bind loop variables so the exception reporter below can never hit an
    # unbound local if the failure happens before the loop starts.
    i = value = key = None
    try:
        key = path.pop(0)
        if not current_config:
            current_config = aws_config
        if not current_path:
            current_path = []
        # Expand dotted keys ("a.b.c") into successive dictionary lookups
        keys = key.split('.')
        if len(keys) > 1:
            while True:
                key = keys.pop(0)
                if not len(keys):
                    break
                current_path.append(key)
                current_config = current_config[key]
        if key in current_config:
            current_path.append(key)
            for (i, value) in enumerate(list(current_config[key])):
                if len(path) == 0:
                    # Bug fix: the original tested type(current_config[key] == dict),
                    # i.e. the type of a boolean (always truthy). The intent is to
                    # check whether the container itself is a dict so scalar keys
                    # can be dereferenced before invoking the callback.
                    if type(current_config[key]) == dict and type(value) != dict and type(value) != list:
                        callback(aws_config, current_config[key][value], path, current_path, value, callback_args)
                    else:
                        callback(aws_config, current_config, path, current_path, value, callback_args)
                else:
                    tmp = copy.deepcopy(current_path)
                    try:
                        tmp.append(value)
                        go_to_and_do(aws_config, current_config[key][value], copy.deepcopy(path), tmp, callback, callback_args)
                    except Exception:
                        # Lists are indexed by position, not by value
                        tmp.pop()
                        tmp.append(i)
                        go_to_and_do(aws_config, current_config[key][i], copy.deepcopy(path), tmp, callback, callback_args)
    except Exception as e:
        # Narrowed from a bare except; report as much context as is available
        printException(e)
        if i is not None:
            printInfo('Index: %s' % str(i))
        printInfo('Path: %s' % str(current_path))
        printInfo('Key = %s' % str(key))
        printInfo('Value = %s' % str(value))
        printInfo('Path = %s' % path)
python
{ "resource": "" }
q31355
IsErrorSuppressedByNolint
train
def IsErrorSuppressedByNolint(category, linenum):
    """Returns true if the specified error category is suppressed on this line.

    Consults the global error_suppressions map populated by
    ParseNolintSuppressions/ResetNolintSuppressions.

    Args:
      category: str, the category of the error.
      linenum: int, the current line number.
    Returns:
      bool, True iff the error should be suppressed due to a NOLINT comment.
    """
    # A bare NOLINT (no category) is stored under the None key and suppresses
    # every category on that line.
    category_lines = _error_suppressions.get(category, set())
    blanket_lines = _error_suppressions.get(None, set())
    return linenum in category_lines or linenum in blanket_lines
python
{ "resource": "" }
q31356
Error
train
def Error(filename, linenum, category, confidence, message):
    """Logs a discovered lint error.

    Records where the error was found along with our confidence in it, i.e.
    how certain we are that this is a legitimate style regression rather than
    a misidentification or a sometimes-justified use.  False positives can be
    suppressed via "cpplint(category)" comments on the offending line, which
    are parsed into _error_suppressions.

    Args:
      filename: The name of the file containing the error.
      linenum: The number of the line containing the error.
      category: A string describing the "category" this bug falls under, e.g.
        "whitespace" or "runtime"; categories may form a slash-separated
        hierarchy such as "whitespace/indent".
      confidence: A number from 1-5 representing a confidence score, 5 meaning
        we are certain of the problem and 1 meaning it could be legitimate.
      message: The error message.
    """
    if not _ShouldPrintError(category, confidence, linenum):
        return
    _cpplint_state.IncrementErrorCount(category)
    # Pick the output template matching the configured format, then emit once.
    if _cpplint_state.output_format == 'vs7':
        template = '%s(%s): %s  [%s] [%d]\n'
    elif _cpplint_state.output_format == 'eclipse':
        template = '%s:%s: warning: %s  [%s] [%d]\n'
    else:
        template = '%s:%s: %s  [%s] [%d]\n'
    sys.stderr.write(template % (filename, linenum, message, category, confidence))
python
{ "resource": "" }
q31357
CheckHeaderFileIncluded
train
def CheckHeaderFileIncluded(filename, include_state, error):
    """Logs an error if a .cc file does not include its header."""
    # Do not check test files
    if filename.endswith(('_test.cc', '_unittest.cc')):
        return

    fileinfo = FileInfo(filename)
    # "foo.cc" -> "foo.h"
    headerfile = filename[:-2] + 'h'
    if not os.path.exists(headerfile):
        return
    headername = FileInfo(headerfile).RepositoryName()

    first_include = 0
    for section_list in include_state.include_list:
        for included in section_list:
            included_name, included_linenum = included[0], included[1]
            # Substring match in either direction counts as "header included"
            if headername in included_name or included_name in headername:
                return
            if not first_include:
                first_include = included_linenum

    error(filename, first_include, 'build/include', 5,
          '%s should include its header file %s' % (fileinfo.RepositoryName(),
                                                    headername))
python
{ "resource": "" }
q31358
CheckForBadCharacters
train
def CheckForBadCharacters(filename, lines, error):
    """Logs an error for each line containing bad characters.

    Two kinds of bad characters are flagged:

    1. Unicode replacement characters: these indicate that the file either
       contained invalid UTF-8 (likely) or literal Unicode replacement
       characters (which it shouldn't).  Note that invalid UTF-8 adjacent to a
       newline can throw off line numbering.

    2. NUL bytes, which are problematic for some tools.

    Args:
      filename: The name of the current file.
      lines: An array of strings, each representing a line of the file.
      error: The function to call with any errors found.
    """
    # (bad character, error category, message) — checked in this order per line
    bad_chars = (
        (u'\ufffd', 'readability/utf8',
         'Line contains invalid UTF-8 (or Unicode replacement character).'),
        ('\0', 'readability/nul', 'Line contains NUL byte.'),
    )
    for linenum, line in enumerate(lines):
        for char, category, message in bad_chars:
            if char in line:
                error(filename, linenum, category, 5, message)
python
{ "resource": "" }
q31359
CheckOperatorSpacing
train
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
    """Checks for horizontal spacing around operators.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Don't try to do spacing checks for operator methods. Do this by
    # replacing the troublesome characters with something else,
    # preserving column position for all other characters.
    #
    # The replacement is done repeatedly to avoid false positives from
    # operators that call operators.
    while True:
        match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
        if match:
            line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
        else:
            break

    # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
    # Otherwise not. Note we only check for non-spaces on *both* sides;
    # sometimes people put non-spaces on one side when aligning ='s among
    # many lines (not that this is behavior that I approve of...)
    if ((Search(r'[\w.]=', line) or Search(r'=[\w.]', line))
            and not Search(r'\b(if|while|for) ', line)
            # Operators taken from [lex.operators] in C++11 standard.
            and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
            and not Search(r'operator=', line)):
        error(filename, linenum, 'whitespace/operators', 4,
              'Missing spaces around =')

    # It's ok not to have spaces around binary operators like + - * /, but if
    # there's too little whitespace, we get concerned. It's hard to tell,
    # though, so we punt on this one for now. TODO.

    # You should always have whitespace around binary operators.
    #
    # Check <= and >= first to avoid false positives with < and >, then
    # check non-include lines for spacing around < and >.
    #
    # If the operator is followed by a comma, assume it's being used in a
    # macro context and don't do any checks. This avoids false
    # positives.
    #
    # Note that && is not included here. Those are checked separately
    # in CheckRValueReference
    match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around %s' % match.group(1))
    elif not Match(r'#.*include', line):
        # Look for < that is not surrounded by spaces. This is only
        # triggered if both sides are missing spaces, even though
        # technically it should flag if at least one side is missing a
        # space. This is done to avoid some false positives with shifts.
        match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
        if match:
            (_, _, end_pos) = CloseExpression(
                clean_lines, linenum, len(match.group(1)))
            if end_pos <= -1:
                error(filename, linenum, 'whitespace/operators', 3,
                      'Missing spaces around <')

        # Look for > that is not surrounded by spaces. Similar to the
        # above, we only trigger if both sides are missing spaces to avoid
        # false positives with shifts.
        match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
        if match:
            (_, _, start_pos) = ReverseCloseExpression(
                clean_lines, linenum, len(match.group(1)))
            if start_pos <= -1:
                error(filename, linenum, 'whitespace/operators', 3,
                      'Missing spaces around >')

    # We allow no-spaces around << when used like this: 10<<20, but
    # not otherwise (particularly, not when used as streams)
    #
    # We also allow operators following an opening parenthesis, since
    # those tend to be macros that deal with operators.
    match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
    if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
            not (match.group(1) == 'operator' and match.group(2) == ';')):
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <<')

    # We allow no-spaces around >> for almost anything. This is because
    # C++11 allows ">>" to close nested templates, which accounts for
    # most cases when ">>" is not followed by a space.
    #
    # We still warn on ">>" followed by alpha character, because that is
    # likely due to ">>" being used for right shifts, e.g.:
    #   value >> alpha
    #
    # When ">>" is used to close templates, the alphanumeric letter that
    # follows would be part of an identifier, and there should still be
    # a space separating the template type and the identifier.
    #   type<type<type>> alpha
    match = Search(r'>>[a-zA-Z_]', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >>')

    # There shouldn't be space around unary operators
    match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
    if match:
        error(filename, linenum, 'whitespace/operators', 4,
              'Extra space for operator %s' % match.group(1))
python
{ "resource": "" }
q31360
IsDeletedOrDefault
train
def IsDeletedOrDefault(clean_lines, linenum):
    """Check if the current constructor or operator is deleted or defaulted.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
    Returns:
      True if this is a deleted or default constructor.
    """
    paren_start = clean_lines.elided[linenum].find('(')
    if paren_start < 0:
        return False
    end_line, _, paren_end = CloseExpression(clean_lines, linenum, paren_start)
    if paren_end < 0:
        return False
    # "= delete" or "= default" must follow the closing parenthesis
    return Match(r'\s*=\s*(?:delete|default)\b', end_line[paren_end:])
python
{ "resource": "" }
q31361
IsRValueAllowed
train
def IsRValueAllowed(clean_lines, linenum, typenames): """Check if RValue reference is allowed on a particular line. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. typenames: set of type names from template-argument-list. Returns: True if line is within the region where RValue references are allowed. """ # Allow region marked by PUSH/POP macros for i in xrange(linenum, 0, -1): line = clean_lines.elided[i] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): if not line.endswith('PUSH'): return False for j in xrange(linenum, clean_lines.NumLines(), 1): line = clean_lines.elided[j] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): return line.endswith('POP') # Allow operator= line = clean_lines.elided[linenum] if Search(r'\boperator\s*=\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) # Allow constructors match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) if match and match.group(1) == match.group(2): return IsDeletedOrDefault(clean_lines, linenum) if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) if Match(r'\s*[\w<>]+\s*\(', line): previous_line = 'ReturnType' if linenum > 0: previous_line = clean_lines.elided[linenum - 1] if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): return IsDeletedOrDefault(clean_lines, linenum) # Reject types not mentioned in template-argument-list while line: match = Match(r'^.*?(\w+)\s*&&(.*)$', line) if not match: break if match.group(1) not in typenames: return False line = match.group(2) # All RValue types that were in template-argument-list should have # been removed by now. Those were allowed, assuming that they will # be forwarded. # # If there are no remaining RValue types left (i.e. types that were # not found in template-argument-list), flag those as not allowed. return line.find('&&') < 0
python
{ "resource": "" }
q31362
GetTemplateArgs
train
def GetTemplateArgs(clean_lines, linenum): """Find list of template arguments associated with this function declaration. Args: clean_lines: A CleansedLines instance containing the file. linenum: Line number containing the start of the function declaration, usually one line after the end of the template-argument-list. Returns: Set of type names, or empty set if this does not appear to have any template parameters. """ # Find start of function func_line = linenum while func_line > 0: line = clean_lines.elided[func_line] if Match(r'^\s*$', line): return set() if line.find('(') >= 0: break func_line -= 1 if func_line == 0: return set() # Collapse template-argument-list into a single string argument_list = '' match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) if match: # template-argument-list on the same line as function name start_col = len(match.group(1)) _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) if end_col > -1 and end_line == func_line: start_col += 1 # Skip the opening bracket argument_list = clean_lines.elided[func_line][start_col:end_col] elif func_line > 1: # template-argument-list one line before function name match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) if match: end_col = len(match.group(1)) _, start_line, start_col = ReverseCloseExpression( clean_lines, func_line - 1, end_col) if start_col > -1: start_col += 1 # Skip the opening bracket while start_line < func_line - 1: argument_list += clean_lines.elided[start_line][start_col:] start_col = 0 start_line += 1 argument_list += clean_lines.elided[func_line - 1][start_col:end_col] if not argument_list: return set() # Extract type names typenames = set() while True: match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', argument_list) if not match: break typenames.add(match.group(1)) argument_list = match.group(2) return typenames
python
{ "resource": "" }
q31363
CheckRValueReference
train
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
    """Check for rvalue references.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      nesting_state: A NestingState instance which maintains information about
                     the current stack of nested blocks being parsed.
      error: The function to call with any errors found.
    """
    # Find lines missing spaces around &&.
    # TODO(unknown): currently we don't check for rvalue references
    # with spaces surrounding the && to avoid false positives with
    # boolean expressions.
    line = clean_lines.elided[linenum]
    match = Match(r'^(.*\S)&&', line) or Match(r'(.*)&&\S', line)
    if not match or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
        return

    # Either poorly formed && or an rvalue reference; inspect the context to
    # decide whether what precedes "&&" is a type.
    typenames = GetTemplateArgs(clean_lines, linenum)
    and_pos = len(match.group(1))
    if not IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around &&')
    elif not IsRValueAllowed(clean_lines, linenum, typenames):
        error(filename, linenum, 'build/c++11', 3,
              'RValue references are an unapproved C++ feature.')
python
{ "resource": "" }
q31364
_DropCommonSuffixes
train
def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0]
python
{ "resource": "" }
q31365
CheckGlobalStatic
train
def CheckGlobalStatic(filename, clean_lines, linenum, error):
    """Check for unsafe global or static objects.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Match two lines at a time to support multiline declarations
    if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
        line += clean_lines.elided[linenum + 1].strip()

    # Check for people declaring static/global STL strings at the top level.
    # This is dangerous because the C++ language does not guarantee that
    # globals with constructors are initialized before the first access.
    match = Match(
        r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
        line)

    # Remove false positives:
    # - String pointers (as opposed to values).
    #    string *pointer
    #    const string *pointer
    #    string const *pointer
    #    string *const pointer
    #
    # - Functions and template specializations.
    #    string Function<Type>(...
    #    string Class<Type>::Method(...
    #
    # - Operators. These are matched separately because operator names
    #   cross non-word boundaries, and trying to match both operators
    #   and functions at the same time would decrease accuracy of
    #   matching identifiers.
    #    string Class::operator*()
    if (match and
            not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
            not Search(r'\boperator\W', line) and
            not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
        error(filename, linenum, 'runtime/string', 4,
              'For a static/global string constant, use a C style string instead: '
              '"%schar %s[]".' % (match.group(1), match.group(2)))

    # e.g. "Foo_(Foo_)" — a member initialized with itself
    if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
        error(filename, linenum, 'runtime/init', 4,
              'You seem to be initializing a member variable with itself.')
python
{ "resource": "" }
q31366
CheckCStyleCast
train
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
    """Checks for a C-style cast by looking for the pattern.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      cast_type: The string for the C++ cast to recommend. This is either
        reinterpret_cast, static_cast, or const_cast, depending.
      pattern: The regular expression used to find C-style casts.
      error: The function to call with any errors found.

    Returns:
      True if an error was emitted.
      False otherwise.
    """
    line = clean_lines.elided[linenum]
    match = Search(pattern, line)
    if not match:
        return False

    # Exclude lines with keywords that tend to look like casts
    context = line[0:match.start(1) - 1]
    if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
        return False

    # Try expanding current context to see if we're one level of
    # parentheses inside a macro.
    if linenum > 0:
        for i in xrange(linenum - 1, max(0, linenum - 5), -1):
            context = clean_lines.elided[i] + context
    if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
        return False

    # operator++(int) and operator--(int)
    if context.endswith(' operator++') or context.endswith(' operator--'):
        return False

    # A single unnamed argument for a function tends to look like old
    # style cast. If we see those, don't issue warnings for deprecated
    # casts, instead issue warnings for unnamed arguments where
    # appropriate.
    #
    # These are things that we want warnings for, since the style guide
    # explicitly require all parameters to be named:
    #   Function(int);
    #   Function(int) {
    #   ConstMember(int) const;
    #   ConstMember(int) const {
    #   ExceptionMember(int) throw (...);
    #   ExceptionMember(int) throw (...) {
    #   PureVirtual(int) = 0;
    #   [](int) -> bool {
    #
    # These are functions of some sort, where the compiler would be fine
    # if they had named parameters, but people often omit those
    # identifiers to reduce clutter:
    #   (FunctionPointer)(int);
    #   (FunctionPointer)(int) = value;
    #   Function((function_pointer_arg)(int))
    #   Function((function_pointer_arg)(int), int param)
    #   <TemplateArgument(int)>;
    #   <(FunctionPointerTemplateArgument)(int)>;
    remainder = line[match.end(0):]
    if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
             remainder):
        # Looks like an unnamed parameter.

        # Don't warn on any kind of template arguments.
        if Match(r'^\s*>', remainder):
            return False

        # Don't warn on assignments to function pointers, but keep warnings for
        # unnamed parameters to pure virtual functions. Note that this pattern
        # will also pass on assignments of "0" to function pointers, but the
        # preferred values for those would be "nullptr" or "NULL".
        matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
        if matched_zero and matched_zero.group(1) != '0':
            return False

        # Don't warn on function pointer declarations. For this we need
        # to check what came before the "(type)" string.
        if Match(r'.*\)\s*$', line[0:match.start(0)]):
            return False

        # Don't warn if the parameter is named with block comments, e.g.:
        #  Function(int /*unused_param*/);
        raw_line = clean_lines.raw_lines[linenum]
        if '/*' in raw_line:
            return False

        # Passed all filters, issue warning here.
        error(filename, linenum, 'readability/function', 3,
              'All parameters should be named in a function')
        return True

    # At this point, all that should be left is actual casts.
    error(filename, linenum, 'readability/casting', 4,
          'Using C-style cast. Use %s<%s>(...) instead' %
          (cast_type, match.group(1)))
    return True
python
{ "resource": "" }
q31367
FilesBelongToSameModule
train
def FilesBelongToSameModule(filename_cc, filename_h):
    """Check if these two filenames belong to the same module.

    A 'module' here means: foo.h, foo-inl.h, foo.cc, foo_test.cc and
    foo_unittest.cc belong to the same module when they live in the same
    directory, and some/path/public/xyzzy pairs with some/path/internal/xyzzy.

    If filename_cc has a longer path than filename_h (e.g.
    '/absolute/path/to/base/sysinfo.cc' including 'base/sysinfo.h'), this
    function also produces the prefix the caller needs to open the header,
    since the real include paths are unknown here.

    Known bugs: tools/base/bar.cc and base/bar.h are considered the same
    module by this implementation, producing occasional false positives;
    this should be sufficiently rare in practice.

    Args:
      filename_cc: is the path for the .cc file
      filename_h: is the path for the header path

    Returns:
      Tuple with a bool and a string:
      bool: True if filename_cc and filename_h belong to the same module.
      string: the additional prefix needed to open the header file.
    """
    if not filename_cc.endswith('.cc'):
        return (False, '')
    filename_cc = filename_cc[:-len('.cc')]
    # Strip a test suffix, if any (check '_unittest' before '_test')
    for test_suffix in ('_unittest', '_test'):
        if filename_cc.endswith(test_suffix):
            filename_cc = filename_cc[:-len(test_suffix)]
            break
    for visibility_dir in ('/public/', '/internal/'):
        filename_cc = filename_cc.replace(visibility_dir, '/')

    if not filename_h.endswith('.h'):
        return (False, '')
    filename_h = filename_h[:-len('.h')]
    if filename_h.endswith('-inl'):
        filename_h = filename_h[:-len('-inl')]
    for visibility_dir in ('/public/', '/internal/'):
        filename_h = filename_h.replace(visibility_dir, '/')

    files_belong_to_same_module = filename_cc.endswith(filename_h)
    common_path = filename_cc[:-len(filename_h)] if files_belong_to_same_module else ''
    return files_belong_to_same_module, common_path
python
{ "resource": "" }
q31368
CheckForIncludeWhatYouUse
train
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
    """Reports for missing stl includes.

    This function will output warnings to make sure you are including the
    headers necessary for the stl containers and functions that you use.
    We only give one reason to include a header. For example, if you use
    both equal_to<> and less<> in a .h file, only one (the latter in the
    file) of these will be reported as a reason to include the <functional>.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      include_state: An _IncludeState instance.
      error: The function to call with any errors found.
      io: The IO factory to use to read the header file. Provided for unittest
          injection.
    """
    # A map of header name to (linenumber, template entity), e.g.
    # { '<functional>': (1219, 'less<>') }
    required = {}

    for linenum in xrange(clean_lines.NumLines()):
        line = clean_lines.elided[linenum]
        if not line or line[0] == '#':
            continue

        # String is special -- it is a non-templatized type in STL.
        matched = _RE_PATTERN_STRING.search(line)
        if matched:
            # Don't warn about strings in non-STL namespaces:
            # (We check only the first match per line; good enough.)
            prefix = line[:matched.start()]
            if prefix.endswith('std::') or not prefix.endswith('::'):
                required['<string>'] = (linenum, 'string')

        for pattern, template, header in _re_pattern_algorithm_header:
            if pattern.search(line):
                required[header] = (linenum, template)

        # The following check is just a speed up, no semantics are changed.
        if not '<' in line:  # Reduces the cpu time usage by skipping lines.
            continue

        for pattern, template, header in _re_pattern_templates:
            if pattern.search(line):
                required[header] = (linenum, template)

    # The policy is that if you #include something in foo.h you don't need to
    # include it again in foo.cc. Here, we will look at possible includes.
    # Let's flatten the include_state include_list and copy it into a dictionary.
    include_dict = dict([item for sublist in include_state.include_list
                         for item in sublist])

    # Did we find the header for this file (if any) and successfully load it?
    header_found = False

    # Use the absolute path so that matching works properly.
    abs_filename = FileInfo(filename).FullName()

    # For Emacs's flymake.
    # If cpplint is invoked from Emacs's flymake, a temporary file is generated
    # by flymake and that file name might end with '_flymake.cc'. In that case,
    # restore original file name here so that the corresponding header file can
    # be found.
    # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
    # instead of 'foo_flymake.h'
    abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

    # include_dict is modified during iteration (by UpdateIncludeState), so we
    # must iterate over a real copy of the keys. Bug fix: dict.keys() is a live
    # view in Python 3, not a copy — materialize it with list().
    header_keys = list(include_dict.keys())
    for header in header_keys:
        (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
        fullpath = common_path + header
        if same_module and UpdateIncludeState(fullpath, include_dict, io):
            header_found = True

    # If we can't find the header file for a .cc, assume it's because we don't
    # know where to look. In that case we'll give up as we're not sure they
    # didn't include it in the .h file.
    # TODO(unknown): Do a better job of finding .h files so we are confident
    # that not having the .h file means there isn't one.
    if filename.endswith('.cc') and not header_found:
        return

    # All the lines have been processed, report the errors found.
    for required_header_unstripped in required:
        template = required[required_header_unstripped][1]
        if required_header_unstripped.strip('<>"') not in include_dict:
            error(filename, required[required_header_unstripped][0],
                  'build/include_what_you_use', 4,
                  'Add #include ' + required_header_unstripped + ' for ' + template)
python
{ "resource": "" }
q31369
CheckDefaultLambdaCaptures
train
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
    """Check that default lambda captures are not used.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # A lambda introducer specifies a default capture if it starts with "[="
    # or if it starts with "[&" _not_ followed by an identifier.
    introducer = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
    if not introducer:
        return

    # Found a potential error; check what comes after the lambda-introducer.
    # If it's not an open parenthesis (lambda-declarator) or an open brace
    # (compound-statement), it's not a lambda.
    closed_line, _, close_pos = CloseExpression(
        clean_lines, linenum, len(introducer.group(1)))
    if close_pos < 0:
        return
    if Match(r'^\s*[{(]', closed_line[close_pos:]):
        error(filename, linenum, 'build/c++11', 4,  # 4 = high confidence
              'Default lambda captures are an unapproved C++ feature.')
python
{ "resource": "" }
q31370
ProcessLine
train
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
    """Processes a single line in the file.

    Args:
      filename: Filename of the file that is being processed.
      file_extension: The extension (dot not included) of the file.
      clean_lines: An array of strings, each representing a line of the file,
                   with comments stripped.
      line: Number of line being processed.
      include_state: An _IncludeState instance in which the headers are inserted.
      function_state: A _FunctionState instance which counts function lines, etc.
      nesting_state: A NestingState instance which maintains information about
                     the current stack of nested blocks being parsed.
      error: A callable to which errors are reported, which takes 4 arguments:
             filename, line number, error level, and message
      extra_check_functions: An array of additional check functions that will be
                             run on each source line. Each function takes 4
                             arguments: filename, clean_lines, line, error.
                             Defaults to no extra checks.
    """
    # Fix: the original used a mutable default ([]), which is shared across
    # calls; use None as the sentinel instead.
    if extra_check_functions is None:
        extra_check_functions = []
    raw_lines = clean_lines.raw_lines
    ParseNolintSuppressions(filename, raw_lines[line], line, error)
    nesting_state.Update(filename, clean_lines, line, error)
    CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error)
    # Inline assembly blocks are exempt from all further checks
    if nesting_state.InAsmBlock():
        return
    CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
    CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
    CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
    CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                  nesting_state, error)
    CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
    CheckForNonStandardConstructs(filename, clean_lines, line,
                                  nesting_state, error)
    CheckVlogArguments(filename, clean_lines, line, error)
    CheckPosixThreading(filename, clean_lines, line, error)
    CheckInvalidIncrement(filename, clean_lines, line, error)
    CheckMakePairUsesDeduction(filename, clean_lines, line, error)
    CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
    CheckRedundantVirtual(filename, clean_lines, line, error)
    CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
    for check_fn in extra_check_functions:
        check_fn(filename, clean_lines, line, error)
python
{ "resource": "" }
q31371
FlagCxx11Features
train
def FlagCxx11Features(filename, clean_lines, linenum, error): """Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Flag unapproved C++11 headers. include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name)
python
{ "resource": "" }
q31372
ProcessConfigOverrides
train
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Walks from the file's directory up toward the filesystem root, reading
  any CPPLINT.cfg found along the way.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """

  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
                _line_length = int(val)
            except ValueError:
                # Trailing newline added so this message does not run into
                # the next stderr line (all other writes end with '\n').
                sys.stderr.write('Line length must be numeric.\n')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # NOTE: loop variable renamed from 'filter' to avoid shadowing the builtin.
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
python
{ "resource": "" }
q31373
_CppLintState.PrintErrorCounts
train
def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count)
python
{ "resource": "" }
q31374
_FunctionState.Check
train
def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger))
python
{ "resource": "" }
q31375
FileInfo.RepositoryName
train
def RepositoryName(self): """FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. """ fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname
python
{ "resource": "" }
q31376
_NamespaceInfo.CheckEnd
train
def CheckEnd(self, filename, clean_lines, linenum, error):
  """Check end of namespace comments.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line where the namespace ends.
    error: The function to call with any errors found.
  """
  line = clean_lines.raw_lines[linenum]

  # Check how many lines is enclosed in this namespace.  Don't issue
  # warning for missing namespace comments if there aren't enough
  # lines.  However, do apply checks if there is already an end of
  # namespace comment and it's incorrect.
  #
  # TODO(unknown): We always want to check end of namespace comments
  # if a namespace is large, but sometimes we also want to apply the
  # check if a short namespace contained nontrivial things (something
  # other than forward declarations).  There is currently no logic on
  # deciding what these nontrivial things are, so this check is
  # triggered by namespace size only, which works most of the time.
  if (linenum - self.starting_linenum < 10
      and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
    return

  # Look for matching comment at end of namespace.
  #
  # Note that we accept C style "/* */" comments for terminating
  # namespaces, so that code that terminate namespaces inside
  # preprocessor macros can be cpplint clean.
  #
  # We also accept stuff like "// end of namespace <name>." with the
  # period at the end.
  #
  # Besides these, we don't accept anything else, otherwise we might
  # get false negatives when existing comment is a substring of the
  # expected namespace.
  if self.name:
    # Named namespace: the comment must repeat the namespace name.
    if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' +
                  re.escape(self.name) + r'[\*/\.\\\s]*$'),
                 line):
      error(filename, linenum, 'readability/namespace', 5,
            'Namespace should be terminated with "// namespace %s"' %
            self.name)
  else:
    # Anonymous namespace
    if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
      # If "// namespace anonymous" or "// anonymous namespace (more text)",
      # mention "// anonymous namespace" as an acceptable form
      if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
        error(filename, linenum, 'readability/namespace', 5,
              'Anonymous namespace should be terminated with "// namespace"'
              ' or "// anonymous namespace"')
      else:
        error(filename, linenum, 'readability/namespace', 5,
              'Anonymous namespace should be terminated with "// namespace"')
python
{ "resource": "" }
q31377
PDeque.append
train
def append(self, elem): """ Return new deque with elem as the rightmost element. >>> pdeque([1, 2]).append(3) pdeque([1, 2, 3]) """ new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem) return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
python
{ "resource": "" }
q31378
PDeque.extend
train
def extend(self, iterable): """ Return new deque with all elements of iterable appended to the right. >>> pdeque([1, 2]).extend([3, 4]) pdeque([1, 2, 3, 4]) """ new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable) return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
python
{ "resource": "" }
q31379
PDeque.count
train
def count(self, elem): """ Return the number of elements equal to elem present in the queue >>> pdeque([1, 2, 1]).count(1) 2 """ return self._left_list.count(elem) + self._right_list.count(elem)
python
{ "resource": "" }
q31380
PDeque.remove
train
def remove(self, elem): """ Return new deque with first element from left equal to elem removed. If no such element is found a ValueError is raised. >>> pdeque([2, 1, 2]).remove(2) pdeque([1, 2]) """ try: return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1) except ValueError: # Value not found in left list, try the right list try: # This is severely inefficient with a double reverse, should perhaps implement a remove_last()? return PDeque(self._left_list, self._right_list.reverse().remove(elem).reverse(), self._length - 1) except ValueError: raise ValueError('{0} not found in PDeque'.format(elem))
python
{ "resource": "" }
q31381
PDeque.rotate
train
def rotate(self, steps): """ Return deque with elements rotated steps steps. >>> x = pdeque([1, 2, 3]) >>> x.rotate(1) pdeque([3, 1, 2]) >>> x.rotate(-2) pdeque([3, 1, 2]) """ popped_deque = self.pop(steps) if steps >= 0: return popped_deque.extendleft(islice(self.reverse(), steps)) return popped_deque.extend(islice(self, -steps))
python
{ "resource": "" }
q31382
PMap.set
train
def set(self, key, val): """ Return a new PMap with key and val inserted. >>> m1 = m(a=1, b=2) >>> m2 = m1.set('a', 3) >>> m3 = m1.set('c' ,4) >>> m1 pmap({'a': 1, 'b': 2}) >>> m2 pmap({'a': 3, 'b': 2}) >>> m3 pmap({'a': 1, 'c': 4, 'b': 2}) """ return self.evolver().set(key, val).persistent()
python
{ "resource": "" }
q31383
PMap.update_with
train
def update_with(self, update_fn, *maps): """ Return a new PMap with the items in Mappings maps inserted. If the same key is present in multiple maps the values will be merged using merge_fn going from left to right. >>> from operator import add >>> m1 = m(a=1, b=2) >>> m1.update_with(add, m(a=2)) pmap({'a': 3, 'b': 2}) The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost. >>> m1 = m(a=1) >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3}) pmap({'a': 1}) """ evolver = self.evolver() for map in maps: for key, value in map.items(): evolver.set(key, update_fn(evolver[key], value) if key in evolver else value) return evolver.persistent()
python
{ "resource": "" }
q31384
rex
train
def rex(expr): """ Regular expression matcher to use together with transform functions """ r = re.compile(expr) return lambda key: isinstance(key, six.string_types) and r.match(key)
python
{ "resource": "" }
q31385
PBag.update
train
def update(self, iterable): """ Update bag with all elements in iterable. >>> s = pbag([1]) >>> s.update([1, 2]) pbag([1, 1, 2]) """ if iterable: return PBag(reduce(_add_to_counters, iterable, self._counts)) return self
python
{ "resource": "" }
q31386
PBag.remove
train
def remove(self, element): """ Remove an element from the bag. >>> s = pbag([1, 1, 2]) >>> s2 = s.remove(1) >>> s3 = s.remove(2) >>> s2 pbag([1, 2]) >>> s3 pbag([1, 1]) """ if element not in self._counts: raise KeyError(element) elif self._counts[element] == 1: newc = self._counts.remove(element) else: newc = self._counts.set(element, self._counts[element] - 1) return PBag(newc)
python
{ "resource": "" }
q31387
freeze
train
def freeze(o): """ Recursively convert simple Python containers into pyrsistent versions of those containers. - list is converted to pvector, recursively - dict is converted to pmap, recursively on values (but not keys) - set is converted to pset, but not recursively - tuple is converted to tuple, recursively. Sets and dict keys are not recursively frozen because they do not contain mutable data by convention. The main exception to this rule is that dict keys and set elements are often instances of mutable objects that support hash-by-id, which this function can't convert anyway. >>> freeze(set([1, 2])) pset([1, 2]) >>> freeze([1, {'a': 3}]) pvector([1, pmap({'a': 3})]) >>> freeze((1, [])) (1, pvector([])) """ typ = type(o) if typ is dict: return pmap(dict((k, freeze(v)) for k, v in six.iteritems(o))) if typ is list: return pvector(map(freeze, o)) if typ is tuple: return tuple(map(freeze, o)) if typ is set: return pset(o) return o
python
{ "resource": "" }
q31388
thaw
train
def thaw(o): """ Recursively convert pyrsistent containers into simple Python containers. - pvector is converted to list, recursively - pmap is converted to dict, recursively on values (but not keys) - pset is converted to set, but not recursively - tuple is converted to tuple, recursively. >>> from pyrsistent import s, m, v >>> thaw(s(1, 2)) set([1, 2]) >>> thaw(v(1, m(a=3))) [1, {'a': 3}] >>> thaw((1, v())) (1, []) """ if isinstance(o, PVector): return list(map(thaw, o)) if isinstance(o, PMap): return dict((k, thaw(v)) for k, v in o.iteritems()) if isinstance(o, PSet): return set(o) if type(o) is tuple: return tuple(map(thaw, o)) return o
python
{ "resource": "" }
q31389
plist
train
def plist(iterable=(), reverse=False): """ Creates a new persistent list containing all elements of iterable. Optional parameter reverse specifies if the elements should be inserted in reverse order or not. >>> plist([1, 2, 3]) plist([1, 2, 3]) >>> plist([1, 2, 3], reverse=True) plist([3, 2, 1]) """ if not reverse: iterable = list(iterable) iterable.reverse() return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST)
python
{ "resource": "" }
q31390
PSet.update
train
def update(self, iterable): """ Return a new PSet with elements in iterable added >>> s1 = s(1, 2) >>> s1.update([3, 4, 4]) pset([1, 2, 3, 4]) """ e = self.evolver() for element in iterable: e.add(element) return e.persistent()
python
{ "resource": "" }
q31391
PSet.remove
train
def remove(self, element): """ Return a new PSet with element removed. Raises KeyError if element is not present. >>> s1 = s(1, 2) >>> s1.remove(2) pset([1]) """ if element in self._map: return self.evolver().remove(element).persistent() raise KeyError("Element '%s' not present in PSet" % element)
python
{ "resource": "" }
q31392
PSet.discard
train
def discard(self, element): """ Return a new PSet with element removed. Returns itself if element is not present. """ if element in self._map: return self.evolver().remove(element).persistent() return self
python
{ "resource": "" }
q31393
maybe_parse_user_type
train
def maybe_parse_user_type(t): """Try to coerce a user-supplied type directive into a list of types. This function should be used in all places where a user specifies a type, for consistency. The policy for what defines valid user input should be clear from the implementation. """ is_type = isinstance(t, type) is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types) is_string = isinstance(t, string_types) is_iterable = isinstance(t, Iterable) if is_preserved: return [t] elif is_string: return [t] elif is_type and not is_iterable: return [t] elif is_iterable: # Recur to validate contained types as well. ts = t return tuple(e for t in ts for e in maybe_parse_user_type(t)) else: # If this raises because `t` cannot be formatted, so be it. raise TypeError( 'Type specifications must be types or strings. Input: {}'.format(t) )
python
{ "resource": "" }
q31394
_all_dicts
train
def _all_dicts(bases, seen=None): """ Yield each class in ``bases`` and each of their base classes. """ if seen is None: seen = set() for cls in bases: if cls in seen: continue seen.add(cls) yield cls.__dict__ for b in _all_dicts(cls.__bases__, seen): yield b
python
{ "resource": "" }
q31395
PythonPVector.tolist
train
def tolist(self): """ The fastest way to convert the vector into a python list. """ the_list = [] self._fill_list(self._root, self._shift, the_list) the_list.extend(self._tail) return the_list
python
{ "resource": "" }
q31396
PythonPVector._push_tail
train
def _push_tail(self, level, parent, tail_node): """ if parent is leaf, insert node, else does it map to an existing child? -> node_to_insert = push node one more level else alloc new path return node_to_insert placed in copy of parent """ ret = list(parent) if level == SHIFT: ret.append(tail_node) return ret sub_index = ((self._count - 1) >> level) & BIT_MASK # >>> if len(parent) > sub_index: ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node) return ret ret.append(self._new_path(level - SHIFT, tail_node)) return ret
python
{ "resource": "" }
q31397
_make_seq_field_type
train
def _make_seq_field_type(checked_class, item_type): """Create a subclass of the given checked class with the given item type.""" type_ = _seq_field_types.get((checked_class, item_type)) if type_ is not None: return type_ class TheType(checked_class): __type__ = item_type def __reduce__(self): return (_restore_seq_field_pickle, (checked_class, item_type, list(self))) suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class] TheType.__name__ = _types_to_names(TheType._checked_types) + suffix _seq_field_types[checked_class, item_type] = TheType return TheType
python
{ "resource": "" }
q31398
_sequence_field
train
def _sequence_field(checked_class, item_type, optional, initial): """ Create checked field for either ``PSet`` or ``PVector``. :param checked_class: ``CheckedPSet`` or ``CheckedPVector``. :param item_type: The required type for the items in the set. :param optional: If true, ``None`` can be used as a value for this field. :param initial: Initial value to pass to factory. :return: A ``field`` containing a checked class. """ TheType = _make_seq_field_type(checked_class, item_type) if optional: def factory(argument): if argument is None: return None else: return TheType.create(argument) else: factory = TheType.create return field(type=optional_type(TheType) if optional else TheType, factory=factory, mandatory=True, initial=factory(initial))
python
{ "resource": "" }
q31399
pset_field
train
def pset_field(item_type, optional=False, initial=()): """ Create checked ``PSet`` field. :param item_type: The required type for the items in the set. :param optional: If true, ``None`` can be used as a value for this field. :param initial: Initial value to pass to factory if no value is given for the field. :return: A ``field`` containing a ``CheckedPSet`` of the given type. """ return _sequence_field(CheckedPSet, item_type, optional, initial)
python
{ "resource": "" }