_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def do_api_calls_update_cache(self):
    """Query every configured AWS service per region and persist the results.

    Fetches Route53 records (when enabled), then per-region EC2, RDS and
    ElastiCache data, and finally writes the inventory and index cache files.
    """
    if self.route53_enabled:
        self.get_route53_records()

    for current_region in self.regions:
        self.get_instances_by_region(current_region)
        if self.rds_enabled:
            self.get_rds_instances_by_region(current_region)
        if self.elasticache_enabled:
            # Clusters and replication groups are separate API surfaces.
            self.get_elasticache_clusters_by_region(current_region)
            self.get_elasticache_replication_groups_by_region(current_region)

    self.write_to_cache(self.inventory, self.cache_path_cache)
    self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
    """Return an API connection object for *region*.

    Uses the Eucalyptus endpoint when configured, otherwise a regular
    AWS EC2 connection via ``connect_to_aws``.
    """
    if not self.eucalyptus:
        return self.connect_to_aws(ec2, region)
    # Eucalyptus clouds expose an EC2-compatible API at a fixed host;
    # pin the API version the rest of this script expects.
    connection = boto.connect_euca(host=self.eucalyptus_host)
    connection.APIVersion = '2010-08-31'
    return connection
def get_instances_by_region(self, region):
    """Fetch all EC2 instances in *region* and add them to the inventory.

    Honors ``ec2_instance_filters`` when present (one API call per filter
    key); on API failure exits via ``fail_with_error`` with an
    auth-specific or generic backend message.
    """
    try:
        conn = self.connect(region)

        if self.ec2_instance_filters:
            reservations = []
            for f_key, f_vals in self.ec2_instance_filters.items():
                reservations.extend(
                    conn.get_all_instances(filters={f_key: f_vals}))
        else:
            reservations = conn.get_all_instances()

        for reservation in reservations:
            for instance in reservation.instances:
                self.add_instance(instance, region)

    except boto.exception.BotoServerError as e:
        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        else:
            backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
            error = "Error connecting to %s backend.\n%s" % (backend, e.message)
        self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
    """Fetch all RDS database instances in *region* into the inventory.

    On API failure exits via ``fail_with_error``; auth failures and
    non-Forbidden errors produce tailored messages.
    """
    try:
        connection = self.connect_to_aws(rds, region)
        if connection:
            for db_instance in connection.get_all_dbinstances():
                self.add_rds_instance(db_instance, region)
    except boto.exception.BotoServerError as e:
        error = e.reason
        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        # "Forbidden" with valid credentials means RDS itself refused;
        # everything else is treated as a service outage.
        if not e.reason == "Forbidden":
            error = "Looks like AWS RDS is down:\n%s" % e.message
        self.fail_with_error(error, 'getting RDS instances')
def get_elasticache_replication_groups_by_region(self, region):
    ''' Makes an AWS API call to the list of ElastiCache replication groups
    in a particular region.'''

    # ElastiCache boto module doesn't provide a get_all_instances method,
    # that's why we need to call describe directly (it would be called by
    # the shorthand method anyway...)
    # BUGFIX: 'response' was previously left unbound when connect_to_region
    # returned a falsy connection, which made the second try block below
    # raise an uncaught NameError instead of skipping the region.
    response = None
    try:
        conn = elasticache.connect_to_region(region)
        if conn:
            response = conn.describe_replication_groups()

    except boto.exception.BotoServerError as e:
        error = e.reason

        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        if not e.reason == "Forbidden":
            error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
        self.fail_with_error(error, 'getting ElastiCache clusters')

    if response is None:
        # No connection could be established for this region; nothing to add.
        return

    try:
        # Boto also doesn't provide wrapper classes to ReplicationGroups
        # Because of that we can't make use of the get_list method in the
        # AWSQueryConnection. Let's do the work manually
        replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']

    except KeyError:
        error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
        self.fail_with_error(error, 'getting ElastiCache clusters')

    for replication_group in replication_groups:
        self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
    """Build a multi-line diagnostic message for EC2 authentication failures.

    Reports whether credential environment variables and boto config
    files were found, without revealing their contents.
    """
    lines = ["Authentication error retrieving ec2 inventory."]

    have_env_creds = (os.environ.get('AWS_ACCESS_KEY_ID') is not None
                      and os.environ.get('AWS_SECRET_ACCESS_KEY') is not None)
    if have_env_creds:
        lines.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
    else:
        lines.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')

    boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
    found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
    if found:
        lines.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(found))
    else:
        lines.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))

    return '\n'.join(lines)
def fail_with_error(self, err_msg, err_operation=None):
    """Write *err_msg* to stderr (for ansible-playbook to consume) and exit(1)."""
    message = err_msg
    if err_operation:
        message = 'ERROR: "{err_msg}", while: {err_operation}'.format(
            err_msg=err_msg, err_operation=err_operation)
    sys.stderr.write(message)
    sys.exit(1)
def add_rds_instance(self, instance, region):
    ''' Adds an RDS instance to the inventory and index, as long as it is
    addressable '''

    # Only want available instances unless all_rds_instances is True
    if not self.all_rds_instances and instance.status != 'available':
        return

    # Select the best destination address (endpoint host)
    dest = instance.endpoint[0]

    if not dest:
        # Skip instances we cannot address (e.g. private VPC subnet)
        return

    # Add to index: host address -> (region, instance id)
    self.index[dest] = [region, instance.id]

    # Inventory: Group by instance ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[instance.id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone
    if self.group_by_availability_zone:
        self.push(self.inventory, instance.availability_zone, dest)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, instance.availability_zone)
            self.push_group(self.inventory, 'zones', instance.availability_zone)

    # Inventory: Group by instance type (DB instance class)
    if self.group_by_instance_type:
        type_name = self.to_safe('type_' + instance.instance_class)
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (via the subnet group's VPC id)
    if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
        vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
        self.push(self.inventory, vpc_id_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'vpcs', vpc_id_name)

    # Inventory: Group by security group
    if self.group_by_security_group:
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)
        except AttributeError:
            # Older boto releases lack the security_group attribute.
            self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                            'Please upgrade boto >= 2.3.0.']))

    # Inventory: Group by engine (e.g. mysql, postgres)
    if self.group_by_rds_engine:
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))

    # Inventory: Group by parameter group
    if self.group_by_rds_parameter_group:
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))

    # Global Tag: all RDS instances
    self.push(self.inventory, 'rds', dest)

    # Record per-host variables for the _meta section.
    self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_elasticache_node(self, node, cluster, region):
    ''' Adds an ElastiCache node to the inventory and index, as long as
    it is addressable '''

    # Only want available nodes unless all_elasticache_nodes is True
    if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
        return

    # Select the best destination address
    dest = node['Endpoint']['Address']

    if not dest:
        # Skip nodes we cannot address (e.g. private VPC subnet)
        return

    # Synthetic node id: "<cluster id>_<node id>" made group-name safe.
    node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])

    # Add to index
    self.index[dest] = [region, node_id]

    # Inventory: Group by node ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[node_id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', node_id)

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone (taken from the cluster)
    if self.group_by_availability_zone:
        self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
            self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

    # Inventory: Group by node type
    if self.group_by_instance_type:
        type_name = self.to_safe('type_' + cluster['CacheNodeType'])
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (information not available in the current
    # AWS API version for ElastiCache)

    # Inventory: Group by security group
    if self.group_by_security_group:
        # Check for the existence of the 'SecurityGroups' key and also if
        # this key has some value. When the cluster is not placed in a SG
        # the query can return None here and cause an error.
        if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
            for security_group in cluster['SecurityGroups']:
                key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)

    # Inventory: Group by engine
    if self.group_by_elasticache_engine:
        self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))

    # Inventory: Group by parameter group (done at cluster level)

    # Inventory: Group by replication group (done at cluster level)

    # Inventory: Group by ElastiCache Cluster
    if self.group_by_elasticache_cluster:
        self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)

    # Global Tag: all ElastiCache nodes
    self.push(self.inventory, 'elasticache_nodes', dest)

    host_info = self.get_host_info_dict_from_describe_dict(node)

    # Merge with any hostvars already recorded for this address (e.g. from
    # the cluster-level pass) rather than overwriting them.
    if dest in self.inventory["_meta"]["hostvars"]:
        self.inventory["_meta"]["hostvars"][dest].update(host_info)
    else:
        self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
    ''' Adds an ElastiCache replication group to the inventory and index '''

    # Only want available clusters unless all_elasticache_replication_groups is True
    if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
        return

    # Select the best destination address (PrimaryEndpoint)
    dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']

    if not dest:
        # Skip clusters we cannot address (e.g. private VPC subnet)
        return

    # Add to index
    self.index[dest] = [region, replication_group['ReplicationGroupId']]

    # Inventory: Group by ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[replication_group['ReplicationGroupId']] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone (doesn't apply to replication groups)
    # Inventory: Group by node type (doesn't apply to replication groups)
    # Inventory: Group by VPC (information not available in the current
    # AWS API version for replication groups
    # Inventory: Group by security group (doesn't apply to replication groups)
    # Check this value in cluster level

    # Inventory: Group by engine (replication groups are always Redis)
    if self.group_by_elasticache_engine:
        self.push(self.inventory, 'elasticache_redis', dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_engines', 'redis')

    # Global Tag: all ElastiCache clusters
    # NOTE(review): unlike the other add_* helpers this pushes the
    # ReplicationGroupId rather than `dest` into the group -- confirm this
    # is intentional before relying on membership of this group.
    self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])

    host_info = self.get_host_info_dict_from_describe_dict(replication_group)

    self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
    """Populate ``self.route53_records``.

    Builds a map from each resource-record value to the set of domain
    names (with the trailing dot stripped) that point at it, skipping
    excluded zones.
    """
    r53_conn = route53.Route53Connection()
    zones = [
        zone for zone in r53_conn.get_zones()
        if zone.name[:-1] not in self.route53_excluded_zones
    ]

    self.route53_records = {}
    for zone in zones:
        for record_set in r53_conn.get_all_rrsets(zone.id):
            # Route53 returns fully-qualified names with a trailing dot.
            record_name = record_set.name
            record_name = record_name[:-1] if record_name.endswith('.') else record_name
            for resource in record_set.resource_records:
                self.route53_records.setdefault(resource, set()).add(record_name)
def get_instance_route53_names(self, instance):
    """Return the list of Route53 domain names pointing at *instance*.

    Checks the instance's public/private DNS names and IP addresses
    against the records collected by ``get_route53_records``; returns an
    empty list when nothing matches.
    """
    names = set()
    for attrib in ('public_dns_name', 'private_dns_name',
                   'ip_address', 'private_ip_address'):
        try:
            value = getattr(instance, attrib)
        except AttributeError:
            # Attribute availability varies across boto versions.
            continue
        if value in self.route53_records:
            names.update(self.route53_records[value])
    return list(names)
def get_host_info_dict_from_describe_dict(self, describe_dict):
    ''' Parses the dictionary returned by the API call into a flat list
    of parameters. This method should be used only when 'describe' is
    used directly because Boto doesn't provide specific classes. '''

    # I really don't agree with prefixing everything with 'ec2'
    # because EC2, RDS and ElastiCache are different services.
    # I'm just following the pattern used until now to not break any
    # compatibility.

    host_info = {}
    for key in describe_dict:
        value = describe_dict[key]
        # Normalized variable name: ec2_ prefix + snake_cased API key.
        key = self.to_safe('ec2_' + self.uncammelize(key))

        # Handle complex types

        # Target: Memcached Cache Clusters
        if key == 'ec2_configuration_endpoint' and value:
            host_info['ec2_configuration_endpoint_address'] = value['Address']
            host_info['ec2_configuration_endpoint_port'] = value['Port']

        # Target: Cache Nodes and Redis Cache Clusters (single node)
        if key == 'ec2_endpoint' and value:
            host_info['ec2_endpoint_address'] = value['Address']
            host_info['ec2_endpoint_port'] = value['Port']

        # Target: Redis Replication Groups
        if key == 'ec2_node_groups' and value:
            host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
            host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
            replica_count = 0
            for node in value[0]['NodeGroupMembers']:
                if node['CurrentRole'] == 'primary':
                    host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
                    host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                    host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
                elif node['CurrentRole'] == 'replica':
                    # Replicas get numbered variables: _0, _1, ...
                    host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
                    host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
                    host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
                    replica_count += 1

        # Target: Redis Replication Groups
        # NOTE(review): this `if` starts a fresh if/elif chain -- the elif
        # branches below do NOT chain to the three independent `if`s above.
        if key == 'ec2_member_clusters' and value:
            host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])

        # Target: All Cache Clusters
        elif key == 'ec2_cache_parameter_group':
            host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
            host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
            host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']

        # Target: Almost everything
        elif key == 'ec2_security_groups':

            # Skip if SecurityGroups is None
            # (it is possible to have the key defined but no value in it).
            if value is not None:
                sg_ids = []
                for sg in value:
                    sg_ids.append(sg['SecurityGroupId'])
                host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])

        # Target: Everything
        # Preserve booleans and integers
        elif type(value) in [int, bool]:
            host_info[key] = value

        # Target: Everything
        # Sanitize string values
        elif isinstance(value, six.string_types):
            host_info[key] = value.strip()

        # Target: Everything
        # Replace None by an empty string
        elif type(value) == type(None):
            host_info[key] = ''

        else:
            # Remove non-processed complex types
            pass

    return host_info
def get_host(self, host):
    """Return the variable dict for a single *host* (empty dict if unknown).

    Loads the index from cache on first use and refreshes the cache once
    when the host is not found before giving up.
    """
    if not self.index:
        # Index not loaded yet -- read it from the cache file.
        self.load_index_from_cache()

    if host not in self.index:
        # Maybe the cache is stale; refresh and retry once.
        self.do_api_calls_update_cache()
        if host not in self.index:
            # Host really does not exist (anymore).
            return {}

    region, instance_id = self.index[host]
    instance = self.get_instance(region, instance_id)
    return self.get_host_info_dict_from_instance(instance)
def push(self, my_dict, key, element):
    """Append *element* to group *key* in *my_dict*.

    Creates the group as a plain host list when missing; nested-group
    entries (dicts) get the element appended to their 'hosts' list.
    """
    group = my_dict.setdefault(key, [])
    if not isinstance(group, dict):
        group.append(element)
    else:
        group.setdefault('hosts', []).append(element)
def push_group(self, my_dict, key, element):
    """Record *element* as a child group of group *key* in *my_dict*.

    A plain-list entry is upgraded to the nested dict form
    ({'hosts': [...]}) first; duplicate children are not added twice.
    """
    parent_group = my_dict.setdefault(key, {})
    if not isinstance(parent_group, dict):
        # Upgrade a bare host list to the nested-group dict form.
        parent_group = my_dict[key] = {'hosts': parent_group}
    kids = parent_group.setdefault('children', [])
    if element not in kids:
        kids.append(element)
def load_inventory_from_cache(self):
    """Read the JSON inventory from the cache file into ``self.inventory``.

    BUGFIX: the file handle was previously opened and never closed; a
    context manager now releases it deterministically.
    """
    with open(self.cache_path_cache, 'r') as cache:
        self.inventory = json.loads(cache.read())
def load_index_from_cache(self):
    """Read the JSON index from the cache file into ``self.index``.

    BUGFIX: the file handle was previously opened and never closed; a
    context manager now releases it deterministically.
    """
    with open(self.cache_path_index, 'r') as cache:
        self.index = json.loads(cache.read())
def write_to_cache(self, data, filename):
    """Serialize *data* as pretty-printed, key-sorted JSON to *filename*.

    Uses a context manager so the file is flushed and closed even if the
    write raises (the original closed it only on the happy path).
    """
    json_data = json.dumps(data, sort_keys=True, indent=2)
    with open(filename, 'w') as cache:
        cache.write(json_data)
def to_safe(self, word):
    """Convert characters unusable in Ansible group names to underscores.

    Dashes are preserved unless ``replace_dash_in_groups`` is set.

    BUGFIX: the character-class fragments used ``"\\_"``/``"\\-"`` in
    non-raw strings, which are invalid escape sequences (a
    DeprecationWarning/SyntaxWarning on modern Python); raw strings keep
    the regex identical while silencing the warning.
    """
    regex = r"[^A-Za-z0-9\_"
    if not self.replace_dash_in_groups:
        regex += r"\-"
    return re.sub(regex + "]", "_", word)
def from_keyword_list(self, keyword_list, strictness=2, timeout=3):
    """
    Convert a list of keywords to sentence. The result is sometimes None
    :param list keyword_list: a list of string
    :param int | None strictness: None for highest strictness. 2 or 1 for a less strict POS matching
    :param float timeout: timeout of this function
    :return list of tuple: sentence generated

    >>> SentenceMaker().from_keyword_list(['Love', 'blind', 'trouble'])
    [('For', False), ('love', True), ('to', False), ('such', False), ('blind', True), ('we', False), ('must', False), ('turn', False), ('to', False), ('the', False), ('trouble', True)]
    """
    # POS-tag the keywords once up front so each candidate sentence can be
    # matched against the expected parts of speech.
    keyword_tags = nltk.pos_tag(keyword_list)
    start = time()
    # Keep sampling random tagged sentences until one accommodates every
    # keyword in order, or the timeout expires.
    while time() - start < timeout:
        index = 0
        output_list = []
        tagged_sent = self.random_sentences.get_tagged_sent()
        for word, tag in tagged_sent:
            if index >= len(keyword_tags):
                # All keywords placed -- mark which output words are keywords.
                return self.get_overlap(keyword_list, output_list, is_word_list=True)
            if self.match_pos(tag, keyword_tags[index][1], strictness=strictness):
                # Slot the next keyword into this POS-compatible position.
                output_list.append(keyword_tags[index][0])
                index += 1
            else:
                output_list.append(word)
    # Timed out without fitting all keywords into any sampled sentence.
    return []
def render_path(self) -> str:
    """Render path by filling the path template with video information.

    Looks up the filename template for the video's site, formats it with
    the video's metadata dict (unknown placeholders are left as their
    literal key), sanitizes the result, and joins it onto the download
    directory.

    :raises NoTemplateFoundError: when no template exists for the site.
    """
    # TODO: Fix defaults when date is not found (empty string or None)
    # https://stackoverflow.com/questions/23407295/default-kwarg-values-for-pythons-str-format-method
    from string import Formatter

    class UnseenFormatter(Formatter):
        # Formatter that leaves unknown {placeholders} as their literal
        # key instead of raising KeyError.
        def get_value(self, key, args, kwds):
            if isinstance(key, str):
                try:
                    return kwds[key]
                except KeyError:
                    return key
            else:
                return super().get_value(key, args, kwds)

    data = self.video.data
    site_name = data['site']
    try:
        template = self.templates[site_name]
    except KeyError:
        # No filename template configured for this site.
        raise NoTemplateFoundError
    fmt = UnseenFormatter()
    filename_raw = fmt.format(template, **data)
    # Strip characters that are not filesystem-safe.
    filename = clean_filename(filename_raw)
    path = os.path.join(self.download_dir, filename)
    return path
q39922 | PasswordAuthentication._expand_des_key | train | def _expand_des_key(key):
"""
Expand the key from a 7-byte password key into a 8-byte DES key
"""
key = key[:7] + b'\0' * (7 - len(key))
byte = struct.unpack_from('BBBBBBB', key)
s = struct.pack('B', ((byte[0] >> 1) & 0x7f) << 1)
s += struct.pack("B", ((byte[0] & 0x01) << 6 | ((byte[1] >> 2) & 0x3f)) << 1)
s += struct.pack("B", ((byte[1] & 0x03) << 5 | ((byte[2] >> 3) & 0x1f)) << 1)
s += struct.pack("B", ((byte[2] & 0x07) << 4 | ((byte[3] >> 4) & 0x0f)) << 1)
s += struct.pack("B", ((byte[3] & 0x0f) << 3 | ((byte[4] >> 5) & 0x07)) << 1)
s += struct.pack("B", ((byte[4] & 0x1f) << 2 | ((byte[5] >> 6) & 0x03)) << 1)
s += struct.pack("B", ((byte[5] & 0x3f) << 1 | ((byte[6] >> 7) & 0x01)) << 1)
s += struct.pack("B", (byte[6] & 0x7f) << 1)
return s | python | {
"resource": ""
} |
def get_lmv2_response(domain, username, password, server_challenge, client_challenge):
    """
    Computes an appropriate LMv2 response based on the supplied arguments
    The algorithm is based on jCIFS. The response is 24 bytes, with the 16 bytes of hash
    concatenated with the 8 byte client client_challenge

    Returns a ``(lmv2_response, session_key)`` tuple.
    """
    # NOTE(review): hmac/hashes appear to be the
    # cryptography.hazmat.primitives modules (HMAC takes a backend
    # argument) -- confirm against the file's imports.
    ntlmv2_hash = PasswordAuthentication.ntowfv2(domain, username, password.encode('utf-16le'))
    # HMAC-MD5 over server challenge || client challenge, keyed by the
    # NTLMv2 hash, per the NTLM LMv2 computation.
    hmac_context = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend())
    hmac_context.update(server_challenge)
    hmac_context.update(client_challenge)
    lmv2_hash = hmac_context.finalize()
    # The LMv2 master user session key is a HMAC MD5 of the NTLMv2 and LMv2 hash
    session_key = hmac.HMAC(ntlmv2_hash, hashes.MD5(), backend=default_backend())
    session_key.update(lmv2_hash)
    return lmv2_hash + client_challenge, session_key.finalize()
def xml(self, value):
    """Set new XML string"""
    # Keep the raw string and the parsed tree in sync; s2t parses the
    # string into a tree. NOTE(review): presumably the @xml.setter of a
    # property pair -- the decorator is not visible here.
    self._xml = value
    self._root = s2t(value)
def root(self, value):
    """Set new XML tree"""
    # Keep the serialized string and the tree in sync; t2s serializes the
    # tree back to a string. NOTE(review): presumably the @root.setter of
    # a property pair -- the decorator is not visible here.
    self._xml = t2s(value)
    self._root = value
def xsl_elements(self):
    """Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements"""
    def append_xsl_elements(xsl_elements, r, xsl):
        # Flush the accumulated XSL text into run *r* (if any), wrap it as
        # an XslElement, and return the reset (run, text) accumulator pair.
        if r is not None:
            r.xpath('.//w:t', namespaces=self.namespaces)[0].text = xsl
            xe = XslElement(r, logger=self.logger)
            xsl_elements.append(xe)
        return None, ''

    # Lazily computed and memoized on first access.
    if not getattr(self, '_xsl_elements', None):
        xsl_elements = []
        for p in self.root.xpath('.//w:p', namespaces=self.namespaces):
            xsl_r, xsl = None, ''
            for r in p:
                # find first XSL run and add all XSL meta text
                text = ''.join(t.text for t in r.xpath('.//w:t', namespaces=self.namespaces))
                if r.xpath('.//w:rPr/w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces):
                    xsl += text
                    if xsl_r is None and text:
                        xsl_r = r
                    else:
                        # Redundant XSL-styled run: its text was merged
                        # into the first run of the group, so drop it.
                        r.getparent().remove(r)
                elif text:
                    # A non-XSL run with text ends the current group.
                    xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
            # Flush any group still open at paragraph end.
            xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
        self._xsl_elements = xsl_elements
    return self._xsl_elements
def render_xsl(self, node, context):
    """Render every collected XSL element in place.

    *node* and *context* are accepted for interface compatibility but are
    not used here -- each element renders against its own run.
    """
    for element in self.xsl_elements:
        element.render(element.run)
def remove_style(self):
    """Strip every XSL run ``rStyle`` marker element from the tree."""
    query = './/w:rStyle[@w:val="%s"]' % self.style
    for marker in self.root.xpath(query, namespaces=self.namespaces):
        marker.getparent().remove(marker)
def render(self, xml, context, raise_on_errors=True):
    """Render xml string and apply XSLT transfomation with context

    When *xml* is non-empty: renders the XSL elements, wraps the document
    tree in an ``xsl:stylesheet``/``xsl:template`` skeleton, strips the
    XSL style markers, applies the resulting XSLT to *context*, and
    returns the serialized output.  XSLT errors are logged line by line
    and re-raised unless *raise_on_errors* is false.  An empty/falsy
    *xml* is returned unchanged.
    """
    if xml:
        self.xml = xml
        # render XSL
        self.render_xsl(self.root, context)
        # create root XSL sheet
        xsl_ns = self.namespaces['xsl']
        rootName = etree.QName(xsl_ns, 'stylesheet')
        root = etree.Element(rootName, nsmap={'xsl': xsl_ns})
        sheet = etree.ElementTree(root)
        template = etree.SubElement(root, etree.QName(xsl_ns, "template"), match='/')
        # put OpenOffice tree into XSLT sheet
        template.append(self.root)
        self.root = root
        # drop XSL styles
        self.remove_style()
        #self.debug(self.xml)
        try:
            # transform XSL
            xsl = etree.XSLT(self.root)
            self.root = xsl(context)
        except etree.Error as e:
            # log every entry of the lxml error log before deciding
            # whether to propagate
            for l in e.error_log:
                self.error("XSLT error at line %s col %s:" % (l.line, l.column))
                self.error(" message: %s" % l.message)
                self.error(" domain: %s (%d)" % (l.domain_name, l.domain))
                self.error(' type: %s (%d)' % (l.type_name, l.type))
                self.error(' level: %s (%d)' % (l.level_name, l.level))
                self.error(' filename: %s' % l.filename)
            if raise_on_errors:
                raise
        return self.xml
    else:
        return xml
def handle_image_posts(function=None):
    """
    Decorator for views that handles ajax image posts in base64 encoding, saving
    the image and returning the url
    """
    @wraps(function, assigned=available_attrs(function))
    def _wrapped_view(request, *args, **kwargs):
        # Only intercept requests whose Content-Type mentions an image;
        # everything else falls through to the wrapped view.
        if 'image' in request.META['CONTENT_TYPE']:
            # Body is a data URI ("data:...;base64,<payload>"): decode the
            # part after the first comma and store it under the
            # client-supplied X-File-Name inside images/aloha-uploads/.
            name = default_storage.save(os.path.join('images', 'aloha-uploads', request.META['HTTP_X_FILE_NAME']),
                ContentFile(base64.b64decode(request.body.split(",", 1)[1])))
            # Respond with the public URL of the stored file as plain text.
            return HttpResponse(posixpath.join(settings.MEDIA_URL, name), content_type="text/plain")
        else:
            return function(request, *args, **kwargs)
    return _wrapped_view
def verboselogs_class_transform(cls):
    """Make Pylint aware of our custom logger methods.

    Registers notice/spam/success/verbose as known methods on the
    RootLogger class node so Pylint stops flagging them as missing.
    """
    if cls.name != 'RootLogger':
        return
    for method_name in ('notice', 'spam', 'success', 'verbose'):
        cls.locals[method_name] = [scoped_nodes.Function(method_name, None)]
def verboselogs_module_transform(mod):
    """Make Pylint aware of our custom log levels.

    Registers NOTICE/SPAM/SUCCESS/VERBOSE as known constants on the
    ``logging`` module node.
    """
    if mod.name != 'logging':
        return
    for level_name in ('NOTICE', 'SPAM', 'SUCCESS', 'VERBOSE'):
        mod.locals[level_name] = [nodes.Const(level_name)]
def cache_etag(request, *argz, **kwz):
    '''Produce etag value for a cached page.
    Intended for usage in conditional views (@condition decorator).'''
    # Reuse precomputed view data when the caller passed it in.
    response, site, cachekey = kwz.get('_view_data') or initview(request)
    if not response:
        return None
    # Etag is an md5 over site id, cache key and the cached timestamp.
    tag_source = '{0}--{1}--{2}'.format(
        site.id if site else 'x', cachekey,
        response[1].strftime('%Y-%m-%d %H:%M:%S%z'))
    return fjcache.str2md5(tag_source)
def cache_last_modified(request, *argz, **kwz):
    '''Last modification date for a cached page.
    Intended for usage in conditional views (@condition decorator).'''
    # Reuse precomputed view data when the caller passed it in; the cached
    # response tuple is (response, last_modified).
    response, _site, _cachekey = kwz.get('_view_data') or initview(request)
    return response[1] if response else None
def blogroll(request, btype):
    'View that handles the generation of blogrolls.'
    # Serve straight from cache when a cached response exists.
    response, site, cachekey = initview(request)
    if response:
        return response[0]
    # Template name is derived from the blogroll type (e.g. opml/foaf).
    template = loader.get_template('feedjack/{0}.xml'.format(btype))
    ctx = dict()
    fjlib.get_extra_context(site, ctx)
    ctx = Context(ctx)
    response = HttpResponse(
        template.render(ctx), content_type='text/xml; charset=utf-8' )
    # Responses vary per requested host; then cache alongside the
    # last-modified stamp for the conditional-view helpers.
    patch_vary_headers(response, ['Host'])
    fjcache.cache_set(site, cachekey, (response, ctx_get(ctx, 'last_modified')))
    return response
def buildfeed(request, feedclass, **criterias):
    'View that handles the feeds.'
    # Fetch (or build) the cached view data once, then share it with the
    # etag/last-modified callables via functools.partial so the @condition
    # machinery does not recompute it per callback.
    view_data = initview(request)
    wrap = lambda func: ft.partial(func, _view_data=view_data, **criterias)
    return condition(
        etag_func=wrap(cache_etag),
        last_modified_func=wrap(cache_last_modified) )\
        (_buildfeed)(request, feedclass, view_data, **criterias)
def mainview(request, **criterias):
    'View that handles all page requests.'
    # Same wiring as buildfeed(): compute view data once and hand it to the
    # conditional-request callbacks through functools.partial.
    view_data = initview(request)
    wrap = lambda func: ft.partial(func, _view_data=view_data, **criterias)
    return condition(
        etag_func=wrap(cache_etag),
        last_modified_func=wrap(cache_last_modified) )\
        (_mainview)(request, view_data, **criterias)
def post(request):
    """ Create a Gallery """
    defaultname = 'New Gallery %i' % Gallery.objects.all().count()
    # Fall back to the JSON body for non form-encoded requests.
    data = request.POST or json.loads(request.body)['body']
    title = data.get('title', defaultname)
    description = data.get('description', '')
    security = int(data.get('security', Gallery.PUBLIC))
    # get_or_create keys on title; an existing gallery is updated in place.
    g, created = Gallery.objects.get_or_create(title=title)
    g.security = security
    g.description = description
    g.owner = request.user
    g.save()
    res = Result()
    res.append(g.json())
    res.message = 'Gallery created' if created else ''
    return JsonResponse(res.asDict())
def put(request, obj_id=None):
    """ Adds Image and Video objects to Gallery based on GUIDs """
    data = request.PUT or json.loads(request.body)['body']
    guids = data.get('guids', '').split(',')
    # Optional source gallery pk; when given, objects are moved out of it.
    move = data.get('from')
    security = request.PUT.get('security')
    gallery = Gallery.objects.get(pk=obj_id)
    if guids:
        objects = getObjectsFromGuids(guids)
        images = filter(lambda x: isinstance(x, Image), objects)
        videos = filter(lambda x: isinstance(x, Video), objects)
        gallery.images.add(*images)
        gallery.videos.add(*videos)
        if move:
            fromgallery = Gallery.objects.get(pk=move)
            fromgallery.images.remove(*images)
            fromgallery.videos.remove(*videos)
    if security is not None:
        gallery.security = json.loads(security)
        gallery.save()
        # Propagate the new security level down to child galleries.
        for child in gallery.gallery_set.all():
            child.security = gallery.security
            child.save()
    res = Result()
    res.append(gallery.json())
    return JsonResponse(res.asDict())
def delete(request, obj_id=None):
    """ Removes ImageVideo objects from Gallery """
    data = request.DELETE or json.loads(request.body)
    # Default to '' so a request without "guids" yields no objects instead
    # of raising AttributeError on None (matches the behaviour of put()).
    guids = data.get('guids', '').split(',')
    objects = getObjectsFromGuids(guids)
    gallery = Gallery.objects.get(pk=obj_id)
    LOGGER.info('{} removed {} from {}'.format(request.user.email, guids, gallery))
    for o in objects:
        if isinstance(o, Image):
            gallery.images.remove(o)
        elif isinstance(o, Video):
            gallery.videos.remove(o)
    res = Result()
    return JsonResponse(res.asDict())
def filterObjects(request, obj_id):
    """
    Filters Gallery for the requested ImageVideo objects. Returns a Result object with
    serialized objects
    """
    # An obj_id of 0 means "no specific gallery".
    if int(obj_id) == 0:
        obj = None
    else:
        obj = Gallery.objects.get(pk=obj_id)
    isanonymous = request.user.is_anonymous()
    # Anonymous users may only browse an existing, public gallery.
    if isanonymous and obj is None:
        LOGGER.warn('There was an anonymous access attempt from {} to {}'.format(getClientIP(request), obj))
        raise PermissionDenied()
    if isanonymous and obj and obj.security != Gallery.PUBLIC:
        LOGGER.warn('There was an anonymous access attempt from {} to {}'.format(getClientIP(request), obj))
        raise PermissionDenied()
    tags = json.loads(request.GET.get('filters', '[[]]'))
    more = json.loads(request.GET.get('more', 'false'))
    orderby = request.GET.get('orderby', request.user.frog_prefs.get().json()['orderby'])
    # Drop empty filter groups before delegating to the shared _filter helper.
    tags = [t for t in tags if t]
    return _filter(request, obj, tags=tags, more=more, orderby=orderby)
def _sortObjects(orderby='created', **kwargs):
    """Sorts lists of objects and combines them into a single list"""
    # Flatten every supplied iterable and drop duplicates.
    combined = {obj for iterable in kwargs.values() for obj in iterable}
    merged = list(combined)
    keyfunc = _sortByCreated if orderby == 'created' else _sortByModified
    if six.PY2:
        merged.sort(keyfunc)
    else:
        merged.sort(key=functools.cmp_to_key(keyfunc))
    return merged
q39943 | _sortByCreated | train | def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0 | python | {
"resource": ""
} |
q39944 | _sortByModified | train | def _sortByModified(a, b):
"""Sort function for object by modified date"""
if a.modified < b.modified:
return 1
elif a.modified > b.modified:
return -1
else:
return 0 | python | {
"resource": ""
} |
def search(query, model):
    """ Performs a search query and returns the object ids """
    query = query.strip()
    LOGGER.debug(query)
    sqs = SearchQuerySet()
    # Progressively loosen wildcard matching until something hits:
    # prefix match first, then suffix match, then substring match.
    results = sqs.raw_search('{}*'.format(query)).models(model)
    if not results:
        results = sqs.raw_search('*{}'.format(query)).models(model)
    if not results:
        results = sqs.raw_search('*{}*'.format(query)).models(model)
    return [o.pk for o in results]
def find(whatever=None, language=None, iso639_1=None,
         iso639_2=None, native=None):
    """Find data row with the language.
    :param whatever: key to search in any of the following fields
    :param language: key to search in English language name
    :param iso639_1: key to search in ISO 639-1 code (2 digits)
    :param iso639_2: key to search in ISO 639-2 code (3 digits,
        bibliographic & terminological)
    :param native: key to search in native language name
    :return: a dict with keys (u'name', u'iso639_1', u'iso639_2_b',
        u'iso639_2_t', u'native'), or None when nothing matches
    All arguments can be both string or unicode (Python 2).
    If there are multiple names defined, any of these can be looked for.
    """
    # Pick the field set to search based on which argument was supplied.
    if whatever:
        keys = [u'name', u'iso639_1', u'iso639_2_b', u'iso639_2_t', u'native']
        val = whatever
    elif language:
        keys = [u'name']
        val = language
    elif iso639_1:
        keys = [u'iso639_1']
        val = iso639_1
    elif iso639_2:
        keys = [u'iso639_2_b', u'iso639_2_t']
        val = iso639_2
    elif native:
        keys = [u'native']
        val = native
    else:
        raise ValueError('Invalid search criteria.')
    val = unicode(val).lower()
    # A field may hold several names separated by '; '; match any of them.
    return next((item for item in data if any(
        val in item[key].lower().split("; ") for key in keys)), None)
def to_iso639_1(key):
    """Find ISO 639-1 code for language specified by key.
    >>> to_iso639_1("swe")
    u'sv'
    >>> to_iso639_1("English")
    u'en'
    """
    record = find(whatever=key)
    if record is None:
        raise NonExistentLanguageError('Language does not exist.')
    return record[u'iso639_1']
def to_iso639_2(key, type='B'):
    """Find ISO 639-2 code for language specified by key.
    :param type: "B" - bibliographical (default), "T" - terminological
    >>> to_iso639_2("German")
    u'ger'
    >>> to_iso639_2("German", "T")
    u'deu'
    """
    if type not in ('B', 'T'):
        raise ValueError('Type must be either "B" or "T".')
    record = find(whatever=key)
    if record is None:
        raise NonExistentLanguageError('Language does not exist.')
    terminological = record[u'iso639_2_t']
    # Fall back to the bibliographic code when no terminological one exists.
    if type == 'T' and terminological:
        return terminological
    return record[u'iso639_2_b']
def to_name(key):
    """Find the English name for the language specified by key.
    >>> to_name('br')
    u'Breton'
    >>> to_name('sw')
    u'Swahili'
    """
    record = find(whatever=key)
    if record is None:
        raise NonExistentLanguageError('Language does not exist.')
    return record[u'name']
def to_native(key):
    """Find the native name for the language specified by key.
    >>> to_native('br')
    u'brezhoneg'
    >>> to_native('sw')
    u'Kiswahili'
    """
    record = find(whatever=key)
    if record is None:
        raise NonExistentLanguageError('Language does not exist.')
    return record[u'native']
def address_inline(request, prefix="", country_code=None, template_name="postal/form.html"):
    """ Displays postal address with localized fields """
    country_prefix = "country"
    prefix = request.POST.get('prefix', prefix)
    if prefix:
        country_prefix = prefix + '-country'
    # The submitted country determines which localized form class is built.
    country_code = request.POST.get(country_prefix, country_code)
    form_class = form_factory(country_code=country_code)
    if request.method == "POST":
        data = {}
        # Keep only non-empty submitted values as the form's initial data.
        for (key, val) in request.POST.items():
            if val is not None and len(val) > 0:
                data[key] = val
        data.update({country_prefix: country_code})
        form = form_class(prefix=prefix, initial=data)
    else:
        form = form_class(prefix=prefix)
    return render_to_string(template_name, RequestContext(request, {
        "form": form,
        "prefix": prefix,
    }))
def run_timeit(self, stmt, setup):
    """ Time *stmt* with timeit and record the per-call average.

    Runs ``self.timeit_repeat`` repetitions of ``self.timeit_number``
    executions, stores the average seconds per single call on
    ``self.time_average_seconds`` and returns it converted into
    human-friendly units via ``convert_time_units``.
    """
    _timer = timeit.Timer(stmt=stmt, setup=setup)
    trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
    # Average across repeats, then divide by the executions per repeat.
    self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
    # Convert into reasonable time units
    time_avg = convert_time_units(self.time_average_seconds)
    return time_avg
def _get_mx_exchanges(domain):
    """Fetch the MX records for the specified domain
    :param str domain: The domain to get the MX records for
    :rtype: list
    """
    dns_errors = (resolver.NoAnswer, resolver.NoNameservers,
                  resolver.NotAbsolute, resolver.NoRootSOA,
                  resolver.NXDOMAIN, resolver.Timeout)
    try:
        records = resolver.query(domain, 'MX')
    except dns_errors as error:
        LOGGER.error('Error querying MX for %s: %r', domain, error)
        return []
    # Exchange names come back with a trailing dot; strip it, lowercase.
    return [str(record.exchange).lower()[:-1] for record in records]
q39954 | _domain_check | train | def _domain_check(domain, domain_list, resolve):
"""Returns ``True`` if the ``domain`` is serviced by the ``domain_list``.
:param str domain: The domain to check
:param list domain_list: The domains to check for
:param bool resolve: Resolve the domain
:rtype: bool
"""
if domain in domain_list:
return True
if resolve:
for exchange in _get_mx_exchanges(domain):
for value in domain_list:
if exchange.endswith(value):
return True
return False | python | {
"resource": ""
} |
def normalize(email_address, resolve=True):
    """Return the normalized email address, removing provider-specific
    aliasing (plus addressing, throw-away dots, subdomain usernames).
    :param str email_address: The email address to normalize
    :param bool resolve: Resolve the domain via MX lookup when it is not
        one of the known provider domains
    :rtype: str
    """
    address = utils.parseaddr(email_address)
    local_part, domain_part = address[1].lower().split('@')
    # Plus addressing is supported by Microsoft domains and FastMail
    if domain_part in MICROSOFT_DOMAINS:
        if '+' in local_part:
            local_part = local_part.split('+')[0]
    # GMail supports plus addressing and throw-away period delimiters
    elif _is_gmail(domain_part, resolve):
        local_part = local_part.replace('.', '').split('+')[0]
    # Yahoo domain handling of - is like plus addressing
    elif _is_yahoo(domain_part, resolve):
        if '-' in local_part:
            local_part = local_part.split('-')[0]
    # FastMail has domain part username aliasing and plus addressing
    elif _is_fastmail(domain_part, resolve):
        # user@alias.fastmail.com style: first domain segment is the user.
        domain_segments = domain_part.split('.')
        if len(domain_segments) > 2:
            local_part = domain_segments[0]
            domain_part = '.'.join(domain_segments[1:])
        elif '+' in local_part:
            local_part = local_part.split('+')[0]
    return '@'.join([local_part, domain_part])
def validate(filename, verbose=False):
    """
    Validate file and return JSON result as dictionary.
    "filename" can be a file name or an HTTP URL.
    Return "" if the validator does not return valid JSON.
    Raise OSError if curl command returns an error status.
    """
    # is_css = filename.endswith(".css")
    is_remote = filename.startswith("http://") or filename.startswith(
        "https://")
    # Remote documents are downloaded into a temp file first so the same
    # file-upload code path handles both local files and URLs.
    with tempfile.TemporaryFile() if is_remote else open(
            filename, "rb") as f:
        if is_remote:
            r = requests.get(filename, verify=False)
            f.write(r.content)
            f.seek(0)
        # if is_css:
        #     cmd = (
        #         "curl -sF \"file=@%s;type=text/css\" -F output=json -F warning=0 %s"
        #         % (quoted_filename, CSS_VALIDATOR_URL))
        #     _ = cmd
        # else:
        r = requests.post(
            HTML_VALIDATOR_URL,
            files={"file": (filename, f, "text/html")},
            data={
                "out": "json",
                "showsource": "yes",
            },
            verify=False)
    return r.json()
def main():
    """Parse the command line and run the validator.

    Exits with the number of validation errors (capped at 255).
    """
    parser = argparse.ArgumentParser(
        description="[v" + __version__ + "] " + __doc__,
        prog="w3c_validator",
    )
    parser.add_argument(
        "--log",
        default="INFO",
        # Bug fix: the help text previously read "DEBUG, INFO or INFO".
        help=("log level: DEBUG, INFO, WARNING or ERROR "
              "(default: INFO)"))
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument(
        "--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument(
        "source", metavar="F", type=str, nargs="+", help="file or URL")
    args = parser.parse_args()
    logging.basicConfig(level=getattr(logging, args.log))
    LOGGER.info("Files to validate: \n  {0}".format("\n  ".join(args.source)))
    LOGGER.info("Number of files: {0}".format(len(args.source)))
    errors = 0
    warnings = 0
    for f in args.source:
        LOGGER.info("validating: %s ..." % f)
        retrys = 0
        # Retry a couple of times: the remote validator sometimes returns
        # an empty/invalid response transiently.
        while retrys < 2:
            result = validate(f, verbose=args.verbose)
            if result:
                break
            time.sleep(2)
            retrys += 1
            LOGGER.info("retrying: %s ..." % f)
        else:
            LOGGER.info("failed: %s" % f)
            errors += 1
            continue
        # import pdb; pdb.set_trace()
        if f.endswith(".css"):
            errorcount = result["cssvalidation"]["result"]["errorcount"]
            warningcount = result["cssvalidation"]["result"]["warningcount"]
            errors += errorcount
            warnings += warningcount
            if errorcount > 0:
                LOGGER.info("errors: %d" % errorcount)
            if warningcount > 0:
                LOGGER.info("warnings: %d" % warningcount)
        else:
            for msg in result["messages"]:
                print_msg(msg)
                if msg["type"] == "error":
                    errors += 1
                else:
                    warnings += 1
    # Exit status is limited to one byte on POSIX.
    sys.exit(min(errors, 255))
def format_info_response(value):
    """Format the response from redis
    :param str value: The return response from redis
    :rtype: dict
    """
    parsed = {}
    for raw_line in value.decode('utf-8').splitlines():
        # Skip blank lines and section headers ("# Server", ...).
        if not raw_line or raw_line.startswith('#'):
            continue
        if ':' not in raw_line:
            continue
        key, _, field = raw_line.partition(':')
        parsed[key] = parse_info_value(field)
    return parsed
def intersect(self, other):
    """ Computes the multiset intersection, between the current Multicolor and the supplied Multicolor
    :param other: another Multicolor object to compute a multiset intersection with
    :return: a new Multicolor containing each shared color with multiplicity
        equal to the minimum of the two multiplicities
    :raise TypeError: an intersection can be computed only between two Multicolor objects
    """
    if not isinstance(other, Multicolor):
        raise TypeError("Multicolor can be intersected only with another Multicolor object")
    # Colors present in both multisets; multiplicity is the pairwise minimum.
    intersection_colors_core = self.colors.intersection(other.colors)
    colors_count = {color: min(self.multicolors[color], other.multicolors[color]) for color in intersection_colors_core}
    # Expand each color `count` times so the Multicolor constructor rebuilds
    # the multiset with the right multiplicities.
    return Multicolor(*(color for color in colors_count for _ in range(colors_count[color])))
def text2wngram(text, output_file, n=3, chars=63636363, words=9090909, compress=False, verbosity=2):
    """
    List of every word n-gram which occurred in the text, along with its number of occurrences.
    The maximum numbers of charactors and words that can be stored in the buffer are given by the chars and words parameters.

    :raises ConversionError: if the external text2wngram tool exits non-zero.
    """
    cmd = ['text2wngram']
    if n:
        cmd.extend(['-n', n])
    if chars:
        cmd.extend(['-chars', chars])
    if words:
        cmd.extend(['-words', words])
    if compress:
        cmd.append('-compress')
    if verbosity:
        cmd.extend(['-verbosity', verbosity])
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    # Feed the text via a temp file so subprocess can use it as stdin.
    with tempfile.SpooledTemporaryFile() as input_f:
        input_f.write(text.encode('utf-8') if sys.version_info >= (3,) and type(text) is str else text)
        input_f.seek(0)
        with open(output_file,'w+') as output_f:
            with output_to_debuglogger() as err_f:
                with do_in_tempdir():
                    exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
def ngram2mgram(input_file, output_file, n, m, words=False, ascii_idngram=False):
    """
    Takes either a word n-gram file, or an id n-gram file and outputs a file of the same type where m < n.

    :param words: input/output are word n-gram files
    :param ascii_idngram: input/output are ASCII id n-gram files
        (mutually exclusive with `words`; binary id n-grams by default)
    :raises ConversionError: if both format flags are set or the external
        tool exits non-zero.
    """
    cmd = ['ngram2mgram', '-n', n,
                          '-m', m]
    if words and ascii_idngram:
        raise ConversionError("Parameters 'words' and 'ascii_idngram' cannot both be True")
    if words:
        cmd.append('-words')
    elif ascii_idngram:
        cmd.append('-ascii')
    else:
        cmd.append('-binary')
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    with open(input_file,'r') as input_f:
        with open(output_file,'w+') as output_f:
            with output_to_debuglogger() as err_f:
                exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
def wngram2idngram(input_file, vocab_file, output_file, buffersize=100, hashtablesize=2000000, files=20, compress=False, verbosity=2, n=3, write_ascii=False, fof_size=10):
    """
    Takes a word N-gram file and a vocabulary file and lists every id n-gram which occurred in the text, along with its number of occurrences, in either ASCII or binary format.
    Note : It is important that the vocabulary file is in alphabetical order. If you are using vocabularies generated by wfreq2vocab then this should not be an issue, as they will already be alphabetically sorted.

    :returns: the tool's stdout, stripped.
    :raises ConversionError: if the external command exits non-zero.
    """
    cmd = ['wngram2idngram', '-vocab', os.path.abspath(vocab_file),
                             '-idngram', os.path.abspath(output_file)]
    if buffersize:
        cmd.extend(['-buffer', buffersize])
    if hashtablesize:
        cmd.extend(['-hash', hashtablesize])
    if files:
        cmd.extend(['-files', files])
    if verbosity:
        cmd.extend(['-verbosity', verbosity])
    if n:
        cmd.extend(['-n', n])
    if fof_size:
        cmd.extend(['-fof_size', fof_size])
    if compress:
        cmd.append('-compress')
    if write_ascii:
        cmd.append('-write_ascii')
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    with tempfile.SpooledTemporaryFile() as output_f:
        # Bug fix: feed the word n-gram *file* to the tool; the previous
        # implementation wrote an undefined variable `text` instead of
        # reading `input_file`.  Open it before chdir-ing to the temp dir
        # so a relative path still resolves.
        with open(input_file, 'rb') as input_f:
            with output_to_debuglogger() as err_f:
                with do_in_tempdir():
                    exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
        # Rewind before reading, otherwise the captured output is empty.
        output_f.seek(0)
        output = output_f.read()
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%r' returned with non-zero exit status '%s'" % (cmd, exitcode))
    if sys.version_info >= (3,) and type(output) is bytes:
        output = output.decode('utf-8')
    return output.strip()
def idngram2stats(input_file, output_file, n=3, fof_size=50, verbosity=2, ascii_input=False):
    """
    Lists the frequency-of-frequencies for each of the 2-grams, ... , n-grams, which can enable the user to choose appropriate cut-offs, and to specify appropriate memory requirements with the spec_num parameter in idngram2lm.

    :raises ConversionError: if the external command exits non-zero.
    """
    cmd = ['idngram2stats']
    if n:
        cmd.extend(['-n', n])
    if fof_size:
        # Bug fix: extend() takes a single iterable; this and the verbosity
        # branch were previously called as extend([...], value), a TypeError.
        cmd.extend(['-fof_size', fof_size])
    if verbosity:
        cmd.extend(['-verbosity', verbosity])
    if ascii_input:
        # Bug fix: append the flag string itself, not a one-element list.
        cmd.append('-ascii_input')
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    with open(input_file, 'r') as input_f:
        with open(output_file, 'w+') as output_f:
            with output_to_debuglogger() as err_f:
                exitcode = subprocess.call(cmd, stdin=input_f, stdout=output_f, stderr=err_f)
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
def binlm2arpa(input_file, output_file, verbosity=2):
    """
    Converts a binary format language model, as generated by idngram2lm, into an an ARPA format language model.

    :returns: the tool's stdout, stripped.
    :raises ConversionError: if the external command exits non-zero.
    """
    # Bug fix: the argument list previously read `'-arpa'. output_file`
    # (attribute access on a str literal - a runtime error) instead of a comma.
    cmd = ['binlm2arpa', '-binary', input_file,
                         '-arpa', output_file]
    if verbosity:
        cmd.extend(['-verbosity', verbosity])
    # Ensure that every parameter is of type 'str'
    cmd = [str(x) for x in cmd]
    with tempfile.SpooledTemporaryFile() as output_f:
        with output_to_debuglogger() as err_f:
            exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f)
        # Rewind before reading, otherwise the captured output is empty.
        output_f.seek(0)
        output = output_f.read()
    logger = logging.getLogger(__name__)
    logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
    if exitcode != 0:
        raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
    if sys.version_info >= (3,) and type(output) is bytes:
        output = output.decode('utf-8')
    return output.strip()
def text2vocab(text, output_file, text2wfreq_kwargs={}, wfreq2vocab_kwargs={}):
    """
    Convenience function chaining text2wfreq and wfreq2vocab to build a
    vocabulary file directly from text.
    """
    # Reserve a temp file name for the intermediate word-frequency list.
    with tempfile.NamedTemporaryFile(suffix='.wfreq', delete=False) as handle:
        wfreq_file = handle.name
    try:
        text2wfreq(text, wfreq_file, **text2wfreq_kwargs)
        wfreq2vocab(wfreq_file, output_file, **wfreq2vocab_kwargs)
    finally:
        # Always clean up the intermediate file, even on ConversionError.
        os.remove(wfreq_file)
def pfadd(self, key, *elements):
    """Adds all the element arguments to the HyperLogLog data structure
    stored at the variable name specified as first argument, creating an
    empty HyperLogLog structure if the specified key does not exist.

    Returns ``1`` if the approximated cardinality estimated by the
    HyperLogLog changed after executing the command, otherwise ``0``.
    Calling the command without elements is valid: it performs no
    operation if the variable already exists, or just creates the data
    structure (returning ``1``) if the key does not exist.

    For an introduction to HyperLogLog data structure check
    :meth:`~tredis.RedisClient.pfcount`.

    .. versionadded:: 0.2.0

    .. note:: **Time complexity**: ``O(1)`` to add every element.

    :param key: The key to add the elements to
    :type key: :class:`str`, :class:`bytes`
    :param elements: One or more elements to add
    :type elements: :class:`str`, :class:`bytes`
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    # PFADD replies with an integer; `1` is the expected success sentinel.
    return self._execute([b'PFADD', key] + list(elements), 1)
def pfmerge(self, dest_key, *keys):
    """Merge multiple HyperLogLog values into an unique value that will
    approximate the cardinality of the union of the observed Sets of the
    source HyperLogLog structures.

    The computed merged HyperLogLog is set to the destination variable,
    which is created if does not exist (defaulting to an empty
    HyperLogLog).

    .. versionadded:: 0.2.0

    .. note::

       **Time complexity**: ``O(N)`` to merge ``N`` HyperLogLogs, but
       with high constant times.

    :param dest_key: The destination key
    :type dest_key: :class:`str`, :class:`bytes`
    :param keys: One or more keys
    :type keys: :class:`str`, :class:`bytes`
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    # PFMERGE replies with a simple-string +OK on success.
    return self._execute([b'PFMERGE', dest_key] + list(keys), b'OK')
def get(cls, format):
    """
    Gets an emitter, returns the class and a content-type.

    :param format: registered emitter name ('json', 'xml', 'yaml', ...)
    :raises ValueError: if no emitter is registered under `format`.
    """
    try:
        # Single dict lookup; dict.has_key was removed in Python 3.
        return cls.EMITTERS[format]
    except KeyError:
        raise ValueError("No emitters found for type %s" % format)
"""
Register an emitter.
Parameters::
- `name`: The name of the emitter ('json', 'xml', 'yaml', ...)
- `klass`: The emitter class.
- `content_type`: The content type to serve response as.
"""
cls.EMITTERS[name] = (klass, content_type) | python | {
"resource": ""
} |
def determine_emitter(self, request, *args, **kwargs):
    """
    Decide which emitter to use for output.

    The `emitter_format` keyword argument wins; otherwise the request's
    `format` GET parameter is consulted, defaulting to 'json'.  It lives
    here so you can easily subclass `Resource` to change how emission is
    detected (e.g. to honour the HTTP `Accept` header; see `Mimer`).
    """
    requested = kwargs.pop('emitter_format', None)
    if requested:
        return requested
    return request.GET.get('format', 'json')
def form_validation_response(self, e):
    """
    Method to return form validation error information.
    You will probably want to override this in your own
    `Resource` subclass.
    """
    # 400 Bad Request with the form's field errors appended to the body.
    resp = rc.BAD_REQUEST
    resp.write(' '+str(e.form.errors))
    return resp
def cleanup_request(request):
    """
    Removes `oauth_` keys from various dicts on the
    request object, and returns the sanitized version.
    """
    for method_type in ('GET', 'PUT', 'POST', 'DELETE'):
        block = getattr(request, method_type, { })
        # Collect the keys first: popping while iterating keys() raises
        # RuntimeError on Python 3.
        oauth_keys = [k for k in block.keys() if k.startswith("oauth_")]
        if oauth_keys:
            sanitized = block.copy()
            for k in oauth_keys:
                sanitized.pop(k)
            setattr(request, method_type, sanitized)
    return request
def error_handler(self, e, request, meth, em_format):
    """
    Override this method to add handling of errors customized for your
    needs
    """
    if isinstance(e, FormValidationError):
        return self.form_validation_response(e)
    elif isinstance(e, TypeError):
        # A TypeError from the handler call usually means the caller's
        # parameters did not match the handler method's signature.
        result = rc.BAD_REQUEST
        hm = HandlerMethod(meth)
        sig = hm.signature
        msg = 'Method signature does not match.\n\n'
        if sig:
            msg += 'Signature should be: %s' % sig
        else:
            msg += 'Resource does not expect any parameters.'
        if self.display_errors:
            msg += '\n\nException was: %s' % str(e)
        result.content = format_error(msg)
        return result
    elif isinstance(e, Http404):
        return rc.NOT_FOUND
    elif isinstance(e, HttpStatusCode):
        return e.response
    else:
        """
        On errors (like code errors), we'd like to be able to
        give crash reports to both admins and also the calling
        user. There's two setting parameters for this:

        Parameters::
         - `PISTON_EMAIL_ERRORS`: Will send a Django formatted
           error email to people in `settings.ADMINS`.
         - `PISTON_DISPLAY_ERRORS`: Will return a simple traceback
           to the caller, so he can tell you what error they got.

        If `PISTON_DISPLAY_ERRORS` is not enabled, the caller will
        receive a basic "500 Internal Server Error" message.
        """
        exc_type, exc_value, tb = sys.exc_info()
        # tb.tb_next skips this error handler's own frame in the report.
        rep = ExceptionReporter(request, exc_type, exc_value, tb.tb_next)
        if self.email_errors:
            self.email_exception(rep)
        if self.display_errors:
            return HttpResponseServerError(
                format_error('\n'.join(rep.format_exception())))
        else:
            raise
def parse(path):
    """
    Parse a WordPress export (WXR) XML file and return its contents as a
    dict with the keys ``blog``, ``authors``, ``categories``, ``tags``
    and ``posts``.

    Example:

        wpparser.parse("./blog.wordpress.2014-09-26.xml")
    """
    root = ET.parse(path).getroot()
    channel = root.find("./channel")
    return {
        "blog": _parse_blog(channel),
        "authors": _parse_authors(channel),
        "categories": _parse_categories(channel),
        "tags": _parse_tags(channel),
        "posts": _parse_posts(channel),
    }
def _parse_authors(element):
    """
    Returns a well formatted list of users that can be matched against posts.
    """
    ns = WP_NAMESPACE
    authors = []
    for item in element.findall("./{%s}author" % ns):
        text_of = lambda tag: item.find("./{%s}%s" % (ns, tag)).text
        authors.append({
            "login": text_of("author_login"),
            "email": text_of("author_email"),
            "display_name": text_of("author_display_name"),
            "first_name": text_of("author_first_name"),
            "last_name": text_of("author_last_name"),
        })
    return authors
def _parse_categories(element):
    """
    Returns a list with categories with relations.
    """
    ns = WP_NAMESPACE
    reference = {}
    for item in element.findall("./{%s}category" % ns):
        text_of = lambda tag: item.find("./{%s}%s" % (ns, tag)).text
        nicename = text_of("category_nicename")
        reference[nicename] = {
            "term_id": text_of("term_id"),
            "nicename": nicename,
            "name": text_of("cat_name"),
            "parent": text_of("category_parent"),
        }
    # Turn the flat nicename->category mapping into a parent/children tree.
    return _build_category_tree(None, reference=reference)
q39977 | _build_category_tree | train | def _build_category_tree(slug, reference=None, items=None):
"""
Builds a recursive tree with category relations as children.
"""
if items is None:
items = []
for key in reference:
category = reference[key]
if category["parent"] == slug:
children = _build_category_tree(category["nicename"],
reference=reference)
category["children"] = children
items.append(category)
return items | python | {
"resource": ""
} |
def _parse_posts(element):
    """
    Returns a list with posts.
    """
    wp = lambda tag: "./{%s}%s" % (WP_NAMESPACE, tag)
    posts = []
    for item in element.findall("item"):
        # <category> elements carry both categories and tags, keyed by domain.
        categories, tags = [], []
        for category_item in item.findall("./category"):
            target = (categories if category_item.attrib["domain"] == "category"
                      else tags)
            target.append(category_item.attrib["nicename"])
        post = {
            "title": item.find("./title").text,
            "link": item.find("./link").text,
            "pub_date": item.find("./pubDate").text,
            "creator": item.find("./{%s}creator" % DC_NAMESPACE).text,
            "guid": item.find("./guid").text,
            "description": item.find("./description").text,
            "content": item.find("./{%s}encoded" % CONTENT_NAMESPACE).text,
            "excerpt": item.find("./{%s}encoded" % EXCERPT_NAMESPACE).text,
            "post_id": item.find(wp("post_id")).text,
            "post_date": item.find(wp("post_date")).text,
            "post_date_gmt": item.find(wp("post_date_gmt")).text,
            "status": item.find(wp("status")).text,
            "post_parent": item.find(wp("post_parent")).text,
            "menu_order": item.find(wp("menu_order")).text,
            "post_type": item.find(wp("post_type")).text,
            "post_name": item.find(wp("post_name")).text,
            "categories": categories,
            "is_sticky": item.find(wp("is_sticky")).text,
            "ping_status": item.find(wp("ping_status")).text,
            "post_password": item.find(wp("post_password")).text,
            "tags": tags,
            "postmeta": _parse_postmeta(item),
            "comments": _parse_comments(item),
        }
        posts.append(post)
    return posts
def _parse_postmeta(element):
    """
    Retrieve post metadata as a dictionary.

    Unserializes `_wp_attachment_metadata` values (PHP-serialized) into
    `attachment_metadata` and exposes `_wp_attached_file` verbatim as
    `attached_file`.
    """
    # Bug fix: the import previously preceded this docstring, turning it
    # into a bare expression statement instead of the function docstring.
    # Imported lazily so the rest of the parser works without phpserialize.
    import phpserialize
    metadata = {}
    fields = element.findall("./{%s}postmeta" % WP_NAMESPACE)
    for field in fields:
        key = field.find("./{%s}meta_key" % WP_NAMESPACE).text
        value = field.find("./{%s}meta_value" % WP_NAMESPACE).text
        if key == "_wp_attachment_metadata":
            stream = StringIO(value.encode())
            try:
                metadata["attachment_metadata"] = phpserialize.load(stream)
            except ValueError:
                # Best effort: skip values that fail to unserialize.
                pass
        if key == "_wp_attached_file":
            metadata["attached_file"] = value
    return metadata
def _parse_comments(element):
    """
    Return a list of comment dictionaries for a post element.
    """
    # Output key -> WXR element local name (inside the wp: namespace).
    field_map = (
        ("id", "comment_id"),
        ("author", "comment_author"),
        ("author_email", "comment_author_email"),
        ("author_url", "comment_author_url"),
        ("author_ip", "comment_author_IP"),
        ("date", "comment_date"),
        ("date_gmt", "comment_date_gmt"),
        ("content", "comment_content"),
        ("approved", "comment_approved"),
        ("type", "comment_type"),
        ("parent", "comment_parent"),
        ("user_id", "comment_user_id"),
    )
    comments = []
    for node in element.findall("./{%s}comment" % WP_NAMESPACE):
        comment = {}
        for out_key, tag in field_map:
            comment[out_key] = node.find("./{%s}%s" % (WP_NAMESPACE, tag)).text
        comments.append(comment)
    return comments
"resource": ""
} |
def open_umanager(self):
    """Open an uManager session on the device; no-op if already open.

    Raises Dam1021Error when no uManager prompt can be obtained.
    """
    if self.umanager_opened:
        return
    prompt_seen = lambda data: data.endswith(self.umanager_prompt)
    self.ser.write(self.cmd_umanager_invocation)
    # Optimistic approach first: assume uManager is not yet invoked.
    opened = self.read_loop(prompt_seen, self.timeout * self.umanager_waitcoeff)
    if not opened:
        # If we are already inside uManager, a bare CR yields a fresh prompt.
        self.ser.write(self.cr)
        opened = self.read_loop(prompt_seen, self.timeout)
    if not opened:
        raise Dam1021Error(1, "Failed to open uManager")
    self.umanager_opened = True
    log.debug("uManager opened")
"resource": ""
} |
def list_current_filter_set(self, raw=False):
    """Report the currently selected filter set.

    With ``raw`` set, the unprocessed response line is returned;
    otherwise it is run through ``filter_organizer`` first.
    Raises Dam1021Error when the device does not answer with a prompt
    (the uManager session is intentionally left open in that case).
    """
    captured = []
    self.open_umanager()
    self.ser.write(self.cmd_current_filter_list + self.cr)
    prompt_seen = lambda data: data.endswith(self.umanager_prompt)
    # Collector strips trailing whitespace plus the final character of
    # each response line handed over by read_loop.
    collector = lambda _buf, line, _extra: captured.append(line.rstrip()[:-1])
    if not self.read_loop(prompt_seen, self.timeout, collector):
        raise Dam1021Error(16, "Failed to list currently selected filter set")
    if raw:
        result = summary = captured[0]
    else:
        result, summary = self.filter_organizer(captured[0])
    self.close_umanager()
    log.info(summary)
    return result
"resource": ""
} |
def ltrim(self, key, start, stop):
    """Trim the list at ``key`` so only the range ``start..stop`` remains.

    :param key: The list's key
    :type key: :class:`str`, :class:`bytes`
    :param int start: zero-based index of the first element to retain
    :param int stop: zero-based index of the last element to retain
    :returns: did the operation succeed?
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.TRedisException`

    Both indexes may be negative, counting from the tail of the list
    (``-1`` is the last element, ``-2`` the penultimate, and so on).
    Out-of-range indexes do not raise: a `start` past the end of the
    list (or past `stop`) produces an empty list, which removes `key`;
    a `stop` past the end is treated as the last element.

    A common pattern pairs LTRIM with LPUSH / RPUSH to cap a list::

        client.lpush('mylist', 'somelement')
        client.ltrim('mylist', 0, 99)

    This pushes a new element while ensuring the list never grows past
    100 entries -- useful e.g. for log storage, and effectively ``O(1)``
    since on average only one element is dropped from the tail.

    .. note::

       Time complexity: ``O(N)`` where `N` is the number of elements
       removed by the operation.

    """
    command = [b'LTRIM', key, start, stop]
    return self._execute(command, b'OK')
"resource": ""
} |
def lpushx(self, key, *values):
    """Prepend ``values`` to the list at ``key``, only if it exists.

    :param key: The list's key
    :type key: :class:`str`, :class:`bytes`
    :param values: Values inserted one by one at the head of the list
    :returns: the length of the list after the pushes, or zero when
        ``key`` does not refer to a list
    :rtype: int
    :raises: :exc:`~tredis.exceptions.TRedisException`

    In contrast to :meth:`.lpush`, no operation is performed when
    ``key`` does not yet exist.

    .. note::

       **Time complexity**: ``O(1)``

    """
    command = [b'LPUSHX', key]
    command.extend(values)
    return self._execute(command)
"resource": ""
} |
def rpushx(self, key, *values):
    """Append ``values`` to the list at ``key``, only if it exists.

    :param key: The list's key
    :type key: :class:`str`, :class:`bytes`
    :param values: Values inserted at the tail of the list
    :returns: the length of the list after the pushes, or zero when
        ``key`` does not refer to a list
    :rtype: int
    :raises: :exc:`~tredis.exceptions.TRedisException`

    In contrast to :meth:`.rpush`, no operation is performed when
    ``key`` does not yet exist.

    .. note::

       **Time complexity**: ``O(1)``

    """
    command = [b'RPUSHX', key]
    command.extend(values)
    return self._execute(command)
"resource": ""
} |
def encode_date_optional_time(obj):
    """JSON ``default`` hook: ISO-8601 encode timezone-aware datetimes.

    Any other type raises TypeError, mirroring ``json.dumps`` behaviour.
    """
    if not isinstance(obj, datetime.datetime):
        raise TypeError("{0} is not JSON serializable".format(repr(obj)))
    utc = timezone("UTC")
    return utc.normalize(obj.astimezone(utc)).strftime('%Y-%m-%dT%H:%M:%SZ')
"resource": ""
} |
def file_handler(self, handler_type, path, prefixed_path, source_storage):
    """Queue a copy/link task for later concurrent processing.

    In "faster" mode, record the file (first writer wins) and enqueue
    the kwargs of the parent class' ``copy_file``/``link_file`` call;
    otherwise delegate to the parent implementation right away.
    """
    if not self.faster:
        if handler_type == 'link':
            super(Command, self).link_file(path, prefixed_path, source_storage)
        else:
            super(Command, self).copy_file(path, prefixed_path, source_storage)
        return
    if prefixed_path not in self.found_files:
        self.found_files[prefixed_path] = (source_storage, path)
    self.task_queue.put({
        'handler_type': handler_type,
        'path': path,
        'prefixed_path': prefixed_path,
        'source_storage': source_storage,
    })
    self.counter += 1
"resource": ""
} |
def delete_file(self, path, prefixed_path, source_storage):
    """Decide whether ``prefixed_path`` should be (re)written.

    Fast mode overrides every file anyway, so the existence checks of
    the parent implementation are skipped and True is always returned.
    """
    if not self.faster:
        return super(Command, self).delete_file(path, prefixed_path, source_storage)
    return True
"resource": ""
} |
def collect(self):
    """Run the stock collection, then drain queued tasks concurrently.

    In "faster" mode the queued copy/link tasks are handed to spawned
    workers before the post-processor runs.
    """
    result = super(Command, self).collect()
    if self.faster:
        self.worker_spawn_method()
        self.post_processor()
    return result
"resource": ""
} |
def _load_github_hooks(github_url='https://api.github.com'):
    """Fetch the webhook source IP blocks from GitHub's /meta endpoint.

    Returns the list of hook CIDR blocks.  Raises ServiceUnavailable on
    any failure; when GitHub rate-limits us, the message states when
    the rate limit resets.
    """
    try:
        response = requests.get(github_url + '/meta')
        if response.status_code == 200:
            return response.json()['hooks']
        if response.headers.get('X-RateLimit-Remaining') == '0':
            reset_ts = int(response.headers['X-RateLimit-Reset'])
            reset_string = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                         time.gmtime(reset_ts))
            raise ServiceUnavailable('Rate limited from GitHub until ' +
                                     reset_string)
        raise ServiceUnavailable('Error reaching GitHub')
    except (KeyError, ValueError, requests.exceptions.ConnectionError):
        # Malformed response payload/headers or network failure.
        raise ServiceUnavailable('Error reaching GitHub')
"resource": ""
} |
def is_github_ip(ip_str):
    """Verify that an IP address is owned by GitHub."""
    if isinstance(ip_str, bytes):
        ip_str = ip_str.decode()
    address = ipaddress.ip_address(ip_str)
    # The published hook blocks are IPv4; unwrap IPv4-mapped IPv6 input.
    if address.version == 6 and address.ipv4_mapped:
        address = address.ipv4_mapped
    return any(address in ipaddress.ip_network(block)
               for block in load_github_hooks())
"resource": ""
} |
def check_signature(signature, key, data):
    """Constant-time check of a GitHub ``X-Hub-Signature`` header.

    Computes ``sha1=<hex HMAC-SHA1>`` of *data* under *key* and compares
    it with *signature* without leaking timing information.  *key* and
    *signature* may be text or bytes.
    """
    if isinstance(key, type(u'')):
        key = key.encode()
    digest = 'sha1=' + hmac.new(key, data, hashlib.sha1).hexdigest()
    # Convert everything to byte sequences
    if isinstance(digest, type(u'')):
        digest = digest.encode()
    if isinstance(signature, type(u'')):
        signature = signature.encode()
    # hmac.compare_digest is the stdlib constant-time comparison;
    # werkzeug.security.safe_str_cmp was removed in Werkzeug 2.1.
    return hmac.compare_digest(digest, signature)
"resource": ""
} |
def init_app(self, app, url='/hooks'):
    """Register the webhook URL route on the application.

    :param app: the optional :class:`~flask.Flask` instance to
        register the extension
    :param url: the url that events will be posted to
    """
    app.config.setdefault('VALIDATE_IP', True)
    app.config.setdefault('VALIDATE_SIGNATURE', True)

    # NOTE: the inner function name doubles as the Flask endpoint name,
    # so it must stay `hook`.
    @app.route(url, methods=['POST'])
    def hook():
        if app.config['VALIDATE_IP'] and not is_github_ip(request.remote_addr):
            raise Forbidden('Requests must originate from GitHub')
        if app.config['VALIDATE_SIGNATURE']:
            key = app.config.get('GITHUB_WEBHOOKS_KEY', app.secret_key)
            signature = request.headers.get('X-Hub-Signature')
            # Werkzeug >= 0.9 exposes get_data(); older versions use .data
            payload = request.get_data() if hasattr(request, 'get_data') \
                else request.data
            if not signature:
                raise BadRequest('Missing signature')
            if not check_signature(signature, key, payload):
                raise BadRequest('Wrong signature')
        event = request.headers.get('X-GitHub-Event')
        guid = request.headers.get('X-GitHub-Delivery')
        if not event:
            raise BadRequest('Missing header: X-GitHub-Event')
        if not guid:
            raise BadRequest('Missing header: X-GitHub-Delivery')
        # Flask >= 0.10 exposes get_json(); older versions use .json
        data = request.get_json() if hasattr(request, 'get_json') else request.json
        if event in self._hooks:
            return self._hooks[event](data, guid)
        return 'Hook not used\n'
"resource": ""
} |
def register_hook(self, hook_name, fn):
    """Register a function to be called on a GitHub event."""
    if hook_name in self._hooks:
        raise Exception('%s hook already registered' % hook_name)
    self._hooks[hook_name] = fn
"resource": ""
} |
def hook(self, hook_name):
    """Decorator registering the wrapped function as an event handler.

    :param hook_name: the event to handle
    """
    def decorator(fn):
        self.register_hook(hook_name, fn)
        return fn
    return decorator
"resource": ""
} |
def send(self, *args):
    """
    Send each given frame over the socket, applying send hooks first.
    """
    for frame in args:
        payload = self.apply_send_hooks(frame, False).pack()
        self.sock.sendall(payload)
"resource": ""
} |
def queue_send(self, frame, callback=None, recv_callback=None):
    """
    Queue `frame` so it is transmitted on the next `do_async_send`.
    `callback`, if given, fires once the frame has been fully written;
    `recv_callback` is a shortcut for setting the `recv_callback`
    attribute at the same time.
    """
    hooked = self.apply_send_hooks(frame, False)
    self.sendbuf += hooked.pack()
    # Record where this frame ends inside the send buffer.
    self.sendbuf_frames.append([hooked, len(self.sendbuf), callback])
    if recv_callback:
        self.recv_callback = recv_callback
"resource": ""
} |
def do_async_send(self):
    """
    Flush queued frame data to the socket. Call only after a write
    event on the file descriptor; runs completion callbacks for every
    frame that has now been fully written.
    """
    assert len(self.sendbuf)
    nwritten = self.sock.send(self.sendbuf)
    # Count frames flushed completely; shift end offsets for the rest.
    completed = 0
    for entry in self.sendbuf_frames:
        frame, end_offset, callback = entry
        if end_offset <= nwritten:
            completed += 1
            if callback:
                callback()
        else:
            entry[1] -= nwritten
    self.sendbuf = self.sendbuf[nwritten:]
    self.sendbuf_frames = self.sendbuf_frames[completed:]
"resource": ""
} |
def do_async_recv(self, bufsize):
    """
    Receive and dispatch any completed frames from the socket. Call
    only after a read event on the file descriptor.
    """
    chunk = self.sock.recv(bufsize)
    if not chunk:
        # Peer closed the connection.
        raise socket.error('no data to receive')
    self.recvbuf += chunk
    while contains_frame(self.recvbuf):
        frame, self.recvbuf = pop_frame(self.recvbuf)
        frame = self.apply_recv_hooks(frame, False)
        if not self.recv_callback:
            raise ValueError('no callback installed for %s' % frame)
        self.recv_callback(frame)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.